/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "backref.h"
#include "extent_io.h"
/* TODO XXX FIXME
 *  - subvol delete -> delete when ref goes to 0? delete limits also?
 *  - copy also limits on subvol creation
 *  - caches for ulists
 *  - performance benchmarks
 *  - check all ioctl parameters
 */
/*
 * one struct for each qgroup, organized in fs_info->qgroup_tree.
 */
struct btrfs_qgroup {
	u64 qgroupid;

	/*
	 * state
	 */
	u64 rfer;	/* referenced */
	u64 rfer_cmpr;	/* referenced compressed */
	u64 excl;	/* exclusive */
	u64 excl_cmpr;	/* exclusive compressed */

	/*
	 * limits
	 */
	u64 lim_flags;	/* which limits are set */
	u64 max_rfer;
	u64 max_excl;
	u64 rsv_rfer;
	u64 rsv_excl;

	/*
	 * reservation tracking
	 */
	u64 reserved;

	/*
	 * lists
	 */
	struct list_head groups;  /* groups this group is member of */
	struct list_head members; /* groups that are members of this group */
	struct list_head dirty;   /* dirty groups */
	struct rb_node node;	  /* tree of qgroups */

	/*
	 * temp variables for accounting operations
	 */
	u64 tag;
	u64 refcnt;
};
/*
 * glue structure to represent the relations between qgroups.
 */
struct btrfs_qgroup_list {
	struct list_head next_group;
	struct list_head next_member;
	struct btrfs_qgroup *group;
	struct btrfs_qgroup *member;
};
struct qgroup_rescan {
	struct btrfs_work	work;
	struct btrfs_fs_info	*fs_info;
};
static void qgroup_rescan_start(struct btrfs_fs_info *fs_info,
				struct qgroup_rescan *qscan);
/* must be called with qgroup_ioctl_lock held */
static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
					   u64 qgroupid)
{
	struct rb_node *n = fs_info->qgroup_tree.rb_node;
	struct btrfs_qgroup *qgroup;

	while (n) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		if (qgroup->qgroupid < qgroupid)
			n = n->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			n = n->rb_right;
		else
			return qgroup;
	}
	return NULL;
}
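
/*
 * Both rbtree helpers order fs_info->qgroup_tree by qgroupid alone, so a
 * qgroupid uniquely identifies one qgroup within a filesystem.
 */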
/* must be called with qgroup_lock held */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
					  u64 qgroupid)
{
	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_qgroup *qgroup;

	while (*p) {
		parent = *p;
		qgroup = rb_entry(parent, struct btrfs_qgroup, node);

		if (qgroup->qgroupid < qgroupid)
			p = &(*p)->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			p = &(*p)->rb_right;
		else
			return qgroup;
	}

	qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
	if (!qgroup)
		return ERR_PTR(-ENOMEM);

	qgroup->qgroupid = qgroupid;
	INIT_LIST_HEAD(&qgroup->groups);
	INIT_LIST_HEAD(&qgroup->members);
	INIT_LIST_HEAD(&qgroup->dirty);

	rb_link_node(&qgroup->node, parent, p);
	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);

	return qgroup;
}
/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);
	struct btrfs_qgroup_list *list;

	if (!qgroup)
		return -ENOENT;

	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
	list_del(&qgroup->dirty);

	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}

	while (!list_empty(&qgroup->members)) {
		list = list_first_entry(&qgroup->members,
					struct btrfs_qgroup_list, next_member);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}
	kfree(qgroup);

	return 0;
}
/* must be called with qgroup_lock held */
static int add_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list = kzalloc(sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	list->group = parent;
	list->member = member;
	list_add_tail(&list->next_group, &member->groups);
	list_add_tail(&list->next_member, &parent->members);

	return 0;
}
/* must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			list_del(&list->next_group);
			list_del(&list->next_member);
			kfree(list);
			return 0;
		}
	}
	return -ENOENT;
}
/*
 * The full config is read in one go, only called from open_ctree().
 * It doesn't use any locking, as at this point we're still single-threaded.
 */
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path = NULL;
	struct extent_buffer *l;
	int slot;
	int ret = 0;
	u64 flags = 0;

	if (!fs_info->quota_enabled)
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/* default this to quota off, in case no status key is found */
	fs_info->qgroup_flags = 0;

	/*
	 * pass 1: read status, all qgroup infos and limits
	 */
	key.objectid = 0;
	key.type = 0;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
	if (ret)
		goto out;

	while (1) {
		struct btrfs_qgroup *qgroup;

		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
			struct btrfs_qgroup_status_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_status_item);

			if (btrfs_qgroup_status_version(l, ptr) !=
			    BTRFS_QGROUP_STATUS_VERSION) {
				printk(KERN_ERR
				 "btrfs: old qgroup version, quota disabled\n");
				goto out;
			}
			if (btrfs_qgroup_status_generation(l, ptr) !=
			    fs_info->generation) {
				flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
				printk(KERN_ERR
					"btrfs: qgroup generation mismatch, "
					"marked as inconsistent\n");
			}
			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
									  ptr);
			fs_info->qgroup_rescan_progress.objectid =
					btrfs_qgroup_status_rescan(l, ptr);
			if (fs_info->qgroup_flags &
			    BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
				struct qgroup_rescan *qscan =
					kmalloc(sizeof(*qscan), GFP_NOFS);
				if (!qscan) {
					ret = -ENOMEM;
					goto out;
				}
				fs_info->qgroup_rescan_progress.type = 0;
				fs_info->qgroup_rescan_progress.offset = 0;
				qgroup_rescan_start(fs_info, qscan);
			}
			goto next1;
		}

		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
			goto next1;

		qgroup = find_qgroup_rb(fs_info, found_key.offset);
		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
			printk(KERN_ERR "btrfs: inconsistent qgroup config\n");
			flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		}
		if (!qgroup) {
			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				goto out;
			}
		}
		switch (found_key.type) {
		case BTRFS_QGROUP_INFO_KEY: {
			struct btrfs_qgroup_info_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_info_item);
			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
			/* generation currently unused */
			break;
		}
		case BTRFS_QGROUP_LIMIT_KEY: {
			struct btrfs_qgroup_limit_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_limit_item);
			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
			break;
		}
		}
next1:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
	btrfs_release_path(path);

	/*
	 * pass 2: read all qgroup relations
	 */
	key.objectid = 0;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
	if (ret)
		goto out;
	while (1) {
		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
			goto next2;

		if (found_key.objectid > found_key.offset) {
			/* parent <- member, not needed to build config */
			/* FIXME should we omit the key completely? */
			goto next2;
		}

		ret = add_relation_rb(fs_info, found_key.objectid,
				      found_key.offset);
		if (ret == -ENOENT) {
			printk(KERN_WARNING
				"btrfs: orphan qgroup relation 0x%llx->0x%llx\n",
				(unsigned long long)found_key.objectid,
				(unsigned long long)found_key.offset);
			ret = 0;	/* ignore the error */
		}
		if (ret)
			goto out;
next2:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
out:
	fs_info->qgroup_flags |= flags;
	if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) {
		fs_info->quota_enabled = 0;
		fs_info->pending_quota_state = 0;
	}
	btrfs_free_path(path);

	return ret < 0 ? ret : 0;
}
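
/*
 * For reference, the quota tree keys consumed by the two passes above,
 * written as (objectid, type, offset):
 *   (0, BTRFS_QGROUP_STATUS_KEY, 0)        - one global status item
 *   (0, BTRFS_QGROUP_INFO_KEY, qgroupid)   - per-qgroup usage counters
 *   (0, BTRFS_QGROUP_LIMIT_KEY, qgroupid)  - per-qgroup limits
 *   (qgroupid, BTRFS_QGROUP_RELATION_KEY, qgroupid)
 *       - stored in both directions; pass 2 only uses the items with
 *         objectid <= offset.
 */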
/*
 * This is only called from close_ctree() or open_ctree(), both in single-
 * threaded paths. Clean up the in-memory structures. No locking needed.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *list;

	while ((n = rb_first(&fs_info->qgroup_tree))) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		rb_erase(n, &fs_info->qgroup_tree);

		while (!list_empty(&qgroup->groups)) {
			list = list_first_entry(&qgroup->groups,
						struct btrfs_qgroup_list,
						next_group);
			list_del(&list->next_group);
			list_del(&list->next_member);
			kfree(list);
		}

		while (!list_empty(&qgroup->members)) {
			list = list_first_entry(&qgroup->members,
						struct btrfs_qgroup_list,
						next_member);
			list_del(&list->next_group);
			list_del(&list->next_member);
			kfree(list);
		}
		kfree(qgroup);
	}
}
static int add_qgroup_relation_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *quota_root,
				    u64 src, u64 dst)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);

	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_free_path(path);
	return ret;
}
static int del_qgroup_relation_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *quota_root,
				    u64 src, u64 dst)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
out:
	btrfs_free_path(path);
	return ret;
}
static int add_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_qgroup_info_item *qgroup_info;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_info));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);

	btrfs_mark_buffer_dirty(leaf);

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_limit));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);

	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
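
/*
 * Note that creating a qgroup writes a pair of items: a zeroed info item
 * tracking usage and a zeroed limit item. del_qgroup_item below removes
 * the same pair.
 */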
static int del_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
	if (ret)
		goto out;

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);

out:
	btrfs_free_path(path);
	return ret;
}
static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root, u64 qgroupid,
				    u64 flags, u64 max_rfer, u64 max_excl,
				    u64 rsv_rfer, u64 rsv_excl)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_LIMIT_KEY;
	key.offset = qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_limit = btrfs_item_ptr(l, path->slots[0],
				      struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(l, qgroup_limit, flags);
	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, max_rfer);
	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, max_excl);
	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, rsv_rfer);
	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, rsv_excl);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}
static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_qgroup *qgroup)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_info_item *qgroup_info;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_info = btrfs_item_ptr(l, path->slots[0],
				     struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}
static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_status_item *ptr;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
	btrfs_set_qgroup_status_rescan(l, ptr,
				fs_info->qgroup_rescan_progress.objectid);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}
/*
 * called with qgroup_lock held
 */
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf = NULL;
	int ret;
	int nr = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	key.objectid = 0;
	key.offset = 0;
	key.type = 0;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			goto out;
		leaf = path->nodes[0];
		nr = btrfs_header_nritems(leaf);
		if (!nr)
			break;
		/*
		 * delete the leaf one by one
		 * since the whole tree is going
		 * to be deleted.
		 */
		path->slots[0] = 0;
		ret = btrfs_del_items(trans, root, path, 0, nr);
		if (ret)
			goto out;

		btrfs_release_path(path);
	}
	ret = 0;
out:
	root->fs_info->pending_quota_state = 0;
	btrfs_free_path(path);
	return ret;
}
int btrfs_quota_enable(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path = NULL;
	struct btrfs_qgroup_status_item *ptr;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_qgroup *qgroup = NULL;
	int ret = 0;
	int slot;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (fs_info->quota_root) {
		fs_info->pending_quota_state = 1;
		goto out;
	}

	/*
	 * initially create the quota tree
	 */
	quota_root = btrfs_create_tree(trans, fs_info,
				       BTRFS_QUOTA_TREE_OBJECTID);
	if (IS_ERR(quota_root)) {
		ret = PTR_ERR(quota_root);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out_free_root;
	}

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*ptr));
	if (ret)
		goto out_free_path;

	leaf = path->nodes[0];
	ptr = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
				BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);

	btrfs_mark_buffer_dirty(leaf);

	key.objectid = 0;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = 0;

	btrfs_release_path(path);
	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
	if (ret > 0)
		goto out_add_root;
	if (ret < 0)
		goto out_free_path;

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.type == BTRFS_ROOT_REF_KEY) {
			ret = add_qgroup_item(trans, quota_root,
					      found_key.offset);
			if (ret)
				goto out_free_path;

			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				goto out_free_path;
			}
		}
		ret = btrfs_next_item(tree_root, path);
		if (ret < 0)
			goto out_free_path;
		if (ret)
			break;
	}

out_add_root:
	btrfs_release_path(path);
	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
	if (ret)
		goto out_free_path;

	qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
	if (IS_ERR(qgroup)) {
		ret = PTR_ERR(qgroup);
		goto out_free_path;
	}
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_root = quota_root;
	fs_info->pending_quota_state = 1;
	spin_unlock(&fs_info->qgroup_lock);
out_free_path:
	btrfs_free_path(path);
out_free_root:
	if (ret) {
		free_extent_buffer(quota_root->node);
		free_extent_buffer(quota_root->commit_root);
		kfree(quota_root);
	}
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
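
/*
 * On enable, every subvolume that already exists gets an (empty) qgroup:
 * the scan over BTRFS_ROOT_REF_KEY items above catches all subvolumes
 * referenced from the tree root, and BTRFS_FS_TREE_OBJECTID covers the
 * top-level tree itself. The new state only becomes effective via
 * pending_quota_state at transaction commit (see btrfs_run_qgroups).
 */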
int btrfs_quota_disable(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *quota_root;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root)
		goto out;
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_enabled = 0;
	fs_info->pending_quota_state = 0;
	quota_root = fs_info->quota_root;
	fs_info->quota_root = NULL;
	btrfs_free_qgroup_config(fs_info);
	spin_unlock(&fs_info->qgroup_lock);

	ret = btrfs_clean_quota_tree(trans, quota_root);
	if (ret)
		goto out;

	ret = btrfs_del_root(trans, tree_root, &quota_root->root_key);
	if (ret)
		goto out;

	list_del(&quota_root->dirty_list);

	btrfs_tree_lock(quota_root->node);
	clean_tree_block(trans, tree_root, quota_root->node);
	btrfs_tree_unlock(quota_root->node);
	btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);

	free_extent_buffer(quota_root->node);
	free_extent_buffer(quota_root->commit_root);
	kfree(quota_root);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
static void qgroup_dirty(struct btrfs_fs_info *fs_info,
			 struct btrfs_qgroup *qgroup)
{
	if (list_empty(&qgroup->dirty))
		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}
int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}
	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* check if such a qgroup relation exists already */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			ret = -EEXIST;
			goto out;
		}
	}

	ret = add_qgroup_relation_item(trans, quota_root, src, dst);
	if (ret)
		goto out;

	ret = add_qgroup_relation_item(trans, quota_root, dst, src);
	if (ret) {
		del_qgroup_relation_item(trans, quota_root, src, dst);
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	ret = add_relation_rb(quota_root->fs_info, src, dst);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	int ret = 0;
	int err;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* check if such a qgroup relation exists already */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent)
			goto exist;
	}
	ret = -ENOENT;
	goto out;
exist:
	ret = del_qgroup_relation_item(trans, quota_root, src, dst);
	err = del_qgroup_relation_item(trans, quota_root, dst, src);
	if (err && !ret)
		ret = err;

	spin_lock(&fs_info->qgroup_lock);
	del_relation_rb(fs_info, src, dst);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info, u64 qgroupid, char *name)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}
	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (qgroup) {
		ret = -EEXIST;
		goto out;
	}

	ret = add_qgroup_item(trans, quota_root, qgroupid);
	if (ret)
		goto out;

	spin_lock(&fs_info->qgroup_lock);
	qgroup = add_qgroup_rb(fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);

	if (IS_ERR(qgroup))
		ret = PTR_ERR(qgroup);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	} else {
		/* check if there are no relations to this qgroup */
		if (!list_empty(&qgroup->groups) ||
		    !list_empty(&qgroup->members)) {
			ret = -EBUSY;
			goto out;
		}
	}
	ret = del_qgroup_item(trans, quota_root, qgroupid);

	spin_lock(&fs_info->qgroup_lock);
	del_qgroup_rb(quota_root->fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info, u64 qgroupid,
		       struct btrfs_qgroup_limit *limit)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	}
	ret = update_qgroup_limit_item(trans, quota_root, qgroupid,
				       limit->flags, limit->max_rfer,
				       limit->max_excl, limit->rsv_rfer,
				       limit->rsv_excl);
	if (ret) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		printk(KERN_INFO "unable to update quota limit for %llu\n",
		       (unsigned long long)qgroupid);
	}

	spin_lock(&fs_info->qgroup_lock);
	qgroup->lim_flags = limit->flags;
	qgroup->max_rfer = limit->max_rfer;
	qgroup->max_excl = limit->max_excl;
	qgroup->rsv_rfer = limit->rsv_rfer;
	qgroup->rsv_excl = limit->rsv_excl;
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
/*
 * btrfs_qgroup_record_ref is called when the ref is added or deleted. It puts
 * the modification into a list that's later used by btrfs_end_transaction to
 * pass the recorded modifications on to btrfs_qgroup_account_ref.
 */
int btrfs_qgroup_record_ref(struct btrfs_trans_handle *trans,
			    struct btrfs_delayed_ref_node *node,
			    struct btrfs_delayed_extent_op *extent_op)
{
	struct qgroup_update *u;

	BUG_ON(!trans->delayed_ref_elem.seq);
	u = kmalloc(sizeof(*u), GFP_NOFS);
	if (!u)
		return -ENOMEM;

	u->node = node;
	u->extent_op = extent_op;
	list_add_tail(&u->list, &trans->qgroup_ref_list);

	return 0;
}
static int qgroup_account_ref_step1(struct btrfs_fs_info *fs_info,
				    struct ulist *roots, struct ulist *tmp,
				    u64 seq)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct ulist_node *tmp_unode;
	struct ulist_iterator tmp_uiter;
	struct btrfs_qgroup *qg;
	int ret;

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(roots, &uiter))) {
		qg = find_qgroup_rb(fs_info, unode->val);
		if (!qg)
			continue;

		ulist_reinit(tmp);
						/* XXX id not needed */
		ret = ulist_add(tmp, qg->qgroupid,
				(u64)(uintptr_t)qg, GFP_ATOMIC);
		if (ret < 0)
			return ret;
		ULIST_ITER_INIT(&tmp_uiter);
		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
			struct btrfs_qgroup_list *glist;

			qg = (struct btrfs_qgroup *)(uintptr_t)tmp_unode->aux;
			if (qg->refcnt < seq)
				qg->refcnt = seq + 1;
			else
				++qg->refcnt;

			list_for_each_entry(glist, &qg->groups, next_group) {
				ret = ulist_add(tmp, glist->group->qgroupid,
						(u64)(uintptr_t)glist->group,
						GFP_ATOMIC);
				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}
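
/*
 * After step 1, qg->refcnt - seq equals the number of old roots that can
 * reach a given qgroup (the caller advanced fs_info->qgroup_seq far
 * enough that stale refcnt values from earlier rounds always compare as
 * unvisited). Steps 2 and 3 use this count to tell exclusive extents
 * from shared ones.
 */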
static int qgroup_account_ref_step2(struct btrfs_fs_info *fs_info,
				    struct ulist *roots, struct ulist *tmp,
				    u64 seq, int sgn, u64 num_bytes,
				    struct btrfs_qgroup *qgroup)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct btrfs_qgroup *qg;
	struct btrfs_qgroup_list *glist;
	int ret;

	ulist_reinit(tmp);
	ret = ulist_add(tmp, qgroup->qgroupid, (uintptr_t)qgroup, GFP_ATOMIC);
	if (ret < 0)
		return ret;

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(tmp, &uiter))) {
		qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
		if (qg->refcnt < seq) {
			/* not visited by step 1 */
			qg->rfer += sgn * num_bytes;
			qg->rfer_cmpr += sgn * num_bytes;
			if (roots->nnodes == 0) {
				qg->excl += sgn * num_bytes;
				qg->excl_cmpr += sgn * num_bytes;
			}
			qgroup_dirty(fs_info, qg);
		}
		WARN_ON(qg->tag >= seq);
		qg->tag = seq;

		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(tmp, glist->group->qgroupid,
					(uintptr_t)glist->group, GFP_ATOMIC);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}
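
/*
 * Step 2 also tagged every qgroup reachable from the new root with
 * qg->tag = seq; step 3 skips tagged qgroups, so only qgroups that are
 * not above the new root have their exclusive counters corrected.
 */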
static int qgroup_account_ref_step3(struct btrfs_fs_info *fs_info,
				    struct ulist *roots, struct ulist *tmp,
				    u64 seq, int sgn, u64 num_bytes)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct btrfs_qgroup *qg;
	struct ulist_node *tmp_unode;
	struct ulist_iterator tmp_uiter;
	int ret;

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(roots, &uiter))) {
		qg = find_qgroup_rb(fs_info, unode->val);
		if (!qg)
			continue;

		ulist_reinit(tmp);
		ret = ulist_add(tmp, qg->qgroupid, (uintptr_t)qg, GFP_ATOMIC);
		if (ret < 0)
			return ret;

		ULIST_ITER_INIT(&tmp_uiter);
		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
			struct btrfs_qgroup_list *glist;

			qg = (struct btrfs_qgroup *)(uintptr_t)tmp_unode->aux;
			if (qg->tag == seq)
				continue;

			if (qg->refcnt - seq == roots->nnodes) {
				qg->excl -= sgn * num_bytes;
				qg->excl_cmpr -= sgn * num_bytes;
				qgroup_dirty(fs_info, qg);
			}

			list_for_each_entry(glist, &qg->groups, next_group) {
				ret = ulist_add(tmp, glist->group->qgroupid,
						(uintptr_t)glist->group,
						GFP_ATOMIC);
				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}
/*
 * btrfs_qgroup_account_ref is called for every ref that is added to or deleted
 * from the fs. First, all roots referencing the extent are searched, and
 * then the space is accounted to the different roots accordingly. The
 * accounting algorithm works in 3 steps documented inline.
 */
int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info,
			     struct btrfs_delayed_ref_node *node,
			     struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key ins;
	struct btrfs_root *quota_root;
	u64 ref_root;
	struct btrfs_qgroup *qgroup;
	struct ulist *roots = NULL;
	struct ulist *tmp = NULL;
	u64 seq;
	int ret = 0;
	int sgn;

	if (!fs_info->quota_enabled)
		return 0;

	BUG_ON(!fs_info->quota_root);

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    node->type == BTRFS_SHARED_BLOCK_REF_KEY) {
		struct btrfs_delayed_tree_ref *ref;
		ref = btrfs_delayed_node_to_tree_ref(node);
		ref_root = ref->root;
	} else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
		   node->type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_delayed_data_ref *ref;
		ref = btrfs_delayed_node_to_data_ref(node);
		ref_root = ref->root;
	} else {
		BUG();
	}

	if (!is_fstree(ref_root)) {
		/*
		 * non-fs-trees are not being accounted
		 */
		return 0;
	}

	switch (node->action) {
	case BTRFS_ADD_DELAYED_REF:
	case BTRFS_ADD_DELAYED_EXTENT:
		sgn = 1;
		seq = btrfs_tree_mod_seq_prev(node->seq);
		break;
	case BTRFS_DROP_DELAYED_REF:
		sgn = -1;
		seq = node->seq;
		break;
	case BTRFS_UPDATE_DELAYED_HEAD:
		return 0;
	default:
		BUG();
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
		if (fs_info->qgroup_rescan_progress.objectid <= node->bytenr) {
			mutex_unlock(&fs_info->qgroup_rescan_lock);
			return 0;
		}
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	/*
	 * the delayed ref sequence number we pass depends on the direction of
	 * the operation. for add operations, we pass
	 * tree_mod_log_prev_seq(node->seq) to skip
	 * the delayed ref's current sequence number, because we need the state
	 * of the tree before the add operation. for delete operations, we pass
	 * (node->seq) to include the delayed ref's current sequence number,
	 * because we need the state of the tree after the delete operation.
	 */
	ret = btrfs_find_all_roots(trans, fs_info, node->bytenr, seq, &roots);
	if (ret < 0)
		return ret;

	mutex_lock(&fs_info->qgroup_rescan_lock);
	spin_lock(&fs_info->qgroup_lock);
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
		if (fs_info->qgroup_rescan_progress.objectid <= node->bytenr) {
			ret = 0;
			goto unlock;
		}
	}

	quota_root = fs_info->quota_root;
	if (!quota_root)
		goto unlock;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto unlock;

	/*
	 * step 1: for each old ref, visit all nodes once and inc refcnt
	 */
	tmp = ulist_alloc(GFP_ATOMIC);
	if (!tmp) {
		ret = -ENOMEM;
		goto unlock;
	}
	seq = fs_info->qgroup_seq;
	fs_info->qgroup_seq += roots->nnodes + 1; /* max refcnt */

	ret = qgroup_account_ref_step1(fs_info, roots, tmp, seq);
	if (ret)
		goto unlock;

	/*
	 * step 2: walk from the new root
	 */
	ret = qgroup_account_ref_step2(fs_info, roots, tmp, seq, sgn,
				       node->num_bytes, qgroup);
	if (ret)
		goto unlock;

	/*
	 * step 3: walk again from old refs
	 */
	ret = qgroup_account_ref_step3(fs_info, roots, tmp, seq, sgn,
				       node->num_bytes);

unlock:
	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_rescan_lock);
	ulist_free(roots);
	ulist_free(tmp);

	return ret;
}
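
/*
 * Worked example for the add case: an extent is referenced by subvolumes
 * A and B, and a ref from subvolume C is added (sgn = 1). The old roots
 * are {A, B}. Step 1 bumps refcnt on every qgroup above A or B. Step 2
 * walks up from C: qgroups not reached in step 1 gain rfer (not excl,
 * since roots->nnodes != 0), and all of C's ancestors get tagged. Step 3
 * revisits A's and B's ancestors, skipping tagged ones; any untagged
 * qgroup reached by both old roots counted the extent as exclusive, so
 * its excl shrinks now that C shares the extent.
 */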
/*
 * called from commit_transaction. Writes all changed qgroups to disk.
 */
int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
		      struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root = fs_info->quota_root;
	int ret = 0;
	int start_rescan_worker = 0;

	if (!quota_root)
		goto out;

	if (!fs_info->quota_enabled && fs_info->pending_quota_state)
		start_rescan_worker = 1;

	fs_info->quota_enabled = fs_info->pending_quota_state;

	spin_lock(&fs_info->qgroup_lock);
	while (!list_empty(&fs_info->dirty_qgroups)) {
		struct btrfs_qgroup *qgroup;
		qgroup = list_first_entry(&fs_info->dirty_qgroups,
					  struct btrfs_qgroup, dirty);
		list_del_init(&qgroup->dirty);
		spin_unlock(&fs_info->qgroup_lock);
		ret = update_qgroup_info_item(trans, quota_root, qgroup);
		if (ret)
			fs_info->qgroup_flags |=
					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		spin_lock(&fs_info->qgroup_lock);
	}
	if (fs_info->quota_enabled)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
	else
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	spin_unlock(&fs_info->qgroup_lock);

	ret = update_qgroup_status_item(trans, fs_info, quota_root);
	if (ret)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;

	if (!ret && start_rescan_worker) {
		ret = btrfs_qgroup_rescan(fs_info);
		if (ret)
			pr_err("btrfs: start rescan quota failed: %d\n", ret);
		ret = 0;
	}

out:
	return ret;
}
/*
 * copy the accounting information between qgroups. This is necessary when a
 * snapshot or a subvolume is created.
 */
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
			 struct btrfs_qgroup_inherit *inherit)
{
	int ret = 0;
	int i;
	u64 *i_qgroups;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_qgroup *srcgroup;
	struct btrfs_qgroup *dstgroup;
	u32 level_size = 0;
	u64 nums;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_enabled)
		goto out;

	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	if (inherit) {
		i_qgroups = (u64 *)(inherit + 1);
		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
		       2 * inherit->num_excl_copies;
		for (i = 0; i < nums; ++i) {
			srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
			if (!srcgroup) {
				ret = -EINVAL;
				goto out;
			}
			++i_qgroups;
		}
	}

	/*
	 * create a tracking group for the subvol itself
	 */
	ret = add_qgroup_item(trans, quota_root, objectid);
	if (ret)
		goto out;

	if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
		ret = update_qgroup_limit_item(trans, quota_root, objectid,
					       inherit->lim.flags,
					       inherit->lim.max_rfer,
					       inherit->lim.max_excl,
					       inherit->lim.rsv_rfer,
					       inherit->lim.rsv_excl);
		if (ret)
			goto out;
	}

	if (srcid) {
		struct btrfs_root *srcroot;
		struct btrfs_key srckey;
		int srcroot_level;

		srckey.objectid = srcid;
		srckey.type = BTRFS_ROOT_ITEM_KEY;
		srckey.offset = (u64)-1;
		srcroot = btrfs_read_fs_root_no_name(fs_info, &srckey);
		if (IS_ERR(srcroot)) {
			ret = PTR_ERR(srcroot);
			goto out;
		}

		srcroot_level = btrfs_header_level(srcroot->node);
		level_size = btrfs_level_size(srcroot, srcroot_level);
	}

	/*
	 * add qgroup to all inherited groups
	 */
	if (inherit) {
		i_qgroups = (u64 *)(inherit + 1);
		for (i = 0; i < inherit->num_qgroups; ++i) {
			ret = add_qgroup_relation_item(trans, quota_root,
						       objectid, *i_qgroups);
			if (ret)
				goto out;
			ret = add_qgroup_relation_item(trans, quota_root,
						       *i_qgroups, objectid);
			if (ret)
				goto out;
			++i_qgroups;
		}
	}

	spin_lock(&fs_info->qgroup_lock);

	dstgroup = add_qgroup_rb(fs_info, objectid);
	if (IS_ERR(dstgroup)) {
		ret = PTR_ERR(dstgroup);
		goto unlock;
	}

	if (srcid) {
		srcgroup = find_qgroup_rb(fs_info, srcid);
		if (!srcgroup)
			goto unlock;
		dstgroup->rfer = srcgroup->rfer - level_size;
		dstgroup->rfer_cmpr = srcgroup->rfer_cmpr - level_size;
		srcgroup->excl = level_size;
		srcgroup->excl_cmpr = level_size;
		qgroup_dirty(fs_info, dstgroup);
		qgroup_dirty(fs_info, srcgroup);
	}

	if (!inherit)
		goto unlock;

	i_qgroups = (u64 *)(inherit + 1);
	for (i = 0; i < inherit->num_qgroups; ++i) {
		ret = add_relation_rb(quota_root->fs_info, objectid,
				      *i_qgroups);
		if (ret)
			goto unlock;
		++i_qgroups;
	}

	for (i = 0; i < inherit->num_ref_copies; ++i) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;

		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);

		if (!src || !dst) {
			ret = -EINVAL;
			goto unlock;
		}

		dst->rfer = src->rfer - level_size;
		dst->rfer_cmpr = src->rfer_cmpr - level_size;
		i_qgroups += 2;
	}
	for (i = 0; i < inherit->num_excl_copies; ++i) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;

		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);

		if (!src || !dst) {
			ret = -EINVAL;
			goto unlock;
		}

		dst->excl = src->excl + level_size;
		dst->excl_cmpr = src->excl_cmpr + level_size;
		i_qgroups += 2;
	}

unlock:
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
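
/*
 * Intuition for the level_size arithmetic above (as the code implies):
 * right after a snapshot, source and destination share the whole tree
 * except for the root node of each. The destination therefore starts out
 * referencing everything the source referenced minus one tree block, and
 * the source's exclusive count drops to exactly its own root block.
 */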
/*
 * reserve some space for a qgroup and all its parents. The reservation takes
 * place with start_transaction or dealloc_reserve, similar to ENOSPC
 * accounting. If not enough space is available, EDQUOT is returned.
 * We assume that the requested space is new for all qgroups.
 */
int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 ref_root = root->root_key.objectid;
	int ret = 0;
	struct ulist *ulist = NULL;
	struct ulist_node *unode;
	struct ulist_iterator uiter;

	if (!is_fstree(ref_root))
		return 0;

	if (num_bytes == 0)
		return 0;

	spin_lock(&fs_info->qgroup_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	/*
	 * in a first step, we check all affected qgroups if any limits would
	 * be exceeded
	 */
	ulist = ulist_alloc(GFP_ATOMIC);
	if (!ulist) {
		ret = -ENOMEM;
		goto out;
	}
	ret = ulist_add(ulist, qgroup->qgroupid,
			(uintptr_t)qgroup, GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;

		if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
		    qg->reserved + (s64)qg->rfer + num_bytes >
		    qg->max_rfer) {
			ret = -EDQUOT;
			goto out;
		}

		if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
		    qg->reserved + (s64)qg->excl + num_bytes >
		    qg->max_excl) {
			ret = -EDQUOT;
			goto out;
		}

		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(ulist, glist->group->qgroupid,
					(uintptr_t)glist->group, GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
	ret = 0;
	/*
	 * no limits exceeded, now record the reservation into all qgroups
	 */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(ulist, &uiter))) {
		struct btrfs_qgroup *qg;

		qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;

		qg->reserved += num_bytes;
	}

out:
	spin_unlock(&fs_info->qgroup_lock);
	ulist_free(ulist);

	return ret;
}
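
/*
 * The two passes above make the reservation all-or-nothing: the limits
 * of the whole parent chain are checked before any reserved counter is
 * touched, so a failure in the middle never leaves a partial reservation
 * behind.
 */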
void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct ulist *ulist = NULL;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	u64 ref_root = root->root_key.objectid;
	int ret = 0;

	if (!is_fstree(ref_root))
		return;

	if (num_bytes == 0)
		return;

	spin_lock(&fs_info->qgroup_lock);

	quota_root = fs_info->quota_root;
	if (!quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	ulist = ulist_alloc(GFP_ATOMIC);
	if (!ulist) {
		btrfs_std_error(fs_info, -ENOMEM);
		goto out;
	}
	ret = ulist_add(ulist, qgroup->qgroupid,
			(uintptr_t)qgroup, GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;

		qg->reserved -= num_bytes;

		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(ulist, glist->group->qgroupid,
					(uintptr_t)glist->group, GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}

out:
	spin_unlock(&fs_info->qgroup_lock);
	ulist_free(ulist);
}
void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
{
	if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
		return;
	pr_err("btrfs: qgroups not uptodate in trans handle %p: list is%s empty, seq is %#x.%x\n",
		trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
		(u32)(trans->delayed_ref_elem.seq >> 32),
		(u32)trans->delayed_ref_elem.seq);
	BUG();
}
/*
 * returns < 0 on error, 0 when more leaves are to be scanned.
 * returns 1 when done, 2 when done and FLAG_INCONSISTENT was cleared.
 */
static int
qgroup_rescan_leaf(struct qgroup_rescan *qscan, struct btrfs_path *path,
		   struct btrfs_trans_handle *trans, struct ulist *tmp,
		   struct extent_buffer *scratch_leaf)
{
	struct btrfs_key found;
	struct btrfs_fs_info *fs_info = qscan->fs_info;
	struct ulist *roots = NULL;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct seq_list tree_mod_seq_elem = {};
	u64 seq;
	int slot;
	int ret;

	path->leave_spinning = 1;
	mutex_lock(&fs_info->qgroup_rescan_lock);
	ret = btrfs_search_slot_for_read(fs_info->extent_root,
					 &fs_info->qgroup_rescan_progress,
					 path, 1, 0);

	pr_debug("current progress key (%llu %u %llu), search_slot ret %d\n",
		 (unsigned long long)fs_info->qgroup_rescan_progress.objectid,
		 fs_info->qgroup_rescan_progress.type,
		 (unsigned long long)fs_info->qgroup_rescan_progress.offset,
		 ret);

	if (ret) {
		/*
		 * The rescan is about to end, we will not be scanning any
		 * further blocks. We cannot unset the RESCAN flag here, because
		 * we want to commit the transaction if everything went well.
		 * To make the live accounting work in this phase, we set our
		 * scan progress pointer such that every real extent objectid
		 * will be smaller.
		 */
		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
		btrfs_release_path(path);
		mutex_unlock(&fs_info->qgroup_rescan_lock);
		return ret;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &found,
			      btrfs_header_nritems(path->nodes[0]) - 1);
	fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;

	btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	memcpy(scratch_leaf, path->nodes[0], sizeof(*scratch_leaf));
	slot = path->slots[0];
	btrfs_release_path(path);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
		btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
		if (found.type != BTRFS_EXTENT_ITEM_KEY)
			continue;
		ret = btrfs_find_all_roots(trans, fs_info, found.objectid,
					   tree_mod_seq_elem.seq, &roots);
		if (ret < 0)
			goto out;
		spin_lock(&fs_info->qgroup_lock);
		seq = fs_info->qgroup_seq;
		fs_info->qgroup_seq += roots->nnodes + 1; /* max refcnt */

		ret = qgroup_account_ref_step1(fs_info, roots, tmp, seq);
		if (ret) {
			spin_unlock(&fs_info->qgroup_lock);
			ulist_free(roots);
			goto out;
		}

		/*
		 * step2 of btrfs_qgroup_account_ref works from a single root,
		 * we're doing all at once here.
		 */
		ulist_reinit(tmp);
		ULIST_ITER_INIT(&uiter);
		while ((unode = ulist_next(roots, &uiter))) {
			struct btrfs_qgroup *qg;

			qg = find_qgroup_rb(fs_info, unode->val);
			if (!qg)
				continue;

			ret = ulist_add(tmp, qg->qgroupid, (uintptr_t)qg,
					GFP_ATOMIC);
			if (ret < 0) {
				spin_unlock(&fs_info->qgroup_lock);
				ulist_free(roots);
				goto out;
			}
		}

		/* this loop is similar to step 2 of btrfs_qgroup_account_ref */
		ULIST_ITER_INIT(&uiter);
		while ((unode = ulist_next(tmp, &uiter))) {
			struct btrfs_qgroup *qg;
			struct btrfs_qgroup_list *glist;

			qg = (struct btrfs_qgroup *)(uintptr_t) unode->aux;
			qg->rfer += found.offset;
			qg->rfer_cmpr += found.offset;
			WARN_ON(qg->tag >= seq);
			if (qg->refcnt - seq == roots->nnodes) {
				qg->excl += found.offset;
				qg->excl_cmpr += found.offset;
			}
			qgroup_dirty(fs_info, qg);

			list_for_each_entry(glist, &qg->groups, next_group) {
				ret = ulist_add(tmp, glist->group->qgroupid,
						(uintptr_t)glist->group,
						GFP_ATOMIC);
				if (ret < 0) {
					spin_unlock(&fs_info->qgroup_lock);
					ulist_free(roots);
					goto out;
				}
			}
		}

		spin_unlock(&fs_info->qgroup_lock);
		ulist_free(roots);
		ret = 0;
	}

out:
	btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);

	return ret;
}
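
/*
 * The leaf under scan is memcpy'd into scratch_leaf above so that
 * qgroup_rescan_lock and the path can be dropped while the items are
 * processed; live accounting for extents beyond the recorded progress
 * pointer keeps running concurrently.
 */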
static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
{
	struct qgroup_rescan *qscan = container_of(work, struct qgroup_rescan,
						   work);
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_fs_info *fs_info = qscan->fs_info;
	struct ulist *tmp = NULL;
	struct extent_buffer *scratch_leaf = NULL;
	int err = -ENOMEM;

	path = btrfs_alloc_path();
	if (!path)
		goto out;
	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		goto out;
	scratch_leaf = kmalloc(sizeof(*scratch_leaf), GFP_NOFS);
	if (!scratch_leaf)
		goto out;

	err = 0;
	while (!err) {
		trans = btrfs_start_transaction(fs_info->fs_root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			break;
		}
		if (!fs_info->quota_enabled) {
			err = -EINTR;
		} else {
			err = qgroup_rescan_leaf(qscan, path, trans,
						 tmp, scratch_leaf);
		}
		if (err > 0)
			btrfs_commit_transaction(trans, fs_info->fs_root);
		else
			btrfs_end_transaction(trans, fs_info->fs_root);
	}

out:
	kfree(scratch_leaf);
	ulist_free(tmp);
	btrfs_free_path(path);
	kfree(qscan);

	mutex_lock(&fs_info->qgroup_rescan_lock);
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;

	if (err == 2 &&
	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	} else if (err < 0) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	if (err >= 0) {
		pr_info("btrfs: qgroup scan completed%s\n",
			err == 2 ? " (inconsistency flag cleared)" : "");
	} else {
		pr_err("btrfs: qgroup scan failed with %d\n", err);
	}
}
static void
qgroup_rescan_start(struct btrfs_fs_info *fs_info, struct qgroup_rescan *qscan)
{
	memset(&qscan->work, 0, sizeof(qscan->work));
	qscan->work.func = btrfs_qgroup_rescan_worker;
	qscan->fs_info = fs_info;

	pr_info("btrfs: qgroup scan started\n");
	btrfs_queue_worker(&fs_info->qgroup_rescan_workers, &qscan->work);
}
int
btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
{
	int ret = 0;
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;
	struct qgroup_rescan *qscan = kmalloc(sizeof(*qscan), GFP_NOFS);

	if (!qscan)
		return -ENOMEM;

	mutex_lock(&fs_info->qgroup_rescan_lock);
	spin_lock(&fs_info->qgroup_lock);
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
		ret = -EINPROGRESS;
	else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
		ret = -EINVAL;
	if (ret) {
		spin_unlock(&fs_info->qgroup_lock);
		mutex_unlock(&fs_info->qgroup_rescan_lock);
		kfree(qscan);
		return ret;
	}

	fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	memset(&fs_info->qgroup_rescan_progress, 0,
		sizeof(fs_info->qgroup_rescan_progress));

	/* clear all current qgroup tracking information */
	for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		qgroup->rfer = 0;
		qgroup->rfer_cmpr = 0;
		qgroup->excl = 0;
		qgroup->excl_cmpr = 0;
	}
	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	qgroup_rescan_start(fs_info, qscan);

	return 0;
}