// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "btree_update.h"
#include "errcode.h"
#include "error.h"
#include "inode.h"
#include "quota.h"
#include "snapshot.h"
#include "super-io.h"
static const char * const bch2_quota_types[] = {
	"user",
	"group",
	"project",
};

static const char * const bch2_quota_counters[] = {
	"space",
	"inodes",
};
static int bch2_sb_quota_validate(struct bch_sb *sb, struct bch_sb_field *f,
				  enum bch_validate_flags flags, struct printbuf *err)
{
	struct bch_sb_field_quota *q = field_to_type(f, quota);

	if (vstruct_bytes(&q->field) < sizeof(*q)) {
		prt_printf(err, "wrong size (got %zu should be %zu)",
			   vstruct_bytes(&q->field), sizeof(*q));
		return -BCH_ERR_invalid_sb_quota;
	}

	return 0;
}
static void bch2_sb_quota_to_text(struct printbuf *out, struct bch_sb *sb,
				  struct bch_sb_field *f)
{
	struct bch_sb_field_quota *q = field_to_type(f, quota);
	unsigned qtyp, counter;

	for (qtyp = 0; qtyp < ARRAY_SIZE(q->q); qtyp++) {
		prt_printf(out, "%s: flags %llx",
			   bch2_quota_types[qtyp],
			   le64_to_cpu(q->q[qtyp].flags));

		for (counter = 0; counter < Q_COUNTERS; counter++)
			prt_printf(out, " %s timelimit %u warnlimit %u",
				   bch2_quota_counters[counter],
				   le32_to_cpu(q->q[qtyp].c[counter].timelimit),
				   le32_to_cpu(q->q[qtyp].c[counter].warnlimit));

		prt_newline(out);
	}
}
const struct bch_sb_field_ops bch_sb_field_ops_quota = {
	.validate	= bch2_sb_quota_validate,
	.to_text	= bch2_sb_quota_to_text,
};
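
/*
 * Quota keys live in the quotas btree: the key's inode field selects the
 * quota type (user/group/project) and the offset field is the qid being
 * described.
 */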
int bch2_quota_validate(struct bch_fs *c, struct bkey_s_c k,
			enum bch_validate_flags flags)
{
	int ret = 0;

	bkey_fsck_err_on(k.k->p.inode >= QTYP_NR,
			 c, quota_type_invalid,
			 "invalid quota type (%llu >= %u)",
			 k.k->p.inode, QTYP_NR);
fsck_err:
	return ret;
}
void bch2_quota_to_text(struct printbuf *out, struct bch_fs *c,
			struct bkey_s_c k)
{
	struct bkey_s_c_quota dq = bkey_s_c_to_quota(k);
	unsigned i;

	for (i = 0; i < Q_COUNTERS; i++)
		prt_printf(out, "%s hardlimit %llu softlimit %llu",
			   bch2_quota_counters[i],
			   le64_to_cpu(dq.v->c[i].hardlimit),
			   le64_to_cpu(dq.v->c[i].softlimit));
}
#ifdef CONFIG_BCACHEFS_QUOTA

#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/quota.h>
static void qc_info_to_text(struct printbuf *out, struct qc_info *i)
{
	printbuf_tabstops_reset(out);
	printbuf_tabstop_push(out, 20);

	prt_printf(out, "i_fieldmask\t%x\n", i->i_fieldmask);
	prt_printf(out, "i_flags\t%u\n", i->i_flags);
	prt_printf(out, "i_spc_timelimit\t%u\n", i->i_spc_timelimit);
	prt_printf(out, "i_ino_timelimit\t%u\n", i->i_ino_timelimit);
	prt_printf(out, "i_rt_spc_timelimit\t%u\n", i->i_rt_spc_timelimit);
	prt_printf(out, "i_spc_warnlimit\t%u\n", i->i_spc_warnlimit);
	prt_printf(out, "i_ino_warnlimit\t%u\n", i->i_ino_warnlimit);
	prt_printf(out, "i_rt_spc_warnlimit\t%u\n", i->i_rt_spc_warnlimit);
}
static void qc_dqblk_to_text(struct printbuf *out, struct qc_dqblk *q)
{
	printbuf_tabstops_reset(out);
	printbuf_tabstop_push(out, 20);

	prt_printf(out, "d_fieldmask\t%x\n", q->d_fieldmask);
	prt_printf(out, "d_spc_hardlimit\t%llu\n", q->d_spc_hardlimit);
	prt_printf(out, "d_spc_softlimit\t%llu\n", q->d_spc_softlimit);
	prt_printf(out, "d_ino_hardlimit\t%llu\n", q->d_ino_hardlimit);
	prt_printf(out, "d_ino_softlimit\t%llu\n", q->d_ino_softlimit);
	prt_printf(out, "d_space\t%llu\n", q->d_space);
	prt_printf(out, "d_ino_count\t%llu\n", q->d_ino_count);
	prt_printf(out, "d_ino_timer\t%llu\n", q->d_ino_timer);
	prt_printf(out, "d_spc_timer\t%llu\n", q->d_spc_timer);
	prt_printf(out, "d_ino_warns\t%i\n", q->d_ino_warns);
	prt_printf(out, "d_spc_warns\t%i\n", q->d_spc_warns);
}
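
/*
 * __next_qtype()/for_each_set_qtype() iterate over the quota types whose
 * bits are set in a qtypes mask, pointing _q at the matching in-memory
 * bch_memquota_type. A minimal sketch of the intended usage (hypothetical
 * caller, mirroring bch2_quota_acct() below):
 *
 *	unsigned i, qtypes = enabled_qtypes(c);
 *	struct bch_memquota_type *q;
 *
 *	for_each_set_qtype(c, i, q, qtypes)
 *		mutex_lock_nested(&q->lock, i);
 */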
static inline unsigned __next_qtype(unsigned i, unsigned qtypes)
{
	qtypes >>= i;
	return qtypes ? i + __ffs(qtypes) : QTYP_NR;
}

#define for_each_set_qtype(_c, _i, _q, _qtypes)				\
	for (_i = 0;							\
	     (_i = __next_qtype(_i, _qtypes),				\
	      _q = &(_c)->quotas[_i],					\
	      _i < QTYP_NR);						\
	     _i++)
static bool ignore_hardlimit(struct bch_memquota_type *q)
{
	if (capable(CAP_SYS_RESOURCE))
		return true;
#if 0
	struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];

	return capable(CAP_SYS_RESOURCE) &&
		(info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
		 !(info->dqi_flags & DQF_ROOT_SQUASH));
#endif
	return false;
}
enum quota_msg {
	SOFTWARN,	/* Softlimit reached */
	SOFTLONGWARN,	/* Grace time expired */
	HARDWARN,	/* Hardlimit reached */

	HARDBELOW,	/* Usage got below inode hardlimit */
	SOFTBELOW,	/* Usage got below inode softlimit */
};
static int quota_nl[][Q_COUNTERS] = {
	[HARDWARN][Q_SPC]	= QUOTA_NL_BHARDWARN,
	[SOFTLONGWARN][Q_SPC]	= QUOTA_NL_BSOFTLONGWARN,
	[SOFTWARN][Q_SPC]	= QUOTA_NL_BSOFTWARN,
	[HARDBELOW][Q_SPC]	= QUOTA_NL_BHARDBELOW,
	[SOFTBELOW][Q_SPC]	= QUOTA_NL_BSOFTBELOW,

	[HARDWARN][Q_INO]	= QUOTA_NL_IHARDWARN,
	[SOFTLONGWARN][Q_INO]	= QUOTA_NL_ISOFTLONGWARN,
	[SOFTWARN][Q_INO]	= QUOTA_NL_ISOFTWARN,
	[HARDBELOW][Q_INO]	= QUOTA_NL_IHARDBELOW,
	[SOFTBELOW][Q_INO]	= QUOTA_NL_ISOFTBELOW,
};
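
/*
 * Quota netlink warnings are queued in a quota_msgs buffer while the
 * per-type locks are held, and only sent out by flush_warnings() once the
 * locks have been dropped.
 */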
struct quota_msgs {
	u8		nr;
	struct {
		u8	qtype;
		u8	msg;
	}		m[QTYP_NR * Q_COUNTERS];
};
static void prepare_msg(unsigned qtype,
			enum quota_counters counter,
			struct quota_msgs *msgs,
			enum quota_msg msg_type)
{
	BUG_ON(msgs->nr >= ARRAY_SIZE(msgs->m));

	msgs->m[msgs->nr].qtype	= qtype;
	msgs->m[msgs->nr].msg	= quota_nl[msg_type][counter];
	msgs->nr++;
}
static void prepare_warning(struct memquota_counter *qc,
			    unsigned qtype,
			    enum quota_counters counter,
			    struct quota_msgs *msgs,
			    enum quota_msg msg_type)
{
	if (qc->warning_issued & (1 << msg_type))
		return;

	prepare_msg(qtype, counter, msgs, msg_type);
	qc->warning_issued |= 1 << msg_type;
}
static void flush_warnings(struct bch_qid qid,
			   struct super_block *sb,
			   struct quota_msgs *msgs)
{
	unsigned i;

	for (i = 0; i < msgs->nr; i++)
		quota_send_warning(make_kqid(&init_user_ns, msgs->m[i].qtype, qid.q[i]),
				   sb->s_dev, msgs->m[i].msg);
}
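
/*
 * Check whether adding @v to @counter would cross the soft or hard limit;
 * queues the appropriate warning message and returns -EDQUOT when the
 * change should be refused.
 */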
static int bch2_quota_check_limit(struct bch_fs *c,
				  unsigned qtype,
				  struct bch_memquota *mq,
				  struct quota_msgs *msgs,
				  enum quota_counters counter,
				  s64 v,
				  enum quota_acct_mode mode)
{
	struct bch_memquota_type *q = &c->quotas[qtype];
	struct memquota_counter *qc = &mq->c[counter];
	u64 n = qc->v + v;

	BUG_ON((s64) n < 0);

	if (mode == KEY_TYPE_QUOTA_NOCHECK)
		return 0;

	if (v <= 0) {
		if (n < qc->hardlimit &&
		    (qc->warning_issued & (1 << HARDWARN))) {
			qc->warning_issued &= ~(1 << HARDWARN);
			prepare_msg(qtype, counter, msgs, HARDBELOW);
		}

		if (n < qc->softlimit &&
		    (qc->warning_issued & (1 << SOFTWARN))) {
			qc->warning_issued &= ~(1 << SOFTWARN);
			prepare_msg(qtype, counter, msgs, SOFTBELOW);
		}

		qc->warning_issued = 0;
		return 0;
	}

	if (qc->hardlimit &&
	    qc->hardlimit < n &&
	    !ignore_hardlimit(q)) {
		prepare_warning(qc, qtype, counter, msgs, HARDWARN);
		return -EDQUOT;
	}

	if (qc->softlimit &&
	    qc->softlimit < n) {
		if (qc->timer == 0) {
			qc->timer = ktime_get_real_seconds() + q->limits[counter].timelimit;
			prepare_warning(qc, qtype, counter, msgs, SOFTWARN);
		} else if (ktime_get_real_seconds() >= qc->timer &&
			   !ignore_hardlimit(q)) {
			prepare_warning(qc, qtype, counter, msgs, SOFTLONGWARN);
			return -EDQUOT;
		}
	}

	return 0;
}
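
/*
 * Account @v units of @counter usage against every enabled quota type in
 * @qid. Callers elsewhere in the filesystem invoke this roughly as follows
 * (illustrative sketch only, mirroring bch2_fs_quota_read_inode() below;
 * inode_u and sectors are hypothetical names):
 *
 *	bch2_quota_acct(c, bch_qid(&inode_u), Q_SPC, sectors,
 *			KEY_TYPE_QUOTA_NOCHECK);
 *
 * where an enforcing accounting mode would instead make the call fail with
 * -EDQUOT once a limit is exceeded.
 */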
int bch2_quota_acct(struct bch_fs *c, struct bch_qid qid,
		    enum quota_counters counter, s64 v,
		    enum quota_acct_mode mode)
{
	unsigned qtypes = enabled_qtypes(c);
	struct bch_memquota_type *q;
	struct bch_memquota *mq[QTYP_NR];
	struct quota_msgs msgs;
	unsigned i;
	int ret = 0;

	memset(&msgs, 0, sizeof(msgs));

	for_each_set_qtype(c, i, q, qtypes) {
		mq[i] = genradix_ptr_alloc(&q->table, qid.q[i], GFP_KERNEL);
		if (!mq[i])
			return -ENOMEM;
	}

	for_each_set_qtype(c, i, q, qtypes)
		mutex_lock_nested(&q->lock, i);

	for_each_set_qtype(c, i, q, qtypes) {
		ret = bch2_quota_check_limit(c, i, mq[i], &msgs, counter, v, mode);
		if (ret)
			goto err;
	}

	for_each_set_qtype(c, i, q, qtypes)
		mq[i]->c[counter].v += v;
err:
	for_each_set_qtype(c, i, q, qtypes)
		mutex_unlock(&q->lock);

	flush_warnings(qid, c->vfs_sb, &msgs);

	return ret;
}
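
/*
 * Transfer usage between two qids (e.g. on chown): the destination's limits
 * are checked first, so the operation can be refused with -EDQUOT before
 * any counters are modified.
 */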
static void __bch2_quota_transfer(struct bch_memquota *src_q,
				  struct bch_memquota *dst_q,
				  enum quota_counters counter, s64 v)
{
	BUG_ON(v > src_q->c[counter].v);
	BUG_ON(v + dst_q->c[counter].v < v);

	src_q->c[counter].v -= v;
	dst_q->c[counter].v += v;
}
int bch2_quota_transfer(struct bch_fs *c, unsigned qtypes,
			struct bch_qid dst,
			struct bch_qid src, u64 space,
			enum quota_acct_mode mode)
{
	struct bch_memquota_type *q;
	struct bch_memquota *src_q[3], *dst_q[3];
	struct quota_msgs msgs;
	unsigned i;
	int ret = 0;

	qtypes &= enabled_qtypes(c);

	memset(&msgs, 0, sizeof(msgs));

	for_each_set_qtype(c, i, q, qtypes) {
		src_q[i] = genradix_ptr_alloc(&q->table, src.q[i], GFP_KERNEL);
		dst_q[i] = genradix_ptr_alloc(&q->table, dst.q[i], GFP_KERNEL);
		if (!src_q[i] || !dst_q[i])
			return -ENOMEM;
	}

	for_each_set_qtype(c, i, q, qtypes)
		mutex_lock_nested(&q->lock, i);

	for_each_set_qtype(c, i, q, qtypes) {
		ret = bch2_quota_check_limit(c, i, dst_q[i], &msgs, Q_SPC,
					     dst_q[i]->c[Q_SPC].v + space,
					     mode);
		if (ret)
			goto err;

		ret = bch2_quota_check_limit(c, i, dst_q[i], &msgs, Q_INO,
					     dst_q[i]->c[Q_INO].v + 1,
					     mode);
		if (ret)
			goto err;
	}

	for_each_set_qtype(c, i, q, qtypes) {
		__bch2_quota_transfer(src_q[i], dst_q[i], Q_SPC, space);
		__bch2_quota_transfer(src_q[i], dst_q[i], Q_INO, 1);
	}
err:
	for_each_set_qtype(c, i, q, qtypes)
		mutex_unlock(&q->lock);

	flush_warnings(dst, c->vfs_sb, &msgs);

	return ret;
}
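
/*
 * Load a quota key from the quotas btree into the in-memory tables,
 * optionally applying the timer/warning overrides passed in via @qdq.
 */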
static int __bch2_quota_set(struct bch_fs *c, struct bkey_s_c k,
			    struct qc_dqblk *qdq)
{
	struct bkey_s_c_quota dq;
	struct bch_memquota_type *q;
	struct bch_memquota *mq;
	unsigned i;

	BUG_ON(k.k->p.inode >= QTYP_NR);

	if (!((1U << k.k->p.inode) & enabled_qtypes(c)))
		return 0;

	switch (k.k->type) {
	case KEY_TYPE_quota:
		dq = bkey_s_c_to_quota(k);
		q = &c->quotas[k.k->p.inode];

		mutex_lock(&q->lock);
		mq = genradix_ptr_alloc(&q->table, k.k->p.offset, GFP_KERNEL);
		if (!mq) {
			mutex_unlock(&q->lock);
			return -ENOMEM;
		}

		for (i = 0; i < Q_COUNTERS; i++) {
			mq->c[i].hardlimit = le64_to_cpu(dq.v->c[i].hardlimit);
			mq->c[i].softlimit = le64_to_cpu(dq.v->c[i].softlimit);
		}

		if (qdq && qdq->d_fieldmask & QC_SPC_TIMER)
			mq->c[Q_SPC].timer	= qdq->d_spc_timer;
		if (qdq && qdq->d_fieldmask & QC_SPC_WARNS)
			mq->c[Q_SPC].warns	= qdq->d_spc_warns;
		if (qdq && qdq->d_fieldmask & QC_INO_TIMER)
			mq->c[Q_INO].timer	= qdq->d_ino_timer;
		if (qdq && qdq->d_fieldmask & QC_INO_WARNS)
			mq->c[Q_INO].warns	= qdq->d_ino_warns;

		mutex_unlock(&q->lock);
	}

	return 0;
}
void bch2_fs_quota_exit(struct bch_fs *c)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(c->quotas); i++)
		genradix_free(&c->quotas[i].table);
}
void bch2_fs_quota_init(struct bch_fs *c)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(c->quotas); i++)
		mutex_init(&c->quotas[i].lock);
}
static struct bch_sb_field_quota *bch2_sb_get_or_create_quota(struct bch_sb_handle *sb)
{
	struct bch_sb_field_quota *sb_quota = bch2_sb_field_get(sb->sb, quota);

	if (sb_quota)
		return sb_quota;

	sb_quota = bch2_sb_field_resize(sb, quota, sizeof(*sb_quota) / sizeof(u64));
	if (sb_quota) {
		unsigned qtype, qc;

		for (qtype = 0; qtype < QTYP_NR; qtype++)
			for (qc = 0; qc < Q_COUNTERS; qc++)
				sb_quota->q[qtype].c[qc].timelimit =
					cpu_to_le32(7 * 24 * 60 * 60);
	}

	return sb_quota;
}
static void bch2_sb_quota_read(struct bch_fs *c)
{
	struct bch_sb_field_quota *sb_quota;
	unsigned i, j;

	sb_quota = bch2_sb_field_get(c->disk_sb.sb, quota);
	if (!sb_quota)
		return;

	for (i = 0; i < QTYP_NR; i++) {
		struct bch_memquota_type *q = &c->quotas[i];

		for (j = 0; j < Q_COUNTERS; j++) {
			q->limits[j].timelimit =
				le32_to_cpu(sb_quota->q[i].c[j].timelimit);
			q->limits[j].warnlimit =
				le32_to_cpu(sb_quota->q[i].c[j].warnlimit);
		}
	}
}
static int bch2_fs_quota_read_inode(struct btree_trans *trans,
				    struct btree_iter *iter,
				    struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bch_inode_unpacked u;
	struct bch_snapshot_tree s_t;
	u32 tree = bch2_snapshot_tree(c, k.k->p.snapshot);

	int ret = bch2_snapshot_tree_lookup(trans, tree, &s_t);
	bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
			"%s: snapshot tree %u not found", __func__, tree);
	if (ret)
		return ret;

	if (!s_t.master_subvol)
		goto advance;

	ret = bch2_inode_find_by_inum_nowarn_trans(trans,
				(subvol_inum) {
					le32_to_cpu(s_t.master_subvol),
					k.k->p.offset,
				}, &u);
	/*
	 * Inode might be deleted in this snapshot - the easiest way to handle
	 * that is to just skip it here:
	 */
	if (bch2_err_matches(ret, ENOENT))
		goto advance;
	if (ret)
		return ret;

	bch2_quota_acct(c, bch_qid(&u), Q_SPC, u.bi_sectors,
			KEY_TYPE_QUOTA_NOCHECK);
	bch2_quota_acct(c, bch_qid(&u), Q_INO, 1,
			KEY_TYPE_QUOTA_NOCHECK);
advance:
	bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
	return 0;
}
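
/*
 * At mount time, in-memory usage is rebuilt from scratch: first the limits
 * stored in the quotas btree are loaded, then every inode is walked and its
 * sector/inode counts are accounted with KEY_TYPE_QUOTA_NOCHECK.
 */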
int bch2_fs_quota_read(struct bch_fs *c)
{
	mutex_lock(&c->sb_lock);
	struct bch_sb_field_quota *sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb);
	if (!sb_quota) {
		mutex_unlock(&c->sb_lock);
		return -BCH_ERR_ENOSPC_sb_quota;
	}

	bch2_sb_quota_read(c);
	mutex_unlock(&c->sb_lock);

	int ret = bch2_trans_run(c,
		for_each_btree_key(trans, iter, BTREE_ID_quotas, POS_MIN,
				   BTREE_ITER_prefetch, k,
			__bch2_quota_set(c, k, NULL)) ?:
		for_each_btree_key(trans, iter, BTREE_ID_inodes, POS_MIN,
				   BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
			bch2_fs_quota_read_inode(trans, &iter, k)));
	bch_err_fn(c, ret);
	return ret;
}
/* Enable/disable/delete quotas for an entire filesystem: */
static int bch2_quota_enable(struct super_block *sb, unsigned uflags)
{
	struct bch_fs *c = sb->s_fs_info;
	struct bch_sb_field_quota *sb_quota;
	int ret = 0;

	if (sb->s_flags & SB_RDONLY)
		return -EROFS;

	/* Accounting must be enabled at mount time: */
	if (uflags & (FS_QUOTA_UDQ_ACCT|FS_QUOTA_GDQ_ACCT|FS_QUOTA_PDQ_ACCT))
		return -EINVAL;

	/* Can't enable enforcement without accounting: */
	if ((uflags & FS_QUOTA_UDQ_ENFD) && !c->opts.usrquota)
		return -EINVAL;

	if ((uflags & FS_QUOTA_GDQ_ENFD) && !c->opts.grpquota)
		return -EINVAL;

	if (uflags & FS_QUOTA_PDQ_ENFD && !c->opts.prjquota)
		return -EINVAL;

	mutex_lock(&c->sb_lock);
	sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb);
	if (!sb_quota) {
		ret = -BCH_ERR_ENOSPC_sb_quota;
		goto unlock;
	}

	if (uflags & FS_QUOTA_UDQ_ENFD)
		SET_BCH_SB_USRQUOTA(c->disk_sb.sb, true);

	if (uflags & FS_QUOTA_GDQ_ENFD)
		SET_BCH_SB_GRPQUOTA(c->disk_sb.sb, true);

	if (uflags & FS_QUOTA_PDQ_ENFD)
		SET_BCH_SB_PRJQUOTA(c->disk_sb.sb, true);

	bch2_write_super(c);
unlock:
	mutex_unlock(&c->sb_lock);

	return bch2_err_class(ret);
}
static int bch2_quota_disable(struct super_block *sb, unsigned uflags)
{
	struct bch_fs *c = sb->s_fs_info;

	if (sb->s_flags & SB_RDONLY)
		return -EROFS;

	mutex_lock(&c->sb_lock);
	if (uflags & FS_QUOTA_UDQ_ENFD)
		SET_BCH_SB_USRQUOTA(c->disk_sb.sb, false);

	if (uflags & FS_QUOTA_GDQ_ENFD)
		SET_BCH_SB_GRPQUOTA(c->disk_sb.sb, false);

	if (uflags & FS_QUOTA_PDQ_ENFD)
		SET_BCH_SB_PRJQUOTA(c->disk_sb.sb, false);

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	return 0;
}
static int bch2_quota_remove(struct super_block *sb, unsigned uflags)
{
	struct bch_fs *c = sb->s_fs_info;
	int ret;

	if (sb->s_flags & SB_RDONLY)
		return -EROFS;

	if (uflags & FS_USER_QUOTA) {
		if (c->opts.usrquota)
			return -EINVAL;

		ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
					      POS(QTYP_USR, 0),
					      POS(QTYP_USR, U64_MAX),
					      0, NULL);
		if (ret)
			return ret;
	}

	if (uflags & FS_GROUP_QUOTA) {
		if (c->opts.grpquota)
			return -EINVAL;

		ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
					      POS(QTYP_GRP, 0),
					      POS(QTYP_GRP, U64_MAX),
					      0, NULL);
		if (ret)
			return ret;
	}

	if (uflags & FS_PROJ_QUOTA) {
		if (c->opts.prjquota)
			return -EINVAL;

		ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
					      POS(QTYP_PRJ, 0),
					      POS(QTYP_PRJ, U64_MAX),
					      0, NULL);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * Return quota status information, such as enforcements, quota file inode
 * numbers etc.
 */
static int bch2_quota_get_state(struct super_block *sb, struct qc_state *state)
{
	struct bch_fs *c = sb->s_fs_info;
	unsigned qtypes = enabled_qtypes(c);
	unsigned i;

	memset(state, 0, sizeof(*state));

	for (i = 0; i < QTYP_NR; i++) {
		state->s_state[i].flags |= QCI_SYSFILE;

		if (!(qtypes & (1 << i)))
			continue;

		state->s_state[i].flags |= QCI_ACCT_ENABLED;

		state->s_state[i].spc_timelimit = c->quotas[i].limits[Q_SPC].timelimit;
		state->s_state[i].spc_warnlimit = c->quotas[i].limits[Q_SPC].warnlimit;

		state->s_state[i].ino_timelimit = c->quotas[i].limits[Q_INO].timelimit;
		state->s_state[i].ino_warnlimit = c->quotas[i].limits[Q_INO].warnlimit;
	}

	return 0;
}
/*
 * Adjust quota timers & warnings
 */
static int bch2_quota_set_info(struct super_block *sb, int type,
			       struct qc_info *info)
{
	struct bch_fs *c = sb->s_fs_info;
	struct bch_sb_field_quota *sb_quota;
	int ret = 0;

	if (0) {
		struct printbuf buf = PRINTBUF;

		qc_info_to_text(&buf, info);
		pr_info("setting:\n%s", buf.buf);
		printbuf_exit(&buf);
	}

	if (sb->s_flags & SB_RDONLY)
		return -EROFS;

	if (type >= QTYP_NR)
		return -EINVAL;

	if (!((1 << type) & enabled_qtypes(c)))
		return 0;

	if (info->i_fieldmask &
	    ~(QC_SPC_TIMER|QC_INO_TIMER|QC_SPC_WARNS|QC_INO_WARNS))
		return -EINVAL;

	mutex_lock(&c->sb_lock);
	sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb);
	if (!sb_quota) {
		ret = -BCH_ERR_ENOSPC_sb_quota;
		goto unlock;
	}

	if (info->i_fieldmask & QC_SPC_TIMER)
		sb_quota->q[type].c[Q_SPC].timelimit =
			cpu_to_le32(info->i_spc_timelimit);

	if (info->i_fieldmask & QC_SPC_WARNS)
		sb_quota->q[type].c[Q_SPC].warnlimit =
			cpu_to_le32(info->i_spc_warnlimit);

	if (info->i_fieldmask & QC_INO_TIMER)
		sb_quota->q[type].c[Q_INO].timelimit =
			cpu_to_le32(info->i_ino_timelimit);

	if (info->i_fieldmask & QC_INO_WARNS)
		sb_quota->q[type].c[Q_INO].warnlimit =
			cpu_to_le32(info->i_ino_warnlimit);

	bch2_sb_quota_read(c);
	bch2_write_super(c);
unlock:
	mutex_unlock(&c->sb_lock);

	return bch2_err_class(ret);
}
/* Get/set individual quotas: */
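
/*
 * In-memory space counters are kept in 512-byte sectors; the << 9 below
 * converts them to the byte units struct qc_dqblk expects.
 */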
static void __bch2_quota_get(struct qc_dqblk *dst, struct bch_memquota *src)
{
	dst->d_space		= src->c[Q_SPC].v << 9;
	dst->d_spc_hardlimit	= src->c[Q_SPC].hardlimit << 9;
	dst->d_spc_softlimit	= src->c[Q_SPC].softlimit << 9;
	dst->d_spc_timer	= src->c[Q_SPC].timer;
	dst->d_spc_warns	= src->c[Q_SPC].warns;

	dst->d_ino_count	= src->c[Q_INO].v;
	dst->d_ino_hardlimit	= src->c[Q_INO].hardlimit;
	dst->d_ino_softlimit	= src->c[Q_INO].softlimit;
	dst->d_ino_timer	= src->c[Q_INO].timer;
	dst->d_ino_warns	= src->c[Q_INO].warns;
}
static int bch2_get_quota(struct super_block *sb, struct kqid kqid,
			  struct qc_dqblk *qdq)
{
	struct bch_fs *c		= sb->s_fs_info;
	struct bch_memquota_type *q	= &c->quotas[kqid.type];
	qid_t qid			= from_kqid(&init_user_ns, kqid);
	struct bch_memquota *mq;

	memset(qdq, 0, sizeof(*qdq));

	mutex_lock(&q->lock);
	mq = genradix_ptr(&q->table, qid);
	if (mq)
		__bch2_quota_get(qdq, mq);
	mutex_unlock(&q->lock);

	return 0;
}
static int bch2_get_next_quota(struct super_block *sb, struct kqid *kqid,
			       struct qc_dqblk *qdq)
{
	struct bch_fs *c		= sb->s_fs_info;
	struct bch_memquota_type *q	= &c->quotas[kqid->type];
	qid_t qid			= from_kqid(&init_user_ns, *kqid);
	struct genradix_iter iter;
	struct bch_memquota *mq;
	int ret = 0;

	mutex_lock(&q->lock);

	genradix_for_each_from(&q->table, iter, mq, qid)
		if (memcmp(mq, page_address(ZERO_PAGE(0)), sizeof(*mq))) {
			__bch2_quota_get(qdq, mq);
			*kqid = make_kqid(current_user_ns(), kqid->type, iter.pos);
			goto found;
		}

	ret = -ENOENT;
found:
	mutex_unlock(&q->lock);
	return bch2_err_class(ret);
}
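
/*
 * Setting a quota is a read-modify-write of the quota key in a btree
 * transaction: the existing key (if any) is read with an intent lock, the
 * requested fields are updated, and the result is written back via
 * bch2_trans_update().
 */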
static int bch2_set_quota_trans(struct btree_trans *trans,
				struct bkey_i_quota *new_quota,
				struct qc_dqblk *qdq)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_quotas, new_quota->k.p,
			       BTREE_ITER_slots|BTREE_ITER_intent);
	ret = bkey_err(k);
	if (unlikely(ret))
		return ret;

	if (k.k->type == KEY_TYPE_quota)
		new_quota->v = *bkey_s_c_to_quota(k).v;

	if (qdq->d_fieldmask & QC_SPC_SOFT)
		new_quota->v.c[Q_SPC].softlimit = cpu_to_le64(qdq->d_spc_softlimit >> 9);
	if (qdq->d_fieldmask & QC_SPC_HARD)
		new_quota->v.c[Q_SPC].hardlimit = cpu_to_le64(qdq->d_spc_hardlimit >> 9);

	if (qdq->d_fieldmask & QC_INO_SOFT)
		new_quota->v.c[Q_INO].softlimit = cpu_to_le64(qdq->d_ino_softlimit);
	if (qdq->d_fieldmask & QC_INO_HARD)
		new_quota->v.c[Q_INO].hardlimit = cpu_to_le64(qdq->d_ino_hardlimit);

	ret = bch2_trans_update(trans, &iter, &new_quota->k_i, 0);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
static int bch2_set_quota(struct super_block *sb, struct kqid qid,
			  struct qc_dqblk *qdq)
{
	struct bch_fs *c = sb->s_fs_info;
	struct bkey_i_quota new_quota;
	int ret;

	if (0) {
		struct printbuf buf = PRINTBUF;

		qc_dqblk_to_text(&buf, qdq);
		pr_info("setting:\n%s", buf.buf);
		printbuf_exit(&buf);
	}

	if (sb->s_flags & SB_RDONLY)
		return -EROFS;

	bkey_quota_init(&new_quota.k_i);
	new_quota.k.p = POS(qid.type, from_kqid(&init_user_ns, qid));

	ret = bch2_trans_commit_do(c, NULL, NULL, 0,
				   bch2_set_quota_trans(trans, &new_quota, qdq)) ?:
		__bch2_quota_set(c, bkey_i_to_s_c(&new_quota.k_i), qdq);

	return bch2_err_class(ret);
}
const struct quotactl_ops bch2_quotactl_operations = {
	.quota_enable		= bch2_quota_enable,
	.quota_disable		= bch2_quota_disable,
	.rm_xquota		= bch2_quota_remove,

	.get_state		= bch2_quota_get_state,
	.set_info		= bch2_quota_set_info,

	.get_dqblk		= bch2_get_quota,
	.get_nextdqblk		= bch2_get_next_quota,
	.set_dqblk		= bch2_set_quota,
};
#endif /* CONFIG_BCACHEFS_QUOTA */