// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 */
/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space.  Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file.  This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously.  So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota check
 * program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file.  (The default is
 * 60 seconds.)  Another knob, "quota_scale", controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit.  The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one.  This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes with infinite bandwidth) to twice the
 * user's limit.  (In practice, the maximum overrun you see should be much
 * less.)  A "quota_scale" number greater than one makes quota syncs more
 * frequent and reduces the maximum overrun.  Numbers less than one (but
 * greater than zero) make quota syncs less frequent.
 *
 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
 * the quota file, so it is not being constantly read.
 */
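/*
 * To make the worst case above concrete: with quota_scale = 1, need_sync()
 * below forces a node to sync once its local unsynced change, scaled by the
 * number of journals, would consume the remaining headroom.  Each of N nodes
 * can therefore hold roughly (limit - synced value) / N unsynced, so the
 * cluster-wide unsynced total stays below the remaining headroom and usage
 * is bounded by about twice the limit, however many nodes take part.
 */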
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>
#include <linux/lockref.h>
#include <linux/list_lru.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/jhash.h>
#include <linux/vmalloc.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "util.h"
#define GFS2_QD_HASH_SHIFT	12
#define GFS2_QD_HASH_SIZE	BIT(GFS2_QD_HASH_SHIFT)
#define GFS2_QD_HASH_MASK	(GFS2_QD_HASH_SIZE - 1)
/* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
/*                                                   -> sd_bitmap_lock */
static DEFINE_SPINLOCK(qd_lock);
struct list_lru gfs2_qd_lru;

static struct hlist_bl_head qd_hash_table[GFS2_QD_HASH_SIZE];
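/*
 * Note that qd_hash_table is a single table shared by every mounted gfs2
 * filesystem, which is why gfs2_qd_hash() below mixes the superblock
 * pointer into the hash along with the quota ID: entries for the same
 * numeric ID on different filesystems still spread across the buckets.
 */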
static unsigned int gfs2_qd_hash(const struct gfs2_sbd *sdp,
				 const struct kqid qid)
{
	unsigned int h;

	h = jhash(&sdp, sizeof(struct gfs2_sbd *), 0);
	h = jhash(&qid, sizeof(struct kqid), h);

	return h & GFS2_QD_HASH_MASK;
}
static inline void spin_lock_bucket(unsigned int hash)
{
	hlist_bl_lock(&qd_hash_table[hash]);
}

static inline void spin_unlock_bucket(unsigned int hash)
{
	hlist_bl_unlock(&qd_hash_table[hash]);
}
static void gfs2_qd_dealloc(struct rcu_head *rcu)
{
	struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
	struct gfs2_sbd *sdp = qd->qd_sbd;

	kmem_cache_free(gfs2_quotad_cachep, qd);
	if (atomic_dec_and_test(&sdp->sd_quota_count))
		wake_up(&sdp->sd_kill_wait);
}
static void gfs2_qd_dispose(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;

	spin_lock(&qd_lock);
	list_del(&qd->qd_list);
	spin_unlock(&qd_lock);

	spin_lock_bucket(qd->qd_hash);
	hlist_bl_del_rcu(&qd->qd_hlist);
	spin_unlock_bucket(qd->qd_hash);

	if (!gfs2_withdrawing_or_withdrawn(sdp)) {
		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_ref);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);
	}

	gfs2_glock_put(qd->qd_gl);
	call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
}
static void gfs2_qd_list_dispose(struct list_head *list)
{
	struct gfs2_quota_data *qd;

	while (!list_empty(list)) {
		qd = list_first_entry(list, struct gfs2_quota_data, qd_lru);
		list_del(&qd->qd_lru);

		gfs2_qd_dispose(qd);
	}
}
static enum lru_status gfs2_qd_isolate(struct list_head *item,
				       struct list_lru_one *lru, void *arg)
{
	struct list_head *dispose = arg;
	struct gfs2_quota_data *qd =
		list_entry(item, struct gfs2_quota_data, qd_lru);
	enum lru_status status;

	if (!spin_trylock(&qd->qd_lockref.lock))
		return LRU_SKIP;

	status = LRU_SKIP;
	if (qd->qd_lockref.count == 0) {
		lockref_mark_dead(&qd->qd_lockref);
		list_lru_isolate_move(lru, &qd->qd_lru, dispose);
		status = LRU_REMOVED;
	}

	spin_unlock(&qd->qd_lockref.lock);
	return status;
}
static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
					 struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	unsigned long freed;

	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	freed = list_lru_shrink_walk(&gfs2_qd_lru, sc,
				     gfs2_qd_isolate, &dispose);

	gfs2_qd_list_dispose(&dispose);

	return freed;
}
static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
					  struct shrink_control *sc)
{
	return vfs_pressure_ratio(list_lru_shrink_count(&gfs2_qd_lru, sc));
}
static struct shrinker *gfs2_qd_shrinker;

int __init gfs2_qd_shrinker_init(void)
{
	gfs2_qd_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "gfs2-qd");
	if (!gfs2_qd_shrinker)
		return -ENOMEM;

	gfs2_qd_shrinker->count_objects = gfs2_qd_shrink_count;
	gfs2_qd_shrinker->scan_objects = gfs2_qd_shrink_scan;

	shrinker_register(gfs2_qd_shrinker);

	return 0;
}

void gfs2_qd_shrinker_exit(void)
{
	shrinker_free(gfs2_qd_shrinker);
}
static u64 qd2index(struct gfs2_quota_data *qd)
{
	struct kqid qid = qd->qd_id;
	return (2 * (u64)from_kqid(&init_user_ns, qid)) +
		((qid.type == USRQUOTA) ? 0 : 1);
}

static u64 qd2offset(struct gfs2_quota_data *qd)
{
	return qd2index(qd) * sizeof(struct gfs2_quota);
}
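/*
 * Quota file layout, as implied by qd2index(): user and group records for
 * the same numeric ID are interleaved, so uid N lives at index 2 * N and
 * gid N at index 2 * N + 1, each record occupying sizeof(struct gfs2_quota)
 * bytes.  For example, uid 1000 maps to index 2000 and gid 1000 to 2001.
 */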
static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
{
	struct gfs2_quota_data *qd;
	int error;

	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
	if (!qd)
		return NULL;

	qd->qd_sbd = sdp;
	qd->qd_lockref.count = 0;
	spin_lock_init(&qd->qd_lockref.lock);
	qd->qd_id = qid;
	qd->qd_slot = -1;
	INIT_LIST_HEAD(&qd->qd_lru);
	qd->qd_hash = hash;

	error = gfs2_glock_get(sdp, qd2index(qd),
			       &gfs2_quota_glops, CREATE, &qd->qd_gl);
	if (error)
		goto fail;

	return qd;

fail:
	kmem_cache_free(gfs2_quotad_cachep, qd);
	return NULL;
}
static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash,
						     const struct gfs2_sbd *sdp,
						     struct kqid qid)
{
	struct gfs2_quota_data *qd;
	struct hlist_bl_node *h;

	hlist_bl_for_each_entry_rcu(qd, h, &qd_hash_table[hash], qd_hlist) {
		if (!qid_eq(qd->qd_id, qid))
			continue;
		if (qd->qd_sbd != sdp)
			continue;
		if (lockref_get_not_dead(&qd->qd_lockref)) {
			list_lru_del_obj(&gfs2_qd_lru, &qd->qd_lru);
			return qd;
		}
	}

	return NULL;
}
static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd, *new_qd;
	unsigned int hash = gfs2_qd_hash(sdp, qid);

	rcu_read_lock();
	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
	rcu_read_unlock();

	if (qd)
		return 0;

	new_qd = qd_alloc(hash, sdp, qid);
	if (!new_qd)
		return -ENOMEM;

	spin_lock(&qd_lock);
	spin_lock_bucket(hash);
	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
	if (qd == NULL) {
		new_qd->qd_lockref.count++;
		*qdp = new_qd;
		list_add(&new_qd->qd_list, &sdp->sd_quota_list);
		hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
		atomic_inc(&sdp->sd_quota_count);
	}
	spin_unlock_bucket(hash);
	spin_unlock(&qd_lock);

	if (qd) {
		gfs2_glock_put(new_qd->qd_gl);
		kmem_cache_free(gfs2_quotad_cachep, new_qd);
	}

	return 0;
}
static void __qd_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;

	gfs2_assert(sdp, qd->qd_lockref.count > 0);
	qd->qd_lockref.count++;
}
static void qd_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp;

	if (lockref_put_or_lock(&qd->qd_lockref))
		return;

	BUG_ON(__lockref_is_dead(&qd->qd_lockref));
	sdp = qd->qd_sbd;
	if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
		lockref_mark_dead(&qd->qd_lockref);
		spin_unlock(&qd->qd_lockref.lock);

		gfs2_qd_dispose(qd);
		return;
	}

	qd->qd_lockref.count = 0;
	list_lru_add_obj(&gfs2_qd_lru, &qd->qd_lru);
	spin_unlock(&qd->qd_lockref.lock);
}
static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;
	unsigned int bit;
	int error = 0;

	spin_lock(&sdp->sd_bitmap_lock);
	if (qd->qd_slot_ref == 0) {
		bit = find_first_zero_bit(sdp->sd_quota_bitmap,
					  sdp->sd_quota_slots);
		if (bit >= sdp->sd_quota_slots) {
			error = -ENOSPC;
			goto out;
		}
		set_bit(bit, sdp->sd_quota_bitmap);
		qd->qd_slot = bit;
	}
	qd->qd_slot_ref++;
out:
	spin_unlock(&sdp->sd_bitmap_lock);
	return error;
}
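/*
 * Each quota_data with a pending change owns one slot in this node's quota
 * change file; sd_quota_bitmap tracks which slots are in use.  The first
 * zero bit is claimed under sd_bitmap_lock above, and the slot is only
 * returned to the bitmap when the last reference goes away in slot_put().
 */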
static void slot_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;

	spin_lock(&sdp->sd_bitmap_lock);
	gfs2_assert(sdp, qd->qd_slot_ref);
	qd->qd_slot_ref++;
	spin_unlock(&sdp->sd_bitmap_lock);
}
static void slot_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;

	spin_lock(&sdp->sd_bitmap_lock);
	gfs2_assert(sdp, qd->qd_slot_ref);
	if (!--qd->qd_slot_ref) {
		BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
		qd->qd_slot = -1;
	}
	spin_unlock(&sdp->sd_bitmap_lock);
}
static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;
	struct inode *inode = sdp->sd_qc_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int block, offset;
	struct buffer_head *bh = NULL;
	struct iomap iomap = { };
	int error;

	spin_lock(&qd->qd_lockref.lock);
	if (qd->qd_bh_count) {
		qd->qd_bh_count++;
		spin_unlock(&qd->qd_lockref.lock);
		return 0;
	}
	spin_unlock(&qd->qd_lockref.lock);

	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	error = gfs2_iomap_get(inode,
			       (loff_t)block << inode->i_blkbits,
			       i_blocksize(inode), &iomap);
	if (error)
		return error;
	error = -ENOENT;
	if (iomap.type != IOMAP_MAPPED)
		return error;

	error = gfs2_meta_read(ip->i_gl, iomap.addr >> inode->i_blkbits,
			       DIO_WAIT, 0, &bh);
	if (error)
		return error;
	error = -EIO;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
		goto out;

	spin_lock(&qd->qd_lockref.lock);
	if (qd->qd_bh == NULL) {
		qd->qd_bh = bh;
		qd->qd_bh_qc = (struct gfs2_quota_change *)
			(bh->b_data + sizeof(struct gfs2_meta_header) +
			 offset * sizeof(struct gfs2_quota_change));
		bh = NULL;
	}
	qd->qd_bh_count++;
	spin_unlock(&qd->qd_lockref.lock);
	error = 0;

out:
	brelse(bh);
	return error;
}
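/*
 * The slot-to-block mapping above is plain division: slot / sd_qc_per_block
 * selects the block of the quota change file and slot % sd_qc_per_block the
 * entry within that block, after the gfs2_meta_header.  sd_qc_per_block
 * itself depends on the filesystem block size.
 */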
static void bh_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;
	struct buffer_head *bh = NULL;

	spin_lock(&qd->qd_lockref.lock);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
		bh = qd->qd_bh;
		qd->qd_bh = NULL;
		qd->qd_bh_qc = NULL;
	}
	spin_unlock(&qd->qd_lockref.lock);
	brelse(bh);
}
static bool qd_grab_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
			 u64 sync_gen)
{
	bool ret = false;

	spin_lock(&qd->qd_lockref.lock);
	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
	    qd->qd_sync_gen >= sync_gen)
		goto out;

	if (__lockref_is_dead(&qd->qd_lockref))
		goto out;
	qd->qd_lockref.count++;

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
	set_bit(QDF_LOCKED, &qd->qd_flags);
	qd->qd_change_sync = qd->qd_change;
	slot_hold(qd);
	ret = true;

out:
	spin_unlock(&qd->qd_lockref.lock);
	return ret;
}
static void qd_ungrab_sync(struct gfs2_quota_data *qd)
{
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	slot_put(qd);
	qd_put(qd);
}
static void qdsb_put(struct gfs2_quota_data *qd)
{
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}
static void qd_unlock(struct gfs2_quota_data *qd)
{
	spin_lock(&qd->qd_lockref.lock);
	gfs2_assert_warn(qd->qd_sbd, test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	spin_unlock(&qd->qd_lockref.lock);
	qd_put(qd);
}
static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
		    struct gfs2_quota_data **qdp)
{
	int error;

	error = qd_get(sdp, qid, qdp);
	if (error)
		return error;

	error = slot_get(*qdp);
	if (error)
		goto fail;

	error = bh_get(*qdp);
	if (error)
		goto fail_slot;

	return 0;

fail_slot:
	slot_put(*qdp);
fail:
	qd_put(*qdp);
	return error;
}
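/*
 * qdsb_get() and qdsb_put() bundle the three references a holder needs: the
 * quota_data itself (qd_get), a quota change file slot (slot_get), and the
 * buffer backing that slot (bh_get).  The failure paths unwind in reverse
 * order so a partially built bundle is never left behind.
 */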
/**
 * gfs2_qa_get - make sure we have a quota allocations data structure,
 *               if necessary
 * @ip: the inode for this reservation
 */
int gfs2_qa_get(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct inode *inode = &ip->i_inode;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	spin_lock(&inode->i_lock);
	if (ip->i_qadata == NULL) {
		struct gfs2_qadata *tmp;

		spin_unlock(&inode->i_lock);
		tmp = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS);
		if (!tmp)
			return -ENOMEM;

		spin_lock(&inode->i_lock);
		if (ip->i_qadata == NULL)
			ip->i_qadata = tmp;
		else
			kmem_cache_free(gfs2_qadata_cachep, tmp);
	}
	ip->i_qadata->qa_ref++;
	spin_unlock(&inode->i_lock);
	return 0;
}
void gfs2_qa_put(struct gfs2_inode *ip)
{
	struct inode *inode = &ip->i_inode;

	spin_lock(&inode->i_lock);
	if (ip->i_qadata && --ip->i_qadata->qa_ref == 0) {
		kmem_cache_free(gfs2_qadata_cachep, ip->i_qadata);
		ip->i_qadata = NULL;
	}
	spin_unlock(&inode->i_lock);
}
int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data **qd;
	int error;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	error = gfs2_qa_get(ip);
	if (error)
		return error;

	qd = ip->i_qadata->qa_qd;

	if (gfs2_assert_warn(sdp, !ip->i_qadata->qa_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags))) {
		error = -EIO;
		goto out;
	}

	error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
	if (error)
		goto out;
	ip->i_qadata->qa_qd_num++;
	qd++;

	error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
	if (error)
		goto out;
	ip->i_qadata->qa_qd_num++;
	qd++;

	if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
	    !uid_eq(uid, ip->i_inode.i_uid)) {
		error = qdsb_get(sdp, make_kqid_uid(uid), qd);
		if (error)
			goto out;
		ip->i_qadata->qa_qd_num++;
		qd++;
	}

	if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
	    !gid_eq(gid, ip->i_inode.i_gid)) {
		error = qdsb_get(sdp, make_kqid_gid(gid), qd);
		if (error)
			goto out;
		ip->i_qadata->qa_qd_num++;
		qd++;
	}

out:
	if (error)
		gfs2_quota_unhold(ip);
	return error;
}
void gfs2_quota_unhold(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	u32 x;

	if (ip->i_qadata == NULL)
		return;

	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
		qdsb_put(ip->i_qadata->qa_qd[x]);
		ip->i_qadata->qa_qd[x] = NULL;
	}
	ip->i_qadata->qa_qd_num = 0;
	gfs2_qa_put(ip);
}
static int sort_qd(const void *a, const void *b)
{
	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

	if (qid_lt(qd_a->qd_id, qd_b->qd_id))
		return -1;
	if (qid_lt(qd_b->qd_id, qd_a->qd_id))
		return 1;
	return 0;
}
static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;
	bool needs_put = false;
	s64 x;

	gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);

	/*
	 * The QDF_CHANGE flag indicates that the slot in the quota change file
	 * is used.  Here, we use the value of qc->qc_change when the slot is
	 * used, and we assume a value of 0 otherwise.
	 */

	spin_lock(&qd->qd_lockref.lock);

	x = 0;
	if (test_bit(QDF_CHANGE, &qd->qd_flags))
		x = be64_to_cpu(qc->qc_change);
	x += change;
	qd->qd_change += change;

	if (!x && test_bit(QDF_CHANGE, &qd->qd_flags)) {
		/* The slot in the quota change file becomes unused. */
		clear_bit(QDF_CHANGE, &qd->qd_flags);
		qc->qc_flags = 0;
		qc->qc_id = 0;
		needs_put = true;
	} else if (x && !test_bit(QDF_CHANGE, &qd->qd_flags)) {
		/* The slot in the quota change file becomes used. */
		set_bit(QDF_CHANGE, &qd->qd_flags);
		__qd_hold(qd);
		slot_hold(qd);

		qc->qc_flags = 0;
		if (qd->qd_id.type == USRQUOTA)
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
	}
	qc->qc_change = cpu_to_be64(x);

	spin_unlock(&qd->qd_lockref.lock);

	if (needs_put) {
		slot_put(qd);
		qd_put(qd);
	}
	if (change < 0) /* Reset quiet flag if we freed some blocks */
		clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
}
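/*
 * Note that do_qc() keeps two views of a pending change in step: qc points
 * into the on-disk quota change file (journaled via gfs2_trans_add_meta()),
 * while qd->qd_change is the in-core total consulted by need_sync() and
 * gfs2_quota_check().  Syncing later calls do_qc(qd, -qd->qd_change_sync)
 * to retire the on-disk record once the quota file has been updated.
 */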
static int gfs2_write_buf_to_page(struct gfs2_sbd *sdp, unsigned long index,
				  unsigned off, void *buf, unsigned bytes)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct inode *inode = &ip->i_inode;
	struct address_space *mapping = inode->i_mapping;
	struct folio *folio;
	struct buffer_head *bh;
	u64 blk;
	unsigned bsize = sdp->sd_sb.sb_bsize, bnum = 0, boff = 0;
	unsigned to_write = bytes, pg_off = off;

	blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift);
	boff = off % bsize;

	folio = filemap_grab_folio(mapping, index);
	if (IS_ERR(folio))
		return PTR_ERR(folio);
	bh = folio_buffers(folio);
	if (!bh)
		bh = create_empty_buffers(folio, bsize, 0);

	for (;;) {
		/* Find the beginning block within the folio */
		if (pg_off >= ((bnum * bsize) + bsize)) {
			bh = bh->b_this_page;
			bnum++;
			blk++;
			continue;
		}
		if (!buffer_mapped(bh)) {
			gfs2_block_map(inode, blk, bh, 1);
			if (!buffer_mapped(bh))
				goto unlock_out;
			/* If it's a newly allocated disk block, zero it */
			if (buffer_new(bh))
				folio_zero_range(folio, bnum * bsize,
						 bh->b_size);
		}
		if (folio_test_uptodate(folio))
			set_buffer_uptodate(bh);
		if (bh_read(bh, REQ_META | REQ_PRIO) < 0)
			goto unlock_out;
		gfs2_trans_add_data(ip->i_gl, bh);

		/* If we need to write to the next block as well */
		if (to_write > (bsize - boff)) {
			pg_off += (bsize - boff);
			to_write -= (bsize - boff);
			boff = pg_off % bsize;
			continue;
		}
		break;
	}

	/* Write to the folio, now that we have setup the buffer(s) */
	memcpy_to_folio(folio, off, buf, bytes);
	flush_dcache_folio(folio);
	folio_unlock(folio);
	folio_put(folio);

	return 0;

unlock_out:
	folio_unlock(folio);
	folio_put(folio);
	return -EIO;
}
static int gfs2_write_disk_quota(struct gfs2_sbd *sdp, struct gfs2_quota *qp,
				 loff_t loc)
{
	unsigned long pg_beg;
	unsigned pg_off, nbytes, overflow = 0;
	int error;
	void *ptr;

	nbytes = sizeof(struct gfs2_quota);

	pg_beg = loc >> PAGE_SHIFT;
	pg_off = offset_in_page(loc);

	/* If the quota straddles a page boundary, split the write in two */
	if ((pg_off + nbytes) > PAGE_SIZE)
		overflow = (pg_off + nbytes) - PAGE_SIZE;

	ptr = qp;
	error = gfs2_write_buf_to_page(sdp, pg_beg, pg_off, ptr,
				       nbytes - overflow);
	/* If there's an overflow, write the remaining bytes to the next page */
	if (!error && overflow)
		error = gfs2_write_buf_to_page(sdp, pg_beg + 1, 0,
					       ptr + nbytes - overflow,
					       overflow);
	return error;
}
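/*
 * Straddle example for gfs2_write_disk_quota(): suppose PAGE_SIZE is 4096
 * and a quota record begins at offset 4064 within its page.  With a record
 * size of, say, 88 bytes (an illustrative figure, not taken from the
 * on-disk format), the write would overflow the page by
 * (4064 + 88) - 4096 = 56 bytes, so the first call writes 32 bytes to this
 * page and the second writes the remaining 56 to the start of the next.
 */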
/**
 * gfs2_adjust_quota - adjust record of current block usage
 * @sdp: The superblock
 * @loc: Offset of the entry in the quota file
 * @change: The amount of usage change to record
 * @qd: The quota data
 * @fdq: The updated limits to record
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 *
 * Returns: 0 or -ve on error
 */

static int gfs2_adjust_quota(struct gfs2_sbd *sdp, loff_t loc,
			     s64 change, struct gfs2_quota_data *qd,
			     struct qc_dqblk *fdq)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct inode *inode = &ip->i_inode;
	struct gfs2_quota q;
	int err;
	u64 size;

	if (gfs2_is_stuffed(ip)) {
		err = gfs2_unstuff_dinode(ip);
		if (err)
			return err;
	}

	memset(&q, 0, sizeof(struct gfs2_quota));
	err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
	if (err < 0)
		return err;

	loc -= sizeof(q); /* gfs2_internal_read would've advanced the loc ptr */
	be64_add_cpu(&q.qu_value, change);
	if (((s64)be64_to_cpu(q.qu_value)) < 0)
		q.qu_value = 0; /* Never go negative on quota usage */
	spin_lock(&qd->qd_lockref.lock);
	qd->qd_qb.qb_value = q.qu_value;
	if (fdq) {
		if (fdq->d_fieldmask & QC_SPC_SOFT) {
			q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
			qd->qd_qb.qb_warn = q.qu_warn;
		}
		if (fdq->d_fieldmask & QC_SPC_HARD) {
			q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
			qd->qd_qb.qb_limit = q.qu_limit;
		}
		if (fdq->d_fieldmask & QC_SPACE) {
			q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
			qd->qd_qb.qb_value = q.qu_value;
		}
	}
	spin_unlock(&qd->qd_lockref.lock);

	err = gfs2_write_disk_quota(sdp, &q, loc);
	if (!err) {
		size = loc + sizeof(struct gfs2_quota);
		if (size > inode->i_size)
			i_size_write(inode, size);
		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
		mark_inode_dirty(inode);
		set_bit(QDF_REFRESH, &qd->qd_flags);
	}

	return err;
}
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda,
		   u64 sync_gen)
{
	struct gfs2_sbd *sdp = (*qda)->qd_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_alloc_parms ap = {};
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	unsigned int qx, x;
	struct gfs2_quota_data *qd;
	unsigned reserved;
	loff_t offset;
	unsigned int nalloc = 0, blocks;
	int error;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			       &data_blocks, &ind_blocks);

	ghs = kmalloc_array(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
	if (!ghs)
		return -ENOMEM;

	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	inode_lock(&ip->i_inode);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &ghs[qx]);
		if (error)
			goto out_dq;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out_dq;

	for (x = 0; x < num_qd; x++) {
		offset = qd2offset(qda[x]);
		if (gfs2_write_alloc_required(ip, offset,
					      sizeof(struct gfs2_quota)))
			nalloc++;
	}

	/*
	 * 1 blk for unstuffing inode if stuffed. We add this extra
	 * block to the reservation unconditionally. If the inode
	 * doesn't need unstuffing, the block will be released to the
	 * rgrp since it won't be allocated during the transaction
	 */
	/* +3 in the end for unstuffing block, inode size update block
	 * and another block in case quota straddles page boundary and
	 * two blocks need to be updated instead of 1 */
	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;

	reserved = 1 + (nalloc * (data_blocks + ind_blocks));
	ap.target = reserved;
	error = gfs2_inplace_reserve(ip, &ap);
	if (error)
		goto out_alloc;

	if (nalloc)
		blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;

	error = gfs2_trans_begin(sdp, blocks, 0);
	if (error)
		goto out_ipres;

	for (x = 0; x < num_qd; x++) {
		qd = qda[x];
		offset = qd2offset(qd);
		error = gfs2_adjust_quota(sdp, offset, qd->qd_change_sync, qd,
					  NULL);
		if (error)
			goto out_end_trans;

		do_qc(qd, -qd->qd_change_sync);
		set_bit(QDF_REFRESH, &qd->qd_flags);
	}

out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	gfs2_inplace_release(ip);
out_alloc:
	gfs2_glock_dq_uninit(&i_gh);
out_dq:
	while (qx--)
		gfs2_glock_dq_uninit(&ghs[qx]);
	inode_unlock(&ip->i_inode);
	kfree(ghs);
	gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl,
		       GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_DO_SYNC);
	if (!error) {
		for (x = 0; x < num_qd; x++) {
			qd = qda[x];
			spin_lock(&qd->qd_lockref.lock);
			if (qd->qd_sync_gen < sync_gen)
				qd->qd_sync_gen = sync_gen;
			spin_unlock(&qd->qd_lockref.lock);
		}
	}
	return error;
}
static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota q;
	struct gfs2_quota_lvb *qlvb;
	loff_t pos;
	int error;

	memset(&q, 0, sizeof(struct gfs2_quota));
	pos = qd2offset(qd);
	error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
	if (error < 0)
		return error;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
	qlvb->__pad = 0;
	qlvb->qb_limit = q.qu_limit;
	qlvb->qb_warn = q.qu_warn;
	qlvb->qb_value = q.qu_value;
	spin_lock(&qd->qd_lockref.lock);
	qd->qd_qb = *qlvb;
	spin_unlock(&qd->qd_lockref.lock);

	return 0;
}
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_holder i_gh;
	int error;

	gfs2_assert_warn(sdp, sdp == qd->qd_gl->gl_name.ln_sbd);
restart:
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
	if (error)
		return error;

	if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
		force_refresh = FORCE;

	spin_lock(&qd->qd_lockref.lock);
	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
	spin_unlock(&qd->qd_lockref.lock);

	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, q_gh);
		if (error)
			return error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
		if (error)
			goto fail;

		error = update_qd(sdp, qd);
		if (error)
			goto fail_gunlock;

		gfs2_glock_dq_uninit(&i_gh);
		gfs2_glock_dq_uninit(q_gh);
		force_refresh = 0;
		goto restart;
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	gfs2_glock_dq_uninit(q_gh);
	return error;
}
int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qd;
	u32 x;
	int error;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	error = gfs2_quota_hold(ip, uid, gid);
	if (error)
		return error;

	sort(ip->i_qadata->qa_qd, ip->i_qadata->qa_qd_num,
	     sizeof(struct gfs2_quota_data *), sort_qd, NULL);

	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
		qd = ip->i_qadata->qa_qd[x];
		error = do_glock(qd, NO_FORCE, &ip->i_qadata->qa_qd_ghs[x]);
		if (error)
			goto out;
	}

	set_bit(GIF_QD_LOCKED, &ip->i_flags);
	return 0;

out:
	while (x--)
		gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
	gfs2_quota_unhold(ip);
	return error;
}
static bool need_sync(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;
	struct gfs2_tune *gt = &sdp->sd_tune;
	s64 value, change, limit;
	unsigned int num, den;
	bool ret = false;

	spin_lock(&qd->qd_lockref.lock);
	if (!qd->qd_qb.qb_limit)
		goto out;

	change = qd->qd_change;
	if (change <= 0)
		goto out;
	value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
	limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit);
	if (value >= limit)
		goto out;

	spin_lock(&gt->gt_spin);
	num = gt->gt_quota_scale_num;
	den = gt->gt_quota_scale_den;
	spin_unlock(&gt->gt_spin);

	change *= gfs2_jindex_size(sdp) * num;
	change = div_s64(change, den);
	if (value + change < limit)
		goto out;

	ret = true;
out:
	spin_unlock(&qd->qd_lockref.lock);
	return ret;
}
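/*
 * Worked example for the scaling in need_sync(): with the default
 * quota_scale of 1/1, four journals, a limit of 1000 blocks, and a synced
 * value of 900, a local unsynced change of 25 is scaled to 25 * 4 = 100;
 * since 900 + 100 reaches the limit, a sync is triggered even though this
 * node has only seen a quarter of that usage itself.
 */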
void gfs2_quota_unlock(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qda[2 * GFS2_MAXQUOTAS];
	unsigned int count = 0;
	u32 x;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
		goto out;

	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
		struct gfs2_quota_data *qd;
		bool sync;
		int error;

		qd = ip->i_qadata->qa_qd[x];
		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
		if (!sync)
			continue;

		spin_lock(&qd_lock);
		sync = qd_grab_sync(sdp, qd, U64_MAX);
		spin_unlock(&qd_lock);

		if (!sync)
			continue;

		gfs2_assert_warn(sdp, qd->qd_change_sync);
		error = bh_get(qd);
		if (error) {
			qd_ungrab_sync(qd);
			continue;
		}

		qda[count++] = qd;
	}

	if (count) {
		u64 sync_gen = READ_ONCE(sdp->sd_quota_sync_gen);

		do_sync(count, qda, sync_gen);
		for (x = 0; x < count; x++)
			qd_unlock(qda[x]);
	}

out:
	gfs2_quota_unhold(ip);
}
#define MAX_LINE 256
static void print_message(struct gfs2_quota_data *qd, char *type)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET) {
		fs_info(sdp, "quota %s for %s %u\n",
			type,
			(qd->qd_id.type == USRQUOTA) ? "user" : "group",
			from_kqid(&init_user_ns, qd->qd_id));
	}
}
/**
 * gfs2_quota_check - check if allocating new blocks will exceed quota
 * @ip: The inode for which this check is being performed
 * @uid: The uid to check against
 * @gid: The gid to check against
 * @ap: The allocation parameters. ap->target contains the requested
 *      blocks. ap->min_target, if set, contains the minimum blks
 *      requested.
 *
 * Returns: 0 on success.
 *          min_req = ap->min_target ? ap->min_target : ap->target;
 *          quota must allow at least min_req blks for success and
 *          ap->allowed is set to the number of blocks allowed
 *
 *          -EDQUOT otherwise, quota violation. ap->allowed is set to number
 *          of blocks available.
 */
int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
		     struct gfs2_alloc_parms *ap)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qd;
	s64 value, warn, limit;
	u32 x;
	int error = 0;

	ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
		return 0;

	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
		qd = ip->i_qadata->qa_qd[x];

		if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
		      qid_eq(qd->qd_id, make_kqid_gid(gid))))
			continue;

		spin_lock(&qd->qd_lockref.lock);
		warn = (s64)be64_to_cpu(qd->qd_qb.qb_warn);
		limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit);
		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
		value += qd->qd_change;
		spin_unlock(&qd->qd_lockref.lock);

		if (limit > 0 && (limit - value) < ap->allowed)
			ap->allowed = limit - value;
		/* If we can't meet the target */
		if (limit && limit < (value + (s64)ap->target)) {
			/* If no min_target specified or we don't meet
			 * min_target, return -EDQUOT */
			if (!ap->min_target || ap->min_target > ap->allowed) {
				if (!test_and_set_bit(QDF_QMSG_QUIET,
						      &qd->qd_flags)) {
					print_message(qd, "exceeded");
					quota_send_warning(qd->qd_id,
							   sdp->sd_vfs->s_dev,
							   QUOTA_NL_BHARDWARN);
				}
				error = -EDQUOT;
				break;
			}
		} else if (warn && warn < value &&
			   time_after_eq(jiffies, qd->qd_last_warn +
					 gfs2_tune_get(sdp, gt_quota_warn_period)
					 * HZ)) {
			quota_send_warning(qd->qd_id,
					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
			print_message(qd, "warning");
			error = 0;
			qd->qd_last_warn = jiffies;
		}
	}
	return error;
}
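/*
 * Example: with a hard limit of 500 blocks, a synced value of 450 and a
 * pending local change of 30, value becomes 480 and ap->allowed is set to
 * 20; a request with ap->target = 50 then fails with -EDQUOT unless
 * ap->min_target is set to 20 or less.
 */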
void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
		       kuid_t uid, kgid_t gid)
{
	struct gfs2_quota_data *qd;
	u32 x;
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ||
	    gfs2_assert_warn(sdp, change))
		return;
	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
		return;

	if (gfs2_assert_withdraw(sdp, ip->i_qadata &&
				 ip->i_qadata->qa_ref > 0))
		return;
	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
		qd = ip->i_qadata->qa_qd[x];

		if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
		    qid_eq(qd->qd_id, make_kqid_gid(gid))) {
			do_qc(qd, change);
		}
	}
}
int gfs2_quota_sync(struct super_block *sb, int type)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_data **qda;
	unsigned int max_qd = PAGE_SIZE / sizeof(struct gfs2_holder);
	u64 sync_gen;
	int error = 0;

	if (sb_rdonly(sdp->sd_vfs))
		return 0;

	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
	if (!qda)
		return -ENOMEM;

	mutex_lock(&sdp->sd_quota_sync_mutex);
	sync_gen = sdp->sd_quota_sync_gen + 1;

	do {
		struct gfs2_quota_data *iter;
		unsigned int num_qd = 0;
		unsigned int x;

		spin_lock(&qd_lock);
		list_for_each_entry(iter, &sdp->sd_quota_list, qd_list) {
			if (qd_grab_sync(sdp, iter, sync_gen)) {
				qda[num_qd++] = iter;
				if (num_qd == max_qd)
					break;
			}
		}
		spin_unlock(&qd_lock);

		if (!num_qd)
			break;

		for (x = 0; x < num_qd; x++) {
			error = bh_get(qda[x]);
			if (!error)
				continue;

			while (x < num_qd)
				qd_ungrab_sync(qda[--num_qd]);
			break;
		}

		if (!error) {
			WRITE_ONCE(sdp->sd_quota_sync_gen, sync_gen);
			error = do_sync(num_qd, qda, sync_gen);
		}

		for (x = 0; x < num_qd; x++)
			qd_unlock(qda[x]);
	} while (!error);

	mutex_unlock(&sdp->sd_quota_sync_mutex);
	kfree(qda);

	return error;
}
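/*
 * The sync generation counter lets concurrent syncers avoid repeating
 * work: qd_grab_sync() skips any quota_data whose qd_sync_gen has already
 * reached the generation being written, and do_sync() advances qd_sync_gen
 * once the data is safely in the quota file.
 */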
int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
{
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;

	error = do_glock(qd, FORCE, &q_gh);
	if (!error)
		gfs2_glock_dq_uninit(&q_gh);

	qd_put(qd);
	return error;
}
*sdp
)
1392 struct gfs2_inode
*ip
= GFS2_I(sdp
->sd_qc_inode
);
1393 u64 size
= i_size_read(sdp
->sd_qc_inode
);
1394 unsigned int blocks
= size
>> sdp
->sd_sb
.sb_bsize_shift
;
1395 unsigned int x
, slot
= 0;
1396 unsigned int found
= 0;
1398 unsigned int bm_size
;
1399 struct buffer_head
*bh
;
1404 if (gfs2_check_internal_file_size(sdp
->sd_qc_inode
, 1, 64 << 20))
1407 sdp
->sd_quota_slots
= blocks
* sdp
->sd_qc_per_block
;
1408 bm_size
= DIV_ROUND_UP(sdp
->sd_quota_slots
, 8 * sizeof(unsigned long));
1409 bm_size
*= sizeof(unsigned long);
1411 sdp
->sd_quota_bitmap
= kzalloc(bm_size
, GFP_NOFS
| __GFP_NOWARN
);
1412 if (sdp
->sd_quota_bitmap
== NULL
)
1413 sdp
->sd_quota_bitmap
= __vmalloc(bm_size
, GFP_NOFS
|
1415 if (!sdp
->sd_quota_bitmap
)
1418 for (x
= 0; x
< blocks
; x
++) {
1419 struct gfs2_quota_change
*qc
;
1424 error
= gfs2_get_extent(&ip
->i_inode
, x
, &dblock
, &extlen
);
1429 bh
= gfs2_meta_ra(ip
->i_gl
, dblock
, extlen
);
1432 if (gfs2_metatype_check(sdp
, bh
, GFS2_METATYPE_QC
))
1435 qc
= (struct gfs2_quota_change
*)(bh
->b_data
+ sizeof(struct gfs2_meta_header
));
1436 for (y
= 0; y
< sdp
->sd_qc_per_block
&& slot
< sdp
->sd_quota_slots
;
1438 struct gfs2_quota_data
*old_qd
, *qd
;
1439 s64 qc_change
= be64_to_cpu(qc
->qc_change
);
1440 u32 qc_flags
= be32_to_cpu(qc
->qc_flags
);
1441 enum quota_type qtype
= (qc_flags
& GFS2_QCF_USER
) ?
1442 USRQUOTA
: GRPQUOTA
;
1443 struct kqid qc_id
= make_kqid(&init_user_ns
, qtype
,
1444 be32_to_cpu(qc
->qc_id
));
1449 hash
= gfs2_qd_hash(sdp
, qc_id
);
1450 qd
= qd_alloc(hash
, sdp
, qc_id
);
1454 set_bit(QDF_CHANGE
, &qd
->qd_flags
);
1455 qd
->qd_change
= qc_change
;
1457 qd
->qd_slot_ref
= 1;
1459 spin_lock(&qd_lock
);
1460 spin_lock_bucket(hash
);
1461 old_qd
= gfs2_qd_search_bucket(hash
, sdp
, qc_id
);
1463 fs_err(sdp
, "Corruption found in quota_change%u"
1464 "file: duplicate identifier in "
1466 sdp
->sd_jdesc
->jd_jid
, slot
);
1468 spin_unlock_bucket(hash
);
1469 spin_unlock(&qd_lock
);
1472 gfs2_glock_put(qd
->qd_gl
);
1473 kmem_cache_free(gfs2_quotad_cachep
, qd
);
1475 /* zero out the duplicate slot */
1477 memset(qc
, 0, sizeof(*qc
));
1478 mark_buffer_dirty(bh
);
1483 BUG_ON(test_and_set_bit(slot
, sdp
->sd_quota_bitmap
));
1484 list_add(&qd
->qd_list
, &sdp
->sd_quota_list
);
1485 atomic_inc(&sdp
->sd_quota_count
);
1486 hlist_bl_add_head_rcu(&qd
->qd_hlist
, &qd_hash_table
[hash
]);
1487 spin_unlock_bucket(hash
);
1488 spin_unlock(&qd_lock
);
1493 if (buffer_dirty(bh
))
1494 sync_dirty_buffer(bh
);
1501 fs_info(sdp
, "found %u quota changes\n", found
);
1506 if (buffer_dirty(bh
))
1507 sync_dirty_buffer(bh
);
1510 gfs2_quota_cleanup(sdp
);
void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
	struct gfs2_quota_data *qd;
	LIST_HEAD(dispose);
	int count;

	BUG_ON(!test_bit(SDF_NORECOVERY, &sdp->sd_flags) &&
	       test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags));

	spin_lock(&qd_lock);
	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		spin_lock(&qd->qd_lockref.lock);
		if (qd->qd_lockref.count != 0) {
			spin_unlock(&qd->qd_lockref.lock);
			continue;
		}
		lockref_mark_dead(&qd->qd_lockref);
		spin_unlock(&qd->qd_lockref.lock);

		list_lru_del_obj(&gfs2_qd_lru, &qd->qd_lru);
		list_add(&qd->qd_lru, &dispose);
	}
	spin_unlock(&qd_lock);

	gfs2_qd_list_dispose(&dispose);

	wait_event_timeout(sdp->sd_kill_wait,
		(count = atomic_read(&sdp->sd_quota_count)) == 0,
		HZ * 60);

	if (count != 0)
		fs_err(sdp, "%d left-over quota data objects\n", count);

	kvfree(sdp->sd_quota_bitmap);
	sdp->sd_quota_bitmap = NULL;
}
static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
{
	if (error == 0 || error == -EROFS)
		return;
	if (!gfs2_withdrawing_or_withdrawn(sdp)) {
		if (!cmpxchg(&sdp->sd_log_error, 0, error))
			fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
		wake_up(&sdp->sd_logd_waitq);
	}
}
static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
			       int (*fxn)(struct super_block *sb, int type),
			       unsigned long t, unsigned long *timeo,
			       unsigned int *new_timeo)
{
	if (t >= *timeo) {
		int error = fxn(sdp->sd_vfs, 0);
		quotad_error(sdp, msg, error);
		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
	} else {
		*timeo -= t;
	}
}
void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
	if (!sdp->sd_statfs_force_sync) {
		sdp->sd_statfs_force_sync = 1;
		wake_up(&sdp->sd_quota_wait);
	}
}
/**
 * gfs2_quotad - Write cached quota changes into the quota file
 * @data: Pointer to GFS2 superblock
 *
 */

int gfs2_quotad(void *data)
{
	struct gfs2_sbd *sdp = data;
	struct gfs2_tune *tune = &sdp->sd_tune;
	unsigned long statfs_timeo = 0;
	unsigned long quotad_timeo = 0;
	unsigned long t = 0;

	set_freezable();
	while (!kthread_should_stop()) {
		if (gfs2_withdrawing_or_withdrawn(sdp))
			break;

		/* Update the master statfs file */
		if (sdp->sd_statfs_force_sync) {
			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
			quotad_error(sdp, "statfs", error);
			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
		} else {
			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
					   &statfs_timeo,
					   &tune->gt_statfs_quantum);
		}

		/* Update quota file */
		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
				   &quotad_timeo, &tune->gt_quota_quantum);

		t = min(quotad_timeo, statfs_timeo);

		t = wait_event_freezable_timeout(sdp->sd_quota_wait,
				sdp->sd_statfs_force_sync ||
				gfs2_withdrawing_or_withdrawn(sdp) ||
				kthread_should_stop(),
				t);

		if (sdp->sd_statfs_force_sync)
			t = 0;
	}

	return 0;
}
static int gfs2_quota_get_state(struct super_block *sb, struct qc_state *state)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	memset(state, 0, sizeof(*state));

	switch (sdp->sd_args.ar_quota) {
	case GFS2_QUOTA_QUIET:
		fallthrough;
	case GFS2_QUOTA_ON:
		state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED;
		state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED;
		fallthrough;
	case GFS2_QUOTA_ACCOUNT:
		state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED |
						  QCI_SYSFILE;
		state->s_state[GRPQUOTA].flags |= QCI_ACCT_ENABLED |
						  QCI_SYSFILE;
		break;
	case GFS2_QUOTA_OFF:
		break;
	}
	if (sdp->sd_quota_inode) {
		state->s_state[USRQUOTA].ino =
					GFS2_I(sdp->sd_quota_inode)->i_no_addr;
		state->s_state[USRQUOTA].blocks = sdp->sd_quota_inode->i_blocks;
	}
	state->s_state[USRQUOTA].nextents = 1;	/* unsupported */
	state->s_state[GRPQUOTA] = state->s_state[USRQUOTA];
	state->s_incoredqs = list_lru_count(&gfs2_qd_lru);
	return 0;
}
static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
			  struct qc_dqblk *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_lvb *qlvb;
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	memset(fdq, 0, sizeof(*fdq));

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if ((qid.type != USRQUOTA) &&
	    (qid.type != GRPQUOTA))
		return -EINVAL;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;
	error = do_glock(qd, FORCE, &q_gh);
	if (error)
		goto out;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
	fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
	fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
	fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;

	gfs2_glock_dq_uninit(&q_gh);
out:
	qd_put(qd);
	return error;
}
/* GFS2 only supports a subset of the XFS fields */
#define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE)
static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
			  struct qc_dqblk *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh, i_gh;
	unsigned int data_blocks, ind_blocks;
	unsigned int blocks = 0;
	int alloc_required;
	loff_t offset;
	int error;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if ((qid.type != USRQUOTA) &&
	    (qid.type != GRPQUOTA))
		return -EINVAL;

	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
		return -EINVAL;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;

	error = gfs2_qa_get(ip);
	if (error)
		goto out_put;

	inode_lock(&ip->i_inode);
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
	if (error)
		goto out_unlockput;
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out_q;

	/* Check for existing entry, if none then alloc new blocks */
	error = update_qd(sdp, qd);
	if (error)
		goto out_i;

	/* If nothing has changed, this is a no-op */
	if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
	    ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
		fdq->d_fieldmask ^= QC_SPC_SOFT;

	if ((fdq->d_fieldmask & QC_SPC_HARD) &&
	    ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
		fdq->d_fieldmask ^= QC_SPC_HARD;

	if ((fdq->d_fieldmask & QC_SPACE) &&
	    ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
		fdq->d_fieldmask ^= QC_SPACE;

	if (fdq->d_fieldmask == 0)
		goto out_i;

	offset = qd2offset(qd);
	alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
	if (gfs2_is_stuffed(ip))
		alloc_required = 1;
	if (alloc_required) {
		struct gfs2_alloc_parms ap = {};
		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
				       &data_blocks, &ind_blocks);
		blocks = 1 + data_blocks + ind_blocks;
		ap.target = blocks;
		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto out_i;
		blocks += gfs2_rg_blocks(ip, blocks);
	}

	/* Some quotas span block boundaries and can update two blocks,
	   adding an extra block to the transaction to handle such quotas */
	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
	if (error)
		goto out_release;

	/* Apply changes */
	error = gfs2_adjust_quota(sdp, offset, 0, qd, fdq);
	if (!error)
		clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);

	gfs2_trans_end(sdp);
out_release:
	if (alloc_required)
		gfs2_inplace_release(ip);
out_i:
	gfs2_glock_dq_uninit(&i_gh);
out_q:
	gfs2_glock_dq_uninit(&q_gh);
out_unlockput:
	gfs2_qa_put(ip);
	inode_unlock(&ip->i_inode);
out_put:
	qd_put(qd);
	return error;
}
const struct quotactl_ops gfs2_quotactl_ops = {
	.quota_sync     = gfs2_quota_sync,
	.get_state	= gfs2_quota_get_state,
	.get_dqblk	= gfs2_get_dqblk,
	.set_dqblk	= gfs2_set_dqblk,
};
void __init gfs2_quota_hash_init(void)
{
	unsigned i;

	for(i = 0; i < GFS2_QD_HASH_SIZE; i++)
		INIT_HLIST_BL_HEAD(&qd_hash_table[i]);
}