// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space.  Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file.  This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously.  So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota check
 * program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file.  (The default is
 * 60 seconds.)  Another knob, "quota_scale", controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit.  The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one.  This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes, each with infinite bandwidth) to twice the
 * user's limit.  (In practice, the maximum overrun you see should be much
 * less.)  A "quota_scale" number greater than one makes quota syncs more
 * frequent and reduces the maximum overrun.  Numbers less than one (but
 * greater than zero) make quota syncs less frequent.
 *
 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
 * the quota file, so it is not being constantly read.
 */
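
/*
 * Back-of-the-envelope illustration of the fuzziness described above (the
 * numbers are hypothetical, not taken from the code): with quota_scale at
 * its default of one, a user already near a 1000-block limit can, in the
 * worst theoretical case, be allowed up to 2 * 1000 blocks cluster-wide
 * before every node's pending changes reach the quota file.  Raising
 * quota_scale to, say, 2 tightens that bound, at the cost of more frequent
 * quota file syncs and therefore more inter-node contention.
 */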
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>
#include <linux/lockref.h>
#include <linux/list_lru.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/jhash.h>
#include <linux/vmalloc.h>
#define GFS2_QD_HASH_SHIFT      12
#define GFS2_QD_HASH_SIZE       BIT(GFS2_QD_HASH_SHIFT)
#define GFS2_QD_HASH_MASK       (GFS2_QD_HASH_SIZE - 1)
/* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
/*                     -> sd_bitmap_lock                              */
static DEFINE_SPINLOCK(qd_lock);
struct list_lru gfs2_qd_lru;

static struct hlist_bl_head qd_hash_table[GFS2_QD_HASH_SIZE];
static unsigned int gfs2_qd_hash(const struct gfs2_sbd *sdp,
				 const struct kqid qid)
{
	unsigned int h;

	h = jhash(&sdp, sizeof(struct gfs2_sbd *), 0);
	h = jhash(&qid, sizeof(struct kqid), h);

	return h & GFS2_QD_HASH_MASK;
}
static inline void spin_lock_bucket(unsigned int hash)
{
	hlist_bl_lock(&qd_hash_table[hash]);
}

static inline void spin_unlock_bucket(unsigned int hash)
{
	hlist_bl_unlock(&qd_hash_table[hash]);
}
static void gfs2_qd_dealloc(struct rcu_head *rcu)
{
	struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
	kmem_cache_free(gfs2_quotad_cachep, qd);
}
static void gfs2_qd_dispose(struct list_head *list)
{
	struct gfs2_quota_data *qd;
	struct gfs2_sbd *sdp;

	while (!list_empty(list)) {
		qd = list_first_entry(list, struct gfs2_quota_data, qd_lru);
		sdp = qd->qd_gl->gl_name.ln_sbd;

		list_del(&qd->qd_lru);

		/* Free from the filesystem-specific list */
		spin_lock(&qd_lock);
		list_del(&qd->qd_list);
		spin_unlock(&qd_lock);

		spin_lock_bucket(qd->qd_hash);
		hlist_bl_del_rcu(&qd->qd_hlist);
		spin_unlock_bucket(qd->qd_hash);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		atomic_dec(&sdp->sd_quota_count);

		/* Delete it from the common reclaim list */
		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
	}
}
static enum lru_status gfs2_qd_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *dispose = arg;
	struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru);

	if (!spin_trylock(&qd->qd_lockref.lock))
		return LRU_SKIP;

	if (qd->qd_lockref.count == 0) {
		lockref_mark_dead(&qd->qd_lockref);
		list_lru_isolate_move(lru, &qd->qd_lru, dispose);
	}

	spin_unlock(&qd->qd_lockref.lock);
	return LRU_REMOVED;
}
static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
					 struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	unsigned long freed;

	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	freed = list_lru_shrink_walk(&gfs2_qd_lru, sc,
				     gfs2_qd_isolate, &dispose);

	gfs2_qd_dispose(&dispose);

	return freed;
}
static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
					  struct shrink_control *sc)
{
	return vfs_pressure_ratio(list_lru_shrink_count(&gfs2_qd_lru, sc));
}
struct shrinker gfs2_qd_shrinker = {
	.count_objects = gfs2_qd_shrink_count,
	.scan_objects = gfs2_qd_shrink_scan,
	.seeks = DEFAULT_SEEKS,
	.flags = SHRINKER_NUMA_AWARE,
};
static u64 qd2index(struct gfs2_quota_data *qd)
{
	struct kqid qid = qd->qd_id;
	return (2 * (u64)from_kqid(&init_user_ns, qid)) +
		((qid.type == USRQUOTA) ? 0 : 1);
}
static u64 qd2offset(struct gfs2_quota_data *qd)
{
	u64 offset;

	offset = qd2index(qd);
	offset *= sizeof(struct gfs2_quota);

	return offset;
}
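
/*
 * Worked example of the index/offset mapping above (a sketch; the exact
 * record size comes from struct gfs2_quota in gfs2_ondisk.h): user and
 * group records interleave in the quota file, so uid 0 sits at index 0,
 * gid 0 at index 1, uid 1 at index 2, and so on.  For uid 1000,
 * qd2index() yields 2 * 1000 + 0 = 2000 and qd2offset() yields
 * 2000 * sizeof(struct gfs2_quota) bytes into the quota file.
 */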
static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
{
	struct gfs2_quota_data *qd;
	int error;

	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
	if (qd == NULL)
		return NULL;

	qd->qd_sbd = sdp;
	qd->qd_lockref.count = 1;
	spin_lock_init(&qd->qd_lockref.lock);
	qd->qd_id = qid;
	qd->qd_slot = -1;
	INIT_LIST_HEAD(&qd->qd_lru);
	qd->qd_hash = hash;

	error = gfs2_glock_get(sdp, qd2index(qd),
			      &gfs2_quota_glops, CREATE, &qd->qd_gl);
	if (error)
		goto fail;

	return qd;

fail:
	kmem_cache_free(gfs2_quotad_cachep, qd);
	return NULL;
}
static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash,
						     const struct gfs2_sbd *sdp,
						     struct kqid qid)
{
	struct gfs2_quota_data *qd;
	struct hlist_bl_node *h;

	hlist_bl_for_each_entry_rcu(qd, h, &qd_hash_table[hash], qd_hlist) {
		if (!qid_eq(qd->qd_id, qid))
			continue;
		if (qd->qd_sbd != sdp)
			continue;
		if (lockref_get_not_dead(&qd->qd_lockref)) {
			list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
			return qd;
		}
	}

	return NULL;
}
static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd, *new_qd;
	unsigned int hash = gfs2_qd_hash(sdp, qid);

	rcu_read_lock();
	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
	rcu_read_unlock();

	if (qd)
		return 0;

	new_qd = qd_alloc(hash, sdp, qid);
	if (!new_qd)
		return -ENOMEM;

	spin_lock(&qd_lock);
	spin_lock_bucket(hash);
	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
	if (qd == NULL) {
		*qdp = new_qd;
		list_add(&new_qd->qd_list, &sdp->sd_quota_list);
		hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
		atomic_inc(&sdp->sd_quota_count);
	}
	spin_unlock_bucket(hash);
	spin_unlock(&qd_lock);

	if (qd) {
		gfs2_glock_put(new_qd->qd_gl);
		kmem_cache_free(gfs2_quotad_cachep, new_qd);
	}

	return 0;
}
static void qd_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
	gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
	lockref_get(&qd->qd_lockref);
}
static void qd_put(struct gfs2_quota_data *qd)
{
	if (lockref_put_or_lock(&qd->qd_lockref))
		return;

	qd->qd_lockref.count = 0;
	list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
	spin_unlock(&qd->qd_lockref.lock);
}
static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;
	unsigned int bit;
	int error = 0;

	spin_lock(&sdp->sd_bitmap_lock);
	if (qd->qd_slot_count != 0)
		goto out;

	error = -ENOSPC;
	bit = find_first_zero_bit(sdp->sd_quota_bitmap, sdp->sd_quota_slots);
	if (bit < sdp->sd_quota_slots) {
		set_bit(bit, sdp->sd_quota_bitmap);
		qd->qd_slot = bit;
		error = 0;
out:
		qd->qd_slot_count++;
	}
	spin_unlock(&sdp->sd_bitmap_lock);

	return error;
}
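
/*
 * Sketch of the slot lifecycle implied by slot_get(), slot_hold() and
 * slot_put(): the first slot_get() for a quota_data finds a free bit in
 * sd_quota_bitmap and records it in qd_slot; later calls only bump
 * qd_slot_count.  When the count drops back to zero in slot_put(), the
 * bit is cleared again, so the bitmap tracks exactly the set of IDs with
 * an active entry in the per-node quota-change file.
 */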
static void slot_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;

	spin_lock(&sdp->sd_bitmap_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;
	spin_unlock(&sdp->sd_bitmap_lock);
}
static void slot_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;

	spin_lock(&sdp->sd_bitmap_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
		qd->qd_slot = -1;
	}
	spin_unlock(&sdp->sd_bitmap_lock);
}
static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int block, offset;
	struct buffer_head *bh;
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);
		return 0;
	}

	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	bh_map.b_size = BIT(ip->i_inode.i_blkbits);
	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
	if (error)
		goto fail;
	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, 0, &bh);
	if (error)
		goto fail;
	error = -EIO;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
		goto fail_brelse;

	qd->qd_bh = bh;
	qd->qd_bh_qc = (struct gfs2_quota_change *)
		(bh->b_data + sizeof(struct gfs2_meta_header) +
		 offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);

	return 0;

fail_brelse:
	brelse(bh);
fail:
	qd->qd_bh_count--;
	mutex_unlock(&sdp->sd_quota_mutex);
	return error;
}
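
/*
 * Worked example of the slot-to-block mapping above (illustrative numbers;
 * the real sd_qc_per_block is derived from the block size and the sizes of
 * struct gfs2_meta_header and struct gfs2_quota_change): if
 * sd_qc_per_block were 254, slot 600 would live in block 600 / 254 == 2 of
 * the quota-change file, at entry 600 % 254 == 92 past the metadata header.
 */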
static void bh_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
		brelse(qd->qd_bh);
		qd->qd_bh = NULL;
		qd->qd_bh_qc = NULL;
	}
	mutex_unlock(&sdp->sd_quota_mutex);
}
static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
			 u64 *sync_gen)
{
	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
	    (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
		return 0;

	if (!lockref_get_not_dead(&qd->qd_lockref))
		return 0;

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
	set_bit(QDF_LOCKED, &qd->qd_flags);
	qd->qd_change_sync = qd->qd_change;
	slot_hold(qd);
	return 1;
}
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL;
	int error;
	int found = 0;

	*qdp = NULL;

	if (sb_rdonly(sdp->sd_vfs))
		return 0;

	spin_lock(&qd_lock);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		found = qd_check_sync(sdp, qd, &sdp->sd_quota_sync_gen);
		if (found)
			break;
	}

	if (!found)
		qd = NULL;

	spin_unlock(&qd_lock);

	if (qd) {
		gfs2_assert_warn(sdp, qd->qd_change_sync);
		error = bh_get(qd);
		if (error) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			return error;
		}
	}

	*qdp = qd;

	return 0;
}
static void qd_unlock(struct gfs2_quota_data *qd)
{
	gfs2_assert_warn(qd->qd_gl->gl_name.ln_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}
static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
		    struct gfs2_quota_data **qdp)
{
	int error;

	error = qd_get(sdp, qid, qdp);
	if (error)
		return error;

	error = slot_get(*qdp);
	if (error)
		goto fail;

	error = bh_get(*qdp);
	if (error)
		goto fail_slot;

	return 0;

fail_slot:
	slot_put(*qdp);
fail:
	qd_put(*qdp);
	return error;
}
static void qdsb_put(struct gfs2_quota_data *qd)
{
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}
/**
 * gfs2_qa_get - make sure we have a quota allocations data structure,
 *               if necessary
 * @ip: the inode for this reservation
 */
int gfs2_qa_get(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	int error = 0;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	down_write(&ip->i_rw_mutex);
	if (ip->i_qadata == NULL) {
		ip->i_qadata = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS);
		if (!ip->i_qadata) {
			error = -ENOMEM;
			goto out;
		}
	}
	ip->i_qadata->qa_ref++;
out:
	up_write(&ip->i_rw_mutex);
	return error;
}
void gfs2_qa_put(struct gfs2_inode *ip)
{
	down_write(&ip->i_rw_mutex);
	if (ip->i_qadata && --ip->i_qadata->qa_ref == 0) {
		kmem_cache_free(gfs2_qadata_cachep, ip->i_qadata);
		ip->i_qadata = NULL;
	}
	up_write(&ip->i_rw_mutex);
}
int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data **qd;
	int error;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	error = gfs2_qa_get(ip);
	if (error)
		return error;

	qd = ip->i_qadata->qa_qd;

	if (gfs2_assert_warn(sdp, !ip->i_qadata->qa_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags))) {
		error = -EIO;
		goto out;
	}

	error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
	if (error)
		goto out_unhold;
	ip->i_qadata->qa_qd_num++;
	qd++;

	error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
	if (error)
		goto out_unhold;
	ip->i_qadata->qa_qd_num++;
	qd++;

	if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
	    !uid_eq(uid, ip->i_inode.i_uid)) {
		error = qdsb_get(sdp, make_kqid_uid(uid), qd);
		if (error)
			goto out_unhold;
		ip->i_qadata->qa_qd_num++;
		qd++;
	}

	if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
	    !gid_eq(gid, ip->i_inode.i_gid)) {
		error = qdsb_get(sdp, make_kqid_gid(gid), qd);
		if (error)
			goto out_unhold;
		ip->i_qadata->qa_qd_num++;
		qd++;
	}

out_unhold:
	if (error)
		gfs2_quota_unhold(ip);
out:
	return error;
}
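
/*
 * Note on sizing: gfs2_quota_hold() above acquires at most four quota_data
 * references per inode: the inode's own uid and gid, plus one foreign uid
 * and one foreign gid for ownership changes.  That bound is why
 * gfs2_quota_unlock() further down can use a fixed qda[4] array.
 */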
void gfs2_quota_unhold(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	u32 x;

	if (ip->i_qadata == NULL)
		return;

	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
		qdsb_put(ip->i_qadata->qa_qd[x]);
		ip->i_qadata->qa_qd[x] = NULL;
	}
	ip->i_qadata->qa_qd_num = 0;
	gfs2_qa_put(ip);
}
static int sort_qd(const void *a, const void *b)
{
	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

	if (qid_lt(qd_a->qd_id, qd_b->qd_id))
		return -1;
	if (qid_lt(qd_b->qd_id, qd_a->qd_id))
		return 1;
	return 0;
}
static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;
	s64 x;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);

	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		qc->qc_change = 0;
		qc->qc_flags = 0;
		if (qd->qd_id.type == USRQUOTA)
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
	}

	x = be64_to_cpu(qc->qc_change) + change;
	qc->qc_change = cpu_to_be64(x);

	spin_lock(&qd_lock);
	qd->qd_change = x;
	spin_unlock(&qd_lock);

	if (!x) {
		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);
		qc->qc_flags = 0;
		qc->qc_id = 0;
		slot_put(qd);
		qd_put(qd);
	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
		qd_hold(qd);
		slot_hold(qd);
	}

	if (change < 0) /* Reset quiet flag if we freed some blocks */
		clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
	mutex_unlock(&sdp->sd_quota_mutex);
}
static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
				  unsigned off, void *buf, unsigned bytes)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct buffer_head *bh;
	void *kaddr;
	u64 blk;
	unsigned bsize = sdp->sd_sb.sb_bsize, bnum = 0, boff = 0;
	unsigned to_write = bytes, pg_off = off;
	int done = 0;

	blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift);
	boff = off % bsize;

	page = find_or_create_page(mapping, index, GFP_NOFS);
	if (!page)
		return -ENOMEM;
	if (!page_has_buffers(page))
		create_empty_buffers(page, bsize, 0);

	bh = page_buffers(page);
	while (!done) {
		/* Find the beginning block within the page */
		if (pg_off >= ((bnum * bsize) + bsize)) {
			bh = bh->b_this_page;
			bnum++;
			blk++;
			continue;
		}
		if (!buffer_mapped(bh)) {
			gfs2_block_map(inode, blk, bh, 1);
			if (!buffer_mapped(bh))
				goto unlock_out;
			/* If it's a newly allocated disk block, zero it */
			if (buffer_new(bh))
				zero_user(page, bnum * bsize, bh->b_size);
		}
		if (PageUptodate(page))
			set_buffer_uptodate(bh);
		if (!buffer_uptodate(bh)) {
			ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				goto unlock_out;
		}
		if (gfs2_is_jdata(ip))
			gfs2_trans_add_data(ip->i_gl, bh);
		else
			gfs2_ordered_add_inode(ip);

		/* If we need to write to the next block as well */
		if (to_write > (bsize - boff)) {
			pg_off += (bsize - boff);
			to_write -= (bsize - boff);
			boff = pg_off % bsize;
			continue;
		}
		done = 1;
	}

	/* Write to the page, now that we have setup the buffer(s) */
	kaddr = kmap_atomic(page);
	memcpy(kaddr + off, buf, bytes);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);
	unlock_page(page);
	put_page(page);

	return 0;

unlock_out:
	unlock_page(page);
	put_page(page);
	return -EIO;
}
static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp,
				 loff_t loc)
{
	unsigned long pg_beg;
	unsigned pg_off, nbytes, overflow = 0;
	int pg_oflow = 0, error;
	void *ptr = qp;

	nbytes = sizeof(struct gfs2_quota);

	pg_beg = loc >> PAGE_SHIFT;
	pg_off = offset_in_page(loc);

	/* If the quota straddles a page boundary, split the write in two */
	if ((pg_off + nbytes) > PAGE_SIZE) {
		pg_oflow = 1;
		overflow = (pg_off + nbytes) - PAGE_SIZE;
	}

	error = gfs2_write_buf_to_page(ip, pg_beg, pg_off, ptr,
				       nbytes - overflow);
	/* If there's an overflow, write the remaining bytes to the next page */
	if (!error && pg_oflow)
		error = gfs2_write_buf_to_page(ip, pg_beg + 1, 0,
					       ptr + nbytes - overflow,
					       overflow);
	return error;
}
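
/*
 * Worked example of the split above (illustrative numbers): with
 * PAGE_SIZE == 4096 and a record starting at pg_off == 4090, any write of
 * more than 6 bytes straddles the boundary, so
 * overflow = (4090 + nbytes) - 4096; the first gfs2_write_buf_to_page()
 * call writes nbytes - overflow bytes into page pg_beg and the second
 * writes the remaining overflow bytes at offset 0 of page pg_beg + 1.
 */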
/**
 * gfs2_adjust_quota - adjust record of current block usage
 * @ip: The quota inode
 * @loc: Offset of the entry in the quota file
 * @change: The amount of usage change to record
 * @qd: The quota data
 * @fdq: The updated limits to record
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 *
 * Returns: 0 or -ve on error
 */
static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
			     s64 change, struct gfs2_quota_data *qd,
			     struct qc_dqblk *fdq)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_quota q;
	int err;
	u64 size;

	if (gfs2_is_stuffed(ip)) {
		err = gfs2_unstuff_dinode(ip, NULL);
		if (err)
			return err;
	}

	memset(&q, 0, sizeof(struct gfs2_quota));
	err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
	if (err < 0)
		return err;

	loc -= sizeof(q); /* gfs2_internal_read would've advanced the loc ptr */
	be64_add_cpu(&q.qu_value, change);
	if (((s64)be64_to_cpu(q.qu_value)) < 0)
		q.qu_value = 0; /* Never go negative on quota usage */
	qd->qd_qb.qb_value = q.qu_value;
	if (fdq) {
		if (fdq->d_fieldmask & QC_SPC_SOFT) {
			q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
			qd->qd_qb.qb_warn = q.qu_warn;
		}
		if (fdq->d_fieldmask & QC_SPC_HARD) {
			q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
			qd->qd_qb.qb_limit = q.qu_limit;
		}
		if (fdq->d_fieldmask & QC_SPACE) {
			q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
			qd->qd_qb.qb_value = q.qu_value;
		}
	}

	err = gfs2_write_disk_quota(ip, &q, loc);
	if (!err) {
		size = loc + sizeof(struct gfs2_quota);
		if (size > inode->i_size)
			i_size_write(inode, size);
		inode->i_mtime = inode->i_atime = current_time(inode);
		mark_inode_dirty(inode);
		set_bit(QDF_REFRESH, &qd->qd_flags);
	}

	return err;
}
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	unsigned int qx, x;
	struct gfs2_quota_data *qd;
	unsigned reserved;
	loff_t offset;
	unsigned int nalloc = 0, blocks;
	int error;

	error = gfs2_qa_get(ip);
	if (error)
		return error;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			      &data_blocks, &ind_blocks);

	ghs = kmalloc_array(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
	if (!ghs) {
		error = -ENOMEM;
		goto out;
	}

	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	inode_lock(&ip->i_inode);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &ghs[qx]);
		if (error)
			goto out_dq;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out_dq;

	for (x = 0; x < num_qd; x++) {
		offset = qd2offset(qda[x]);
		if (gfs2_write_alloc_required(ip, offset,
					      sizeof(struct gfs2_quota)))
			nalloc++;
	}

	/*
	 * 1 blk for unstuffing inode if stuffed. We add this extra
	 * block to the reservation unconditionally. If the inode
	 * doesn't need unstuffing, the block will be released to the
	 * rgrp since it won't be allocated during the transaction
	 */
	/* +3 in the end for unstuffing block, inode size update block
	 * and another block in case quota straddles page boundary and
	 * two blocks need to be updated instead of 1 */
	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;

	reserved = 1 + (nalloc * (data_blocks + ind_blocks));
	ap.target = reserved;
	error = gfs2_inplace_reserve(ip, &ap);
	if (error)
		goto out_alloc;

	if (nalloc)
		blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;

	error = gfs2_trans_begin(sdp, blocks, 0);
	if (error)
		goto out_ipres;

	for (x = 0; x < num_qd; x++) {
		qd = qda[x];
		offset = qd2offset(qd);
		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
		if (error)
			goto out_end_trans;

		do_qc(qd, -qd->qd_change_sync);
		set_bit(QDF_REFRESH, &qd->qd_flags);
	}

	error = 0;
out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	gfs2_inplace_release(ip);
out_alloc:
	gfs2_glock_dq_uninit(&i_gh);
out_dq:
	while (qx--)
		gfs2_glock_dq_uninit(&ghs[qx]);
	inode_unlock(&ip->i_inode);
	kfree(ghs);
	gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl,
		       GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_DO_SYNC);
out:
	gfs2_qa_put(ip);
	return error;
}
static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota q;
	struct gfs2_quota_lvb *qlvb;
	loff_t pos;
	int error;

	memset(&q, 0, sizeof(struct gfs2_quota));
	pos = qd2offset(qd);
	error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
	if (error < 0)
		return error;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
	qlvb->__pad = 0;
	qlvb->qb_limit = q.qu_limit;
	qlvb->qb_warn = q.qu_warn;
	qlvb->qb_value = q.qu_value;
	qd->qd_qb = *qlvb;

	return 0;
}
#define FORCE		1
#define NO_FORCE	0

static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_holder i_gh;
	int error;

restart:
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
	if (error)
		return error;

	if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
		force_refresh = FORCE;

	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;

	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, q_gh);
		if (error)
			return error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
		if (error)
			goto fail;

		error = update_qd(sdp, qd);
		if (error)
			goto fail_gunlock;

		gfs2_glock_dq_uninit(&i_gh);
		gfs2_glock_dq_uninit(q_gh);
		force_refresh = 0;
		goto restart;
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	gfs2_glock_dq_uninit(q_gh);
	return error;
}
int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qd;
	u32 x;
	int error = 0;

	if (capable(CAP_SYS_RESOURCE) ||
	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	error = gfs2_quota_hold(ip, uid, gid);
	if (error)
		return error;

	sort(ip->i_qadata->qa_qd, ip->i_qadata->qa_qd_num,
	     sizeof(struct gfs2_quota_data *), sort_qd, NULL);

	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
		qd = ip->i_qadata->qa_qd[x];
		error = do_glock(qd, NO_FORCE, &ip->i_qadata->qa_qd_ghs[x]);
		if (error)
			break;
	}

	if (!error)
		set_bit(GIF_QD_LOCKED, &ip->i_flags);
	else {
		while (x--)
			gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
		gfs2_quota_unhold(ip);
	}

	return error;
}
static int need_sync(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
	struct gfs2_tune *gt = &sdp->sd_tune;
	s64 value;
	unsigned int num, den;
	int do_sync = 1;

	if (!qd->qd_qb.qb_limit)
		return 0;

	spin_lock(&qd_lock);
	value = qd->qd_change;
	spin_unlock(&qd_lock);

	spin_lock(&gt->gt_spin);
	num = gt->gt_quota_scale_num;
	den = gt->gt_quota_scale_den;
	spin_unlock(&gt->gt_spin);

	if (value < 0)
		do_sync = 0;
	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
		do_sync = 0;
	else {
		value *= gfs2_jindex_size(sdp) * num;
		value = div_s64(value, den);
		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
			do_sync = 0;
	}

	return do_sync;
}
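
/*
 * Worked example of the scaling above (hypothetical numbers): with a local
 * pending change of 100 blocks, 4 journals, and the default scale
 * num/den == 1/1, the projected value is qb_value + (100 * 4 * 1) / 1.
 * If qb_value is 700 against a qb_limit of 1000, then 700 + 400 >= 1000,
 * so need_sync() reports that this node should push its changes to the
 * quota file now.
 */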
void gfs2_quota_unlock(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qda[4];
	unsigned int count = 0;
	u32 x;
	int found;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
		goto out;

	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
		struct gfs2_quota_data *qd;
		int sync;

		qd = ip->i_qadata->qa_qd[x];
		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
		if (!sync)
			continue;

		spin_lock(&qd_lock);
		found = qd_check_sync(sdp, qd, NULL);
		spin_unlock(&qd_lock);

		if (!found)
			continue;

		gfs2_assert_warn(sdp, qd->qd_change_sync);
		if (bh_get(qd)) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			continue;
		}

		qda[count++] = qd;
	}

	if (count) {
		do_sync(count, qda);
		for (x = 0; x < count; x++)
			qd_unlock(qda[x]);
	}

out:
	gfs2_quota_unhold(ip);
}
#define MAX_LINE 256
static int print_message(struct gfs2_quota_data *qd, char *type)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;

	fs_info(sdp, "quota %s for %s %u\n",
		type,
		(qd->qd_id.type == USRQUOTA) ? "user" : "group",
		from_kqid(&init_user_ns, qd->qd_id));

	return 0;
}
/**
 * gfs2_quota_check - check if allocating new blocks will exceed quota
 * @ip:  The inode for which this check is being performed
 * @uid: The uid to check against
 * @gid: The gid to check against
 * @ap:  The allocation parameters. ap->target contains the requested
 *       blocks. ap->min_target, if set, contains the minimum blks
 *       requested.
 *
 * Returns: 0 on success.
 *                  min_req = ap->min_target ? ap->min_target : ap->target;
 *                  quota must allow at least min_req blks for success and
 *                  ap->allowed is set to the number of blocks allowed
 *
 *          -EDQUOT otherwise, quota violation. ap->allowed is set to number
 *                  of blocks available.
 */
int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
		     struct gfs2_alloc_parms *ap)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qd;
	s64 value, warn, limit;
	u32 x;
	int error = 0;

	ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
		return 0;

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
		qd = ip->i_qadata->qa_qd[x];

		if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
		      qid_eq(qd->qd_id, make_kqid_gid(gid))))
			continue;

		warn = (s64)be64_to_cpu(qd->qd_qb.qb_warn);
		limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit);
		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
		spin_lock(&qd_lock);
		value += qd->qd_change;
		spin_unlock(&qd_lock);

		if (limit > 0 && (limit - value) < ap->allowed)
			ap->allowed = limit - value;
		/* If we can't meet the target */
		if (limit && limit < (value + (s64)ap->target)) {
			/* If no min_target specified or we don't meet
			 * min_target, return -EDQUOT */
			if (!ap->min_target || ap->min_target > ap->allowed) {
				if (!test_and_set_bit(QDF_QMSG_QUIET,
						      &qd->qd_flags)) {
					print_message(qd, "exceeded");
					quota_send_warning(qd->qd_id,
							   sdp->sd_vfs->s_dev,
							   QUOTA_NL_BHARDWARN);
				}
				error = -EDQUOT;
				break;
			}
		} else if (warn && warn < value &&
			   time_after_eq(jiffies, qd->qd_last_warn +
					 gfs2_tune_get(sdp, gt_quota_warn_period)
					 * HZ)) {
			quota_send_warning(qd->qd_id,
					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
			error = print_message(qd, "warning");
			qd->qd_last_warn = jiffies;
		}
	}
	return error;
}
void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
		       kuid_t uid, kgid_t gid)
{
	struct gfs2_quota_data *qd;
	u32 x;
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON ||
	    gfs2_assert_warn(sdp, change))
		return;
	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
		return;

	BUG_ON(ip->i_qadata->qa_ref <= 0);
	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
		qd = ip->i_qadata->qa_qd[x];

		if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
		    qid_eq(qd->qd_id, make_kqid_gid(gid))) {
			do_qc(qd, change);
		}
	}
}
int gfs2_quota_sync(struct super_block *sb, int type)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_data **qda;
	unsigned int max_qd = PAGE_SIZE / sizeof(struct gfs2_holder);
	unsigned int num_qd;
	unsigned int x;
	int error = 0;

	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
	if (!qda)
		return -ENOMEM;

	mutex_lock(&sdp->sd_quota_sync_mutex);
	sdp->sd_quota_sync_gen++;

	do {
		num_qd = 0;

		for (;;) {
			error = qd_fish(sdp, qda + num_qd);
			if (error || !qda[num_qd])
				break;
			if (++num_qd == max_qd)
				break;
		}

		if (num_qd) {
			if (!error)
				error = do_sync(num_qd, qda);
			if (!error)
				for (x = 0; x < num_qd; x++)
					qda[x]->qd_sync_gen =
						sdp->sd_quota_sync_gen;

			for (x = 0; x < num_qd; x++)
				qd_unlock(qda[x]);
		}
	} while (!error && num_qd == max_qd);

	mutex_unlock(&sdp->sd_quota_sync_mutex);
	kfree(qda);

	return error;
}
int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
{
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;

	error = do_glock(qd, FORCE, &q_gh);
	if (!error)
		gfs2_glock_dq_uninit(&q_gh);

	qd_put(qd);
	return error;
}
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	u64 size = i_size_read(sdp->sd_qc_inode);
	unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;
	unsigned int hash;
	unsigned int bm_size;
	u64 dblock;
	u32 extlen = 0;
	int error;

	if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
		return -EIO;

	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
	bm_size = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * sizeof(unsigned long));
	bm_size *= sizeof(unsigned long);
->sd_quota_bitmap
= kzalloc(bm_size
, GFP_NOFS
| __GFP_NOWARN
);
1369 if (sdp
->sd_quota_bitmap
== NULL
)
1370 sdp
->sd_quota_bitmap
= __vmalloc(bm_size
, GFP_NOFS
|
1371 __GFP_ZERO
, PAGE_KERNEL
);
1372 if (!sdp
->sd_quota_bitmap
)
1375 for (x
= 0; x
< blocks
; x
++) {
1376 struct buffer_head
*bh
;
1377 const struct gfs2_quota_change
*qc
;
1382 error
= gfs2_extent_map(&ip
->i_inode
, x
, &new, &dblock
, &extlen
);
1387 bh
= gfs2_meta_ra(ip
->i_gl
, dblock
, extlen
);
1390 if (gfs2_metatype_check(sdp
, bh
, GFS2_METATYPE_QC
)) {
1395 qc
= (const struct gfs2_quota_change
*)(bh
->b_data
+ sizeof(struct gfs2_meta_header
));
1396 for (y
= 0; y
< sdp
->sd_qc_per_block
&& slot
< sdp
->sd_quota_slots
;
1398 struct gfs2_quota_data
*qd
;
1399 s64 qc_change
= be64_to_cpu(qc
->qc_change
);
1400 u32 qc_flags
= be32_to_cpu(qc
->qc_flags
);
1401 enum quota_type qtype
= (qc_flags
& GFS2_QCF_USER
) ?
1402 USRQUOTA
: GRPQUOTA
;
1403 struct kqid qc_id
= make_kqid(&init_user_ns
, qtype
,
1404 be32_to_cpu(qc
->qc_id
));
1409 hash
= gfs2_qd_hash(sdp
, qc_id
);
1410 qd
= qd_alloc(hash
, sdp
, qc_id
);
1416 set_bit(QDF_CHANGE
, &qd
->qd_flags
);
1417 qd
->qd_change
= qc_change
;
1419 qd
->qd_slot_count
= 1;
1421 spin_lock(&qd_lock
);
1422 BUG_ON(test_and_set_bit(slot
, sdp
->sd_quota_bitmap
));
1423 list_add(&qd
->qd_list
, &sdp
->sd_quota_list
);
1424 atomic_inc(&sdp
->sd_quota_count
);
1425 spin_unlock(&qd_lock
);
1427 spin_lock_bucket(hash
);
1428 hlist_bl_add_head_rcu(&qd
->qd_hlist
, &qd_hash_table
[hash
]);
1429 spin_unlock_bucket(hash
);
1440 fs_info(sdp
, "found %u quota changes\n", found
);
1445 gfs2_quota_cleanup(sdp
);
void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_quota_list;
	struct gfs2_quota_data *qd;

	spin_lock(&qd_lock);
	while (!list_empty(head)) {
		qd = list_last_entry(head, struct gfs2_quota_data, qd_list);

		list_del(&qd->qd_list);

		/* Also remove if this qd exists in the reclaim list */
		list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
		atomic_dec(&sdp->sd_quota_count);
		spin_unlock(&qd_lock);

		spin_lock_bucket(qd->qd_hash);
		hlist_bl_del_rcu(&qd->qd_hlist);
		spin_unlock_bucket(qd->qd_hash);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);

		spin_lock(&qd_lock);
	}
	spin_unlock(&qd_lock);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

	kvfree(sdp->sd_quota_bitmap);
	sdp->sd_quota_bitmap = NULL;
}
static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
{
	if (error == 0 || error == -EROFS)
		return;
	if (!gfs2_withdrawn(sdp)) {
		if (!cmpxchg(&sdp->sd_log_error, 0, error))
			fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
		wake_up(&sdp->sd_logd_waitq);
	}
}
static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
			       int (*fxn)(struct super_block *sb, int type),
			       unsigned long t, unsigned long *timeo,
			       unsigned int *new_timeo)
{
	if (t >= *timeo) {
		int error = fxn(sdp->sd_vfs, 0);
		quotad_error(sdp, msg, error);
		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
	} else {
		*timeo -= t;
	}
}
static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	while (1) {
		ip = NULL;
		spin_lock(&sdp->sd_trunc_lock);
		if (!list_empty(&sdp->sd_trunc_list)) {
			ip = list_first_entry(&sdp->sd_trunc_list,
					struct gfs2_inode, i_trunc_list);
			list_del_init(&ip->i_trunc_list);
		}
		spin_unlock(&sdp->sd_trunc_lock);
		if (ip == NULL)
			return;
		gfs2_glock_finish_truncate(ip);
	}
}
void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
	if (!sdp->sd_statfs_force_sync) {
		sdp->sd_statfs_force_sync = 1;
		wake_up(&sdp->sd_quota_wait);
	}
}
/**
 * gfs2_quotad - Write cached quota changes into the quota file
 * @sdp: Pointer to GFS2 superblock
 *
 */
int gfs2_quotad(void *data)
{
	struct gfs2_sbd *sdp = data;
	struct gfs2_tune *tune = &sdp->sd_tune;
	unsigned long statfs_timeo = 0;
	unsigned long quotad_timeo = 0;
	unsigned long t = 0;
	DEFINE_WAIT(wait);
	int empty;

	while (!kthread_should_stop()) {

		if (gfs2_withdrawn(sdp))
			goto bypass;
		/* Update the master statfs file */
		if (sdp->sd_statfs_force_sync) {
			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
			quotad_error(sdp, "statfs", error);
			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
		}
		else
			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
					   &statfs_timeo,
					   &tune->gt_statfs_quantum);

		/* Update quota file */
		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
				   &quotad_timeo, &tune->gt_quota_quantum);

		/* Check for & recover partially truncated inodes */
		quotad_check_trunc_list(sdp);

		try_to_freeze();

bypass:
		t = min(quotad_timeo, statfs_timeo);

		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
		spin_lock(&sdp->sd_trunc_lock);
		empty = list_empty(&sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		if (empty && !sdp->sd_statfs_force_sync)
			t -= schedule_timeout(t);
		else
			t = 0;
		finish_wait(&sdp->sd_quota_wait, &wait);
	}

	return 0;
}
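
/*
 * Timing sketch for the loop above (hypothetical values): with
 * gt_quota_quantum == 60 and gt_statfs_quantum == 30, quotad_check_timeo()
 * arms quotad_timeo at 60*HZ and statfs_timeo at 30*HZ; the thread then
 * sleeps for min() of the two and subtracts the time actually slept, so
 * each job fires when its own budget is exhausted rather than on one
 * shared tick.
 */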
static int gfs2_quota_get_state(struct super_block *sb, struct qc_state *state)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	memset(state, 0, sizeof(*state));

	switch (sdp->sd_args.ar_quota) {
	case GFS2_QUOTA_ON:
		state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED;
		state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED;
		/*FALLTHRU*/
	case GFS2_QUOTA_ACCOUNT:
		state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED |
						  QCI_SYSFILE;
		state->s_state[GRPQUOTA].flags |= QCI_ACCT_ENABLED |
						  QCI_SYSFILE;
		break;
	case GFS2_QUOTA_OFF:
		break;
	}
	if (sdp->sd_quota_inode) {
		state->s_state[USRQUOTA].ino =
					GFS2_I(sdp->sd_quota_inode)->i_no_addr;
		state->s_state[USRQUOTA].blocks = sdp->sd_quota_inode->i_blocks;
	}
	state->s_state[USRQUOTA].nextents = 1;	/* unsupported */
	state->s_state[GRPQUOTA] = state->s_state[USRQUOTA];
	state->s_incoredqs = list_lru_count(&gfs2_qd_lru);
	return 0;
}
static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
			  struct qc_dqblk *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_lvb *qlvb;
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	memset(fdq, 0, sizeof(*fdq));

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if ((qid.type != USRQUOTA) &&
	    (qid.type != GRPQUOTA))
		return -EINVAL;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;
	error = do_glock(qd, FORCE, &q_gh);
	if (error)
		goto out;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
	fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
	fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
	fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;

	gfs2_glock_dq_uninit(&q_gh);
out:
	qd_put(qd);
	return error;
}
/* GFS2 only supports a subset of the XFS fields */
#define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE)
static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
			  struct qc_dqblk *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh, i_gh;
	unsigned int data_blocks, ind_blocks;
	unsigned int blocks = 0;
	int alloc_required;
	loff_t offset;
	int error;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if ((qid.type != USRQUOTA) &&
	    (qid.type != GRPQUOTA))
		return -EINVAL;

	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
		return -EINVAL;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;

	error = gfs2_qa_get(ip);
	if (error)
		goto out_put;

	inode_lock(&ip->i_inode);
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
	if (error)
		goto out_unlockput;
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out_q;

	/* Check for existing entry, if none then alloc new blocks */
	error = update_qd(sdp, qd);
	if (error)
		goto out_i;

	/* If nothing has changed, this is a no-op */
	if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
	    ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
		fdq->d_fieldmask ^= QC_SPC_SOFT;

	if ((fdq->d_fieldmask & QC_SPC_HARD) &&
	    ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
		fdq->d_fieldmask ^= QC_SPC_HARD;

	if ((fdq->d_fieldmask & QC_SPACE) &&
	    ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
		fdq->d_fieldmask ^= QC_SPACE;

	if (fdq->d_fieldmask == 0)
		goto out_i;

	offset = qd2offset(qd);
	alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
	if (gfs2_is_stuffed(ip))
		alloc_required = 1;
	if (alloc_required) {
		struct gfs2_alloc_parms ap = { .aflags = 0, };
		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
				       &data_blocks, &ind_blocks);
		blocks = 1 + data_blocks + ind_blocks;
		ap.target = blocks;
		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto out_i;
		blocks += gfs2_rg_blocks(ip, blocks);
	}

	/* Some quotas span block boundaries and can update two blocks,
	   adding an extra block to the transaction to handle such quotas */
	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
	if (error)
		goto out_release;

	/* Apply changes */
	error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);
	if (!error)
		clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);

	gfs2_trans_end(sdp);
out_release:
	if (alloc_required)
		gfs2_inplace_release(ip);
out_i:
	gfs2_glock_dq_uninit(&i_gh);
out_q:
	gfs2_glock_dq_uninit(&q_gh);
out_unlockput:
	gfs2_qa_put(ip);
	inode_unlock(&ip->i_inode);
out_put:
	qd_put(qd);
	return error;
}
const struct quotactl_ops gfs2_quotactl_ops = {
	.quota_sync     = gfs2_quota_sync,
	.get_state	= gfs2_quota_get_state,
	.get_dqblk	= gfs2_get_dqblk,
	.set_dqblk	= gfs2_set_dqblk,
};
void __init gfs2_quota_hash_init(void)
{
	unsigned i;

	for(i = 0; i < GFS2_QD_HASH_SIZE; i++)
		INIT_HLIST_BL_HEAD(&qd_hash_table[i]);
}