/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space.  Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file.  This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously.  So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota
 * check program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file.  (The default is
 * 60 seconds.)  Another knob, "quota_scale", controls how quickly the
 * frequency of quota file syncs increases as the user moves closer to their
 * limit.  The more frequent the syncs, the more accurate the quota
 * enforcement, but that means that there is more contention between the nodes
 * for the quota file.  The default value is one.  This sets the maximum
 * theoretical quota overrun (with an infinite number of nodes, each with
 * infinite bandwidth) to twice the user's limit.  (In practice, the maximum
 * overrun you see should be much less.)  A "quota_scale" number greater than
 * one makes quota syncs more frequent and reduces the maximum overrun.
 * Numbers less than one (but greater than zero) make quota syncs less
 * frequent.
 *
 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
 * the quota file, so it is not being constantly read.
 */
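
/*
 * For a feel of the numbers (illustrative only): need_sync() below treats a
 * node's local change of `c` blocks as if the whole cluster had allocated
 * roughly c * gfs2_jindex_size(sdp) * quota_scale blocks when deciding
 * whether to sync.  On a 4-journal cluster with the default scale of one, a
 * node therefore syncs its changes about four times closer to the limit
 * than a lone node would; a scale of two halves the remaining slack again,
 * while a scale of 1/2 doubles it.
 */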
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/dqblk_xfs.h>
struct gfs2_quota_change_host {
	u64 qc_change;
	u32 qc_flags;		/* GFS2_QCF_... */
	u32 qc_id;
};
static LIST_HEAD(qd_lru_list);
static atomic_t qd_lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(qd_lru_lock);
int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask)
{
	struct gfs2_quota_data *qd;
	struct gfs2_sbd *sdp;

	if (nr == 0)
		goto out;

	if (!(gfp_mask & __GFP_FS))
		return -1;

	spin_lock(&qd_lru_lock);
	while (nr && !list_empty(&qd_lru_list)) {
		qd = list_entry(qd_lru_list.next,
				struct gfs2_quota_data, qd_reclaim);
		sdp = qd->qd_gl->gl_sbd;

		/* Free from the filesystem-specific list */
		list_del(&qd->qd_list);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		atomic_dec(&sdp->sd_quota_count);

		/* Delete it from the common reclaim list */
		list_del_init(&qd->qd_reclaim);
		atomic_dec(&qd_lru_count);
		spin_unlock(&qd_lru_lock);
		kmem_cache_free(gfs2_quotad_cachep, qd);
		spin_lock(&qd_lru_lock);
		nr--;
	}
	spin_unlock(&qd_lru_lock);

out:
	return (atomic_read(&qd_lru_count) * sysctl_vfs_cache_pressure) / 100;
}
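
/*
 * Note on the return value above (a sketch of the shrinker convention of
 * this era): the shrinker reports how many cached objects remain, scaled by
 * sysctl_vfs_cache_pressure.  With 300 qd's on the LRU and the default
 * vfs_cache_pressure of 100 it reports (300 * 100) / 100 = 300; lowering
 * vfs_cache_pressure to 50 makes the cache look half as big (150), so the
 * VM reclaims it less aggressively.
 */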
static u64 qd2offset(struct gfs2_quota_data *qd)
{
	u64 offset;

	offset = 2 * (u64)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
	offset *= sizeof(struct gfs2_quota);

	return offset;
}
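
/*
 * Layout implied by qd2offset(): user and group records interleave in the
 * quota file, users on even slots and groups on odd ones.  For ID 7, for
 * example, the user record lands at slot 2 * 7 + 0 = 14 and the group
 * record at slot 2 * 7 + 1 = 15, each slot being sizeof(struct gfs2_quota)
 * bytes wide.
 */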
static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
		    struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd;
	int error;

	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
	if (!qd)
		return -ENOMEM;

	atomic_set(&qd->qd_count, 1);
	qd->qd_id = id;
	if (user)
		set_bit(QDF_USER, &qd->qd_flags);
	qd->qd_slot = -1;
	INIT_LIST_HEAD(&qd->qd_reclaim);

	error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
			       &gfs2_quota_glops, CREATE, &qd->qd_gl);
	if (error)
		goto fail;

	*qdp = qd;

	return 0;

fail:
	kmem_cache_free(gfs2_quotad_cachep, qd);
	return error;
}
static int qd_get(struct gfs2_sbd *sdp, int user, u32 id,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
	int error, found;

	*qdp = NULL;

	for (;;) {
		found = 0;
		spin_lock(&qd_lru_lock);
		list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
			if (qd->qd_id == id &&
			    !test_bit(QDF_USER, &qd->qd_flags) == !user) {
				/* Remove it from reclaim list */
				if (!atomic_read(&qd->qd_count) &&
				    !list_empty(&qd->qd_reclaim)) {
					list_del_init(&qd->qd_reclaim);
					atomic_dec(&qd_lru_count);
				}
				atomic_inc(&qd->qd_count);
				found = 1;
				break;
			}
		}

		if (!found)
			qd = NULL;

		if (!qd && new_qd) {
			qd = new_qd;
			new_qd = NULL;
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
		}

		spin_unlock(&qd_lru_lock);

		if (qd) {
			if (new_qd) {
				gfs2_glock_put(new_qd->qd_gl);
				kmem_cache_free(gfs2_quotad_cachep, new_qd);
			}
			*qdp = qd;
			return 0;
		}

		error = qd_alloc(sdp, user, id, &new_qd);
		if (error)
			return error;
	}
}
static void qd_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	gfs2_assert(sdp, atomic_read(&qd->qd_count));
	atomic_inc(&qd->qd_count);
}
static void qd_put(struct gfs2_quota_data *qd)
{
	if (atomic_dec_and_lock(&qd->qd_count, &qd_lru_lock)) {
		/* Add to the reclaim list */
		list_add_tail(&qd->qd_reclaim, &qd_lru_list);
		atomic_inc(&qd_lru_count);
		spin_unlock(&qd_lru_lock);
	}
}
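
/*
 * Lifecycle sketch: while qd_count is non-zero a qd stays off the LRU.
 * atomic_dec_and_lock() above makes "last reference dropped" and "added to
 * qd_lru_list" a single step under qd_lru_lock, so gfs2_shrink_qd_memory()
 * can never see a qd that is both referenced and reclaimable; qd_get()
 * performs the reverse move when it revives a cached entry.
 */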
static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	unsigned int c, o = 0, b;
	unsigned char byte = 0;

	spin_lock(&qd_lru_lock);

	if (qd->qd_slot_count++) {
		spin_unlock(&qd_lru_lock);
		return 0;
	}

	for (c = 0; c < sdp->sd_quota_chunks; c++)
		for (o = 0; o < PAGE_SIZE; o++) {
			byte = sdp->sd_quota_bitmap[c][o];
			if (byte != 0xFF)
				goto found;
		}

	goto fail;

found:
	for (b = 0; b < 8; b++)
		if (!(byte & (1 << b)))
			break;
	qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

	if (qd->qd_slot >= sdp->sd_quota_slots)
		goto fail;

	sdp->sd_quota_bitmap[c][o] |= 1 << b;

	spin_unlock(&qd_lru_lock);

	return 0;

fail:
	qd->qd_slot_count--;
	spin_unlock(&qd_lru_lock);
	return -ENOSPC;
}
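
/*
 * Example of the slot arithmetic above, assuming PAGE_SIZE = 4096: each
 * bitmap chunk covers 8 * 4096 = 32768 slots.  A free bit found at chunk
 * c = 1, byte offset o = 2, bit b = 3 yields
 *
 *	qd->qd_slot = 1 * 32768 + 2 * 8 + 3 = 32787
 *
 * i.e. chunks are pages of bits, each byte covers eight slots, and the bit
 * index selects the slot within the byte.
 */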
static void slot_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&qd_lru_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;
	spin_unlock(&qd_lru_lock);
}
static void slot_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&qd_lru_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
		qd->qd_slot = -1;
	}
	spin_unlock(&qd_lru_lock);
}
static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int block, offset;
	struct buffer_head *bh;
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);
		return 0;
	}

	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	bh_map.b_size = 1 << ip->i_inode.i_blkbits;
	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
	if (error)
		goto fail;
	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
	if (error)
		goto fail;
	error = -EIO;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
		goto fail_brelse;

	qd->qd_bh = bh;
	qd->qd_bh_qc = (struct gfs2_quota_change *)
		(bh->b_data + sizeof(struct gfs2_meta_header) +
		 offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);

	return 0;

fail_brelse:
	brelse(bh);
fail:
	qd->qd_bh_count--;
	mutex_unlock(&sdp->sd_quota_mutex);
	return error;
}
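
/*
 * Example of the slot-to-disk mapping above: sd_qc_per_block is how many
 * struct gfs2_quota_change records fit in a quota-change block after its
 * meta header.  If sd_qc_per_block were 62 (an illustrative value), slot
 * 130 would map to block 130 / 62 = 2, offset 130 % 62 = 6, i.e. the 7th
 * record in the 3rd block of this node's quota-change file.
 */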
static void bh_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
		brelse(qd->qd_bh);
		qd->qd_bh = NULL;
		qd->qd_bh_qc = NULL;
	}
	mutex_unlock(&sdp->sd_quota_mutex);
}
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL;
	int error;
	int found = 0;

	*qdp = NULL;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&qd_lru_lock);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
		    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
		    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
			continue;

		list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

		set_bit(QDF_LOCKED, &qd->qd_flags);
		gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
		atomic_inc(&qd->qd_count);
		qd->qd_change_sync = qd->qd_change;
		gfs2_assert_warn(sdp, qd->qd_slot_count);
		qd->qd_slot_count++;
		found = 1;

		break;
	}

	if (!found)
		qd = NULL;

	spin_unlock(&qd_lru_lock);

	if (qd) {
		gfs2_assert_warn(sdp, qd->qd_change_sync);
		error = bh_get(qd);
		if (error) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			return error;
		}
	}

	*qdp = qd;

	return 0;
}
static int qd_trylock(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&qd_lru_lock);

	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags)) {
		spin_unlock(&qd_lru_lock);
		return 0;
	}

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

	set_bit(QDF_LOCKED, &qd->qd_flags);
	gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
	atomic_inc(&qd->qd_count);
	qd->qd_change_sync = qd->qd_change;
	gfs2_assert_warn(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;

	spin_unlock(&qd_lru_lock);

	gfs2_assert_warn(sdp, qd->qd_change_sync);
	if (bh_get(qd)) {
		clear_bit(QDF_LOCKED, &qd->qd_flags);
		slot_put(qd);
		qd_put(qd);
		return 0;
	}

	return 1;
}
static void qd_unlock(struct gfs2_quota_data *qd)
{
	gfs2_assert_warn(qd->qd_gl->gl_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}
static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id,
		    struct gfs2_quota_data **qdp)
{
	int error;

	error = qd_get(sdp, user, id, qdp);
	if (error)
		return error;

	error = slot_get(*qdp);
	if (error)
		goto fail;

	error = bh_get(*qdp);
	if (error)
		goto fail_slot;

	return 0;

fail_slot:
	slot_put(*qdp);
fail:
	qd_put(*qdp);
	return error;
}
static void qdsb_put(struct gfs2_quota_data *qd)
{
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}
int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data **qd = al->al_qd;
	int error;

	if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
		return -EIO;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, qd);
	if (error)
		goto out;
	al->al_qd_num++;
	qd++;

	error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, qd);
	if (error)
		goto out;
	al->al_qd_num++;
	qd++;

	if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
		error = qdsb_get(sdp, QUOTA_USER, uid, qd);
		if (error)
			goto out;
		al->al_qd_num++;
		qd++;
	}

	if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
		error = qdsb_get(sdp, QUOTA_GROUP, gid, qd);
		if (error)
			goto out;
		al->al_qd_num++;
	}

out:
	if (error)
		gfs2_quota_unhold(ip);
	return error;
}
void gfs2_quota_unhold(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	unsigned int x;

	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < al->al_qd_num; x++) {
		qdsb_put(al->al_qd[x]);
		al->al_qd[x] = NULL;
	}
	al->al_qd_num = 0;
}
static int sort_qd(const void *a, const void *b)
{
	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

	if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
	    !test_bit(QDF_USER, &qd_b->qd_flags)) {
		if (test_bit(QDF_USER, &qd_a->qd_flags))
			return -1;
		else
			return 1;
	}
	if (qd_a->qd_id < qd_b->qd_id)
		return -1;
	if (qd_a->qd_id > qd_b->qd_id)
		return 1;

	return 0;
}
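
/*
 * The comparator above yields a total order: all user quotas sort before
 * all group quotas, and IDs ascend within each class, so (user 20, user 3,
 * group 3) sorts to (user 3, user 20, group 3).  Sorting before acquiring
 * the glocks in do_sync() and gfs2_quota_lock() keeps every node taking
 * the locks in the same global order, which helps avoid deadlocks between
 * nodes syncing overlapping sets of IDs.
 */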
static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;
	s64 x;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1);

	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		qc->qc_change = 0;
		qc->qc_flags = 0;
		if (test_bit(QDF_USER, &qd->qd_flags))
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(qd->qd_id);
	}

	x = be64_to_cpu(qc->qc_change) + change;
	qc->qc_change = cpu_to_be64(x);

	spin_lock(&qd_lru_lock);
	qd->qd_change = x;
	spin_unlock(&qd_lru_lock);

	if (!x) {
		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);
		qc->qc_flags = 0;
		qc->qc_id = 0;
		slot_put(qd);
		qd_put(qd);
	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
		qd_hold(qd);
		slot_hold(qd);
	}

	mutex_unlock(&sdp->sd_quota_mutex);
}
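
/*
 * Accumulation sketch (illustrative numbers): do_qc(qd, +10) on a fresh qd
 * sets QDF_CHANGE, stamps qc_id and qc_flags, and leaves qc_change = 10; a
 * later do_qc(qd, -10) brings qc_change back to zero, releasing the slot
 * and the reference taken for the change.  This is also how do_sync()
 * retires a tag: after writing the delta into the quota file it calls
 * do_qc(qd, -qd->qd_change_sync).
 */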
/**
 * gfs2_adjust_quota - adjust record of current block usage
 * @ip: The quota inode
 * @loc: Offset of the entry in the quota file
 * @change: The amount of usage change to record
 * @qd: The quota data
 * @fdq: The updated limits to record
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 *
 * Returns: 0 or -ve on error
 */
static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
			     s64 change, struct gfs2_quota_data *qd,
			     struct fs_disk_quota *fdq)
{
	struct inode *inode = &ip->i_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index = loc >> PAGE_CACHE_SHIFT;
	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize, iblock, pos;
	struct buffer_head *bh, *dibh;
	struct page *page;
	void *kaddr;
	struct gfs2_quota *qp;
	s64 value;
	int err = -EIO;
	u64 size;

	if (gfs2_is_stuffed(ip))
		gfs2_unstuff_dinode(ip, NULL);

	page = grab_cache_page(mapping, index);
	if (!page)
		return -ENOMEM;

	blocksize = inode->i_sb->s_blocksize;
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	if (!buffer_mapped(bh)) {
		gfs2_block_map(inode, iblock, bh, 1);
		if (!buffer_mapped(bh))
			goto unlock;
	}

	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		ll_rw_block(READ_META, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	gfs2_trans_add_bh(ip->i_gl, bh, 0);

	kaddr = kmap_atomic(page, KM_USER0);
	qp = kaddr + offset;
	value = (s64)be64_to_cpu(qp->qu_value) + change;
	qp->qu_value = cpu_to_be64(value);
	qd->qd_qb.qb_value = qp->qu_value;
	if (fdq) {
		if (fdq->d_fieldmask & FS_DQ_BSOFT) {
			qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit);
			qd->qd_qb.qb_warn = qp->qu_warn;
		}
		if (fdq->d_fieldmask & FS_DQ_BHARD) {
			qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit);
			qd->qd_qb.qb_limit = qp->qu_limit;
		}
	}
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);

	err = gfs2_meta_inode_buffer(ip, &dibh);
	if (err)
		goto unlock;

	size = loc + sizeof(struct gfs2_quota);
	if (size > inode->i_size) {
		ip->i_disksize = size;
		i_size_write(inode, size);
	}
	inode->i_mtime = inode->i_atime = CURRENT_TIME;
	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);
	mark_inode_dirty(inode);

unlock:
	unlock_page(page);
	page_cache_release(page);
	return err;
}
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	unsigned int qx, x;
	struct gfs2_quota_data *qd;
	loff_t offset;
	unsigned int nalloc = 0, blocks;
	struct gfs2_alloc *al = NULL;
	int error;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			       &data_blocks, &ind_blocks);

	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
	if (!ghs)
		return -ENOMEM;

	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	mutex_lock_nested(&ip->i_inode.i_mutex, I_MUTEX_QUOTA);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &ghs[qx]);
		if (error)
			goto out;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out;

	for (x = 0; x < num_qd; x++) {
		int alloc_required;

		offset = qd2offset(qda[x]);
		error = gfs2_write_alloc_required(ip, offset,
						  sizeof(struct gfs2_quota),
						  &alloc_required);
		if (error)
			goto out_gunlock;
		if (alloc_required)
			nalloc++;
	}

	al = gfs2_alloc_get(ip);
	if (!al) {
		error = -ENOMEM;
		goto out_gunlock;
	}
	/*
	 * 1 blk for unstuffing inode if stuffed. We add this extra
	 * block to the reservation unconditionally. If the inode
	 * doesn't need unstuffing, the block will be released to the
	 * rgrp since it won't be allocated during the transaction
	 */
	al->al_requested = 1;
	/* +1 in the end for block requested above for unstuffing */
	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 1;

	if (nalloc)
		al->al_requested += nalloc * (data_blocks + ind_blocks);

	error = gfs2_inplace_reserve(ip);
	if (error)
		goto out_alloc;

	if (nalloc)
		blocks += al->al_rgd->rd_length + nalloc * ind_blocks + RES_STATFS;

	error = gfs2_trans_begin(sdp, blocks, 0);
	if (error)
		goto out_ipres;

	for (x = 0; x < num_qd; x++) {
		qd = qda[x];
		offset = qd2offset(qd);
		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
		if (error)
			goto out_end_trans;

		do_qc(qd, -qd->qd_change_sync);
	}

	error = 0;

out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	gfs2_inplace_release(ip);
out_alloc:
	gfs2_alloc_put(ip);
out_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
out:
	while (qx--)
		gfs2_glock_dq_uninit(&ghs[qx]);
	mutex_unlock(&ip->i_inode.i_mutex);
	kfree(ghs);
	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
	return error;
}
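
/*
 * Worked example of the reservation above (illustrative numbers): syncing
 * num_qd = 4 entries where gfs2_write_calc_reserv() reports data_blocks = 1
 * and ind_blocks = 2, with nalloc = 2 of them needing allocation and a
 * resource group of rd_length = 3, gives
 *
 *	blocks = 4 * 1 + RES_DINODE + 4 + 1	(data + dinode + tags + unstuff)
 *	blocks += 3 + 2 * 2 + RES_STATFS	(rgrp bitmaps + indirect + statfs)
 *
 * The transaction is sized for the worst case; blocks reserved but never
 * allocated go back to the resource group when the transaction ends.
 */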
static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota q;
	struct gfs2_quota_lvb *qlvb;
	loff_t pos;
	int error;

	memset(&q, 0, sizeof(struct gfs2_quota));
	pos = qd2offset(qd);
	error = gfs2_internal_read(ip, NULL, (char *)&q, &pos, sizeof(q));
	if (error < 0)
		return error;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
	qlvb->qb_limit = q.qu_limit;
	qlvb->qb_warn = q.qu_warn;
	qlvb->qb_value = q.qu_value;
	qd->qd_qb = *qlvb;

	return 0;
}
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_holder i_gh;
	int error;

restart:
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
	if (error)
		return error;

	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;

	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, q_gh);
		if (error)
			return error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
		if (error)
			goto fail;

		error = update_qd(sdp, qd);
		if (error)
			goto fail_gunlock;

		gfs2_glock_dq_uninit(&i_gh);
		gfs2_glock_dq_uninit(q_gh);
		force_refresh = 0;
		goto restart;
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	gfs2_glock_dq_uninit(q_gh);
	return error;
}
int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	unsigned int x;
	int error = 0;

	gfs2_quota_hold(ip, uid, gid);

	if (capable(CAP_SYS_RESOURCE) ||
	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	sort(al->al_qd, al->al_qd_num, sizeof(struct gfs2_quota_data *),
	     sort_qd, NULL);

	for (x = 0; x < al->al_qd_num; x++) {
		error = do_glock(al->al_qd[x], NO_FORCE, &al->al_qd_ghs[x]);
		if (error)
			break;
	}

	if (!error)
		set_bit(GIF_QD_LOCKED, &ip->i_flags);
	else {
		while (x--)
			gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
		gfs2_quota_unhold(ip);
	}

	return error;
}
static int need_sync(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_tune *gt = &sdp->sd_tune;
	s64 value;
	unsigned int num, den;
	int do_sync = 1;

	if (!qd->qd_qb.qb_limit)
		return 0;

	spin_lock(&qd_lru_lock);
	value = qd->qd_change;
	spin_unlock(&qd_lru_lock);

	spin_lock(&gt->gt_spin);
	num = gt->gt_quota_scale_num;
	den = gt->gt_quota_scale_den;
	spin_unlock(&gt->gt_spin);

	if (value < 0)
		do_sync = 0;
	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
		do_sync = 0;
	else {
		value *= gfs2_jindex_size(sdp) * num;
		value = div_s64(value, den);
		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
			do_sync = 0;
	}

	return do_sync;
}
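
/*
 * Worked example of the scaling above (illustrative numbers): a local
 * change of 100 blocks, 4 journals, quota_scale num/den = 1/1, a synced
 * qb_value of 700 and a qb_limit of 1000 give
 *
 *	100 * 4 * 1 / 1 + 700 = 1100 >= 1000
 *
 * so the node syncs.  With quota_scale = 1/2 the same state yields
 * 100 * 4 / 2 + 700 = 900 < 1000 and the change keeps sitting locally;
 * this is the "less frequent syncs, more potential overrun" trade-off
 * described at the top of this file.
 */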
void gfs2_quota_unlock(struct gfs2_inode *ip)
{
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data *qda[4];
	unsigned int count = 0;
	unsigned int x;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
		goto out;

	for (x = 0; x < al->al_qd_num; x++) {
		struct gfs2_quota_data *qd;
		int sync;

		qd = al->al_qd[x];
		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);

		if (sync && qd_trylock(qd))
			qda[count++] = qd;
	}

	if (count) {
		do_sync(count, qda);
		for (x = 0; x < count; x++)
			qd_unlock(qda[x]);
	}

out:
	gfs2_quota_unhold(ip);
}
static int print_message(struct gfs2_quota_data *qd, char *type)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n",
	       sdp->sd_fsname, type,
	       (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
	       qd->qd_id);

	return 0;
}
int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data *qd;
	s64 value;
	unsigned int x;
	int error = 0;

	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
		return 0;

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	for (x = 0; x < al->al_qd_num; x++) {
		qd = al->al_qd[x];

		if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		      (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))
			continue;

		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
		spin_lock(&qd_lru_lock);
		value += qd->qd_change;
		spin_unlock(&qd_lru_lock);

		if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
			print_message(qd, "exceeded");
			error = -EDQUOT;
			break;
		} else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
			   (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
			   time_after_eq(jiffies, qd->qd_last_warn +
					 gfs2_tune_get(sdp,
						gt_quota_warn_period) * HZ)) {
			error = print_message(qd, "warning");
			qd->qd_last_warn = jiffies;
		}
	}

	return error;
}
void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
		       u32 uid, u32 gid)
{
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data *qd;
	unsigned int x;

	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
		return;
	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
		return;

	for (x = 0; x < al->al_qd_num; x++) {
		qd = al->al_qd[x];

		if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		    (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
			do_qc(qd, change);
		}
	}
}
int gfs2_quota_sync(struct super_block *sb, int type)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_data **qda;
	unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
	unsigned int num_qd;
	unsigned int x;
	int error = 0;

	sdp->sd_quota_sync_gen++;

	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
	if (!qda)
		return -ENOMEM;

	do {
		num_qd = 0;

		for (;;) {
			error = qd_fish(sdp, qda + num_qd);
			if (error || !qda[num_qd])
				break;
			if (++num_qd == max_qd)
				break;
		}

		if (num_qd) {
			if (!error)
				error = do_sync(num_qd, qda);
			if (!error)
				for (x = 0; x < num_qd; x++)
					qda[x]->qd_sync_gen =
						sdp->sd_quota_sync_gen;

			for (x = 0; x < num_qd; x++)
				qd_unlock(qda[x]);
		}
	} while (!error && num_qd == max_qd);

	kfree(qda);

	return error;
}
int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
{
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	error = qd_get(sdp, user, id, &qd);
	if (error)
		return error;

	error = do_glock(qd, FORCE, &q_gh);
	if (!error)
		gfs2_glock_dq_uninit(&q_gh);

	qd_put(qd);
	return error;
}
static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf)
{
	const struct gfs2_quota_change *str = buf;

	qc->qc_change = be64_to_cpu(str->qc_change);
	qc->qc_flags = be32_to_cpu(str->qc_flags);
	qc->qc_id = be32_to_cpu(str->qc_id);
}
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;
	u64 dblock;
	u32 extlen = 0;
	int error;

	if (!ip->i_disksize || ip->i_disksize > (64 << 20) ||
	    ip->i_disksize & (sdp->sd_sb.sb_bsize - 1)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
	sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);

	error = -ENOMEM;

	sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
				       sizeof(unsigned char *), GFP_NOFS);
	if (!sdp->sd_quota_bitmap)
		return error;

	for (x = 0; x < sdp->sd_quota_chunks; x++) {
		sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_NOFS);
		if (!sdp->sd_quota_bitmap[x])
			goto fail;
	}

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;
		unsigned int y;

		if (!extlen) {
			int new = 0;
			error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
			if (error)
				goto fail;
		}
		error = -EIO;
		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
		if (!bh)
			goto fail;
		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
			brelse(bh);
			goto fail;
		}

		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
		     y++, slot++) {
			struct gfs2_quota_change_host qc;
			struct gfs2_quota_data *qd;

			gfs2_quota_change_in(&qc, bh->b_data +
					  sizeof(struct gfs2_meta_header) +
					  y * sizeof(struct gfs2_quota_change));
			if (!qc.qc_change)
				continue;

			error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),
					 qc.qc_id, &qd);
			if (error) {
				brelse(bh);
				goto fail;
			}

			set_bit(QDF_CHANGE, &qd->qd_flags);
			qd->qd_change = qc.qc_change;
			qd->qd_slot = slot;
			qd->qd_slot_count = 1;

			spin_lock(&qd_lru_lock);
			gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			spin_unlock(&qd_lru_lock);

			found++;
		}

		brelse(bh);
		dblock++;
		extlen--;
	}

	if (found)
		fs_info(sdp, "found %u quota changes\n", found);

	return 0;

fail:
	gfs2_quota_cleanup(sdp);
	return error;
}
void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_quota_list;
	struct gfs2_quota_data *qd;
	unsigned int x;

	spin_lock(&qd_lru_lock);
	while (!list_empty(head)) {
		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

		if (atomic_read(&qd->qd_count) > 1 ||
		    (atomic_read(&qd->qd_count) &&
		     !test_bit(QDF_CHANGE, &qd->qd_flags))) {
			list_move(&qd->qd_list, head);
			spin_unlock(&qd_lru_lock);
			schedule();
			spin_lock(&qd_lru_lock);
			continue;
		}

		list_del(&qd->qd_list);
		/* Also remove if this qd exists in the reclaim list */
		if (!list_empty(&qd->qd_reclaim)) {
			list_del_init(&qd->qd_reclaim);
			atomic_dec(&qd_lru_count);
		}
		atomic_dec(&sdp->sd_quota_count);
		spin_unlock(&qd_lru_lock);

		if (!atomic_read(&qd->qd_count)) {
			gfs2_assert_warn(sdp, !qd->qd_change);
			gfs2_assert_warn(sdp, !qd->qd_slot_count);
		} else
			gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		kmem_cache_free(gfs2_quotad_cachep, qd);

		spin_lock(&qd_lru_lock);
	}
	spin_unlock(&qd_lru_lock);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

	if (sdp->sd_quota_bitmap) {
		for (x = 0; x < sdp->sd_quota_chunks; x++)
			kfree(sdp->sd_quota_bitmap[x]);
		kfree(sdp->sd_quota_bitmap);
	}
}
static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
{
	if (error == 0 || error == -EROFS)
		return;
	if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
		fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
}
static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
			       int (*fxn)(struct super_block *sb, int type),
			       unsigned long t, unsigned long *timeo,
			       unsigned int *new_timeo)
{
	if (t >= *timeo) {
		int error = fxn(sdp->sd_vfs, 0);
		quotad_error(sdp, msg, error);
		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
	} else {
		*timeo -= t;
	}
}
static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	while (1) {
		ip = NULL;
		spin_lock(&sdp->sd_trunc_lock);
		if (!list_empty(&sdp->sd_trunc_list)) {
			ip = list_entry(sdp->sd_trunc_list.next,
					struct gfs2_inode, i_trunc_list);
			list_del_init(&ip->i_trunc_list);
		}
		spin_unlock(&sdp->sd_trunc_lock);
		if (ip == NULL)
			return;
		gfs2_glock_finish_truncate(ip);
	}
}
/**
 * gfs2_quotad - Write cached quota changes into the quota file
 * @sdp: Pointer to GFS2 superblock
 *
 */
int gfs2_quotad(void *data)
{
	struct gfs2_sbd *sdp = data;
	struct gfs2_tune *tune = &sdp->sd_tune;
	unsigned long statfs_timeo = 0;
	unsigned long quotad_timeo = 0;
	unsigned long t = 0;
	DEFINE_WAIT(wait);
	int empty;

	while (!kthread_should_stop()) {

		/* Update the master statfs file */
		quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
				   &statfs_timeo, &tune->gt_statfs_quantum);

		/* Update quota file */
		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
				   &quotad_timeo, &tune->gt_quota_quantum);

		/* Check for & recover partially truncated inodes */
		quotad_check_trunc_list(sdp);

		if (freezing(current))
			refrigerator();
		t = min(quotad_timeo, statfs_timeo);

		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
		spin_lock(&sdp->sd_trunc_lock);
		empty = list_empty(&sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		if (empty)
			t -= schedule_timeout(t);
		else
			t = 0;
		finish_wait(&sdp->sd_quota_wait, &wait);
	}

	return 0;
}
static int gfs2_quota_get_xstate(struct super_block *sb,
				 struct fs_quota_stat *fqs)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	memset(fqs, 0, sizeof(struct fs_quota_stat));
	fqs->qs_version = FS_QSTAT_VERSION;
	if (sdp->sd_args.ar_quota == GFS2_QUOTA_ON)
		fqs->qs_flags = (XFS_QUOTA_UDQ_ENFD | XFS_QUOTA_GDQ_ENFD);
	else if (sdp->sd_args.ar_quota == GFS2_QUOTA_ACCOUNT)
		fqs->qs_flags = (XFS_QUOTA_UDQ_ACCT | XFS_QUOTA_GDQ_ACCT);
	if (sdp->sd_quota_inode) {
		fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr;
		fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks;
	}

	fqs->qs_uquota.qfs_nextents = 1; /* unsupported */
	fqs->qs_gquota = fqs->qs_uquota; /* it's the same inode in both cases */
	fqs->qs_incoredqs = atomic_read(&qd_lru_count);
	return 0;
}
static int gfs2_xquota_get(struct super_block *sb, int type, qid_t id,
			   struct fs_disk_quota *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_lvb *qlvb;
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	memset(fdq, 0, sizeof(struct fs_disk_quota));

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if (type == USRQUOTA)
		type = QUOTA_USER;
	else if (type == GRPQUOTA)
		type = QUOTA_GROUP;
	else
		return -EINVAL;

	error = qd_get(sdp, type, id, &qd);
	if (error)
		return error;
	error = do_glock(qd, FORCE, &q_gh);
	if (error)
		goto out;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
	fdq->d_version = FS_DQUOT_VERSION;
	fdq->d_flags = (type == QUOTA_USER) ? XFS_USER_QUOTA : XFS_GROUP_QUOTA;
	fdq->d_id = id;
	fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit);
	fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn);
	fdq->d_bcount = be64_to_cpu(qlvb->qb_value);

	gfs2_glock_dq_uninit(&q_gh);
out:
	qd_put(qd);
	return error;
}
/* GFS2 only supports a subset of the XFS fields */
#define GFS2_FIELDMASK	(FS_DQ_BSOFT|FS_DQ_BHARD)
static int gfs2_xquota_set(struct super_block *sb, int type, qid_t id,
			   struct fs_disk_quota *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh, i_gh;
	unsigned int data_blocks, ind_blocks;
	unsigned int blocks = 0;
	int alloc_required;
	struct gfs2_alloc *al;
	loff_t offset;
	int error;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	switch (type) {
	case USRQUOTA:
		type = QUOTA_USER;
		if (fdq->d_flags != XFS_USER_QUOTA)
			return -EINVAL;
		break;
	case GRPQUOTA:
		type = QUOTA_GROUP;
		if (fdq->d_flags != XFS_GROUP_QUOTA)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
		return -EINVAL;
	if (fdq->d_id != id)
		return -EINVAL;

	error = qd_get(sdp, type, id, &qd);
	if (error)
		return error;

	mutex_lock(&ip->i_inode.i_mutex);
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
	if (error)
		goto out_put;
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out_q;

	/* Check for existing entry, if none then alloc new blocks */
	error = update_qd(sdp, qd);
	if (error)
		goto out_i;

	/* If nothing has changed, this is a no-op */
	if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
	    (fdq->d_blk_softlimit == be64_to_cpu(qd->qd_qb.qb_warn)))
		fdq->d_fieldmask ^= FS_DQ_BSOFT;
	if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
	    (fdq->d_blk_hardlimit == be64_to_cpu(qd->qd_qb.qb_limit)))
		fdq->d_fieldmask ^= FS_DQ_BHARD;
	if (fdq->d_fieldmask == 0)
		goto out_i;

	offset = qd2offset(qd);
	error = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota),
					  &alloc_required);
	if (error)
		goto out_i;
	if (alloc_required) {
		al = gfs2_alloc_get(ip);
		if (al == NULL)
			goto out_i;
		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
				       &data_blocks, &ind_blocks);
		blocks = al->al_requested = 1 + data_blocks + ind_blocks;
		error = gfs2_inplace_reserve(ip);
		if (error)
			goto out_alloc;
	}

	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 1, 0);
	if (error)
		goto out_release;

	/* Apply changes */
	error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);

	gfs2_trans_end(sdp);
out_release:
	if (alloc_required) {
		gfs2_inplace_release(ip);
out_alloc:
		gfs2_alloc_put(ip);
	}
out_i:
	gfs2_glock_dq_uninit(&i_gh);
out_q:
	gfs2_glock_dq_uninit(&q_gh);
out_put:
	mutex_unlock(&ip->i_inode.i_mutex);
	qd_put(qd);
	return error;
}
const struct quotactl_ops gfs2_quotactl_ops = {
	.quota_sync	= gfs2_quota_sync,
	.get_xstate	= gfs2_quota_get_xstate,
	.get_xquota	= gfs2_xquota_get,
	.set_xquota	= gfs2_xquota_set,
};