/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space.  Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file.  This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously.  So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota check
 * program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file.  (The default is
 * 60 seconds.)  Another knob, "quota_scale", controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit.  The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one.  This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes, each with infinite bandwidth) to twice
 * the user's limit.  (In practice, the maximum overrun you see should be much
 * less.)  A "quota_scale" number greater than one makes quota syncs more
 * frequent and reduces the maximum overrun.  Numbers less than one (but
 * greater than zero) make quota syncs less frequent.
 *
 * GFS2 quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents
 * of the quota file, so that it is not constantly read.
 */
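
/*
 * Illustration (an editor's sketch, not part of the original file): the
 * need_sync() heuristic below scales the locally accumulated change by the
 * number of journals (one per node) and by quota_scale = num/den, then tests
 * whether the projected cluster-wide usage would cross the limit.  The names
 * in this fragment are hypothetical; only the arithmetic mirrors need_sync().
 */
#if 0	/* example only, never compiled */
static int quota_sync_needed(s64 local_change, s64 synced_value, s64 limit,
			     unsigned int journals, unsigned int num,
			     unsigned int den)
{
	/* e.g. change = 100 blocks locally, 4 nodes, scale = 1/1,
	 * synced value = 900, limit = 1000:
	 * projected = 100 * 4 * 1 / 1 + 900 = 1300 >= 1000, so sync now */
	s64 projected = div_s64(local_change * journals * num, den) +
			synced_value;
	return projected >= limit;
}
#endif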
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "util.h"
struct gfs2_quota_change_host {
	u64 qc_change;
	u32 qc_flags;		/* GFS2_QCF_... */
	struct kqid qc_id;
};
static LIST_HEAD(qd_lru_list);
static atomic_t qd_lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(qd_lru_lock);
unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	struct gfs2_quota_data *qd;
	struct gfs2_sbd *sdp;
	int nr_to_scan = sc->nr_to_scan;
	long freed = 0;

	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	spin_lock(&qd_lru_lock);
	while (nr_to_scan && !list_empty(&qd_lru_list)) {
		qd = list_entry(qd_lru_list.next,
				struct gfs2_quota_data, qd_reclaim);
		sdp = qd->qd_gl->gl_sbd;

		/* Free from the filesystem-specific list */
		list_del(&qd->qd_list);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		atomic_dec(&sdp->sd_quota_count);

		/* Delete it from the common reclaim list */
		list_del_init(&qd->qd_reclaim);
		atomic_dec(&qd_lru_count);
		spin_unlock(&qd_lru_lock);
		kmem_cache_free(gfs2_quotad_cachep, qd);
		spin_lock(&qd_lru_lock);
		nr_to_scan--;
		freed++;
	}
	spin_unlock(&qd_lru_lock);
	return freed;
}
unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	return vfs_pressure_ratio(atomic_read(&qd_lru_count));
}
static u64 qd2index(struct gfs2_quota_data *qd)
{
	struct kqid qid = qd->qd_id;
	return (2 * (u64)from_kqid(&init_user_ns, qid)) +
		((qid.type == USRQUOTA) ? 0 : 1);
}
static u64 qd2offset(struct gfs2_quota_data *qd)
{
	u64 offset;

	offset = qd2index(qd);
	offset *= sizeof(struct gfs2_quota);

	return offset;
}
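
/*
 * Worked example (illustrative, not from the original source): user and
 * group records interleave in the quota file, so for user ID 1000 the
 * index is 2 * 1000 + 0 = 2000, while group ID 1000 maps to index 2001.
 * User 1000's record therefore lives at byte offset
 * 2000 * sizeof(struct gfs2_quota) in the quota file.
 */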
static int qd_alloc(struct gfs2_sbd *sdp, struct kqid qid,
		    struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd;
	int error;

	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
	if (!qd)
		return -ENOMEM;

	atomic_set(&qd->qd_count, 1);
	qd->qd_id = qid;
	INIT_LIST_HEAD(&qd->qd_reclaim);

	error = gfs2_glock_get(sdp, qd2index(qd),
			       &gfs2_quota_glops, CREATE, &qd->qd_gl);
	if (error)
		goto fail;

	*qdp = qd;

	return 0;

fail:
	kmem_cache_free(gfs2_quotad_cachep, qd);
	return error;
}
static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
	int error, found;

	*qdp = NULL;

	for (;;) {
		found = 0;
		spin_lock(&qd_lru_lock);
		list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
			if (qid_eq(qd->qd_id, qid)) {
				if (!atomic_read(&qd->qd_count) &&
				    !list_empty(&qd->qd_reclaim)) {
					/* Remove it from reclaim list */
					list_del_init(&qd->qd_reclaim);
					atomic_dec(&qd_lru_count);
				}
				atomic_inc(&qd->qd_count);
				found = 1;
				break;
			}
		}

		if (!found)
			qd = NULL;

		if (!qd && new_qd) {
			qd = new_qd;
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			new_qd = NULL;
		}

		spin_unlock(&qd_lru_lock);

		if (qd) {
			if (new_qd) {
				gfs2_glock_put(new_qd->qd_gl);
				kmem_cache_free(gfs2_quotad_cachep, new_qd);
			}
			*qdp = qd;
			return 0;
		}

		error = qd_alloc(sdp, qid, &new_qd);
		if (error)
			return error;
	}
}
static void qd_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	gfs2_assert(sdp, atomic_read(&qd->qd_count));
	atomic_inc(&qd->qd_count);
}
static void qd_put(struct gfs2_quota_data *qd)
{
	if (atomic_dec_and_lock(&qd->qd_count, &qd_lru_lock)) {
		/* Add to the reclaim list */
		list_add_tail(&qd->qd_reclaim, &qd_lru_list);
		atomic_inc(&qd_lru_count);
		spin_unlock(&qd_lru_lock);
	}
}
static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	unsigned int c, o = 0, b;
	unsigned char byte = 0;

	spin_lock(&qd_lru_lock);

	if (qd->qd_slot_count++) {
		spin_unlock(&qd_lru_lock);
		return 0;
	}

	for (c = 0; c < sdp->sd_quota_chunks; c++)
		for (o = 0; o < PAGE_SIZE; o++) {
			byte = sdp->sd_quota_bitmap[c][o];
			if (byte != 0xFF)
				goto found;
		}

	goto fail;

found:
	for (b = 0; b < 8; b++)
		if (!(byte & (1 << b)))
			break;
	qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

	if (qd->qd_slot >= sdp->sd_quota_slots)
		goto fail;

	sdp->sd_quota_bitmap[c][o] |= 1 << b;

	spin_unlock(&qd_lru_lock);

	return 0;

fail:
	qd->qd_slot_count--;
	spin_unlock(&qd_lru_lock);
	return -ENOSPC;
}
static void slot_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&qd_lru_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;
	spin_unlock(&qd_lru_lock);
}
static void slot_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&qd_lru_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
	}
	spin_unlock(&qd_lru_lock);
}
static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int block, offset;
	struct buffer_head *bh;
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);
		return 0;
	}

	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	bh_map.b_size = 1 << ip->i_inode.i_blkbits;
	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
	if (error)
		goto fail;
	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
	if (error)
		goto fail;
	error = -EIO;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
		goto fail_brelse;

	qd->qd_bh = bh;
	qd->qd_bh_qc = (struct gfs2_quota_change *)
		(bh->b_data + sizeof(struct gfs2_meta_header) +
		 offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);

	return 0;

fail_brelse:
	brelse(bh);
fail:
	qd->qd_bh_count--;
	mutex_unlock(&sdp->sd_quota_mutex);
	return error;
}
static void bh_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
		brelse(qd->qd_bh);
		qd->qd_bh = NULL;
		qd->qd_bh_qc = NULL;
	}
	mutex_unlock(&sdp->sd_quota_mutex);
}
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL;
	int error;
	int found = 0;

	*qdp = NULL;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&qd_lru_lock);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
		    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
		    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
			continue;

		list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

		set_bit(QDF_LOCKED, &qd->qd_flags);
		gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
		atomic_inc(&qd->qd_count);
		qd->qd_change_sync = qd->qd_change;
		gfs2_assert_warn(sdp, qd->qd_slot_count);
		qd->qd_slot_count++;
		found = 1;

		break;
	}

	if (!found)
		qd = NULL;

	spin_unlock(&qd_lru_lock);

	if (qd) {
		gfs2_assert_warn(sdp, qd->qd_change_sync);
		error = bh_get(qd);
		if (error) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			return error;
		}
	}

	*qdp = qd;

	return 0;
}
static int qd_trylock(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&qd_lru_lock);

	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags)) {
		spin_unlock(&qd_lru_lock);
		return 0;
	}

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

	set_bit(QDF_LOCKED, &qd->qd_flags);
	gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
	atomic_inc(&qd->qd_count);
	qd->qd_change_sync = qd->qd_change;
	gfs2_assert_warn(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;

	spin_unlock(&qd_lru_lock);

	gfs2_assert_warn(sdp, qd->qd_change_sync);
	if (bh_get(qd)) {
		clear_bit(QDF_LOCKED, &qd->qd_flags);
		slot_put(qd);
		qd_put(qd);
		return 0;
	}

	return 1;
}
static void qd_unlock(struct gfs2_quota_data *qd)
{
	gfs2_assert_warn(qd->qd_gl->gl_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}
static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
		    struct gfs2_quota_data **qdp)
{
	int error;

	error = qd_get(sdp, qid, qdp);
	if (error)
		return error;

	error = slot_get(*qdp);
	if (error)
		goto fail;

	error = bh_get(*qdp);
	if (error)
		goto fail_slot;

	return 0;

fail_slot:
	slot_put(*qdp);
fail:
	qd_put(*qdp);
	return error;
}
static void qdsb_put(struct gfs2_quota_data *qd)
{
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}
int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data **qd;
	int error;

	if (ip->i_res == NULL) {
		error = gfs2_rs_alloc(ip);
		if (error)
			return error;
	}

	qd = ip->i_res->rs_qa_qd;

	if (gfs2_assert_warn(sdp, !ip->i_res->rs_qa_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
		return -EIO;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
	if (error)
		goto out;
	ip->i_res->rs_qa_qd_num++;
	qd++;

	error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
	if (error)
		goto out;
	ip->i_res->rs_qa_qd_num++;
	qd++;

	if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
	    !uid_eq(uid, ip->i_inode.i_uid)) {
		error = qdsb_get(sdp, make_kqid_uid(uid), qd);
		if (error)
			goto out;
		ip->i_res->rs_qa_qd_num++;
		qd++;
	}

	if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
	    !gid_eq(gid, ip->i_inode.i_gid)) {
		error = qdsb_get(sdp, make_kqid_gid(gid), qd);
		if (error)
			goto out;
		ip->i_res->rs_qa_qd_num++;
		qd++;
	}

out:
	if (error)
		gfs2_quota_unhold(ip);
	return error;
}
void gfs2_quota_unhold(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int x;

	if (ip->i_res == NULL)
		return;
	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		qdsb_put(ip->i_res->rs_qa_qd[x]);
		ip->i_res->rs_qa_qd[x] = NULL;
	}
	ip->i_res->rs_qa_qd_num = 0;
}
static int sort_qd(const void *a, const void *b)
{
	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

	if (qid_lt(qd_a->qd_id, qd_b->qd_id))
		return -1;
	if (qid_lt(qd_b->qd_id, qd_a->qd_id))
		return 1;
	return 0;
}
static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;
	s64 x;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);

	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		qc->qc_change = 0;
		qc->qc_flags = 0;
		if (qd->qd_id.type == USRQUOTA)
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
	}

	x = be64_to_cpu(qc->qc_change) + change;
	qc->qc_change = cpu_to_be64(x);

	spin_lock(&qd_lru_lock);
	qd->qd_change = x;
	spin_unlock(&qd_lru_lock);

	if (!x) {
		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);
		qc->qc_flags = 0;
		qc->qc_id = 0;
		slot_put(qd);
		qd_put(qd);
	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
		qd_hold(qd);
		slot_hold(qd);
	}

	mutex_unlock(&sdp->sd_quota_mutex);
}
/**
 * gfs2_adjust_quota - adjust record of current block usage
 * @ip: The quota inode
 * @loc: Offset of the entry in the quota file
 * @change: The amount of usage change to record
 * @qd: The quota data
 * @fdq: The updated limits to record
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 *
 * Returns: 0 or -ve on error
 */
static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
			     s64 change, struct gfs2_quota_data *qd,
			     struct fs_disk_quota *fdq)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long index = loc >> PAGE_CACHE_SHIFT;
	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize, iblock, pos;
	struct buffer_head *bh;
	struct page *page;
	void *kaddr, *ptr;
	struct gfs2_quota q, *qp;
	int err, nbytes;
	u64 size;

	if (gfs2_is_stuffed(ip)) {
		err = gfs2_unstuff_dinode(ip, NULL);
		if (err)
			return err;
	}

	memset(&q, 0, sizeof(struct gfs2_quota));
	err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
	if (err < 0)
		return err;

	err = -EIO;
	qp = &q;
	qp->qu_value = be64_to_cpu(qp->qu_value);
	qp->qu_value += change;
	qp->qu_value = cpu_to_be64(qp->qu_value);
	qd->qd_qb.qb_value = qp->qu_value;
	if (fdq) {
		if (fdq->d_fieldmask & FS_DQ_BSOFT) {
			qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
			qd->qd_qb.qb_warn = qp->qu_warn;
		}
		if (fdq->d_fieldmask & FS_DQ_BHARD) {
			qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
			qd->qd_qb.qb_limit = qp->qu_limit;
		}
		if (fdq->d_fieldmask & FS_DQ_BCOUNT) {
			qp->qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
			qd->qd_qb.qb_value = qp->qu_value;
		}
	}

	/* Write the quota into the quota file on disk */
	ptr = qp;
	nbytes = sizeof(struct gfs2_quota);
get_a_page:
	page = find_or_create_page(mapping, index, GFP_NOFS);
	if (!page)
		return -ENOMEM;

	blocksize = inode->i_sb->s_blocksize;
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	if (!buffer_mapped(bh)) {
		gfs2_block_map(inode, iblock, bh, 1);
		if (!buffer_mapped(bh))
			goto unlock_out;
		/* If it's a newly allocated disk block for quota, zero it */
		if (buffer_new(bh))
			zero_user(page, pos - blocksize, bh->b_size);
	}

	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		ll_rw_block(READ | REQ_META, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			goto unlock_out;
	}

	gfs2_trans_add_data(ip->i_gl, bh);

	kaddr = kmap_atomic(page);
	if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
		nbytes = PAGE_CACHE_SIZE - offset;
	memcpy(kaddr + offset, ptr, nbytes);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);
	unlock_page(page);
	page_cache_release(page);

	/* If quota straddles page boundary, we need to update the rest of the
	 * quota at the beginning of the next page */
	if ((offset + sizeof(struct gfs2_quota)) > PAGE_CACHE_SIZE) {
		ptr = ptr + nbytes;
		nbytes = sizeof(struct gfs2_quota) - nbytes;
		offset = 0;
		index++;
		goto get_a_page;
	}

	size = loc + sizeof(struct gfs2_quota);
	if (size > inode->i_size)
		i_size_write(inode, size);
	inode->i_mtime = inode->i_atime = CURRENT_TIME;
	mark_inode_dirty(inode);
	return 0;

unlock_out:
	unlock_page(page);
	page_cache_release(page);
	return err;
}
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	unsigned int qx, x;
	struct gfs2_quota_data *qd;
	unsigned reserved;
	loff_t offset;
	unsigned int nalloc = 0, blocks;
	int error;

	error = gfs2_rs_alloc(ip);
	if (error)
		return error;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			       &data_blocks, &ind_blocks);

	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
	if (!ghs)
		return -ENOMEM;

	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	mutex_lock(&ip->i_inode.i_mutex);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &ghs[qx]);
		if (error)
			goto out;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out;

	for (x = 0; x < num_qd; x++) {
		offset = qd2offset(qda[x]);
		if (gfs2_write_alloc_required(ip, offset,
					      sizeof(struct gfs2_quota)))
			nalloc++;
	}

	/*
	 * 1 blk for unstuffing inode if stuffed. We add this extra
	 * block to the reservation unconditionally. If the inode
	 * doesn't need unstuffing, the block will be released to the
	 * rgrp since it won't be allocated during the transaction
	 */
	/* +3 in the end for unstuffing block, inode size update block
	 * and another block in case quota straddles page boundary and
	 * two blocks need to be updated instead of 1 */
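	/* Worked example (editor's illustration with made-up numbers): with
	 * num_qd = 2 and data_blocks = 1 per record, the transaction below
	 * is sized at 2*1 + RES_DINODE + 2 + 3 blocks, while the in-place
	 * reservation asks for 1 + nalloc * (data_blocks + ind_blocks)
	 * blocks of actual allocation. */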
	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;

	reserved = 1 + (nalloc * (data_blocks + ind_blocks));
	error = gfs2_inplace_reserve(ip, reserved, 0);
	if (error)
		goto out_alloc;

	if (nalloc)
		blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;

	error = gfs2_trans_begin(sdp, blocks, 0);
	if (error)
		goto out_ipres;

	for (x = 0; x < num_qd; x++) {
		qd = qda[x];
		offset = qd2offset(qd);
		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
		if (error)
			goto out_end_trans;

		do_qc(qd, -qd->qd_change_sync);
		set_bit(QDF_REFRESH, &qd->qd_flags);
	}

	error = 0;
out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	gfs2_inplace_release(ip);
out_alloc:
	gfs2_glock_dq_uninit(&i_gh);
out:
	while (qx--)
		gfs2_glock_dq_uninit(&ghs[qx]);
	mutex_unlock(&ip->i_inode.i_mutex);
	kfree(ghs);
	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
	return error;
}
static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota q;
	struct gfs2_quota_lvb *qlvb;
	loff_t pos;
	int error;

	memset(&q, 0, sizeof(struct gfs2_quota));
	pos = qd2offset(qd);
	error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
	if (error < 0)
		return error;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
	qlvb->qb_limit = q.qu_limit;
	qlvb->qb_warn = q.qu_warn;
	qlvb->qb_value = q.qu_value;
	qd->qd_qb = *qlvb;

	return 0;
}
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_holder i_gh;
	int error;

restart:
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
	if (error)
		return error;

	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;

	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, q_gh);
		if (error)
			return error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
		if (error)
			goto fail;

		error = update_qd(sdp, qd);
		if (error)
			goto fail_gunlock;

		gfs2_glock_dq_uninit(&i_gh);
		gfs2_glock_dq_uninit(q_gh);
		force_refresh = 0;
		goto restart;
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	gfs2_glock_dq_uninit(q_gh);
	return error;
}
int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qd;
	unsigned int x;
	int error;

	error = gfs2_quota_hold(ip, uid, gid);
	if (error)
		return error;

	if (capable(CAP_SYS_RESOURCE) ||
	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	sort(ip->i_res->rs_qa_qd, ip->i_res->rs_qa_qd_num,
	     sizeof(struct gfs2_quota_data *), sort_qd, NULL);

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		int force = NO_FORCE;
		qd = ip->i_res->rs_qa_qd[x];
		if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
			force = FORCE;
		error = do_glock(qd, force, &ip->i_res->rs_qa_qd_ghs[x]);
		if (error)
			goto out;
	}

	set_bit(GIF_QD_LOCKED, &ip->i_flags);
	return 0;

out:
	while (x--)
		gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);
	gfs2_quota_unhold(ip);
	return error;
}
static int need_sync(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_tune *gt = &sdp->sd_tune;
	s64 value;
	unsigned int num, den;
	int do_sync = 1;

	if (!qd->qd_qb.qb_limit)
		return 0;

	spin_lock(&qd_lru_lock);
	value = qd->qd_change;
	spin_unlock(&qd_lru_lock);

	spin_lock(&gt->gt_spin);
	num = gt->gt_quota_scale_num;
	den = gt->gt_quota_scale_den;
	spin_unlock(&gt->gt_spin);

	if (value < 0)
		do_sync = 0;
	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
		do_sync = 0;
	else {
		value *= gfs2_jindex_size(sdp) * num;
		value = div_s64(value, den);
		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
			do_sync = 0;
	}

	return do_sync;
}
void gfs2_quota_unlock(struct gfs2_inode *ip)
{
	struct gfs2_quota_data *qda[4];
	unsigned int count = 0;
	unsigned int x;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
		goto out;

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		struct gfs2_quota_data *qd;
		int sync;

		qd = ip->i_res->rs_qa_qd[x];
		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);

		if (sync && qd_trylock(qd))
			qda[count++] = qd;
	}

	if (count) {
		do_sync(count, qda);
		for (x = 0; x < count; x++)
			qd_unlock(qda[x]);
	}

out:
	gfs2_quota_unhold(ip);
}
#define MAX_LINE 256
static int print_message(struct gfs2_quota_data *qd, char *type)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n",
	       sdp->sd_fsname, type,
	       (qd->qd_id.type == USRQUOTA) ? "user" : "group",
	       from_kqid(&init_user_ns, qd->qd_id));

	return 0;
}
int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qd;
	s64 value;
	unsigned int x;
	int error = 0;

	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
		return 0;

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		qd = ip->i_res->rs_qa_qd[x];

		if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
		      qid_eq(qd->qd_id, make_kqid_gid(gid))))
			continue;

		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
		spin_lock(&qd_lru_lock);
		value += qd->qd_change;
		spin_unlock(&qd_lru_lock);

		if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
			print_message(qd, "exceeded");
			quota_send_warning(qd->qd_id,
					   sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN);
			error = -EDQUOT;
			break;
		} else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
			   (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
			   time_after_eq(jiffies, qd->qd_last_warn +
					 gfs2_tune_get(sdp,
						       gt_quota_warn_period) * HZ)) {
			quota_send_warning(qd->qd_id,
					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
			error = print_message(qd, "warning");
			qd->qd_last_warn = jiffies;
		}
	}

	return error;
}
void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
		       kuid_t uid, kgid_t gid)
{
	struct gfs2_quota_data *qd;
	unsigned int x;

	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
		return;
	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
		return;

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		qd = ip->i_res->rs_qa_qd[x];

		if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
		    qid_eq(qd->qd_id, make_kqid_gid(gid))) {
			do_qc(qd, change);
		}
	}
}
int gfs2_quota_sync(struct super_block *sb, int type)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_data **qda;
	unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
	unsigned int num_qd;
	unsigned int x;
	int error = 0;

	sdp->sd_quota_sync_gen++;

	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
	if (!qda)
		return -ENOMEM;

	do {
		num_qd = 0;

		for (;;) {
			error = qd_fish(sdp, qda + num_qd);
			if (error || !qda[num_qd])
				break;
			if (++num_qd == max_qd)
				break;
		}

		if (num_qd) {
			if (!error)
				error = do_sync(num_qd, qda);
			if (!error)
				for (x = 0; x < num_qd; x++)
					qda[x]->qd_sync_gen =
						sdp->sd_quota_sync_gen;

			for (x = 0; x < num_qd; x++)
				qd_unlock(qda[x]);
		}
	} while (!error && num_qd == max_qd);

	kfree(qda);

	return error;
}
int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
{
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;

	error = do_glock(qd, FORCE, &q_gh);
	if (!error)
		gfs2_glock_dq_uninit(&q_gh);

	qd_put(qd);
	return error;
}
static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf)
{
	const struct gfs2_quota_change *str = buf;

	qc->qc_change = be64_to_cpu(str->qc_change);
	qc->qc_flags = be32_to_cpu(str->qc_flags);
	qc->qc_id = make_kqid(&init_user_ns,
			      (qc->qc_flags & GFS2_QCF_USER) ? USRQUOTA : GRPQUOTA,
			      be32_to_cpu(str->qc_id));
}
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	u64 size = i_size_read(sdp->sd_qc_inode);
	unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;
	u64 dblock;
	u32 extlen = 0;
	int error;

	if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
		return -EIO;

	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
	sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);

	error = -ENOMEM;

	sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
				       sizeof(unsigned char *), GFP_NOFS);
	if (!sdp->sd_quota_bitmap)
		return error;

	for (x = 0; x < sdp->sd_quota_chunks; x++) {
		sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_NOFS);
		if (!sdp->sd_quota_bitmap[x])
			goto fail;
	}

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;
		unsigned int y;

		if (!extlen) {
			int new = 0;
			error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
			if (error)
				goto fail;
		}
		error = -EIO;
		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
		if (!bh)
			goto fail;
		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
			brelse(bh);
			goto fail;
		}

		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
		     y++, slot++) {
			struct gfs2_quota_change_host qc;
			struct gfs2_quota_data *qd;

			gfs2_quota_change_in(&qc, bh->b_data +
					  sizeof(struct gfs2_meta_header) +
					  y * sizeof(struct gfs2_quota_change));
			if (!qc.qc_change)
				continue;

			error = qd_alloc(sdp, qc.qc_id, &qd);
			if (error) {
				brelse(bh);
				goto fail;
			}

			set_bit(QDF_CHANGE, &qd->qd_flags);
			qd->qd_change = qc.qc_change;
			qd->qd_slot = slot;
			qd->qd_slot_count = 1;

			spin_lock(&qd_lru_lock);
			gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			spin_unlock(&qd_lru_lock);

			found++;
		}

		brelse(bh);
		dblock++;
		extlen--;
	}

	if (found)
		fs_info(sdp, "found %u quota changes\n", found);

	return 0;

fail:
	gfs2_quota_cleanup(sdp);
	return error;
}
void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_quota_list;
	struct gfs2_quota_data *qd;
	unsigned int x;

	spin_lock(&qd_lru_lock);
	while (!list_empty(head)) {
		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

		if (atomic_read(&qd->qd_count) > 1 ||
		    (atomic_read(&qd->qd_count) &&
		     !test_bit(QDF_CHANGE, &qd->qd_flags))) {
			list_move(&qd->qd_list, head);
			spin_unlock(&qd_lru_lock);
			schedule();
			spin_lock(&qd_lru_lock);
			continue;
		}

		list_del(&qd->qd_list);
		/* Also remove if this qd exists in the reclaim list */
		if (!list_empty(&qd->qd_reclaim)) {
			list_del_init(&qd->qd_reclaim);
			atomic_dec(&qd_lru_count);
		}
		atomic_dec(&sdp->sd_quota_count);
		spin_unlock(&qd_lru_lock);

		if (!atomic_read(&qd->qd_count)) {
			gfs2_assert_warn(sdp, !qd->qd_change);
			gfs2_assert_warn(sdp, !qd->qd_slot_count);
		} else
			gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		kmem_cache_free(gfs2_quotad_cachep, qd);

		spin_lock(&qd_lru_lock);
	}
	spin_unlock(&qd_lru_lock);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

	if (sdp->sd_quota_bitmap) {
		for (x = 0; x < sdp->sd_quota_chunks; x++)
			kfree(sdp->sd_quota_bitmap[x]);
		kfree(sdp->sd_quota_bitmap);
	}
}
static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
{
	if (error == 0 || error == -EROFS)
		return;
	if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
		fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
}
static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
			       int (*fxn)(struct super_block *sb, int type),
			       unsigned long t, unsigned long *timeo,
			       unsigned int *new_timeo)
{
	if (t >= *timeo) {
		int error = fxn(sdp->sd_vfs, 0);
		quotad_error(sdp, msg, error);
		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
	} else {
		*timeo -= t;
	}
}
static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	while (1) {
		ip = NULL;
		spin_lock(&sdp->sd_trunc_lock);
		if (!list_empty(&sdp->sd_trunc_list)) {
			ip = list_entry(sdp->sd_trunc_list.next,
					struct gfs2_inode, i_trunc_list);
			list_del_init(&ip->i_trunc_list);
		}
		spin_unlock(&sdp->sd_trunc_lock);
		if (ip == NULL)
			return;
		gfs2_glock_finish_truncate(ip);
	}
}
void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
	if (!sdp->sd_statfs_force_sync) {
		sdp->sd_statfs_force_sync = 1;
		wake_up(&sdp->sd_quota_wait);
	}
}
/**
 * gfs2_quotad - Write cached quota changes into the quota file
 * @sdp: Pointer to GFS2 superblock
 *
 */
int gfs2_quotad(void *data)
{
	struct gfs2_sbd *sdp = data;
	struct gfs2_tune *tune = &sdp->sd_tune;
	unsigned long statfs_timeo = 0;
	unsigned long quotad_timeo = 0;
	unsigned long t = 0;
	DEFINE_WAIT(wait);
	int empty;

	while (!kthread_should_stop()) {

		/* Update the master statfs file */
		if (sdp->sd_statfs_force_sync) {
			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
			quotad_error(sdp, "statfs", error);
			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
		} else
			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
					   &statfs_timeo,
					   &tune->gt_statfs_quantum);

		/* Update quota file */
		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
				   &quotad_timeo, &tune->gt_quota_quantum);

		/* Check for & recover partially truncated inodes */
		quotad_check_trunc_list(sdp);

		try_to_freeze();

		t = min(quotad_timeo, statfs_timeo);

		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
		spin_lock(&sdp->sd_trunc_lock);
		empty = list_empty(&sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		if (empty && !sdp->sd_statfs_force_sync)
			t -= schedule_timeout(t);
		else
			t = 0;
		finish_wait(&sdp->sd_quota_wait, &wait);
	}

	return 0;
}
static int gfs2_quota_get_xstate(struct super_block *sb,
				 struct fs_quota_stat *fqs)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	memset(fqs, 0, sizeof(struct fs_quota_stat));
	fqs->qs_version = FS_QSTAT_VERSION;

	switch (sdp->sd_args.ar_quota) {
	case GFS2_QUOTA_ON:
		fqs->qs_flags |= (FS_QUOTA_UDQ_ENFD | FS_QUOTA_GDQ_ENFD);
		/*FALLTHRU*/
	case GFS2_QUOTA_ACCOUNT:
		fqs->qs_flags |= (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT);
		break;
	case GFS2_QUOTA_OFF:
		break;
	}

	if (sdp->sd_quota_inode) {
		fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr;
		fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks;
	}
	fqs->qs_uquota.qfs_nextents = 1; /* unsupported */
	fqs->qs_gquota = fqs->qs_uquota; /* it's the same inode in both cases */
	fqs->qs_incoredqs = atomic_read(&qd_lru_count);
	return 0;
}
static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
			  struct fs_disk_quota *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_lvb *qlvb;
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	memset(fdq, 0, sizeof(struct fs_disk_quota));

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if ((qid.type != USRQUOTA) &&
	    (qid.type != GRPQUOTA))
		return -EINVAL;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;
	error = do_glock(qd, FORCE, &q_gh);
	if (error)
		goto out;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
	fdq->d_version = FS_DQUOT_VERSION;
	fdq->d_flags = (qid.type == USRQUOTA) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
	fdq->d_id = from_kqid_munged(current_user_ns(), qid);
	fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
	fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
	fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;

	gfs2_glock_dq_uninit(&q_gh);
out:
	qd_put(qd);
	return error;
}
/* GFS2 only supports a subset of the XFS fields */
#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT)
static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
			  struct fs_disk_quota *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh, i_gh;
	unsigned int data_blocks, ind_blocks;
	unsigned int blocks = 0;
	int alloc_required;
	loff_t offset;
	int error;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if ((qid.type != USRQUOTA) &&
	    (qid.type != GRPQUOTA))
		return -EINVAL;

	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
		return -EINVAL;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;

	error = gfs2_rs_alloc(ip);
	if (error)
		goto out_put;

	mutex_lock(&ip->i_inode.i_mutex);
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
	if (error)
		goto out_unlockput;
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out_q;

	/* Check for existing entry, if none then alloc new blocks */
	error = update_qd(sdp, qd);
	if (error)
		goto out_i;

	/* If nothing has changed, this is a no-op */
	if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
	    ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
		fdq->d_fieldmask ^= FS_DQ_BSOFT;

	if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
	    ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
		fdq->d_fieldmask ^= FS_DQ_BHARD;

	if ((fdq->d_fieldmask & FS_DQ_BCOUNT) &&
	    ((fdq->d_bcount >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
		fdq->d_fieldmask ^= FS_DQ_BCOUNT;

	if (fdq->d_fieldmask == 0)
		goto out_i;

	offset = qd2offset(qd);
	alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
	if (gfs2_is_stuffed(ip))
		alloc_required = 1;
	if (alloc_required) {
		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
				       &data_blocks, &ind_blocks);
		blocks = 1 + data_blocks + ind_blocks;
		error = gfs2_inplace_reserve(ip, blocks, 0);
		if (error)
			goto out_i;
		blocks += gfs2_rg_blocks(ip, blocks);
	}

	/* Some quotas span block boundaries and can update two blocks,
	   adding an extra block to the transaction to handle such quotas */
	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
	if (error)
		goto out_release;

	/* Apply changes */
	error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);

	gfs2_trans_end(sdp);
out_release:
	if (alloc_required)
		gfs2_inplace_release(ip);
out_i:
	gfs2_glock_dq_uninit(&i_gh);
out_q:
	gfs2_glock_dq_uninit(&q_gh);
out_unlockput:
	mutex_unlock(&ip->i_inode.i_mutex);
out_put:
	qd_put(qd);
	return error;
}
const struct quotactl_ops gfs2_quotactl_ops = {
	.quota_sync	= gfs2_quota_sync,
	.get_xstate	= gfs2_quota_get_xstate,
	.get_dqblk	= gfs2_get_dqblk,
	.set_dqblk	= gfs2_set_dqblk,
};