// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the diskquota system for the LINUX operating system. QUOTA
 * is implemented using the BSD system call interface as the means of
 * communication with the user level. This file contains the generic routines
 * called by the different filesystems on allocation of an inode or block.
 * These routines take care of the administration needed to have a consistent
 * diskquota tracking system. The ideas of both user and group quotas are based
 * on the Melbourne quota system as used on BSD derived systems. The internal
 * implementation is based on one of the several variants of the LINUX
 * inode-subsystem with added complexity of the diskquota system.
 *
 * Author:	Marco van Wieringen <mvw@planets.elm.net>
 *
 * Fixes:	Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96
 *
 *		Revised list management to avoid races
 *		-- Bill Hawes, <whawes@star.net>, 9/98
 *
 *		Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
 *		As a consequence the locking was moved from dquot_decr_...(),
 *		dquot_incr_...() to calling functions.
 *		invalidate_dquots() now writes modified dquots.
 *		Serialized quota_off() and quota_on() for mount point.
 *		Fixed a few bugs in grow_dquots().
 *		Fixed deadlock in write_dquot() - we no longer account quotas on
 *		quota files
 *		remove_dquot_ref() moved to inode.c - it now traverses through inodes
 *		add_dquot_ref() restarts after blocking
 *		Added check for bogus uid and fixed check for group in quotactl.
 *		Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99
 *
 *		Used struct list_head instead of own list struct
 *		Invalidation of referenced dquots is no longer possible
 *		Improved free_dquots list management
 *		Quota and i_blocks are now updated in one place to avoid races
 *		Warnings are now delayed so we won't block in critical section
 *		Write updated not to require dquot lock
 *		Jan Kara, <jack@suse.cz>, 9/2000
 *
 *		Added dynamic quota structure allocation
 *		Jan Kara <jack@suse.cz> 12/2000
 *
 *		Rewritten quota interface. Implemented new quota format and
 *		formats registering.
 *		Jan Kara, <jack@suse.cz>, 2001,2002
 *
 *		New SMP locking.
 *		Jan Kara, <jack@suse.cz>, 10/2002
 *
 *		Added journalled quota support, fix lock inversion problems
 *		Jan Kara, <jack@suse.cz>, 2003,2004
 *
 * (C) Copyright 1994 - 1997 Marco van Wieringen
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/mm.h>
#include <linux/time.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/tty.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/kmod.h>
#include <linux/namei.h>
#include <linux/capability.h>
#include <linux/quotaops.h>
#include <linux/blkdev.h>
#include <linux/sched/mm.h>

#include <linux/uaccess.h>
/*
 * There are five quota SMP locks:
 * * dq_list_lock protects all lists with quotas and quota formats.
 * * dquot->dq_dqb_lock protects data from dq_dqb
 * * inode->i_lock protects inode->i_blocks, i_bytes and also guards
 *   consistency of dquot->dq_dqb with inode->i_blocks, i_bytes so that
 *   dquot_transfer() can stabilize the amount it transfers
 * * dq_data_lock protects mem_dqinfo structures and modifications of dquot
 *   pointers in the inode
 * * dq_state_lock protects modifications of quota state (on quotaon and
 *   quotaoff) and readers who care about latest values take it as well.
 *
 * The spinlock ordering is hence:
 *   dq_data_lock > dq_list_lock > i_lock > dquot->dq_dqb_lock,
 *   dq_list_lock > dq_state_lock
 *
 * Note that some things (eg. the sb pointer, type, id) don't change during
 * the life of the dquot structure and so need not be protected by a lock.
 *
 * Operations accessing dquots via inode pointers are protected by dquot_srcu.
 * Reading a pointer needs srcu_read_lock(&dquot_srcu), and
 * synchronize_srcu(&dquot_srcu) is called after clearing pointers from
 * inode and before dropping dquot references to avoid use of dquots after
 * they are freed. dq_data_lock is used to serialize the pointer setting and
 * clearing operations.
 * Special care needs to be taken about the S_NOQUOTA inode flag (marking that
 * an inode is a quota file). Functions adding pointers from inode to dquots
 * have to check this flag under dq_data_lock and then (if S_NOQUOTA is not
 * set) they have to do all pointer modifications before dropping
 * dq_data_lock. This makes sure they cannot race with quotaon which first
 * sets the S_NOQUOTA flag and then drops all pointers to dquots from an
 * inode.
 *
 * Each dquot has its dq_lock mutex. A dquot is locked when it is being read
 * into memory (or space for it is being allocated) on the first dqget(), when
 * it is being written out, and when it is being released on the last dqput().
 * The allocation and release operations are serialized by the dq_lock and by
 * checking the use count in dquot_release().
 *
 * Lock ordering (including related VFS locks) is the following:
 *   s_umount > i_mutex > journal_lock > dquot->dq_lock > dqio_sem
 */
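/*
 * Illustrative nesting only (a sketch, not code that exists in this file):
 * a hypothetical path that needed several of these spinlocks at once would
 * have to take them in the documented order, e.g.:
 *
 *	spin_lock(&dq_data_lock);
 *	spin_lock(&dq_list_lock);
 *	spin_lock(&inode->i_lock);
 *	spin_lock(&dquot->dq_dqb_lock);
 *	...
 *	spin_unlock(&dquot->dq_dqb_lock);
 *	spin_unlock(&inode->i_lock);
 *	spin_unlock(&dq_list_lock);
 *	spin_unlock(&dq_data_lock);
 */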
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
EXPORT_SYMBOL(dq_data_lock);
DEFINE_STATIC_SRCU(dquot_srcu);

static DECLARE_WAIT_QUEUE_HEAD(dquot_ref_wq);
void __quota_error(struct super_block *sb, const char *func,
		   const char *fmt, ...)
{
	if (printk_ratelimit()) {
		va_list args;
		struct va_format vaf;

		va_start(args, fmt);

		vaf.fmt = fmt;
		vaf.va = &args;

		printk(KERN_ERR "Quota error (device %s): %s: %pV\n",
		       sb->s_id, func, &vaf);

		va_end(args);
	}
}
EXPORT_SYMBOL(__quota_error);
#if defined(CONFIG_QUOTA_DEBUG) || defined(CONFIG_PRINT_QUOTA_WARNING)
static char *quotatypes[] = INITQFNAMES;
#endif
static struct quota_format_type *quota_formats;	/* List of registered formats */
static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;

/* SLAB cache for dquot structures */
static struct kmem_cache *dquot_cachep;
void register_quota_format(struct quota_format_type *fmt)
{
	spin_lock(&dq_list_lock);
	fmt->qf_next = quota_formats;
	quota_formats = fmt;
	spin_unlock(&dq_list_lock);
}
EXPORT_SYMBOL(register_quota_format);

void unregister_quota_format(struct quota_format_type *fmt)
{
	struct quota_format_type **actqf;

	spin_lock(&dq_list_lock);
	for (actqf = &quota_formats; *actqf && *actqf != fmt;
	     actqf = &(*actqf)->qf_next)
		;
	if (*actqf)
		*actqf = (*actqf)->qf_next;
	spin_unlock(&dq_list_lock);
}
EXPORT_SYMBOL(unregister_quota_format);
static struct quota_format_type *find_quota_format(int id)
{
	struct quota_format_type *actqf;

	spin_lock(&dq_list_lock);
	for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
	     actqf = actqf->qf_next)
		;
	if (!actqf || !try_module_get(actqf->qf_owner)) {
		int qm;

		spin_unlock(&dq_list_lock);

		for (qm = 0; module_names[qm].qm_fmt_id &&
			     module_names[qm].qm_fmt_id != id; qm++)
			;
		if (!module_names[qm].qm_fmt_id ||
		    request_module(module_names[qm].qm_mod_name))
			return NULL;

		spin_lock(&dq_list_lock);
		for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
		     actqf = actqf->qf_next)
			;
		if (actqf && !try_module_get(actqf->qf_owner))
			actqf = NULL;
	}
	spin_unlock(&dq_list_lock);
	return actqf;
}

static void put_quota_format(struct quota_format_type *fmt)
{
	module_put(fmt->qf_owner);
}
/*
 * Dquot List Management:
 * The quota code uses five lists for dquot management: the inuse_list,
 * releasing_dquots, free_dquots, dqi_dirty_list, and dquot_hash[] array.
 * A single dquot structure may be on some of those lists, depending on
 * its current state.
 *
 * All dquots are placed at the end of inuse_list when first created, and this
 * list is used for invalidate operation, which must look at every dquot.
 *
 * When the last reference of a dquot is dropped, the dquot is added to
 * releasing_dquots. We'll then queue a work item which will call
 * synchronize_srcu() and after that perform the final cleanup of all the
 * dquots on the list. Each cleaned up dquot is moved to the free_dquots list.
 * Both releasing_dquots and free_dquots use the dq_free list_head in the
 * dquot struct.
 *
 * Unused and cleaned up dquots are in the free_dquots list and this list is
 * searched whenever we need an available dquot. Dquots are removed from the
 * list as soon as they are used again and dqstats.free_dquots gives the number
 * of dquots on the list. When a dquot is invalidated it's completely released
 * from memory.
 *
 * Dirty dquots are added to the dqi_dirty_list of quota_info when marked
 * dirty, and this list is searched when writing dirty dquots back to the
 * quota file. Note that some filesystems do dirty dquot tracking on their
 * own (e.g. in a journal) and thus don't use dqi_dirty_list.
 *
 * Dquots with a specific identity (device, type and id) are placed on
 * one of the dquot_hash[] hash chains. This provides an efficient search
 * mechanism to locate a specific dquot.
 */
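/*
 * Sketch of the resulting dquot lifecycle (state transitions implied by the
 * lists above, for orientation only):
 *
 *	dqget()                : taken off free_dquots (if cached), or newly
 *	                         created and put on inuse_list + dquot_hash
 *	dqput() (last ref)     : moved to releasing_dquots, DQ_RELEASING_B set
 *	quota_release_workfn() : after synchronize_srcu(), written back and
 *	                         moved to free_dquots
 *	shrinker/invalidation  : removed from all lists and destroyed
 */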
static LIST_HEAD(inuse_list);
static LIST_HEAD(free_dquots);
static LIST_HEAD(releasing_dquots);
static unsigned int dq_hash_bits, dq_hash_mask;
static struct hlist_head *dquot_hash;

struct dqstats dqstats;
EXPORT_SYMBOL(dqstats);

static qsize_t inode_get_rsv_space(struct inode *inode);
static qsize_t __inode_get_rsv_space(struct inode *inode);
static int __dquot_initialize(struct inode *inode, int type);

static void quota_release_workfn(struct work_struct *work);
static DECLARE_DELAYED_WORK(quota_release_work, quota_release_workfn);
static inline unsigned int
hashfn(const struct super_block *sb, struct kqid qid)
{
	unsigned int id = from_kqid(&init_user_ns, qid);
	int type = qid.type;
	unsigned long tmp;

	tmp = (((unsigned long)sb >> L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
	return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
}
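/*
 * Example with illustrative numbers only: if dq_hash_bits == 10 (so
 * dq_hash_mask == 0x3ff), the fold "tmp + (tmp >> dq_hash_bits)" mixes the
 * high bits of tmp into the low 10 bits before the mask selects the chain.
 */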
/*
 * Following list functions expect dq_list_lock to be held
 */
static inline void insert_dquot_hash(struct dquot *dquot)
{
	struct hlist_head *head;

	head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id);
	hlist_add_head(&dquot->dq_hash, head);
}

static inline void remove_dquot_hash(struct dquot *dquot)
{
	hlist_del_init(&dquot->dq_hash);
}

static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
				struct kqid qid)
{
	struct dquot *dquot;

	hlist_for_each_entry(dquot, dquot_hash + hashent, dq_hash)
		if (dquot->dq_sb == sb && qid_eq(dquot->dq_id, qid))
			return dquot;

	return NULL;
}

/* Add a dquot to the tail of the free list */
static inline void put_dquot_last(struct dquot *dquot)
{
	list_add_tail(&dquot->dq_free, &free_dquots);
	dqstats_inc(DQST_FREE_DQUOTS);
}

static inline void put_releasing_dquots(struct dquot *dquot)
{
	list_add_tail(&dquot->dq_free, &releasing_dquots);
	set_bit(DQ_RELEASING_B, &dquot->dq_flags);
}

static inline void remove_free_dquot(struct dquot *dquot)
{
	if (list_empty(&dquot->dq_free))
		return;
	list_del_init(&dquot->dq_free);
	if (!test_bit(DQ_RELEASING_B, &dquot->dq_flags))
		dqstats_dec(DQST_FREE_DQUOTS);
	else
		clear_bit(DQ_RELEASING_B, &dquot->dq_flags);
}

static inline void put_inuse(struct dquot *dquot)
{
	/* We add to the back of inuse list so we don't have to restart
	 * when traversing this list and we block */
	list_add_tail(&dquot->dq_inuse, &inuse_list);
	dqstats_inc(DQST_ALLOC_DQUOTS);
}

static inline void remove_inuse(struct dquot *dquot)
{
	dqstats_dec(DQST_ALLOC_DQUOTS);
	list_del(&dquot->dq_inuse);
}
/*
 * End of list functions needing dq_list_lock
 */

static void wait_on_dquot(struct dquot *dquot)
{
	mutex_lock(&dquot->dq_lock);
	mutex_unlock(&dquot->dq_lock);
}

static inline int dquot_active(struct dquot *dquot)
{
	return test_bit(DQ_ACTIVE_B, &dquot->dq_flags);
}

static inline int dquot_dirty(struct dquot *dquot)
{
	return test_bit(DQ_MOD_B, &dquot->dq_flags);
}

static inline int mark_dquot_dirty(struct dquot *dquot)
{
	return dquot->dq_sb->dq_op->mark_dirty(dquot);
}
/* Mark dquot dirty in atomic manner, and return its old dirty flag state */
int dquot_mark_dquot_dirty(struct dquot *dquot)
{
	int ret = 1;

	if (!dquot_active(dquot))
		return 0;

	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
		return test_and_set_bit(DQ_MOD_B, &dquot->dq_flags);

	/* If quota is dirty already, we don't have to acquire dq_list_lock */
	if (dquot_dirty(dquot))
		return 1;

	spin_lock(&dq_list_lock);
	if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
		list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
				info[dquot->dq_id.type].dqi_dirty_list);
		ret = 0;
	}
	spin_unlock(&dq_list_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_mark_dquot_dirty);
/* Dirtify all the dquots - this can block when journalling */
static inline int mark_all_dquot_dirty(struct dquot __rcu * const *dquots)
{
	int ret, err, cnt;
	struct dquot *dquot;

	ret = err = 0;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (dquot)
			/* Even in case of error we have to continue */
			ret = mark_dquot_dirty(dquot);
		if (!err && ret < 0)
			err = ret;
	}
	return err;
}

static inline void dqput_all(struct dquot **dquot)
{
	unsigned int cnt;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		dqput(dquot[cnt]);
}

static inline int clear_dquot_dirty(struct dquot *dquot)
{
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
		return test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags);

	spin_lock(&dq_list_lock);
	if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags)) {
		spin_unlock(&dq_list_lock);
		return 0;
	}
	list_del_init(&dquot->dq_dirty);
	spin_unlock(&dq_list_lock);
	return 1;
}
void mark_info_dirty(struct super_block *sb, int type)
{
	spin_lock(&dq_data_lock);
	sb_dqopt(sb)->info[type].dqi_flags |= DQF_INFO_DIRTY;
	spin_unlock(&dq_data_lock);
}
EXPORT_SYMBOL(mark_info_dirty);
/*
 *	Read dquot from disk and alloc space for it
 */

int dquot_acquire(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	unsigned int memalloc;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	memalloc = memalloc_nofs_save();
	if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
		ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot);
		if (ret < 0)
			goto out_iolock;
	}
	/* Make sure flags update is visible after dquot has been filled */
	smp_mb__before_atomic();
	set_bit(DQ_READ_B, &dquot->dq_flags);
	/* Instantiate dquot if needed */
	if (!dquot_active(dquot) && !dquot->dq_off) {
		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
		/* Write the info if needed */
		if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
			ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
					dquot->dq_sb, dquot->dq_id.type);
		}
		if (ret < 0)
			goto out_iolock;
		if (ret2 < 0) {
			ret = ret2;
			goto out_iolock;
		}
	}
	/*
	 * Make sure flags update is visible after on-disk struct has been
	 * allocated. Paired with smp_rmb() in dqget().
	 */
	smp_mb__before_atomic();
	set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_iolock:
	memalloc_nofs_restore(memalloc);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_acquire);
/*
 *	Write dquot to disk
 */
int dquot_commit(struct dquot *dquot)
{
	int ret = 0;
	unsigned int memalloc;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	memalloc = memalloc_nofs_save();
	if (!clear_dquot_dirty(dquot))
		goto out_lock;
	/* An inactive dquot can only mean there was an error during read/init,
	 * so we had better not write it */
	if (dquot_active(dquot))
		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
	else
		ret = -EIO;
out_lock:
	memalloc_nofs_restore(memalloc);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_commit);
/*
 *	Release dquot
 */
int dquot_release(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	unsigned int memalloc;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	memalloc = memalloc_nofs_save();
	/* Check whether we are not racing with some other dqget() */
	if (dquot_is_busy(dquot))
		goto out_dqlock;
	if (dqopt->ops[dquot->dq_id.type]->release_dqblk) {
		ret = dqopt->ops[dquot->dq_id.type]->release_dqblk(dquot);
		/* Write the info */
		if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
			ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
						dquot->dq_sb, dquot->dq_id.type);
		}
		if (ret >= 0)
			ret = ret2;
	}
	clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_dqlock:
	memalloc_nofs_restore(memalloc);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_release);

void dquot_destroy(struct dquot *dquot)
{
	kmem_cache_free(dquot_cachep, dquot);
}
EXPORT_SYMBOL(dquot_destroy);

static inline void do_destroy_dquot(struct dquot *dquot)
{
	dquot->dq_sb->dq_op->destroy_dquot(dquot);
}
/* Invalidate all dquots on the list. Note that this function is called after
 * quota is disabled and pointers from inodes removed so there cannot be new
 * quota users. There can still be some users of quotas due to inodes being
 * just deleted or pruned by prune_icache() (those are not attached to any
 * list) or a parallel quotactl call. We have to wait for such users.
 */
static void invalidate_dquots(struct super_block *sb, int type)
{
	struct dquot *dquot, *tmp;

restart:
	flush_delayed_work(&quota_release_work);

	spin_lock(&dq_list_lock);
	list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
		if (dquot->dq_sb != sb)
			continue;
		if (dquot->dq_id.type != type)
			continue;
		/* Wait for dquot users */
		if (atomic_read(&dquot->dq_count)) {
			atomic_inc(&dquot->dq_count);
			spin_unlock(&dq_list_lock);
			/*
			 * Once dqput() wakes us up, we know it's time to free
			 * the dquot.
			 * IMPORTANT: we rely on the fact that there is always
			 * at most one process waiting for dquot to free.
			 * Otherwise dq_count would be > 1 and we would never
			 * wake up.
			 */
			wait_event(dquot_ref_wq,
				   atomic_read(&dquot->dq_count) == 1);
			dqput(dquot);
			/* At this moment the dquot need not exist (it could
			 * have been reclaimed by prune_dqcache()). Hence we
			 * must restart. */
			goto restart;
		}
		/*
		 * The last user already dropped its reference but the dquot
		 * didn't get fully cleaned up yet. Restart the scan which
		 * flushes the work cleaning up released dquots.
		 */
		if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) {
			spin_unlock(&dq_list_lock);
			goto restart;
		}
		/*
		 * Quota now has no users and it has been written on last
		 * dqput()
		 */
		remove_dquot_hash(dquot);
		remove_free_dquot(dquot);
		remove_inuse(dquot);
		do_destroy_dquot(dquot);
	}
	spin_unlock(&dq_list_lock);
}
/* Call callback for every active dquot on given filesystem */
int dquot_scan_active(struct super_block *sb,
		      int (*fn)(struct dquot *dquot, unsigned long priv),
		      unsigned long priv)
{
	struct dquot *dquot, *old_dquot = NULL;
	int ret = 0;

	WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&dq_list_lock);
	list_for_each_entry(dquot, &inuse_list, dq_inuse) {
		if (!dquot_active(dquot))
			continue;
		if (dquot->dq_sb != sb)
			continue;
		/* Now we have active dquot so we can just increase use count */
		atomic_inc(&dquot->dq_count);
		spin_unlock(&dq_list_lock);
		dqput(old_dquot);
		old_dquot = dquot;
		/*
		 * ->release_dquot() can be racing with us. Our reference
		 * protects us from new calls to it so just wait for any
		 * outstanding call and recheck the DQ_ACTIVE_B after that.
		 */
		wait_on_dquot(dquot);
		if (dquot_active(dquot)) {
			ret = fn(dquot, priv);
			if (ret < 0)
				goto out;
		}
		spin_lock(&dq_list_lock);
		/* We are safe to continue now because our dquot could not
		 * be moved out of the inuse list while we hold the reference */
	}
	spin_unlock(&dq_list_lock);
out:
	dqput(old_dquot);
	return ret;
}
EXPORT_SYMBOL(dquot_scan_active);
static inline int dquot_write_dquot(struct dquot *dquot)
{
	int ret = dquot->dq_sb->dq_op->write_dquot(dquot);

	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't write quota structure "
			    "(error %d). Quota may get out of sync!", ret);
		/* Clear dirty bit anyway to avoid infinite loop. */
		clear_dquot_dirty(dquot);
	}
	return ret;
}
/* Write all dquot structures to quota files */
int dquot_writeback_dquots(struct super_block *sb, int type)
{
	struct list_head dirty;
	struct dquot *dquot;
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int err, ret = 0;

	WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));

	flush_delayed_work(&quota_release_work);

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		spin_lock(&dq_list_lock);
		/* Move list away to avoid livelock. */
		list_replace_init(&dqopt->info[cnt].dqi_dirty_list, &dirty);
		while (!list_empty(&dirty)) {
			dquot = list_first_entry(&dirty, struct dquot,
						 dq_dirty);

			WARN_ON(!dquot_active(dquot));
			/* If the dquot is releasing we should not touch it */
			if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) {
				spin_unlock(&dq_list_lock);
				flush_delayed_work(&quota_release_work);
				spin_lock(&dq_list_lock);
				continue;
			}

			/* Now we have active dquot from which someone is
			 * holding reference so we can safely just increase
			 * use count */
			dqgrab(dquot);
			spin_unlock(&dq_list_lock);
			err = dquot_write_dquot(dquot);
			if (err && !ret)
				ret = err;
			dqput(dquot);
			spin_lock(&dq_list_lock);
		}
		spin_unlock(&dq_list_lock);
	}

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
		    && info_dirty(&dqopt->info[cnt]))
			sb->dq_op->write_info(sb, cnt);
	dqstats_inc(DQST_SYNCS);

	return ret;
}
EXPORT_SYMBOL(dquot_writeback_dquots);
/* Write all dquot structures to disk and make them visible from userspace */
int dquot_quota_sync(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int ret;

	ret = dquot_writeback_dquots(sb, type);
	if (ret)
		return ret;
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
		return 0;

	/* This is not very clever (or fast), but currently there is no
	 * simpler way of getting the quota data to disk, and it must get
	 * there for userspace to see it... */
	if (sb->s_op->sync_fs) {
		ret = sb->s_op->sync_fs(sb, 1);
		if (ret)
			return ret;
	}
	ret = sync_blockdev(sb->s_bdev);
	if (ret)
		return ret;

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		inode_lock(dqopt->files[cnt]);
		truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
		inode_unlock(dqopt->files[cnt]);
	}

	return 0;
}
EXPORT_SYMBOL(dquot_quota_sync);
static unsigned long
dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dquot *dquot;
	unsigned long freed = 0;

	spin_lock(&dq_list_lock);
	while (!list_empty(&free_dquots) && sc->nr_to_scan) {
		dquot = list_first_entry(&free_dquots, struct dquot, dq_free);
		remove_dquot_hash(dquot);
		remove_free_dquot(dquot);
		remove_inuse(dquot);
		do_destroy_dquot(dquot);
		sc->nr_to_scan--;
		freed++;
	}
	spin_unlock(&dq_list_lock);
	return freed;
}

static unsigned long
dqcache_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return vfs_pressure_ratio(
	percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS]));
}
/*
 * Safely release dquot and put reference to dquot.
 */
static void quota_release_workfn(struct work_struct *work)
{
	struct dquot *dquot;
	struct list_head rls_head;

	spin_lock(&dq_list_lock);
	/* Exchange the list head to avoid livelock. */
	list_replace_init(&releasing_dquots, &rls_head);
	spin_unlock(&dq_list_lock);
	synchronize_srcu(&dquot_srcu);

restart:
	spin_lock(&dq_list_lock);
	while (!list_empty(&rls_head)) {
		dquot = list_first_entry(&rls_head, struct dquot, dq_free);
		WARN_ON_ONCE(atomic_read(&dquot->dq_count));
		/*
		 * Note that DQ_RELEASING_B protects us from racing with
		 * invalidate_dquots() calls so we are safe to work with the
		 * dquot even after we drop dq_list_lock.
		 */
		if (dquot_dirty(dquot)) {
			spin_unlock(&dq_list_lock);
			/* Commit dquot before releasing */
			dquot_write_dquot(dquot);
			goto restart;
		}
		if (dquot_active(dquot)) {
			spin_unlock(&dq_list_lock);
			dquot->dq_sb->dq_op->release_dquot(dquot);
			goto restart;
		}
		/* Dquot is inactive and clean, now move it to free list */
		remove_free_dquot(dquot);
		put_dquot_last(dquot);
	}
	spin_unlock(&dq_list_lock);
}
/*
 * Put reference to dquot
 */
void dqput(struct dquot *dquot)
{
	if (!dquot)
		return;
#ifdef CONFIG_QUOTA_DEBUG
	if (!atomic_read(&dquot->dq_count)) {
		quota_error(dquot->dq_sb, "trying to free free dquot of %s %d",
			    quotatypes[dquot->dq_id.type],
			    from_kqid(&init_user_ns, dquot->dq_id));
		BUG();
	}
#endif
	dqstats_inc(DQST_DROPS);

	spin_lock(&dq_list_lock);
	if (atomic_read(&dquot->dq_count) > 1) {
		/* We have more than one user... nothing to do */
		atomic_dec(&dquot->dq_count);
		/* Releasing dquot during quotaoff phase? */
		if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_id.type) &&
		    atomic_read(&dquot->dq_count) == 1)
			wake_up(&dquot_ref_wq);
		spin_unlock(&dq_list_lock);
		return;
	}

	/* Need to release dquot? */
	WARN_ON_ONCE(!list_empty(&dquot->dq_free));
	put_releasing_dquots(dquot);
	atomic_dec(&dquot->dq_count);
	spin_unlock(&dq_list_lock);
	queue_delayed_work(system_unbound_wq, &quota_release_work, 1);
}
EXPORT_SYMBOL(dqput);
struct dquot *dquot_alloc(struct super_block *sb, int type)
{
	return kmem_cache_zalloc(dquot_cachep, GFP_NOFS);
}
EXPORT_SYMBOL(dquot_alloc);

static struct dquot *get_empty_dquot(struct super_block *sb, int type)
{
	struct dquot *dquot;

	dquot = sb->dq_op->alloc_dquot(sb, type);
	if (!dquot)
		return NULL;

	mutex_init(&dquot->dq_lock);
	INIT_LIST_HEAD(&dquot->dq_free);
	INIT_LIST_HEAD(&dquot->dq_inuse);
	INIT_HLIST_NODE(&dquot->dq_hash);
	INIT_LIST_HEAD(&dquot->dq_dirty);
	dquot->dq_sb = sb;
	dquot->dq_id = make_kqid_invalid(type);
	atomic_set(&dquot->dq_count, 1);
	spin_lock_init(&dquot->dq_dqb_lock);

	return dquot;
}
/*
 * Get reference to dquot
 *
 * Locking is slightly tricky here. We are guarded from parallel quotaoff()
 * destroying our dquot by:
 *   a) checking for quota flags under dq_list_lock and
 *   b) getting a reference to dquot before we release dq_list_lock
 */
struct dquot *dqget(struct super_block *sb, struct kqid qid)
{
	unsigned int hashent = hashfn(sb, qid);
	struct dquot *dquot, *empty = NULL;

	if (!qid_has_mapping(sb->s_user_ns, qid))
		return ERR_PTR(-EINVAL);

	if (!sb_has_quota_active(sb, qid.type))
		return ERR_PTR(-ESRCH);
we_slept:
	spin_lock(&dq_list_lock);
	spin_lock(&dq_state_lock);
	if (!sb_has_quota_active(sb, qid.type)) {
		spin_unlock(&dq_state_lock);
		spin_unlock(&dq_list_lock);
		dquot = ERR_PTR(-ESRCH);
		goto out;
	}
	spin_unlock(&dq_state_lock);

	dquot = find_dquot(hashent, sb, qid);
	if (!dquot) {
		if (!empty) {
			spin_unlock(&dq_list_lock);
			empty = get_empty_dquot(sb, qid.type);
			if (!empty)
				schedule();	/* Try to wait for a moment... */
			goto we_slept;
		}
		dquot = empty;
		empty = NULL;
		dquot->dq_id = qid;
		/* all dquots go on the inuse_list */
		put_inuse(dquot);
		/* hash it first so it can be found */
		insert_dquot_hash(dquot);
		spin_unlock(&dq_list_lock);
		dqstats_inc(DQST_LOOKUPS);
	} else {
		if (!atomic_read(&dquot->dq_count))
			remove_free_dquot(dquot);
		atomic_inc(&dquot->dq_count);
		spin_unlock(&dq_list_lock);
		dqstats_inc(DQST_CACHE_HITS);
		dqstats_inc(DQST_LOOKUPS);
	}
	/* Wait for dq_lock - after this we know that either dquot_release() is
	 * already finished or it will be canceled due to dq_count > 0 test */
	wait_on_dquot(dquot);
	/* Read the dquot / allocate space in quota file */
	if (!dquot_active(dquot)) {
		int err;

		err = sb->dq_op->acquire_dquot(dquot);
		if (err < 0) {
			dqput(dquot);
			dquot = ERR_PTR(err);
			goto out;
		}
	}
	/*
	 * Make sure following reads see filled structure - paired with
	 * smp_mb__before_atomic() in dquot_acquire().
	 */
	smp_rmb();
	/* Has somebody invalidated entry under us? */
	WARN_ON_ONCE(hlist_unhashed(&dquot->dq_hash));
out:
	if (empty)
		do_destroy_dquot(empty);

	return dquot;
}
EXPORT_SYMBOL(dqget);
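/*
 * Typical caller pattern, as a sketch only (the variable names here are
 * hypothetical and error handling is abbreviated):
 *
 *	struct dquot *dquot = dqget(sb, make_kqid_uid(uid));
 *
 *	if (IS_ERR(dquot))
 *		return PTR_ERR(dquot);	// e.g. -ESRCH if quota is not active
 *	... work with the active, referenced dquot ...
 *	dqput(dquot);
 */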
static inline struct dquot __rcu **i_dquot(struct inode *inode)
{
	return inode->i_sb->s_op->get_dquots(inode);
}

static int dqinit_needed(struct inode *inode, int type)
{
	struct dquot __rcu * const *dquots;
	int cnt;

	if (IS_NOQUOTA(inode))
		return 0;

	dquots = i_dquot(inode);
	if (type != -1)
		return !dquots[type];
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (!dquots[cnt])
			return 1;
	return 0;
}
/* This routine is guarded by s_umount semaphore */
static int add_dquot_ref(struct super_block *sb, int type)
{
	struct inode *inode, *old_inode = NULL;
#ifdef CONFIG_QUOTA_DEBUG
	int reserved = 0;
#endif
	int err = 0;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    !atomic_read(&inode->i_writecount) ||
		    !dqinit_needed(inode, type)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&sb->s_inode_list_lock);

#ifdef CONFIG_QUOTA_DEBUG
		if (unlikely(inode_get_rsv_space(inode) > 0))
			reserved = 1;
#endif
		iput(old_inode);
		err = __dquot_initialize(inode, type);
		if (err) {
			iput(inode);
			goto out;
		}

		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * s_inode_list_lock. We cannot iput the inode now as we can be
		 * holding the last reference and we cannot iput it under
		 * s_inode_list_lock. So we keep the reference and iput it
		 * later.
		 */
		old_inode = inode;
		cond_resched();
		spin_lock(&sb->s_inode_list_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
	iput(old_inode);
out:
#ifdef CONFIG_QUOTA_DEBUG
	if (reserved) {
		quota_error(sb, "Writes happened before quota was turned on "
			"thus quota information is probably inconsistent. "
			"Please run quotacheck(8)");
	}
#endif
	return err;
}
static void remove_dquot_ref(struct super_block *sb, int type)
{
	struct inode *inode;
#ifdef CONFIG_QUOTA_DEBUG
	int reserved = 0;
#endif

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		/*
		 * We have to scan also I_NEW inodes because they can already
		 * have quota pointer initialized. Luckily, we need to touch
		 * only quota pointers and these have separate locking
		 * (dq_data_lock).
		 */
		spin_lock(&dq_data_lock);
		if (!IS_NOQUOTA(inode)) {
			struct dquot __rcu **dquots = i_dquot(inode);
			struct dquot *dquot = srcu_dereference_check(
				dquots[type], &dquot_srcu,
				lockdep_is_held(&dq_data_lock));

#ifdef CONFIG_QUOTA_DEBUG
			if (unlikely(inode_get_rsv_space(inode) > 0))
				reserved = 1;
#endif
			rcu_assign_pointer(dquots[type], NULL);
			if (dquot)
				dqput(dquot);
		}
		spin_unlock(&dq_data_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
#ifdef CONFIG_QUOTA_DEBUG
	if (reserved) {
		printk(KERN_WARNING "VFS (%s): Writes happened after quota"
			" was disabled thus quota information is probably "
			"inconsistent. Please run quotacheck(8).\n", sb->s_id);
	}
#endif
}

/* Gather all references from inodes and drop them */
static void drop_dquot_ref(struct super_block *sb, int type)
{
	if (sb->dq_op)
		remove_dquot_ref(sb, type);
}
static inline
void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
{
	if (dquot->dq_dqb.dqb_rsvspace >= number)
		dquot->dq_dqb.dqb_rsvspace -= number;
	else {
		WARN_ON_ONCE(1);
		dquot->dq_dqb.dqb_rsvspace = 0;
	}
	if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
	    dquot->dq_dqb.dqb_bsoftlimit)
		dquot->dq_dqb.dqb_btime = (time64_t) 0;
	clear_bit(DQ_BLKS_B, &dquot->dq_flags);
}

static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
{
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
	    dquot->dq_dqb.dqb_curinodes >= number)
		dquot->dq_dqb.dqb_curinodes -= number;
	else
		dquot->dq_dqb.dqb_curinodes = 0;
	if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
		dquot->dq_dqb.dqb_itime = (time64_t) 0;
	clear_bit(DQ_INODES_B, &dquot->dq_flags);
}

static void dquot_decr_space(struct dquot *dquot, qsize_t number)
{
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
	    dquot->dq_dqb.dqb_curspace >= number)
		dquot->dq_dqb.dqb_curspace -= number;
	else
		dquot->dq_dqb.dqb_curspace = 0;
	if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
	    dquot->dq_dqb.dqb_bsoftlimit)
		dquot->dq_dqb.dqb_btime = (time64_t) 0;
	clear_bit(DQ_BLKS_B, &dquot->dq_flags);
}
struct dquot_warn {
	struct super_block *w_sb;
	struct kqid w_dq_id;
	short w_type;
};

static int warning_issued(struct dquot *dquot, const int warntype)
{
	int flag = (warntype == QUOTA_NL_BHARDWARN ||
		warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B :
		((warntype == QUOTA_NL_IHARDWARN ||
		warntype == QUOTA_NL_ISOFTLONGWARN) ? DQ_INODES_B : 0);

	if (!flag)
		return 0;
	return test_and_set_bit(flag, &dquot->dq_flags);
}
#ifdef CONFIG_PRINT_QUOTA_WARNING
static int flag_print_warnings = 1;

static int need_print_warning(struct dquot_warn *warn)
{
	if (!flag_print_warnings)
		return 0;

	switch (warn->w_dq_id.type) {
	case USRQUOTA:
		return uid_eq(current_fsuid(), warn->w_dq_id.uid);
	case GRPQUOTA:
		return in_group_p(warn->w_dq_id.gid);
	case PRJQUOTA:
		return 1;
	}
	return 0;
}

/* Print warning to user which exceeded quota */
static void print_warning(struct dquot_warn *warn)
{
	char *msg = NULL;
	struct tty_struct *tty;
	int warntype = warn->w_type;

	if (warntype == QUOTA_NL_IHARDBELOW ||
	    warntype == QUOTA_NL_ISOFTBELOW ||
	    warntype == QUOTA_NL_BHARDBELOW ||
	    warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(warn))
		return;

	tty = get_current_tty();
	if (!tty)
		return;
	tty_write_message(tty, warn->w_sb->s_id);
	if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN)
		tty_write_message(tty, ": warning, ");
	else
		tty_write_message(tty, ": write failed, ");
	tty_write_message(tty, quotatypes[warn->w_dq_id.type]);
	switch (warntype) {
	case QUOTA_NL_IHARDWARN:
		msg = " file limit reached.\r\n";
		break;
	case QUOTA_NL_ISOFTLONGWARN:
		msg = " file quota exceeded too long.\r\n";
		break;
	case QUOTA_NL_ISOFTWARN:
		msg = " file quota exceeded.\r\n";
		break;
	case QUOTA_NL_BHARDWARN:
		msg = " block limit reached.\r\n";
		break;
	case QUOTA_NL_BSOFTLONGWARN:
		msg = " block quota exceeded too long.\r\n";
		break;
	case QUOTA_NL_BSOFTWARN:
		msg = " block quota exceeded.\r\n";
		break;
	}
	tty_write_message(tty, msg);
	tty_kref_put(tty);
}
#endif
static void prepare_warning(struct dquot_warn *warn, struct dquot *dquot,
			    int warntype)
{
	if (warning_issued(dquot, warntype))
		return;
	warn->w_type = warntype;
	warn->w_sb = dquot->dq_sb;
	warn->w_dq_id = dquot->dq_id;
}

/*
 * Write warnings to the console and send warning messages over netlink.
 *
 * Note that this function can call into tty and networking code.
 */
static void flush_warnings(struct dquot_warn *warn)
{
	int i;

	for (i = 0; i < MAXQUOTAS; i++) {
		if (warn[i].w_type == QUOTA_NL_NOWARN)
			continue;
#ifdef CONFIG_PRINT_QUOTA_WARNING
		print_warning(&warn[i]);
#endif
		quota_send_warning(warn[i].w_dq_id,
				   warn[i].w_sb->s_dev, warn[i].w_type);
	}
}
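/*
 * Note (for orientation): quota_send_warning() emits the warning over the
 * quota netlink interface so that a userspace daemon can forward it to the
 * offending user; the tty path above is only compiled in with
 * CONFIG_PRINT_QUOTA_WARNING.
 */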
static int ignore_hardlimit(struct dquot *dquot)
{
	struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];

	return capable(CAP_SYS_RESOURCE) &&
	       (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
		!(info->dqi_flags & DQF_ROOT_SQUASH));
}
static int dquot_add_inodes(struct dquot *dquot, qsize_t inodes,
			    struct dquot_warn *warn)
{
	qsize_t newinodes;
	int ret = 0;

	spin_lock(&dquot->dq_dqb_lock);
	newinodes = dquot->dq_dqb.dqb_curinodes + inodes;
	if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		goto add;

	if (dquot->dq_dqb.dqb_ihardlimit &&
	    newinodes > dquot->dq_dqb.dqb_ihardlimit &&
	    !ignore_hardlimit(dquot)) {
		prepare_warning(warn, dquot, QUOTA_NL_IHARDWARN);
		ret = -EDQUOT;
		goto out;
	}

	if (dquot->dq_dqb.dqb_isoftlimit &&
	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime &&
	    ktime_get_real_seconds() >= dquot->dq_dqb.dqb_itime &&
	    !ignore_hardlimit(dquot)) {
		prepare_warning(warn, dquot, QUOTA_NL_ISOFTLONGWARN);
		ret = -EDQUOT;
		goto out;
	}

	if (dquot->dq_dqb.dqb_isoftlimit &&
	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime == 0) {
		prepare_warning(warn, dquot, QUOTA_NL_ISOFTWARN);
		dquot->dq_dqb.dqb_itime = ktime_get_real_seconds() +
		    sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type].dqi_igrace;
	}
add:
	dquot->dq_dqb.dqb_curinodes = newinodes;

out:
	spin_unlock(&dquot->dq_dqb_lock);
	return ret;
}
static int dquot_add_space(struct dquot *dquot, qsize_t space,
			   qsize_t rsv_space, unsigned int flags,
			   struct dquot_warn *warn)
{
	qsize_t tspace;
	struct super_block *sb = dquot->dq_sb;
	int ret = 0;

	spin_lock(&dquot->dq_dqb_lock);
	if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		goto finish;

	tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
		+ space + rsv_space;

	if (dquot->dq_dqb.dqb_bhardlimit &&
	    tspace > dquot->dq_dqb.dqb_bhardlimit &&
	    !ignore_hardlimit(dquot)) {
		if (flags & DQUOT_SPACE_WARN)
			prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN);
		ret = -EDQUOT;
		goto finish;
	}

	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime &&
	    ktime_get_real_seconds() >= dquot->dq_dqb.dqb_btime &&
	    !ignore_hardlimit(dquot)) {
		if (flags & DQUOT_SPACE_WARN)
			prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN);
		ret = -EDQUOT;
		goto finish;
	}

	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime == 0) {
		if (flags & DQUOT_SPACE_WARN) {
			prepare_warning(warn, dquot, QUOTA_NL_BSOFTWARN);
			dquot->dq_dqb.dqb_btime = ktime_get_real_seconds() +
			    sb_dqopt(sb)->info[dquot->dq_id.type].dqi_bgrace;
		} else {
			/*
			 * We don't allow preallocation to exceed softlimit so
			 * exceeding will always be printed
			 */
			ret = -EDQUOT;
			goto finish;
		}
	}
finish:
	/*
	 * We have to be careful and go through warning generation & grace time
	 * setting even if DQUOT_SPACE_NOFAIL is set. That's why we check it
	 * only here.
	 */
	if (flags & DQUOT_SPACE_NOFAIL)
		ret = 0;
	if (!ret) {
		dquot->dq_dqb.dqb_rsvspace += rsv_space;
		dquot->dq_dqb.dqb_curspace += space;
	}
	spin_unlock(&dquot->dq_dqb_lock);
	return ret;
}
static int info_idq_free(struct dquot *dquot, qsize_t inodes)
{
	qsize_t newinodes;

	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
	    dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
	    !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type))
		return QUOTA_NL_NOWARN;

	newinodes = dquot->dq_dqb.dqb_curinodes - inodes;
	if (newinodes <= dquot->dq_dqb.dqb_isoftlimit)
		return QUOTA_NL_ISOFTBELOW;
	if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
	    newinodes < dquot->dq_dqb.dqb_ihardlimit)
		return QUOTA_NL_IHARDBELOW;
	return QUOTA_NL_NOWARN;
}

static int info_bdq_free(struct dquot *dquot, qsize_t space)
{
	qsize_t tspace;

	tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace;

	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
	    tspace <= dquot->dq_dqb.dqb_bsoftlimit)
		return QUOTA_NL_NOWARN;

	if (tspace - space <= dquot->dq_dqb.dqb_bsoftlimit)
		return QUOTA_NL_BSOFTBELOW;
	if (tspace >= dquot->dq_dqb.dqb_bhardlimit &&
	    tspace - space < dquot->dq_dqb.dqb_bhardlimit)
		return QUOTA_NL_BHARDBELOW;
	return QUOTA_NL_NOWARN;
}
static int inode_quota_active(const struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (IS_NOQUOTA(inode))
		return 0;
	return sb_any_quota_loaded(sb) & ~sb_any_quota_suspended(sb);
}
/*
 * Initialize quota pointers in inode
 *
 * It is better to call this function outside of any transaction as it
 * might need a lot of space in journal for dquot structure allocation.
 */
static int __dquot_initialize(struct inode *inode, int type)
{
	int cnt, init_needed = 0;
	struct dquot __rcu **dquots;
	struct dquot *got[MAXQUOTAS] = {};
	struct super_block *sb = inode->i_sb;
	qsize_t rsv;
	int ret = 0;

	if (!inode_quota_active(inode))
		return 0;

	dquots = i_dquot(inode);

	/* First get references to structures we might need. */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		struct kqid qid;
		kprojid_t projid;
		int rc;
		struct dquot *dquot;

		if (type != -1 && cnt != type)
			continue;
		/*
		 * The i_dquot should have been initialized in most cases,
		 * we check it without locking here to avoid unnecessary
		 * dqget()/dqput() calls.
		 */
		if (dquots[cnt])
			continue;

		if (!sb_has_quota_active(sb, cnt))
			continue;

		init_needed = 1;

		switch (cnt) {
		case USRQUOTA:
			qid = make_kqid_uid(inode->i_uid);
			break;
		case GRPQUOTA:
			qid = make_kqid_gid(inode->i_gid);
			break;
		case PRJQUOTA:
			rc = inode->i_sb->dq_op->get_projid(inode, &projid);
			if (rc)
				continue;
			qid = make_kqid_projid(projid);
			break;
		}
		dquot = dqget(sb, qid);
		if (IS_ERR(dquot)) {
			/* We raced with somebody turning quotas off... */
			if (PTR_ERR(dquot) != -ESRCH) {
				ret = PTR_ERR(dquot);
				goto out_put;
			}
			dquot = NULL;
		}
		got[cnt] = dquot;
	}

	/* All required i_dquot has been initialized */
	if (!init_needed)
		return 0;

	spin_lock(&dq_data_lock);
	if (IS_NOQUOTA(inode))
		goto out_lock;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		/* Avoid races with quotaoff() */
		if (!sb_has_quota_active(sb, cnt))
			continue;
		/* We could race with quotaon or dqget() could have failed */
		if (!got[cnt])
			continue;
		if (!dquots[cnt]) {
			rcu_assign_pointer(dquots[cnt], got[cnt]);
			got[cnt] = NULL;
			/*
			 * Make quota reservation system happy if someone
			 * did a write before quota was turned on
			 */
			rsv = inode_get_rsv_space(inode);
			if (unlikely(rsv)) {
				struct dquot *dquot = srcu_dereference_check(
					dquots[cnt], &dquot_srcu,
					lockdep_is_held(&dq_data_lock));

				spin_lock(&inode->i_lock);
				/* Get reservation again under proper lock */
				rsv = __inode_get_rsv_space(inode);
				spin_lock(&dquot->dq_dqb_lock);
				dquot->dq_dqb.dqb_rsvspace += rsv;
				spin_unlock(&dquot->dq_dqb_lock);
				spin_unlock(&inode->i_lock);
			}
		}
	}
out_lock:
	spin_unlock(&dq_data_lock);
out_put:
	/* Drop unused references */
	dqput_all(got);

	return ret;
}

int dquot_initialize(struct inode *inode)
{
	return __dquot_initialize(inode, -1);
}
EXPORT_SYMBOL(dquot_initialize);
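/*
 * Callers typically invoke dquot_initialize() before operations that may
 * charge quota, e.g. (sketch only, hypothetical filesystem code):
 *
 *	err = dquot_initialize(dir);
 *	if (err)
 *		return err;
 *	... allocate the inode / blocks, which charge the dquots through
 *	    dquot_alloc_inode() and __dquot_alloc_space() via the quotaops.h
 *	    helpers ...
 */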
bool dquot_initialize_needed(struct inode *inode)
{
	struct dquot __rcu **dquots;
	int i;

	if (!inode_quota_active(inode))
		return false;

	dquots = i_dquot(inode);
	for (i = 0; i < MAXQUOTAS; i++)
		if (!dquots[i] && sb_has_quota_active(inode->i_sb, i))
			return true;
	return false;
}
EXPORT_SYMBOL(dquot_initialize_needed);
/*
 * Release all quotas referenced by inode.
 *
 * This function is only called when freeing an inode or when converting a
 * file to a quota file; there are no other users of i_dquot in either case,
 * so we needn't call synchronize_srcu() after clearing the pointers.
 */
static void __dquot_drop(struct inode *inode)
{
	int cnt;
	struct dquot __rcu **dquots = i_dquot(inode);
	struct dquot *put[MAXQUOTAS];

	spin_lock(&dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		put[cnt] = srcu_dereference_check(dquots[cnt], &dquot_srcu,
					lockdep_is_held(&dq_data_lock));
		rcu_assign_pointer(dquots[cnt], NULL);
	}
	spin_unlock(&dq_data_lock);
	dqput_all(put);
}

void dquot_drop(struct inode *inode)
{
	struct dquot __rcu * const *dquots;
	int cnt;

	if (IS_NOQUOTA(inode))
		return;

	/*
	 * Test before calling to rule out calls from proc and such
	 * where we are not allowed to block. Note that this is
	 * actually reliable test even without the lock - the caller
	 * must assure that nobody can come after the DQUOT_DROP and
	 * add quota pointers back anyway.
	 */
	dquots = i_dquot(inode);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (dquots[cnt])
			break;
	}

	if (cnt < MAXQUOTAS)
		__dquot_drop(inode);
}
EXPORT_SYMBOL(dquot_drop);
/*
 * inode_reserved_space is managed internally by quota, and protected by
 * i_lock similar to i_blocks+i_bytes.
 */
static qsize_t *inode_reserved_space(struct inode *inode)
{
	/* Filesystem must explicitly define its own method in order to use
	 * quota reservation interface */
	BUG_ON(!inode->i_sb->dq_op->get_reserved_space);
	return inode->i_sb->dq_op->get_reserved_space(inode);
}

static qsize_t __inode_get_rsv_space(struct inode *inode)
{
	if (!inode->i_sb->dq_op->get_reserved_space)
		return 0;
	return *inode_reserved_space(inode);
}

static qsize_t inode_get_rsv_space(struct inode *inode)
{
	qsize_t ret;

	if (!inode->i_sb->dq_op->get_reserved_space)
		return 0;
	spin_lock(&inode->i_lock);
	ret = __inode_get_rsv_space(inode);
	spin_unlock(&inode->i_lock);
	return ret;
}
/*
 * This function updates the i_blocks+i_bytes fields and the quota information
 * (together with appropriate checks).
 *
 * NOTE: We absolutely rely on the fact that caller dirties the inode
 * (usually helpers in quotaops.h care about this) and holds a handle for
 * the current transaction so that dquot write and inode write go into the
 * same transaction.
 */

/*
 * This operation can block, but only after everything is updated
 */
int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
{
	int cnt, ret = 0, index;
	struct dquot_warn warn[MAXQUOTAS];
	int reserve = flags & DQUOT_SPACE_RESERVE;
	struct dquot __rcu **dquots;
	struct dquot *dquot;

	if (!inode_quota_active(inode)) {
		if (reserve) {
			spin_lock(&inode->i_lock);
			*inode_reserved_space(inode) += number;
			spin_unlock(&inode->i_lock);
		} else {
			inode_add_bytes(inode, number);
		}
		goto out;
	}

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warn[cnt].w_type = QUOTA_NL_NOWARN;

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (!dquot)
			continue;
		if (reserve)
			ret = dquot_add_space(dquot, 0, number, flags, &warn[cnt]);
		else
			ret = dquot_add_space(dquot, number, 0, flags, &warn[cnt]);
		if (ret) {
			/* Back out changes we already did */
			for (cnt--; cnt >= 0; cnt--) {
				dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
				if (!dquot)
					continue;
				spin_lock(&dquot->dq_dqb_lock);
				if (reserve)
					dquot_free_reserved_space(dquot, number);
				else
					dquot_decr_space(dquot, number);
				spin_unlock(&dquot->dq_dqb_lock);
			}
			spin_unlock(&inode->i_lock);
			goto out_flush_warn;
		}
	}
	if (reserve)
		*inode_reserved_space(inode) += number;
	else
		__inode_add_bytes(inode, number);
	spin_unlock(&inode->i_lock);

	if (reserve)
		goto out_flush_warn;
	ret = mark_all_dquot_dirty(dquots);
out_flush_warn:
	srcu_read_unlock(&dquot_srcu, index);
	flush_warnings(warn);
out:
	return ret;
}
EXPORT_SYMBOL(__dquot_alloc_space);
/*
 * This operation can block, but only after everything is updated
 */
int dquot_alloc_inode(struct inode *inode)
{
	int cnt, ret = 0, index;
	struct dquot_warn warn[MAXQUOTAS];
	struct dquot __rcu * const *dquots;
	struct dquot *dquot;

	if (!inode_quota_active(inode))
		return 0;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warn[cnt].w_type = QUOTA_NL_NOWARN;

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (!dquot)
			continue;
		ret = dquot_add_inodes(dquot, 1, &warn[cnt]);
		if (ret) {
			for (cnt--; cnt >= 0; cnt--) {
				dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
				if (!dquot)
					continue;
				/* Back out changes we already did */
				spin_lock(&dquot->dq_dqb_lock);
				dquot_decr_inodes(dquot, 1);
				spin_unlock(&dquot->dq_dqb_lock);
			}
			goto warn_put_all;
		}
	}

warn_put_all:
	spin_unlock(&inode->i_lock);
	if (ret == 0)
		ret = mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
	flush_warnings(warn);
	return ret;
}
EXPORT_SYMBOL(dquot_alloc_inode);
/*
 * Convert in-memory reserved quotas to real consumed quotas
 */
void dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
{
	struct dquot __rcu **dquots;
	struct dquot *dquot;
	int cnt, index;

	if (!inode_quota_active(inode)) {
		spin_lock(&inode->i_lock);
		*inode_reserved_space(inode) -= number;
		__inode_add_bytes(inode, number);
		spin_unlock(&inode->i_lock);
		return;
	}

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	/* Claim reserved quotas to allocated quotas */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (dquot) {
			spin_lock(&dquot->dq_dqb_lock);
			if (WARN_ON_ONCE(dquot->dq_dqb.dqb_rsvspace < number))
				number = dquot->dq_dqb.dqb_rsvspace;
			dquot->dq_dqb.dqb_curspace += number;
			dquot->dq_dqb.dqb_rsvspace -= number;
			spin_unlock(&dquot->dq_dqb_lock);
		}
	}
	/* Update inode bytes */
	*inode_reserved_space(inode) -= number;
	__inode_add_bytes(inode, number);
	spin_unlock(&inode->i_lock);
	mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
}
EXPORT_SYMBOL(dquot_claim_space_nodirty);
/*
 * Convert allocated space back to in-memory reserved quotas
 */
void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
{
	struct dquot __rcu **dquots;
	struct dquot *dquot;
	int cnt, index;

	if (!inode_quota_active(inode)) {
		spin_lock(&inode->i_lock);
		*inode_reserved_space(inode) += number;
		__inode_sub_bytes(inode, number);
		spin_unlock(&inode->i_lock);
		return;
	}

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	/* Return allocated quotas back to reservations */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (dquot) {
			spin_lock(&dquot->dq_dqb_lock);
			if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number))
				number = dquot->dq_dqb.dqb_curspace;
			dquot->dq_dqb.dqb_rsvspace += number;
			dquot->dq_dqb.dqb_curspace -= number;
			spin_unlock(&dquot->dq_dqb_lock);
		}
	}
	/* Update inode bytes */
	*inode_reserved_space(inode) += number;
	__inode_sub_bytes(inode, number);
	spin_unlock(&inode->i_lock);
	mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
}
EXPORT_SYMBOL(dquot_reclaim_space_nodirty);
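/*
 * Sketch of the delayed-allocation pattern the claim/reclaim helpers above
 * support (illustrative only; the wrappers named here live in quotaops.h):
 *
 *	dquot_reserve_block(inode, n);	// reserve at write time
 *	... the real block allocation happens later ...
 *	dquot_claim_block(inode, n);	// convert reservation to usage
 * or, if the allocation is abandoned:
 *	dquot_release_reservation_block(inode, n);
 */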
/*
 * This operation can block, but only after everything is updated
 */
void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
{
	unsigned int cnt;
	struct dquot_warn warn[MAXQUOTAS];
	struct dquot __rcu **dquots;
	struct dquot *dquot;
	int reserve = flags & DQUOT_SPACE_RESERVE, index;

	if (!inode_quota_active(inode)) {
		if (reserve) {
			spin_lock(&inode->i_lock);
			*inode_reserved_space(inode) -= number;
			spin_unlock(&inode->i_lock);
		} else {
			inode_sub_bytes(inode, number);
		}
		return;
	}

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		int wtype;

		warn[cnt].w_type = QUOTA_NL_NOWARN;
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (!dquot)
			continue;
		spin_lock(&dquot->dq_dqb_lock);
		wtype = info_bdq_free(dquot, number);
		if (wtype != QUOTA_NL_NOWARN)
			prepare_warning(&warn[cnt], dquot, wtype);
		if (reserve)
			dquot_free_reserved_space(dquot, number);
		else
			dquot_decr_space(dquot, number);
		spin_unlock(&dquot->dq_dqb_lock);
	}
	if (reserve)
		*inode_reserved_space(inode) -= number;
	else
		__inode_sub_bytes(inode, number);
	spin_unlock(&inode->i_lock);

	if (reserve)
		goto out_unlock;
	mark_all_dquot_dirty(dquots);
out_unlock:
	srcu_read_unlock(&dquot_srcu, index);
	flush_warnings(warn);
}
EXPORT_SYMBOL(__dquot_free_space);
/*
 * This operation can block, but only after everything is updated
 */
void dquot_free_inode(struct inode *inode)
{
	unsigned int cnt;
	struct dquot_warn warn[MAXQUOTAS];
	struct dquot __rcu * const *dquots;
	struct dquot *dquot;
	int index;

	if (!inode_quota_active(inode))
		return;

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		int wtype;

		warn[cnt].w_type = QUOTA_NL_NOWARN;
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (!dquot)
			continue;
		spin_lock(&dquot->dq_dqb_lock);
		wtype = info_idq_free(dquot, 1);
		if (wtype != QUOTA_NL_NOWARN)
			prepare_warning(&warn[cnt], dquot, wtype);
		dquot_decr_inodes(dquot, 1);
		spin_unlock(&dquot->dq_dqb_lock);
	}
	spin_unlock(&inode->i_lock);
	mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
	flush_warnings(warn);
}
EXPORT_SYMBOL(dquot_free_inode);
/*
 * Transfer the number of inodes and blocks from one diskquota to another.
 * On success, dquot references in transfer_to are consumed and references
 * to original dquots that need to be released are placed there. On failure,
 * references are kept untouched.
 *
 * This operation can block, but only after everything is updated
 * A transaction must be started when entering this function.
 *
 * We are holding reference on transfer_from & transfer_to, no need to
 * protect them by srcu_read_lock().
 */
int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
{
	qsize_t cur_space;
	qsize_t rsv_space = 0;
	qsize_t inode_usage = 1;
	struct dquot __rcu **dquots;
	struct dquot *transfer_from[MAXQUOTAS] = {};
	int cnt, index, ret = 0, err;
	char is_valid[MAXQUOTAS] = {};
	struct dquot_warn warn_to[MAXQUOTAS];
	struct dquot_warn warn_from_inodes[MAXQUOTAS];
	struct dquot_warn warn_from_space[MAXQUOTAS];

	if (IS_NOQUOTA(inode))
		return 0;

	if (inode->i_sb->dq_op->get_inode_usage) {
		ret = inode->i_sb->dq_op->get_inode_usage(inode, &inode_usage);
		if (ret)
			return ret;
	}

	/* Initialize the arrays */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		warn_to[cnt].w_type = QUOTA_NL_NOWARN;
		warn_from_inodes[cnt].w_type = QUOTA_NL_NOWARN;
		warn_from_space[cnt].w_type = QUOTA_NL_NOWARN;
	}

	spin_lock(&dq_data_lock);
	spin_lock(&inode->i_lock);
	if (IS_NOQUOTA(inode)) {	/* File without quota accounting? */
		spin_unlock(&inode->i_lock);
		spin_unlock(&dq_data_lock);
		return 0;
	}
	cur_space = __inode_get_bytes(inode);
	rsv_space = __inode_get_rsv_space(inode);
	dquots = i_dquot(inode);
	/*
	 * Build the transfer_from list, check limits, and update usage in
	 * the target structures.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		/*
		 * Skip changes for same uid or gid or for turned off quota-type.
		 */
		if (!transfer_to[cnt])
			continue;
		/* Avoid races with quotaoff() */
		if (!sb_has_quota_active(inode->i_sb, cnt))
			continue;
		is_valid[cnt] = 1;
		transfer_from[cnt] = srcu_dereference_check(dquots[cnt],
				&dquot_srcu, lockdep_is_held(&dq_data_lock));
		ret = dquot_add_inodes(transfer_to[cnt], inode_usage,
				       &warn_to[cnt]);
		if (ret)
			goto over_quota;
		ret = dquot_add_space(transfer_to[cnt], cur_space, rsv_space,
				      DQUOT_SPACE_WARN, &warn_to[cnt]);
		if (ret) {
			spin_lock(&transfer_to[cnt]->dq_dqb_lock);
			dquot_decr_inodes(transfer_to[cnt], inode_usage);
			spin_unlock(&transfer_to[cnt]->dq_dqb_lock);
			goto over_quota;
		}
	}

	/* Decrease usage for source structures and update quota pointers */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!is_valid[cnt])
			continue;
		/* Due to IO error we might not have transfer_from[] structure */
		if (transfer_from[cnt]) {
			int wtype;

			spin_lock(&transfer_from[cnt]->dq_dqb_lock);
			wtype = info_idq_free(transfer_from[cnt], inode_usage);
			if (wtype != QUOTA_NL_NOWARN)
				prepare_warning(&warn_from_inodes[cnt],
						transfer_from[cnt], wtype);
			wtype = info_bdq_free(transfer_from[cnt],
					      cur_space + rsv_space);
			if (wtype != QUOTA_NL_NOWARN)
				prepare_warning(&warn_from_space[cnt],
						transfer_from[cnt], wtype);
			dquot_decr_inodes(transfer_from[cnt], inode_usage);
			dquot_decr_space(transfer_from[cnt], cur_space);
			dquot_free_reserved_space(transfer_from[cnt],
						  rsv_space);
			spin_unlock(&transfer_from[cnt]->dq_dqb_lock);
		}
		rcu_assign_pointer(dquots[cnt], transfer_to[cnt]);
	}
	spin_unlock(&inode->i_lock);
	spin_unlock(&dq_data_lock);

	/*
	 * These arrays are local and we hold dquot references so we don't need
	 * the srcu protection but still take dquot_srcu to avoid warning in
	 * mark_all_dquot_dirty().
	 */
	index = srcu_read_lock(&dquot_srcu);
	err = mark_all_dquot_dirty((struct dquot __rcu **)transfer_from);
	if (err < 0)
		ret = err;
	err = mark_all_dquot_dirty((struct dquot __rcu **)transfer_to);
	if (err < 0)
		ret = err;
	srcu_read_unlock(&dquot_srcu, index);

	flush_warnings(warn_to);
	flush_warnings(warn_from_inodes);
	flush_warnings(warn_from_space);
	/* Pass back references to put */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (is_valid[cnt])
			transfer_to[cnt] = transfer_from[cnt];
	return ret;
over_quota:
	/* Back out changes we already did */
	for (cnt--; cnt >= 0; cnt--) {
		if (!is_valid[cnt])
			continue;
		spin_lock(&transfer_to[cnt]->dq_dqb_lock);
		dquot_decr_inodes(transfer_to[cnt], inode_usage);
		dquot_decr_space(transfer_to[cnt], cur_space);
		dquot_free_reserved_space(transfer_to[cnt], rsv_space);
		spin_unlock(&transfer_to[cnt]->dq_dqb_lock);
	}
	spin_unlock(&inode->i_lock);
	spin_unlock(&dq_data_lock);
	flush_warnings(warn_to);
	return ret;
}
EXPORT_SYMBOL(__dquot_transfer);
/* Wrapper for transferring ownership of an inode for uid/gid only
 * Called from FSXXX_setattr()
 */
int dquot_transfer(struct mnt_idmap *idmap, struct inode *inode,
		   struct iattr *iattr)
{
	struct dquot *transfer_to[MAXQUOTAS] = {};
	struct dquot *dquot;
	struct super_block *sb = inode->i_sb;
	int ret;

	if (!inode_quota_active(inode))
		return 0;

	if (i_uid_needs_update(idmap, iattr, inode)) {
		kuid_t kuid = from_vfsuid(idmap, i_user_ns(inode),
					  iattr->ia_vfsuid);

		dquot = dqget(sb, make_kqid_uid(kuid));
		if (IS_ERR(dquot)) {
			if (PTR_ERR(dquot) != -ESRCH) {
				ret = PTR_ERR(dquot);
				goto out_put;
			}
			dquot = NULL;
		}
		transfer_to[USRQUOTA] = dquot;
	}
	if (i_gid_needs_update(idmap, iattr, inode)) {
		kgid_t kgid = from_vfsgid(idmap, i_user_ns(inode),
					  iattr->ia_vfsgid);

		dquot = dqget(sb, make_kqid_gid(kgid));
		if (IS_ERR(dquot)) {
			if (PTR_ERR(dquot) != -ESRCH) {
				ret = PTR_ERR(dquot);
				goto out_put;
			}
			dquot = NULL;
		}
		transfer_to[GRPQUOTA] = dquot;
	}
	ret = __dquot_transfer(inode, transfer_to);
out_put:
	dqput_all(transfer_to);
	return ret;
}
EXPORT_SYMBOL(dquot_transfer);
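/*
 * Usage sketch (illustrative, not part of this file): a filesystem's
 * ->setattr() typically calls dquot_transfer() before committing a new
 * owner, roughly as below. "myfs_setattr" is a hypothetical example
 * modelled on what simple disk filesystems do:
 *
 *	static int myfs_setattr(struct mnt_idmap *idmap,
 *				struct dentry *dentry, struct iattr *iattr)
 *	{
 *		struct inode *inode = d_inode(dentry);
 *		int error = setattr_prepare(idmap, dentry, iattr);
 *
 *		if (error)
 *			return error;
 *		if (is_quota_modification(idmap, inode, iattr)) {
 *			error = dquot_initialize(inode);
 *			if (error)
 *				return error;
 *		}
 *		if (i_uid_needs_update(idmap, iattr, inode) ||
 *		    i_gid_needs_update(idmap, iattr, inode)) {
 *			error = dquot_transfer(idmap, inode, iattr);
 *			if (error)
 *				return error;
 *		}
 *		setattr_copy(idmap, inode, iattr);
 *		mark_inode_dirty(inode);
 *		return 0;
 *	}
 */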
/*
 * Write info of quota file to disk
 */
int dquot_commit_info(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);

	return dqopt->ops[type]->write_file_info(sb, type);
}
EXPORT_SYMBOL(dquot_commit_info);
int dquot_get_next_id(struct super_block *sb, struct kqid *qid)
{
	struct quota_info *dqopt = sb_dqopt(sb);

	if (!sb_has_quota_active(sb, qid->type))
		return -ESRCH;
	if (!dqopt->ops[qid->type]->get_next_id)
		return -ENOSYS;
	return dqopt->ops[qid->type]->get_next_id(sb, qid);
}
EXPORT_SYMBOL(dquot_get_next_id);
/*
 * Definitions of diskquota operations.
 */
const struct dquot_operations dquot_operations = {
	.write_dquot	= dquot_commit,
	.acquire_dquot	= dquot_acquire,
	.release_dquot	= dquot_release,
	.mark_dirty	= dquot_mark_dquot_dirty,
	.write_info	= dquot_commit_info,
	.alloc_dquot	= dquot_alloc,
	.destroy_dquot	= dquot_destroy,
	.get_next_id	= dquot_get_next_id,
};
EXPORT_SYMBOL(dquot_operations);
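/*
 * Wiring sketch (illustrative): filesystems that rely on this generic
 * dquot layer point their superblock at it during mount, for example
 * ("myfs_quotactl_ops" is hypothetical):
 *
 *	sb->dq_op = &dquot_operations;
 *	sb->s_qcop = &myfs_quotactl_ops;
 *	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
 */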
/*
 * Generic helper for ->open on filesystems supporting disk quotas.
 */
int dquot_file_open(struct inode *inode, struct file *file)
{
	int error;

	error = generic_file_open(inode, file);
	if (!error && (file->f_mode & FMODE_WRITE))
		error = dquot_initialize(inode);
	return error;
}
EXPORT_SYMBOL(dquot_file_open);
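/*
 * Usage sketch (illustrative): plugged into a filesystem's
 * file_operations so quotas are initialized before the first write
 * through a writable open:
 *
 *	const struct file_operations myfs_file_operations = {
 *		...
 *		.open		= dquot_file_open,
 *	};
 */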
static void vfs_cleanup_quota_inode(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	struct inode *inode = dqopt->files[type];

	if (!inode)
		return;
	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		inode_lock(inode);
		inode->i_flags &= ~S_NOQUOTA;
		inode_unlock(inode);
	}
	dqopt->files[type] = NULL;
	iput(inode);
}
/*
 * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
 */
int dquot_disable(struct super_block *sb, int type, unsigned int flags)
{
	int cnt;
	struct quota_info *dqopt = sb_dqopt(sb);

	rwsem_assert_held_write(&sb->s_umount);

	/* Cannot turn off usage accounting without turning off limits, or
	 * suspend quotas and simultaneously turn quotas off. */
	if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED))
	    || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED |
	    DQUOT_USAGE_ENABLED)))
		return -EINVAL;

	/*
	 * Skip everything if there's nothing to do. We have to do this because
	 * sometimes we are called when fill_super() failed and calling
	 * sync_fs() in such cases does no good.
	 */
	if (!sb_any_quota_loaded(sb))
		return 0;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_loaded(sb, cnt))
			continue;

		if (flags & DQUOT_SUSPENDED) {
			spin_lock(&dq_state_lock);
			dqopt->flags |=
				dquot_state_flag(DQUOT_SUSPENDED, cnt);
			spin_unlock(&dq_state_lock);
		} else {
			spin_lock(&dq_state_lock);
			dqopt->flags &= ~dquot_state_flag(flags, cnt);
			/* Turning off suspended quotas? */
			if (!sb_has_quota_loaded(sb, cnt) &&
			    sb_has_quota_suspended(sb, cnt)) {
				dqopt->flags &= ~dquot_state_flag(
							DQUOT_SUSPENDED, cnt);
				spin_unlock(&dq_state_lock);
				vfs_cleanup_quota_inode(sb, cnt);
				continue;
			}
			spin_unlock(&dq_state_lock);
		}

		/* We still have to keep quota loaded? */
		if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED))
			continue;

		/* Note: these are blocking operations */
		drop_dquot_ref(sb, cnt);
		invalidate_dquots(sb, cnt);
		/*
		 * Now all dquots should be invalidated, all writes done so we
		 * should be only users of the info. No locks needed.
		 */
		if (info_dirty(&dqopt->info[cnt]))
			sb->dq_op->write_info(sb, cnt);
		if (dqopt->ops[cnt]->free_file_info)
			dqopt->ops[cnt]->free_file_info(sb, cnt);
		put_quota_format(dqopt->info[cnt].dqi_format);
		dqopt->info[cnt].dqi_flags = 0;
		dqopt->info[cnt].dqi_igrace = 0;
		dqopt->info[cnt].dqi_bgrace = 0;
		dqopt->ops[cnt] = NULL;
	}

	/* Skip syncing and setting flags if quota files are hidden */
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
		goto put_inodes;

	/* Sync the superblock so that buffers with quota data are written to
	 * disk (and so userspace sees correct data afterwards). */
	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, 1);
	sync_blockdev(sb->s_bdev);
	/* Now the quota files are just ordinary files and we can set the
	 * inode flags back. Moreover we discard the pagecache so that
	 * userspace sees the writes we did bypassing the pagecache. We
	 * must also discard the blockdev buffers so that we see the
	 * changes done by userspace on the next quotaon() */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (!sb_has_quota_loaded(sb, cnt) && dqopt->files[cnt]) {
			inode_lock(dqopt->files[cnt]);
			truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
			inode_unlock(dqopt->files[cnt]);
		}
	if (sb->s_bdev)
		invalidate_bdev(sb->s_bdev);
put_inodes:
	/* We are done when suspending quotas */
	if (flags & DQUOT_SUSPENDED)
		return 0;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (!sb_has_quota_loaded(sb, cnt))
			vfs_cleanup_quota_inode(sb, cnt);
	return 0;
}
EXPORT_SYMBOL(dquot_disable);
int dquot_quota_off(struct super_block *sb, int type)
{
	return dquot_disable(sb, type,
			     DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
}
EXPORT_SYMBOL(dquot_quota_off);
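/*
 * Usage sketch (illustrative): per the comment on dquot_disable() above,
 * at unmount a filesystem with visible quota files turns everything off
 * for all types before dropping its private state ("myfs_put_super" is
 * hypothetical):
 *
 *	static void myfs_put_super(struct super_block *sb)
 *	{
 *		dquot_disable(sb, -1,
 *			      DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
 *		...
 *	}
 */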
/*
 *	Turn quotas on on a device
 */

static int vfs_setup_quota_inode(struct inode *inode, int type)
{
	struct super_block *sb = inode->i_sb;
	struct quota_info *dqopt = sb_dqopt(sb);

	if (is_bad_inode(inode))
		return -EUCLEAN;
	if (!S_ISREG(inode->i_mode))
		return -EACCES;
	if (IS_RDONLY(inode))
		return -EROFS;
	if (sb_has_quota_loaded(sb, type))
		return -EBUSY;

	/*
	 * Quota files should never be encrypted. They should be thought of as
	 * filesystem metadata, not user data. New-style internal quota files
	 * cannot be encrypted by users anyway, but old-style external quota
	 * files could potentially be incorrectly created in an encrypted
	 * directory, hence this explicit check. Some reasons why encrypted
	 * quota files don't work include: (1) some filesystems that support
	 * encryption don't handle it in their quota_read and quota_write, and
	 * (2) cleaning up encrypted quota files at unmount would need special
	 * consideration, as quota files are cleaned up later than user files.
	 */
	if (IS_ENCRYPTED(inode))
		return -EINVAL;

	dqopt->files[type] = igrab(inode);
	if (!dqopt->files[type])
		return -EIO;
	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		/* We don't want quota and atime on quota files (deadlocks
		 * possible) Also nobody should write to the file - we use
		 * special IO operations which ignore the immutable bit. */
		inode_lock(inode);
		inode->i_flags |= S_NOQUOTA;
		inode_unlock(inode);
		/*
		 * When S_NOQUOTA is set, remove dquot references as no more
		 * references can be added
		 */
		__dquot_drop(inode);
	}
	return 0;
}
int dquot_load_quota_sb(struct super_block *sb, int type, int format_id,
	unsigned int flags)
{
	struct quota_format_type *fmt;
	struct quota_info *dqopt = sb_dqopt(sb);
	int error;

	lockdep_assert_held_write(&sb->s_umount);

	/* Just unsuspend quotas? */
	if (WARN_ON_ONCE(flags & DQUOT_SUSPENDED))
		return -EINVAL;

	fmt = find_quota_format(format_id);
	if (!fmt)
		return -ESRCH;
	if (!sb->dq_op || !sb->s_qcop ||
	    (type == PRJQUOTA && sb->dq_op->get_projid == NULL)) {
		error = -EINVAL;
		goto out_fmt;
	}
	/* Filesystems outside of init_user_ns not yet supported */
	if (sb->s_user_ns != &init_user_ns) {
		error = -EINVAL;
		goto out_fmt;
	}
	/* Usage always has to be set... */
	if (!(flags & DQUOT_USAGE_ENABLED)) {
		error = -EINVAL;
		goto out_fmt;
	}
	if (sb_has_quota_loaded(sb, type)) {
		error = -EBUSY;
		goto out_fmt;
	}

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		/* As we bypass the pagecache we must now flush all the
		 * dirty data and invalidate caches so that kernel sees
		 * changes from userspace. It is not enough to just flush
		 * the quota file since if blocksize < pagesize, invalidation
		 * of the cache could fail because of other unrelated dirty
		 * data */
		sync_filesystem(sb);
		invalidate_bdev(sb->s_bdev);
	}

	error = -EINVAL;
	if (!fmt->qf_ops->check_quota_file(sb, type))
		goto out_fmt;

	dqopt->ops[type] = fmt->qf_ops;
	dqopt->info[type].dqi_format = fmt;
	dqopt->info[type].dqi_fmt_id = format_id;
	INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
	error = dqopt->ops[type]->read_file_info(sb, type);
	if (error < 0)
		goto out_fmt;
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) {
		spin_lock(&dq_data_lock);
		dqopt->info[type].dqi_flags |= DQF_SYS_FILE;
		spin_unlock(&dq_data_lock);
	}
	spin_lock(&dq_state_lock);
	dqopt->flags |= dquot_state_flag(flags, type);
	spin_unlock(&dq_state_lock);

	error = add_dquot_ref(sb, type);
	if (error)
		dquot_disable(sb, type,
			      DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);

	return error;
out_fmt:
	put_quota_format(fmt);

	return error;
}
EXPORT_SYMBOL(dquot_load_quota_sb);
/*
 * More powerful function for turning on quotas on given quota inode allowing
 * setting of individual quota flags
 */
int dquot_load_quota_inode(struct inode *inode, int type, int format_id,
	unsigned int flags)
{
	int err;

	err = vfs_setup_quota_inode(inode, type);
	if (err < 0)
		return err;
	err = dquot_load_quota_sb(inode->i_sb, type, format_id, flags);
	if (err < 0)
		vfs_cleanup_quota_inode(inode->i_sb, type);
	return err;
}
EXPORT_SYMBOL(dquot_load_quota_inode);
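/*
 * Usage sketch (illustrative assumption): filesystems with hidden system
 * quota inodes (DQUOT_QUOTA_SYS_FILE) may enable accounting alone and
 * let a later Q_QUOTAON add enforcement; qf_inode, type and format_id
 * below stand in for the filesystem's own values:
 *
 *	err = dquot_load_quota_inode(qf_inode, type, format_id,
 *				     DQUOT_USAGE_ENABLED);
 */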
/* Reenable quotas on remount RW */
int dquot_resume(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int ret = 0, cnt;
	unsigned int flags;

	rwsem_assert_held_write(&sb->s_umount);

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_suspended(sb, cnt))
			continue;

		spin_lock(&dq_state_lock);
		flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
							DQUOT_LIMITS_ENABLED,
							cnt);
		dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, cnt);
		spin_unlock(&dq_state_lock);

		flags = dquot_generic_flag(flags, cnt);
		ret = dquot_load_quota_sb(sb, cnt, dqopt->info[cnt].dqi_fmt_id,
					  flags);
		if (ret < 0)
			vfs_cleanup_quota_inode(sb, cnt);
	}

	return ret;
}
EXPORT_SYMBOL(dquot_resume);
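/*
 * Usage sketch (illustrative): a filesystem that suspended quotas while
 * read-only resumes all types once the superblock becomes writable
 * again during remount:
 *
 *	if (!sb_rdonly(sb))
 *		err = dquot_resume(sb, -1);
 */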
int dquot_quota_on(struct super_block *sb, int type, int format_id,
		   const struct path *path)
{
	int error = security_quota_on(path->dentry);

	if (error)
		return error;
	/* Quota file not on the same filesystem? */
	if (path->dentry->d_sb != sb)
		error = -EXDEV;
	else
		error = dquot_load_quota_inode(d_inode(path->dentry), type,
					       format_id, DQUOT_USAGE_ENABLED |
					       DQUOT_LIMITS_ENABLED);
	return error;
}
EXPORT_SYMBOL(dquot_quota_on);
/*
 * This function is used when filesystem needs to initialize quotas
 * during mount time.
 */
int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
		int format_id, int type)
{
	struct dentry *dentry;
	int error;

	dentry = lookup_positive_unlocked(qf_name, sb->s_root, strlen(qf_name));
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	error = security_quota_on(dentry);
	if (!error)
		error = dquot_load_quota_inode(d_inode(dentry), type, format_id,
					       DQUOT_USAGE_ENABLED |
					       DQUOT_LIMITS_ENABLED);

	dput(dentry);
	return error;
}
EXPORT_SYMBOL(dquot_quota_on_mount);
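/*
 * Usage sketch (illustrative): with journalled quota mount options the
 * filesystem enables quota on the configured file name while mounting;
 * qf_name and format_id come from the filesystem's mount options:
 *
 *	err = dquot_quota_on_mount(sb, qf_name, format_id, type);
 */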
static int dquot_quota_enable(struct super_block *sb, unsigned int flags)
{
	int ret;
	int type;
	struct quota_info *dqopt = sb_dqopt(sb);

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
		return -ENOSYS;
	/* Accounting cannot be turned on while fs is mounted */
	flags &= ~(FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT);
	if (!flags)
		return -EINVAL;
	for (type = 0; type < MAXQUOTAS; type++) {
		if (!(flags & qtype_enforce_flag(type)))
			continue;
		/* Can't enforce without accounting */
		if (!sb_has_quota_usage_enabled(sb, type)) {
			ret = -EINVAL;
			goto out_err;
		}
		if (sb_has_quota_limits_enabled(sb, type)) {
			/* compatible with XFS */
			ret = -EEXIST;
			goto out_err;
		}
		spin_lock(&dq_state_lock);
		dqopt->flags |= dquot_state_flag(DQUOT_LIMITS_ENABLED, type);
		spin_unlock(&dq_state_lock);
	}
	return 0;
out_err:
	/* Backout enforcement enablement we already did */
	for (type--; type >= 0; type--) {
		if (flags & qtype_enforce_flag(type))
			dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
	}
	return ret;
}
static int dquot_quota_disable(struct super_block *sb, unsigned int flags)
{
	int ret;
	int type;
	struct quota_info *dqopt = sb_dqopt(sb);

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
		return -ENOSYS;
	/*
	 * We don't support turning off accounting via quotactl. In principle
	 * quota infrastructure can do this but filesystems don't expect
	 * userspace to be able to do it.
	 */
	if (flags &
		  (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT))
		return -EOPNOTSUPP;

	/* Filter out limits not enabled */
	for (type = 0; type < MAXQUOTAS; type++)
		if (!sb_has_quota_limits_enabled(sb, type))
			flags &= ~qtype_enforce_flag(type);
	/* Nothing left? */
	if (!flags)
		return -EEXIST;
	for (type = 0; type < MAXQUOTAS; type++) {
		if (flags & qtype_enforce_flag(type)) {
			ret = dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
			if (ret < 0)
				goto out_err;
		}
	}
	return 0;
out_err:
	/* Backout enforcement disabling we already did */
	for (type--; type >= 0; type--) {
		if (flags & qtype_enforce_flag(type)) {
			spin_lock(&dq_state_lock);
			dqopt->flags |=
				dquot_state_flag(DQUOT_LIMITS_ENABLED, type);
			spin_unlock(&dq_state_lock);
		}
	}
	return ret;
}
/* Generic routine for getting common part of quota structure */
static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di)
{
	struct mem_dqblk *dm = &dquot->dq_dqb;

	memset(di, 0, sizeof(*di));
	spin_lock(&dquot->dq_dqb_lock);
	di->d_spc_hardlimit = dm->dqb_bhardlimit;
	di->d_spc_softlimit = dm->dqb_bsoftlimit;
	di->d_ino_hardlimit = dm->dqb_ihardlimit;
	di->d_ino_softlimit = dm->dqb_isoftlimit;
	di->d_space = dm->dqb_curspace + dm->dqb_rsvspace;
	di->d_ino_count = dm->dqb_curinodes;
	di->d_spc_timer = dm->dqb_btime;
	di->d_ino_timer = dm->dqb_itime;
	spin_unlock(&dquot->dq_dqb_lock);
}
int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
		    struct qc_dqblk *di)
{
	struct dquot *dquot;

	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	do_get_dqblk(dquot, di);
	dqput(dquot);

	return 0;
}
EXPORT_SYMBOL(dquot_get_dqblk);
int dquot_get_next_dqblk(struct super_block *sb, struct kqid *qid,
			 struct qc_dqblk *di)
{
	struct dquot *dquot;
	int err;

	if (!sb->dq_op->get_next_id)
		return -ENOSYS;
	err = sb->dq_op->get_next_id(sb, qid);
	if (err < 0)
		return err;
	dquot = dqget(sb, *qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	do_get_dqblk(dquot, di);
	dqput(dquot);

	return 0;
}
EXPORT_SYMBOL(dquot_get_next_dqblk);
#define VFS_QC_MASK \
	(QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \
	 QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \
	 QC_SPC_TIMER | QC_INO_TIMER)
/* Generic routine for setting common part of quota structure */
static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
{
	struct mem_dqblk *dm = &dquot->dq_dqb;
	int check_blim = 0, check_ilim = 0;
	struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
	int ret;

	if (di->d_fieldmask & ~VFS_QC_MASK)
		return -EINVAL;

	if (((di->d_fieldmask & QC_SPC_SOFT) &&
	     di->d_spc_softlimit > dqi->dqi_max_spc_limit) ||
	    ((di->d_fieldmask & QC_SPC_HARD) &&
	     di->d_spc_hardlimit > dqi->dqi_max_spc_limit) ||
	    ((di->d_fieldmask & QC_INO_SOFT) &&
	     (di->d_ino_softlimit > dqi->dqi_max_ino_limit)) ||
	    ((di->d_fieldmask & QC_INO_HARD) &&
	     (di->d_ino_hardlimit > dqi->dqi_max_ino_limit)))
		return -ERANGE;

	spin_lock(&dquot->dq_dqb_lock);
	if (di->d_fieldmask & QC_SPACE) {
		dm->dqb_curspace = di->d_space - dm->dqb_rsvspace;
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_SPC_SOFT)
		dm->dqb_bsoftlimit = di->d_spc_softlimit;
	if (di->d_fieldmask & QC_SPC_HARD)
		dm->dqb_bhardlimit = di->d_spc_hardlimit;
	if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) {
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_COUNT) {
		dm->dqb_curinodes = di->d_ino_count;
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_SOFT)
		dm->dqb_isoftlimit = di->d_ino_softlimit;
	if (di->d_fieldmask & QC_INO_HARD)
		dm->dqb_ihardlimit = di->d_ino_hardlimit;
	if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) {
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_SPC_TIMER) {
		dm->dqb_btime = di->d_spc_timer;
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_TIMER) {
		dm->dqb_itime = di->d_ino_timer;
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
	}

	if (check_blim) {
		if (!dm->dqb_bsoftlimit ||
		    dm->dqb_curspace + dm->dqb_rsvspace <= dm->dqb_bsoftlimit) {
			dm->dqb_btime = 0;
			clear_bit(DQ_BLKS_B, &dquot->dq_flags);
		} else if (!(di->d_fieldmask & QC_SPC_TIMER))
			/* Set grace only if user hasn't provided his own... */
			dm->dqb_btime = ktime_get_real_seconds() + dqi->dqi_bgrace;
	}
	if (check_ilim) {
		if (!dm->dqb_isoftlimit ||
		    dm->dqb_curinodes <= dm->dqb_isoftlimit) {
			dm->dqb_itime = 0;
			clear_bit(DQ_INODES_B, &dquot->dq_flags);
		} else if (!(di->d_fieldmask & QC_INO_TIMER))
			/* Set grace only if user hasn't provided his own... */
			dm->dqb_itime = ktime_get_real_seconds() + dqi->dqi_igrace;
	}
	if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit ||
	    dm->dqb_isoftlimit)
		clear_bit(DQ_FAKE_B, &dquot->dq_flags);
	else
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
	spin_unlock(&dquot->dq_dqb_lock);
	ret = mark_dquot_dirty(dquot);
	if (ret < 0)
		return ret;
	return 0;
}
int dquot_set_dqblk(struct super_block *sb, struct kqid qid,
		    struct qc_dqblk *di)
{
	struct dquot *dquot;
	int rc;

	dquot = dqget(sb, qid);
	if (IS_ERR(dquot)) {
		rc = PTR_ERR(dquot);
		goto out;
	}
	rc = do_set_dqblk(dquot, di);
	dqput(dquot);
out:
	return rc;
}
EXPORT_SYMBOL(dquot_set_dqblk);
/* Generic routine for getting common part of quota file information */
int dquot_get_state(struct super_block *sb, struct qc_state *state)
{
	struct mem_dqinfo *mi;
	struct qc_type_state *tstate;
	struct quota_info *dqopt = sb_dqopt(sb);
	int type;

	memset(state, 0, sizeof(*state));
	for (type = 0; type < MAXQUOTAS; type++) {
		if (!sb_has_quota_active(sb, type))
			continue;
		tstate = state->s_state + type;
		mi = sb_dqopt(sb)->info + type;
		tstate->flags = QCI_ACCT_ENABLED;
		spin_lock(&dq_data_lock);
		if (mi->dqi_flags & DQF_SYS_FILE)
			tstate->flags |= QCI_SYSFILE;
		if (mi->dqi_flags & DQF_ROOT_SQUASH)
			tstate->flags |= QCI_ROOT_SQUASH;
		if (sb_has_quota_limits_enabled(sb, type))
			tstate->flags |= QCI_LIMITS_ENFORCED;
		tstate->spc_timelimit = mi->dqi_bgrace;
		tstate->ino_timelimit = mi->dqi_igrace;
		if (dqopt->files[type]) {
			tstate->ino = dqopt->files[type]->i_ino;
			tstate->blocks = dqopt->files[type]->i_blocks;
		}
		tstate->nextents = 1;	/* We don't know... */
		spin_unlock(&dq_data_lock);
	}
	return 0;
}
EXPORT_SYMBOL(dquot_get_state);
/* Generic routine for setting common part of quota file information */
int dquot_set_dqinfo(struct super_block *sb, int type, struct qc_info *ii)
{
	struct mem_dqinfo *mi;

	if ((ii->i_fieldmask & QC_WARNS_MASK) ||
	    (ii->i_fieldmask & QC_RT_SPC_TIMER))
		return -EINVAL;
	if (!sb_has_quota_active(sb, type))
		return -ESRCH;
	mi = sb_dqopt(sb)->info + type;
	if (ii->i_fieldmask & QC_FLAGS) {
		if ((ii->i_flags & QCI_ROOT_SQUASH &&
		     mi->dqi_format->qf_fmt_id != QFMT_VFS_OLD))
			return -EINVAL;
	}
	spin_lock(&dq_data_lock);
	if (ii->i_fieldmask & QC_SPC_TIMER)
		mi->dqi_bgrace = ii->i_spc_timelimit;
	if (ii->i_fieldmask & QC_INO_TIMER)
		mi->dqi_igrace = ii->i_ino_timelimit;
	if (ii->i_fieldmask & QC_FLAGS) {
		if (ii->i_flags & QCI_ROOT_SQUASH)
			mi->dqi_flags |= DQF_ROOT_SQUASH;
		else
			mi->dqi_flags &= ~DQF_ROOT_SQUASH;
	}
	spin_unlock(&dq_data_lock);
	mark_info_dirty(sb, type);
	/* Force write to disk */
	return sb->dq_op->write_info(sb, type);
}
EXPORT_SYMBOL(dquot_set_dqinfo);
const struct quotactl_ops dquot_quotactl_sysfile_ops = {
	.quota_enable	= dquot_quota_enable,
	.quota_disable	= dquot_quota_disable,
	.quota_sync	= dquot_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
	.set_dqblk	= dquot_set_dqblk
};
EXPORT_SYMBOL(dquot_quotactl_sysfile_ops);
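/*
 * Wiring sketch (illustrative): filesystems whose quota files are
 * hidden system inodes (DQUOT_QUOTA_SYS_FILE) export this table
 * directly from their superblock:
 *
 *	sb->s_qcop = &dquot_quotactl_sysfile_ops;
 */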
static int do_proc_dqstats(const struct ctl_table *table, int write,
			   void *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned int type = (unsigned long *)table->data - dqstats.stat;
	s64 value = percpu_counter_sum(&dqstats.counter[type]);

	/* Filter negative values for non-monotonic counters */
	if (value < 0 && (type == DQST_ALLOC_DQUOTS ||
			  type == DQST_FREE_DQUOTS))
		value = 0;

	/* Update global table */
	dqstats.stat[type] = value;
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
static struct ctl_table fs_dqstats_table[] = {
	{
		.procname	= "lookups",
		.data		= &dqstats.stat[DQST_LOOKUPS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "drops",
		.data		= &dqstats.stat[DQST_DROPS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "reads",
		.data		= &dqstats.stat[DQST_READS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "writes",
		.data		= &dqstats.stat[DQST_WRITES],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "cache_hits",
		.data		= &dqstats.stat[DQST_CACHE_HITS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "allocated_dquots",
		.data		= &dqstats.stat[DQST_ALLOC_DQUOTS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "free_dquots",
		.data		= &dqstats.stat[DQST_FREE_DQUOTS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "syncs",
		.data		= &dqstats.stat[DQST_SYNCS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
#ifdef CONFIG_PRINT_QUOTA_WARNING
	{
		.procname	= "warnings",
		.data		= &flag_print_warnings,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#endif
};
static int __init dquot_init(void)
{
	int i, ret;
	unsigned long nr_hash, order;
	struct shrinker *dqcache_shrinker;

	printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);

	register_sysctl_init("fs/quota", fs_dqstats_table);

	dquot_cachep = kmem_cache_create("dquot",
			sizeof(struct dquot), sizeof(unsigned long) * 4,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_PANIC),
			NULL);

	order = 0;
	dquot_hash = (struct hlist_head *)__get_free_pages(GFP_KERNEL, order);
	if (!dquot_hash)
		panic("Cannot create dquot hash table");

	ret = percpu_counter_init_many(dqstats.counter, 0, GFP_KERNEL,
				       _DQST_DQSTAT_LAST);
	if (ret)
		panic("Cannot create dquot stat counters");

	/* Find power-of-two hlist_heads which can fit into allocation */
	nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
	dq_hash_bits = ilog2(nr_hash);

	nr_hash = 1UL << dq_hash_bits;
	dq_hash_mask = nr_hash - 1;
	for (i = 0; i < nr_hash; i++)
		INIT_HLIST_HEAD(dquot_hash + i);

	pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld,"
		" %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order));

	dqcache_shrinker = shrinker_alloc(0, "dquota-cache");
	if (!dqcache_shrinker)
		panic("Cannot allocate dquot shrinker");

	dqcache_shrinker->count_objects = dqcache_shrink_count;
	dqcache_shrinker->scan_objects = dqcache_shrink_scan;

	shrinker_register(dqcache_shrinker);

	return 0;
}
fs_initcall(dquot_init);