// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of operations over global quota file
 */

#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/quota.h>
#include <linux/quotaops.h>
#include <linux/dqblk_qtree.h>
#include <linux/jiffies.h>
#include <linux/writeback.h>
#include <linux/workqueue.h>
#include <linux/llist.h>
#include <linux/iversion.h>

#include <cluster/masklog.h>

#include "ocfs2.h"
#include "alloc.h"
#include "blockcheck.h"
#include "dlmglue.h"
#include "file.h"
#include "inode.h"
#include "journal.h"
#include "sysfile.h"
#include "super.h"
#include "buffer_head_io.h"
#include "quota.h"
#include "uptodate.h"
#include "ocfs2_trace.h"

/*
 * Locking of quotas with OCFS2 is rather complex. Here are rules that
 * should be obeyed by all the functions:
 * - any write of quota structure (either to local or global file) is protected
 *   by dqio_sem or dquot->dq_lock.
 * - any modification of global quota file holds inode cluster lock, i_mutex,
 *   and ip_alloc_sem of the global quota file (achieved by
 *   ocfs2_lock_global_qf). It also has to hold qinfo_lock.
 * - an allocation of new blocks for local quota file is protected by
 *   its ip_alloc_sem
 *
 * A rough sketch of locking dependencies (lf = local file, gf = global file):
 * Normal filesystem operation:
 *   start_trans -> dqio_sem -> write to lf
 * Syncing of local and global file:
 *   ocfs2_lock_global_qf -> start_trans -> dqio_sem -> qinfo_lock ->
 *     write to gf -> write to lf
 * Acquire dquot for the first time:
 *   dq_lock -> ocfs2_lock_global_qf -> qinfo_lock -> read from gf
 *   -> alloc space for gf
 *   -> start_trans -> qinfo_lock -> write to gf
 *   -> ip_alloc_sem of lf -> alloc space for lf
 *   -> write to lf
 * Release last reference to dquot:
 *   dq_lock -> ocfs2_lock_global_qf -> start_trans -> qinfo_lock -> write to gf
 *   -> write to lf
 * Note that all the above operations also hold the inode cluster lock of lf.
 * Recovery:
 *   inode cluster lock of recovered lf
 *   -> read bitmaps -> ip_alloc_sem of lf
 *   -> ocfs2_lock_global_qf -> start_trans -> dqio_sem -> qinfo_lock ->
 *      write to gf
 */

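/*
 * For example, the "syncing" order above is the one followed by the periodic
 * sync helper ocfs2_sync_dquot_helper() below: ocfs2_lock_global_qf() ->
 * ocfs2_start_trans() -> dqio_sem -> ocfs2_sync_dquot() (which takes
 * qinfo_lock and writes to the global file) -> ocfs2_local_write_dquot().
 */
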
static void qsync_work_fn(struct work_struct *work);

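/*
 * Helpers converting the on-disk (little-endian) global dquot block to and
 * from the in-memory mem_dqblk. On the disk-to-memory path, fields the admin
 * has just set locally (the DQ_LASTSET_B + QIF_* bits) are left untouched so
 * that a pending setquota is not overwritten by stale on-disk values.
 */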
static void ocfs2_global_disk2memdqb(struct dquot *dquot, void *dp)
{
        struct ocfs2_global_disk_dqblk *d = dp;
        struct mem_dqblk *m = &dquot->dq_dqb;

        /* Update from disk only entries not set by the admin */
        if (!test_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags)) {
                m->dqb_ihardlimit = le64_to_cpu(d->dqb_ihardlimit);
                m->dqb_isoftlimit = le64_to_cpu(d->dqb_isoftlimit);
        }
        if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
                m->dqb_curinodes = le64_to_cpu(d->dqb_curinodes);
        if (!test_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags)) {
                m->dqb_bhardlimit = le64_to_cpu(d->dqb_bhardlimit);
                m->dqb_bsoftlimit = le64_to_cpu(d->dqb_bsoftlimit);
        }
        if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
                m->dqb_curspace = le64_to_cpu(d->dqb_curspace);
        if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags))
                m->dqb_btime = le64_to_cpu(d->dqb_btime);
        if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags))
                m->dqb_itime = le64_to_cpu(d->dqb_itime);
        OCFS2_DQUOT(dquot)->dq_use_count = le32_to_cpu(d->dqb_use_count);
}

static void ocfs2_global_mem2diskdqb(void *dp, struct dquot *dquot)
{
        struct ocfs2_global_disk_dqblk *d = dp;
        struct mem_dqblk *m = &dquot->dq_dqb;

        d->dqb_id = cpu_to_le32(from_kqid(&init_user_ns, dquot->dq_id));
        d->dqb_use_count = cpu_to_le32(OCFS2_DQUOT(dquot)->dq_use_count);
        d->dqb_ihardlimit = cpu_to_le64(m->dqb_ihardlimit);
        d->dqb_isoftlimit = cpu_to_le64(m->dqb_isoftlimit);
        d->dqb_curinodes = cpu_to_le64(m->dqb_curinodes);
        d->dqb_bhardlimit = cpu_to_le64(m->dqb_bhardlimit);
        d->dqb_bsoftlimit = cpu_to_le64(m->dqb_bsoftlimit);
        d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
        d->dqb_btime = cpu_to_le64(m->dqb_btime);
        d->dqb_itime = cpu_to_le64(m->dqb_itime);
        d->dqb_pad1 = d->dqb_pad2 = 0;
}

static int ocfs2_global_is_id(void *dp, struct dquot *dquot)
{
        struct ocfs2_global_disk_dqblk *d = dp;
        struct ocfs2_mem_dqinfo *oinfo =
                        sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv;

        if (qtree_entry_unused(&oinfo->dqi_gi, dp))
                return 0;

        return qid_eq(make_kqid(&init_user_ns, dquot->dq_id.type,
                                le32_to_cpu(d->dqb_id)),
                      dquot->dq_id);
}

const struct qtree_fmt_operations ocfs2_global_ops = {
        .mem2disk_dqblk = ocfs2_global_mem2diskdqb,
        .disk2mem_dqblk = ocfs2_global_disk2memdqb,
        .is_id          = ocfs2_global_is_id,
};

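/*
 * Validate a quota block read from disk by verifying the metaecc checksum
 * stored in its trailer; used as the validation hook passed to
 * ocfs2_read_blocks() in ocfs2_read_quota_phys_block() below.
 */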
int ocfs2_validate_quota_block(struct super_block *sb, struct buffer_head *bh)
{
        struct ocfs2_disk_dqtrailer *dqt =
                ocfs2_block_dqtrailer(sb->s_blocksize, bh->b_data);

        trace_ocfs2_validate_quota_block((unsigned long long)bh->b_blocknr);

        BUG_ON(!buffer_uptodate(bh));

        /*
         * If the ecc fails, we return the error but otherwise
         * leave the filesystem running.  We know any error is
         * local to this block.
         */
        return ocfs2_validate_meta_ecc(sb, bh->b_data, &dqt->dq_check);
}

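/*
 * Read a single quota block by its physical block number; the caller has
 * already resolved the logical-to-physical mapping (typically via
 * ocfs2_extent_map_get_blocks()), so no extent map lookup is done here.
 */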
int ocfs2_read_quota_phys_block(struct inode *inode, u64 p_block,
                                struct buffer_head **bhp)
{
        int rc;

        *bhp = NULL;
        rc = ocfs2_read_blocks(INODE_CACHE(inode), p_block, 1, bhp, 0,
                               ocfs2_validate_quota_block);
        if (rc)
                mlog_errno(rc);
        return rc;
}

/* Read data from global quotafile - avoid pagecache and such because we cannot
 * afford acquiring the locks... We use quota cluster lock to serialize
 * operations. Caller is responsible for acquiring it. */
ssize_t ocfs2_quota_read(struct super_block *sb, int type, char *data,
                         size_t len, loff_t off)
{
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        struct inode *gqinode = oinfo->dqi_gqinode;
        loff_t i_size = i_size_read(gqinode);
        int offset = off & (sb->s_blocksize - 1);
        sector_t blk = off >> sb->s_blocksize_bits;
        int err = 0;
        struct buffer_head *bh;
        size_t toread, tocopy;
        u64 pblock = 0, pcount = 0;

        if (off > i_size)
                return 0;
        if (off + len > i_size)
                len = i_size - off;
        toread = len;
        while (toread > 0) {
                tocopy = min_t(size_t, (sb->s_blocksize - offset), toread);
                if (!pcount) {
                        err = ocfs2_extent_map_get_blocks(gqinode, blk, &pblock,
                                                          &pcount, NULL);
                        if (err) {
                                mlog_errno(err);
                                return err;
                        }
                } else {
                        pcount--;
                        pblock++;
                }
                bh = NULL;
                err = ocfs2_read_quota_phys_block(gqinode, pblock, &bh);
                if (err) {
                        mlog_errno(err);
                        return err;
                }
                memcpy(data, bh->b_data + offset, tocopy);
                brelse(bh);
                offset = 0;
                toread -= tocopy;
                data += tocopy;
                blk++;
        }
        return len;
}

/* Write to quotafile (we know the transaction is already started and has
 * enough credits) */
ssize_t ocfs2_quota_write(struct super_block *sb, int type,
                          const char *data, size_t len, loff_t off)
{
        struct mem_dqinfo *info = sb_dqinfo(sb, type);
        struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
        struct inode *gqinode = oinfo->dqi_gqinode;
        int offset = off & (sb->s_blocksize - 1);
        sector_t blk = off >> sb->s_blocksize_bits;
        int err = 0, new = 0, ja_type;
        struct buffer_head *bh = NULL;
        handle_t *handle = journal_current_handle();
        u64 pblock, pcount;

        if (!handle) {
                mlog(ML_ERROR, "Quota write (off=%llu, len=%llu) cancelled "
                     "because transaction was not started.\n",
                     (unsigned long long)off, (unsigned long long)len);
                return -EIO;
        }

        if (len > sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset) {
                WARN_ON(1);
                len = sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset;
        }

        if (i_size_read(gqinode) < off + len) {
                loff_t rounded_end =
                                ocfs2_align_bytes_to_blocks(sb, off + len);

                /* Space is already allocated in ocfs2_acquire_dquot() */
                err = ocfs2_simple_size_update(gqinode,
                                               oinfo->dqi_gqi_bh,
                                               rounded_end);
                if (err < 0)
                        goto out;
                new = 1;
        }
        err = ocfs2_extent_map_get_blocks(gqinode, blk, &pblock, &pcount, NULL);
        if (err) {
                mlog_errno(err);
                goto out;
        }
        /* Not rewriting whole block? */
        if ((offset || len < sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE) &&
            !new) {
                err = ocfs2_read_quota_phys_block(gqinode, pblock, &bh);
                ja_type = OCFS2_JOURNAL_ACCESS_WRITE;
        } else {
                bh = sb_getblk(sb, pblock);
                if (!bh)
                        err = -ENOMEM;
                ja_type = OCFS2_JOURNAL_ACCESS_CREATE;
        }
        if (err) {
                mlog_errno(err);
                goto out;
        }
        lock_buffer(bh);
        if (new)
                memset(bh->b_data, 0, sb->s_blocksize);
        memcpy(bh->b_data + offset, data, len);
        flush_dcache_page(bh->b_page);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);
        ocfs2_set_buffer_uptodate(INODE_CACHE(gqinode), bh);
        err = ocfs2_journal_access_dq(handle, INODE_CACHE(gqinode), bh,
                                      ja_type);
        if (err < 0) {
                brelse(bh);
                goto out;
        }
        ocfs2_journal_dirty(handle, bh);
        brelse(bh);
out:
        if (err) {
                mlog_errno(err);
                return err;
        }
        inode_inc_iversion(gqinode);
        ocfs2_mark_inode_dirty(handle, gqinode, oinfo->dqi_gqi_bh);
        return len;
}

int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
        int status;
        struct buffer_head *bh = NULL;

        status = ocfs2_inode_lock(oinfo->dqi_gqinode, &bh, ex);
        if (status < 0)
                return status;
        spin_lock(&dq_data_lock);
        if (!oinfo->dqi_gqi_count++)
                oinfo->dqi_gqi_bh = bh;
        else
                WARN_ON(bh != oinfo->dqi_gqi_bh);
        spin_unlock(&dq_data_lock);
        if (ex) {
                inode_lock(oinfo->dqi_gqinode);
                down_write(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
        } else {
                down_read(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
        }
        return 0;
}

void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
        if (ex) {
                up_write(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
                inode_unlock(oinfo->dqi_gqinode);
        } else {
                up_read(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
        }
        ocfs2_inode_unlock(oinfo->dqi_gqinode, ex);
        brelse(oinfo->dqi_gqi_bh);
        spin_lock(&dq_data_lock);
        if (!--oinfo->dqi_gqi_count)
                oinfo->dqi_gqi_bh = NULL;
        spin_unlock(&dq_data_lock);
}

/* Read information header from global quota file */
int ocfs2_global_read_info(struct super_block *sb, int type)
{
        struct inode *gqinode = NULL;
        unsigned int ino[OCFS2_MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE,
                                              GROUP_QUOTA_SYSTEM_INODE };
        struct ocfs2_global_disk_dqinfo dinfo;
        struct mem_dqinfo *info = sb_dqinfo(sb, type);
        struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
        u64 pcount;
        int status;

        /* Read global header */
        gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
                        OCFS2_INVALID_SLOT);
        if (!gqinode) {
                mlog(ML_ERROR, "failed to get global quota inode (type=%d)\n",
                     type);
                status = -EINVAL;
                goto out_err;
        }

        oinfo->dqi_gi.dqi_sb = sb;
        oinfo->dqi_gi.dqi_type = type;
        ocfs2_qinfo_lock_res_init(&oinfo->dqi_gqlock, oinfo);
        oinfo->dqi_gi.dqi_entry_size = sizeof(struct ocfs2_global_disk_dqblk);
        oinfo->dqi_gi.dqi_ops = &ocfs2_global_ops;
        oinfo->dqi_gqi_bh = NULL;
        oinfo->dqi_gqi_count = 0;
        oinfo->dqi_gqinode = gqinode;
        status = ocfs2_lock_global_qf(oinfo, 0);
        if (status < 0) {
                mlog_errno(status);
                goto out_err;
        }

        status = ocfs2_extent_map_get_blocks(gqinode, 0, &oinfo->dqi_giblk,
                                             &pcount, NULL);
        if (status < 0)
                goto out_unlock;

        status = ocfs2_qinfo_lock(oinfo, 0);
        if (status < 0)
                goto out_unlock;
        status = sb->s_op->quota_read(sb, type, (char *)&dinfo,
                                      sizeof(struct ocfs2_global_disk_dqinfo),
                                      OCFS2_GLOBAL_INFO_OFF);
        ocfs2_qinfo_unlock(oinfo, 0);
        ocfs2_unlock_global_qf(oinfo, 0);
        if (status != sizeof(struct ocfs2_global_disk_dqinfo)) {
                mlog(ML_ERROR, "Cannot read global quota info (%d).\n",
                     status);
                if (status >= 0)
                        status = -EIO;
                mlog_errno(status);
                goto out_err;
        }

        info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
        info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
        oinfo->dqi_syncms = le32_to_cpu(dinfo.dqi_syncms);
        oinfo->dqi_gi.dqi_blocks = le32_to_cpu(dinfo.dqi_blocks);
        oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk);
        oinfo->dqi_gi.dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry);
        oinfo->dqi_gi.dqi_blocksize_bits = sb->s_blocksize_bits;
        oinfo->dqi_gi.dqi_usable_bs = sb->s_blocksize -
                                        OCFS2_QBLK_RESERVED_SPACE;
        oinfo->dqi_gi.dqi_qtree_depth = qtree_depth(&oinfo->dqi_gi);
        INIT_DELAYED_WORK(&oinfo->dqi_sync_work, qsync_work_fn);
        schedule_delayed_work(&oinfo->dqi_sync_work,
                              msecs_to_jiffies(oinfo->dqi_syncms));

        return 0;
out_unlock:
        ocfs2_unlock_global_qf(oinfo, 0);
out_err:
        if (status)
                mlog_errno(status);
        return status;
}

/* Write information to global quota file. Expects exclusive lock on quota
 * file inode and quota info */
static int __ocfs2_global_write_info(struct super_block *sb, int type)
{
        struct mem_dqinfo *info = sb_dqinfo(sb, type);
        struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
        struct ocfs2_global_disk_dqinfo dinfo;
        ssize_t size;

        spin_lock(&dq_data_lock);
        info->dqi_flags &= ~DQF_INFO_DIRTY;
        dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
        dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
        spin_unlock(&dq_data_lock);
        dinfo.dqi_syncms = cpu_to_le32(oinfo->dqi_syncms);
        dinfo.dqi_blocks = cpu_to_le32(oinfo->dqi_gi.dqi_blocks);
        dinfo.dqi_free_blk = cpu_to_le32(oinfo->dqi_gi.dqi_free_blk);
        dinfo.dqi_free_entry = cpu_to_le32(oinfo->dqi_gi.dqi_free_entry);
        size = sb->s_op->quota_write(sb, type, (char *)&dinfo,
                                     sizeof(struct ocfs2_global_disk_dqinfo),
                                     OCFS2_GLOBAL_INFO_OFF);
        if (size != sizeof(struct ocfs2_global_disk_dqinfo)) {
                mlog(ML_ERROR, "Cannot write global quota info structure\n");
                return -EIO;
        }
        return 0;
}

int ocfs2_global_write_info(struct super_block *sb, int type)
{
        int err;
        struct quota_info *dqopt = sb_dqopt(sb);
        struct ocfs2_mem_dqinfo *info = dqopt->info[type].dqi_priv;

        down_write(&dqopt->dqio_sem);
        err = ocfs2_qinfo_lock(info, 1);
        if (err < 0)
                goto out_sem;
        err = __ocfs2_global_write_info(sb, type);
        ocfs2_qinfo_unlock(info, 1);
out_sem:
        up_write(&dqopt->dqio_sem);
        return err;
}

static int ocfs2_global_qinit_alloc(struct super_block *sb, int type)
{
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

        /*
         * We may need to allocate tree blocks and a leaf block but not the
         * root block
         */
        return oinfo->dqi_gi.dqi_qtree_depth;
}

static int ocfs2_calc_global_qinit_credits(struct super_block *sb, int type)
{
        /* We modify all the allocated blocks, tree root, info block and
         * the inode */
        return (ocfs2_global_qinit_alloc(sb, type) + 2) *
                        OCFS2_QUOTA_BLOCK_UPDATE_CREDITS + 1;
}

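/*
 * Worked example of the formula above, assuming a quota tree depth of 1:
 * ocfs2_global_qinit_alloc() returns 1, so the transaction reserves
 * (1 + 2) * OCFS2_QUOTA_BLOCK_UPDATE_CREDITS block-update credits (the
 * possibly allocated tree/leaf block, the tree root and the info block)
 * plus one credit for the global quota inode itself.
 */
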
/* Sync local information about quota modifications with global quota file.
 * Caller must have started the transaction and obtained exclusive lock for
 * global quota file inode */
int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
{
        int err, err2;
        struct super_block *sb = dquot->dq_sb;
        int type = dquot->dq_id.type;
        struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
        struct ocfs2_global_disk_dqblk dqblk;
        s64 spacechange, inodechange;
        time64_t olditime, oldbtime;

        err = sb->s_op->quota_read(sb, type, (char *)&dqblk,
                                   sizeof(struct ocfs2_global_disk_dqblk),
                                   dquot->dq_off);
        if (err != sizeof(struct ocfs2_global_disk_dqblk)) {
                if (err >= 0) {
                        mlog(ML_ERROR, "Short read from global quota file "
                                       "(%u read)\n", err);
                        err = -EIO;
                }
                goto out;
        }

        /* Update space and inode usage. Get also other information from
         * global quota file so that we don't overwrite any changes there. */
        spin_lock(&dquot->dq_dqb_lock);
        spacechange = dquot->dq_dqb.dqb_curspace -
                                        OCFS2_DQUOT(dquot)->dq_origspace;
        inodechange = dquot->dq_dqb.dqb_curinodes -
                                        OCFS2_DQUOT(dquot)->dq_originodes;
        olditime = dquot->dq_dqb.dqb_itime;
        oldbtime = dquot->dq_dqb.dqb_btime;
        ocfs2_global_disk2memdqb(dquot, &dqblk);
        trace_ocfs2_sync_dquot(from_kqid(&init_user_ns, dquot->dq_id),
                               dquot->dq_dqb.dqb_curspace,
                               (long long)spacechange,
                               dquot->dq_dqb.dqb_curinodes,
                               (long long)inodechange);
        if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
                dquot->dq_dqb.dqb_curspace += spacechange;
        if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
                dquot->dq_dqb.dqb_curinodes += inodechange;
        /* Set properly space grace time... */
        if (dquot->dq_dqb.dqb_bsoftlimit &&
            dquot->dq_dqb.dqb_curspace > dquot->dq_dqb.dqb_bsoftlimit) {
                if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags) &&
                    oldbtime > 0) {
                        if (dquot->dq_dqb.dqb_btime > 0)
                                dquot->dq_dqb.dqb_btime =
                                        min(dquot->dq_dqb.dqb_btime, oldbtime);
                        else
                                dquot->dq_dqb.dqb_btime = oldbtime;
                }
        } else {
                dquot->dq_dqb.dqb_btime = 0;
                clear_bit(DQ_BLKS_B, &dquot->dq_flags);
        }
        /* Set properly inode grace time... */
        if (dquot->dq_dqb.dqb_isoftlimit &&
            dquot->dq_dqb.dqb_curinodes > dquot->dq_dqb.dqb_isoftlimit) {
                if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags) &&
                    olditime > 0) {
                        if (dquot->dq_dqb.dqb_itime > 0)
                                dquot->dq_dqb.dqb_itime =
                                        min(dquot->dq_dqb.dqb_itime, olditime);
                        else
                                dquot->dq_dqb.dqb_itime = olditime;
                }
        } else {
                dquot->dq_dqb.dqb_itime = 0;
                clear_bit(DQ_INODES_B, &dquot->dq_flags);
        }
        /* All information is properly updated, clear the flags */
        __clear_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
        OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
        OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
        spin_unlock(&dquot->dq_dqb_lock);
        err = ocfs2_qinfo_lock(info, freeing);
        if (err < 0) {
                mlog(ML_ERROR, "Failed to lock quota info, losing quota write"
                               " (type=%d, id=%u)\n", dquot->dq_id.type,
                               (unsigned)from_kqid(&init_user_ns, dquot->dq_id));
                goto out;
        }
        if (freeing)
                OCFS2_DQUOT(dquot)->dq_use_count--;
        err = qtree_write_dquot(&info->dqi_gi, dquot);
        if (err < 0)
                goto out_qlock;
        if (freeing && !OCFS2_DQUOT(dquot)->dq_use_count) {
                err = qtree_release_dquot(&info->dqi_gi, dquot);
                if (info_dirty(sb_dqinfo(sb, type))) {
                        err2 = __ocfs2_global_write_info(sb, type);
                        if (!err)
                                err = err2;
                }
        }
out_qlock:
        ocfs2_qinfo_unlock(info, freeing);
out:
        if (err < 0)
                mlog_errno(err);
        return err;
}

/*
 * Functions for periodic syncing of dquots with global file
 */
static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
{
        handle_t *handle;
        struct super_block *sb = dquot->dq_sb;
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        struct ocfs2_super *osb = OCFS2_SB(sb);
        int status = 0;

        trace_ocfs2_sync_dquot_helper(from_kqid(&init_user_ns, dquot->dq_id),
                                      dquot->dq_id.type,
                                      type, sb->s_id);
        if (type != dquot->dq_id.type)
                goto out;
        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;

        handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        down_write(&sb_dqopt(sb)->dqio_sem);
        status = ocfs2_sync_dquot(dquot);
        if (status < 0)
                mlog_errno(status);
        /* We have to write local structure as well... */
        status = ocfs2_local_write_dquot(dquot);
        if (status < 0)
                mlog_errno(status);
        up_write(&sb_dqopt(sb)->dqio_sem);
        ocfs2_commit_trans(osb, handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        return status;
}

static void qsync_work_fn(struct work_struct *work)
{
        struct ocfs2_mem_dqinfo *oinfo = container_of(work,
                                                      struct ocfs2_mem_dqinfo,
                                                      dqi_sync_work.work);
        struct super_block *sb = oinfo->dqi_gqinode->i_sb;

        /*
         * We have to be careful here not to deadlock on s_umount as umount
         * disabling quotas may be in progress and it waits for this work to
         * complete. If trylock fails, we'll do the sync next time...
         */
        if (down_read_trylock(&sb->s_umount)) {
                dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type);
                up_read(&sb->s_umount);
        }
        schedule_delayed_work(&oinfo->dqi_sync_work,
                              msecs_to_jiffies(oinfo->dqi_syncms));
}

/*
 * Wrappers for generic quota functions
 */

static int ocfs2_write_dquot(struct dquot *dquot)
{
        handle_t *handle;
        struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
        int status = 0;

        trace_ocfs2_write_dquot(from_kqid(&init_user_ns, dquot->dq_id),
                                dquot->dq_id.type);

        handle = ocfs2_start_trans(osb, OCFS2_QWRITE_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out;
        }
        down_write(&sb_dqopt(dquot->dq_sb)->dqio_sem);
        status = ocfs2_local_write_dquot(dquot);
        up_write(&sb_dqopt(dquot->dq_sb)->dqio_sem);
        ocfs2_commit_trans(osb, handle);
out:
        return status;
}

static int ocfs2_calc_qdel_credits(struct super_block *sb, int type)
{
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        /*
         * We modify tree, leaf block, global info, local chunk header,
         * global and local inode; OCFS2_QINFO_WRITE_CREDITS already
         * accounts for inode update
         */
        return (oinfo->dqi_gi.dqi_qtree_depth + 2) *
               OCFS2_QUOTA_BLOCK_UPDATE_CREDITS +
               OCFS2_QINFO_WRITE_CREDITS +
               OCFS2_INODE_UPDATE_CREDITS;
}

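/*
 * One way to read the formula above, again assuming a quota tree depth of 1:
 * (1 + 2) * OCFS2_QUOTA_BLOCK_UPDATE_CREDITS covers the tree path, the leaf
 * block and the local chunk header, OCFS2_QINFO_WRITE_CREDITS covers the
 * global info block (and the global inode update it already accounts for),
 * and OCFS2_INODE_UPDATE_CREDITS covers the local quota inode.
 */
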
void ocfs2_drop_dquot_refs(struct work_struct *work)
{
        struct ocfs2_super *osb = container_of(work, struct ocfs2_super,
                                               dquot_drop_work);
        struct llist_node *list;
        struct ocfs2_dquot *odquot, *next_odquot;

        list = llist_del_all(&osb->dquot_drop_list);
        llist_for_each_entry_safe(odquot, next_odquot, list, list) {
                /* Drop the reference we acquired in ocfs2_dquot_release() */
                dqput(&odquot->dq_dquot);
        }
}

/*
 * Called when the last reference to dquot is dropped. If we are called from
 * downconvert thread, we cannot do all the handling here because grabbing
 * quota lock could deadlock (the node holding the quota lock could need some
 * other cluster lock to proceed but with blocked downconvert thread we cannot
 * release any lock).
 */
static int ocfs2_release_dquot(struct dquot *dquot)
{
        handle_t *handle;
        struct ocfs2_mem_dqinfo *oinfo =
                        sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv;
        struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
        int status = 0;

        trace_ocfs2_release_dquot(from_kqid(&init_user_ns, dquot->dq_id),
                                  dquot->dq_id.type);

        mutex_lock(&dquot->dq_lock);
        /* Check whether we are not racing with some other dqget() */
        if (atomic_read(&dquot->dq_count) > 1)
                goto out;
        /* Running from downconvert thread? Postpone quota processing to wq */
        if (current == osb->dc_task) {
                /*
                 * Grab our own reference to dquot and queue it for delayed
                 * dropping.  Quota code rechecks after calling
                 * ->release_dquot() and won't free dquot structure.
                 */
                dqgrab(dquot);
                /* First entry on list -> queue work */
                if (llist_add(&OCFS2_DQUOT(dquot)->list, &osb->dquot_drop_list))
                        queue_work(osb->ocfs2_wq, &osb->dquot_drop_work);
                goto out;
        }
        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;
        handle = ocfs2_start_trans(osb,
                ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_id.type));
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }

        status = ocfs2_global_release_dquot(dquot);
        if (status < 0) {
                mlog_errno(status);
                goto out_trans;
        }
        status = ocfs2_local_release_dquot(handle, dquot);
        /*
         * If we fail here, we cannot do much as global structure is
         * already released. So just complain...
         */
        if (status < 0)
                mlog_errno(status);
        /*
         * Clear dq_off so that we search for the structure in quota file next
         * time we acquire it. The structure might be deleted and reallocated
         * elsewhere by another node while our dquot structure is on freelist.
         */
        dquot->dq_off = 0;
        clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_trans:
        ocfs2_commit_trans(osb, handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        mutex_unlock(&dquot->dq_lock);
        if (status)
                mlog_errno(status);
        return status;
}

/*
 * Read global dquot structure from disk or create it if it does
 * not exist. Also update use count of the global structure and
 * create structure in node-local quota file.
 */
static int ocfs2_acquire_dquot(struct dquot *dquot)
{
        int status = 0, err;
        int ex = 0;
        struct super_block *sb = dquot->dq_sb;
        struct ocfs2_super *osb = OCFS2_SB(sb);
        int type = dquot->dq_id.type;
        struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
        struct inode *gqinode = info->dqi_gqinode;
        int need_alloc = ocfs2_global_qinit_alloc(sb, type);
        handle_t *handle;

        trace_ocfs2_acquire_dquot(from_kqid(&init_user_ns, dquot->dq_id),
                                  type);
        mutex_lock(&dquot->dq_lock);
        /*
         * We need an exclusive lock, because we're going to update use count
         * and instantiate possibly new dquot structure
         */
        status = ocfs2_lock_global_qf(info, 1);
        if (status < 0)
                goto out;
        status = ocfs2_qinfo_lock(info, 0);
        if (status < 0)
                goto out_dq;
        /*
         * We always want to read dquot structure from disk because we don't
         * know what happened with it while it was on freelist.
         */
        status = qtree_read_dquot(&info->dqi_gi, dquot);
        ocfs2_qinfo_unlock(info, 0);
        if (status < 0)
                goto out_dq;

        OCFS2_DQUOT(dquot)->dq_use_count++;
        OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
        OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
        if (!dquot->dq_off) {   /* No real quota entry? */
                ex = 1;
                /*
                 * Add blocks to quota file before we start a transaction since
                 * locking allocators ranks above a transaction start
                 */
                WARN_ON(journal_current_handle());
                status = ocfs2_extend_no_holes(gqinode, NULL,
                        i_size_read(gqinode) + (need_alloc << sb->s_blocksize_bits),
                        i_size_read(gqinode));
                if (status < 0) {
                        mlog_errno(status);
                        goto out_dq;
                }
        }

        handle = ocfs2_start_trans(osb,
                                   ocfs2_calc_global_qinit_credits(sb, type));
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_dq;
        }
        status = ocfs2_qinfo_lock(info, ex);
        if (status < 0)
                goto out_trans;
        status = qtree_write_dquot(&info->dqi_gi, dquot);
        if (ex && info_dirty(sb_dqinfo(sb, type))) {
                err = __ocfs2_global_write_info(sb, type);
                if (!status)
                        status = err;
        }
        ocfs2_qinfo_unlock(info, ex);
out_trans:
        ocfs2_commit_trans(osb, handle);
out_dq:
        ocfs2_unlock_global_qf(info, 1);
        if (status < 0)
                goto out;

        status = ocfs2_create_local_dquot(dquot);
        if (status < 0)
                goto out;
        set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out:
        mutex_unlock(&dquot->dq_lock);
        if (status)
                mlog_errno(status);
        return status;
}

static int ocfs2_get_next_id(struct super_block *sb, struct kqid *qid)
{
        int type = qid->type;
        struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
        int status = 0;

        trace_ocfs2_get_next_id(from_kqid(&init_user_ns, *qid), type);
        if (!sb_has_quota_loaded(sb, type)) {
                status = -ESRCH;
                goto out;
        }
        status = ocfs2_lock_global_qf(info, 0);
        if (status < 0)
                goto out;
        status = ocfs2_qinfo_lock(info, 0);
        if (status < 0)
                goto out_global;
        status = qtree_get_next_id(&info->dqi_gi, qid);
        ocfs2_qinfo_unlock(info, 0);
out_global:
        ocfs2_unlock_global_qf(info, 0);
out:
        /*
         * Avoid logging ENOENT since it just means there isn't next ID and
         * ESRCH which means quota isn't enabled for the filesystem.
         */
        if (status && status != -ENOENT && status != -ESRCH)
                mlog_errno(status);
        return status;
}

static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
{
        unsigned long mask = (1 << (DQ_LASTSET_B + QIF_ILIMITS_B)) |
                             (1 << (DQ_LASTSET_B + QIF_BLIMITS_B)) |
                             (1 << (DQ_LASTSET_B + QIF_INODES_B)) |
                             (1 << (DQ_LASTSET_B + QIF_SPACE_B)) |
                             (1 << (DQ_LASTSET_B + QIF_BTIME_B)) |
                             (1 << (DQ_LASTSET_B + QIF_ITIME_B));
        int sync = 0;
        int status;
        struct super_block *sb = dquot->dq_sb;
        int type = dquot->dq_id.type;
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        handle_t *handle;
        struct ocfs2_super *osb = OCFS2_SB(sb);

        trace_ocfs2_mark_dquot_dirty(from_kqid(&init_user_ns, dquot->dq_id),
                                     type);

        /* In case user set some limits, sync dquot immediately to global
         * quota file so that information propagates quicker */
        spin_lock(&dquot->dq_dqb_lock);
        if (dquot->dq_flags & mask)
                sync = 1;
        spin_unlock(&dquot->dq_dqb_lock);
        /* This is a slight hack but we can't afford getting global quota
         * lock if we already have a transaction started. */
        if (!sync || journal_current_handle()) {
                status = ocfs2_write_dquot(dquot);
                goto out;
        }
        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;
        handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        down_write(&sb_dqopt(sb)->dqio_sem);
        status = ocfs2_sync_dquot(dquot);
        if (status < 0) {
                mlog_errno(status);
                goto out_dlock;
        }
        /* Now write updated local dquot structure */
        status = ocfs2_local_write_dquot(dquot);
out_dlock:
        up_write(&sb_dqopt(sb)->dqio_sem);
        ocfs2_commit_trans(osb, handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        if (status)
                mlog_errno(status);
        return status;
}

/* This should happen only after set_dqinfo(). */
static int ocfs2_write_info(struct super_block *sb, int type)
{
        handle_t *handle;
        int status = 0;
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;
        handle = ocfs2_start_trans(OCFS2_SB(sb), OCFS2_QINFO_WRITE_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        status = dquot_commit_info(sb, type);
        ocfs2_commit_trans(OCFS2_SB(sb), handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        if (status)
                mlog_errno(status);
        return status;
}

static struct dquot *ocfs2_alloc_dquot(struct super_block *sb, int type)
{
        struct ocfs2_dquot *dquot =
                        kmem_cache_zalloc(ocfs2_dquot_cachep, GFP_NOFS);

        if (!dquot)
                return NULL;
        return &dquot->dq_dquot;
}

static void ocfs2_destroy_dquot(struct dquot *dquot)
{
        kmem_cache_free(ocfs2_dquot_cachep, dquot);
}

const struct dquot_operations ocfs2_quota_operations = {
        /* We never make dquot dirty so .write_dquot is never called */
        .acquire_dquot  = ocfs2_acquire_dquot,
        .release_dquot  = ocfs2_release_dquot,
        .mark_dirty     = ocfs2_mark_dquot_dirty,
        .write_info     = ocfs2_write_info,
        .alloc_dquot    = ocfs2_alloc_dquot,
        .destroy_dquot  = ocfs2_destroy_dquot,
        .get_next_id    = ocfs2_get_next_id,
};