1 // SPDX-License-Identifier: GPL-2.0
3 * Implementation of operations over global quota file
5 #include <linux/spinlock.h>
7 #include <linux/slab.h>
8 #include <linux/quota.h>
9 #include <linux/quotaops.h>
10 #include <linux/dqblk_qtree.h>
11 #include <linux/jiffies.h>
12 #include <linux/writeback.h>
13 #include <linux/workqueue.h>
14 #include <linux/llist.h>
16 #include <cluster/masklog.h>
21 #include "blockcheck.h"
29 #include "buffer_head_io.h"
31 #include "ocfs2_trace.h"
34 * Locking of quotas with OCFS2 is rather complex. Here are rules that
35 * should be obeyed by all the functions:
36 * - any write of quota structure (either to local or global file) is protected
37 * by dqio_sem or dquot->dq_lock.
38 * - any modification of global quota file holds inode cluster lock, i_mutex,
39 * and ip_alloc_sem of the global quota file (achieved by
40 * ocfs2_lock_global_qf). It also has to hold qinfo_lock.
41 * - an allocation of new blocks for local quota file is protected by
44 * A rough sketch of locking dependencies (lf = local file, gf = global file):
45 * Normal filesystem operation:
46 * start_trans -> dqio_sem -> write to lf
47 * Syncing of local and global file:
48 * ocfs2_lock_global_qf -> start_trans -> dqio_sem -> qinfo_lock ->
51 * Acquire dquot for the first time:
52 * dq_lock -> ocfs2_lock_global_qf -> qinfo_lock -> read from gf
53 * -> alloc space for gf
54 * -> start_trans -> qinfo_lock -> write to gf
55 * -> ip_alloc_sem of lf -> alloc space for lf
57 * Release last reference to dquot:
58 * dq_lock -> ocfs2_lock_global_qf -> start_trans -> qinfo_lock -> write to gf
60 * Note that all the above operations also hold the inode cluster lock of lf.
62 * inode cluster lock of recovered lf
63 * -> read bitmaps -> ip_alloc_sem of lf
64 * -> ocfs2_lock_global_qf -> start_trans -> dqio_sem -> qinfo_lock ->
/* Forward declaration: periodic quota-sync delayed-work handler (defined below). */
static void qsync_work_fn(struct work_struct *work);
70 static void ocfs2_global_disk2memdqb(struct dquot
*dquot
, void *dp
)
72 struct ocfs2_global_disk_dqblk
*d
= dp
;
73 struct mem_dqblk
*m
= &dquot
->dq_dqb
;
75 /* Update from disk only entries not set by the admin */
76 if (!test_bit(DQ_LASTSET_B
+ QIF_ILIMITS_B
, &dquot
->dq_flags
)) {
77 m
->dqb_ihardlimit
= le64_to_cpu(d
->dqb_ihardlimit
);
78 m
->dqb_isoftlimit
= le64_to_cpu(d
->dqb_isoftlimit
);
80 if (!test_bit(DQ_LASTSET_B
+ QIF_INODES_B
, &dquot
->dq_flags
))
81 m
->dqb_curinodes
= le64_to_cpu(d
->dqb_curinodes
);
82 if (!test_bit(DQ_LASTSET_B
+ QIF_BLIMITS_B
, &dquot
->dq_flags
)) {
83 m
->dqb_bhardlimit
= le64_to_cpu(d
->dqb_bhardlimit
);
84 m
->dqb_bsoftlimit
= le64_to_cpu(d
->dqb_bsoftlimit
);
86 if (!test_bit(DQ_LASTSET_B
+ QIF_SPACE_B
, &dquot
->dq_flags
))
87 m
->dqb_curspace
= le64_to_cpu(d
->dqb_curspace
);
88 if (!test_bit(DQ_LASTSET_B
+ QIF_BTIME_B
, &dquot
->dq_flags
))
89 m
->dqb_btime
= le64_to_cpu(d
->dqb_btime
);
90 if (!test_bit(DQ_LASTSET_B
+ QIF_ITIME_B
, &dquot
->dq_flags
))
91 m
->dqb_itime
= le64_to_cpu(d
->dqb_itime
);
92 OCFS2_DQUOT(dquot
)->dq_use_count
= le32_to_cpu(d
->dqb_use_count
);
95 static void ocfs2_global_mem2diskdqb(void *dp
, struct dquot
*dquot
)
97 struct ocfs2_global_disk_dqblk
*d
= dp
;
98 struct mem_dqblk
*m
= &dquot
->dq_dqb
;
100 d
->dqb_id
= cpu_to_le32(from_kqid(&init_user_ns
, dquot
->dq_id
));
101 d
->dqb_use_count
= cpu_to_le32(OCFS2_DQUOT(dquot
)->dq_use_count
);
102 d
->dqb_ihardlimit
= cpu_to_le64(m
->dqb_ihardlimit
);
103 d
->dqb_isoftlimit
= cpu_to_le64(m
->dqb_isoftlimit
);
104 d
->dqb_curinodes
= cpu_to_le64(m
->dqb_curinodes
);
105 d
->dqb_bhardlimit
= cpu_to_le64(m
->dqb_bhardlimit
);
106 d
->dqb_bsoftlimit
= cpu_to_le64(m
->dqb_bsoftlimit
);
107 d
->dqb_curspace
= cpu_to_le64(m
->dqb_curspace
);
108 d
->dqb_btime
= cpu_to_le64(m
->dqb_btime
);
109 d
->dqb_itime
= cpu_to_le64(m
->dqb_itime
);
110 d
->dqb_pad1
= d
->dqb_pad2
= 0;
113 static int ocfs2_global_is_id(void *dp
, struct dquot
*dquot
)
115 struct ocfs2_global_disk_dqblk
*d
= dp
;
116 struct ocfs2_mem_dqinfo
*oinfo
=
117 sb_dqinfo(dquot
->dq_sb
, dquot
->dq_id
.type
)->dqi_priv
;
119 if (qtree_entry_unused(&oinfo
->dqi_gi
, dp
))
122 return qid_eq(make_kqid(&init_user_ns
, dquot
->dq_id
.type
,
123 le32_to_cpu(d
->dqb_id
)),
127 const struct qtree_fmt_operations ocfs2_global_ops
= {
128 .mem2disk_dqblk
= ocfs2_global_mem2diskdqb
,
129 .disk2mem_dqblk
= ocfs2_global_disk2memdqb
,
130 .is_id
= ocfs2_global_is_id
,
133 int ocfs2_validate_quota_block(struct super_block
*sb
, struct buffer_head
*bh
)
135 struct ocfs2_disk_dqtrailer
*dqt
=
136 ocfs2_block_dqtrailer(sb
->s_blocksize
, bh
->b_data
);
138 trace_ocfs2_validate_quota_block((unsigned long long)bh
->b_blocknr
);
140 BUG_ON(!buffer_uptodate(bh
));
143 * If the ecc fails, we return the error but otherwise
144 * leave the filesystem running. We know any error is
145 * local to this block.
147 return ocfs2_validate_meta_ecc(sb
, bh
->b_data
, &dqt
->dq_check
);
150 int ocfs2_read_quota_phys_block(struct inode
*inode
, u64 p_block
,
151 struct buffer_head
**bhp
)
156 rc
= ocfs2_read_blocks(INODE_CACHE(inode
), p_block
, 1, bhp
, 0,
157 ocfs2_validate_quota_block
);
163 /* Read data from global quotafile - avoid pagecache and such because we cannot
164 * afford acquiring the locks... We use quota cluster lock to serialize
165 * operations. Caller is responsible for acquiring it. */
/*
 * NOTE(review): extraction-garbled fragment of ocfs2_quota_read().  The
 * read loop, the error paths after ocfs2_extent_map_get_blocks() /
 * ocfs2_read_quota_phys_block(), brelse() and the return statements are
 * missing from this capture.  Restore from the upstream file before
 * editing; do not attempt an in-place fix.
 */
166 ssize_t
ocfs2_quota_read(struct super_block
*sb
, int type
, char *data
,
167 size_t len
, loff_t off
)
/* Locals: global quota inode and the block/offset decomposition of 'off'. */
169 struct ocfs2_mem_dqinfo
*oinfo
= sb_dqinfo(sb
, type
)->dqi_priv
;
170 struct inode
*gqinode
= oinfo
->dqi_gqinode
;
171 loff_t i_size
= i_size_read(gqinode
);
172 int offset
= off
& (sb
->s_blocksize
- 1);
173 sector_t blk
= off
>> sb
->s_blocksize_bits
;
175 struct buffer_head
*bh
;
176 size_t toread
, tocopy
;
177 u64 pblock
= 0, pcount
= 0;
/* Clamp the request so it does not read past i_size (truncation code
 * between these fragments is missing from this capture). */
181 if (off
+ len
> i_size
)
185 tocopy
= min_t(size_t, (sb
->s_blocksize
- offset
), toread
);
187 err
= ocfs2_extent_map_get_blocks(gqinode
, blk
, &pblock
,
198 err
= ocfs2_read_quota_phys_block(gqinode
, pblock
, &bh
);
203 memcpy(data
, bh
->b_data
+ offset
, tocopy
);
213 /* Write to quotafile (we know the transaction is already started and has
/*
 * NOTE(review): extraction-garbled fragment of ocfs2_quota_write().
 * Error paths, the journal-access mode selection branches, several
 * closing braces and the return statements are missing from this
 * capture.  Restore from the upstream file before editing.
 */
215 ssize_t
ocfs2_quota_write(struct super_block
*sb
, int type
,
216 const char *data
, size_t len
, loff_t off
)
218 struct mem_dqinfo
*info
= sb_dqinfo(sb
, type
);
219 struct ocfs2_mem_dqinfo
*oinfo
= info
->dqi_priv
;
220 struct inode
*gqinode
= oinfo
->dqi_gqinode
;
221 int offset
= off
& (sb
->s_blocksize
- 1);
222 sector_t blk
= off
>> sb
->s_blocksize_bits
;
223 int err
= 0, new = 0, ja_type
;
224 struct buffer_head
*bh
= NULL
;
225 handle_t
*handle
= journal_current_handle();
/* A transaction must already be running; otherwise the write is refused. */
229 mlog(ML_ERROR
, "Quota write (off=%llu, len=%llu) cancelled "
230 "because transaction was not started.\n",
231 (unsigned long long)off
, (unsigned long long)len
);
/* Clamp the write so it never touches the reserved trailer space. */
234 if (len
> sb
->s_blocksize
- OCFS2_QBLK_RESERVED_SPACE
- offset
) {
236 len
= sb
->s_blocksize
- OCFS2_QBLK_RESERVED_SPACE
- offset
;
239 if (i_size_read(gqinode
) < off
+ len
) {
241 ocfs2_align_bytes_to_blocks(sb
, off
+ len
);
243 /* Space is already allocated in ocfs2_acquire_dquot() */
244 err
= ocfs2_simple_size_update(gqinode
,
251 err
= ocfs2_extent_map_get_blocks(gqinode
, blk
, &pblock
, &pcount
, NULL
);
256 /* Not rewriting whole block? */
257 if ((offset
|| len
< sb
->s_blocksize
- OCFS2_QBLK_RESERVED_SPACE
) &&
259 err
= ocfs2_read_quota_phys_block(gqinode
, pblock
, &bh
);
260 ja_type
= OCFS2_JOURNAL_ACCESS_WRITE
;
262 bh
= sb_getblk(sb
, pblock
);
265 ja_type
= OCFS2_JOURNAL_ACCESS_CREATE
;
/* Fill the (possibly new) block and journal it. */
273 memset(bh
->b_data
, 0, sb
->s_blocksize
);
274 memcpy(bh
->b_data
+ offset
, data
, len
);
275 flush_dcache_page(bh
->b_page
);
276 set_buffer_uptodate(bh
);
278 ocfs2_set_buffer_uptodate(INODE_CACHE(gqinode
), bh
);
279 err
= ocfs2_journal_access_dq(handle
, INODE_CACHE(gqinode
), bh
,
285 ocfs2_journal_dirty(handle
, bh
);
292 gqinode
->i_version
++;
293 ocfs2_mark_inode_dirty(handle
, gqinode
, oinfo
->dqi_gqi_bh
);
/*
 * NOTE(review): extraction-garbled fragment of ocfs2_lock_global_qf().
 * Takes the cluster lock on the global quota inode, caches its buffer
 * head (dqi_gqi_bh, refcounted via dqi_gqi_count under dq_data_lock),
 * then takes i_mutex and ip_alloc_sem (write if @ex, else read).  The
 * status checks, else-branches and return statements are missing from
 * this capture — restore from upstream before editing.
 */
297 int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo
*oinfo
, int ex
)
300 struct buffer_head
*bh
= NULL
;
302 status
= ocfs2_inode_lock(oinfo
->dqi_gqinode
, &bh
, ex
);
305 spin_lock(&dq_data_lock
);
306 if (!oinfo
->dqi_gqi_count
++)
307 oinfo
->dqi_gqi_bh
= bh
;
309 WARN_ON(bh
!= oinfo
->dqi_gqi_bh
);
310 spin_unlock(&dq_data_lock
);
312 inode_lock(oinfo
->dqi_gqinode
);
313 down_write(&OCFS2_I(oinfo
->dqi_gqinode
)->ip_alloc_sem
);
315 down_read(&OCFS2_I(oinfo
->dqi_gqinode
)->ip_alloc_sem
);
/*
 * NOTE(review): extraction-garbled fragment of ocfs2_unlock_global_qf().
 * Releases the locks taken by ocfs2_lock_global_qf() in reverse order
 * and drops the cached buffer head when the last user goes away.  The
 * if (ex)/else structure around the up_write/up_read pair is missing
 * from this capture — restore from upstream before editing.
 */
320 void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo
*oinfo
, int ex
)
323 up_write(&OCFS2_I(oinfo
->dqi_gqinode
)->ip_alloc_sem
);
324 inode_unlock(oinfo
->dqi_gqinode
);
326 up_read(&OCFS2_I(oinfo
->dqi_gqinode
)->ip_alloc_sem
);
328 ocfs2_inode_unlock(oinfo
->dqi_gqinode
, ex
);
329 brelse(oinfo
->dqi_gqi_bh
);
330 spin_lock(&dq_data_lock
);
331 if (!--oinfo
->dqi_gqi_count
)
332 oinfo
->dqi_gqi_bh
= NULL
;
333 spin_unlock(&dq_data_lock
);
336 /* Read information header from global quota file */
/*
 * NOTE(review): extraction-garbled fragment of ocfs2_global_read_info().
 * Initializes the qtree info (dqi_gi) for the given quota @type, reads
 * the on-disk global info header under the cluster/qinfo locks, and arms
 * the periodic sync delayed work.  The error-handling branches, goto
 * labels and return statements are missing from this capture — restore
 * from upstream before editing.
 */
337 int ocfs2_global_read_info(struct super_block
*sb
, int type
)
339 struct inode
*gqinode
= NULL
;
340 unsigned int ino
[OCFS2_MAXQUOTAS
] = { USER_QUOTA_SYSTEM_INODE
,
341 GROUP_QUOTA_SYSTEM_INODE
};
342 struct ocfs2_global_disk_dqinfo dinfo
;
343 struct mem_dqinfo
*info
= sb_dqinfo(sb
, type
);
344 struct ocfs2_mem_dqinfo
*oinfo
= info
->dqi_priv
;
348 /* Read global header */
349 gqinode
= ocfs2_get_system_file_inode(OCFS2_SB(sb
), ino
[type
],
352 mlog(ML_ERROR
, "failed to get global quota inode (type=%d)\n",
/* Set up the generic quota-tree descriptor for this type. */
357 oinfo
->dqi_gi
.dqi_sb
= sb
;
358 oinfo
->dqi_gi
.dqi_type
= type
;
359 ocfs2_qinfo_lock_res_init(&oinfo
->dqi_gqlock
, oinfo
);
360 oinfo
->dqi_gi
.dqi_entry_size
= sizeof(struct ocfs2_global_disk_dqblk
);
361 oinfo
->dqi_gi
.dqi_ops
= &ocfs2_global_ops
;
362 oinfo
->dqi_gqi_bh
= NULL
;
363 oinfo
->dqi_gqi_count
= 0;
364 oinfo
->dqi_gqinode
= gqinode
;
365 status
= ocfs2_lock_global_qf(oinfo
, 0);
371 status
= ocfs2_extent_map_get_blocks(gqinode
, 0, &oinfo
->dqi_giblk
,
376 status
= ocfs2_qinfo_lock(oinfo
, 0);
379 status
= sb
->s_op
->quota_read(sb
, type
, (char *)&dinfo
,
380 sizeof(struct ocfs2_global_disk_dqinfo
),
381 OCFS2_GLOBAL_INFO_OFF
);
382 ocfs2_qinfo_unlock(oinfo
, 0);
383 ocfs2_unlock_global_qf(oinfo
, 0);
384 if (status
!= sizeof(struct ocfs2_global_disk_dqinfo
)) {
385 mlog(ML_ERROR
, "Cannot read global quota info (%d).\n",
/* Populate in-memory info from the little-endian on-disk header. */
392 info
->dqi_bgrace
= le32_to_cpu(dinfo
.dqi_bgrace
);
393 info
->dqi_igrace
= le32_to_cpu(dinfo
.dqi_igrace
);
394 oinfo
->dqi_syncms
= le32_to_cpu(dinfo
.dqi_syncms
);
395 oinfo
->dqi_gi
.dqi_blocks
= le32_to_cpu(dinfo
.dqi_blocks
);
396 oinfo
->dqi_gi
.dqi_free_blk
= le32_to_cpu(dinfo
.dqi_free_blk
);
397 oinfo
->dqi_gi
.dqi_free_entry
= le32_to_cpu(dinfo
.dqi_free_entry
);
398 oinfo
->dqi_gi
.dqi_blocksize_bits
= sb
->s_blocksize_bits
;
399 oinfo
->dqi_gi
.dqi_usable_bs
= sb
->s_blocksize
-
400 OCFS2_QBLK_RESERVED_SPACE
;
401 oinfo
->dqi_gi
.dqi_qtree_depth
= qtree_depth(&oinfo
->dqi_gi
);
402 INIT_DELAYED_WORK(&oinfo
->dqi_sync_work
, qsync_work_fn
);
403 schedule_delayed_work(&oinfo
->dqi_sync_work
,
404 msecs_to_jiffies(oinfo
->dqi_syncms
));
409 ocfs2_unlock_global_qf(oinfo
, 0);
414 /* Write information to global quota file. Expects exclusive lock on quota
415 * file inode and quota info */
/*
 * NOTE(review): extraction-garbled fragment of __ocfs2_global_write_info().
 * Serializes the in-memory quota info into the little-endian on-disk
 * header and writes it via ->quota_write().  The error-path tail and
 * return statement are missing from this capture — restore from
 * upstream before editing.
 */
416 static int __ocfs2_global_write_info(struct super_block
*sb
, int type
)
418 struct mem_dqinfo
*info
= sb_dqinfo(sb
, type
);
419 struct ocfs2_mem_dqinfo
*oinfo
= info
->dqi_priv
;
420 struct ocfs2_global_disk_dqinfo dinfo
;
/* Snapshot grace times and clear the dirty flag under dq_data_lock. */
423 spin_lock(&dq_data_lock
);
424 info
->dqi_flags
&= ~DQF_INFO_DIRTY
;
425 dinfo
.dqi_bgrace
= cpu_to_le32(info
->dqi_bgrace
);
426 dinfo
.dqi_igrace
= cpu_to_le32(info
->dqi_igrace
);
427 spin_unlock(&dq_data_lock
);
428 dinfo
.dqi_syncms
= cpu_to_le32(oinfo
->dqi_syncms
);
429 dinfo
.dqi_blocks
= cpu_to_le32(oinfo
->dqi_gi
.dqi_blocks
);
430 dinfo
.dqi_free_blk
= cpu_to_le32(oinfo
->dqi_gi
.dqi_free_blk
);
431 dinfo
.dqi_free_entry
= cpu_to_le32(oinfo
->dqi_gi
.dqi_free_entry
);
432 size
= sb
->s_op
->quota_write(sb
, type
, (char *)&dinfo
,
433 sizeof(struct ocfs2_global_disk_dqinfo
),
434 OCFS2_GLOBAL_INFO_OFF
);
435 if (size
!= sizeof(struct ocfs2_global_disk_dqinfo
)) {
436 mlog(ML_ERROR
, "Cannot write global quota info structure\n");
/*
 * NOTE(review): extraction-garbled fragment of ocfs2_global_write_info().
 * Wrapper that takes dqio_sem and the qinfo cluster lock around
 * __ocfs2_global_write_info().  The error checks and return are missing
 * from this capture — restore from upstream before editing.
 */
444 int ocfs2_global_write_info(struct super_block
*sb
, int type
)
447 struct quota_info
*dqopt
= sb_dqopt(sb
);
448 struct ocfs2_mem_dqinfo
*info
= dqopt
->info
[type
].dqi_priv
;
450 down_write(&dqopt
->dqio_sem
);
451 err
= ocfs2_qinfo_lock(info
, 1);
454 err
= __ocfs2_global_write_info(sb
, type
);
455 ocfs2_qinfo_unlock(info
, 1);
457 up_write(&dqopt
->dqio_sem
);
461 static int ocfs2_global_qinit_alloc(struct super_block
*sb
, int type
)
463 struct ocfs2_mem_dqinfo
*oinfo
= sb_dqinfo(sb
, type
)->dqi_priv
;
466 * We may need to allocate tree blocks and a leaf block but not the
469 return oinfo
->dqi_gi
.dqi_qtree_depth
;
472 static int ocfs2_calc_global_qinit_credits(struct super_block
*sb
, int type
)
474 /* We modify all the allocated blocks, tree root, info block and
476 return (ocfs2_global_qinit_alloc(sb
, type
) + 2) *
477 OCFS2_QUOTA_BLOCK_UPDATE_CREDITS
+ 1;
480 /* Sync local information about quota modifications with global quota file.
481 * Caller must have started the transaction and obtained exclusive lock for
482 * global quota file inode */
/*
 * NOTE(review): extraction-garbled fragment of __ocfs2_sync_dquot().
 * Merges local usage deltas (relative to dq_origspace/dq_originodes)
 * with the current on-disk global entry, recomputes grace times, then
 * writes the entry back via qtree_write_dquot().  Many statements
 * (offset computation, error/goto paths, several else branches, closing
 * braces and the return) are missing from this capture — restore from
 * upstream before editing.
 */
483 int __ocfs2_sync_dquot(struct dquot
*dquot
, int freeing
)
486 struct super_block
*sb
= dquot
->dq_sb
;
487 int type
= dquot
->dq_id
.type
;
488 struct ocfs2_mem_dqinfo
*info
= sb_dqinfo(sb
, type
)->dqi_priv
;
489 struct ocfs2_global_disk_dqblk dqblk
;
490 s64 spacechange
, inodechange
;
491 time64_t olditime
, oldbtime
;
/* Re-read the current global entry so concurrent nodes' changes survive. */
493 err
= sb
->s_op
->quota_read(sb
, type
, (char *)&dqblk
,
494 sizeof(struct ocfs2_global_disk_dqblk
),
496 if (err
!= sizeof(struct ocfs2_global_disk_dqblk
)) {
498 mlog(ML_ERROR
, "Short read from global quota file "
505 /* Update space and inode usage. Get also other information from
506 * global quota file so that we don't overwrite any changes there.
508 spin_lock(&dquot
->dq_dqb_lock
);
509 spacechange
= dquot
->dq_dqb
.dqb_curspace
-
510 OCFS2_DQUOT(dquot
)->dq_origspace
;
511 inodechange
= dquot
->dq_dqb
.dqb_curinodes
-
512 OCFS2_DQUOT(dquot
)->dq_originodes
;
513 olditime
= dquot
->dq_dqb
.dqb_itime
;
514 oldbtime
= dquot
->dq_dqb
.dqb_btime
;
515 ocfs2_global_disk2memdqb(dquot
, &dqblk
);
516 trace_ocfs2_sync_dquot(from_kqid(&init_user_ns
, dquot
->dq_id
),
517 dquot
->dq_dqb
.dqb_curspace
,
518 (long long)spacechange
,
519 dquot
->dq_dqb
.dqb_curinodes
,
520 (long long)inodechange
);
521 if (!test_bit(DQ_LASTSET_B
+ QIF_SPACE_B
, &dquot
->dq_flags
))
522 dquot
->dq_dqb
.dqb_curspace
+= spacechange
;
523 if (!test_bit(DQ_LASTSET_B
+ QIF_INODES_B
, &dquot
->dq_flags
))
524 dquot
->dq_dqb
.dqb_curinodes
+= inodechange
;
525 /* Set properly space grace time... */
526 if (dquot
->dq_dqb
.dqb_bsoftlimit
&&
527 dquot
->dq_dqb
.dqb_curspace
> dquot
->dq_dqb
.dqb_bsoftlimit
) {
528 if (!test_bit(DQ_LASTSET_B
+ QIF_BTIME_B
, &dquot
->dq_flags
) &&
530 if (dquot
->dq_dqb
.dqb_btime
> 0)
531 dquot
->dq_dqb
.dqb_btime
=
532 min(dquot
->dq_dqb
.dqb_btime
, oldbtime
);
534 dquot
->dq_dqb
.dqb_btime
= oldbtime
;
537 dquot
->dq_dqb
.dqb_btime
= 0;
538 clear_bit(DQ_BLKS_B
, &dquot
->dq_flags
);
540 /* Set properly inode grace time... */
541 if (dquot
->dq_dqb
.dqb_isoftlimit
&&
542 dquot
->dq_dqb
.dqb_curinodes
> dquot
->dq_dqb
.dqb_isoftlimit
) {
543 if (!test_bit(DQ_LASTSET_B
+ QIF_ITIME_B
, &dquot
->dq_flags
) &&
545 if (dquot
->dq_dqb
.dqb_itime
> 0)
546 dquot
->dq_dqb
.dqb_itime
=
547 min(dquot
->dq_dqb
.dqb_itime
, olditime
);
549 dquot
->dq_dqb
.dqb_itime
= olditime
;
552 dquot
->dq_dqb
.dqb_itime
= 0;
553 clear_bit(DQ_INODES_B
, &dquot
->dq_flags
);
555 /* All information is properly updated, clear the flags */
556 __clear_bit(DQ_LASTSET_B
+ QIF_SPACE_B
, &dquot
->dq_flags
);
557 __clear_bit(DQ_LASTSET_B
+ QIF_INODES_B
, &dquot
->dq_flags
);
558 __clear_bit(DQ_LASTSET_B
+ QIF_BLIMITS_B
, &dquot
->dq_flags
);
559 __clear_bit(DQ_LASTSET_B
+ QIF_ILIMITS_B
, &dquot
->dq_flags
);
560 __clear_bit(DQ_LASTSET_B
+ QIF_BTIME_B
, &dquot
->dq_flags
);
561 __clear_bit(DQ_LASTSET_B
+ QIF_ITIME_B
, &dquot
->dq_flags
);
562 OCFS2_DQUOT(dquot
)->dq_origspace
= dquot
->dq_dqb
.dqb_curspace
;
563 OCFS2_DQUOT(dquot
)->dq_originodes
= dquot
->dq_dqb
.dqb_curinodes
;
564 spin_unlock(&dquot
->dq_dqb_lock
);
565 err
= ocfs2_qinfo_lock(info
, freeing
);
567 mlog(ML_ERROR
, "Failed to lock quota info, losing quota write"
568 " (type=%d, id=%u)\n", dquot
->dq_id
.type
,
569 (unsigned)from_kqid(&init_user_ns
, dquot
->dq_id
));
573 OCFS2_DQUOT(dquot
)->dq_use_count
--;
574 err
= qtree_write_dquot(&info
->dqi_gi
, dquot
);
577 if (freeing
&& !OCFS2_DQUOT(dquot
)->dq_use_count
) {
578 err
= qtree_release_dquot(&info
->dqi_gi
, dquot
);
579 if (info_dirty(sb_dqinfo(sb
, type
))) {
580 err2
= __ocfs2_global_write_info(sb
, type
);
586 ocfs2_qinfo_unlock(info
, freeing
);
594 * Functions for periodic syncing of dquots with global file
/*
 * NOTE(review): extraction-garbled fragment of ocfs2_sync_dquot_helper()
 * (the dquot_scan_active() callback used by qsync_work_fn below).  Takes
 * the global quota lock, starts a transaction, syncs the dquot to the
 * global file and writes the local structure under dqio_sem.  The error
 * checks, goto labels and return are missing from this capture —
 * restore from upstream before editing.
 */
596 static int ocfs2_sync_dquot_helper(struct dquot
*dquot
, unsigned long type
)
599 struct super_block
*sb
= dquot
->dq_sb
;
600 struct ocfs2_mem_dqinfo
*oinfo
= sb_dqinfo(sb
, type
)->dqi_priv
;
601 struct ocfs2_super
*osb
= OCFS2_SB(sb
);
604 trace_ocfs2_sync_dquot_helper(from_kqid(&init_user_ns
, dquot
->dq_id
),
607 if (type
!= dquot
->dq_id
.type
)
609 status
= ocfs2_lock_global_qf(oinfo
, 1);
613 handle
= ocfs2_start_trans(osb
, OCFS2_QSYNC_CREDITS
);
614 if (IS_ERR(handle
)) {
615 status
= PTR_ERR(handle
);
619 down_write(&sb_dqopt(sb
)->dqio_sem
);
620 status
= ocfs2_sync_dquot(dquot
);
623 /* We have to write local structure as well... */
624 status
= ocfs2_local_write_dquot(dquot
);
627 up_write(&sb_dqopt(sb
)->dqio_sem
);
628 ocfs2_commit_trans(osb
, handle
);
630 ocfs2_unlock_global_qf(oinfo
, 1);
635 static void qsync_work_fn(struct work_struct
*work
)
637 struct ocfs2_mem_dqinfo
*oinfo
= container_of(work
,
638 struct ocfs2_mem_dqinfo
,
640 struct super_block
*sb
= oinfo
->dqi_gqinode
->i_sb
;
643 * We have to be careful here not to deadlock on s_umount as umount
644 * disabling quotas may be in progress and it waits for this work to
645 * complete. If trylock fails, we'll do the sync next time...
647 if (down_read_trylock(&sb
->s_umount
)) {
648 dquot_scan_active(sb
, ocfs2_sync_dquot_helper
, oinfo
->dqi_type
);
649 up_read(&sb
->s_umount
);
651 schedule_delayed_work(&oinfo
->dqi_sync_work
,
652 msecs_to_jiffies(oinfo
->dqi_syncms
));
656 * Wrappers for generic quota functions
/*
 * NOTE(review): extraction-garbled fragment of ocfs2_write_dquot().
 * Starts a transaction and writes the local dquot structure under
 * dqio_sem.  The status declaration, goto labels and return are missing
 * from this capture — restore from upstream before editing.
 */
659 static int ocfs2_write_dquot(struct dquot
*dquot
)
662 struct ocfs2_super
*osb
= OCFS2_SB(dquot
->dq_sb
);
665 trace_ocfs2_write_dquot(from_kqid(&init_user_ns
, dquot
->dq_id
),
668 handle
= ocfs2_start_trans(osb
, OCFS2_QWRITE_CREDITS
);
669 if (IS_ERR(handle
)) {
670 status
= PTR_ERR(handle
);
674 down_write(&sb_dqopt(dquot
->dq_sb
)->dqio_sem
);
675 status
= ocfs2_local_write_dquot(dquot
);
676 up_write(&sb_dqopt(dquot
->dq_sb
)->dqio_sem
);
677 ocfs2_commit_trans(osb
, handle
);
682 static int ocfs2_calc_qdel_credits(struct super_block
*sb
, int type
)
684 struct ocfs2_mem_dqinfo
*oinfo
= sb_dqinfo(sb
, type
)->dqi_priv
;
686 * We modify tree, leaf block, global info, local chunk header,
687 * global and local inode; OCFS2_QINFO_WRITE_CREDITS already
688 * accounts for inode update
690 return (oinfo
->dqi_gi
.dqi_qtree_depth
+ 2) *
691 OCFS2_QUOTA_BLOCK_UPDATE_CREDITS
+
692 OCFS2_QINFO_WRITE_CREDITS
+
693 OCFS2_INODE_UPDATE_CREDITS
;
696 void ocfs2_drop_dquot_refs(struct work_struct
*work
)
698 struct ocfs2_super
*osb
= container_of(work
, struct ocfs2_super
,
700 struct llist_node
*list
;
701 struct ocfs2_dquot
*odquot
, *next_odquot
;
703 list
= llist_del_all(&osb
->dquot_drop_list
);
704 llist_for_each_entry_safe(odquot
, next_odquot
, list
, list
) {
705 /* Drop the reference we acquired in ocfs2_dquot_release() */
706 dqput(&odquot
->dq_dquot
);
711 * Called when the last reference to dquot is dropped. If we are called from
712 * downconvert thread, we cannot do all the handling here because grabbing
713 * quota lock could deadlock (the node holding the quota lock could need some
714 * other cluster lock to proceed but with blocked downconvert thread we cannot
/*
 * NOTE(review): extraction-garbled fragment of ocfs2_release_dquot().
 * Releases the global and local quota structures for a dquot with no
 * remaining users; when running from the downconvert thread (current ==
 * osb->dc_task) it instead queues the dquot on dquot_drop_list for
 * ocfs2_drop_dquot_refs().  The dqgrab/goto labels, error checks,
 * dq_off clearing and return are missing from this capture — restore
 * from upstream before editing.
 */
717 static int ocfs2_release_dquot(struct dquot
*dquot
)
720 struct ocfs2_mem_dqinfo
*oinfo
=
721 sb_dqinfo(dquot
->dq_sb
, dquot
->dq_id
.type
)->dqi_priv
;
722 struct ocfs2_super
*osb
= OCFS2_SB(dquot
->dq_sb
);
725 trace_ocfs2_release_dquot(from_kqid(&init_user_ns
, dquot
->dq_id
),
728 mutex_lock(&dquot
->dq_lock
);
729 /* Check whether we are not racing with some other dqget() */
730 if (atomic_read(&dquot
->dq_count
) > 1)
732 /* Running from downconvert thread? Postpone quota processing to wq */
733 if (current
== osb
->dc_task
) {
735 * Grab our own reference to dquot and queue it for delayed
736 * dropping. Quota code rechecks after calling
737 * ->release_dquot() and won't free dquot structure.
740 /* First entry on list -> queue work */
741 if (llist_add(&OCFS2_DQUOT(dquot
)->list
, &osb
->dquot_drop_list
))
742 queue_work(osb
->ocfs2_wq
, &osb
->dquot_drop_work
);
745 status
= ocfs2_lock_global_qf(oinfo
, 1);
748 handle
= ocfs2_start_trans(osb
,
749 ocfs2_calc_qdel_credits(dquot
->dq_sb
, dquot
->dq_id
.type
));
750 if (IS_ERR(handle
)) {
751 status
= PTR_ERR(handle
);
756 status
= ocfs2_global_release_dquot(dquot
);
761 status
= ocfs2_local_release_dquot(handle
, dquot
);
763 * If we fail here, we cannot do much as global structure is
764 * already released. So just complain...
769 * Clear dq_off so that we search for the structure in quota file next
770 * time we acquire it. The structure might be deleted and reallocated
771 * elsewhere by another node while our dquot structure is on freelist.
774 clear_bit(DQ_ACTIVE_B
, &dquot
->dq_flags
);
776 ocfs2_commit_trans(osb
, handle
);
778 ocfs2_unlock_global_qf(oinfo
, 1);
780 mutex_unlock(&dquot
->dq_lock
);
787 * Read global dquot structure from disk or create it if it does
788 * not exist. Also update use count of the global structure and
789 * create structure in node-local quota file.
/*
 * NOTE(review): extraction-garbled fragment of ocfs2_acquire_dquot().
 * The declarations of status/handle/ex, the error-handling gotos, the
 * "ex = 1" assignment for new entries and the return are missing from
 * this capture — restore from upstream before editing.
 */
791 static int ocfs2_acquire_dquot(struct dquot
*dquot
)
795 struct super_block
*sb
= dquot
->dq_sb
;
796 struct ocfs2_super
*osb
= OCFS2_SB(sb
);
797 int type
= dquot
->dq_id
.type
;
798 struct ocfs2_mem_dqinfo
*info
= sb_dqinfo(sb
, type
)->dqi_priv
;
799 struct inode
*gqinode
= info
->dqi_gqinode
;
800 int need_alloc
= ocfs2_global_qinit_alloc(sb
, type
);
803 trace_ocfs2_acquire_dquot(from_kqid(&init_user_ns
, dquot
->dq_id
),
805 mutex_lock(&dquot
->dq_lock
);
807 * We need an exclusive lock, because we're going to update use count
808 * and instantiate possibly new dquot structure
810 status
= ocfs2_lock_global_qf(info
, 1);
813 status
= ocfs2_qinfo_lock(info
, 0);
817 * We always want to read dquot structure from disk because we don't
818 * know what happened with it while it was on freelist.
820 status
= qtree_read_dquot(&info
->dqi_gi
, dquot
);
821 ocfs2_qinfo_unlock(info
, 0);
825 OCFS2_DQUOT(dquot
)->dq_use_count
++;
826 OCFS2_DQUOT(dquot
)->dq_origspace
= dquot
->dq_dqb
.dqb_curspace
;
827 OCFS2_DQUOT(dquot
)->dq_originodes
= dquot
->dq_dqb
.dqb_curinodes
;
828 if (!dquot
->dq_off
) { /* No real quota entry? */
831 * Add blocks to quota file before we start a transaction since
832 * locking allocators ranks above a transaction start
834 WARN_ON(journal_current_handle());
835 status
= ocfs2_extend_no_holes(gqinode
, NULL
,
836 i_size_read(gqinode
) + (need_alloc
<< sb
->s_blocksize_bits
),
837 i_size_read(gqinode
));
842 handle
= ocfs2_start_trans(osb
,
843 ocfs2_calc_global_qinit_credits(sb
, type
));
844 if (IS_ERR(handle
)) {
845 status
= PTR_ERR(handle
);
848 status
= ocfs2_qinfo_lock(info
, ex
);
851 status
= qtree_write_dquot(&info
->dqi_gi
, dquot
);
852 if (ex
&& info_dirty(sb_dqinfo(sb
, type
))) {
853 err
= __ocfs2_global_write_info(sb
, type
);
857 ocfs2_qinfo_unlock(info
, ex
);
859 ocfs2_commit_trans(osb
, handle
);
861 ocfs2_unlock_global_qf(info
, 1);
865 status
= ocfs2_create_local_dquot(dquot
);
868 set_bit(DQ_ACTIVE_B
, &dquot
->dq_flags
);
870 mutex_unlock(&dquot
->dq_lock
);
/*
 * NOTE(review): extraction-garbled fragment of ocfs2_get_next_id().
 * Looks up the next allocated quota id at or after *qid in the global
 * quota tree, under the global and qinfo locks.  The status
 * declaration, goto labels and return are missing from this capture —
 * restore from upstream before editing.
 */
876 static int ocfs2_get_next_id(struct super_block
*sb
, struct kqid
*qid
)
878 int type
= qid
->type
;
879 struct ocfs2_mem_dqinfo
*info
= sb_dqinfo(sb
, type
)->dqi_priv
;
882 trace_ocfs2_get_next_id(from_kqid(&init_user_ns
, *qid
), type
);
883 if (!sb_has_quota_loaded(sb
, type
)) {
887 status
= ocfs2_lock_global_qf(info
, 0);
890 status
= ocfs2_qinfo_lock(info
, 0);
893 status
= qtree_get_next_id(&info
->dqi_gi
, qid
);
894 ocfs2_qinfo_unlock(info
, 0);
896 ocfs2_unlock_global_qf(info
, 0);
899 * Avoid logging ENOENT since it just means there isn't next ID and
900 * ESRCH which means quota isn't enabled for the filesystem.
902 if (status
&& status
!= -ENOENT
&& status
!= -ESRCH
)
/*
 * NOTE(review): extraction-garbled fragment of ocfs2_mark_dquot_dirty().
 * If the administrator changed limits (any DQ_LASTSET_B + QIF_* bit in
 * 'mask' is set), the dquot is synced to the global file immediately;
 * otherwise only the local structure is written via ocfs2_write_dquot().
 * The 'sync'/'status'/'handle' declarations, the "sync = 1" assignment,
 * goto labels and return are missing from this capture — restore from
 * upstream before editing.
 */
907 static int ocfs2_mark_dquot_dirty(struct dquot
*dquot
)
909 unsigned long mask
= (1 << (DQ_LASTSET_B
+ QIF_ILIMITS_B
)) |
910 (1 << (DQ_LASTSET_B
+ QIF_BLIMITS_B
)) |
911 (1 << (DQ_LASTSET_B
+ QIF_INODES_B
)) |
912 (1 << (DQ_LASTSET_B
+ QIF_SPACE_B
)) |
913 (1 << (DQ_LASTSET_B
+ QIF_BTIME_B
)) |
914 (1 << (DQ_LASTSET_B
+ QIF_ITIME_B
));
917 struct super_block
*sb
= dquot
->dq_sb
;
918 int type
= dquot
->dq_id
.type
;
919 struct ocfs2_mem_dqinfo
*oinfo
= sb_dqinfo(sb
, type
)->dqi_priv
;
921 struct ocfs2_super
*osb
= OCFS2_SB(sb
);
923 trace_ocfs2_mark_dquot_dirty(from_kqid(&init_user_ns
, dquot
->dq_id
),
926 /* In case user set some limits, sync dquot immediately to global
927 * quota file so that information propagates quicker */
928 spin_lock(&dquot
->dq_dqb_lock
);
929 if (dquot
->dq_flags
& mask
)
931 spin_unlock(&dquot
->dq_dqb_lock
);
932 /* This is a slight hack but we can't afford getting global quota
933 * lock if we already have a transaction started. */
934 if (!sync
|| journal_current_handle()) {
935 status
= ocfs2_write_dquot(dquot
);
938 status
= ocfs2_lock_global_qf(oinfo
, 1);
941 handle
= ocfs2_start_trans(osb
, OCFS2_QSYNC_CREDITS
);
942 if (IS_ERR(handle
)) {
943 status
= PTR_ERR(handle
);
947 down_write(&sb_dqopt(sb
)->dqio_sem
);
948 status
= ocfs2_sync_dquot(dquot
);
953 /* Now write updated local dquot structure */
954 status
= ocfs2_local_write_dquot(dquot
);
956 up_write(&sb_dqopt(sb
)->dqio_sem
);
957 ocfs2_commit_trans(osb
, handle
);
959 ocfs2_unlock_global_qf(oinfo
, 1);
966 /* This should happen only after set_dqinfo(). */
/*
 * NOTE(review): extraction-garbled fragment of ocfs2_write_info().
 * Commits the in-memory quota info to disk inside a transaction, under
 * the global quota file lock.  The handle/status declarations, goto
 * labels and return are missing from this capture — restore from
 * upstream before editing.
 */
967 static int ocfs2_write_info(struct super_block
*sb
, int type
)
971 struct ocfs2_mem_dqinfo
*oinfo
= sb_dqinfo(sb
, type
)->dqi_priv
;
973 status
= ocfs2_lock_global_qf(oinfo
, 1);
976 handle
= ocfs2_start_trans(OCFS2_SB(sb
), OCFS2_QINFO_WRITE_CREDITS
);
977 if (IS_ERR(handle
)) {
978 status
= PTR_ERR(handle
);
982 status
= dquot_commit_info(sb
, type
);
983 ocfs2_commit_trans(OCFS2_SB(sb
), handle
);
985 ocfs2_unlock_global_qf(oinfo
, 1);
992 static struct dquot
*ocfs2_alloc_dquot(struct super_block
*sb
, int type
)
994 struct ocfs2_dquot
*dquot
=
995 kmem_cache_zalloc(ocfs2_dquot_cachep
, GFP_NOFS
);
999 return &dquot
->dq_dquot
;
1002 static void ocfs2_destroy_dquot(struct dquot
*dquot
)
1004 kmem_cache_free(ocfs2_dquot_cachep
, dquot
);
/*
 * NOTE(review): dquot_operations table wired into the generic quota
 * layer.  The closing "};" of this initializer lies beyond the end of
 * this capture.
 */
1007 const struct dquot_operations ocfs2_quota_operations
= {
1008 /* We never make dquot dirty so .write_dquot is never called */
1009 .acquire_dquot
= ocfs2_acquire_dquot
,
1010 .release_dquot
= ocfs2_release_dquot
,
1011 .mark_dirty
= ocfs2_mark_dquot_dirty
,
1012 .write_info
= ocfs2_write_info
,
1013 .alloc_dquot
= ocfs2_alloc_dquot
,
1014 .destroy_dquot
= ocfs2_destroy_dquot
,
1015 .get_next_id
= ocfs2_get_next_id
,