// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/statfs.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/time.h>
#include <linux/wait.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/kernel.h>

enum evict_behavior {
	EVICT_SHOULD_DELETE,
	EVICT_SHOULD_SKIP_DELETE,
	EVICT_SHOULD_DEFER_DELETE,
};

/**
 * gfs2_jindex_free - Clear all the journal index information
 * @sdp: The GFS2 superblock
 */
void gfs2_jindex_free(struct gfs2_sbd *sdp)
{
	struct list_head list;
	struct gfs2_jdesc *jd;
	spin_lock(&sdp->sd_jindex_spin);
	list_add(&list, &sdp->sd_jindex_list);
	list_del_init(&sdp->sd_jindex_list);
	spin_unlock(&sdp->sd_jindex_spin);

	down_write(&sdp->sd_log_flush_lock);
	up_write(&sdp->sd_log_flush_lock);

	while (!list_empty(&list)) {
		jd = list_first_entry(&list, struct gfs2_jdesc, jd_list);
		BUG_ON(jd->jd_log_bio);
		gfs2_free_journal_extents(jd);
		list_del(&jd->jd_list);
		iput(jd->jd_inode);
		kfree(jd);
	}
}

static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid)
{
	struct gfs2_jdesc *jd;

	list_for_each_entry(jd, head, jd_list) {
		if (jd->jd_jid == jid)
			return jd;
	}
	return NULL;
}

struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid)
{
	struct gfs2_jdesc *jd;

	spin_lock(&sdp->sd_jindex_spin);
	jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
	spin_unlock(&sdp->sd_jindex_spin);

	return jd;
}

int gfs2_jdesc_check(struct gfs2_jdesc *jd)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	u64 size = i_size_read(jd->jd_inode);

	if (gfs2_check_internal_file_size(jd->jd_inode, 8 << 20, BIT(30)))
		return -EIO;

	jd->jd_blocks = size >> sdp->sd_sb.sb_bsize_shift;
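
	/*
	 * For example, with a 4KiB block size (sb_bsize_shift == 12), a
	 * 128MiB journal passes the 8MiB..1GiB size check above and ends
	 * up with jd_blocks == (128 << 20) >> 12 == 32768.
	 */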

	if (gfs2_write_alloc_required(ip, 0, size)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	return 0;
}

/**
 * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
 * @sdp: the filesystem
 *
 * Returns: errno
 */
int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
	if (gfs2_withdrawing_or_withdrawn(sdp))
		return -EIO;

	error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
	if (error)
		return error;

	if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
		gfs2_consist(sdp);
		return -EIO;
	}

	/* Initialize some head of the log stuff */
	sdp->sd_log_sequence = head.lh_sequence + 1;
	gfs2_log_pointers_init(sdp, head.lh_blkno);

	error = gfs2_quota_init(sdp);
	if (!error && gfs2_withdrawing_or_withdrawn(sdp))
		error = -EIO;
	if (!error)
		set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	return error;
}

void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
{
	const struct gfs2_statfs_change *str = buf;

	sc->sc_total = be64_to_cpu(str->sc_total);
	sc->sc_free = be64_to_cpu(str->sc_free);
	sc->sc_dinodes = be64_to_cpu(str->sc_dinodes);
}

void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf)
{
	struct gfs2_statfs_change *str = buf;

	str->sc_total = cpu_to_be64(sc->sc_total);
	str->sc_free = cpu_to_be64(sc->sc_free);
	str->sc_dinodes = cpu_to_be64(sc->sc_dinodes);
}

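/*
 * These two helpers are each other's inverse: _in decodes the on-disk,
 * big-endian change record into CPU byte order, and _out encodes it back,
 * so gfs2_statfs_change_out() followed by gfs2_statfs_change_in() on the
 * same buffer leaves the host structure unchanged.
 */
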
int gfs2_statfs_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh;
	struct gfs2_holder gh;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out;

	if (sdp->sd_args.ar_spectator) {
		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);
	} else {
		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		gfs2_statfs_change_in(l_sc, sdp->sd_sc_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);
	}

	brelse(m_bh);
out:
	gfs2_glock_dq_uninit(&gh);
	return error;
}

void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
			s64 dinodes)
{
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	s64 x, y;
	int need_sync = 0;

	gfs2_trans_add_meta(l_ip->i_gl, sdp->sd_sc_bh);

	spin_lock(&sdp->sd_statfs_spin);
	l_sc->sc_total += total;
	l_sc->sc_free += free;
	l_sc->sc_dinodes += dinodes;
	gfs2_statfs_change_out(l_sc, sdp->sd_sc_bh->b_data +
			       sizeof(struct gfs2_dinode));
	if (sdp->sd_args.ar_statfs_percent) {
		x = 100 * l_sc->sc_free;
		y = m_sc->sc_free * sdp->sd_args.ar_statfs_percent;
		if (x >= y || x <= -y)
			need_sync = 1;
	}
	spin_unlock(&sdp->sd_statfs_spin);

	if (need_sync)
		gfs2_wake_up_statfs(sdp);
}

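/*
 * The ar_statfs_percent test above triggers a sync once the unsynced local
 * free-block delta reaches that percentage of the master free count: with
 * ar_statfs_percent == 50 and m_sc->sc_free == 1000, for instance, a local
 * delta of +/-500 satisfies |100 * 500| >= 50 * 1000 and wakes the statfs
 * sync thread.
 */
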
void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

	gfs2_trans_add_meta(l_ip->i_gl, sdp->sd_sc_bh);
	gfs2_trans_add_meta(m_ip->i_gl, m_bh);

	spin_lock(&sdp->sd_statfs_spin);
	m_sc->sc_total += l_sc->sc_total;
	m_sc->sc_free += l_sc->sc_free;
	m_sc->sc_dinodes += l_sc->sc_dinodes;
	memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
	memset(sdp->sd_sc_bh->b_data + sizeof(struct gfs2_dinode),
	       0, sizeof(struct gfs2_statfs_change));
	gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
	spin_unlock(&sdp->sd_statfs_spin);
}

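/*
 * This is the other half of the scheme used by gfs2_statfs_change(): each
 * node accumulates deltas in its local change record, and update_statfs()
 * periodically folds them into the master record and zeroes them again, so
 * the master block converges on the true totals without every node
 * contending for it on each allocation.
 */
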
int gfs2_statfs_sync(struct super_block *sb, int type)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct gfs2_holder gh;
	struct buffer_head *m_bh;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		goto out;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out_unlock;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (!l_sc->sc_total && !l_sc->sc_free && !l_sc->sc_dinodes) {
		spin_unlock(&sdp->sd_statfs_spin);
		goto out_bh;
	}
	spin_unlock(&sdp->sd_statfs_spin);

	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
	if (error)
		goto out_bh;

	update_statfs(sdp, m_bh);
	sdp->sd_statfs_force_sync = 0;

	gfs2_trans_end(sdp);

out_bh:
	brelse(m_bh);
out_unlock:
	gfs2_glock_dq_uninit(&gh);
out:
	return error;
}

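/*
 * Note the early exit above: if the local change record is all zeroes there
 * is nothing to fold into the master copy, so the sync returns before
 * opening a transaction; otherwise it costs one journaled update of each of
 * the two dinode blocks (hence 2 * RES_DINODE).
 */
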
struct lfcc {
	struct list_head list;
	struct gfs2_holder gh;
};

/**
 * gfs2_lock_fs_check_clean - Stop all writes to the FS and check that all
 *                            journals are clean
 * @sdp: the file system
 *
 * Returns: errno
 */
static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;
	struct gfs2_jdesc *jd;
	struct lfcc *lfcc;
	LIST_HEAD(list);
	struct gfs2_log_header_host lh;
	int error, error2;

	/*
	 * Grab all the journal glocks in SH mode. We are *probably* doing
	 * that to prevent recovery.
	 */

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
		if (!lfcc) {
			error = -ENOMEM;
			goto out;
		}
		ip = GFS2_I(jd->jd_inode);
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &lfcc->gh);
		if (error) {
			kfree(lfcc);
			goto out;
		}
		list_add(&lfcc->list, &list);
	}

	gfs2_freeze_unlock(sdp);

	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
				   LM_FLAG_NOEXP | GL_NOPID,
				   &sdp->sd_freeze_gh);
	if (error)
		goto relock_shared;

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		error = gfs2_jdesc_check(jd);
		if (error)
			break;
		error = gfs2_find_jhead(jd, &lh, false);
		if (error)
			break;
		if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
			error = -EBUSY;
			break;
		}
	}

	if (!error)
		goto out; /* success */

	gfs2_freeze_unlock(sdp);

relock_shared:
	error2 = gfs2_freeze_lock_shared(sdp);
	gfs2_assert_withdraw(sdp, !error2);

out:
	while (!list_empty(&list)) {
		lfcc = list_first_entry(&list, struct lfcc, list);
		list_del(&lfcc->list);
		gfs2_glock_dq_uninit(&lfcc->gh);
		kfree(lfcc);
	}
	return error;
}

void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
{
	const struct inode *inode = &ip->i_inode;
	struct gfs2_dinode *str = buf;

	str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
	str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
	str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
	str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
	str->di_mode = cpu_to_be32(inode->i_mode);
	str->di_uid = cpu_to_be32(i_uid_read(inode));
	str->di_gid = cpu_to_be32(i_gid_read(inode));
	str->di_nlink = cpu_to_be32(inode->i_nlink);
	str->di_size = cpu_to_be64(i_size_read(inode));
	str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(inode));
	str->di_atime = cpu_to_be64(inode_get_atime_sec(inode));
	str->di_mtime = cpu_to_be64(inode_get_mtime_sec(inode));
	str->di_ctime = cpu_to_be64(inode_get_ctime_sec(inode));

	str->di_goal_meta = cpu_to_be64(ip->i_goal);
	str->di_goal_data = cpu_to_be64(ip->i_goal);
	str->di_generation = cpu_to_be64(ip->i_generation);

	str->di_flags = cpu_to_be32(ip->i_diskflags);
	str->di_height = cpu_to_be16(ip->i_height);
	str->di_payload_format = cpu_to_be32(S_ISDIR(inode->i_mode) &&
					     !(ip->i_diskflags & GFS2_DIF_EXHASH) ?
					     GFS2_FORMAT_DE : 0);
	str->di_depth = cpu_to_be16(ip->i_depth);
	str->di_entries = cpu_to_be32(ip->i_entries);

	str->di_eattr = cpu_to_be64(ip->i_eattr);
	str->di_atime_nsec = cpu_to_be32(inode_get_atime_nsec(inode));
	str->di_mtime_nsec = cpu_to_be32(inode_get_mtime_nsec(inode));
	str->di_ctime_nsec = cpu_to_be32(inode_get_ctime_nsec(inode));
}

/**
 * gfs2_write_inode - Make sure the inode is stable on the disk
 * @inode: The inode
 * @wbc: The writeback control structure
 *
 * Returns: errno
 */
static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl);
	struct backing_dev_info *bdi = inode_to_bdi(metamapping->host);
	int ret = 0;
	bool flush_all = (wbc->sync_mode == WB_SYNC_ALL || gfs2_is_jdata(ip));

	if (flush_all)
		gfs2_log_flush(GFS2_SB(inode), ip->i_gl,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_WRITE_INODE);
	if (bdi->wb.dirty_exceeded)
		gfs2_ail1_flush(sdp, wbc);
	else
		filemap_fdatawrite(metamapping);
	if (flush_all)
		ret = filemap_fdatawait(metamapping);
	if (ret)
		mark_inode_dirty_sync(inode);
	else {
		spin_lock(&inode->i_lock);
		if (!(inode->i_flags & I_DIRTY))
			gfs2_ordered_del_inode(ip);
		spin_unlock(&inode->i_lock);
	}
	return ret;
}

/**
 * gfs2_dirty_inode - check for atime updates
 * @inode: The inode in question
 * @flags: The type of dirty
 *
 * Unfortunately it can be called under any combination of inode
 * glock and freeze glock, so we have to check carefully.
 *
 * At the moment this deals only with atime - it should be possible
 * to expand that role in future, once a review of the locking has
 * been carried out.
 */
static void gfs2_dirty_inode(struct inode *inode, int flags)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int need_unlock = 0;
	int need_endtrans = 0;
	int ret;

	if (unlikely(!ip->i_gl)) {
		/* This can only happen during incomplete inode creation. */
		BUG_ON(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags));
		return;
	}

	if (gfs2_withdrawing_or_withdrawn(sdp))
		return;
	if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
		if (ret) {
			fs_err(sdp, "dirty_inode: glock %d\n", ret);
			gfs2_dump_glock(NULL, ip->i_gl, true);
			return;
		}
		need_unlock = 1;
	} else if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE))
		return;

	if (current->journal_info == NULL) {
		ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
		if (ret) {
			fs_err(sdp, "dirty_inode: gfs2_trans_begin %d\n", ret);
			goto out;
		}
		need_endtrans = 1;
	}

	ret = gfs2_meta_inode_buffer(ip, &bh);
	if (ret == 0) {
		gfs2_trans_add_meta(ip->i_gl, bh);
		gfs2_dinode_out(ip, bh->b_data);
		brelse(bh);
	}

	if (need_endtrans)
		gfs2_trans_end(sdp);
out:
	if (need_unlock)
		gfs2_glock_dq_uninit(&gh);
}

/**
 * gfs2_make_fs_ro - Turn a Read-Write FS into a Read-Only one
 * @sdp: the filesystem
 */
void gfs2_make_fs_ro(struct gfs2_sbd *sdp)
{
	int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	if (!test_bit(SDF_KILL, &sdp->sd_flags))
		gfs2_flush_delete_work(sdp);

	gfs2_destroy_threads(sdp);

	if (log_write_allowed) {
		gfs2_quota_sync(sdp->sd_vfs, 0);
		gfs2_statfs_sync(sdp->sd_vfs, 0);

		/* We do two log flushes here. The first one commits dirty inodes
		 * and rgrps to the journal, but queues up revokes to the ail list.
		 * The second flush writes out and removes the revokes.
		 *
		 * The first must be done before the FLUSH_SHUTDOWN code
		 * clears the LIVE flag, otherwise it will not be able to start
		 * a transaction to write its revokes, and the error will cause
		 * a withdraw of the file system. */
		gfs2_log_flush(sdp, NULL, GFS2_LFC_MAKE_FS_RO);
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
			       GFS2_LFC_MAKE_FS_RO);
		wait_event_timeout(sdp->sd_log_waitq,
				   gfs2_log_is_empty(sdp),
				   HZ * 5);
		gfs2_assert_warn(sdp, gfs2_log_is_empty(sdp));
	}
	gfs2_quota_cleanup(sdp);
}

/**
 * gfs2_put_super - Unmount the filesystem
 * @sb: The VFS superblock
 */
static void gfs2_put_super(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_jdesc *jd;

	/* No more recovery requests */
	set_bit(SDF_NORECOVERY, &sdp->sd_flags);
	smp_mb();

	/* Wait on outstanding recovery */
restart:
	spin_lock(&sdp->sd_jindex_spin);
	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		if (!test_bit(JDF_RECOVERY, &jd->jd_flags))
			continue;
		spin_unlock(&sdp->sd_jindex_spin);
		wait_on_bit(&jd->jd_flags, JDF_RECOVERY,
			    TASK_UNINTERRUPTIBLE);
		goto restart;
	}
	spin_unlock(&sdp->sd_jindex_spin);

	if (!sb_rdonly(sb))
		gfs2_make_fs_ro(sdp);
	else {
		if (gfs2_withdrawing_or_withdrawn(sdp))
			gfs2_destroy_threads(sdp);

		gfs2_quota_cleanup(sdp);
	}

	WARN_ON(gfs2_withdrawing(sdp));

	/* At this point, we're through modifying the disk */

	gfs2_freeze_unlock(sdp);

	iput(sdp->sd_jindex);
	iput(sdp->sd_statfs_inode);
	iput(sdp->sd_rindex);
	iput(sdp->sd_quota_inode);

	gfs2_glock_put(sdp->sd_rename_gl);
	gfs2_glock_put(sdp->sd_freeze_gl);

	if (!sdp->sd_args.ar_spectator) {
		if (gfs2_holder_initialized(&sdp->sd_journal_gh))
			gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
		if (gfs2_holder_initialized(&sdp->sd_jinode_gh))
			gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
		brelse(sdp->sd_sc_bh);
		gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
		gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
		free_local_statfs_inodes(sdp);
		iput(sdp->sd_qc_inode);
	}

	gfs2_glock_dq_uninit(&sdp->sd_live_gh);
	gfs2_clear_rgrpd(sdp);
	gfs2_jindex_free(sdp);
	/* Take apart glock structures and buffer lists */
	gfs2_gl_hash_clear(sdp);
	truncate_inode_pages_final(&sdp->sd_aspace);
	gfs2_delete_debugfs_file(sdp);

	gfs2_sys_fs_del(sdp);
}

/**
 * gfs2_sync_fs - sync the filesystem
 * @sb: the superblock
 * @wait: true to wait for completion
 *
 * Flushes the log to disk.
 */
static int gfs2_sync_fs(struct super_block *sb, int wait)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	gfs2_quota_sync(sb, -1);
	if (wait)
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_SYNC_FS);
	return sdp->sd_log_error;
}

static int gfs2_do_thaw(struct gfs2_sbd *sdp)
{
	struct super_block *sb = sdp->sd_vfs;
	int error;

	error = gfs2_freeze_lock_shared(sdp);
	if (error)
		goto fail;
	error = thaw_super(sb, FREEZE_HOLDER_USERSPACE);
	if (!error)
		return 0;

fail:
	fs_info(sdp, "GFS2: couldn't thaw filesystem: %d\n", error);
	gfs2_assert_withdraw(sdp, 0);
	return error;
}

void gfs2_freeze_func(struct work_struct *work)
{
	struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_freeze_work);
	struct super_block *sb = sdp->sd_vfs;
	int error;

	mutex_lock(&sdp->sd_freeze_mutex);
	error = -EBUSY;
	if (test_bit(SDF_FROZEN, &sdp->sd_flags))
		goto freeze_failed;

	error = freeze_super(sb, FREEZE_HOLDER_USERSPACE);
	if (error)
		goto freeze_failed;

	gfs2_freeze_unlock(sdp);
	set_bit(SDF_FROZEN, &sdp->sd_flags);

	error = gfs2_do_thaw(sdp);
	if (error)
		goto out;

	clear_bit(SDF_FROZEN, &sdp->sd_flags);
	goto out;

freeze_failed:
	fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n", error);

out:
	mutex_unlock(&sdp->sd_freeze_mutex);
	deactivate_super(sb);
}

/**
 * gfs2_freeze_super - prevent further writes to the filesystem
 * @sb: the VFS structure for the filesystem
 * @who: freeze flags
 */
static int gfs2_freeze_super(struct super_block *sb, enum freeze_holder who)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int error;

	if (!mutex_trylock(&sdp->sd_freeze_mutex))
		return -EBUSY;
	if (test_bit(SDF_FROZEN, &sdp->sd_flags)) {
		mutex_unlock(&sdp->sd_freeze_mutex);
		return -EBUSY;
	}

	for (;;) {
		error = freeze_super(sb, FREEZE_HOLDER_USERSPACE);
		if (error) {
			fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
				error);
			goto out;
		}

		error = gfs2_lock_fs_check_clean(sdp);
		if (!error) {
			set_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags);
			set_bit(SDF_FROZEN, &sdp->sd_flags);
			break;
		}

		error = gfs2_do_thaw(sdp);
		if (error)
			goto out;

		if (error == -EBUSY)
			fs_err(sdp, "waiting for recovery before freeze\n");
		else if (error == -EIO) {
			fs_err(sdp, "Fatal IO error: cannot freeze gfs2 due "
			       "to recovery error.\n");
			goto out;
		} else {
			fs_err(sdp, "error freezing FS: %d\n", error);
		}
		fs_err(sdp, "retrying...\n");
		msleep(1000);
	}

out:
	mutex_unlock(&sdp->sd_freeze_mutex);
	return error;
}

static int gfs2_freeze_fs(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
			       GFS2_LFC_FREEZE_GO_SYNC);
		if (gfs2_withdrawing_or_withdrawn(sdp))
			return -EIO;
	}
	return 0;
}

/**
 * gfs2_thaw_super - reallow writes to the filesystem
 * @sb: the VFS structure for the filesystem
 * @who: freeze flags
 */
static int gfs2_thaw_super(struct super_block *sb, enum freeze_holder who)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int error;

	if (!mutex_trylock(&sdp->sd_freeze_mutex))
		return -EBUSY;
	if (!test_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags)) {
		mutex_unlock(&sdp->sd_freeze_mutex);
		return -EINVAL;
	}

	atomic_inc(&sb->s_active);
	gfs2_freeze_unlock(sdp);

	error = gfs2_do_thaw(sdp);

	if (!error) {
		clear_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags);
		clear_bit(SDF_FROZEN, &sdp->sd_flags);
	}
	mutex_unlock(&sdp->sd_freeze_mutex);
	deactivate_super(sb);
	return error;
}

void gfs2_thaw_freeze_initiator(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	mutex_lock(&sdp->sd_freeze_mutex);
	if (!test_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags))
		goto out;

	gfs2_freeze_unlock(sdp);

out:
	mutex_unlock(&sdp->sd_freeze_mutex);
}

/**
 * statfs_slow_fill - fill in the sc for a given RG
 * @rgd: the RG
 * @sc: the sc structure
 *
 * Returns: 0 on success, -ESTALE if the LVB is invalid
 */
static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
			    struct gfs2_statfs_change_host *sc)
{
	gfs2_rgrp_verify(rgd);
	sc->sc_total += rgd->rd_data;
	sc->sc_free += rgd->rd_free;
	sc->sc_dinodes += rgd->rd_dinodes;
	return 0;
}

/**
 * gfs2_statfs_slow - Stat a filesystem using asynchronous locking
 * @sdp: the filesystem
 * @sc: the sc info that will be returned
 *
 * Any error (other than a signal) will cause this routine to fall back
 * to the synchronous version.
 *
 * FIXME: This really shouldn't busy wait like this.
 *
 * Returns: errno
 */
static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_rgrpd *rgd_next;
	struct gfs2_holder *gha, *gh;
	unsigned int slots = 64;
	unsigned int x;
	int done;
	int error = 0, err;

	memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
	gha = kmalloc_array(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
	if (!gha)
		return -ENOMEM;
	for (x = 0; x < slots; x++)
		gfs2_holder_mark_uninitialized(gha + x);

	rgd_next = gfs2_rgrpd_get_first(sdp);

	for (;;) {
		done = 1;

		for (x = 0; x < slots; x++) {
			gh = gha + x;

			if (gfs2_holder_initialized(gh) && gfs2_glock_poll(gh)) {
				err = gfs2_glock_wait(gh);
				if (err) {
					gfs2_holder_uninit(gh);
					error = err;
				} else {
					if (!error) {
						struct gfs2_rgrpd *rgd =
							gfs2_glock2rgrp(gh->gh_gl);

						error = statfs_slow_fill(rgd, sc);
					}
					gfs2_glock_dq_uninit(gh);
				}
			}

			if (gfs2_holder_initialized(gh))
				done = 0;
			else if (rgd_next && !error) {
				error = gfs2_glock_nq_init(rgd_next->rd_gl,
							   LM_ST_SHARED,
							   GL_ASYNC,
							   gh);
				rgd_next = gfs2_rgrpd_get_next(rgd_next);
				done = 0;
			}

			if (signal_pending(current))
				error = -ERESTARTSYS;
		}

		if (done)
			break;

		yield();
	}

	kfree(gha);
	return error;
}

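/*
 * The slow path keeps a window of up to 64 asynchronous glock requests in
 * flight: each pass over the slots harvests completed locks into the
 * running totals, re-arms free slots with the next resource group, and the
 * outer loop only finishes once every slot has drained.
 */
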
/**
 * gfs2_statfs_i - Do a statfs
 * @sdp: the filesystem
 * @sc: the sc structure
 *
 * Returns: errno
 */
static int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

	spin_lock(&sdp->sd_statfs_spin);

	*sc = *m_sc;
	sc->sc_total += l_sc->sc_total;
	sc->sc_free += l_sc->sc_free;
	sc->sc_dinodes += l_sc->sc_dinodes;

	spin_unlock(&sdp->sd_statfs_spin);

	if (sc->sc_free < 0)
		sc->sc_free = 0;
	if (sc->sc_free > sc->sc_total)
		sc->sc_free = sc->sc_total;
	if (sc->sc_dinodes < 0)
		sc->sc_dinodes = 0;

	return 0;
}

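/*
 * The clamping above guards against transient inconsistency: the master
 * record plus this node's unsynced deltas can briefly disagree with the
 * not-yet-folded-in deltas of other nodes, so the combined counts are
 * forced back into a sane range before being reported.
 */
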
/**
 * gfs2_statfs - Gather and return stats about the filesystem
 * @dentry: The name of the link
 * @buf: The buffer
 *
 * Returns: 0 on success or error code
 */
static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_statfs_change_host sc;
	int error;

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	if (gfs2_tune_get(sdp, gt_statfs_slow))
		error = gfs2_statfs_slow(sdp, &sc);
	else
		error = gfs2_statfs_i(sdp, &sc);
	if (error)
		return error;

	buf->f_type = GFS2_MAGIC;
	buf->f_bsize = sdp->sd_sb.sb_bsize;
	buf->f_blocks = sc.sc_total;
	buf->f_bfree = sc.sc_free;
	buf->f_bavail = sc.sc_free;
	buf->f_files = sc.sc_dinodes + sc.sc_free;
	buf->f_ffree = sc.sc_free;
	buf->f_namelen = GFS2_FNAMESIZE;
	buf->f_fsid = uuid_to_fsid(sb->s_uuid.b);

	return 0;
}

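/*
 * Note how the inode counts are derived above: GFS2 has no static inode
 * table, and any free block can become a dinode, so f_files is reported as
 * the dinodes in use plus the free block count, and f_ffree as the free
 * block count itself.
 */
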
/**
 * gfs2_drop_inode - Drop an inode (test for remote unlink)
 * @inode: The inode to drop
 *
 * If we've received a callback on an iopen lock then it's because a
 * remote node tried to deallocate the inode but failed due to this node
 * still having the inode open. Here we mark the link count zero
 * since we know that it must have reached zero if the GLF_DEMOTE flag
 * is set on the iopen glock. If we didn't do a disk read since the
 * remote node removed the final link then we might otherwise miss
 * this event. This check ensures that this node will deallocate the
 * inode's blocks, or alternatively pass the baton on to another
 * node for later deallocation.
 */
static int gfs2_drop_inode(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (inode->i_nlink &&
	    gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
		if (glock_needs_demote(gl))
			clear_nlink(inode);
	}

	/*
	 * If an inode's link count has dropped to zero while we are under
	 * memory pressure, defer deleting the inode to the delete workqueue.
	 * This avoids calling into DLM under memory pressure, which can
	 * deadlock.
	 */
	if (!inode->i_nlink &&
	    unlikely(current->flags & PF_MEMALLOC) &&
	    gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;

		gfs2_glock_hold(gl);
		if (!gfs2_queue_verify_delete(gl, true))
			gfs2_glock_put_async(gl);
		return 0;
	}

	/*
	 * No longer cache inodes when trying to evict them all.
	 */
	if (test_bit(SDF_EVICTING, &sdp->sd_flags))
		return 1;

	return generic_drop_inode(inode);
}

/**
 * gfs2_show_options - Show mount options for /proc/mounts
 * @s: seq_file structure
 * @root: root of this (sub)tree
 *
 * Returns: 0 on success or error code
 */
static int gfs2_show_options(struct seq_file *s, struct dentry *root)
{
	struct gfs2_sbd *sdp = root->d_sb->s_fs_info;
	struct gfs2_args *args = &sdp->sd_args;
	unsigned int logd_secs, statfs_slow, statfs_quantum, quota_quantum;

	spin_lock(&sdp->sd_tune.gt_spin);
	logd_secs = sdp->sd_tune.gt_logd_secs;
	quota_quantum = sdp->sd_tune.gt_quota_quantum;
	statfs_quantum = sdp->sd_tune.gt_statfs_quantum;
	statfs_slow = sdp->sd_tune.gt_statfs_slow;
	spin_unlock(&sdp->sd_tune.gt_spin);

	if (is_subdir(root, sdp->sd_master_dir))
		seq_puts(s, ",meta");
	if (args->ar_lockproto[0])
		seq_show_option(s, "lockproto", args->ar_lockproto);
	if (args->ar_locktable[0])
		seq_show_option(s, "locktable", args->ar_locktable);
	if (args->ar_hostdata[0])
		seq_show_option(s, "hostdata", args->ar_hostdata);
	if (args->ar_spectator)
		seq_puts(s, ",spectator");
	if (args->ar_localflocks)
		seq_puts(s, ",localflocks");
	if (args->ar_debug)
		seq_puts(s, ",debug");
	if (args->ar_posix_acl)
		seq_puts(s, ",acl");
	if (args->ar_quota != GFS2_QUOTA_DEFAULT) {
		char *state;
		switch (args->ar_quota) {
		case GFS2_QUOTA_OFF:
			state = "off";
			break;
		case GFS2_QUOTA_ACCOUNT:
			state = "account";
			break;
		case GFS2_QUOTA_ON:
			state = "on";
			break;
		case GFS2_QUOTA_QUIET:
			state = "quiet";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",quota=%s", state);
	}
	if (args->ar_suiddir)
		seq_puts(s, ",suiddir");
	if (args->ar_data != GFS2_DATA_DEFAULT) {
		char *state;
		switch (args->ar_data) {
		case GFS2_DATA_WRITEBACK:
			state = "writeback";
			break;
		case GFS2_DATA_ORDERED:
			state = "ordered";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",data=%s", state);
	}
	if (args->ar_discard)
		seq_puts(s, ",discard");
	if (logd_secs != 30)
		seq_printf(s, ",commit=%d", logd_secs);
	if (statfs_quantum != 30)
		seq_printf(s, ",statfs_quantum=%d", statfs_quantum);
	else if (statfs_slow)
		seq_puts(s, ",statfs_quantum=0");
	if (quota_quantum != 60)
		seq_printf(s, ",quota_quantum=%d", quota_quantum);
	if (args->ar_statfs_percent)
		seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
	if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
		char *state;
		switch (args->ar_errors) {
		case GFS2_ERRORS_WITHDRAW:
			state = "withdraw";
			break;
		case GFS2_ERRORS_PANIC:
			state = "panic";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",errors=%s", state);
	}
	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
		seq_puts(s, ",nobarrier");
	if (test_bit(SDF_DEMOTE, &sdp->sd_flags))
		seq_puts(s, ",demote_interface_used");
	if (args->ar_rgrplvb)
		seq_puts(s, ",rgrplvb");
	if (args->ar_loccookie)
		seq_puts(s, ",loccookie");
	return 0;
}

static void gfs2_final_release_pages(struct gfs2_inode *ip)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_glock *gl = ip->i_gl;

	if (unlikely(!gl)) {
		/* This can only happen during incomplete inode creation. */
		BUG_ON(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags));
		return;
	}

	truncate_inode_pages(gfs2_glock2aspace(gl), 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (atomic_read(&gl->gl_revokes) == 0) {
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		clear_bit(GLF_DIRTY, &gl->gl_flags);
	}
}

static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder gh;
	int error;

	if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	gfs2_rindex_update(sdp);

	error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
	if (error)
		return error;

	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1);
	if (!rgd) {
		gfs2_consist_inode(ip);
		error = -EIO;
		goto out_qs;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
				   LM_FLAG_NODE_SCOPE, &gh);
	if (error)
		goto out_qs;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA,
				 sdp->sd_jdesc->jd_blocks);
	if (error)
		goto out_rg_gunlock;

	gfs2_free_di(rgd, ip);

	gfs2_final_release_pages(ip);

	gfs2_trans_end(sdp);

out_rg_gunlock:
	gfs2_glock_dq_uninit(&gh);
out_qs:
	gfs2_quota_unhold(ip);
	return error;
}

/**
 * gfs2_glock_put_eventually - put a glock, deferring if under memory pressure
 * @gl: The glock to put
 *
 * When under memory pressure, trigger a deferred glock put to make sure we
 * won't call into DLM and deadlock. Otherwise, put the glock directly.
 */
static void gfs2_glock_put_eventually(struct gfs2_glock *gl)
{
	if (current->flags & PF_MEMALLOC)
		gfs2_glock_put_async(gl);
	else
		gfs2_glock_put(gl);
}

static enum evict_behavior
gfs2_upgrade_iopen_glock(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder *gh = &ip->i_iopen_gh;
	int error;

	gh->gh_flags |= GL_NOCACHE;
	gfs2_glock_dq_wait(gh);

	/*
	 * If there are no other lock holders, we will immediately get
	 * exclusive access to the iopen glock here.
	 *
	 * Otherwise, the other nodes holding the lock will be notified about
	 * our locking request (see iopen_go_callback()). If they do not have
	 * the inode open, they are expected to evict the cached inode and
	 * release the lock, allowing us to proceed.
	 *
	 * Otherwise, if they cannot evict the inode, they are expected to poke
	 * the inode glock (note: not the iopen glock). We will notice that
	 * and stop waiting for the iopen glock immediately. The other node(s)
	 * are then expected to take care of deleting the inode when they no
	 * longer use it.
	 *
	 * As a last resort, if another node keeps holding the iopen glock
	 * without showing any activity on the inode glock, we will eventually
	 * time out and fail the iopen glock upgrade.
	 */

	gfs2_holder_reinit(LM_ST_EXCLUSIVE, GL_ASYNC | GL_NOCACHE, gh);
	error = gfs2_glock_nq(gh);
	if (error)
		return EVICT_SHOULD_SKIP_DELETE;

	wait_event_interruptible_timeout(sdp->sd_async_glock_wait,
		!test_bit(HIF_WAIT, &gh->gh_iflags) ||
		glock_needs_demote(ip->i_gl),
		5 * HZ);
	if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) {
		gfs2_glock_dq(gh);
		if (glock_needs_demote(ip->i_gl))
			return EVICT_SHOULD_SKIP_DELETE;
		return EVICT_SHOULD_DEFER_DELETE;
	}
	error = gfs2_glock_holder_ready(gh);
	if (error)
		return EVICT_SHOULD_SKIP_DELETE;
	return EVICT_SHOULD_DELETE;
}

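/*
 * To summarize the outcomes: acquiring the lock means we were the last
 * user and may delete (EVICT_SHOULD_DELETE); a poke on the inode glock
 * means another node will handle it (EVICT_SHOULD_SKIP_DELETE); a timeout
 * with the request still queued defers the work (EVICT_SHOULD_DEFER_DELETE).
 */
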
/**
 * evict_should_delete - determine whether the inode is eligible for deletion
 * @inode: The inode to evict
 * @gh: The glock holder structure
 *
 * This function determines whether the evicted inode is eligible to be deleted
 * and locks the inode glock.
 *
 * Returns: the fate of the dinode
 */
static enum evict_behavior evict_should_delete(struct inode *inode,
					       struct gfs2_holder *gh)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct super_block *sb = inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int ret;

	if (unlikely(test_bit(GIF_ALLOC_FAILED, &ip->i_flags)))
		goto should_delete;

	if (test_bit(GIF_DEFER_DELETE, &ip->i_flags))
		return EVICT_SHOULD_DEFER_DELETE;

	/* Deletes should never happen under memory pressure anymore. */
	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
		return EVICT_SHOULD_DEFER_DELETE;

	/* Must not read inode block until block type has been verified */
	ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, gh);
	if (unlikely(ret)) {
		glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
		return EVICT_SHOULD_DEFER_DELETE;
	}

	if (gfs2_inode_already_deleted(ip->i_gl, ip->i_no_formal_ino))
		return EVICT_SHOULD_SKIP_DELETE;
	ret = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
	if (ret)
		return EVICT_SHOULD_SKIP_DELETE;

	ret = gfs2_instantiate(gh);
	if (ret)
		return EVICT_SHOULD_SKIP_DELETE;

	/*
	 * The inode may have been recreated in the meantime.
	 */
	if (inode->i_nlink)
		return EVICT_SHOULD_SKIP_DELETE;

should_delete:
	if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
	    test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
		enum evict_behavior behavior =
			gfs2_upgrade_iopen_glock(inode);

		if (behavior != EVICT_SHOULD_DELETE) {
			gfs2_holder_uninit(&ip->i_iopen_gh);
			return behavior;
		}
	}
	return EVICT_SHOULD_DELETE;
}

/**
 * evict_unlinked_inode - delete the pieces of an unlinked evicted inode
 * @inode: The inode to evict
 */
static int evict_unlinked_inode(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret;

	if (S_ISDIR(inode->i_mode) &&
	    (ip->i_diskflags & GFS2_DIF_EXHASH)) {
		ret = gfs2_dir_exhash_dealloc(ip);
		if (ret)
			goto out;
	}

	if (ip->i_eattr) {
		ret = gfs2_ea_dealloc(ip);
		if (ret)
			goto out;
	}

	if (!gfs2_is_stuffed(ip)) {
		ret = gfs2_file_dealloc(ip);
		if (ret)
			goto out;
	}

	/*
	 * As soon as we clear the bitmap for the dinode, gfs2_create_inode()
	 * can get called to recreate it, or even gfs2_inode_lookup() if the
	 * inode was recreated on another node in the meantime.
	 *
	 * However, inserting the new inode into the inode hash table will not
	 * succeed until the old inode is removed, and that only happens after
	 * ->evict_inode() returns. The new inode is attached to its inode and
	 * iopen glocks after inserting it into the inode hash table, so at
	 * that point we can be sure that both glocks are unused.
	 */

	ret = gfs2_dinode_dealloc(ip);
	if (!ret && ip->i_gl)
		gfs2_inode_remember_delete(ip->i_gl, ip->i_no_formal_ino);

out:
	return ret;
}

/**
 * evict_linked_inode - evict an inode whose dinode has not been unlinked
 * @inode: The inode to evict
 */
static int evict_linked_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct address_space *metamapping;
	int ret;

	gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_EVICT_INODE);
	metamapping = gfs2_glock2aspace(ip->i_gl);
	if (test_bit(GLF_DIRTY, &ip->i_gl->gl_flags)) {
		filemap_fdatawrite(metamapping);
		filemap_fdatawait(metamapping);
	}
	write_inode_now(inode, 1);
	gfs2_ail_flush(ip->i_gl, 0);

	ret = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
	if (ret)
		return ret;

	/* Needs to be done before glock release & also in a transaction */
	truncate_inode_pages(&inode->i_data, 0);
	truncate_inode_pages(metamapping, 0);
	gfs2_trans_end(sdp);
	return 0;
}

/**
 * gfs2_evict_inode - Remove an inode from cache
 * @inode: The inode to evict
 *
 * There are three cases to consider:
 * 1. i_nlink == 0, we are the final opener (and must deallocate)
 * 2. i_nlink == 0, we are not the final opener (and cannot deallocate)
 * 3. i_nlink > 0
 *
 * If the fs is read only, then we have to treat all cases as per #3
 * since we are unable to do any deallocation. The inode will be
 * deallocated by the next read/write node to attempt an allocation
 * in the same resource group.
 *
 * We have to (at the moment) hold the inode's main lock to cover
 * the gap between unlocking the shared lock on the iopen lock and
 * taking the exclusive lock. I'd rather do a shared -> exclusive
 * conversion on the iopen lock, but we can change that later. This
 * is safe, just less efficient.
 */
static void gfs2_evict_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	enum evict_behavior behavior;
	int ret;

	gfs2_holder_mark_uninitialized(&gh);
	if (inode->i_nlink || sb_rdonly(sb) || !ip->i_no_addr)
		goto out;

	/*
	 * In case of an incomplete mount, gfs2_evict_inode() may be called for
	 * system files without having an active journal to write to. In that
	 * case, skip the filesystem evict.
	 */
	if (!sdp->sd_jdesc)
		goto out;

	behavior = evict_should_delete(inode, &gh);
	if (behavior == EVICT_SHOULD_DEFER_DELETE &&
	    !test_bit(SDF_KILL, &sdp->sd_flags)) {
		struct gfs2_glock *io_gl = ip->i_iopen_gh.gh_gl;

		if (io_gl) {
			gfs2_glock_hold(io_gl);
			if (!gfs2_queue_verify_delete(io_gl, true))
				gfs2_glock_put(io_gl);
			goto out;
		}
		behavior = EVICT_SHOULD_DELETE;
	}
	if (behavior == EVICT_SHOULD_DELETE)
		ret = evict_unlinked_inode(inode);
	else
		ret = evict_linked_inode(inode);

	if (gfs2_rs_active(&ip->i_res))
		gfs2_rs_deltree(&ip->i_res);

	if (ret && ret != GLR_TRYFAILED && ret != -EROFS)
		fs_warn(sdp, "gfs2_evict_inode: %d\n", ret);
out:
	if (gfs2_holder_initialized(&gh))
		gfs2_glock_dq_uninit(&gh);
	truncate_inode_pages_final(&inode->i_data);
	if (ip->i_qadata)
		gfs2_assert_warn(sdp, ip->i_qadata->qa_ref == 0);
	gfs2_rs_deltree(&ip->i_res);
	gfs2_ordered_del_inode(ip);
	clear_inode(inode);
	gfs2_dir_hash_inval(ip);
	if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;

		glock_clear_object(gl, ip);
		gfs2_glock_hold(gl);
		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
		gfs2_glock_put_eventually(gl);
	}
	if (ip->i_gl) {
		glock_clear_object(ip->i_gl, ip);
		wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
		gfs2_glock_put_eventually(ip->i_gl);
		rcu_assign_pointer(ip->i_gl, NULL);
	}
}

static struct inode *gfs2_alloc_inode(struct super_block *sb)
{
	struct gfs2_inode *ip;

	ip = alloc_inode_sb(sb, gfs2_inode_cachep, GFP_KERNEL);
	if (!ip)
		return NULL;
	ip->i_no_addr = 0;
	ip->i_no_formal_ino = 0;
	ip->i_flags = 0;
	ip->i_gl = NULL;
	gfs2_holder_mark_uninitialized(&ip->i_iopen_gh);
	memset(&ip->i_res, 0, sizeof(ip->i_res));
	RB_CLEAR_NODE(&ip->i_res.rs_node);
	ip->i_diskflags = 0;
	ip->i_rahead = 0;
	return &ip->i_inode;
}

static void gfs2_free_inode(struct inode *inode)
{
	kmem_cache_free(gfs2_inode_cachep, GFS2_I(inode));
}

void free_local_statfs_inodes(struct gfs2_sbd *sdp)
{
	struct local_statfs_inode *lsi, *safe;

	/* Run through the statfs inodes list to iput and free memory */
	list_for_each_entry_safe(lsi, safe, &sdp->sd_sc_inodes_list, si_list) {
		if (lsi->si_jid == sdp->sd_jdesc->jd_jid)
			sdp->sd_sc_inode = NULL; /* belongs to this node */
		if (lsi->si_sc_inode)
			iput(lsi->si_sc_inode);
		list_del(&lsi->si_list);
		kfree(lsi);
	}
}

*find_local_statfs_inode(struct gfs2_sbd
*sdp
,
1593 struct local_statfs_inode
*lsi
;
1595 /* Return the local (per node) statfs inode in the
1596 * sdp->sd_sc_inodes_list corresponding to the 'index'. */
1597 list_for_each_entry(lsi
, &sdp
->sd_sc_inodes_list
, si_list
) {
1598 if (lsi
->si_jid
== index
)
1599 return lsi
->si_sc_inode
;
const struct super_operations gfs2_super_ops = {
	.alloc_inode		= gfs2_alloc_inode,
	.free_inode		= gfs2_free_inode,
	.write_inode		= gfs2_write_inode,
	.dirty_inode		= gfs2_dirty_inode,
	.evict_inode		= gfs2_evict_inode,
	.put_super		= gfs2_put_super,
	.sync_fs		= gfs2_sync_fs,
	.freeze_super		= gfs2_freeze_super,
	.freeze_fs		= gfs2_freeze_fs,
	.thaw_super		= gfs2_thaw_super,
	.statfs			= gfs2_statfs,
	.drop_inode		= gfs2_drop_inode,
	.show_options		= gfs2_show_options,
};