/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>

struct workqueue_struct *gfs2_freeze_wq;

static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	fs_err(gl->gl_name.ln_sbd,
	       "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
	       "state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_page->mapping, bh->b_page->flags);
	fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm_withdraw(gl->gl_name.ln_sbd, "AIL error\n");
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 *
 * None of the buffers should be dirty, locked, or pinned.
 */
static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}

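/*
 * As the checks above imply, buffers still on gl_ail_list at this point were
 * journaled earlier and should already be clean on disk; finding one that is
 * dirty, pinned, or locked is treated as an error. Queueing a revoke for each
 * buffer via gfs2_trans_add_revoke() tells journal recovery that the journal
 * copy of that block must not be replayed.
 */
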
static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_trans tr;

	memset(&tr, 0, sizeof(tr));
	INIT_LIST_HEAD(&tr.tr_buf);
	INIT_LIST_HEAD(&tr.tr_databuf);
	tr.tr_revokes = atomic_read(&gl->gl_ail_count);

	if (!tr.tr_revokes)
		return;

	/* A shortened, inline version of gfs2_trans_begin()
	 * tr->alloced is not set since the transaction structure is
	 * on the stack */
	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
	if (gfs2_log_reserve(sdp, tr.tr_reserved) < 0)
		return;
	WARN_ON_ONCE(current->journal_info);
	current->journal_info = &tr;

	__gfs2_ail_flush(gl, 0, tr.tr_revokes);

	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_EMPTY_GL);
}

void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
	int ret;

	if (!revokes)
		return;

	while (revokes > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);

	ret = gfs2_trans_begin(sdp, 0, max_revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, max_revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_FLUSH);
}

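/*
 * Sizing of max_revokes above: the first journal block that carries revokes
 * is a log descriptor, so it holds
 * (sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64) block
 * numbers; each continuation block only loses a gfs2_meta_header, which is
 * why the while loop adds the larger per-block capacity until all currently
 * queued AIL entries are covered.
 */
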
/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock. We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to caller to demote/unlock the glock until I/O is complete.
 */
static void rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd;
	int error;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	if (rgd)
		gfs2_rgrp_brelse(rgd);
	spin_unlock(&gl->gl_lockref.lock);

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_RGRP_GO_SYNC);
	filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
	error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
	mapping_set_error(mapping, error);
	gfs2_ail_empty_gl(gl);

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	if (rgd)
		gfs2_free_clones(rgd);
	spin_unlock(&gl->gl_lockref.lock);
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 *
 * We never used LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 */
static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);

	if (rgd)
		gfs2_rgrp_brelse(rgd);

	WARN_ON_ONCE(!(flags & DIO_METADATA));
	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
	truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end);

	if (rgd)
		rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
}

static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;

	spin_lock(&gl->gl_lockref.lock);
	ip = gl->gl_object;
	if (ip)
		set_bit(GIF_GLOP_PENDING, &ip->i_flags);
	spin_unlock(&gl->gl_lockref.lock);
	return ip;
}

struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	spin_unlock(&gl->gl_lockref.lock);
	return rgd;
}

static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
	if (!ip)
		return;

	clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
	wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
}

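/*
 * The helpers above form a small handshake: gfs2_glock2inode() and
 * gfs2_glock2rgrp() read gl_object under gl_lockref.lock, and the inode
 * variant also sets GIF_GLOP_PENDING so that code tearing down the inode can
 * wait until the glock operation is finished with it;
 * gfs2_clear_glop_pending() releases the bit and wakes any such waiter.
 */
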
/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 *
 */
static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	if (isreg) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		goto out;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_INODE_GO_SYNC);
	filemap_fdatawrite(metamapping);
	if (isreg) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
	gfs2_clear_glop_pending(ip);
}

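/*
 * Ordering in inode_go_sync() above: metadata writeback is started first,
 * data is then written and waited on, the metadata wait completes, and only
 * once both are stable is the AIL emptied so revokes can be issued for
 * buffers that are now safely in place.
 */
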
/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags:
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 */
static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);

	gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count));

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GIF_INVALID, &ip->i_flags);
			forget_all_cached_acls(&ip->i_inode);
			security_inode_invalidate_secctx(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_INODE_GO_INVAL);
		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);

	gfs2_clear_glop_pending(ip);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */
static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;

	return 1;
}

static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	const struct gfs2_dinode *str = buf;
	struct timespec atime;
	u16 height, depth;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
	ip->i_inode.i_rdev = 0;
	switch (ip->i_inode.i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
					   be32_to_cpu(str->di_minor));
		break;
	}

	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
	set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
	i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	if (timespec_compare(&ip->i_inode.i_atime, &atime) < 0)
		ip->i_inode.i_atime = atime;
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}

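/*
 * Note on the atime handling in gfs2_dinode_in() above: the in-core atime may
 * be newer than the on-disk value, so the on-disk copy is only adopted when
 * it compares later than what is already cached.
 */
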
/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */
int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	clear_bit(GIF_INVALID, &ip->i_flags);

	return error;
}

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the holder
 *
 * Returns: errno
 */
static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip || (gh->gh_flags & GL_SKIP))
		return 0;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
		spin_lock(&sdp->sd_trunc_lock);
		if (list_empty(&ip->i_trunc_list))
			list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		wake_up(&sdp->sd_quota_wait);
		return 1;
	}

	return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock
 *
 */
static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_inode *ip = gl->gl_object;

	if (ip == NULL)
		return;
	gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu\n",
		       (unsigned long long)ip->i_no_formal_ino,
		       (unsigned long long)ip->i_no_addr,
		       IF2DT(ip->i_inode.i_mode), ip->i_flags,
		       (unsigned int)ip->i_diskflags,
		       (unsigned long long)i_size_read(&ip->i_inode));
}

/**
 * freeze_go_sync - promote/demote the freeze glock
 * @gl: the glock
 * @state: the requested state
 *
 */
static void freeze_go_sync(struct gfs2_glock *gl)
{
	int error = 0;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (gl->gl_state == LM_ST_SHARED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
		error = freeze_super(sdp->sd_vfs);
		if (error) {
			printk(KERN_INFO "GFS2: couldn't freeze filesystem: %d\n", error);
			gfs2_assert_withdraw(sdp, 0);
		}
		queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
			       GFS2_LFC_FREEZE_GO_SYNC);
	}
}

/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 *
 */
static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
		if (error)
			gfs2_consist(sdp);
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/* Initialize the log head state */
		if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
	return 0;
}

static int freeze_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 *
 * The gl_lockref.lock is held while calling this.
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (!remote || sb_rdonly(sdp->sd_vfs))
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gl->gl_lockref.count--;
	}
}

const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE | GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_lock = gfs2_rgrp_go_lock,
	.go_unlock = gfs2_rgrp_go_unlock,
	.go_dump = gfs2_rgrp_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_sync = freeze_go_sync,
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_demote_ok = freeze_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
	.go_flags = GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
	.go_flags = GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB | GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
};

const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};

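/*
 * Illustrative sketch only, not part of the original file: gfs2_glops_list is
 * indexed by lock-name type, so code that only has a type number could look
 * up the matching operations roughly like this (assuming the type has been
 * range-checked against the table size):
 *
 *	static const struct gfs2_glock_operations *lookup_glops(unsigned int type)
 *	{
 *		if (type >= ARRAY_SIZE(gfs2_glops_list))
 *			return NULL;
 *		return gfs2_glops_list[type];
 *	}
 */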