/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
10 #include <linux/slab.h>
11 #include <linux/spinlock.h>
12 #include <linux/completion.h>
13 #include <linux/buffer_head.h>
14 #include <linux/gfs2_ondisk.h>
15 #include <linux/lm_interface.h>
31 * ail_empty_gl - remove all buffers for a given lock from the AIL
34 * None of the buffers should be dirty, locked, or pinned.
37 static void gfs2_ail_empty_gl(struct gfs2_glock
*gl
)
39 struct gfs2_sbd
*sdp
= gl
->gl_sbd
;
41 struct list_head
*head
= &gl
->gl_ail_list
;
42 struct gfs2_bufdata
*bd
;
43 struct buffer_head
*bh
;
46 blocks
= atomic_read(&gl
->gl_ail_count
);
50 error
= gfs2_trans_begin(sdp
, 0, blocks
);
51 if (gfs2_assert_withdraw(sdp
, !error
))
55 while (!list_empty(head
)) {
56 bd
= list_entry(head
->next
, struct gfs2_bufdata
,
59 gfs2_remove_from_ail(bd
);
62 bd
->bd_blkno
= bh
->b_blocknr
;
63 gfs2_assert_withdraw(sdp
, !buffer_busy(bh
));
64 gfs2_trans_add_revoke(sdp
, bd
);
66 gfs2_assert_withdraw(sdp
, !atomic_read(&gl
->gl_ail_count
));
70 gfs2_log_flush(sdp
, NULL
);
74 * gfs2_pte_inval - Sync and invalidate all PTEs associated with a glock
79 static void gfs2_pte_inval(struct gfs2_glock
*gl
)
81 struct gfs2_inode
*ip
;
86 if (!ip
|| !S_ISREG(inode
->i_mode
))
89 unmap_shared_mapping_range(inode
->i_mapping
, 0, 0);
90 if (test_bit(GIF_SW_PAGED
, &ip
->i_flags
))
91 set_bit(GLF_DIRTY
, &gl
->gl_flags
);
96 * meta_go_sync - sync out the metadata for this glock
99 * Called when demoting or unlocking an EX glock. We must flush
100 * to disk all dirty buffers/pages relating to this glock, and must not
101 * not return to caller to demote/unlock the glock until I/O is complete.
104 static void meta_go_sync(struct gfs2_glock
*gl
)
106 if (gl
->gl_state
!= LM_ST_EXCLUSIVE
)
109 if (test_and_clear_bit(GLF_DIRTY
, &gl
->gl_flags
)) {
110 gfs2_log_flush(gl
->gl_sbd
, gl
);
112 gfs2_ail_empty_gl(gl
);
117 * meta_go_inval - invalidate the metadata for this glock
123 static void meta_go_inval(struct gfs2_glock
*gl
, int flags
)
125 if (!(flags
& DIO_METADATA
))
133 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
134 * @gl: the glock protecting the inode
138 static void inode_go_sync(struct gfs2_glock
*gl
)
140 struct gfs2_inode
*ip
= gl
->gl_object
;
141 struct address_space
*metamapping
= gl
->gl_aspace
->i_mapping
;
144 if (gl
->gl_state
!= LM_ST_UNLOCKED
)
146 if (gl
->gl_state
!= LM_ST_EXCLUSIVE
)
149 if (ip
&& !S_ISREG(ip
->i_inode
.i_mode
))
152 if (test_bit(GLF_DIRTY
, &gl
->gl_flags
)) {
153 gfs2_log_flush(gl
->gl_sbd
, gl
);
154 filemap_fdatawrite(metamapping
);
156 struct address_space
*mapping
= ip
->i_inode
.i_mapping
;
157 filemap_fdatawrite(mapping
);
158 error
= filemap_fdatawait(mapping
);
159 mapping_set_error(mapping
, error
);
161 error
= filemap_fdatawait(metamapping
);
162 mapping_set_error(metamapping
, error
);
163 clear_bit(GLF_DIRTY
, &gl
->gl_flags
);
164 gfs2_ail_empty_gl(gl
);
169 * inode_go_xmote_bh - After promoting/demoting a glock
174 static void inode_go_xmote_bh(struct gfs2_glock
*gl
)
176 struct gfs2_holder
*gh
= gl
->gl_req_gh
;
177 struct buffer_head
*bh
;
180 if (gl
->gl_state
!= LM_ST_UNLOCKED
&&
181 (!gh
|| !(gh
->gh_flags
& GL_SKIP
))) {
182 error
= gfs2_meta_read(gl
, gl
->gl_name
.ln_number
, 0, &bh
);
189 * inode_go_inval - prepare a inode glock to be released
195 static void inode_go_inval(struct gfs2_glock
*gl
, int flags
)
197 struct gfs2_inode
*ip
= gl
->gl_object
;
198 int meta
= (flags
& DIO_METADATA
);
203 set_bit(GIF_INVALID
, &ip
->i_flags
);
206 if (ip
&& S_ISREG(ip
->i_inode
.i_mode
))
207 truncate_inode_pages(ip
->i_inode
.i_mapping
, 0);
211 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
214 * Returns: 1 if it's ok
217 static int inode_go_demote_ok(struct gfs2_glock
*gl
)
219 struct gfs2_sbd
*sdp
= gl
->gl_sbd
;
222 if (!gl
->gl_object
&& !gl
->gl_aspace
->i_mapping
->nrpages
)
224 else if (!sdp
->sd_args
.ar_localcaching
&&
225 time_after_eq(jiffies
, gl
->gl_stamp
+
226 gfs2_tune_get(sdp
, gt_demote_secs
) * HZ
))
233 * inode_go_lock - operation done after an inode lock is locked by a process
240 static int inode_go_lock(struct gfs2_holder
*gh
)
242 struct gfs2_glock
*gl
= gh
->gh_gl
;
243 struct gfs2_inode
*ip
= gl
->gl_object
;
249 if (test_bit(GIF_INVALID
, &ip
->i_flags
)) {
250 error
= gfs2_inode_refresh(ip
);
255 if ((ip
->i_di
.di_flags
& GFS2_DIF_TRUNC_IN_PROG
) &&
256 (gl
->gl_state
== LM_ST_EXCLUSIVE
) &&
257 (gh
->gh_state
== LM_ST_EXCLUSIVE
))
258 error
= gfs2_truncatei_resume(ip
);
264 * rgrp_go_demote_ok - Check to see if it's ok to unlock a RG's glock
267 * Returns: 1 if it's ok
270 static int rgrp_go_demote_ok(struct gfs2_glock
*gl
)
272 return !gl
->gl_aspace
->i_mapping
->nrpages
;
276 * rgrp_go_lock - operation done after an rgrp lock is locked by
277 * a first holder on this node.
284 static int rgrp_go_lock(struct gfs2_holder
*gh
)
286 return gfs2_rgrp_bh_get(gh
->gh_gl
->gl_object
);
290 * rgrp_go_unlock - operation done before an rgrp lock is unlocked by
291 * a last holder on this node.
297 static void rgrp_go_unlock(struct gfs2_holder
*gh
)
299 gfs2_rgrp_bh_put(gh
->gh_gl
->gl_object
);
303 * trans_go_sync - promote/demote the transaction glock
305 * @state: the requested state
310 static void trans_go_sync(struct gfs2_glock
*gl
)
312 struct gfs2_sbd
*sdp
= gl
->gl_sbd
;
314 if (gl
->gl_state
!= LM_ST_UNLOCKED
&&
315 test_bit(SDF_JOURNAL_LIVE
, &sdp
->sd_flags
)) {
316 gfs2_meta_syncfs(sdp
);
317 gfs2_log_shutdown(sdp
);
322 * trans_go_xmote_bh - After promoting/demoting the transaction glock
327 static void trans_go_xmote_bh(struct gfs2_glock
*gl
)
329 struct gfs2_sbd
*sdp
= gl
->gl_sbd
;
330 struct gfs2_inode
*ip
= GFS2_I(sdp
->sd_jdesc
->jd_inode
);
331 struct gfs2_glock
*j_gl
= ip
->i_gl
;
332 struct gfs2_log_header_host head
;
335 if (gl
->gl_state
!= LM_ST_UNLOCKED
&&
336 test_bit(SDF_JOURNAL_LIVE
, &sdp
->sd_flags
)) {
337 j_gl
->gl_ops
->go_inval(j_gl
, DIO_METADATA
);
339 error
= gfs2_find_jhead(sdp
->sd_jdesc
, &head
);
342 if (!(head
.lh_flags
& GFS2_LOG_HEAD_UNMOUNT
))
345 /* Initialize some head of the log stuff */
346 if (!test_bit(SDF_SHUTDOWN
, &sdp
->sd_flags
)) {
347 sdp
->sd_log_sequence
= head
.lh_sequence
+ 1;
348 gfs2_log_pointers_init(sdp
, head
.lh_blkno
);
354 * quota_go_demote_ok - Check to see if it's ok to unlock a quota glock
357 * Returns: 1 if it's ok
360 static int quota_go_demote_ok(struct gfs2_glock
*gl
)
362 return !atomic_read(&gl
->gl_lvb_count
);
365 const struct gfs2_glock_operations gfs2_meta_glops
= {
366 .go_xmote_th
= meta_go_sync
,
367 .go_type
= LM_TYPE_META
,
370 const struct gfs2_glock_operations gfs2_inode_glops
= {
371 .go_xmote_th
= inode_go_sync
,
372 .go_xmote_bh
= inode_go_xmote_bh
,
373 .go_inval
= inode_go_inval
,
374 .go_demote_ok
= inode_go_demote_ok
,
375 .go_lock
= inode_go_lock
,
376 .go_type
= LM_TYPE_INODE
,
377 .go_min_hold_time
= HZ
/ 10,
380 const struct gfs2_glock_operations gfs2_rgrp_glops
= {
381 .go_xmote_th
= meta_go_sync
,
382 .go_inval
= meta_go_inval
,
383 .go_demote_ok
= rgrp_go_demote_ok
,
384 .go_lock
= rgrp_go_lock
,
385 .go_unlock
= rgrp_go_unlock
,
386 .go_type
= LM_TYPE_RGRP
,
387 .go_min_hold_time
= HZ
/ 10,
390 const struct gfs2_glock_operations gfs2_trans_glops
= {
391 .go_xmote_th
= trans_go_sync
,
392 .go_xmote_bh
= trans_go_xmote_bh
,
393 .go_type
= LM_TYPE_NONDISK
,
396 const struct gfs2_glock_operations gfs2_iopen_glops
= {
397 .go_type
= LM_TYPE_IOPEN
,
400 const struct gfs2_glock_operations gfs2_flock_glops
= {
401 .go_type
= LM_TYPE_FLOCK
,
404 const struct gfs2_glock_operations gfs2_nondisk_glops
= {
405 .go_type
= LM_TYPE_NONDISK
,
408 const struct gfs2_glock_operations gfs2_quota_glops
= {
409 .go_demote_ok
= quota_go_demote_ok
,
410 .go_type
= LM_TYPE_QUOTA
,
413 const struct gfs2_glock_operations gfs2_journal_glops
= {
414 .go_type
= LM_TYPE_JOURNAL
,