/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"

/**
 * gfs2_ail_empty_gl - remove all buffers for a given lock from the AIL
 * @gl: the glock
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;
	struct gfs2_trans tr;

	memset(&tr, 0, sizeof(tr));
	tr.tr_revokes = atomic_read(&gl->gl_ail_count);

	if (!tr.tr_revokes)
		return;

	/* A shortened, inline version of gfs2_trans_begin() */
	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
	tr.tr_ip = (unsigned long)__builtin_return_address(0);
	INIT_LIST_HEAD(&tr.tr_list_buf);
	gfs2_log_reserve(sdp, tr.tr_reserved);
	BUG_ON(current->journal_info);
	current->journal_info = &tr;
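	/*
	 * Under the log lock, detach each buffer from the glock's AIL list
	 * and turn it into a revoke entry in the transaction set up above.
	 */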
	gfs2_log_lock(sdp);
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata,
				bd_ail_gl_list);
		bh = bd->bd_bh;
		gfs2_remove_from_ail(bd);
		bd->bd_bh = NULL;
		bh->b_private = NULL;
		bd->bd_blkno = bh->b_blocknr;
		gfs2_assert_withdraw(sdp, !buffer_busy(bh));
		gfs2_trans_add_revoke(sdp, bd);
	}
	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
	gfs2_log_unlock(sdp);

	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
}

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to caller to demote/unlock the glock until I/O is complete.
 */

static void rgrp_go_sync(struct gfs2_glock *gl)
{
	struct address_space *metamapping = gl->gl_aspace->i_mapping;
	int error;

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;
	BUG_ON(gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_sbd, gl);
	filemap_fdatawrite(metamapping);
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 *
 * We never use LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 *
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct address_space *mapping = gl->gl_aspace->i_mapping;

	BUG_ON(!(flags & DIO_METADATA));
	gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));
	truncate_inode_pages(mapping, 0);
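	/* Force the resource group data to be re-read on its next use. */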
	if (gl->gl_object) {
		struct gfs2_rgrpd *rgd = (struct gfs2_rgrpd *)gl->gl_object;
		rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
	}
}

/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 *
 */

static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct address_space *metamapping = gl->gl_aspace->i_mapping;
	int error;

	if (ip && !S_ISREG(ip->i_inode.i_mode))
		ip = NULL;
	if (ip && test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
		unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;

	BUG_ON(gl->gl_state != LM_ST_EXCLUSIVE);
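	/*
	 * Flush the journal for this glock, then write back and wait on the
	 * metadata mapping and, for regular files, the data mapping,
	 * recording any writeback errors against the respective mappings.
	 */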
	gfs2_log_flush(gl->gl_sbd, gl);
	filemap_fdatawrite(metamapping);
	if (ip) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_clear_bit();
	clear_bit(GLF_DIRTY, &gl->gl_flags);
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags:
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gl->gl_object;

	gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gl->gl_aspace->i_mapping;
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GIF_INVALID, &ip->i_flags);
			forget_all_cached_acls(&ip->i_inode);
		}
	}

	if (ip == GFS2_I(gl->gl_sbd->sd_rindex))
		gl->gl_sbd->sd_rindex_uptodate = 0;
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;
	return 1;
}

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the holder
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip || (gh->gh_flags & GL_SKIP))
		return 0;
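	/* Re-read the inode from disk if its cached fields have been invalidated. */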
	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
		spin_lock(&sdp->sd_trunc_lock);
		if (list_empty(&ip->i_trunc_list))
			list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		wake_up(&sdp->sd_quota_wait);
		return 1;
	}

	return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: the glock protecting the inode
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_inode *ip = gl->gl_object;
	if (ip == NULL)
		return 0;
	gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu/%llu\n",
		  (unsigned long long)ip->i_no_formal_ino,
		  (unsigned long long)ip->i_no_addr,
		  IF2DT(ip->i_inode.i_mode), ip->i_flags,
		  (unsigned int)ip->i_diskflags,
		  (unsigned long long)ip->i_inode.i_size,
		  (unsigned long long)ip->i_disksize);
	return 0;
}

/**
 * rgrp_go_demote_ok - Check to see if it's ok to unlock a RG's glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int rgrp_go_demote_ok(const struct gfs2_glock *gl)
{
	return !gl->gl_aspace->i_mapping->nrpages;
}

/**
 * rgrp_go_lock - operation done after an rgrp lock is locked by
 *    a first holder on this node.
 * @gh: the holder
 *
 * Returns: errno
 */

static int rgrp_go_lock(struct gfs2_holder *gh)
{
	return gfs2_rgrp_bh_get(gh->gh_gl->gl_object);
}

/**
 * rgrp_go_unlock - operation done before an rgrp lock is unlocked by
 *    a last holder on this node.
 * @gh: the holder
 *
 */

static void rgrp_go_unlock(struct gfs2_holder *gh)
{
	gfs2_rgrp_bh_put(gh->gh_gl->gl_object);
}

/**
 * trans_go_sync - promote/demote the transaction glock
 * @gl: the glock
 *
 */

static void trans_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
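	/*
	 * If the journal is still live, flush all dirty metadata and shut
	 * down the log before the transaction glock changes state.
	 */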
	if (gl->gl_state != LM_ST_UNLOCKED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		flush_workqueue(gfs2_delete_workqueue);
		gfs2_meta_syncfs(sdp);
		gfs2_log_shutdown(sdp);
	}
}

/**
 * trans_go_xmote_bh - After promoting/demoting the transaction glock
 * @gl: the glock
 *
 */

static int trans_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
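		/*
		 * Locate the journal head and check that the log was left
		 * with an unmount record, i.e. it was cleanly shut down.
		 */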
		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
		if (error)
			gfs2_consist(sdp);
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/* Initialize the head of the log */
		if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
	return 0;
}

/**
 * trans_go_demote_ok
 * @gl: the glock
 *
 * Always returns 0
 *
 */

static int trans_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 *
 * gl_spin lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED &&
	    ip && test_bit(GIF_USER, &ip->i_flags)) {
		gfs2_glock_hold(gl);
		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gfs2_glock_put_nolock(gl);
	}
}
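
/*
 * Glock operation tables.  Each lock type supplies the callbacks the glock
 * core invokes around state changes (sync, invalidate, lock, unlock, dump,
 * etc.); entries left unset simply get no special handling.
 */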
const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_xmote_th = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_min_hold_time = HZ / 5,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_xmote_th = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_demote_ok = rgrp_go_demote_ok,
	.go_lock = rgrp_go_lock,
	.go_unlock = rgrp_go_unlock,
	.go_dump = gfs2_rgrp_dump,
	.go_type = LM_TYPE_RGRP,
	.go_min_hold_time = HZ / 5,
};

const struct gfs2_glock_operations gfs2_trans_glops = {
	.go_xmote_th = trans_go_sync,
	.go_xmote_bh = trans_go_xmote_bh,
	.go_demote_ok = trans_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
};
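
/*
 * Table of operation sets indexed by lock type.  Note that LM_TYPE_NONDISK
 * appears twice below; with C designated initializers the later entry
 * (gfs2_nondisk_glops) is the one that takes effect.
 */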
const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_NONDISK] = &gfs2_trans_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};