/*
 * [XFS] move v_iocount from bhv_vnode to xfs_inode
 * fs/xfs/linux-2.6/xfs_vnode.c
 * blob bde8d2e7f559d5caf087c97c44b4c296b9874209
 */
1 /*
2 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
18 #include "xfs.h"
19 #include "xfs_vnodeops.h"
20 #include "xfs_bmap_btree.h"
21 #include "xfs_inode.h"
/*
 * And this gunk is needed for xfs_mount.h
 */
26 #include "xfs_log.h"
27 #include "xfs_trans.h"
28 #include "xfs_sb.h"
29 #include "xfs_dmapi.h"
30 #include "xfs_inum.h"
31 #include "xfs_ag.h"
32 #include "xfs_mount.h"
34 uint64_t vn_generation; /* vnode generation number */
35 DEFINE_SPINLOCK(vnumber_lock);
38 * Dedicated vnode inactive/reclaim sync semaphores.
39 * Prime number of hash buckets since address is used as the key.
41 #define NVSYNC 37
42 #define vptosync(v) (&vsync[((unsigned long)v) % NVSYNC])
43 static wait_queue_head_t vsync[NVSYNC];
45 void
46 vn_init(void)
48 int i;
50 for (i = 0; i < NVSYNC; i++)
51 init_waitqueue_head(&vsync[i]);
54 void
55 vn_iowait(
56 xfs_inode_t *ip)
58 wait_queue_head_t *wq = vptosync(ip);
60 wait_event(*wq, (atomic_read(&ip->i_iocount) == 0));
63 void
64 vn_iowake(
65 xfs_inode_t *ip)
67 if (atomic_dec_and_test(&ip->i_iocount))
68 wake_up(vptosync(ip));
72 * Volume managers supporting multiple paths can send back ENODEV when the
73 * final path disappears. In this case continuing to fill the page cache
74 * with dirty data which cannot be written out is evil, so prevent that.
76 void
77 vn_ioerror(
78 xfs_inode_t *ip,
79 int error,
80 char *f,
81 int l)
83 bhv_vfs_t *vfsp = XFS_MTOVFS(ip->i_mount);
85 if (unlikely(error == -ENODEV))
86 bhv_vfs_force_shutdown(vfsp, SHUTDOWN_DEVICE_REQ, f, l);
89 bhv_vnode_t *
90 vn_initialize(
91 struct inode *inode)
93 bhv_vnode_t *vp = vn_from_inode(inode);
95 XFS_STATS_INC(vn_active);
96 XFS_STATS_INC(vn_alloc);
98 spin_lock(&vnumber_lock);
99 if (!++vn_generation) /* v_number shouldn't be zero */
100 vn_generation++;
101 vp->v_number = vn_generation;
102 spin_unlock(&vnumber_lock);
104 ASSERT(VN_CACHED(vp) == 0);
106 #ifdef XFS_VNODE_TRACE
107 vp->v_trace = ktrace_alloc(VNODE_TRACE_SIZE, KM_SLEEP);
108 #endif /* XFS_VNODE_TRACE */
110 vn_trace_exit(vp, __FUNCTION__, (inst_t *)__return_address);
111 return vp;
115 * Revalidate the Linux inode from the vattr.
116 * Note: i_size _not_ updated; we must hold the inode
117 * semaphore when doing that - callers responsibility.
119 void
120 vn_revalidate_core(
121 bhv_vnode_t *vp,
122 bhv_vattr_t *vap)
124 struct inode *inode = vn_to_inode(vp);
126 inode->i_mode = vap->va_mode;
127 inode->i_nlink = vap->va_nlink;
128 inode->i_uid = vap->va_uid;
129 inode->i_gid = vap->va_gid;
130 inode->i_blocks = vap->va_nblocks;
131 inode->i_mtime = vap->va_mtime;
132 inode->i_ctime = vap->va_ctime;
133 if (vap->va_xflags & XFS_XFLAG_IMMUTABLE)
134 inode->i_flags |= S_IMMUTABLE;
135 else
136 inode->i_flags &= ~S_IMMUTABLE;
137 if (vap->va_xflags & XFS_XFLAG_APPEND)
138 inode->i_flags |= S_APPEND;
139 else
140 inode->i_flags &= ~S_APPEND;
141 if (vap->va_xflags & XFS_XFLAG_SYNC)
142 inode->i_flags |= S_SYNC;
143 else
144 inode->i_flags &= ~S_SYNC;
145 if (vap->va_xflags & XFS_XFLAG_NOATIME)
146 inode->i_flags |= S_NOATIME;
147 else
148 inode->i_flags &= ~S_NOATIME;
152 * Revalidate the Linux inode from the vnode.
155 __vn_revalidate(
156 bhv_vnode_t *vp,
157 bhv_vattr_t *vattr)
159 int error;
161 vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
162 vattr->va_mask = XFS_AT_STAT | XFS_AT_XFLAGS;
163 error = xfs_getattr(xfs_vtoi(vp), vattr, 0);
164 if (likely(!error)) {
165 vn_revalidate_core(vp, vattr);
166 xfs_iflags_clear(xfs_vtoi(vp), XFS_IMODIFIED);
168 return -error;
172 vn_revalidate(
173 bhv_vnode_t *vp)
175 bhv_vattr_t vattr;
177 return __vn_revalidate(vp, &vattr);
181 * Add a reference to a referenced vnode.
183 bhv_vnode_t *
184 vn_hold(
185 bhv_vnode_t *vp)
187 struct inode *inode;
189 XFS_STATS_INC(vn_hold);
191 inode = igrab(vn_to_inode(vp));
192 ASSERT(inode);
194 return vp;
197 #ifdef XFS_VNODE_TRACE
199 #define KTRACE_ENTER(vp, vk, s, line, ra) \
200 ktrace_enter( (vp)->v_trace, \
201 /* 0 */ (void *)(__psint_t)(vk), \
202 /* 1 */ (void *)(s), \
203 /* 2 */ (void *)(__psint_t) line, \
204 /* 3 */ (void *)(__psint_t)(vn_count(vp)), \
205 /* 4 */ (void *)(ra), \
206 /* 5 */ NULL, \
207 /* 6 */ (void *)(__psint_t)current_cpu(), \
208 /* 7 */ (void *)(__psint_t)current_pid(), \
209 /* 8 */ (void *)__return_address, \
210 /* 9 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL)
213 * Vnode tracing code.
215 void
216 vn_trace_entry(bhv_vnode_t *vp, const char *func, inst_t *ra)
218 KTRACE_ENTER(vp, VNODE_KTRACE_ENTRY, func, 0, ra);
221 void
222 vn_trace_exit(bhv_vnode_t *vp, const char *func, inst_t *ra)
224 KTRACE_ENTER(vp, VNODE_KTRACE_EXIT, func, 0, ra);
227 void
228 vn_trace_hold(bhv_vnode_t *vp, char *file, int line, inst_t *ra)
230 KTRACE_ENTER(vp, VNODE_KTRACE_HOLD, file, line, ra);
233 void
234 vn_trace_ref(bhv_vnode_t *vp, char *file, int line, inst_t *ra)
236 KTRACE_ENTER(vp, VNODE_KTRACE_REF, file, line, ra);
239 void
240 vn_trace_rele(bhv_vnode_t *vp, char *file, int line, inst_t *ra)
242 KTRACE_ENTER(vp, VNODE_KTRACE_RELE, file, line, ra);
244 #endif /* XFS_VNODE_TRACE */