make vfs & filesystems use failable copying
[minix3.git] / servers / vfs / vnode.c
bloba45b3d885dfc029041641c1be9fb910b56b4c6ae
1 /* This file contains the routines related to vnodes.
2 * The entry points are:
4 * get_vnode - increase counter and get details of an inode
5 * get_free_vnode - get a pointer to a free vnode obj
6 * find_vnode - find a vnode according to the FS endpoint and the inode num.
7 * dup_vnode - duplicate vnode (i.e. increase counter)
8 * put_vnode - drop vnode (i.e. decrease counter)
9 */
11 #include "fs.h"
12 #include "vnode.h"
13 #include "vmnt.h"
14 #include "file.h"
15 #include <minix/vfsif.h>
16 #include <assert.h>
/* Is vnode pointer reasonable?
 *
 * SANEVP(v)  - true iff v points into the vnode[] table.
 * BADVP()    - report a bad vnode pointer with its origin file/line.
 * CHECKVN(v) - validate v; on failure report it and 'return 0' from the
 *              calling function (for use in check_vrefs()).
 * ASSERTVP(v)- validate v; on failure report it and panic.
 *
 * The multi-statement checks are wrapped in do { } while (0) so they expand
 * to a single statement and stay safe in if/else bodies without braces.
 * All checks compile away when NDEBUG is set.
 */
#if NDEBUG
#define SANEVP(v)
#define CHECKVN(v)
#define ASSERTVP(v)
#else
#define SANEVP(v) ((((v) >= &vnode[0] && (v) < &vnode[NR_VNODES])))

#define BADVP(v, f, l) printf("%s:%d: bad vp %p\n", f, l, v)

/* vp check that returns 0 for use in check_vrefs() */
#define CHECKVN(v) do {						\
	if (!SANEVP(v)) {					\
		BADVP(v, __FILE__, __LINE__);			\
		return 0;					\
	}							\
} while (0)

/* vp check that panics */
#define ASSERTVP(v) do {					\
	if (!SANEVP(v)) {					\
		BADVP(v, __FILE__, __LINE__);			\
		panic("bad vp");				\
	}							\
} while (0)
#endif
#if LOCK_DEBUG
/*===========================================================================*
 *				check_vnode_locks_by_me			     *
 *===========================================================================*/
void check_vnode_locks_by_me(struct fproc *rfp)
{
/* Debug-only sanity check: the current thread must not hold (or still be
 * counted as holding) any vnode lock when this is called. */
  struct vnode *node;

  /* Walk the whole table; any lock still held by us is a bug. */
  for (node = &vnode[0]; node < &vnode[NR_VNODES]; node++) {
	if (!tll_locked_by_me(&node->v_lock)) continue;
	panic("Thread %d still holds vnode lock on vp %p call_nr=%d\n",
	      mthread_self(), node, job_call_nr);
  }

  /* The per-process read-lock counter must have dropped back to zero. */
  if (rfp->fp_vp_rdlocks != 0)
	panic("Thread %d still holds read locks on a vnode (%d) call_nr=%d\n",
	      mthread_self(), rfp->fp_vp_rdlocks, job_call_nr);
}
#endif
61 /*===========================================================================*
62 * check_vnode_locks *
63 *===========================================================================*/
64 void check_vnode_locks()
66 struct vnode *vp;
67 int count = 0;
69 for (vp = &vnode[0]; vp < &vnode[NR_VNODES]; vp++)
70 if (is_vnode_locked(vp)) {
71 count++;
74 if (count) panic("%d locked vnodes\n", count);
75 #if 0
76 printf("check_vnode_locks OK\n");
77 #endif
80 /*===========================================================================*
81 * get_free_vnode *
82 *===========================================================================*/
83 struct vnode *get_free_vnode()
85 /* Find a free vnode slot in the vnode table (it's not actually allocated) */
86 struct vnode *vp;
88 for (vp = &vnode[0]; vp < &vnode[NR_VNODES]; ++vp) {
89 if (vp->v_ref_count == 0 && !is_vnode_locked(vp)) {
90 vp->v_uid = -1;
91 vp->v_gid = -1;
92 vp->v_sdev = NO_DEV;
93 vp->v_mapfs_e = NONE;
94 vp->v_mapfs_count = 0;
95 vp->v_mapinode_nr = 0;
96 return(vp);
100 err_code = ENFILE;
101 return(NULL);
105 /*===========================================================================*
106 * find_vnode *
107 *===========================================================================*/
108 struct vnode *find_vnode(int fs_e, ino_t ino)
110 /* Find a specified (FS endpoint and inode number) vnode in the
111 * vnode table */
112 struct vnode *vp;
114 for (vp = &vnode[0]; vp < &vnode[NR_VNODES]; ++vp)
115 if (vp->v_ref_count > 0 && vp->v_inode_nr == ino && vp->v_fs_e == fs_e)
116 return(vp);
118 return(NULL);
121 /*===========================================================================*
122 * is_vnode_locked *
123 *===========================================================================*/
124 int is_vnode_locked(struct vnode *vp)
126 /* Find out whether a thread holds a lock on this vnode or is trying to obtain
127 * a lock. */
128 ASSERTVP(vp);
130 return(tll_islocked(&vp->v_lock) || tll_haspendinglock(&vp->v_lock));
133 /*===========================================================================*
134 * init_vnodes *
135 *===========================================================================*/
136 void init_vnodes(void)
138 struct vnode *vp;
140 for (vp = &vnode[0]; vp < &vnode[NR_VNODES]; ++vp) {
141 vp->v_fs_e = NONE;
142 vp->v_mapfs_e = NONE;
143 vp->v_inode_nr = 0;
144 vp->v_ref_count = 0;
145 vp->v_fs_count = 0;
146 vp->v_mapfs_count = 0;
147 tll_init(&vp->v_lock);
151 /*===========================================================================*
152 * lock_vnode *
153 *===========================================================================*/
154 int lock_vnode(struct vnode *vp, tll_access_t locktype)
156 int r;
158 ASSERTVP(vp);
160 r = tll_lock(&vp->v_lock, locktype);
162 #if LOCK_DEBUG
163 if (locktype == VNODE_READ) {
164 fp->fp_vp_rdlocks++;
166 #endif
168 if (r == EBUSY) return(r);
169 return(OK);
172 /*===========================================================================*
173 * unlock_vnode *
174 *===========================================================================*/
175 void unlock_vnode(struct vnode *vp)
177 #if LOCK_DEBUG
178 int i;
179 register struct vnode *rvp;
180 struct worker_thread *w;
181 #endif
182 ASSERTVP(vp);
184 #if LOCK_DEBUG
185 /* Decrease read-only lock counter when not locked as VNODE_OPCL or
186 * VNODE_WRITE */
187 if (!tll_locked_by_me(&vp->v_lock)) {
188 fp->fp_vp_rdlocks--;
191 for (i = 0; i < NR_VNODES; i++) {
192 rvp = &vnode[i];
194 w = rvp->v_lock.t_write;
195 assert(w != self);
196 while (w && w->w_next != NULL) {
197 w = w->w_next;
198 assert(w != self);
201 w = rvp->v_lock.t_serial;
202 assert(w != self);
203 while (w && w->w_next != NULL) {
204 w = w->w_next;
205 assert(w != self);
208 #endif
210 tll_unlock(&vp->v_lock);
213 /*===========================================================================*
214 * vnode *
215 *===========================================================================*/
216 void upgrade_vnode_lock(struct vnode *vp)
218 ASSERTVP(vp);
219 tll_upgrade(&vp->v_lock);
222 /*===========================================================================*
223 * dup_vnode *
224 *===========================================================================*/
225 void dup_vnode(struct vnode *vp)
227 /* dup_vnode() is called to increment the vnode and therefore the
228 * referred inode's counter.
230 ASSERTVP(vp);
231 vp->v_ref_count++;
235 /*===========================================================================*
236 * put_vnode *
237 *===========================================================================*/
238 void put_vnode(struct vnode *vp)
240 /* Decrease vnode's usage counter and decrease inode's usage counter in the
241 * corresponding FS process. Decreasing the fs_count each time we decrease the
242 * ref count would lead to poor performance. Instead, only decrease fs_count
243 * when the ref count hits zero. However, this could lead to fs_count to wrap.
244 * To prevent this, we drop the counter to 1 when the counter hits 256.
245 * We maintain fs_count as a sanity check to make sure VFS and the FS are in
246 * sync.
248 int r, lock_vp;
250 ASSERTVP(vp);
252 /* Lock vnode. It's quite possible this thread already has a lock on this
253 * vnode. That's no problem, because the reference counter will not decrease
254 * to zero in that case. However, if the counter does decrease to zero *and*
255 * is already locked, we have a consistency problem somewhere. */
256 lock_vp = lock_vnode(vp, VNODE_OPCL);
258 if (vp->v_ref_count > 1) {
259 /* Decrease counter */
260 vp->v_ref_count--;
261 if (vp->v_fs_count > 256)
262 vnode_clean_refs(vp);
263 if (lock_vp != EBUSY) unlock_vnode(vp);
264 return;
267 /* If we already had a lock, there is a consistency problem */
268 assert(lock_vp != EBUSY);
269 upgrade_vnode_lock(vp); /* Acquire exclusive access */
271 /* A vnode that's not in use can't be put back. */
272 if (vp->v_ref_count <= 0)
273 panic("put_vnode failed: bad v_ref_count %d\n", vp->v_ref_count);
275 /* fs_count should indicate that the file is in use. */
276 if (vp->v_fs_count <= 0)
277 panic("put_vnode failed: bad v_fs_count %d\n", vp->v_fs_count);
279 /* Tell FS we don't need this inode to be open anymore. */
280 r = req_putnode(vp->v_fs_e, vp->v_inode_nr, vp->v_fs_count);
282 if (r != OK) {
283 printf("VFS: putnode failed: %d\n", r);
284 util_stacktrace();
287 /* This inode could've been mapped. If so, tell mapped FS to close it as
288 * well. If mapped onto same FS, this putnode is not needed. */
289 if (vp->v_mapfs_e != NONE && vp->v_mapfs_e != vp->v_fs_e)
290 req_putnode(vp->v_mapfs_e, vp->v_mapinode_nr, vp->v_mapfs_count);
292 vp->v_fs_count = 0;
293 vp->v_ref_count = 0;
294 vp->v_mapfs_count = 0;
296 unlock_vnode(vp);
300 /*===========================================================================*
301 * vnode_clean_refs *
302 *===========================================================================*/
303 void vnode_clean_refs(struct vnode *vp)
305 /* Tell the underlying FS to drop all reference but one. */
307 if (vp == NULL) return;
308 if (vp->v_fs_count <= 1) return; /* Nothing to do */
310 /* Drop all references except one */
311 req_putnode(vp->v_fs_e, vp->v_inode_nr, vp->v_fs_count - 1);
312 vp->v_fs_count = 1;