/* This file contains the routines related to vnodes.
 * The entry points are:
 *
 *  get_vnode - increase counter and get details of an inode
 *  get_free_vnode - get a pointer to a free vnode obj
 *  find_vnode - find a vnode according to the FS endpoint and the inode num.
 *  dup_vnode - duplicate vnode (i.e. increase counter)
 *  put_vnode - drop vnode (i.e. decrease counter)
 */
#include "fs.h"
#include "vnode.h"
#include "fproc.h"
#include <minix/vfsif.h>
#include <assert.h>
/* Is vnode pointer reasonable? */
#define SANEVP(v) ((((v) >= &vnode[0] && (v) < &vnode[NR_VNODES])))

#define BADVP(v, f, l) printf("%s:%d: bad vp %p\n", f, l, v)

/* vp check that returns 0 for use in check_vrefs() */
#define CHECKVN(v) if(!SANEVP(v)) {		\
	BADVP(v, __FILE__, __LINE__);		\
	return 0;				\
}

/* vp check that panics */
#define ASSERTVP(v) if(!SANEVP(v)) { \
	BADVP(v, __FILE__, __LINE__); panic("bad vp"); }
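/* ASSERTVP() guards the entry of routines below that dereference a vnode
 * pointer and brings VFS down on corruption; CHECKVN() is the non-fatal
 * variant for consistency checks that only want to report and bail out.
 */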
#if LOCK_DEBUG
/*===========================================================================*
 *				check_vnode_locks_by_me			     *
 *===========================================================================*/
void check_vnode_locks_by_me(struct fproc *rfp)
{
/* Check whether this thread still has locks held on vnodes */
  struct vnode *vp;

  for (vp = &vnode[0]; vp < &vnode[NR_VNODES]; vp++) {
	if (tll_locked_by_me(&vp->v_lock)) {
		panic("Thread %d still holds vnode lock on vp %p call_nr=%d\n",
		      mthread_self(), vp, job_call_nr);
	}
  }

  if (rfp->fp_vp_rdlocks != 0)
	panic("Thread %d still holds read locks on a vnode (%d) call_nr=%d\n",
	      mthread_self(), rfp->fp_vp_rdlocks, job_call_nr);
}
#endif
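/* In LOCK_DEBUG builds these checks are intended to run when a worker
 * thread finishes servicing a request, so a vnode lock that outlives its
 * job is caught immediately rather than showing up as a deadlock later.
 */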
#if LOCK_DEBUG
/*===========================================================================*
 *				check_vnode_locks			     *
 *===========================================================================*/
void check_vnode_locks()
{
  struct vnode *vp;
  int count = 0;

  for (vp = &vnode[0]; vp < &vnode[NR_VNODES]; vp++)
	if (is_vnode_locked(vp)) {
		count++;
	}

  if (count) panic("%d locked vnodes\n", count);
  printf("check_vnode_locks OK\n");
}
#endif
/*===========================================================================*
 *				get_free_vnode				     *
 *===========================================================================*/
struct vnode *get_free_vnode()
{
/* Find a free vnode slot in the vnode table (it's not actually allocated) */
  struct vnode *vp;

  for (vp = &vnode[0]; vp < &vnode[NR_VNODES]; ++vp) {
	if (vp->v_ref_count == 0 && !is_vnode_locked(vp)) {
		vp->v_mapfs_e = NONE;
		vp->v_mapfs_count = 0;
		vp->v_mapinode_nr = 0;
		return(vp);
	}
  }

  err_code = ENFILE;
  return(NULL);
}
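/* A minimal caller sketch (hypothetical, for illustration only): the slot
 * is not reserved until the caller raises v_ref_count, so it should be
 * locked and claimed right away:
 *
 *	struct vnode *vp;
 *
 *	if ((vp = get_free_vnode()) == NULL)
 *		return(err_code);		(err_code is ENFILE here)
 *	lock_vnode(vp, VNODE_OPCL);
 *	vp->v_ref_count = 1;
 */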
/*===========================================================================*
 *				find_vnode				     *
 *===========================================================================*/
struct vnode *find_vnode(int fs_e, ino_t ino)
{
/* Find a specified (FS endpoint and inode number) vnode in the
 * vnode table */
  struct vnode *vp;

  for (vp = &vnode[0]; vp < &vnode[NR_VNODES]; ++vp)
	if (vp->v_ref_count > 0 && vp->v_inode_nr == ino && vp->v_fs_e == fs_e)
		return(vp);

  return(NULL);
}
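/* The (FS endpoint, inode number) pair is what makes an inode globally
 * unique from VFS's point of view: inode numbers are only unique within a
 * single file system process, so both halves of the key are needed.
 */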
/*===========================================================================*
 *				is_vnode_locked				     *
 *===========================================================================*/
int is_vnode_locked(struct vnode *vp)
{
/* Find out whether a thread holds a lock on this vnode or is trying to obtain
 * a lock */
  ASSERTVP(vp);

  return(tll_islocked(&vp->v_lock) || tll_haspendinglock(&vp->v_lock));
}
/*===========================================================================*
 *				init_vnodes				     *
 *===========================================================================*/
void init_vnodes(void)
{
  struct vnode *vp;

  for (vp = &vnode[0]; vp < &vnode[NR_VNODES]; ++vp) {
	vp->v_fs_e = NONE;
	vp->v_mapfs_e = NONE;
	vp->v_inode_nr = 0;
	vp->v_ref_count = 0;
	vp->v_fs_count = 0;
	vp->v_mapfs_count = 0;
	tll_init(&vp->v_lock);
  }
}
/*===========================================================================*
 *				lock_vnode				     *
 *===========================================================================*/
int lock_vnode(struct vnode *vp, tll_access_t locktype)
{
  int r;

  ASSERTVP(vp);

  r = tll_lock(&vp->v_lock, locktype);

#if LOCK_DEBUG
  if (locktype == VNODE_READ) {
	fp->fp_vp_rdlocks++;
  }
#endif

  if (r == EBUSY) return(r);

  return(OK);
}
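/* Vnode locks are three-level locks (tll): VNODE_READ allows concurrent
 * readers, VNODE_OPCL serializes open/close style operations while still
 * admitting readers, and VNODE_WRITE is fully exclusive. The exact mapping
 * onto the tll access types lives in the tll code; the description here is
 * a summary, not a definition.
 */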
/*===========================================================================*
 *				unlock_vnode				     *
 *===========================================================================*/
void unlock_vnode(struct vnode *vp)
{
#if LOCK_DEBUG
  int i;
  register struct vnode *rvp;
  struct worker_thread *w;
#endif

  ASSERTVP(vp);

#if LOCK_DEBUG
  /* Decrease read-only lock counter when not locked as VNODE_OPCL or
   * VNODE_WRITE */
  if (!tll_locked_by_me(&vp->v_lock)) {
	fp->fp_vp_rdlocks--;
  }

  for (i = 0; i < NR_VNODES; i++) {
	rvp = &vnode[i];

	w = rvp->v_lock.t_write;
	assert(w != self);
	while (w && w->w_next != NULL) {
		w = w->w_next;
		assert(w != self);
	}

	w = rvp->v_lock.t_serial;
	assert(w != self);
	while (w && w->w_next != NULL) {
		w = w->w_next;
		assert(w != self);
	}
  }
#endif

  tll_unlock(&vp->v_lock);
}
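/* The LOCK_DEBUG sweep above walks the write and serial wait queues of
 * every vnode lock and asserts that the unlocking worker (self) is not
 * still enqueued on any of them; a worker left behind on a wait queue
 * would indicate a corrupted queue or a lock released out of order.
 */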
/*===========================================================================*
 *				upgrade_vnode_lock			     *
 *===========================================================================*/
void upgrade_vnode_lock(struct vnode *vp)
{
  ASSERTVP(vp);
  tll_upgrade(&vp->v_lock);
}
/*===========================================================================*
 *				dup_vnode				     *
 *===========================================================================*/
void dup_vnode(struct vnode *vp)
{
/* dup_vnode() is called to increment the vnode's, and therefore the
 * referred inode's, reference counter.
 */
  ASSERTVP(vp);
  vp->v_ref_count++;
}
/*===========================================================================*
 *				put_vnode				     *
 *===========================================================================*/
void put_vnode(struct vnode *vp)
{
/* Decrease the vnode's usage counter and decrease the inode's usage counter
 * in the corresponding FS process. Decreasing fs_count each time we decrease
 * the ref count would lead to poor performance. Instead, fs_count is only
 * decreased when the ref count hits zero. However, that could cause fs_count
 * to wrap. To prevent this, we drop the counter back to 1 when it exceeds
 * 256. We maintain fs_count as a sanity check to make sure VFS and the FS
 * are in sync.
 */
  int r, lock_vp;

  ASSERTVP(vp);

  /* Lock vnode. It's quite possible this thread already has a lock on this
   * vnode. That's no problem, because the reference counter will not decrease
   * to zero in that case. However, if the counter does decrease to zero *and*
   * is already locked, we have a consistency problem somewhere. */
  lock_vp = lock_vnode(vp, VNODE_OPCL);

  if (vp->v_ref_count > 1) {
	/* Decrease counter */
	vp->v_ref_count--;
	if (vp->v_fs_count > 256)
		vnode_clean_refs(vp);
	if (lock_vp != EBUSY) unlock_vnode(vp);
	return;
  }

  /* If we already had a lock, there is a consistency problem */
  assert(lock_vp != EBUSY);
  upgrade_vnode_lock(vp);	/* Acquire exclusive access */

  /* A vnode that's not in use can't be put back. */
  if (vp->v_ref_count <= 0)
	panic("put_vnode failed: bad v_ref_count %d\n", vp->v_ref_count);

  /* fs_count should indicate that the file is in use. */
  if (vp->v_fs_count <= 0)
	panic("put_vnode failed: bad v_fs_count %d\n", vp->v_fs_count);

  /* Tell FS we don't need this inode to be open anymore. */
  r = req_putnode(vp->v_fs_e, vp->v_inode_nr, vp->v_fs_count);
  if (r != OK)
	printf("VFS: putnode failed: %d\n", r);

  /* This inode could've been mapped. If so, tell the mapped FS to close it
   * as well. If mapped onto the same FS, this putnode is not needed. */
  if (vp->v_mapfs_e != NONE && vp->v_mapfs_e != vp->v_fs_e)
	req_putnode(vp->v_mapfs_e, vp->v_mapinode_nr, vp->v_mapfs_count);

  vp->v_fs_count = 0;
  vp->v_ref_count = 0;
  vp->v_mapfs_count = 0;

  unlock_vnode(vp);
}
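/* Worked example of the counter scheme above (illustrative numbers):
 * suppose a vnode has v_ref_count == 5 and v_fs_count == 300. A put_vnode()
 * takes the fast path (ref count 5 -> 4) and, because v_fs_count > 256,
 * calls vnode_clean_refs(): the FS drops 299 references and v_fs_count is
 * reset to 1, long before the counter could wrap. Only when v_ref_count
 * finally reaches 0 does the slow path send the remaining req_putnode and
 * clear both counters.
 */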
/*===========================================================================*
 *				vnode_clean_refs			     *
 *===========================================================================*/
void vnode_clean_refs(struct vnode *vp)
{
/* Tell the underlying FS to drop all references but one. */

  if (vp == NULL) return;
  if (vp->v_fs_count <= 1) return;	/* Nothing to do */

  /* Drop all references except one */
  req_putnode(vp->v_fs_e, vp->v_inode_nr, vp->v_fs_count - 1);
  vp->v_fs_count = 1;
}
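/* For example: with v_fs_count == 300, the req_putnode() above releases 299
 * of the FS's references in one request, and the assignment leaves VFS and
 * the FS agreeing on a count of exactly 1.
 */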