/* This file contains the routines related to vnodes.
 * The entry points are:
 *
 *  get_vnode - increase counter and get details of an inode
 *  get_free_vnode - get a pointer to a free vnode obj
 *  find_vnode - find a vnode according to the FS endpoint and the inode num.
 *  dup_vnode - duplicate vnode (i.e. increase counter)
 *  put_vnode - drop vnode (i.e. decrease counter)
 */

#include "fs.h"
#include "vnode.h"
#include "fproc.h"
#include <assert.h>
#include <minix/vfsif.h>
/* Is vnode pointer reasonable? */
#define SANEVP(v) ((((v) >= &vnode[0] && (v) < &vnode[NR_VNODES])))

#define BADVP(v, f, l) printf("%s:%d: bad vp %p\n", f, l, v)

/* vp check that returns 0 for use in check_vrefs() */
#define CHECKVN(v) if(!SANEVP(v)) {		\
	BADVP(v, __FILE__, __LINE__);		\
	return 0;				\
}

/* vp check that panics */
#define ASSERTVP(v) if(!SANEVP(v)) { \
	BADVP(v, __FILE__, __LINE__); panic("bad vp"); }
#if LOCK_DEBUG
/*===========================================================================*
 *			check_vnode_locks_by_me				     *
 *===========================================================================*/
void check_vnode_locks_by_me(struct fproc *rfp)
{
/* Check whether this thread still has locks held on vnodes */
  struct vnode *vp;

  for (vp = &vnode[0]; vp < &vnode[NR_VNODES]; vp++) {
	if (tll_locked_by_me(&vp->v_lock)) {
		panic("Thread %d still holds vnode lock on vp %x call_nr=%d\n",
		      mthread_self(), vp, job_call_nr);
	}
  }

  if (rfp->fp_vp_rdlocks != 0)
	panic("Thread %d still holds read locks on a vnode (%d) call_nr=%d\n",
	      mthread_self(), rfp->fp_vp_rdlocks, job_call_nr);
}
#endif
/*===========================================================================*
 *			check_vnode_locks				     *
 *===========================================================================*/
void check_vnode_locks(void)
{
  struct vnode *vp;
  int count = 0;

  for (vp = &vnode[0]; vp < &vnode[NR_VNODES]; vp++)
	if (is_vnode_locked(vp)) {
		count++;
	}

  if (count) panic("%d locked vnodes\n", count);

  printf("check_vnode_locks OK\n");
}
/*===========================================================================*
 *			get_free_vnode					     *
 *===========================================================================*/
struct vnode *get_free_vnode()
{
/* Find a free vnode slot in the vnode table (it's not actually allocated) */
  struct vnode *vp;

  for (vp = &vnode[0]; vp < &vnode[NR_VNODES]; ++vp) {
	if (vp->v_ref_count == 0 && !is_vnode_locked(vp)) {
		vp->v_uid = -1;
		vp->v_gid = -1;
		vp->v_sdev = NO_DEV;
		vp->v_mapfs_e = NONE;
		vp->v_mapfs_count = 0;
		vp->v_mapinode_nr = 0;
		return(vp);
	}
  }

  err_code = ENFILE;
  return(NULL);
}
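
/* Illustrative note on get_free_vnode(): the returned slot is merely found,
 * not reserved. A slot counts as free only while v_ref_count is zero and no
 * lock is held or pending, so the caller is expected to lock the vnode or
 * raise v_ref_count before yielding; otherwise a later get_free_vnode() call
 * may hand out the same slot. */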
/*===========================================================================*
 *			find_vnode					     *
 *===========================================================================*/
struct vnode *find_vnode(int fs_e, ino_t ino)
{
/* Find a specified (FS endpoint and inode number) vnode in the
 * vnode table */
  struct vnode *vp;

  for (vp = &vnode[0]; vp < &vnode[NR_VNODES]; ++vp)
	if (vp->v_ref_count > 0 && vp->v_inode_nr == ino && vp->v_fs_e == fs_e)
		return(vp);

  return(NULL);
}
/*===========================================================================*
 *			is_vnode_locked					     *
 *===========================================================================*/
int is_vnode_locked(struct vnode *vp)
{
/* Find out whether a thread holds a lock on this vnode or is trying to obtain
 * a lock */
  ASSERTVP(vp);

  return(tll_islocked(&vp->v_lock) || tll_haspendinglock(&vp->v_lock));
}
/*===========================================================================*
 *			init_vnodes					     *
 *===========================================================================*/
void init_vnodes(void)
{
  struct vnode *vp;

  for (vp = &vnode[0]; vp < &vnode[NR_VNODES]; ++vp) {
	vp->v_fs_e = NONE;
	vp->v_mapfs_e = NONE;
	vp->v_inode_nr = 0;
	vp->v_ref_count = 0;
	vp->v_fs_count = 0;
	vp->v_mapfs_count = 0;
	tll_init(&vp->v_lock);
  }
}
/*===========================================================================*
 *			lock_vnode					     *
 *===========================================================================*/
int lock_vnode(struct vnode *vp, tll_access_t locktype)
{
  int r;

  ASSERTVP(vp);

  r = tll_lock(&vp->v_lock, locktype);

#if LOCK_DEBUG
  if (locktype == VNODE_READ) {
	fp->fp_vp_rdlocks++;
  }
#endif

  if (r == EBUSY) return(r);
  return(OK);
}
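
/* Note on the EBUSY case above: tll_lock() reports EBUSY when the calling
 * thread already holds this vnode's lock, instead of blocking against
 * itself. Callers such as put_vnode() below use that return value to decide
 * whether they are responsible for unlocking the vnode afterwards. */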
/*===========================================================================*
 *			unlock_vnode					     *
 *===========================================================================*/
void unlock_vnode(struct vnode *vp)
{
#if LOCK_DEBUG
  int i;
  register struct vnode *rvp;
  struct worker_thread *w;
#endif
  ASSERTVP(vp);

#if LOCK_DEBUG
  /* Decrease read-only lock counter when not locked as VNODE_OPCL or
   * VNODE_WRITE */
  if (!tll_locked_by_me(&vp->v_lock)) {
	fp->fp_vp_rdlocks--;
  }

  /* Check that this thread is not still queued waiting on any vnode lock */
  for (i = 0; i < NR_VNODES; i++) {
	rvp = &vnode[i];

	w = rvp->v_lock.t_write;
	assert(w != self);
	while (w && w->w_next != NULL) {
		w = w->w_next;
		assert(w != self);
	}

	w = rvp->v_lock.t_serial;
	assert(w != self);
	while (w && w->w_next != NULL) {
		w = w->w_next;
		assert(w != self);
	}
  }
#endif

  tll_unlock(&vp->v_lock);
}
/*===========================================================================*
 *			upgrade_vnode_lock				     *
 *===========================================================================*/
void upgrade_vnode_lock(struct vnode *vp)
{
  ASSERTVP(vp);
  tll_upgrade(&vp->v_lock);
}
/*===========================================================================*
 *			dup_vnode					     *
 *===========================================================================*/
void dup_vnode(struct vnode *vp)
{
/* dup_vnode() is called to increment the vnode and therefore the
 * referred inode's counter.
 */
  ASSERTVP(vp);
  vp->v_ref_count++;
}
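
/* Note that dup_vnode() is purely local to VFS: it bumps only v_ref_count
 * and sends nothing to the FS process. The FS learns about references only
 * through get_vnode() and put_vnode(), which is exactly the batching that
 * the put_vnode() comment below motivates. */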
/*===========================================================================*
 *			put_vnode					     *
 *===========================================================================*/
void put_vnode(struct vnode *vp)
{
/* Decrease vnode's usage counter and decrease inode's usage counter in the
 * corresponding FS process. Decreasing the fs_count each time we decrease the
 * ref count would lead to poor performance. Instead, only decrease fs_count
 * when the ref count hits zero. However, this could lead to fs_count wrapping.
 * To prevent this, we drop the counter to 1 when the counter hits 256.
 * We maintain fs_count as a sanity check to make sure VFS and the FS are in
 * sync when the ref count hits zero. */
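
/* Illustrative example of the wrap guard (a reading of the code below):
 * suppose a file is opened and closed repeatedly while another open file
 * description keeps v_ref_count at 2. Each open raises v_fs_count, but each
 * close only decrements v_ref_count here, so v_fs_count creeps upward. Once
 * it exceeds 256, vnode_clean_refs() asks the FS to drop all references but
 * one, resetting v_fs_count to 1 long before the counter could wrap. */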
  int r, lock_vp;

  ASSERTVP(vp);

  /* Lock vnode. It's quite possible this thread already has a lock on this
   * vnode. That's no problem, because the reference counter will not decrease
   * to zero in that case. However, if the counter does decrease to zero *and*
   * is already locked, we have a consistency problem somewhere. */
  lock_vp = lock_vnode(vp, VNODE_OPCL);

  if (vp->v_ref_count > 1) {
	/* Decrease counter */
	vp->v_ref_count--;
	if (vp->v_fs_count > 256)
		vnode_clean_refs(vp);
	if (lock_vp != EBUSY) unlock_vnode(vp);
	return;
  }

  /* If we already had a lock, there is a consistency problem */
  assert(lock_vp != EBUSY);
  upgrade_vnode_lock(vp); /* Acquire exclusive access */

  /* A vnode that's not in use can't be put back. */
  if (vp->v_ref_count <= 0)
	panic("put_vnode failed: bad v_ref_count %d\n", vp->v_ref_count);

  /* fs_count should indicate that the file is in use. */
  if (vp->v_fs_count <= 0)
	panic("put_vnode failed: bad v_fs_count %d\n", vp->v_fs_count);

  /* Tell FS we don't need this inode to be open anymore. */
  r = req_putnode(vp->v_fs_e, vp->v_inode_nr, vp->v_fs_count);

  if (r != OK)
	printf("VFS: putnode failed: %d\n", r);

  /* This inode could've been mapped. If so, tell mapped FS to close it as
   * well. If mapped onto same FS, this putnode is not needed. */
  if (vp->v_mapfs_e != NONE && vp->v_mapfs_e != vp->v_fs_e)
	req_putnode(vp->v_mapfs_e, vp->v_mapinode_nr, vp->v_mapfs_count);

  vp->v_fs_count = 0;
  vp->v_ref_count = 0;
  vp->v_mapfs_count = 0;

  unlock_vnode(vp);
}
/*===========================================================================*
 *			vnode_clean_refs				     *
 *===========================================================================*/
void vnode_clean_refs(struct vnode *vp)
{
/* Tell the underlying FS to drop all references but one. */

  if (vp == NULL) return;
  if (vp->v_fs_count <= 1) return;	/* Nothing to do */

  /* Drop all references except one */
  req_putnode(vp->v_fs_e, vp->v_inode_nr, vp->v_fs_count - 1);
  vp->v_fs_count = 1;
}
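
/* Typical reference life cycle implied by the entry points above (an
 * illustrative sketch; the actual callers live elsewhere in VFS): an open
 * goes through get_vnode(), which raises both v_ref_count and v_fs_count;
 * duplicating a file descriptor needs only dup_vnode(), which raises
 * v_ref_count locally; each close goes through put_vnode(), which defers the
 * req_putnode() message to the FS until the last VFS reference is dropped. */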