/* Source: minix.git, servers/vfs/vnode.c
 * (blob 0227117487748ee00afa8d78a0908936a9107578)
 */
1 /* This file contains the routines related to vnodes.
2 * The entry points are:
4 * get_vnode - increase counter and get details of an inode
5 * get_free_vnode - get a pointer to a free vnode obj
6 * find_vnode - find a vnode according to the FS endpoint and the inode num.
7 * dup_vnode - duplicate vnode (i.e. increase counter)
8 * put_vnode - drop vnode (i.e. decrease counter)
9 */
11 #include "fs.h"
12 #include "threads.h"
13 #include "vnode.h"
14 #include "vmnt.h"
15 #include "fproc.h"
16 #include "file.h"
17 #include <minix/vfsif.h>
18 #include <assert.h>
/* Is vnode pointer reasonable? */
#if NDEBUG
#define SANEVP(v)
#define CHECKVN(v)
#define ASSERTVP(v)
#else
/* True iff 'v' points inside the global vnode table. */
#define SANEVP(v) ((((v) >= &vnode[0] && (v) < &vnode[NR_VNODES])))

#define BADVP(v, f, l) printf("%s:%d: bad vp %p\n", f, l, v)

/* vp check that returns 0 for use in check_vrefs() */
#define CHECKVN(v) if(!SANEVP(v)) {	\
	BADVP(v, __FILE__, __LINE__);	\
	return 0;	\
}

/* vp check that panics */
#define ASSERTVP(v) if(!SANEVP(v)) { \
	BADVP(v, __FILE__, __LINE__); panic("bad vp"); }
#endif
#if LOCK_DEBUG
/*===========================================================================*
 *				check_vnode_locks_by_me			     *
 *===========================================================================*/
void check_vnode_locks_by_me(struct fproc *rfp)
{
/* Check whether this thread still has locks held on vnodes. Called for
 * debugging only; panics when a vnode lock or a read-lock count is still
 * outstanding at a point where the thread should hold none.
 */
  struct vnode *vp;

  for (vp = &vnode[0]; vp < &vnode[NR_VNODES]; vp++) {
	if (tll_locked_by_me(&vp->v_lock)) {
		panic("Thread %d still holds vnode lock on vp %x call_nr=%d\n",
		      mthread_self(), vp, job_call_nr);
	}
  }

  if (rfp->fp_vp_rdlocks != 0)
	panic("Thread %d still holds read locks on a vnode (%d) call_nr=%d\n",
	      mthread_self(), rfp->fp_vp_rdlocks, job_call_nr);
}
#endif
63 /*===========================================================================*
64 * check_vnode_locks *
65 *===========================================================================*/
66 void check_vnode_locks()
68 struct vnode *vp;
69 int count = 0;
71 for (vp = &vnode[0]; vp < &vnode[NR_VNODES]; vp++)
72 if (is_vnode_locked(vp)) {
73 count++;
76 if (count) panic("%d locked vnodes\n", count);
77 #if 0
78 printf("check_vnode_locks OK\n");
79 #endif
82 /*===========================================================================*
83 * get_free_vnode *
84 *===========================================================================*/
85 struct vnode *get_free_vnode()
87 /* Find a free vnode slot in the vnode table (it's not actually allocated) */
88 struct vnode *vp;
90 for (vp = &vnode[0]; vp < &vnode[NR_VNODES]; ++vp) {
91 if (vp->v_ref_count == 0 && !is_vnode_locked(vp)) {
92 vp->v_uid = -1;
93 vp->v_gid = -1;
94 vp->v_sdev = NO_DEV;
95 vp->v_mapfs_e = NONE;
96 vp->v_mapfs_count = 0;
97 vp->v_mapinode_nr = 0;
98 return(vp);
102 err_code = ENFILE;
103 return(NULL);
107 /*===========================================================================*
108 * find_vnode *
109 *===========================================================================*/
110 struct vnode *find_vnode(int fs_e, ino_t ino)
112 /* Find a specified (FS endpoint and inode number) vnode in the
113 * vnode table */
114 struct vnode *vp;
116 for (vp = &vnode[0]; vp < &vnode[NR_VNODES]; ++vp)
117 if (vp->v_ref_count > 0 && vp->v_inode_nr == ino && vp->v_fs_e == fs_e)
118 return(vp);
120 return(NULL);
123 /*===========================================================================*
124 * is_vnode_locked *
125 *===========================================================================*/
126 int is_vnode_locked(struct vnode *vp)
128 /* Find out whether a thread holds a lock on this vnode or is trying to obtain
129 * a lock. */
130 ASSERTVP(vp);
132 return(tll_islocked(&vp->v_lock) || tll_haspendinglock(&vp->v_lock));
135 /*===========================================================================*
136 * init_vnodes *
137 *===========================================================================*/
138 void init_vnodes(void)
140 struct vnode *vp;
142 for (vp = &vnode[0]; vp < &vnode[NR_VNODES]; ++vp) {
143 vp->v_fs_e = NONE;
144 vp->v_mapfs_e = NONE;
145 vp->v_inode_nr = 0;
146 vp->v_ref_count = 0;
147 vp->v_fs_count = 0;
148 vp->v_mapfs_count = 0;
149 tll_init(&vp->v_lock);
153 /*===========================================================================*
154 * lock_vnode *
155 *===========================================================================*/
156 int lock_vnode(struct vnode *vp, tll_access_t locktype)
158 int r;
160 ASSERTVP(vp);
162 r = tll_lock(&vp->v_lock, locktype);
164 #if LOCK_DEBUG
165 if (locktype == VNODE_READ) {
166 fp->fp_vp_rdlocks++;
168 #endif
170 if (r == EBUSY) return(r);
171 return(OK);
174 /*===========================================================================*
175 * unlock_vnode *
176 *===========================================================================*/
177 void unlock_vnode(struct vnode *vp)
179 #if LOCK_DEBUG
180 int i;
181 register struct vnode *rvp;
182 struct worker_thread *w;
183 #endif
184 ASSERTVP(vp);
186 #if LOCK_DEBUG
187 /* Decrease read-only lock counter when not locked as VNODE_OPCL or
188 * VNODE_WRITE */
189 if (!tll_locked_by_me(&vp->v_lock)) {
190 fp->fp_vp_rdlocks--;
193 for (i = 0; i < NR_VNODES; i++) {
194 rvp = &vnode[i];
196 w = rvp->v_lock.t_write;
197 assert(w != self);
198 while (w && w->w_next != NULL) {
199 w = w->w_next;
200 assert(w != self);
203 w = rvp->v_lock.t_serial;
204 assert(w != self);
205 while (w && w->w_next != NULL) {
206 w = w->w_next;
207 assert(w != self);
210 #endif
212 tll_unlock(&vp->v_lock);
215 /*===========================================================================*
216 * vnode *
217 *===========================================================================*/
218 void upgrade_vnode_lock(struct vnode *vp)
220 ASSERTVP(vp);
221 tll_upgrade(&vp->v_lock);
224 /*===========================================================================*
225 * dup_vnode *
226 *===========================================================================*/
227 void dup_vnode(struct vnode *vp)
229 /* dup_vnode() is called to increment the vnode and therefore the
230 * referred inode's counter.
232 ASSERTVP(vp);
233 vp->v_ref_count++;
237 /*===========================================================================*
238 * put_vnode *
239 *===========================================================================*/
240 void put_vnode(struct vnode *vp)
242 /* Decrease vnode's usage counter and decrease inode's usage counter in the
243 * corresponding FS process. Decreasing the fs_count each time we decrease the
244 * ref count would lead to poor performance. Instead, only decrease fs_count
245 * when the ref count hits zero. However, this could lead to fs_count to wrap.
246 * To prevent this, we drop the counter to 1 when the counter hits 256.
247 * We maintain fs_count as a sanity check to make sure VFS and the FS are in
248 * sync.
250 int r, lock_vp;
252 ASSERTVP(vp);
254 /* Lock vnode. It's quite possible this thread already has a lock on this
255 * vnode. That's no problem, because the reference counter will not decrease
256 * to zero in that case. However, if the counter does decrease to zero *and*
257 * is already locked, we have a consistency problem somewhere. */
258 lock_vp = lock_vnode(vp, VNODE_OPCL);
260 if (vp->v_ref_count > 1) {
261 /* Decrease counter */
262 vp->v_ref_count--;
263 if (vp->v_fs_count > 256)
264 vnode_clean_refs(vp);
265 if (lock_vp != EBUSY) unlock_vnode(vp);
266 return;
269 /* If we already had a lock, there is a consistency problem */
270 assert(lock_vp != EBUSY);
271 upgrade_vnode_lock(vp); /* Acquire exclusive access */
273 /* A vnode that's not in use can't be put back. */
274 if (vp->v_ref_count <= 0)
275 panic("put_vnode failed: bad v_ref_count %d\n", vp->v_ref_count);
277 /* fs_count should indicate that the file is in use. */
278 if (vp->v_fs_count <= 0)
279 panic("put_vnode failed: bad v_fs_count %d\n", vp->v_fs_count);
281 /* Tell FS we don't need this inode to be open anymore. */
282 r = req_putnode(vp->v_fs_e, vp->v_inode_nr, vp->v_fs_count);
284 if (r != OK) {
285 printf("VFS: putnode failed: %d\n", r);
286 util_stacktrace();
289 /* This inode could've been mapped. If so, tell mapped FS to close it as
290 * well. If mapped onto same FS, this putnode is not needed. */
291 if (vp->v_mapfs_e != NONE && vp->v_mapfs_e != vp->v_fs_e)
292 req_putnode(vp->v_mapfs_e, vp->v_mapinode_nr, vp->v_mapfs_count);
294 vp->v_fs_count = 0;
295 vp->v_ref_count = 0;
296 vp->v_mapfs_count = 0;
298 unlock_vnode(vp);
302 /*===========================================================================*
303 * vnode_clean_refs *
304 *===========================================================================*/
305 void vnode_clean_refs(struct vnode *vp)
307 /* Tell the underlying FS to drop all reference but one. */
309 if (vp == NULL) return;
310 if (vp->v_fs_count <= 1) return; /* Nothing to do */
312 /* Drop all references except one */
313 req_putnode(vp->v_fs_e, vp->v_inode_nr, vp->v_fs_count - 1);
314 vp->v_fs_count = 1;