/* This file contains the procedures that manipulate file descriptors.
 *
 * The entry points into this file are
 *   get_fd:        look for free file descriptor and free filp slots
 *   get_filp:      look up the filp entry for a given file descriptor
 *   find_filp:     find a filp slot that points to a given vnode
 *   inval_filp:    invalidate a filp and associated fd's, only let close()
 *                  happen on it
 *   do_verify_fd:  verify whether the given file descriptor is valid for
 *                  the given endpoint.
 *   do_set_filp:   marks a filp as in-flight.
 *   do_copy_filp:  copies a filp to another endpoint.
 *   do_put_filp:   marks a filp as not in-flight anymore.
 *   do_cancel_fd:  cancel the transaction when something goes wrong for
 *                  the receiver.
 */
#include <sys/select.h>
#include <minix/callnr.h>
#include <minix/u64.h>
#include <assert.h>
#include <sys/stat.h>
#include "fs.h"
#include "file.h"
#include "fproc.h"
#include "vnode.h"

static filp_id_t verify_fd(endpoint_t ep, int fd);
#if LOCK_DEBUG
/*===========================================================================*
 *				check_filp_locks_by_me			     *
 *===========================================================================*/
void check_filp_locks_by_me(void)
{
/* Check whether this thread still has filp locks held */
  struct filp *f;
  int r;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
	r = mutex_trylock(&f->filp_lock);
	if (r == -EDEADLK)
		panic("Thread %d still holds filp lock on filp %p call_nr=%d\n",
		      mthread_self(), f, job_call_nr);
	else if (r == 0) {
		/* We just obtained the lock, release it */
		mutex_unlock(&f->filp_lock);
	}
  }
}
#endif
/*===========================================================================*
 *				check_filp_locks			     *
 *===========================================================================*/
void check_filp_locks(void)
{
  struct filp *f;
  int r, count = 0;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
	r = mutex_trylock(&f->filp_lock);
	if (r == -EBUSY) {
		/* Mutex is still locked */
		count++;
	} else if (r == 0) {
		/* We just obtained a lock, don't want it */
		mutex_unlock(&f->filp_lock);
	} else
		panic("filp_lock weird state");
  }
  if (count) panic("locked filps");
#if 0
  else printf("check_filp_locks OK\n");
#endif
}
/*===========================================================================*
 *				do_filp_gc				     *
 *===========================================================================*/
void *do_filp_gc(void *UNUSED(arg))
{
  struct filp *f;
  struct vnode *vp;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
	if (!(f->filp_state & FS_INVALIDATED)) continue;
	assert(f->filp_vno != NULL);
	vp = f->filp_vno;

	/* Synchronize with worker thread that might hold a lock on the vp */
	lock_vnode(vp, VNODE_OPCL);
	unlock_vnode(vp);

	/* If garbage collection was invoked due to a failed device open
	 * request, then common_open has already cleaned up and we have
	 * nothing to do.
	 */
	if (!(f->filp_state & FS_INVALIDATED)) {
		continue;
	}

	/* If garbage collection was invoked due to a failed device close
	 * request, then close_filp has already cleaned up and we have
	 * nothing to do.
	 */
	if (f->filp_mode != FILP_CLOSED) {
		assert(f->filp_count == 0);
		f->filp_count = 1;	/* So lock_filp and close_filp will do
					 * their job */
		lock_filp(f, VNODE_READ);
		close_filp(f);
	}

	f->filp_state &= ~FS_INVALIDATED;
  }

  thread_cleanup(NULL);
  return(NULL);
}
/*===========================================================================*
 *				init_filps				     *
 *===========================================================================*/
void init_filps(void)
{
/* Initialize filps */
  struct filp *f;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
	if (mutex_init(&f->filp_lock, NULL) != 0)
		panic("Failed to initialize filp mutex");
  }
}
/*===========================================================================*
 *				get_fd					     *
 *===========================================================================*/
int get_fd(int start, mode_t bits, int *k, struct filp **fpt)
{
/* Look for a free file descriptor and a free filp slot. Fill in the mode word
 * in the latter, but don't claim either one yet, since the open() or creat()
 * may yet fail.
 */

  register struct filp *f;
  register int i;

  /* Search the fproc fp_filp table for a free file descriptor. */
  for (i = start; i < OPEN_MAX; i++) {
	if (fp->fp_filp[i] == NULL && !FD_ISSET(i, &fp->fp_filp_inuse)) {
		/* A file descriptor has been located. */
		*k = i;
		break;
	}
  }

  /* Check to see if a file descriptor has been found. */
  if (i >= OPEN_MAX) return(EMFILE);

  /* If we don't care about a filp, return now */
  if (fpt == NULL) return(OK);

  /* Now that a file descriptor has been found, look for a free filp slot. */
  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
	assert(f->filp_count >= 0);
	if (f->filp_count == 0 && mutex_trylock(&f->filp_lock) == 0) {
		f->filp_mode = bits;
		f->filp_pos = cvu64(0);
		f->filp_selectors = 0;
		f->filp_select_ops = 0;
		f->filp_pipe_select_ops = 0;
		f->filp_flags = 0;
		f->filp_state = FS_NORMAL;
		f->filp_select_flags = 0;
		f->filp_softlock = NULL;
		*fpt = f;
		return(OK);
	}
  }

  /* If control passes here, the filp table must be full. Report that back. */
  return(ENFILE);
}
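
/* A hedged usage sketch: get_fd returns with the filp slot locked but leaves
 * both slots unclaimed, so a successful open()-style caller still has to
 * claim them itself. The claiming steps below are illustrative, not an exact
 * copy of any caller in VFS:
 *
 *	int fd, r;
 *	struct filp *f;
 *
 *	if ((r = get_fd(0, bits, &fd, &f)) != OK) return(r);
 *	f->filp_vno = vp;		attach the (locked) filp to a vnode
 *	f->filp_count = 1;		claim the filp slot
 *	fp->fp_filp[fd] = f;		claim the file descriptor
 *	FD_SET(fd, &fp->fp_filp_inuse);
 *	unlock_filp(f);
 */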
/*===========================================================================*
 *				get_filp				     *
 *===========================================================================*/
struct filp *get_filp(fild, locktype)
int fild;			/* file descriptor */
tll_access_t locktype;
{
/* See if 'fild' refers to a valid file descr. If so, return its filp ptr. */

  return get_filp2(fp, fild, locktype);
}
/*===========================================================================*
 *				get_filp2				     *
 *===========================================================================*/
struct filp *get_filp2(rfp, fild, locktype)
register struct fproc *rfp;
int fild;			/* file descriptor */
tll_access_t locktype;
{
/* See if 'fild' refers to a valid file descr. If so, return its filp ptr. */
  struct filp *filp;

  filp = NULL;
  if (fild < 0 || fild >= OPEN_MAX)
	err_code = EBADF;
  else if (rfp->fp_filp[fild] == NULL && FD_ISSET(fild, &rfp->fp_filp_inuse))
	err_code = EIO;	/* The filedes is not there, but is not closed
			 * either. */
  else if ((filp = rfp->fp_filp[fild]) == NULL)
	err_code = EBADF;
  else
	lock_filp(filp, locktype);	/* All is fine */

  return(filp);	/* may also be NULL */
}
/*===========================================================================*
 *				find_filp				     *
 *===========================================================================*/
struct filp *find_filp(struct vnode *vp, mode_t bits)
{
/* Find a filp slot that refers to the vnode 'vp' in a way as described
 * by the mode bit 'bits'. Used for determining whether somebody is still
 * interested in either end of a pipe. Also used when opening a FIFO to
 * find partners to share a filp field with (to share the file position).
 * Like 'get_fd' it performs its job by linear search through the filp table.
 */

  struct filp *f;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
	if (f->filp_count != 0 && f->filp_vno == vp && (f->filp_mode & bits)) {
		return(f);
	}
  }

  /* If control passes here, the filp wasn't there. Report that back. */
  return(NULL);
}
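
/* A hedged usage sketch: a pipe writer could use find_filp to check whether
 * any read ends are still open before deciding to raise SIGPIPE. R_BIT is
 * the read-mode bit in filp_mode; the surrounding logic is illustrative:
 *
 *	if (find_filp(vp, R_BIT) == NULL) {
 *		no reader is left; a write on this pipe would get SIGPIPE
 *	}
 */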
/*===========================================================================*
 *				invalidate_filp				     *
 *===========================================================================*/
int invalidate_filp(struct filp *rfilp)
{
/* Invalidate filp. fp_filp_inuse is not cleared, so the filp can't be reused
 * until it is closed first. */

  int f, fd, n = 0;

  for (f = 0; f < NR_PROCS; f++) {
	if (fproc[f].fp_pid == PID_FREE) continue;
	for (fd = 0; fd < OPEN_MAX; fd++) {
		if (fproc[f].fp_filp[fd] && fproc[f].fp_filp[fd] == rfilp) {
			fproc[f].fp_filp[fd] = NULL;
			n++;
		}
	}
  }

  rfilp->filp_state |= FS_INVALIDATED;
  return(n);	/* Report back how often this filp has been invalidated. */
}
/*===========================================================================*
 *			invalidate_filp_by_char_major			     *
 *===========================================================================*/
void invalidate_filp_by_char_major(int major)
{
  struct filp *f;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
	if (f->filp_count != 0 && f->filp_vno != NULL) {
		if (major(f->filp_vno->v_sdev) == major &&
		    S_ISCHR(f->filp_vno->v_mode)) {
			(void) invalidate_filp(f);
		}
	}
  }
}
/*===========================================================================*
 *			invalidate_filp_by_endpt			     *
 *===========================================================================*/
void invalidate_filp_by_endpt(endpoint_t proc_e)
{
  struct filp *f;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
	if (f->filp_count != 0 && f->filp_vno != NULL) {
		if (f->filp_vno->v_fs_e == proc_e)
			(void) invalidate_filp(f);
	}
  }
}
/*===========================================================================*
 *				lock_filp				     *
 *===========================================================================*/
void lock_filp(filp, locktype)
struct filp *filp;
tll_access_t locktype;
{
  struct fproc *org_fp;
  struct worker_thread *org_self;
  struct vnode *vp;

  assert(filp->filp_count > 0);
  vp = filp->filp_vno;
  assert(vp != NULL);

  /* Lock vnode only if we haven't already locked it. If already locked by us,
   * we're allowed to have one additional 'soft' lock. */
  if (tll_locked_by_me(&vp->v_lock)) {
	assert(filp->filp_softlock == NULL);
	filp->filp_softlock = fp;
  } else {
	/* We have to make an exception for vnodes belonging to pipes. Even
	 * read(2) operations on pipes change the vnode and therefore require
	 * exclusive access.
	 */
	if (S_ISFIFO(vp->v_mode) && locktype == VNODE_READ)
		locktype = VNODE_WRITE;
	lock_vnode(vp, locktype);
  }

  assert(vp->v_ref_count > 0);	/* vnode still in use? */
  assert(filp->filp_vno == vp);	/* vnode still what we think it is? */

  /* First try to get filp lock right off the bat */
  if (mutex_trylock(&filp->filp_lock) != 0) {

	/* Already in use, let's wait for our turn */
	org_fp = fp;
	org_self = self;

	if (mutex_lock(&filp->filp_lock) != 0)
		panic("unable to obtain lock on filp");

	fp = org_fp;
	self = org_self;
  }
}
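
/* A hedged sketch of the locking discipline above: lock_filp acquires the
 * vnode lock (or a soft lock) before the filp mutex, and unlock_filp later
 * releases the vnode lock and then the filp mutex. Illustrative pairing:
 *
 *	lock_filp(f, VNODE_READ);	vnode lock, then filp lock
 *	... operate on f and f->filp_vno ...
 *	unlock_filp(f);			vnode lock released, then filp lock
 */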
/*===========================================================================*
 *				unlock_filp				     *
 *===========================================================================*/
void unlock_filp(filp)
struct filp *filp;
{
  /* If this filp holds a soft lock on the vnode, we must be the owner */
  if (filp->filp_softlock != NULL)
	assert(filp->filp_softlock == fp);

  if (filp->filp_count > 0 || filp->filp_state & FS_INVALIDATED) {
	/* Only unlock the vnode if the filp is still in use and we don't
	 * hold a soft lock */
	if (filp->filp_softlock == NULL) {
		assert(tll_islocked(&(filp->filp_vno->v_lock)));
		unlock_vnode(filp->filp_vno);
	}
  }

  filp->filp_softlock = NULL;
  if (mutex_unlock(&filp->filp_lock) != 0)
	panic("unable to release lock on filp");
}
/*===========================================================================*
 *				unlock_filps				     *
 *===========================================================================*/
void unlock_filps(filp1, filp2)
struct filp *filp1;
struct filp *filp2;
{
/* Unlock two filps that are tied to the same vnode. As a thread can lock a
 * vnode only once, unlocking the vnode twice would result in an error. */

  /* No NULL pointers and not equal */
  assert(filp1);
  assert(filp2);
  assert(filp1 != filp2);

  /* Must be tied to the same vnode and not NULL */
  assert(filp1->filp_vno == filp2->filp_vno);
  assert(filp1->filp_vno != NULL);

  if (filp1->filp_count > 0 && filp2->filp_count > 0) {
	/* Only unlock vnode if filps are still in use */
	unlock_vnode(filp1->filp_vno);
  }

  filp1->filp_softlock = NULL;
  filp2->filp_softlock = NULL;
  if (mutex_unlock(&filp2->filp_lock) != 0)
	panic("unable to release filp lock on filp2");
  if (mutex_unlock(&filp1->filp_lock) != 0)
	panic("unable to release filp lock on filp1");
}
/*===========================================================================*
 *				verify_fd				     *
 *===========================================================================*/
static filp_id_t verify_fd(ep, fd)
endpoint_t ep;
int fd;
{
/* Verify whether the file descriptor 'fd' is valid for the endpoint 'ep'. When
 * the file descriptor is valid, verify_fd returns a pointer to that filp, else
 * it returns NULL.
 */

  int slot;
  struct filp *rfilp;

  if (isokendpt(ep, &slot) != OK)
	return(NULL);

  rfilp = get_filp2(&fproc[slot], fd, VNODE_READ);

  return(rfilp);
}
/*===========================================================================*
 *				do_verify_fd				     *
 *===========================================================================*/
int do_verify_fd(void)
{
  struct filp *rfilp;
  endpoint_t proc_e;
  int fd;

  proc_e = job_m_in.USER_ENDPT;
  fd = job_m_in.COUNT;

  rfilp = (struct filp *) verify_fd(proc_e, fd);
  m_out.ADDRESS = (void *) rfilp;
  if (rfilp != NULL) unlock_filp(rfilp);
  return (rfilp != NULL) ? OK : EINVAL;
}
/*===========================================================================*
 *				set_filp				     *
 *===========================================================================*/
int set_filp(sfilp)
filp_id_t sfilp;
{
  if (sfilp == NULL) return(EINVAL);

  lock_filp(sfilp, VNODE_READ);
  sfilp->filp_count++;
  unlock_filp(sfilp);

  return(OK);
}
/*===========================================================================*
 *				do_set_filp				     *
 *===========================================================================*/
int do_set_filp(void)
{
  filp_id_t f;

  f = (filp_id_t) job_m_in.ADDRESS;
  return set_filp(f);
}
/*===========================================================================*
 *				copy_filp				     *
 *===========================================================================*/
int copy_filp(to_ep, cfilp)
endpoint_t to_ep;
filp_id_t cfilp;
{
  int fd;
  int slot;
  struct fproc *rfp;

  if (isokendpt(to_ep, &slot) != OK) return(EINVAL);
  rfp = &fproc[slot];

  /* Find an open slot in fp_filp */
  for (fd = 0; fd < OPEN_MAX; fd++) {
	if (rfp->fp_filp[fd] == NULL &&
	    !FD_ISSET(fd, &rfp->fp_filp_inuse)) {

		/* Found a free slot, add descriptor */
		FD_SET(fd, &rfp->fp_filp_inuse);
		rfp->fp_filp[fd] = cfilp;
		rfp->fp_filp[fd]->filp_count++;
		return(fd);
	}
  }

  /* File descriptor table is full */
  return(EMFILE);
}
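
/* A hedged, compressed sketch of the in-flight descriptor transfer that
 * set_filp, copy_filp, put_filp and cancel_fd support. In reality each step
 * arrives as a separate request; the endpoint names and sequencing here are
 * illustrative only:
 *
 *	filp_id_t id;
 *	int newfd;
 *
 *	if ((id = verify_fd(sender_ep, fd)) == NULL) return(EINVAL);
 *	unlock_filp(id);			verify_fd returns it locked
 *	set_filp(id);				take an in-flight reference
 *	newfd = copy_filp(receiver_ep, id);	install fd at the receiver
 *	put_filp(id);				drop the in-flight reference
 *	if (newfd == EMFILE) ...		receiver's fd table was full
 */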
/*===========================================================================*
 *				do_copy_filp				     *
 *===========================================================================*/
int do_copy_filp(void)
{
  endpoint_t proc_e;
  filp_id_t f;

  proc_e = job_m_in.USER_ENDPT;
  f = (filp_id_t) job_m_in.ADDRESS;

  return copy_filp(proc_e, f);
}
/*===========================================================================*
 *				put_filp				     *
 *===========================================================================*/
int put_filp(pfilp)
filp_id_t pfilp;
{
  if (pfilp == NULL) {
	return EINVAL;
  } else {
	lock_filp(pfilp, VNODE_OPCL);
	close_filp(pfilp);
	return(OK);
  }
}
/*===========================================================================*
 *				do_put_filp				     *
 *===========================================================================*/
int do_put_filp(void)
{
  filp_id_t f;

  f = (filp_id_t) job_m_in.ADDRESS;
  return put_filp(f);
}
/*===========================================================================*
 *				cancel_fd				     *
 *===========================================================================*/
int cancel_fd(ep, fd)
endpoint_t ep;
int fd;
{
  int slot;
  struct fproc *rfp;
  struct filp *rfilp;

  if (isokendpt(ep, &slot) != OK) return(EINVAL);
  rfp = &fproc[slot];

  /* Check that the input 'fd' is valid */
  rfilp = (struct filp *) verify_fd(ep, fd);
  if (rfilp != NULL) {
	/* Found a valid descriptor, remove it */
	FD_CLR(fd, &rfp->fp_filp_inuse);
	if (rfp->fp_filp[fd]->filp_count == 0) {
		unlock_filp(rfilp);
		printf("VFS: filp_count for slot %d fd %d already zero\n",
		       slot, fd);
		return(EINVAL);
	}
	rfp->fp_filp[fd]->filp_count--;
	rfp->fp_filp[fd] = NULL;
	unlock_filp(rfilp);
	return(fd);
  }

  /* File descriptor is not valid for the endpoint. */
  return(EINVAL);
}
/*===========================================================================*
 *				do_cancel_fd				     *
 *===========================================================================*/
int do_cancel_fd(void)
{
  endpoint_t proc_e;
  int fd;

  proc_e = job_m_in.USER_ENDPT;
  fd = job_m_in.COUNT;

  return cancel_fd(proc_e, fd);
}
/*===========================================================================*
 *				close_filp				     *
 *===========================================================================*/
void close_filp(f)
struct filp *f;
{
/* Close a file. Will also unlock filp when done */

  int rw;
  dev_t dev;
  struct vnode *vp;

  /* Must be locked */
  assert(mutex_trylock(&f->filp_lock) == -EDEADLK);
  assert(tll_islocked(&f->filp_vno->v_lock));

  vp = f->filp_vno;

  if (f->filp_count - 1 == 0 && f->filp_mode != FILP_CLOSED) {
	/* Check to see if the file is special. */
	if (S_ISCHR(vp->v_mode) || S_ISBLK(vp->v_mode)) {
		dev = (dev_t) vp->v_sdev;
		if (S_ISBLK(vp->v_mode)) {
			lock_bsf();
			if (vp->v_bfs_e == ROOT_FS_E) {
				/* Invalidate the cache unless the special is
				 * mounted. Assume that the root file system's
				 * block device is open only for fsck.
				 */
				req_flush(vp->v_bfs_e, dev);
			}
			unlock_bsf();

			/* Attempt to close only when feasible */
			if (!(f->filp_state & FS_INVALIDATED)) {
				(void) bdev_close(dev);	/* Ignore errors */
			}
		} else {
			/* Attempt to close only when feasible */
			if (!(f->filp_state & FS_INVALIDATED)) {
				(void) dev_close(dev, f-filp);	/* Ignore
								 * errors */
			}
		}

		f->filp_mode = FILP_CLOSED;
	}
  }

  /* If the inode being closed is a pipe, release everyone hanging on it. */
  if (S_ISFIFO(vp->v_mode)) {
	rw = (f->filp_mode & R_BIT ? WRITE : READ);
	release(vp, rw, susp_count);
  }

  f->filp_count--;	/* If the filp got invalidated at device closure, the
			 * count might have become negative now */
  if (f->filp_count == 0 ||
      (f->filp_count < 0 && f->filp_state & FS_INVALIDATED)) {
	if (S_ISFIFO(vp->v_mode)) {
		/* Last reader or writer is going. Tell PFS about the latest
		 * pipe size.
		 */
		truncate_vnode(vp, vp->v_size);
	}

	unlock_vnode(f->filp_vno);
	put_vnode(f->filp_vno);
	f->filp_vno = NULL;
	f->filp_mode = FILP_CLOSED;
	f->filp_count = 0;
  } else if (f->filp_count < 0) {
	panic("VFS: invalid filp count: %d ino %d/%d", f->filp_count,
	      vp->v_dev, vp->v_inode_nr);
  } else {
	unlock_vnode(f->filp_vno);
  }

  mutex_unlock(&f->filp_lock);
}
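
/* A hedged note on the calling convention above: the asserts at the top of
 * close_filp require both the filp lock and the vnode lock to be held on
 * entry, and close_filp releases both before returning. A caller therefore
 * pairs it with lock_filp rather than unlock_filp, as put_filp does:
 *
 *	lock_filp(f, VNODE_OPCL);	takes the vnode and filp locks
 *	close_filp(f);			releases both when done
 */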