/* This file contains the procedures that manipulate file descriptors.
 *
 * The entry points into this file are
 *   get_fd:	    look for a free file descriptor and a free filp slot
 *   get_filp:	    look up the filp entry for a given file descriptor
 *   find_filp:	    find a filp slot that points to a given vnode
 *   inval_filp:    invalidate a filp and associated fds; only let close()
 *		    happen on it
 *   do_verify_fd:  verify whether the given file descriptor is valid for
 *		    the given endpoint
 *   do_set_filp:   mark a filp as in-flight
 *   do_copy_filp:  copy a filp to another endpoint
 *   do_put_filp:   mark a filp as no longer in-flight
 *   do_cancel_fd:  cancel the transaction when something goes wrong for
 *		    the receiver
 */
#include <sys/select.h>
#include <minix/callnr.h>
#include <minix/u64.h>
#include <assert.h>
#include <sys/stat.h>
#include "fs.h"
#include "file.h"
#include "fproc.h"
#include "vnode.h"

static filp_id_t verify_fd(endpoint_t ep, int fd);
#if LOCK_DEBUG
/*===========================================================================*
 *			check_filp_locks_by_me				     *
 *===========================================================================*/
void check_filp_locks_by_me(void)
{
/* Check whether this thread still has filp locks held */
  struct filp *f;
  int r;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
	r = mutex_trylock(&f->filp_lock);
	if (r == -EDEADLK)
		panic("Thread %d still holds filp lock on filp %p call_nr=%d\n",
		      mthread_self(), f, job_call_nr);
	else if (r == 0) {
		/* We just obtained the lock, release it */
		mutex_unlock(&f->filp_lock);
	}
  }
}
#endif
/*===========================================================================*
 *				check_filp_locks			     *
 *===========================================================================*/
void check_filp_locks(void)
{
  struct filp *f;
  int r, count = 0;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
	r = mutex_trylock(&f->filp_lock);
	if (r == -EBUSY) {
		/* Mutex is still locked */
		count++;
	} else if (r == 0) {
		/* We just obtained a lock, don't want it */
		mutex_unlock(&f->filp_lock);
	} else
		panic("filp_lock weird state");
  }
  if (count) panic("locked filps");
#if 0
  else printf("check_filp_locks OK\n");
#endif
}
/*===========================================================================*
 *				do_filp_gc				     *
 *===========================================================================*/
void *do_filp_gc(void *UNUSED(arg))
{
  struct filp *f;
  struct vnode *vp;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
	if (!(f->filp_state & FS_INVALIDATED)) continue;
	assert(f->filp_vno != NULL);
	vp = f->filp_vno;

	/* Synchronize with worker thread that might hold a lock on the vp */
	lock_vnode(vp, VNODE_OPCL);
	unlock_vnode(vp);

	/* If garbage collection was invoked due to a failed device open
	 * request, then common_open has already cleaned up and we have
	 * nothing to do.
	 */
	if (!(f->filp_state & FS_INVALIDATED)) {
		continue;
	}

	/* If garbage collection was invoked due to a failed device close
	 * request, then close_filp has already cleaned up and we have
	 * nothing to do.
	 */
	if (f->filp_mode != FILP_CLOSED) {
		assert(f->filp_count == 0);
		f->filp_count = 1;	/* So lock_filp and close_filp will
					 * do their job */
		lock_filp(f, VNODE_READ);
		close_filp(f);
	}

	f->filp_state &= ~FS_INVALIDATED;
  }

  thread_cleanup(NULL);
  return(NULL);
}
/*===========================================================================*
 *				init_filps				     *
 *===========================================================================*/
void init_filps(void)
{
/* Initialize filps */
  struct filp *f;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
	if (mutex_init(&f->filp_lock, NULL) != 0)
		panic("Failed to initialize filp mutex");
  }
}
/*===========================================================================*
 *				get_fd					     *
 *===========================================================================*/
int get_fd(int start, mode_t bits, int *k, struct filp **fpt)
{
/* Look for a free file descriptor and a free filp slot. Fill in the mode word
 * in the latter, but don't claim either one yet, since the open() or creat()
 * may yet fail.
 */
  register struct filp *f;
  register int i;

  /* Search the fproc fp_filp table for a free file descriptor. */
  for (i = start; i < OPEN_MAX; i++) {
	if (fp->fp_filp[i] == NULL && !FD_ISSET(i, &fp->fp_filp_inuse)) {
		/* A file descriptor has been located. */
		*k = i;
		break;
	}
  }

  /* Check to see if a file descriptor has been found. */
  if (i >= OPEN_MAX) return(EMFILE);

  /* If we don't care about a filp, return now */
  if (fpt == NULL) return(OK);

  /* Now that a file descriptor has been found, look for a free filp slot. */
  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
	assert(f->filp_count >= 0);
	if (f->filp_count == 0 && mutex_trylock(&f->filp_lock) == 0) {
		f->filp_mode = bits;
		f->filp_pos = cvu64(0);
		f->filp_selectors = 0;
		f->filp_select_ops = 0;
		f->filp_pipe_select_ops = 0;
		f->filp_flags = 0;
		f->filp_state = FS_NORMAL;
		f->filp_select_flags = 0;
		f->filp_softlock = NULL;
		*fpt = f;
		return(OK);
	}
  }

  /* If control passes here, the filp table must be full. Report that back. */
  return(ENFILE);
}
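/* Usage sketch (illustrative, not part of the build): a caller such as
 * common_open() reserves both slots first and claims them only once the open
 * can no longer fail. 'bits' and 'vp' stand in for the caller's mode word
 * and target vnode:
 *
 *	struct filp *fil_ptr;
 *	int r, fd;
 *
 *	if ((r = get_fd(0, bits, &fd, &fil_ptr)) != OK) return(r);
 *	... do the actual open; then, on success, claim the slots:
 *	fp->fp_filp[fd] = fil_ptr;
 *	FD_SET(fd, &fp->fp_filp_inuse);
 *	fil_ptr->filp_count = 1;
 *	fil_ptr->filp_vno = vp;
 */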
/*===========================================================================*
 *				get_filp				     *
 *===========================================================================*/
struct filp *get_filp(int fild, tll_access_t locktype)
{
/* See if 'fild' refers to a valid file descriptor for the calling process.
 * If so, return its filp pointer, locked with 'locktype'. */

  return get_filp2(fp, fild, locktype);
}
/*===========================================================================*
 *				get_filp2				     *
 *===========================================================================*/
struct filp *get_filp2(register struct fproc *rfp, int fild,
	tll_access_t locktype)
{
/* See if 'fild' refers to a valid file descriptor for process 'rfp'. If so,
 * return its filp pointer, locked with 'locktype'. */
  struct filp *filp;

  filp = NULL;
  if (fild < 0 || fild >= OPEN_MAX)
	err_code = EBADF;
  else if (rfp->fp_filp[fild] == NULL && FD_ISSET(fild, &rfp->fp_filp_inuse))
	err_code = EIO;	/* The filedes is not there, but is not closed
			 * either. */
  else if ((filp = rfp->fp_filp[fild]) == NULL)
	err_code = EBADF;
  else
	lock_filp(filp, locktype);	/* All is fine */

  return(filp);	/* may also be NULL */
}
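/* Usage sketch (illustrative): system call handlers look up and lock the
 * filp behind the caller's fd, and must unlock it on every exit path. The
 * variable 'fd' stands in for however the handler obtained the descriptor:
 *
 *	struct filp *f;
 *
 *	if ((f = get_filp(fd, VNODE_READ)) == NULL) return(err_code);
 *	... operate on f and f->filp_vno ...
 *	unlock_filp(f);
 */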
/*===========================================================================*
 *				find_filp				     *
 *===========================================================================*/
struct filp *find_filp(struct vnode *vp, mode_t bits)
{
/* Find a filp slot that refers to the vnode 'vp' in a way as described
 * by the mode bit 'bits'. Used for determining whether somebody is still
 * interested in either end of a pipe. Also used when opening a FIFO to
 * find partners to share a filp field with (to share the file position).
 * Like 'get_fd' it performs its job by linear search through the filp table.
 */
  struct filp *f;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
	if (f->filp_count != 0 && f->filp_vno == vp && (f->filp_mode & bits)) {
		return(f);
	}
  }

  /* If control passes here, the filp wasn't there. Report that back. */
  return(NULL);
}
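/* Usage sketch (illustrative): pipe code can ask whether anyone still has
 * the other end open, e.g. a writer checking for remaining readers on
 * vnode 'vp':
 *
 *	if (find_filp(vp, R_BIT) == NULL) {
 *		... no readers left; the writer gets EPIPE/SIGPIPE ...
 *	}
 */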
/*===========================================================================*
 *				invalidate_filp				     *
 *===========================================================================*/
int invalidate_filp(struct filp *rfilp)
{
/* Invalidate filp. The fp_filp_inuse bits are not cleared, so the fd slots
 * cannot be reused until they are closed first. */
  int f, fd, n = 0;

  for (f = 0; f < NR_PROCS; f++) {
	if (fproc[f].fp_pid == PID_FREE) continue;
	for (fd = 0; fd < OPEN_MAX; fd++) {
		if (fproc[f].fp_filp[fd] && fproc[f].fp_filp[fd] == rfilp) {
			fproc[f].fp_filp[fd] = NULL;
			n++;
		}
	}
  }

  rfilp->filp_state |= FS_INVALIDATED;
  return(n);	/* Report back how often this filp has been invalidated. */
}
/*===========================================================================*
 *			invalidate_filp_by_char_major			     *
 *===========================================================================*/
void invalidate_filp_by_char_major(int major)
{
  struct filp *f;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
	if (f->filp_count != 0 && f->filp_vno != NULL) {
		if (major(f->filp_vno->v_sdev) == major &&
		    S_ISCHR(f->filp_vno->v_mode)) {
			(void) invalidate_filp(f);
		}
	}
  }
}
/*===========================================================================*
 *			invalidate_filp_by_endpt			     *
 *===========================================================================*/
void invalidate_filp_by_endpt(endpoint_t proc_e)
{
  struct filp *f;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
	if (f->filp_count != 0 && f->filp_vno != NULL) {
		if (f->filp_vno->v_fs_e == proc_e)
			(void) invalidate_filp(f);
	}
  }
}
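/* The invalidate_* helpers above are typically used when a character driver
 * or file server endpoint has died: they unhook the filp from every process
 * (the fd slots stay marked in fp_filp_inuse) and set FS_INVALIDATED, after
 * which the do_filp_gc() worker closes the filp and recycles the slot. */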
/*===========================================================================*
 *				lock_filp				     *
 *===========================================================================*/
void lock_filp(struct filp *filp, tll_access_t locktype)
{
  struct fproc *org_fp;
  struct worker_thread *org_self;
  struct vnode *vp;

  assert(filp->filp_count > 0);
  vp = filp->filp_vno;
  assert(vp != NULL);

  /* Lock vnode only if we haven't already locked it. If already locked by
   * us, we're allowed to have one additional 'soft' lock. */
  if (tll_locked_by_me(&vp->v_lock)) {
	assert(filp->filp_softlock == NULL);
	filp->filp_softlock = fp;
  } else {
	lock_vnode(vp, locktype);
  }

  assert(vp->v_ref_count > 0);	/* vnode still in use? */
  assert(filp->filp_vno == vp);	/* vnode still what we think it is? */

  /* First try to get the filp lock right off the bat */
  if (mutex_trylock(&filp->filp_lock) != 0) {

	/* Already in use, let's wait for our turn */
	org_fp = fp;
	org_self = self;

	if (mutex_lock(&filp->filp_lock) != 0)
		panic("unable to obtain lock on filp");

	fp = org_fp;
	self = org_self;
  }
}
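/* Blocking in mutex_lock() above lets other worker threads run, and those
 * threads reassign the global 'fp' and 'self' pointers; that is why
 * lock_filp() saves and restores them around the blocking call. */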
/*===========================================================================*
 *				unlock_filp				     *
 *===========================================================================*/
void unlock_filp(struct filp *filp)
{
  /* If this filp holds a soft lock on the vnode, we must be the owner */
  if (filp->filp_softlock != NULL)
	assert(filp->filp_softlock == fp);

  if (filp->filp_count > 0 || filp->filp_state & FS_INVALIDATED) {
	/* Only unlock the vnode if the filp is still in use and we don't
	 * hold a soft lock on it */
	if (filp->filp_softlock == NULL) {
		assert(tll_islocked(&(filp->filp_vno->v_lock)));
		unlock_vnode(filp->filp_vno);
	}
  }

  filp->filp_softlock = NULL;
  if (mutex_unlock(&filp->filp_lock) != 0)
	panic("unable to release lock on filp");
}
/*===========================================================================*
 *				unlock_filps				     *
 *===========================================================================*/
void unlock_filps(struct filp *filp1, struct filp *filp2)
{
/* Unlock two filps that are tied to the same vnode. As a thread can lock a
 * vnode only once, unlocking the vnode twice would result in an error. */

  /* No NULL pointers and not equal */
  assert(filp1);
  assert(filp2);
  assert(filp1 != filp2);

  /* Must be tied to the same vnode and not NULL */
  assert(filp1->filp_vno == filp2->filp_vno);
  assert(filp1->filp_vno != NULL);

  if (filp1->filp_count > 0 && filp2->filp_count > 0) {
	/* Only unlock the vnode if the filps are still in use */
	unlock_vnode(filp1->filp_vno);
  }

  filp1->filp_softlock = NULL;
  filp2->filp_softlock = NULL;
  if (mutex_unlock(&filp2->filp_lock) != 0)
	panic("unable to release filp lock on filp2");
  if (mutex_unlock(&filp1->filp_lock) != 0)
	panic("unable to release filp lock on filp1");
}
/*===========================================================================*
 *				verify_fd				     *
 *===========================================================================*/
static filp_id_t verify_fd(endpoint_t ep, int fd)
{
/* Verify whether the file descriptor 'fd' is valid for the endpoint 'ep'.
 * When the file descriptor is valid, verify_fd returns a pointer to that
 * filp (locked for reading), else it returns NULL.
 */
  int slot;
  struct filp *rfilp;

  if (isokendpt(ep, &slot) != OK)
	return(NULL);

  rfilp = get_filp2(&fproc[slot], fd, VNODE_READ);

  return(rfilp);
}
/*===========================================================================*
 *				do_verify_fd				     *
 *===========================================================================*/
int do_verify_fd(void)
{
  struct filp *rfilp;
  endpoint_t proc_e;
  int fd;

  proc_e = job_m_in.USER_ENDPT;
  fd = job_m_in.COUNT;

  rfilp = (struct filp *) verify_fd(proc_e, fd);
  m_out.ADDRESS = (void *) rfilp;
  if (rfilp != NULL) unlock_filp(rfilp);
  return (rfilp != NULL) ? OK : EINVAL;
}
/*===========================================================================*
 *				set_filp				     *
 *===========================================================================*/
int set_filp(filp_id_t sfilp)
{
  if (sfilp == NULL) return(EINVAL);

  lock_filp(sfilp, VNODE_READ);
  sfilp->filp_count++;
  unlock_filp(sfilp);

  return(OK);
}
/*===========================================================================*
 *				do_set_filp				     *
 *===========================================================================*/
int do_set_filp(void)
{
  filp_id_t f;

  f = (filp_id_t) job_m_in.ADDRESS;
  return set_filp(f);
}
/*===========================================================================*
 *				copy_filp				     *
 *===========================================================================*/
int copy_filp(endpoint_t to_ep, filp_id_t cfilp)
{
  int fd;
  int slot;
  struct fproc *rfp;

  if (isokendpt(to_ep, &slot) != OK) return(EINVAL);
  rfp = &fproc[slot];

  /* Find an open slot in fp_filp */
  for (fd = 0; fd < OPEN_MAX; fd++) {
	if (rfp->fp_filp[fd] == NULL &&
	    !FD_ISSET(fd, &rfp->fp_filp_inuse)) {

		/* Found a free slot, add descriptor */
		FD_SET(fd, &rfp->fp_filp_inuse);
		rfp->fp_filp[fd] = cfilp;
		rfp->fp_filp[fd]->filp_count++;
		return(fd);
	}
  }

  /* File descriptor table is full */
  return(EMFILE);
}
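/* Taken together, verify_fd/set_filp/copy_filp/put_filp/cancel_fd implement
 * a small descriptor-passing transaction between endpoints: the sender's fd
 * is verified and its filp marked in-flight by raising the reference count
 * (set_filp), the receiver gains a new fd referring to the same filp
 * (copy_filp), and put_filp or cancel_fd drops a reference again when the
 * transfer completes or fails. */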
/*===========================================================================*
 *				do_copy_filp				     *
 *===========================================================================*/
int do_copy_filp(void)
{
  endpoint_t proc_e;
  filp_id_t f;

  proc_e = job_m_in.USER_ENDPT;
  f = (filp_id_t) job_m_in.ADDRESS;

  return copy_filp(proc_e, f);
}
/*===========================================================================*
 *				put_filp				     *
 *===========================================================================*/
int put_filp(filp_id_t pfilp)
{
  if (pfilp == NULL) {
	return(EINVAL);
  } else {
	lock_filp(pfilp, VNODE_OPCL);
	close_filp(pfilp);
	return(OK);
  }
}
/*===========================================================================*
 *				do_put_filp				     *
 *===========================================================================*/
int do_put_filp(void)
{
  filp_id_t f;

  f = (filp_id_t) job_m_in.ADDRESS;
  return put_filp(f);
}
/*===========================================================================*
 *				cancel_fd				     *
 *===========================================================================*/
int cancel_fd(endpoint_t ep, int fd)
{
  int slot;
  struct fproc *rfp;
  struct filp *rfilp;

  if (isokendpt(ep, &slot) != OK) return(EINVAL);
  rfp = &fproc[slot];

  /* Check that the input 'fd' is valid */
  rfilp = (struct filp *) verify_fd(ep, fd);
  if (rfilp != NULL) {
	/* Found a valid descriptor, remove it */
	FD_CLR(fd, &rfp->fp_filp_inuse);
	if (rfp->fp_filp[fd]->filp_count == 0) {
		unlock_filp(rfilp);
		printf("VFS: filp_count for slot %d fd %d already zero\n",
		       slot, fd);
		return(EINVAL);
	}
	rfp->fp_filp[fd]->filp_count--;
	rfp->fp_filp[fd] = NULL;
	unlock_filp(rfilp);
	return(fd);
  }

  /* File descriptor is not valid for the endpoint. */
  return(EINVAL);
}
/*===========================================================================*
 *				do_cancel_fd				     *
 *===========================================================================*/
int do_cancel_fd(void)
{
  endpoint_t proc_e;
  int fd;

  proc_e = job_m_in.USER_ENDPT;
  fd = job_m_in.COUNT;

  return cancel_fd(proc_e, fd);
}
/*===========================================================================*
 *				close_filp				     *
 *===========================================================================*/
void close_filp(struct filp *f)
{
/* Close a file. Will also unlock the filp when done. */

  int rw;
  dev_t dev;
  struct vnode *vp;

  /* Must be locked */
  assert(mutex_trylock(&f->filp_lock) == -EDEADLK);
  assert(tll_islocked(&f->filp_vno->v_lock));

  vp = f->filp_vno;

  if (f->filp_count - 1 == 0 && f->filp_mode != FILP_CLOSED) {
	/* Check to see if the file is special. */
	if (S_ISCHR(vp->v_mode) || S_ISBLK(vp->v_mode)) {
		dev = (dev_t) vp->v_sdev;
		if (S_ISBLK(vp->v_mode)) {
			lock_bsf();
			if (vp->v_bfs_e == ROOT_FS_E) {
				/* Invalidate the cache unless the special is
				 * mounted. Assume that the root file system's
				 * block specials are open only for fsck.
				 */
				req_flush(vp->v_bfs_e, dev);
			}
			unlock_bsf();

			/* Attempt to close only when feasible */
			if (!(f->filp_state & FS_INVALIDATED)) {
				(void) bdev_close(dev);	/* Ignore errors */
			}
		} else {
			/* Attempt to close only when feasible */
			if (!(f->filp_state & FS_INVALIDATED)) {
				(void) dev_close(dev, f-filp); /* Ignore
								* errors */
			}
		}

		f->filp_mode = FILP_CLOSED;
	}
  }

  /* If the inode being closed is a pipe, release everyone hanging on it. */
  if (S_ISFIFO(vp->v_mode)) {
	rw = (f->filp_mode & R_BIT ? WRITE : READ);
	release(vp, rw, susp_count);
  }

  f->filp_count--;	/* If the filp got invalidated at device closure,
			 * the count might've become negative now */
  if (f->filp_count == 0 ||
		(f->filp_count < 0 && f->filp_state & FS_INVALIDATED)) {
	if (S_ISFIFO(vp->v_mode)) {
		/* Last reader or writer is going. Tell PFS about the latest
		 * pipe size.
		 */
		truncate_vnode(vp, vp->v_size);
	}

	unlock_vnode(f->filp_vno);
	put_vnode(f->filp_vno);
	f->filp_vno = NULL;
	f->filp_mode = FILP_CLOSED;
	f->filp_count = 0;
  } else if (f->filp_count < 0) {
	panic("VFS: invalid filp count: %d ino %d/%d", f->filp_count,
	      vp->v_dev, vp->v_inode_nr);
  } else {
	unlock_vnode(f->filp_vno);
  }

  mutex_unlock(&f->filp_lock);
}
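/* Several fds can refer to one filp (e.g. after dup() or fork()), so closing
 * a descriptor merely decrements filp_count here; only the drop to zero
 * releases the vnode and, for block and character specials, reaches the
 * device driver. */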