/* This file contains the procedures that manipulate file descriptors.
 *
 * The entry points into this file are
 *   get_fd:	   look for free file descriptor and free filp slots
 *   get_filp:	   look up the filp entry for a given file descriptor
 *   find_filp:	   find a filp slot that points to a given vnode
 *   inval_filp:   invalidate a filp and associated fd's, only let close()
 *		   happen on it
 *   do_verify_fd: verify whether the given file descriptor is valid for
 *		   the given endpoint.
 *   do_set_filp:  marks a filp as in-flight.
 *   do_copy_filp: copies a filp to another endpoint.
 *   do_put_filp:  marks a filp as not in-flight anymore.
 *   do_cancel_fd: cancel the transaction when something goes wrong for
 *		   the receiver.
 */
18 #include <sys/select.h>
19 #include <minix/callnr.h>
20 #include <minix/u64.h>
29 static filp_id_t
verify_fd(endpoint_t ep
, int fd
);
32 /*===========================================================================*
34 *===========================================================================*/
35 void check_filp_locks_by_me(void)
37 /* Check whether this thread still has filp locks held */
41 for (f
= &filp
[0]; f
< &filp
[NR_FILPS
]; f
++) {
42 r
= mutex_trylock(&f
->filp_lock
);
44 panic("Thread %d still holds filp lock on filp %p call_nr=%d\n",
45 mthread_self(), f
, job_call_nr
);
47 /* We just obtained the lock, release it */
48 mutex_unlock(&f
->filp_lock
);
54 /*===========================================================================*
56 *===========================================================================*/
57 void check_filp_locks(void)
62 for (f
= &filp
[0]; f
< &filp
[NR_FILPS
]; f
++) {
63 r
= mutex_trylock(&f
->filp_lock
);
65 /* Mutex is still locked */
68 /* We just obtained a lock, don't want it */
69 mutex_unlock(&f
->filp_lock
);
71 panic("filp_lock weird state");
73 if (count
) panic("locked filps");
75 else printf("check_filp_locks OK\n");
79 /*===========================================================================*
81 *===========================================================================*/
82 void *do_filp_gc(void *UNUSED(arg
))
87 for (f
= &filp
[0]; f
< &filp
[NR_FILPS
]; f
++) {
88 if (!(f
->filp_state
& FS_INVALIDATED
)) continue;
89 assert(f
->filp_vno
!= NULL
);
92 /* Synchronize with worker thread that might hold a lock on the vp */
93 lock_vnode(vp
, VNODE_OPCL
);
96 /* If garbage collection was invoked due to a failed device open
97 * request, then common_open has already cleaned up and we have
100 if (!(f
->filp_state
& FS_INVALIDATED
)) {
104 /* If garbage collection was invoked due to a failed device close
105 * request, the close_filp has already cleaned up and we have nothing
108 if (f
->filp_mode
!= FILP_CLOSED
) {
109 assert(f
->filp_count
== 0);
110 f
->filp_count
= 1; /* So lock_filp and close_filp will do
112 lock_filp(f
, VNODE_READ
);
116 f
->filp_state
&= ~FS_INVALIDATED
;
119 thread_cleanup(NULL
);
123 /*===========================================================================*
125 *===========================================================================*/
126 void init_filps(void)
128 /* Initialize filps */
131 for (f
= &filp
[0]; f
< &filp
[NR_FILPS
]; f
++) {
132 if (mutex_init(&f
->filp_lock
, NULL
) != 0)
133 panic("Failed to initialize filp mutex");
138 /*===========================================================================*
140 *===========================================================================*/
141 int get_fd(int start
, mode_t bits
, int *k
, struct filp
**fpt
)
143 /* Look for a free file descriptor and a free filp slot. Fill in the mode word
144 * in the latter, but don't claim either one yet, since the open() or creat()
148 register struct filp
*f
;
151 /* Search the fproc fp_filp table for a free file descriptor. */
152 for (i
= start
; i
< OPEN_MAX
; i
++) {
153 if (fp
->fp_filp
[i
] == NULL
&& !FD_ISSET(i
, &fp
->fp_filp_inuse
)) {
154 /* A file descriptor has been located. */
160 /* Check to see if a file descriptor has been found. */
161 if (i
>= OPEN_MAX
) return(EMFILE
);
163 /* If we don't care about a filp, return now */
164 if (fpt
== NULL
) return(OK
);
166 /* Now that a file descriptor has been found, look for a free filp slot. */
167 for (f
= &filp
[0]; f
< &filp
[NR_FILPS
]; f
++) {
168 assert(f
->filp_count
>= 0);
169 if (f
->filp_count
== 0 && mutex_trylock(&f
->filp_lock
) == 0) {
171 f
->filp_pos
= cvu64(0);
172 f
->filp_selectors
= 0;
173 f
->filp_select_ops
= 0;
174 f
->filp_pipe_select_ops
= 0;
176 f
->filp_state
= FS_NORMAL
;
177 f
->filp_select_flags
= 0;
178 f
->filp_softlock
= NULL
;
184 /* If control passes here, the filp table must be full. Report that back. */
189 /*===========================================================================*
191 *===========================================================================*/
192 struct filp
*get_filp(fild
, locktype
)
193 int fild
; /* file descriptor */
194 tll_access_t locktype
;
196 /* See if 'fild' refers to a valid file descr. If so, return its filp ptr. */
198 return get_filp2(fp
, fild
, locktype
);
202 /*===========================================================================*
204 *===========================================================================*/
205 struct filp
*get_filp2(rfp
, fild
, locktype
)
206 register struct fproc
*rfp
;
207 int fild
; /* file descriptor */
208 tll_access_t locktype
;
210 /* See if 'fild' refers to a valid file descr. If so, return its filp ptr. */
214 if (fild
< 0 || fild
>= OPEN_MAX
)
216 else if (rfp
->fp_filp
[fild
] == NULL
&& FD_ISSET(fild
, &rfp
->fp_filp_inuse
))
217 err_code
= EIO
; /* The filedes is not there, but is not closed either.
219 else if ((filp
= rfp
->fp_filp
[fild
]) == NULL
)
222 lock_filp(filp
, locktype
); /* All is fine */
224 return(filp
); /* may also be NULL */
228 /*===========================================================================*
230 *===========================================================================*/
231 struct filp
*find_filp(struct vnode
*vp
, mode_t bits
)
233 /* Find a filp slot that refers to the vnode 'vp' in a way as described
234 * by the mode bit 'bits'. Used for determining whether somebody is still
235 * interested in either end of a pipe. Also used when opening a FIFO to
236 * find partners to share a filp field with (to shared the file position).
237 * Like 'get_fd' it performs its job by linear search through the filp table.
242 for (f
= &filp
[0]; f
< &filp
[NR_FILPS
]; f
++) {
243 if (f
->filp_count
!= 0 && f
->filp_vno
== vp
&& (f
->filp_mode
& bits
)) {
248 /* If control passes here, the filp wasn't there. Report that back. */
252 /*===========================================================================*
254 *===========================================================================*/
255 int invalidate_filp(struct filp
*rfilp
)
257 /* Invalidate filp. fp_filp_inuse is not cleared, so filp can't be reused
258 until it is closed first. */
261 for (f
= 0; f
< NR_PROCS
; f
++) {
262 if (fproc
[f
].fp_pid
== PID_FREE
) continue;
263 for (fd
= 0; fd
< OPEN_MAX
; fd
++) {
264 if(fproc
[f
].fp_filp
[fd
] && fproc
[f
].fp_filp
[fd
] == rfilp
) {
265 fproc
[f
].fp_filp
[fd
] = NULL
;
271 rfilp
->filp_state
|= FS_INVALIDATED
;
272 return(n
); /* Report back how often this filp has been invalidated. */
275 /*===========================================================================*
276 * invalidate_filp_by_char_major *
277 *===========================================================================*/
278 void invalidate_filp_by_char_major(int major
)
282 for (f
= &filp
[0]; f
< &filp
[NR_FILPS
]; f
++) {
283 if (f
->filp_count
!= 0 && f
->filp_vno
!= NULL
) {
284 if (major(f
->filp_vno
->v_sdev
) == major
&&
285 S_ISCHR(f
->filp_vno
->v_mode
)) {
286 (void) invalidate_filp(f
);
292 /*===========================================================================*
293 * invalidate_filp_by_endpt *
294 *===========================================================================*/
295 void invalidate_filp_by_endpt(endpoint_t proc_e
)
299 for (f
= &filp
[0]; f
< &filp
[NR_FILPS
]; f
++) {
300 if (f
->filp_count
!= 0 && f
->filp_vno
!= NULL
) {
301 if (f
->filp_vno
->v_fs_e
== proc_e
)
302 (void) invalidate_filp(f
);
307 /*===========================================================================*
309 *===========================================================================*/
310 void lock_filp(filp
, locktype
)
312 tll_access_t locktype
;
314 struct fproc
*org_fp
;
315 struct worker_thread
*org_self
;
318 assert(filp
->filp_count
> 0);
322 /* Lock vnode only if we haven't already locked it. If already locked by us,
323 * we're allowed to have one additional 'soft' lock. */
324 if (tll_locked_by_me(&vp
->v_lock
)) {
325 assert(filp
->filp_softlock
== NULL
);
326 filp
->filp_softlock
= fp
;
328 /* We have to make an exception for vnodes belonging to pipes. Even
329 * read(2) operations on pipes change the vnode and therefore require
332 if (S_ISFIFO(vp
->v_mode
) && locktype
== VNODE_READ
)
333 locktype
= VNODE_WRITE
;
334 lock_vnode(vp
, locktype
);
337 assert(vp
->v_ref_count
> 0); /* vnode still in use? */
338 assert(filp
->filp_vno
== vp
); /* vnode still what we think it is? */
340 /* First try to get filp lock right off the bat */
341 if (mutex_trylock(&filp
->filp_lock
) != 0) {
343 /* Already in use, let's wait for our turn */
347 if (mutex_lock(&filp
->filp_lock
) != 0)
348 panic("unable to obtain lock on filp");
355 /*===========================================================================*
357 *===========================================================================*/
358 void unlock_filp(filp
)
361 /* If this filp holds a soft lock on the vnode, we must be the owner */
362 if (filp
->filp_softlock
!= NULL
)
363 assert(filp
->filp_softlock
== fp
);
365 if (filp
->filp_count
> 0 || filp
->filp_state
& FS_INVALIDATED
) {
366 /* Only unlock vnode if filp is still in use */
368 /* and if we don't hold a soft lock */
369 if (filp
->filp_softlock
== NULL
) {
370 assert(tll_islocked(&(filp
->filp_vno
->v_lock
)));
371 unlock_vnode(filp
->filp_vno
);
375 filp
->filp_softlock
= NULL
;
376 if (mutex_unlock(&filp
->filp_lock
) != 0)
377 panic("unable to release lock on filp");
380 /*===========================================================================*
382 *===========================================================================*/
383 void unlock_filps(filp1
, filp2
)
387 /* Unlock two filps that are tied to the same vnode. As a thread can lock a
388 * vnode only once, unlocking the vnode twice would result in an error. */
390 /* No NULL pointers and not equal */
393 assert(filp1
!= filp2
);
395 /* Must be tied to the same vnode and not NULL */
396 assert(filp1
->filp_vno
== filp2
->filp_vno
);
397 assert(filp1
->filp_vno
!= NULL
);
399 if (filp1
->filp_count
> 0 && filp2
->filp_count
> 0) {
400 /* Only unlock vnode if filps are still in use */
401 unlock_vnode(filp1
->filp_vno
);
404 filp1
->filp_softlock
= NULL
;
405 filp2
->filp_softlock
= NULL
;
406 if (mutex_unlock(&filp2
->filp_lock
) != 0)
407 panic("unable to release filp lock on filp2");
408 if (mutex_unlock(&filp1
->filp_lock
) != 0)
409 panic("unable to release filp lock on filp1");
412 /*===========================================================================*
414 *===========================================================================*/
415 static filp_id_t
verify_fd(ep
, fd
)
419 /* Verify whether the file descriptor 'fd' is valid for the endpoint 'ep'. When
420 * the file descriptor is valid, verify_fd returns a pointer to that filp, else
426 if (isokendpt(ep
, &slot
) != OK
)
429 rfilp
= get_filp2(&fproc
[slot
], fd
, VNODE_READ
);
434 /*===========================================================================*
436 *===========================================================================*/
437 int do_verify_fd(void)
443 proc_e
= job_m_in
.USER_ENDPT
;
446 rfilp
= (struct filp
*) verify_fd(proc_e
, fd
);
447 m_out
.ADDRESS
= (void *) rfilp
;
448 if (rfilp
!= NULL
) unlock_filp(rfilp
);
449 return (rfilp
!= NULL
) ? OK
: EINVAL
;
452 /*===========================================================================*
454 *===========================================================================*/
458 if (sfilp
== NULL
) return(EINVAL
);
460 lock_filp(sfilp
, VNODE_READ
);
467 /*===========================================================================*
469 *===========================================================================*/
470 int do_set_filp(void)
473 f
= (filp_id_t
) job_m_in
.ADDRESS
;
477 /*===========================================================================*
479 *===========================================================================*/
480 int copy_filp(to_ep
, cfilp
)
488 if (isokendpt(to_ep
, &slot
) != OK
) return(EINVAL
);
491 /* Find an open slot in fp_filp */
492 for (fd
= 0; fd
< OPEN_MAX
; fd
++) {
493 if (rfp
->fp_filp
[fd
] == NULL
&&
494 !FD_ISSET(fd
, &rfp
->fp_filp_inuse
)) {
496 /* Found a free slot, add descriptor */
497 FD_SET(fd
, &rfp
->fp_filp_inuse
);
498 rfp
->fp_filp
[fd
] = cfilp
;
499 rfp
->fp_filp
[fd
]->filp_count
++;
504 /* File descriptor table is full */
508 /*===========================================================================*
510 *===========================================================================*/
511 int do_copy_filp(void)
516 proc_e
= job_m_in
.USER_ENDPT
;
517 f
= (filp_id_t
) job_m_in
.ADDRESS
;
519 return copy_filp(proc_e
, f
);
522 /*===========================================================================*
524 *===========================================================================*/
531 lock_filp(pfilp
, VNODE_OPCL
);
537 /*===========================================================================*
539 *===========================================================================*/
540 int do_put_filp(void)
543 f
= (filp_id_t
) job_m_in
.ADDRESS
;
547 /*===========================================================================*
549 *===========================================================================*/
550 int cancel_fd(ep
, fd
)
558 if (isokendpt(ep
, &slot
) != OK
) return(EINVAL
);
561 /* Check that the input 'fd' is valid */
562 rfilp
= (struct filp
*) verify_fd(ep
, fd
);
564 /* Found a valid descriptor, remove it */
565 FD_CLR(fd
, &rfp
->fp_filp_inuse
);
566 if (rfp
->fp_filp
[fd
]->filp_count
== 0) {
568 printf("VFS: filp_count for slot %d fd %d already zero", slot
,
572 rfp
->fp_filp
[fd
]->filp_count
--;
573 rfp
->fp_filp
[fd
] = NULL
;
578 /* File descriptor is not valid for the endpoint. */
582 /*===========================================================================*
584 *===========================================================================*/
585 int do_cancel_fd(void)
590 proc_e
= job_m_in
.USER_ENDPT
;
593 return cancel_fd(proc_e
, fd
);
596 /*===========================================================================*
598 *===========================================================================*/
602 /* Close a file. Will also unlock filp when done */
609 assert(mutex_trylock(&f
->filp_lock
) == -EDEADLK
);
610 assert(tll_islocked(&f
->filp_vno
->v_lock
));
614 if (f
->filp_count
- 1 == 0 && f
->filp_mode
!= FILP_CLOSED
) {
615 /* Check to see if the file is special. */
616 if (S_ISCHR(vp
->v_mode
) || S_ISBLK(vp
->v_mode
)) {
617 dev
= (dev_t
) vp
->v_sdev
;
618 if (S_ISBLK(vp
->v_mode
)) {
620 if (vp
->v_bfs_e
== ROOT_FS_E
) {
621 /* Invalidate the cache unless the special is
622 * mounted. Assume that the root filesystem's
623 * is open only for fsck.
625 req_flush(vp
->v_bfs_e
, dev
);
629 /* Attempt to close only when feasible */
630 if (!(f
->filp_state
& FS_INVALIDATED
)) {
631 (void) bdev_close(dev
); /* Ignore errors */
634 /* Attempt to close only when feasible */
635 if (!(f
->filp_state
& FS_INVALIDATED
)) {
636 (void) dev_close(dev
, f
-filp
);/*Ignore errors*/
640 f
->filp_mode
= FILP_CLOSED
;
644 /* If the inode being closed is a pipe, release everyone hanging on it. */
645 if (S_ISFIFO(vp
->v_mode
)) {
646 rw
= (f
->filp_mode
& R_BIT
? WRITE
: READ
);
647 release(vp
, rw
, susp_count
);
650 f
->filp_count
--; /* If filp got invalidated at device closure, the
651 * count might've become negative now */
652 if (f
->filp_count
== 0 ||
653 (f
->filp_count
< 0 && f
->filp_state
& FS_INVALIDATED
)) {
654 if (S_ISFIFO(vp
->v_mode
)) {
655 /* Last reader or writer is going. Tell PFS about latest
658 truncate_vnode(vp
, vp
->v_size
);
661 unlock_vnode(f
->filp_vno
);
662 put_vnode(f
->filp_vno
);
664 f
->filp_mode
= FILP_CLOSED
;
666 } else if (f
->filp_count
< 0) {
667 panic("VFS: invalid filp count: %d ino %d/%d", f
->filp_count
,
668 vp
->v_dev
, vp
->v_inode_nr
);
670 unlock_vnode(f
->filp_vno
);
673 mutex_unlock(&f
->filp_lock
);