/* This file contains the procedures that manipulate file descriptors.
 *
 * The entry points into this file are
 *   get_fd:	    look for free file descriptor and free filp slots
 *   get_filp:	    look up the filp entry for a given file descriptor
 *   find_filp:	    find a filp slot that points to a given vnode
 *   inval_filp:    invalidate a filp and associated fd's, only let close()
 *		    happen on it
 *   do_verify_fd:  verify whether the given file descriptor is valid for
 *		    the given endpoint
 *   do_set_filp:   marks a filp as in-flight.
 *   do_copy_filp:  copies a filp to another endpoint.
 *   do_put_filp:   marks a filp as not in-flight anymore.
 *   do_cancel_fd:  cancel the transaction when something goes wrong for
 *		    the receiver
 */

#include <sys/select.h>
#include <minix/callnr.h>
#include <minix/u64.h>
#include <assert.h>
#include <sys/stat.h>
#include "fs.h"
#include "file.h"
#include "fproc.h"
#include "vnode.h"

static filp_id_t verify_fd(endpoint_t ep, int fd);
/*===========================================================================*
 *			check_filp_locks_by_me				     *
 *===========================================================================*/
void check_filp_locks_by_me(void)
{
/* Check whether this thread still has filp locks held */
  struct filp *f;
  int r;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
	r = mutex_trylock(&f->filp_lock);
	if (r == -EDEADLK)
		panic("Thread %d still holds filp lock on filp %p call_nr=%d\n",
		      mthread_self(), f, job_call_nr);
	else if (r == 0) {
		/* We just obtained the lock, release it */
		mutex_unlock(&f->filp_lock);
	}
  }
}
/*===========================================================================*
 *				check_filp_locks			     *
 *===========================================================================*/
void check_filp_locks(void)
{
  struct filp *f;
  int r, count = 0;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
	r = mutex_trylock(&f->filp_lock);
	if (r == -EBUSY) {
		/* Mutex is still locked */
		count++;
	} else if (r == 0) {
		/* We just obtained a lock, don't want it */
		mutex_unlock(&f->filp_lock);
	} else
		panic("filp_lock weird state");
  }
  if (count) panic("locked filps");
  else printf("check_filp_locks OK\n");
}
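/* Overview: invalidate_filp() and its helpers further down only mark a filp
 * as FS_INVALIDATED and detach it from the owning processes' descriptor
 * tables; the actual close is deferred to the garbage collector below, which
 * runs as a worker thread job and closes every invalidated filp it finds.
 */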
/*===========================================================================*
 *				do_filp_gc				     *
 *===========================================================================*/
void *do_filp_gc(void *UNUSED(arg))
{
  struct filp *f;
  struct vnode *vp;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
	if (!(f->filp_state & FS_INVALIDATED)) continue;
	assert(f->filp_vno != NULL);
	vp = f->filp_vno;

	/* Synchronize with worker thread that might hold a lock on the vp */
	lock_vnode(vp, VNODE_OPCL);

	/* If garbage collection was invoked due to a failed device open
	 * request, then common_open has already cleaned up and we have
	 * nothing to do.
	 */
	if (!(f->filp_state & FS_INVALIDATED)) {
		unlock_vnode(vp);
		continue;
	}

	/* If garbage collection was invoked due to a failed device close
	 * request, the close_filp has already cleaned up and we have nothing
	 * to do.
	 */
	if (f->filp_mode != FILP_CLOSED) {
		assert(f->filp_count == 0);
		f->filp_count = 1;	/* So lock_filp and close_filp will do
					 * their job */
		lock_filp(f, VNODE_READ);
		close_filp(f);
	} else {
		unlock_vnode(vp);
	}

	f->filp_state &= ~FS_INVALIDATED;
  }

  thread_cleanup(NULL);
  return(NULL);
}
/*===========================================================================*
 *				init_filps				     *
 *===========================================================================*/
void init_filps(void)
{
/* Initialize filps */
  struct filp *f;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
	if (mutex_init(&f->filp_lock, NULL) != 0)
		panic("Failed to initialize filp mutex");
  }
}
/*===========================================================================*
 *				get_fd					     *
 *===========================================================================*/
int get_fd(int start, mode_t bits, int *k, struct filp **fpt)
{
/* Look for a free file descriptor and a free filp slot.  Fill in the mode word
 * in the latter, but don't claim either one yet, since the open() or creat()
 * may yet fail.
 */

  register struct filp *f;
  register int i;

  /* Search the fproc fp_filp table for a free file descriptor. */
  for (i = start; i < OPEN_MAX; i++) {
	if (fp->fp_filp[i] == NULL && !FD_ISSET(i, &fp->fp_filp_inuse)) {
		/* A file descriptor has been located. */
		*k = i;
		break;
	}
  }

  /* Check to see if a file descriptor has been found. */
  if (i >= OPEN_MAX) return(EMFILE);

  /* If we don't care about a filp, return now */
  if (fpt == NULL) return(OK);

  /* Now that a file descriptor has been found, look for a free filp slot. */
  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
	assert(f->filp_count >= 0);
	if (f->filp_count == 0 && mutex_trylock(&f->filp_lock) == 0) {
		f->filp_mode = bits;
		f->filp_pos = cvu64(0);
		f->filp_selectors = 0;
		f->filp_select_ops = 0;
		f->filp_pipe_select_ops = 0;
		f->filp_flags = 0;
		f->filp_state = FS_NORMAL;
		f->filp_select_flags = 0;
		f->filp_softlock = NULL;
		*fpt = f;
		return(OK);
	}
  }

  /* If control passes here, the filp table must be full.  Report that back. */
  return(ENFILE);
}
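/* Typical use of get_fd(), as a sketch (the caller-side bookkeeping shown
 * here is an assumption for illustration, not defined by this file):
 *
 *	if ((r = get_fd(0, bits, &fd, &fil_ptr)) != OK) return(r);
 *	... perform the actual open; only on success claim both slots:
 *	fp->fp_filp[fd] = fil_ptr;
 *	FD_SET(fd, &fp->fp_filp_inuse);
 *	fil_ptr->filp_count = 1;
 */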
/*===========================================================================*
 *				get_filp				     *
 *===========================================================================*/
struct filp *get_filp(fild, locktype)
int fild;			/* file descriptor */
tll_access_t locktype;
{
/* See if 'fild' refers to a valid file descr.  If so, return its filp ptr. */

  return get_filp2(fp, fild, locktype);
}
/*===========================================================================*
 *				get_filp2				     *
 *===========================================================================*/
struct filp *get_filp2(rfp, fild, locktype)
register struct fproc *rfp;
int fild;			/* file descriptor */
tll_access_t locktype;
{
/* See if 'fild' refers to a valid file descr.  If so, return its filp ptr. */
  struct filp *filp;

  filp = NULL;
  if (fild < 0 || fild >= OPEN_MAX)
	err_code = EBADF;
  else if (rfp->fp_filp[fild] == NULL && FD_ISSET(fild, &rfp->fp_filp_inuse))
	err_code = EIO;	/* The filedes is not there, but is not closed either.
			 */
  else if ((filp = rfp->fp_filp[fild]) == NULL)
	err_code = EBADF;
  else
	lock_filp(filp, locktype);	/* All is fine */

  return(filp);	/* may also be NULL */
}
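/* Callers get back a locked filp and must release it with unlock_filp() when
 * done; a minimal sketch of the common pattern:
 *
 *	if ((f = get_filp(fd, VNODE_READ)) == NULL) return(err_code);
 *	... use f ...
 *	unlock_filp(f);
 */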
/*===========================================================================*
 *				find_filp				     *
 *===========================================================================*/
struct filp *find_filp(struct vnode *vp, mode_t bits)
{
/* Find a filp slot that refers to the vnode 'vp' in a way as described
 * by the mode bit 'bits'.  Used for determining whether somebody is still
 * interested in either end of a pipe.  Also used when opening a FIFO to
 * find partners to share a filp field with (to share the file position).
 * Like 'get_fd' it performs its job by linear search through the filp table.
 */

  register struct filp *f;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
	if (f->filp_count != 0 && f->filp_vno == vp && (f->filp_mode & bits)) {
		return(f);
	}
  }

  /* If control passes here, the filp wasn't there.  Report that back. */
  return(NULL);
}
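/* For example (a sketch of how the pipe code can use this): to test whether a
 * pipe vnode still has any reader, check find_filp(vp, R_BIT) != NULL; a NULL
 * result means no open filp reads from 'vp' anymore.
 */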
/*===========================================================================*
 *				invalidate_filp				     *
 *===========================================================================*/
int invalidate_filp(struct filp *rfilp)
{
/* Invalidate filp. fp_filp_inuse is not cleared, so filp can't be reused
   until it is closed first. */

  int f, fd, n = 0;

  for (f = 0; f < NR_PROCS; f++) {
	if (fproc[f].fp_pid == PID_FREE) continue;
	for (fd = 0; fd < OPEN_MAX; fd++) {
		if (fproc[f].fp_filp[fd] && fproc[f].fp_filp[fd] == rfilp) {
			fproc[f].fp_filp[fd] = NULL;
			n++;
		}
	}
  }

  rfilp->filp_state |= FS_INVALIDATED;
  return(n);	/* Report back how often this filp has been invalidated. */
}
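/* The two helpers below apply invalidate_filp() in bulk: one to all filps on
 * character devices with a given major number, the other to all filps whose
 * vnode is hosted by a given file server endpoint (typically one that has
 * gone away). The invalidated filps are then cleaned up by do_filp_gc().
 */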
/*===========================================================================*
 *			invalidate_filp_by_char_major			     *
 *===========================================================================*/
void invalidate_filp_by_char_major(int major)
{
  struct filp *f;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
	if (f->filp_count != 0 && f->filp_vno != NULL) {
		if (major(f->filp_vno->v_sdev) == major &&
		    S_ISCHR(f->filp_vno->v_mode)) {
			(void) invalidate_filp(f);
		}
	}
  }
}
/*===========================================================================*
 *			invalidate_filp_by_endpt			     *
 *===========================================================================*/
void invalidate_filp_by_endpt(endpoint_t proc_e)
{
  struct filp *f;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
	if (f->filp_count != 0 && f->filp_vno != NULL) {
		if (f->filp_vno->v_fs_e == proc_e)
			(void) invalidate_filp(f);
	}
  }
}
/*===========================================================================*
 *				lock_filp				     *
 *===========================================================================*/
void lock_filp(filp, locktype)
struct filp *filp;
tll_access_t locktype;
{
  struct fproc *org_fp;
  struct worker_thread *org_self;
  struct vnode *vp;

  assert(filp->filp_count > 0);
  vp = filp->filp_vno;
  assert(vp != NULL);

  /* Lock vnode only if we haven't already locked it. If already locked by us,
   * we're allowed to have one additional 'soft' lock. */
  if (tll_locked_by_me(&vp->v_lock)) {
	assert(filp->filp_softlock == NULL);
	filp->filp_softlock = fp;
  } else {
	lock_vnode(vp, locktype);
  }

  assert(vp->v_ref_count > 0);	/* vnode still in use? */
  assert(filp->filp_vno == vp);	/* vnode still what we think it is? */

  /* First try to get filp lock right off the bat */
  if (mutex_trylock(&filp->filp_lock) != 0) {

	/* Already in use, let's wait for our turn */
	org_fp = fp;
	org_self = self;

	if (mutex_lock(&filp->filp_lock) != 0)
		panic("unable to obtain lock on filp");

	fp = org_fp;
	self = org_self;
  }
}
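/* Locking order used above: the vnode lock is taken before the filp lock, and
 * a thread that already holds the vnode lock records a 'soft' lock instead of
 * locking the vnode twice. The globals 'fp' and 'self' are saved and restored
 * around the blocking mutex_lock() because other worker threads may run (and
 * reassign those globals) while this thread waits for the filp lock.
 */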
/*===========================================================================*
 *				unlock_filp				     *
 *===========================================================================*/
void unlock_filp(filp)
struct filp *filp;
{
  /* If this filp holds a soft lock on the vnode, we must be the owner */
  if (filp->filp_softlock != NULL)
	assert(filp->filp_softlock == fp);

  if (filp->filp_count > 0 || filp->filp_state & FS_INVALIDATED) {
	/* Only unlock vnode if filp is still in use */

	/* and if we don't hold a soft lock */
	if (filp->filp_softlock == NULL) {
		assert(tll_islocked(&(filp->filp_vno->v_lock)));
		unlock_vnode(filp->filp_vno);
	}
  }

  filp->filp_softlock = NULL;
  if (mutex_unlock(&filp->filp_lock) != 0)
	panic("unable to release lock on filp");
}
/*===========================================================================*
 *				unlock_filps				     *
 *===========================================================================*/
void unlock_filps(filp1, filp2)
struct filp *filp1;
struct filp *filp2;
{
/* Unlock two filps that are tied to the same vnode. As a thread can lock a
 * vnode only once, unlocking the vnode twice would result in an error. */

  /* No NULL pointers and not equal */
  assert(filp1);
  assert(filp2);
  assert(filp1 != filp2);

  /* Must be tied to the same vnode and not NULL */
  assert(filp1->filp_vno == filp2->filp_vno);
  assert(filp1->filp_vno != NULL);

  if (filp1->filp_count > 0 && filp2->filp_count > 0) {
	/* Only unlock vnode if filps are still in use */
	unlock_vnode(filp1->filp_vno);
  }

  filp1->filp_softlock = NULL;
  filp2->filp_softlock = NULL;
  if (mutex_unlock(&filp2->filp_lock) != 0)
	panic("unable to release filp lock on filp2");
  if (mutex_unlock(&filp1->filp_lock) != 0)
	panic("unable to release filp lock on filp1");
}
/*===========================================================================*
 *				verify_fd				     *
 *===========================================================================*/
static filp_id_t verify_fd(ep, fd)
endpoint_t ep;
int fd;
{
/* Verify whether the file descriptor 'fd' is valid for the endpoint 'ep'. When
 * the file descriptor is valid, verify_fd returns a pointer to that filp, else
 * it returns NULL.
 */
  int slot;
  struct filp *rfilp;

  if (isokendpt(ep, &slot) != OK)
	return(NULL);

  rfilp = get_filp2(&fproc[slot], fd, VNODE_READ);

  return(rfilp);
}
/*===========================================================================*
 *				do_verify_fd				     *
 *===========================================================================*/
int do_verify_fd(void)
{
  struct filp *rfilp;
  endpoint_t proc_e;
  int fd;

  proc_e = job_m_in.USER_ENDPT;
  fd = job_m_in.COUNT;

  rfilp = (struct filp *) verify_fd(proc_e, fd);
  m_out.ADDRESS = (void *) rfilp;
  if (rfilp != NULL) unlock_filp(rfilp);
  return (rfilp != NULL) ? OK : EINVAL;
}
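/* The set/copy/put/cancel calls below implement the in-flight filp transfer
 * between endpoints. The intended sequence, sketched from the entry point
 * descriptions at the top of this file: the sender's descriptor is looked up
 * with do_verify_fd, pinned with do_set_filp, installed in the receiver with
 * do_copy_filp, and released again with do_put_filp; do_cancel_fd undoes the
 * receiver-side installation when the transaction fails.
 */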
/*===========================================================================*
 *				set_filp				     *
 *===========================================================================*/
int set_filp(filp_id_t sfilp)
{
  if (sfilp == NULL) return(EINVAL);

  lock_filp(sfilp, VNODE_READ);
  sfilp->filp_count++;	/* mark filp as in-flight */
  unlock_filp(sfilp);

  return(OK);
}

/*===========================================================================*
 *				do_set_filp				     *
 *===========================================================================*/
int do_set_filp(void)
{
  filp_id_t f;

  f = (filp_id_t) job_m_in.ADDRESS;

  return set_filp(f);
}
/*===========================================================================*
 *				copy_filp				     *
 *===========================================================================*/
int copy_filp(to_ep, cfilp)
endpoint_t to_ep;
filp_id_t cfilp;
{
  int fd;
  int slot;
  struct fproc *rfp;

  if (isokendpt(to_ep, &slot) != OK) return(EINVAL);
  rfp = &fproc[slot];

  /* Find an open slot in fp_filp */
  for (fd = 0; fd < OPEN_MAX; fd++) {
	if (rfp->fp_filp[fd] == NULL &&
	    !FD_ISSET(fd, &rfp->fp_filp_inuse)) {

		/* Found a free slot, add descriptor */
		FD_SET(fd, &rfp->fp_filp_inuse);
		rfp->fp_filp[fd] = cfilp;
		rfp->fp_filp[fd]->filp_count++;
		return(fd);
	}
  }

  /* File descriptor table is full */
  return(EMFILE);
}
/*===========================================================================*
 *				do_copy_filp				     *
 *===========================================================================*/
int do_copy_filp(void)
{
  endpoint_t proc_e;
  filp_id_t f;

  proc_e = job_m_in.USER_ENDPT;
  f = (filp_id_t) job_m_in.ADDRESS;

  return copy_filp(proc_e, f);
}
/*===========================================================================*
 *				put_filp				     *
 *===========================================================================*/
int put_filp(filp_id_t pfilp)
{
  if (pfilp == NULL) return(EINVAL);

  lock_filp(pfilp, VNODE_OPCL);
  close_filp(pfilp);	/* also releases the filp lock */

  return(OK);
}

/*===========================================================================*
 *				do_put_filp				     *
 *===========================================================================*/
int do_put_filp(void)
{
  filp_id_t f;

  f = (filp_id_t) job_m_in.ADDRESS;

  return put_filp(f);
}
/*===========================================================================*
 *				cancel_fd				     *
 *===========================================================================*/
int cancel_fd(ep, fd)
endpoint_t ep;
int fd;
{
  int slot;
  struct fproc *rfp;
  struct filp *rfilp;

  if (isokendpt(ep, &slot) != OK) return(EINVAL);
  rfp = &fproc[slot];

  /* Check that the input 'fd' is valid */
  rfilp = (struct filp *) verify_fd(ep, fd);
  if (rfilp != NULL) {
	/* Found a valid descriptor, remove it */
	FD_CLR(fd, &rfp->fp_filp_inuse);
	if (rfp->fp_filp[fd]->filp_count == 0) {
		unlock_filp(rfilp);
		printf("VFS: filp_count for slot %d fd %d already zero", slot,
		       fd);
		return(EINVAL);
	}
	rfp->fp_filp[fd]->filp_count--;
	rfp->fp_filp[fd] = NULL;
	unlock_filp(rfilp);
	return(OK);
  }

  /* File descriptor is not valid for the endpoint. */
  return(EINVAL);
}
/*===========================================================================*
 *				do_cancel_fd				     *
 *===========================================================================*/
int do_cancel_fd(void)
{
  endpoint_t proc_e;
  int fd;

  proc_e = job_m_in.USER_ENDPT;
  fd = job_m_in.COUNT;

  return cancel_fd(proc_e, fd);
}
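/* close_filp() below must be called with the filp lock and the corresponding
 * vnode lock already held; the asserts at its top check exactly that. It
 * releases the vnode lock (and, when the last reference goes, the vnode
 * itself) and drops the filp lock before returning.
 */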
/*===========================================================================*
 *				close_filp				     *
 *===========================================================================*/
void close_filp(f)
struct filp *f;
{
/* Close a file. Will also unlock filp when done */

  int rw;
  dev_t dev;
  struct vnode *vp;

  /* Must be locked */
  assert(mutex_trylock(&f->filp_lock) == -EDEADLK);
  assert(tll_islocked(&f->filp_vno->v_lock));

  vp = f->filp_vno;

  if (f->filp_count - 1 == 0 && f->filp_mode != FILP_CLOSED) {
	/* Check to see if the file is special. */
	if (S_ISCHR(vp->v_mode) || S_ISBLK(vp->v_mode)) {
		dev = (dev_t) vp->v_sdev;
		if (S_ISBLK(vp->v_mode)) {
			if (vp->v_bfs_e == ROOT_FS_E) {
				/* Invalidate the cache unless the special is
				 * mounted. Assume that the root filesystem's
				 * device is open only for fsck.
				 */
				req_flush(vp->v_bfs_e, dev);
			}

			/* Attempt to close only when feasible */
			if (!(f->filp_state & FS_INVALIDATED)) {
				(void) bdev_close(dev);	/* Ignore errors */
			}
		} else {
			/* Attempt to close only when feasible */
			if (!(f->filp_state & FS_INVALIDATED)) {
				(void) dev_close(dev, f-filp); /* Ignore errors */
			}
		}

		f->filp_mode = FILP_CLOSED;
	}
  }

  /* If the inode being closed is a pipe, release everyone hanging on it. */
  if (S_ISFIFO(vp->v_mode)) {
	rw = (f->filp_mode & R_BIT ? WRITE : READ);
	release(vp, rw, susp_count);
  }

  f->filp_count--;	/* If filp got invalidated at device closure, the
			 * count might've become negative now */

  if (f->filp_count == 0 ||
		(f->filp_count < 0 && f->filp_state & FS_INVALIDATED)) {
	if (S_ISFIFO(vp->v_mode)) {
		/* Last reader or writer is going. Tell PFS about latest
		 * pipe size.
		 */
		truncate_vnode(vp, vp->v_size);
	}

	unlock_vnode(f->filp_vno);
	put_vnode(f->filp_vno);
	f->filp_vno = NULL;
	f->filp_mode = FILP_CLOSED;
	f->filp_count = 0;
  } else if (f->filp_count < 0) {
	panic("VFS: invalid filp count: %d ino %d/%d", f->filp_count,
	      vp->v_dev, vp->v_inode_nr);
  } else {
	unlock_vnode(f->filp_vno);
  }

  mutex_unlock(&f->filp_lock);
}