/* This file contains the procedures that manipulate file descriptors.
 *
 * The entry points into this file are
 *   get_fd:	 look for free file descriptor and free filp slots
 *   get_filp:	 look up the filp entry for a given file descriptor
 *   find_filp:	 find a filp slot that points to a given vnode
 *   inval_filp: invalidate a filp and associated fd's, only let close()
 *		 happen on it
 *   do_copyfd:  copies a file descriptor from or to another endpoint
 */
#include <sys/select.h>
#include <minix/callnr.h>
#include <minix/u64.h>
#include <sys/stat.h>
#include <assert.h>
#include "fs.h"
#include "file.h"
#include "vnode.h"
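
/* For orientation (illustrative picture, not literal code): each process has
 * a private descriptor table, fp_filp[] in its fproc slot, and each non-NULL
 * entry points into the system-wide filp table; a filp in turn points to a
 * vnode and carries the shared mode and file position.
 *
 *	fproc[p].fp_filp[fd] ---> filp[n] { filp_count, filp_mode, filp_pos }
 *	                              |
 *	                              +---> vnode { v_ref_count, v_mode, ... }
 *
 * filp_count is the number of descriptors, possibly in different processes
 * (e.g. after fork or do_copyfd), that share the same filp slot.
 */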
/*===========================================================================*
 *			check_filp_locks_by_me				     *
 *===========================================================================*/
void check_filp_locks_by_me(void)
{
/* Check whether this thread still has filp locks held */
  struct filp *f;
  int r;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
	r = mutex_trylock(&f->filp_lock);
	if (r == -EDEADLK)
		panic("Thread %d still holds filp lock on filp %p call_nr=%d\n",
		      mthread_self(), f, job_call_nr);
	else if (r == 0) {
		/* We just obtained the lock, release it */
		mutex_unlock(&f->filp_lock);
	}
  }
}
/*===========================================================================*
 *				check_filp_locks			     *
 *===========================================================================*/
void check_filp_locks(void)
{
  struct filp *f;
  int r, count = 0;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
	r = mutex_trylock(&f->filp_lock);
	if (r == -EBUSY) {
		/* Mutex is still locked */
		count++;
	} else if (r == 0) {
		/* We just obtained a lock, don't want it */
		mutex_unlock(&f->filp_lock);
	} else
		panic("filp_lock weird state");
  }
  if (count) panic("locked filps");
  else printf("check_filp_locks OK\n");
}
/*===========================================================================*
 *				init_filps				     *
 *===========================================================================*/
void init_filps(void)
{
/* Initialize filps */
  struct filp *f;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
	if (mutex_init(&f->filp_lock, NULL) != 0)
		panic("Failed to initialize filp mutex");
  }
}
/*===========================================================================*
 *				get_fd					     *
 *===========================================================================*/
int get_fd(struct fproc *rfp, int start, mode_t bits, int *k,
	struct filp **fpt)
{
/* Look for a free file descriptor and a free filp slot.  Fill in the mode word
 * in the latter, but don't claim either one yet, since the open() or creat()
 * may yet fail.
 */
  register struct filp *f;
  register int i;

  /* Search the fproc fp_filp table for a free file descriptor. */
  for (i = start; i < OPEN_MAX; i++) {
	if (rfp->fp_filp[i] == NULL) {
		/* A file descriptor has been located. */
		*k = i;
		break;
	}
  }

  /* Check to see if a file descriptor has been found. */
  if (i >= OPEN_MAX) return(EMFILE);

  /* If we don't care about a filp, return now */
  if (fpt == NULL) return(OK);

  /* Now that a file descriptor has been found, look for a free filp slot. */
  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
	assert(f->filp_count >= 0);
	if (f->filp_count == 0 && mutex_trylock(&f->filp_lock) == 0) {
		f->filp_mode = bits;
		f->filp_pos = 0;
		f->filp_selectors = 0;
		f->filp_select_ops = 0;
		f->filp_pipe_select_ops = 0;
		f->filp_flags = 0;
		f->filp_select_flags = 0;
		f->filp_softlock = NULL;
		f->filp_ioctl_fp = NULL;
		*fpt = f;
		return(OK);
	}
  }

  /* If control passes here, the filp table must be full.  Report that back. */
  return(ENFILE);
}
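
/* Illustrative sketch (not part of this file): a caller such as the open()
 * path is expected to use get_fd() to locate both slots first and to claim
 * them only once the operation can no longer fail, roughly along these lines.
 * Error handling is omitted and the vnode 'vp' is assumed to have been
 * resolved already.
 *
 *	int fd; struct filp *f;
 *	if ((r = get_fd(fp, 0, bits, &fd, &f)) != OK) return(r);
 *	// ... perform the actual open; on failure, only release f's lock ...
 *	fp->fp_filp[fd] = f;	// claim the descriptor
 *	f->filp_count = 1;	// claim the filp slot
 *	f->filp_vno = vp;
 *	return(fd);
 */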
/*===========================================================================*
 *				get_filp				     *
 *===========================================================================*/
struct filp *get_filp(fild, locktype)
int fild;			/* file descriptor */
tll_access_t locktype;
{
/* See if 'fild' refers to a valid file descr.  If so, return its filp ptr. */

  return get_filp2(fp, fild, locktype);
}
/*===========================================================================*
 *				get_filp2				     *
 *===========================================================================*/
struct filp *get_filp2(rfp, fild, locktype)
register struct fproc *rfp;
int fild;			/* file descriptor */
tll_access_t locktype;
{
/* See if 'fild' refers to a valid file descr.  If so, return its filp ptr. */
  struct filp *filp;

  filp = NULL;
  if (fild < 0 || fild >= OPEN_MAX)
	err_code = EBADF;
  else if (locktype != VNODE_OPCL && rfp->fp_filp[fild] != NULL &&
	   rfp->fp_filp[fild]->filp_mode == FILP_CLOSED)
	err_code = EIO;	/* disallow all use except close(2) */
  else if ((filp = rfp->fp_filp[fild]) == NULL)
	err_code = EBADF;
  else if (locktype != VNODE_NONE)	/* Only lock the filp if requested */
	lock_filp(filp, locktype);	/* All is fine */

  return(filp);	/* may also be NULL */
}
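
/* Illustrative sketch (not part of this file): the common pattern in system
 * call handlers is to look up and lock the filp, bail out on failure using
 * the global err_code, and unlock when done:
 *
 *	struct filp *f;
 *	if ((f = get_filp(fd, VNODE_READ)) == NULL) return(err_code);
 *	// ... operate on f and f->filp_vno ...
 *	unlock_filp(f);
 */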
/*===========================================================================*
 *				find_filp				     *
 *===========================================================================*/
struct filp *find_filp(struct vnode *vp, mode_t bits)
{
/* Find a filp slot that refers to the vnode 'vp' in a way as described
 * by the mode bit 'bits'. Used for determining whether somebody is still
 * interested in either end of a pipe.  Also used when opening a FIFO to
 * find partners to share a filp field with (to share the file position).
 * Like 'get_fd' it performs its job by linear search through the filp table.
 */
  register struct filp *f;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
	if (f->filp_count != 0 && f->filp_vno == vp && (f->filp_mode & bits)) {
		return(f);
	}
  }

  /* If control passes here, the filp wasn't there.  Report that back. */
  return(NULL);
}
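
/* Illustrative example (an assumption about typical use by the pipe code):
 * to test whether a pipe still has a writer, ask for any filp that holds the
 * write end open:
 *
 *	if (find_filp(vp, W_BIT) == NULL) {
 *		// no writer left: readers see end-of-file
 *	}
 */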
/*===========================================================================*
 *				invalidate_filp				     *
 *===========================================================================*/
void invalidate_filp(struct filp *rfilp)
{
/* Invalidate filp. */

  rfilp->filp_mode = FILP_CLOSED;
}
/*===========================================================================*
 *			invalidate_filp_by_char_major			     *
 *===========================================================================*/
void invalidate_filp_by_char_major(int major)
{
  struct filp *f;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
	if (f->filp_count != 0 && f->filp_vno != NULL) {
		if (major(f->filp_vno->v_sdev) == major &&
		    S_ISCHR(f->filp_vno->v_mode)) {
			invalidate_filp(f);
		}
	}
  }
}
/*===========================================================================*
 *			invalidate_filp_by_endpt			     *
 *===========================================================================*/
void invalidate_filp_by_endpt(endpoint_t proc_e)
{
  struct filp *f;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
	if (f->filp_count != 0 && f->filp_vno != NULL) {
		if (f->filp_vno->v_fs_e == proc_e)
			invalidate_filp(f);
	}
  }
}
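
/* Note on the interplay with get_filp2() above (a sketch of the effect, under
 * the assumption that close(2) looks up its filp with VNODE_OPCL): once a
 * filp has been invalidated its filp_mode is FILP_CLOSED, so further use
 * through get_filp2() yields EIO unless the lookup uses VNODE_OPCL.
 *
 *	invalidate_filp_by_endpt(fs_e);		// backing FS or driver died
 *	f = get_filp2(rfp, fd, VNODE_READ);	// fails, err_code = EIO
 *	f = get_filp2(rfp, fd, VNODE_OPCL);	// still allowed, close works
 */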
/*===========================================================================*
 *				lock_filp				     *
 *===========================================================================*/
void lock_filp(filp, locktype)
struct filp *filp;
tll_access_t locktype;
{
  struct worker_thread *org_self;
  struct vnode *vp;

  assert(filp->filp_count > 0);
  vp = filp->filp_vno;
  assert(vp != NULL);

  /* Lock vnode only if we haven't already locked it. If already locked by us,
   * we're allowed to have one additional 'soft' lock. */
  if (tll_locked_by_me(&vp->v_lock)) {
	assert(filp->filp_softlock == NULL);
	filp->filp_softlock = fp;
  } else {
	/* We have to make an exception for vnodes belonging to pipes. Even
	 * read(2) operations on pipes change the vnode and therefore require
	 * exclusive access.
	 */
	if (S_ISFIFO(vp->v_mode) && locktype == VNODE_READ)
		locktype = VNODE_WRITE;
	lock_vnode(vp, locktype);
  }

  assert(vp->v_ref_count > 0);	/* vnode still in use? */
  assert(filp->filp_vno == vp);	/* vnode still what we think it is? */

  /* First try to get filp lock right off the bat */
  if (mutex_trylock(&filp->filp_lock) != 0) {
	/* Already in use, let's wait for our turn */
	org_self = worker_suspend();

	if (mutex_lock(&filp->filp_lock) != 0)
		panic("unable to obtain lock on filp");

	worker_resume(org_self);
  }
}
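
/* Illustrative scenario (sketch): when a thread that already holds the vnode
 * lock needs a second filp on the same vnode, for instance both ends of a
 * pipe, the second lock_filp() call does not lock the vnode again but records
 * a soft lock instead, and unlock_filps() below releases the vnode only once:
 *
 *	lock_filp(f1, VNODE_WRITE);	// takes the vnode lock and f1's mutex
 *	lock_filp(f2, VNODE_WRITE);	// vnode already ours: soft lock on f2
 *	// ... work on both ends ...
 *	unlock_filps(f1, f2);		// vnode unlocked exactly once
 */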
/*===========================================================================*
 *				unlock_filp				     *
 *===========================================================================*/
void unlock_filp(filp)
struct filp *filp;
{
  /* If this filp holds a soft lock on the vnode, we must be the owner */
  if (filp->filp_softlock != NULL)
	assert(filp->filp_softlock == fp);

  if (filp->filp_count > 0) {
	/* Only unlock vnode if filp is still in use */

	/* and if we don't hold a soft lock */
	if (filp->filp_softlock == NULL) {
		assert(tll_islocked(&(filp->filp_vno->v_lock)));
		unlock_vnode(filp->filp_vno);
	}
  }

  filp->filp_softlock = NULL;
  if (mutex_unlock(&filp->filp_lock) != 0)
	panic("unable to release lock on filp");
}
/*===========================================================================*
 *				unlock_filps				     *
 *===========================================================================*/
void unlock_filps(filp1, filp2)
struct filp *filp1;
struct filp *filp2;
{
/* Unlock two filps that are tied to the same vnode. As a thread can lock a
 * vnode only once, unlocking the vnode twice would result in an error. */

  /* No NULL pointers and not equal */
  assert(filp1);
  assert(filp2);
  assert(filp1 != filp2);

  /* Must be tied to the same vnode and not NULL */
  assert(filp1->filp_vno == filp2->filp_vno);
  assert(filp1->filp_vno != NULL);

  if (filp1->filp_count > 0 && filp2->filp_count > 0) {
	/* Only unlock vnode if filps are still in use */
	unlock_vnode(filp1->filp_vno);
  }

  filp1->filp_softlock = NULL;
  filp2->filp_softlock = NULL;
  if (mutex_unlock(&filp2->filp_lock) != 0)
	panic("unable to release filp lock on filp2");
  if (mutex_unlock(&filp1->filp_lock) != 0)
	panic("unable to release filp lock on filp1");
}
/*===========================================================================*
 *				close_filp				     *
 *===========================================================================*/
void close_filp(f)
struct filp *f;
{
/* Close a file.  Will also unlock filp when done. */
  int rw;
  dev_t dev;
  struct vnode *vp;

  /* Must be locked */
  assert(mutex_trylock(&f->filp_lock) == -EDEADLK);
  assert(tll_islocked(&f->filp_vno->v_lock));

  vp = f->filp_vno;

  if (f->filp_count - 1 == 0 && f->filp_mode != FILP_CLOSED) {
	/* Check to see if the file is special. */
	if (S_ISCHR(vp->v_mode) || S_ISBLK(vp->v_mode)) {
		dev = vp->v_sdev;
		if (S_ISBLK(vp->v_mode)) {
			if (vp->v_bfs_e == ROOT_FS_E) {
				/* Invalidate the cache unless the special is
				 * mounted.  Assume that the root file system's
				 * block special is open only for fsck.
				 */
				req_flush(vp->v_bfs_e, dev);
			}

			(void) bdev_close(dev);	/* Ignore errors */
		} else {
			(void) cdev_close(dev);	/* Ignore errors */
		}

		f->filp_mode = FILP_CLOSED;
	}
  }

  /* If the inode being closed is a pipe, release everyone hanging on it. */
  if (S_ISFIFO(vp->v_mode)) {
	rw = (f->filp_mode & R_BIT ? VFS_WRITE : VFS_READ);
	release(vp, rw, susp_count);
  }

  if (--f->filp_count == 0) {
	if (S_ISFIFO(vp->v_mode)) {
		/* Last reader or writer is going.  Tell PFS about the latest
		 * pipe size.
		 */
		truncate_vnode(vp, vp->v_size);
	}

	unlock_vnode(f->filp_vno);
	put_vnode(f->filp_vno);
	f->filp_vno = NULL;
	f->filp_mode = FILP_CLOSED;
  } else if (f->filp_count < 0) {
	panic("VFS: invalid filp count: %d ino %llx/%llu", f->filp_count,
	      vp->v_dev, vp->v_inode_nr);
  } else {
	unlock_vnode(f->filp_vno);
  }

  mutex_unlock(&f->filp_lock);
}
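
/* Illustrative calling convention (sketch): close_filp() expects both the
 * filp mutex and the vnode lock to be held already, per the asserts above,
 * and releases them itself.  A caller therefore does roughly:
 *
 *	f = get_filp(fd, VNODE_OPCL);	// locks vnode and filp
 *	fp->fp_filp[fd] = NULL;		// detach the descriptor
 *	close_filp(f);			// drops the reference and both locks
 */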
/*===========================================================================*
 *				do_copyfd				     *
 *===========================================================================*/
int do_copyfd(void)
{
/* Copy a file descriptor between processes, or close a remote file descriptor.
 * This call is used as back-call by device drivers (UDS, VND), and is expected
 * to be used in response to an IOCTL to such device drivers.
 */
  struct fproc *rfp;
  struct filp *rfilp;
  endpoint_t endpt;
  int r, fd, what, slot;

  /* This should be replaced with an ACL check. */
  if (!super_user) return(EPERM);

  endpt = (endpoint_t) job_m_in.VFS_COPYFD_ENDPT;
  fd = job_m_in.VFS_COPYFD_FD;
  what = job_m_in.VFS_COPYFD_WHAT;

  if (isokendpt(endpt, &slot) != OK) return(EINVAL);
  rfp = &fproc[slot];

  /* FIXME: we should now check that the user process is indeed blocked on an
   * IOCTL call, so that we can safely mess with its file descriptors.  We
   * currently do not have the necessary state to verify this, so we assume
   * that the call is always used in the right way.
   */

  /* Depending on the operation, get the file descriptor from the caller or the
   * user process.  Do not lock the filp yet: we first need to make sure that
   * locking it will not result in a deadlock.
   */
  rfilp = get_filp2((what == COPYFD_TO) ? fp : rfp, fd, VNODE_NONE);
  if (rfilp == NULL)
	return(err_code);

  /* If the filp is involved in an IOCTL by the user process, locking the filp
   * here would result in a deadlock.  This would happen if a user process
   * passes in the file descriptor to the device node on which it is performing
   * the IOCTL.  We do not allow manipulation of such device nodes.  In
   * practice, this only applies to block-special files (and thus VND), because
   * character-special files (as used by UDS) are unlocked during the IOCTL.
   */
  if (rfilp->filp_ioctl_fp == rfp)
	return(EBADF);

  /* Now we can safely lock the filp, copy or close it, and unlock it again. */
  lock_filp(rfilp, VNODE_READ);

  switch (what) {
  case COPYFD_FROM:
	rfp = fp;

	/* FALLTHROUGH */
  case COPYFD_TO:
	/* Find a free file descriptor slot in the local or remote process. */
	for (fd = 0; fd < OPEN_MAX; fd++)
		if (rfp->fp_filp[fd] == NULL)
			break;

	/* If found, fill the slot and return the slot number. */
	if (fd < OPEN_MAX) {
		rfp->fp_filp[fd] = rfilp;
		rfilp->filp_count++;
		r = fd;
	} else
		r = EMFILE;

	break;

  case COPYFD_CLOSE:
	/* This should be used ONLY to revert a successful copy-to operation,
	 * and assumes that the filp is still in use by the caller as well.
	 */
	if (rfilp->filp_count > 1) {
		rfilp->filp_count--;
		rfp->fp_filp[fd] = NULL;
		r = OK;
	} else
		r = EBADF;

	break;

  default:
	r = EINVAL;
  }

  unlock_filp(rfilp);

  return(r);
}
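
/* Illustrative lifecycle (sketch, driver side not shown): a driver such as VND
 * is expected to copy a descriptor while servicing an IOCTL and to revert the
 * copy if the operation fails later on.  In terms of the 'what' values handled
 * above:
 *
 *	COPYFD_FROM:  take fd from the user process, install it in the caller
 *	COPYFD_TO:    take fd from the caller, install it in the user process
 *	COPYFD_CLOSE: undo a previous successful COPYFD_TO
 */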