/* Implement entry point to select system call.
 *
 * The entry points into this file are
 *   do_select:	       perform the SELECT system call
 *   select_callback:  notify select system of possible fd operation
 *   select_unsuspend_by_endpt: cancel a blocking select on exiting driver
 */
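
/* For orientation, a minimal sketch of how a user process might invoke the
 * call handled below (illustrative only; sock_fd stands for any open file
 * descriptor):
 *
 *	fd_set rfds;
 *	struct timeval tv = { 5, 0 };		-- wait at most 5 seconds
 *	FD_ZERO(&rfds);
 *	FD_SET(sock_fd, &rfds);
 *	r = select(sock_fd + 1, &rfds, NULL, NULL, &tv);
 *	if (r > 0 && FD_ISSET(sock_fd, &rfds))
 *		... sock_fd can be read without blocking ...
 *
 * The C library turns such a call into a SELECT request to VFS; do_select()
 * below copies in the fd_sets and the timeout and takes it from there.
 */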

#include "fs.h"
#include <sys/time.h>
#include <sys/select.h>
#include <sys/stat.h>
#include <minix/com.h>
#include <minix/u64.h>
#include <string.h>
#include <assert.h>

#include "file.h"
#include "fproc.h"
#include "dmap.h"
#include "vnode.h"

/* max. number of simultaneously pending select() calls */
#define MAXSELECTS 25
#define FROM_PROC 0
#define TO_PROC   1

static struct selectentry {
  struct fproc *requestor;	/* slot is free iff this is NULL */
  endpoint_t req_endpt;
  fd_set readfds, writefds, errorfds;
  fd_set ready_readfds, ready_writefds, ready_errorfds;
  fd_set *vir_readfds, *vir_writefds, *vir_errorfds;
  struct filp *filps[OPEN_MAX];
  int type[OPEN_MAX];
  int nfds, nreadyfds;
  int error;
  char block;
  clock_t expiry;
  timer_t timer;	/* if expiry > 0 */
} selecttab[MAXSELECTS];

static int copy_fdsets(struct selectentry *se, int nfds, int direction);
static int do_select_request(struct selectentry *se, int fd, int *ops);
static void filp_status(struct filp *fp, int status);
static int is_deferred(struct selectentry *se);
static void restart_proc(struct selectentry *se);
static void ops2tab(int ops, int fd, struct selectentry *e);
static int is_regular_file(struct filp *f);
static int is_pipe(struct filp *f);
static int is_supported_major(struct filp *f);
static void select_lock_filp(struct filp *f, int ops);
static int select_request_async(struct filp *f, int *ops, int block);
static int select_request_file(struct filp *f, int *ops, int block);
static int select_request_major(struct filp *f, int *ops, int block);
static int select_request_pipe(struct filp *f, int *ops, int block);
static int select_request_sync(struct filp *f, int *ops, int block);
static void select_cancel_all(struct selectentry *e);
static void select_cancel_filp(struct filp *f);
static void select_return(struct selectentry *);
static void select_restart_filps(void);
static int tab2ops(int fd, struct selectentry *e);
static void wipe_select(struct selectentry *s);

static struct fdtype {
  int (*select_request)(struct filp *, int *ops, int block);
  int (*type_match)(struct filp *f);
} fdtypes[] = {
  { select_request_major, is_supported_major },
  { select_request_file, is_regular_file },
  { select_request_pipe, is_pipe },
};
#define SEL_FDS		(sizeof(fdtypes) / sizeof(fdtypes[0]))

static int select_majors[] = {	/* List of majors that support selecting on */
  TTY_MAJOR,
  INET_MAJOR,
  UDS_MAJOR,
  LOG_MAJOR,
};
#define SEL_MAJORS	(sizeof(select_majors) / sizeof(select_majors[0]))

/*===========================================================================*
 *				do_select				     *
 *===========================================================================*/
int do_select(void)
{
/* Implement the select(nfds, readfds, writefds, errorfds, timeout) system
 * call. First we copy the arguments and verify their sanity. Then we check
 * whether there are file descriptors that satisfy the select call right off
 * the bat. If so, or if there are no ready file descriptors but the process
 * requested to return immediately, we return the result. Otherwise we set a
 * timeout and wait for either the file descriptors to become ready or the
 * timer to go off. If no timeout value was provided, we wait indefinitely. */

  int r, nfds, do_timeout = 0, fd, s;
  struct timeval timeout;
  struct selectentry *se;
  vir_bytes vtimeout;

  nfds = job_m_in.SEL_NFDS;
  vtimeout = (vir_bytes) job_m_in.SEL_TIMEOUT;

  /* Sane amount of file descriptors? */
  if (nfds < 0 || nfds > OPEN_MAX) return(EINVAL);

  /* Find a slot to store this select request */
  for (s = 0; s < MAXSELECTS; s++)
    if (selecttab[s].requestor == NULL)  /* Unused slot */
      break;
  if (s >= MAXSELECTS) return(ENOSPC);

  se = &selecttab[s];
  wipe_select(se);	/* Clear results of previous usage */
  se->requestor = fp;
  se->req_endpt = who_e;
  se->vir_readfds = (fd_set *) job_m_in.SEL_READFDS;
  se->vir_writefds = (fd_set *) job_m_in.SEL_WRITEFDS;
  se->vir_errorfds = (fd_set *) job_m_in.SEL_ERRORFDS;

  /* Copy fdsets from the process */
  if ((r = copy_fdsets(se, nfds, FROM_PROC)) != OK) {
    se->requestor = NULL;
    return(r);
  }

  /* Did the process set a timeout value? If so, retrieve it. */
  if (vtimeout != 0) {
    do_timeout = 1;
    r = sys_vircopy(who_e, (vir_bytes) vtimeout, SELF,
                    (vir_bytes) &timeout, sizeof(timeout));
    if (r != OK) {
      se->requestor = NULL;
      return(r);
    }
  }

  /* No nonsense in the timeval */
  if (do_timeout && (timeout.tv_sec < 0 || timeout.tv_usec < 0)) {
    se->requestor = NULL;
    return(EINVAL);
  }

  /* If there is no timeout, we block forever. Otherwise, we block up to the
   * specified time interval.
   */
  if (!do_timeout)	/* No timeout value set */
    se->block = 1;
  else if (do_timeout && (timeout.tv_sec > 0 || timeout.tv_usec > 0))
    se->block = 1;
  else			/* timeout set as (0,0) - this effects a poll */
    se->block = 0;
  se->expiry = 0;	/* no timer set (yet) */

  /* Verify that file descriptors are okay to select on */
  for (fd = 0; fd < nfds; fd++) {
    struct filp *f;
    unsigned int type, ops;

    /* Because the select() interface implicitly includes file descriptors
     * you might not want to select on, we have to figure out whether we're
     * interested in them. Typically, these file descriptors include fd's
     * inherited from the parent proc and file descriptors that have been
     * close()d, but had a lower fd than one in the current set.
     */
    if (!(ops = tab2ops(fd, se)))
      continue;  /* No operations set; nothing to do for this fd */

    /* Get filp belonging to this fd */
    f = se->filps[fd] = get_filp(fd, VNODE_READ);
    if (f == NULL) {
      if (err_code == EBADF)
        r = err_code;
      else  /* File descriptor is 'ready' to return EIO */
        r = EINTR;

      se->requestor = NULL;
      return(r);
    }

    /* Check file types. According to POSIX 2008:
     * "The pselect() and select() functions shall support regular files,
     * terminal and pseudo-terminal devices, FIFOs, pipes, and sockets. The
     * behavior of pselect() and select() on file descriptors that refer to
     * other types of file is unspecified."
     *
     * In our case, terminal and pseudo-terminal devices are handled by the
     * TTY major and sockets by either INET major (socket type AF_INET) or
     * PFS major (socket type AF_UNIX). PFS acts as an FS when it handles
     * pipes and as a driver when it handles sockets. Additionally, we
     * support select on the LOG major to handle kernel logging, which is
     * beyond the POSIX spec. */

    se->type[fd] = -1;
    for (type = 0; type < SEL_FDS; type++) {
      if (fdtypes[type].type_match(f)) {
        se->type[fd] = type;
        se->nfds = fd + 1;
        se->filps[fd]->filp_selectors++;
        break;
      }
    }
    unlock_filp(f);
    if (se->type[fd] == -1) {  /* Type not found */
      se->requestor = NULL;
      return(EBADF);
    }
  }

  /* Check all file descriptors in the set whether one is 'ready' now */
  for (fd = 0; fd < nfds; fd++) {
    int ops, r;
    struct filp *f;

    /* Again, check for involuntarily selected fd's */
    if (!(ops = tab2ops(fd, se)))
      continue;  /* No operations set; nothing to do for this fd */

    /* Test filp for select operations if not already done so. e.g.,
     * processes sharing a filp and both doing a select on that filp. */
    f = se->filps[fd];
    if ((f->filp_select_ops & ops) != ops) {
      int wantops;

      wantops = (f->filp_select_ops |= ops);
      r = do_select_request(se, fd, &wantops);
      if (r != OK && r != SUSPEND)
        break;  /* Error or bogus return code; abort */

      /* The select request above might have turned on/off some
       * operations because they were 'ready' or not meaningful.
       * Either way, we might have a result and we need to store them
       * in the select table entry. */
      if (wantops & ops) ops2tab(wantops, fd, se);
    }
  }

  if ((se->nreadyfds > 0 || !se->block) && !is_deferred(se)) {
    /* fd's were found that were ready to go right away, and/or
     * we were instructed not to block at all. Must return
     * immediately.
     */
    r = copy_fdsets(se, se->nfds, TO_PROC);
    select_cancel_all(se);
    se->requestor = NULL;

    if (r != OK)
      return(r);
    else if (se->error != OK)
      return(se->error);

    return(se->nreadyfds);
  }

  /* Convert timeval to ticks and set the timer. If it fails, undo
   * all, return error.
   */
  if (do_timeout) {
    int ticks;
    /* Open Group:
     * "If the requested timeout interval requires a finer
     * granularity than the implementation supports, the
     * actual timeout interval shall be rounded up to the next
     * supported value."
     */
#define USECPERSEC 1000000
    while (timeout.tv_usec >= USECPERSEC) {
      /* this is to avoid overflow with *system_hz below */
      timeout.tv_usec -= USECPERSEC;
      timeout.tv_sec++;
    }
    ticks = timeout.tv_sec * system_hz +
            (timeout.tv_usec * system_hz + USECPERSEC - 1) / USECPERSEC;
    se->expiry = ticks;
    set_timer(&se->timer, ticks, select_timeout_check, s);
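    /* Worked example (illustrative; assumes system_hz == 100): a timeout of
     * { tv_sec = 1, tv_usec = 5000 } gives 1 * 100 + (5000 * 100 + 999999) /
     * 1000000 = 100 + 1 = 101 ticks, i.e. the 5 ms remainder is rounded up
     * to one whole tick, matching the "round up" rule quoted above.
     */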
  }

  /* process now blocked */
  suspend(FP_BLOCKED_ON_SELECT);
  return(SUSPEND);
}

/*===========================================================================*
 *				is_deferred				     *
 *===========================================================================*/
static int is_deferred(struct selectentry *se)
{
/* Find out whether this select has pending initial replies */

  int fd;
  struct filp *f;

  for (fd = 0; fd < se->nfds; fd++) {
    if ((f = se->filps[fd]) == NULL) continue;
    if (f->filp_select_flags & (FSF_UPDATE|FSF_BUSY)) return(TRUE);
  }

  return(FALSE);
}

/*===========================================================================*
 *				is_regular_file				     *
 *===========================================================================*/
static int is_regular_file(struct filp *f)
{
  return(f && f->filp_vno && S_ISREG(f->filp_vno->v_mode));
}

/*===========================================================================*
 *				is_pipe					     *
 *===========================================================================*/
static int is_pipe(struct filp *f)
{
/* Recognize either anonymous pipe or named pipe (FIFO) */
  return(f && f->filp_vno && S_ISFIFO(f->filp_vno->v_mode));
}

/*===========================================================================*
 *				is_supported_major			     *
 *===========================================================================*/
static int is_supported_major(struct filp *f)
{
/* See if this filp is a handle on a device on which we support select() */
  unsigned int m;

  if (!(f && f->filp_vno)) return(FALSE);
  if (!S_ISCHR(f->filp_vno->v_mode)) return(FALSE);

  for (m = 0; m < SEL_MAJORS; m++)
    if (major(f->filp_vno->v_sdev) == select_majors[m])
      return(TRUE);

  return(FALSE);
}

/*===========================================================================*
 *				select_request_async			     *
 *===========================================================================*/
static int select_request_async(struct filp *f, int *ops, int block)
{
  int r, rops, major;
  struct dmap *dp;

  rops = *ops;

  /* By default, nothing to do */
  *ops = 0;

  if (!block && (f->filp_select_flags & FSF_BLOCKED)) {
    /* This filp is blocked waiting for a reply, but we don't want to
     * block ourselves. Unless we're awaiting the initial reply, these
     * operations won't be ready */
    if (!(f->filp_select_flags & FSF_BUSY)) {
      if ((rops & SEL_RD) && (f->filp_select_flags & FSF_RD_BLOCK))
        rops &= ~SEL_RD;
      if ((rops & SEL_WR) && (f->filp_select_flags & FSF_WR_BLOCK))
        rops &= ~SEL_WR;
      if ((rops & SEL_ERR) && (f->filp_select_flags & FSF_ERR_BLOCK))
        rops &= ~SEL_ERR;
      if (!(rops & (SEL_RD|SEL_WR|SEL_ERR)))
        return(OK);
    }
  }

  f->filp_select_flags |= FSF_UPDATE;
  if (block) {
    rops |= SEL_NOTIFY;
    if (rops & SEL_RD) f->filp_select_flags |= FSF_RD_BLOCK;
    if (rops & SEL_WR) f->filp_select_flags |= FSF_WR_BLOCK;
    if (rops & SEL_ERR) f->filp_select_flags |= FSF_ERR_BLOCK;
  }

  if (f->filp_select_flags & FSF_BUSY)
    return(SUSPEND);

  major = major(f->filp_vno->v_sdev);
  if (major < 0 || major >= NR_DEVICES) return(ENXIO);
  dp = &dmap[major];
  if (dp->dmap_sel_filp)
    return(SUSPEND);

  f->filp_select_flags &= ~FSF_UPDATE;
  r = dev_io(VFS_DEV_SELECT, f->filp_vno->v_sdev, rops, NULL,
             cvu64(0), 0, 0, FALSE);
  if (r < 0 && r != SUSPEND)
    return(r);

  if (r != SUSPEND)
    panic("select_request_async: expected SUSPEND got: %d", r);

  dp->dmap_sel_filp = f;
  f->filp_select_flags |= FSF_BUSY;

  return(SUSPEND);
}

/*===========================================================================*
 *				select_request_file			     *
 *===========================================================================*/
static int select_request_file(struct filp *UNUSED(f), int *UNUSED(ops),
  int UNUSED(block))
{
/* Files are always ready, so output *ops is input *ops */
  return(OK);
}

/*===========================================================================*
 *				select_request_major			     *
 *===========================================================================*/
static int select_request_major(struct filp *f, int *ops, int block)
{
  int major, r;

  major = major(f->filp_vno->v_sdev);
  if (major < 0 || major >= NR_DEVICES) return(ENXIO);

  if (dmap[major].dmap_style == STYLE_DEVA ||
      dmap[major].dmap_style == STYLE_CLONE_A)
    r = select_request_async(f, ops, block);
  else
    r = select_request_sync(f, ops, block);

  return(r);
}

/*===========================================================================*
 *				select_request_sync			     *
 *===========================================================================*/
static int select_request_sync(struct filp *f, int *ops, int block)
{
  int rops;

  rops = *ops;
  if (block) rops |= SEL_NOTIFY;
  *ops = dev_io(VFS_DEV_SELECT, f->filp_vno->v_sdev, rops, NULL,
                cvu64(0), 0, 0, FALSE);
  if (*ops < 0)
    return(*ops);

  return(OK);
}

/*===========================================================================*
 *				select_request_pipe			     *
 *===========================================================================*/
static int select_request_pipe(struct filp *f, int *ops, int block)
{
  int orig_ops, r = 0, err;

  orig_ops = *ops;

  if ((*ops & (SEL_RD|SEL_ERR))) {
    /* Check if we can read 1 byte */
    err = pipe_check(f->filp_vno, READING, 0, 1, 1 /* Check only */);

    if (err != SUSPEND)
      r |= SEL_RD;
    if (err < 0 && err != SUSPEND)
      r |= SEL_ERR;
    if (err == SUSPEND && !(f->filp_mode & R_BIT)) {
      /* A "meaningless" read select, therefore ready
       * for reading and no error set. */
      r |= SEL_RD;
      r &= ~SEL_ERR;
    }
  }

  if ((*ops & (SEL_WR|SEL_ERR))) {
    /* Check if we can write 1 byte */
    err = pipe_check(f->filp_vno, WRITING, 0, 1, 1 /* Check only */);

    if (err != SUSPEND)
      r |= SEL_WR;
    if (err < 0 && err != SUSPEND)
      r |= SEL_ERR;
    if (err == SUSPEND && !(f->filp_mode & W_BIT)) {
      /* A "meaningless" write select, therefore ready
       * for writing and no error set. */
      r |= SEL_WR;
      r &= ~SEL_ERR;
    }
  }

  /* Some options we collected might not be requested. */
  *ops = r & orig_ops;

  if (!*ops && block)
    f->filp_pipe_select_ops |= orig_ops;

  return(OK);
}

/*===========================================================================*
 *				tab2ops					     *
 *===========================================================================*/
static int tab2ops(int fd, struct selectentry *e)
{
  int ops = 0;
  if (FD_ISSET(fd, &e->readfds))  ops |= SEL_RD;
  if (FD_ISSET(fd, &e->writefds)) ops |= SEL_WR;
  if (FD_ISSET(fd, &e->errorfds)) ops |= SEL_ERR;

  return(ops);
}

/*===========================================================================*
 *				ops2tab					     *
 *===========================================================================*/
static void ops2tab(int ops, int fd, struct selectentry *e)
{
  if ((ops & SEL_RD) && e->vir_readfds && FD_ISSET(fd, &e->readfds) &&
      !FD_ISSET(fd, &e->ready_readfds)) {
    FD_SET(fd, &e->ready_readfds);
    e->nreadyfds++;
  }

  if ((ops & SEL_WR) && e->vir_writefds && FD_ISSET(fd, &e->writefds) &&
      !FD_ISSET(fd, &e->ready_writefds)) {
    FD_SET(fd, &e->ready_writefds);
    e->nreadyfds++;
  }

  if ((ops & SEL_ERR) && e->vir_errorfds && FD_ISSET(fd, &e->errorfds) &&
      !FD_ISSET(fd, &e->ready_errorfds)) {
    FD_SET(fd, &e->ready_errorfds);
    e->nreadyfds++;
  }
}

/*===========================================================================*
 *				copy_fdsets				     *
 *===========================================================================*/
static int copy_fdsets(struct selectentry *se, int nfds, int direction)
{
  int r;
  size_t fd_setsize;
  endpoint_t src_e, dst_e;
  fd_set *src_fds, *dst_fds;

  if (nfds < 0 || nfds > OPEN_MAX)
    panic("select copy_fdsets: nfds wrong: %d", nfds);

  /* Only copy back as many bits as the user expects. */
#ifdef __NBSD_LIBC
  fd_setsize = (size_t) (howmany(nfds, __NFDBITS) * sizeof(__fd_mask));
#else
  fd_setsize = (size_t) (_FDSETWORDS(nfds) * _FDSETBITSPERWORD/8);
#endif
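
  /* Illustrative example (exact values depend on the libc headers): with
   * nfds = 20 and 32-bit fd mask words, howmany(20, 32) = 1, so fd_setsize
   * is a single 4-byte word; a full fd_set sized for OPEN_MAX descriptors
   * is never copied.
   */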

  /* Set source and destination endpoints */
  src_e = (direction == FROM_PROC) ? se->req_endpt : SELF;
  dst_e = (direction == FROM_PROC) ? SELF : se->req_endpt;

  /* read set */
  src_fds = (direction == FROM_PROC) ? se->vir_readfds : &se->ready_readfds;
  dst_fds = (direction == FROM_PROC) ? &se->readfds : se->vir_readfds;
  if (se->vir_readfds) {
    r = sys_vircopy(src_e, (vir_bytes) src_fds, dst_e,
                    (vir_bytes) dst_fds, fd_setsize);
    if (r != OK) return(r);
  }

  /* write set */
  src_fds = (direction == FROM_PROC) ? se->vir_writefds : &se->ready_writefds;
  dst_fds = (direction == FROM_PROC) ? &se->writefds : se->vir_writefds;
  if (se->vir_writefds) {
    r = sys_vircopy(src_e, (vir_bytes) src_fds, dst_e,
                    (vir_bytes) dst_fds, fd_setsize);
    if (r != OK) return(r);
  }

  /* error set */
  src_fds = (direction == FROM_PROC) ? se->vir_errorfds : &se->ready_errorfds;
  dst_fds = (direction == FROM_PROC) ? &se->errorfds : se->vir_errorfds;
  if (se->vir_errorfds) {
    r = sys_vircopy(src_e, (vir_bytes) src_fds, dst_e,
                    (vir_bytes) dst_fds, fd_setsize);
    if (r != OK) return(r);
  }

  return(OK);
}

/*===========================================================================*
 *				select_cancel_all			     *
 *===========================================================================*/
static void select_cancel_all(struct selectentry *se)
{
/* Cancel select. Decrease select usage and cancel timer */

  int fd;
  struct filp *f;

  for (fd = 0; fd < se->nfds; fd++) {
    if ((f = se->filps[fd]) == NULL) continue;
    se->filps[fd] = NULL;
    select_cancel_filp(f);
  }

  if (se->expiry > 0) {
    cancel_timer(&se->timer);
    se->expiry = 0;
  }

  se->requestor = NULL;
}

/*===========================================================================*
 *				select_cancel_filp			     *
 *===========================================================================*/
static void select_cancel_filp(struct filp *f)
{
/* Reduce number of select users of this filp */

  assert(f);
  assert(f->filp_selectors >= 0);
  if (f->filp_selectors == 0) return;
  if (f->filp_count == 0) return;

  select_lock_filp(f, f->filp_select_ops);

  f->filp_selectors--;
  if (f->filp_selectors == 0) {
    /* No one selecting on this filp anymore, forget about select state */
    f->filp_select_ops = 0;
    f->filp_select_flags = 0;
    f->filp_pipe_select_ops = 0;
  }

  unlock_filp(f);
}

/*===========================================================================*
 *				select_return				     *
 *===========================================================================*/
static void select_return(struct selectentry *se)
{
  int r, r1;

  assert(!is_deferred(se));  /* Not done yet, first wait for async reply */

  select_cancel_all(se);

  r1 = copy_fdsets(se, se->nfds, TO_PROC);
  if (r1 != OK)
    r = r1;
  else if (se->error != OK)
    r = se->error;
  else
    r = se->nreadyfds;

  revive(se->req_endpt, r);
}

/*===========================================================================*
 *				select_callback				     *
 *===========================================================================*/
void select_callback(struct filp *f, int status)
{
  filp_status(f, status);
}

/*===========================================================================*
 *				init_select				     *
 *===========================================================================*/
void init_select(void)
{
  int s;

  for (s = 0; s < MAXSELECTS; s++)
    init_timer(&selecttab[s].timer);
}

/*===========================================================================*
 *				select_forget				     *
 *===========================================================================*/
void select_forget(endpoint_t proc_e)
{
/* Something has happened (e.g. signal delivered that interrupts select()).
 * Totally forget about the select(). */

  int slot;
  struct selectentry *se;

  for (slot = 0; slot < MAXSELECTS; slot++) {
    se = &selecttab[slot];
    if (se->requestor != NULL && se->req_endpt == proc_e)
      break;
  }

  if (slot >= MAXSELECTS) return;  /* Entry not found */
  se->error = EINTR;
  if (is_deferred(se)) return;  /* Still awaiting initial reply */

  select_cancel_all(se);
}

/*===========================================================================*
 *				select_timeout_check			     *
 *===========================================================================*/
void select_timeout_check(timer_t *timer)
{
  int s;
  struct selectentry *se;

  s = tmr_arg(timer)->ta_int;
  if (s < 0 || s >= MAXSELECTS) return;  /* Entry does not exist */

  se = &selecttab[s];
  if (se->requestor == NULL) return;
  fp = se->requestor;
  if (se->expiry <= 0) return;  /* Strange, did we even ask for a timeout? */
  se->expiry = 0;
  if (is_deferred(se)) return;  /* Wait for initial replies to DEV_SELECT */
  select_return(se);
}

/*===========================================================================*
 *				select_unsuspend_by_endpt		     *
 *===========================================================================*/
void select_unsuspend_by_endpt(endpoint_t proc_e)
{
/* Revive blocked processes when a driver has disappeared */

  int fd, s, major;
  struct selectentry *se;
  struct filp *f;

  for (s = 0; s < MAXSELECTS; s++) {
    int wakehim = 0;
    se = &selecttab[s];
    if (se->requestor == NULL) continue;
    if (se->requestor->fp_endpoint == proc_e) {
      assert(se->requestor->fp_flags & FP_EXITING);
      select_cancel_all(se);
      continue;
    }

    for (fd = 0; fd < se->nfds; fd++) {
      if ((f = se->filps[fd]) == NULL || f->filp_vno == NULL)
        continue;

      major = major(f->filp_vno->v_sdev);
      if (dmap_driver_match(proc_e, major)) {
        se->filps[fd] = NULL;
        se->error = EINTR;
        select_cancel_filp(f);
        wakehim = 1;
      }
    }

    if (wakehim && !is_deferred(se))
      select_return(se);
  }
}

/*===========================================================================*
 *				select_reply1				     *
 *===========================================================================*/
void select_reply1(endpoint_t driver_e, int minor, int status)
{
/* Handle reply to DEV_SELECT request */

  int major;
  dev_t dev;
  struct filp *f;
  struct dmap *dp;
  struct vnode *vp;

  /* Figure out which device is replying */
  if ((dp = get_dmap(driver_e)) == NULL) return;

  major = dp - dmap;
  dev = makedev(major, minor);

  /* Get filp belonging to character special file */
  if ((f = dp->dmap_sel_filp) == NULL) {
    printf("VFS (%s:%d): major %d was not expecting a DEV_SELECT reply\n",
           __FILE__, __LINE__, major);
    return;
  }

  /* Is the filp still in use and busy waiting for a reply? The owner might
   * have vanished before the driver was able to reply. */
  if (f->filp_count >= 1 && (f->filp_select_flags & FSF_BUSY)) {
    /* Find vnode and check we got a reply from the device we expected */
    vp = f->filp_vno;
    assert(vp != NULL);
    assert(S_ISCHR(vp->v_mode));
    if (vp->v_sdev != dev) {
      printf("VFS (%s:%d): expected reply from dev %d not %d\n",
             __FILE__, __LINE__, vp->v_sdev, dev);
      return;
    }
  }

  /* No longer waiting for a reply from this device */
  dp->dmap_sel_filp = NULL;

  /* Process select result only if requestor is still around. That is, the
   * corresponding filp is still in use.
   */
  if (f->filp_count >= 1) {
    select_lock_filp(f, f->filp_select_ops);
    f->filp_select_flags &= ~FSF_BUSY;

    /* The select call is done now, except when
     * - another process started a select on the same filp with possibly a
     *   different set of operations.
     * - a process does a select on the same filp but using different file
     *   descriptors.
     * - the select has a timeout. Upon receiving this reply the operations
     *   might not be ready yet, so we want to wait for that to ultimately
     *   happen.
     * Therefore we need to keep remembering what the operations are.
     */
    if (!(f->filp_select_flags & (FSF_UPDATE|FSF_BLOCKED)))
      f->filp_select_ops = 0;  /* done selecting */
    else if (!(f->filp_select_flags & FSF_UPDATE))
      /* there may be operations pending */
      f->filp_select_ops &= ~status;

    /* Record new filp status */
    if (!(status == 0 && (f->filp_select_flags & FSF_BLOCKED))) {
      if (status > 0) {  /* operations ready */
        if (status & SEL_RD)
          f->filp_select_flags &= ~FSF_RD_BLOCK;
        if (status & SEL_WR)
          f->filp_select_flags &= ~FSF_WR_BLOCK;
        if (status & SEL_ERR)
          f->filp_select_flags &= ~FSF_ERR_BLOCK;
      } else if (status < 0) {  /* error */
        /* Always unblock upon error */
        f->filp_select_flags &= ~FSF_BLOCKED;
      }
    }

    unlock_filp(f);
    filp_status(f, status);  /* Tell filp owners about the results */
  }

  select_restart_filps();
}

/*===========================================================================*
 *				select_reply2				     *
 *===========================================================================*/
void select_reply2(endpoint_t driver_e, int minor, int status)
{
/* Handle secondary reply to DEV_SELECT request. A secondary reply occurs when
 * the select request is 'blocking' until an operation becomes ready. */

  int major, slot, fd;
  dev_t dev;
  struct filp *f;
  struct dmap *dp;
  struct vnode *vp;
  struct selectentry *se;

  if (status == 0) {
    printf("VFS (%s:%d): weird status (%d) to report\n",
           __FILE__, __LINE__, status);
    return;
  }

  /* Figure out which device is replying */
  if ((dp = get_dmap(driver_e)) == NULL) {
    printf("VFS (%s:%d): endpoint %d is not a known driver endpoint\n",
           __FILE__, __LINE__, driver_e);
    return;
  }
  major = dp - dmap;
  dev = makedev(major, minor);

  /* Find all file descriptors selecting for this device */
  for (slot = 0; slot < MAXSELECTS; slot++) {
    se = &selecttab[slot];
    if (se->requestor == NULL) continue;  /* empty slot */

    for (fd = 0; fd < se->nfds; fd++) {
      if ((f = se->filps[fd]) == NULL) continue;
      if ((vp = f->filp_vno) == NULL) continue;
      if (!S_ISCHR(vp->v_mode)) continue;
      if (vp->v_sdev != dev) continue;

      select_lock_filp(f, f->filp_select_ops);
      if (status > 0) {  /* Operations ready */
        /* Clear the replied bits from the request
         * mask unless FSF_UPDATE is set.
         */
        if (!(f->filp_select_flags & FSF_UPDATE))
          f->filp_select_ops &= ~status;
        if (status & SEL_RD)
          f->filp_select_flags &= ~FSF_RD_BLOCK;
        if (status & SEL_WR)
          f->filp_select_flags &= ~FSF_WR_BLOCK;
        if (status & SEL_ERR)
          f->filp_select_flags &= ~FSF_ERR_BLOCK;

        ops2tab(status, fd, se);
      } else {
        f->filp_select_flags &= ~FSF_BLOCKED;
        ops2tab(SEL_RD|SEL_WR|SEL_ERR, fd, se);
      }
      unlock_filp(f);
      if (se->nreadyfds > 0) restart_proc(se);
    }
  }

  select_restart_filps();
}

/*===========================================================================*
 *				select_restart_filps			     *
 *===========================================================================*/
static void select_restart_filps(void)
{
  int fd, slot;
  struct filp *f;
  struct vnode *vp;
  struct selectentry *se;

  /* Locate filps that can be restarted */
  for (slot = 0; slot < MAXSELECTS; slot++) {
    se = &selecttab[slot];
    if (se->requestor == NULL) continue;  /* empty slot */

    /* Only 'deferred' processes are eligible to restart */
    if (!is_deferred(se)) continue;

    /* Find filps that are not waiting for a reply, but have an updated
     * status (i.e., another select on the same filp, with possibly a
     * different set of operations, is to be done) and thus require the
     * select request to be sent again.
     */
    for (fd = 0; fd < se->nfds; fd++) {
      int r, wantops, ops;
      if ((f = se->filps[fd]) == NULL) continue;
      if (f->filp_select_flags & FSF_BUSY)  /* Still waiting for */
        continue;                           /* initial reply */
      if (!(f->filp_select_flags & FSF_UPDATE))  /* Must be in */
        continue;                                /* 'update' state */

      wantops = ops = f->filp_select_ops;
      vp = f->filp_vno;
      assert(S_ISCHR(vp->v_mode));
      r = do_select_request(se, fd, &wantops);
      if (r != OK && r != SUSPEND)
        break;  /* Error or bogus return code; abort */
      if (wantops & ops) ops2tab(wantops, fd, se);
    }
  }
}

/*===========================================================================*
 *				do_select_request			     *
 *===========================================================================*/
static int do_select_request(struct selectentry *se, int fd, int *ops)
{
/* Perform actual select request for file descriptor fd */

  int r, type;
  struct filp *f;

  type = se->type[fd];
  f = se->filps[fd];
  select_lock_filp(f, *ops);
  r = fdtypes[type].select_request(f, ops, se->block);
  unlock_filp(f);
  if (r != OK && r != SUSPEND) {
    se->error = EINTR;
    se->block = 0;  /* Stop blocking to return asap */
    if (!is_deferred(se)) select_cancel_all(se);
  }

  return(r);
}

/*===========================================================================*
 *				filp_status				     *
 *===========================================================================*/
static void filp_status(struct filp *f, int status)
{
/* Tell processes that need to know about the status of this filp */

  int fd, slot;
  struct selectentry *se;

  for (slot = 0; slot < MAXSELECTS; slot++) {
    se = &selecttab[slot];
    if (se->requestor == NULL) continue;  /* empty slot */

    for (fd = 0; fd < se->nfds; fd++) {
      if (se->filps[fd] != f) continue;
      if (status < 0)
        ops2tab(SEL_RD|SEL_WR|SEL_ERR, fd, se);
      else
        ops2tab(status, fd, se);
      restart_proc(se);
    }
  }
}

/*===========================================================================*
 *				restart_proc				     *
 *===========================================================================*/
static void restart_proc(struct selectentry *se)
{
/* Tell process about select results (if any) unless there are still results
 * pending. */

  if ((se->nreadyfds > 0 || !se->block) && !is_deferred(se))
    select_return(se);
}

/*===========================================================================*
 *				wipe_select				     *
 *===========================================================================*/
static void wipe_select(struct selectentry *se)
{
  se->nfds = 0;
  se->nreadyfds = 0;
  se->error = OK;
  se->block = 0;
  memset(se->filps, 0, sizeof(se->filps));

  FD_ZERO(&se->readfds);
  FD_ZERO(&se->writefds);
  FD_ZERO(&se->errorfds);
  FD_ZERO(&se->ready_readfds);
  FD_ZERO(&se->ready_writefds);
  FD_ZERO(&se->ready_errorfds);
}

/*===========================================================================*
 *				select_lock_filp			     *
 *===========================================================================*/
static void select_lock_filp(struct filp *f, int ops)
{
/* Lock a filp and vnode based on which operations are requested */
  tll_access_t locktype;

  locktype = VNODE_READ;  /* By default */

  if (ops & (SEL_WR|SEL_ERR))
    /* Selecting for error or writing requires exclusive access */
    locktype = VNODE_WRITE;

  lock_filp(f, locktype);
}