/* When a needed block is not in the cache, it must be fetched from the disk.
 * Special character files also require I/O. The routines for these are here.
 *
 * The entry points in this file are:
 *   cdev_open:   open a character device
 *   cdev_close:  close a character device
 *   cdev_io:     initiate a read, write, or ioctl to a character device
 *   cdev_select: initiate a select call on a device
 *   cdev_cancel: cancel an I/O request, blocking until it has been cancelled
 *   cdev_reply:  process the result of a character driver request
 *   bdev_open:   open a block device
 *   bdev_close:  close a block device
 *   bdev_reply:  process the result of a block driver request
 *   bdev_up:     a block driver has been mapped in
 *   do_ioctl:    perform the IOCTL system call
 */
#include "fs.h"
#include <string.h>
#include <fcntl.h>
#include <assert.h>
#include <sys/stat.h>
#include <sys/ttycom.h>
#include <minix/callnr.h>
#include <minix/com.h>
#include <minix/endpoint.h>
#include <minix/ioctl.h>
#include <minix/u64.h>
#include "file.h"
#include "scratchpad.h"
#include "vnode.h"
#include <minix/vfsif.h>
#include "vmnt.h"
static int cdev_opcl(int op, dev_t dev, int flags);
static int block_io(endpoint_t driver_e, message *mess_ptr);
static cp_grant_id_t make_grant(endpoint_t driver_e, endpoint_t user_e, int op,
        vir_bytes buf, unsigned long size);
/*===========================================================================*
 *                              bdev_open                                    *
 *===========================================================================*/
int bdev_open(dev_t dev, int access)
{
/* Open a block device. */
  devmajor_t major_dev;
  devminor_t minor_dev;
  message dev_mess;
  int r;

  major_dev = major(dev);
  minor_dev = minor(dev);
  if (major_dev < 0 || major_dev >= NR_DEVICES) return ENXIO;
  if (dmap[major_dev].dmap_driver == NONE) return ENXIO;

  memset(&dev_mess, 0, sizeof(dev_mess));
  dev_mess.m_type = BDEV_OPEN;
  dev_mess.m_lbdev_lblockdriver_msg.minor = minor_dev;
  dev_mess.m_lbdev_lblockdriver_msg.access = 0;
  if (access & R_BIT) dev_mess.m_lbdev_lblockdriver_msg.access |= BDEV_R_BIT;
  if (access & W_BIT) dev_mess.m_lbdev_lblockdriver_msg.access |= BDEV_W_BIT;
  dev_mess.m_lbdev_lblockdriver_msg.id = 0;

  /* Call the driver and wait for the reply. */
  r = block_io(dmap[major_dev].dmap_driver, &dev_mess);
  if (r != OK) return r;

  return dev_mess.m_lblockdriver_lbdev_reply.status;
}
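/* Illustrative usage sketch (hypothetical caller, not taken from this file):
 * code that needs read-write access to the block device behind a vnode might
 * do
 *
 *      if ((r = bdev_open(vp->v_sdev, R_BIT | W_BIT)) != OK) return r;
 *      ...
 *      (void) bdev_close(vp->v_sdev);
 *
 * where the R_BIT/W_BIT pair is translated into BDEV_R_BIT/BDEV_W_BIT above.
 */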
/*===========================================================================*
 *                              bdev_close                                   *
 *===========================================================================*/
int bdev_close(dev_t dev)
{
/* Close a block device. */
  devmajor_t major_dev;
  devminor_t minor_dev;
  message dev_mess;
  int r;

  major_dev = major(dev);
  minor_dev = minor(dev);
  if (major_dev < 0 || major_dev >= NR_DEVICES) return ENXIO;
  if (dmap[major_dev].dmap_driver == NONE) return ENXIO;

  memset(&dev_mess, 0, sizeof(dev_mess));
  dev_mess.m_type = BDEV_CLOSE;
  dev_mess.m_lbdev_lblockdriver_msg.minor = minor_dev;
  dev_mess.m_lbdev_lblockdriver_msg.id = 0;

  /* Call the driver and wait for the reply. */
  r = block_io(dmap[major_dev].dmap_driver, &dev_mess);
  if (r != OK) return r;

  return dev_mess.m_lblockdriver_lbdev_reply.status;
}
/*===========================================================================*
 *                              bdev_ioctl                                   *
 *===========================================================================*/
static int bdev_ioctl(dev_t dev, endpoint_t proc_e, unsigned long req,
        vir_bytes buf)
{
/* Perform an I/O control operation on a block device. */
  struct dmap *dp;
  cp_grant_id_t gid;
  message dev_mess;
  devmajor_t major_dev;
  devminor_t minor_dev;
  int r;

  major_dev = major(dev);
  minor_dev = minor(dev);

  /* Determine task dmap. */
  dp = &dmap[major_dev];
  if (dp->dmap_driver == NONE) {
        printf("VFS: bdev_ioctl: no driver for major %d\n", major_dev);
        return ENXIO;
  }

  /* Set up a grant if necessary. */
  gid = make_grant(dp->dmap_driver, proc_e, BDEV_IOCTL, buf, req);

  /* Set up the message passed to the task. */
  memset(&dev_mess, 0, sizeof(dev_mess));

  dev_mess.m_type = BDEV_IOCTL;
  dev_mess.m_lbdev_lblockdriver_msg.minor = minor_dev;
  dev_mess.m_lbdev_lblockdriver_msg.request = req;
  dev_mess.m_lbdev_lblockdriver_msg.grant = gid;
  dev_mess.m_lbdev_lblockdriver_msg.user = proc_e;
  dev_mess.m_lbdev_lblockdriver_msg.id = 0;

  /* Call the task. */
  r = block_io(dp->dmap_driver, &dev_mess);

  /* Clean up. */
  if (GRANT_VALID(gid)) cpf_revoke(gid);

  /* Return the result. */
  if (r != OK) return(r);

  return(dev_mess.m_lblockdriver_lbdev_reply.status);
}
/*===========================================================================*
 *                              make_grant                                   *
 *===========================================================================*/
static cp_grant_id_t make_grant(endpoint_t driver_e, endpoint_t user_e, int op,
        vir_bytes buf, unsigned long bytes)
{
/* Create a magic grant for the given operation and buffer. */
  cp_grant_id_t gid;
  size_t size;
  int access;

  switch (op) {
  case CDEV_READ:
  case CDEV_WRITE:
        gid = cpf_grant_magic(driver_e, user_e, buf,
                (size_t) bytes, op == CDEV_READ ? CPF_WRITE : CPF_READ);
        break;

  case CDEV_IOCTL:
  case BDEV_IOCTL:
        /* For IOCTLs, the bytes parameter contains the IOCTL request.
         * This request encodes the requested access method and buffer size.
         */
        access = 0;
        if (_MINIX_IOCTL_IOR(bytes)) access |= CPF_WRITE;
        if (_MINIX_IOCTL_IOW(bytes)) access |= CPF_READ;
        if (_MINIX_IOCTL_BIG(bytes))
                size = _MINIX_IOCTL_SIZE_BIG(bytes);
        else
                size = _MINIX_IOCTL_SIZE(bytes);

        /* Grant access to the buffer even if no I/O happens with the ioctl,
         * although now that we no longer identify responses based on grants,
         * this is not strictly necessary.
         */
        gid = cpf_grant_magic(driver_e, user_e, buf, size, access);
        break;

  default:
        panic("VFS: unknown operation %d", op);
  }

  if (!GRANT_VALID(gid))
        panic("VFS: cpf_grant_magic failed");

  return gid;
}
/*===========================================================================*
 *                              cdev_map                                     *
 *===========================================================================*/
dev_t cdev_map(dev_t dev, struct fproc *rfp)
{
/* Map the given device number to a real device number, remapping /dev/tty to
 * the given process's controlling terminal if it has one. Perform a bounds
 * check on the resulting device's major number, and return NO_DEV on failure.
 * This function is idempotent but not used that way.
 */
  devmajor_t major;

  /* First cover one special case: /dev/tty, the magic device that translates
   * to the controlling tty.
   */
  if ((major = major(dev)) == CTTY_MAJOR) {
        /* No controlling terminal? Fail the request. */
        if (rfp->fp_tty == NO_DEV) return NO_DEV;

        /* Substitute the controlling terminal device. */
        dev = rfp->fp_tty;
        major = major(dev);
  }

  if (major < 0 || major >= NR_DEVICES) return NO_DEV;

  return dev;
}
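/* Illustrative example (the concrete device numbers are assumptions): a
 * process whose controlling terminal is the second system console opens
 * /dev/tty, i.e. makedev(CTTY_MAJOR, 0). cdev_map() substitutes the stored
 * fp_tty value, say makedev(TTY_MAJOR, 1), so all further requests go to the
 * real terminal driver rather than to the CTTY pseudo-device.
 */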
/*===========================================================================*
 *                              cdev_get                                     *
 *===========================================================================*/
static struct dmap *cdev_get(dev_t dev, devminor_t *minor_dev)
{
/* Obtain the dmap structure for the given device, if a valid driver exists for
 * the major device. Perform redirection for CTTY_MAJOR.
 */
  struct dmap *dp;
  int slot;

  /* Remap /dev/tty as needed. Perform a bounds check on the major number. */
  if ((dev = cdev_map(dev, fp)) == NO_DEV)
        return(NULL);

  /* Determine task dmap. */
  dp = &dmap[major(dev)];

  /* See if driver is roughly valid. */
  if (dp->dmap_driver == NONE) return(NULL);

  if (isokendpt(dp->dmap_driver, &slot) != OK) {
        printf("VFS: cdev_get: old driver for major %x (%d)\n", major(dev),
                dp->dmap_driver);
        return(NULL);
  }

  /* Also return the (possibly redirected) minor number. */
  *minor_dev = minor(dev);
  return(dp);
}
/*===========================================================================*
 *                              cdev_io                                      *
 *===========================================================================*/
int cdev_io(
  int op,                       /* CDEV_READ, CDEV_WRITE, or CDEV_IOCTL */
  dev_t dev,                    /* major-minor device number */
  endpoint_t proc_e,            /* in whose address space is buf? */
  vir_bytes buf,                /* virtual address of the buffer */
  off_t pos,                    /* byte position */
  unsigned long bytes,          /* how many bytes to transfer, or request */
  int flags                     /* special flags, like O_NONBLOCK */
)
{
/* Initiate a read, write, or ioctl to a character device. */
  devminor_t minor_dev;
  struct dmap *dp;
  message dev_mess;
  cp_grant_id_t gid;
  int r;

  assert(op == CDEV_READ || op == CDEV_WRITE || op == CDEV_IOCTL);

  /* Determine task map. */
  if ((dp = cdev_get(dev, &minor_dev)) == NULL)
        return(ENXIO);

  /* Handle TIOCSCTTY ioctl: set controlling tty.
   * TODO: cleaner implementation work in progress.
   */
  if (op == CDEV_IOCTL && bytes == TIOCSCTTY && major(dev) == TTY_MAJOR) {
        fp->fp_tty = dev;
  }

  /* Create a grant for the buffer provided by the user process. */
  gid = make_grant(dp->dmap_driver, proc_e, op, buf, bytes);

  /* Set up the rest of the message that will be sent to the driver. */
  memset(&dev_mess, 0, sizeof(dev_mess));
  dev_mess.m_type = op;
  dev_mess.m_vfs_lchardriver_readwrite.minor = minor_dev;
  if (op == CDEV_IOCTL) {
        dev_mess.m_vfs_lchardriver_readwrite.request = bytes;
        dev_mess.m_vfs_lchardriver_readwrite.user = proc_e;
  } else {
        dev_mess.m_vfs_lchardriver_readwrite.pos = pos;
        dev_mess.m_vfs_lchardriver_readwrite.count = bytes;
  }
  dev_mess.m_vfs_lchardriver_readwrite.id = proc_e;
  dev_mess.m_vfs_lchardriver_readwrite.grant = gid;
  dev_mess.m_vfs_lchardriver_readwrite.flags = 0;
  if (flags & O_NONBLOCK)
        dev_mess.m_vfs_lchardriver_readwrite.flags |= CDEV_NONBLOCK;

  /* Send the request to the driver. */
  if ((r = asynsend3(dp->dmap_driver, &dev_mess, AMF_NOREPLY)) != OK)
        panic("VFS: asynsend in cdev_io failed: %d", r);

  /* Suspend the calling process until a reply arrives. */
  wait_for(dp->dmap_driver);
  assert(!GRANT_VALID(fp->fp_grant));
  fp->fp_grant = gid;   /* revoke this when unsuspended. */

  return SUSPEND;
}
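/* Illustrative walk-through (the concrete numbers are examples only): for a
 * blocking read of 512 bytes at position 0, the message built above carries
 * m_type = CDEV_READ, .pos = 0, .count = 512, .id = proc_e and a CPF_WRITE
 * grant for the caller's buffer. The caller then stays suspended until the
 * driver's reply is routed back through cdev_reply() and cdev_generic_reply()
 * below, which revives it with the transfer count or an error code.
 */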
/*===========================================================================*
 *                              cdev_clone                                   *
 *===========================================================================*/
static int cdev_clone(dev_t dev, devminor_t new_minor)
{
/* A new minor device number has been returned. Request PFS to create a
 * temporary device file to hold it.
 */
  struct vnode *vp;
  struct node_details res;
  int r;

  /* Device number of the new device. */
  dev = makedev(major(dev), new_minor);

  /* Issue the request to PFS. */
  r = req_newnode(PFS_PROC_NR, fp->fp_effuid, fp->fp_effgid,
        ALL_MODES | I_CHAR_SPECIAL, dev, &res);
  if (r != OK) {
        (void) cdev_opcl(CDEV_CLOSE, dev, 0);
        return r;
  }

  /* Drop old node and use the new values */
  if ((vp = get_free_vnode()) == NULL) {
        req_putnode(PFS_PROC_NR, res.inode_nr, 1); /* is this right? */
        (void) cdev_opcl(CDEV_CLOSE, dev, 0);
        return(err_code);
  }
  lock_vnode(vp, VNODE_OPCL);

  assert(fp->fp_filp[scratch(fp).file.fd_nr] != NULL);
  unlock_vnode(fp->fp_filp[scratch(fp).file.fd_nr]->filp_vno);
  put_vnode(fp->fp_filp[scratch(fp).file.fd_nr]->filp_vno);

  vp->v_fs_e = res.fs_e;
  vp->v_inode_nr = res.inode_nr;
  vp->v_mode = res.fmode;
  vp->v_sdev = dev;
  vp->v_fs_count = 1;
  vp->v_ref_count = 1;
  fp->fp_filp[scratch(fp).file.fd_nr]->filp_vno = vp;

  return(OK);
}
/*===========================================================================*
 *                              cdev_opcl                                    *
 *===========================================================================*/
static int cdev_opcl(
  int op,                       /* operation, CDEV_OPEN or CDEV_CLOSE */
  dev_t dev,                    /* device to open or close */
  int flags                     /* mode bits and flags */
)
{
/* Open or close a character device. */
  devminor_t minor_dev, new_minor;
  struct dmap *dp;
  struct fproc *rfp;
  message dev_mess;
  int r, r2;

  assert(op == CDEV_OPEN || op == CDEV_CLOSE);

  /* Determine task dmap. */
  if ((dp = cdev_get(dev, &minor_dev)) == NULL)
        return(ENXIO);

  /* CTTY exception: do not actually send the open/close request for /dev/tty
   * to the driver. This avoids the case that the actual device will remain
   * open forever if the process calls setsid() after opening /dev/tty.
   */
  if (major(dev) == CTTY_MAJOR) return(OK);

  /* Add O_NOCTTY to the access flags if this process is not a session leader,
   * or if it already has a controlling tty, or if it is someone else's
   * controlling tty. For performance reasons, only search the full process
   * table if this driver has set controlling ttys before.
   */
  if (!(fp->fp_flags & FP_SESLDR) || fp->fp_tty != 0) {
        flags |= O_NOCTTY;
  } else if (!(flags & O_NOCTTY) && dp->dmap_seen_tty) {
        for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++)
                if (rfp->fp_pid != PID_FREE && rfp->fp_tty == dev)
                        flags |= O_NOCTTY;
  }

  /* Prepare the request message. */
  memset(&dev_mess, 0, sizeof(dev_mess));

  dev_mess.m_type = op;
  dev_mess.m_vfs_lchardriver_openclose.minor = minor_dev;
  dev_mess.m_vfs_lchardriver_openclose.id = who_e;
  if (op == CDEV_OPEN) {
        dev_mess.m_vfs_lchardriver_openclose.user = who_e;
        dev_mess.m_vfs_lchardriver_openclose.access = 0;
        if (flags & R_BIT)
                dev_mess.m_vfs_lchardriver_openclose.access |= CDEV_R_BIT;
        if (flags & W_BIT)
                dev_mess.m_vfs_lchardriver_openclose.access |= CDEV_W_BIT;
        if (flags & O_NOCTTY)
                dev_mess.m_vfs_lchardriver_openclose.access |= CDEV_NOCTTY;
  }

  /* Send the request to the driver. */
  if ((r = asynsend3(dp->dmap_driver, &dev_mess, AMF_NOREPLY)) != OK)
        panic("VFS: asynsend in cdev_opcl failed: %d", r);

  /* Block the thread waiting for a reply. */
  fp->fp_task = dp->dmap_driver;
  self->w_task = dp->dmap_driver;
  self->w_drv_sendrec = &dev_mess;

  worker_wait();

  self->w_task = NONE;
  self->w_drv_sendrec = NULL;

  /* Process the reply. */
  r = dev_mess.m_lchardriver_vfs_reply.status;

  if (op == CDEV_OPEN && r >= 0) {
        /* Some devices need special processing upon open. Such a device is
         * "cloned", i.e. on a successful open it is replaced by a new device
         * with a new unique minor device number. This new device number
         * identifies a new object (such as a new network connection) that has
         * been allocated within a driver.
         */
        if (r & CDEV_CLONED) {
                new_minor = r & ~(CDEV_CLONED | CDEV_CTTY);
                if ((r2 = cdev_clone(dev, new_minor)) < 0)
                        return(r2);
        }

        /* Did this call make the tty the controlling tty? */
        if (r & CDEV_CTTY) {
                fp->fp_tty = dev;
                dp->dmap_seen_tty = TRUE;
        }

        r = OK;
  }

  /* Return the result from the driver. */
  return(r);
}
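/* Illustrative example of the cloned-open reply decoded above (the minor
 * number is made up): a driver that allocates a fresh object per open may
 * reply with status CDEV_CLONED | 17, meaning "success, but switch this open
 * file to minor 17". cdev_clone() then asks PFS for a temporary
 * character-special node holding makedev(major(dev), 17) and hooks that new
 * vnode into the caller's filp, so later requests reach the cloned minor.
 */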
/*===========================================================================*
 *                              cdev_open                                    *
 *===========================================================================*/
int cdev_open(dev_t dev, int flags)
{
/* Open a character device. */

  return cdev_opcl(CDEV_OPEN, dev, flags);
}
/*===========================================================================*
 *                              cdev_close                                   *
 *===========================================================================*/
int cdev_close(dev_t dev)
{
/* Close a character device. */

  return cdev_opcl(CDEV_CLOSE, dev, 0);
}
/*===========================================================================*
 *                              do_ioctl                                     *
 *===========================================================================*/
int do_ioctl(void)
{
/* Perform the ioctl(2) system call. */
  unsigned long ioctlrequest;
  int r = OK;
  struct filp *f;
  register struct vnode *vp;
  dev_t dev;
  vir_bytes argx;

  scratch(fp).file.fd_nr = job_m_in.m_lc_vfs_ioctl.fd;
  ioctlrequest = job_m_in.m_lc_vfs_ioctl.req;
  argx = (vir_bytes)job_m_in.m_lc_vfs_ioctl.arg;

  if ((f = get_filp(scratch(fp).file.fd_nr, VNODE_READ)) == NULL)
        return(err_code);
  vp = f->filp_vno;             /* get vnode pointer */
  if (!S_ISCHR(vp->v_mode) && !S_ISBLK(vp->v_mode)) {
        r = ENOTTY;
  }

  if (r == OK) {
        dev = vp->v_sdev;

        if (S_ISBLK(vp->v_mode)) {
                f->filp_ioctl_fp = fp;

                r = bdev_ioctl(dev, who_e, ioctlrequest, argx);

                f->filp_ioctl_fp = NULL;
        } else
                r = cdev_io(CDEV_IOCTL, dev, who_e, argx, 0, ioctlrequest,
                        f->filp_flags);
  }

  unlock_filp(f);

  return(r);
}
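/* Illustrative mapping (the request and struct are just a familiar example):
 * a userland call such as ioctl(fd, TIOCGWINSZ, &ws) reaches do_ioctl() with
 * m_lc_vfs_ioctl.fd = fd, .req = TIOCGWINSZ and .arg pointing at ws. For a
 * character-special file it is forwarded via cdev_io(CDEV_IOCTL, ...), with
 * the request number travelling in the "bytes" parameter so that make_grant()
 * can derive the transfer direction and size from it.
 */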
/*===========================================================================*
 *                              cdev_select                                  *
 *===========================================================================*/
int cdev_select(dev_t dev, int ops)
{
/* Initiate a select call on a device. Return OK iff the request was sent.
 * This function explicitly bypasses cdev_get() since it must not do CTTY
 * mapping, because a) the caller already has done that, b) "fp" may be wrong.
 */
  devmajor_t major;
  message dev_mess;
  struct dmap *dp;
  int r;

  /* Determine task dmap, without CTTY mapping. */
  assert(dev != NO_DEV);
  major = major(dev);
  assert(major >= 0 && major < NR_DEVICES);
  assert(major != CTTY_MAJOR);
  dp = &dmap[major];

  /* Prepare the request message. */
  memset(&dev_mess, 0, sizeof(dev_mess));

  dev_mess.m_type = CDEV_SELECT;
  dev_mess.m_vfs_lchardriver_select.minor = minor(dev);
  dev_mess.m_vfs_lchardriver_select.ops = ops;

  /* Send the request to the driver. */
  if ((r = asynsend3(dp->dmap_driver, &dev_mess, AMF_NOREPLY)) != OK)
        panic("VFS: asynsend in cdev_select failed: %d", r);

  return(OK);
}
/*===========================================================================*
 *                              cdev_cancel                                  *
 *===========================================================================*/
int cdev_cancel(dev_t dev)
{
/* Cancel an I/O request, blocking until it has been cancelled. */
  devminor_t minor_dev;
  message dev_mess;
  struct dmap *dp;
  int r;

  /* Determine task dmap. */
  if ((dp = cdev_get(dev, &minor_dev)) == NULL)
        return(ENXIO);

  /* Prepare the request message. */
  memset(&dev_mess, 0, sizeof(dev_mess));

  dev_mess.m_type = CDEV_CANCEL;
  dev_mess.m_vfs_lchardriver_cancel.minor = minor_dev;
  dev_mess.m_vfs_lchardriver_cancel.id = fp->fp_endpoint;

  /* Send the request to the driver. */
  if ((r = asynsend3(dp->dmap_driver, &dev_mess, AMF_NOREPLY)) != OK)
        panic("VFS: asynsend in cdev_cancel failed: %d", r);

  /* Suspend this thread until we have received the response. */
  fp->fp_task = dp->dmap_driver;
  self->w_task = dp->dmap_driver;
  self->w_drv_sendrec = &dev_mess;

  worker_wait();

  self->w_task = NONE;
  self->w_drv_sendrec = NULL;

  /* Clean up and return the result (note: the request may have completed). */
  if (GRANT_VALID(fp->fp_grant)) {
        (void) cpf_revoke(fp->fp_grant);
        fp->fp_grant = GRANT_INVALID;
  }

  r = dev_mess.m_lchardriver_vfs_reply.status;
  return (r == EAGAIN) ? EINTR : r;
}
/*===========================================================================*
 *                              block_io                                     *
 *===========================================================================*/
static int block_io(endpoint_t driver_e, message *mess_ptr)
{
/* Perform I/O on a block device. The current thread is suspended until a reply
 * comes in from the driver.
 */
  int r, status, retry_count;
  message mess_retry;

  assert(IS_BDEV_RQ(mess_ptr->m_type));
  mess_retry = *mess_ptr;
  retry_count = 0;
  status = OK;

  do {
        r = drv_sendrec(driver_e, mess_ptr);
        if (r != OK)
                break;

        status = mess_ptr->m_lblockdriver_lbdev_reply.status;
        if (status == ERESTART) {
                /* The driver asked for a restart; resend the saved request. */
                retry_count++;
                *mess_ptr = mess_retry;
        }
  } while (status == ERESTART && retry_count < 5);

  /* If we failed to restart the request, return EIO */
  if (status == ERESTART && retry_count >= 5)
        return EIO;

  if (r != OK) {
        if (r == EDEADSRCDST || r == EDEADEPT) {
                printf("VFS: dead driver %d\n", driver_e);
                dmap_unmap_by_endpt(driver_e);
                return(r);
        } else if (r == ELOCKED) {
                printf("VFS: ELOCKED talking to %d\n", driver_e);
                return(r);
        }
        panic("block_io: can't send/receive: %d", r);
  }

  return(OK);
}
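/* Illustrative scenario for the retry loop above (the specific cause is an
 * example): if the block driver crashes and is restarted by RS while a
 * request is outstanding, the reply status is ERESTART; the pristine copy in
 * mess_retry is then resent to the new driver instance, and only after five
 * consecutive ERESTART replies does VFS give up and report EIO.
 */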
/*===========================================================================*
 *                              bdev_up                                      *
 *===========================================================================*/
void bdev_up(devmajor_t maj)
{
/* A new block device driver has been mapped in. This may affect both mounted
 * file systems and open block-special files.
 */
  int r, found, bits;
  struct filp *rfilp;
  struct vmnt *vmp;
  struct vnode *vp;
  char *label;

  if (maj < 0 || maj >= NR_DEVICES) panic("VFS: out-of-bound major");
  label = dmap[maj].dmap_label;
  found = 0;

  /* For each block-special file that was previously opened on the affected
   * device, we need to reopen it on the new driver.
   */
  for (rfilp = filp; rfilp < &filp[NR_FILPS]; rfilp++) {
        if (rfilp->filp_count < 1 || !(vp = rfilp->filp_vno)) continue;
        if (major(vp->v_sdev) != maj) continue;
        if (!S_ISBLK(vp->v_mode)) continue;

        /* Reopen the device on the driver, once per filp. */
        bits = rfilp->filp_mode & (R_BIT|W_BIT);
        if ((r = bdev_open(vp->v_sdev, bits)) != OK) {
                printf("VFS: mounted dev %d/%d re-open failed: %d.\n",
                        maj, minor(vp->v_sdev), r);
                dmap[maj].dmap_recovering = 0;
                return; /* Give up entirely */
        }

        found = 1;
  }

  /* Tell each affected mounted file system about the new endpoint. */
  for (vmp = &vmnt[0]; vmp < &vmnt[NR_MNTS]; ++vmp) {
        if (major(vmp->m_dev) != maj) continue;

        /* Send the driver label to the mounted file system. */
        if (OK != req_newdriver(vmp->m_fs_e, vmp->m_dev, label))
                printf("VFS: bdev_up: error sending new driver label to %d\n",
                        vmp->m_fs_e);
  }

  /* If any block-special file was open for this major at all, also inform the
   * root file system about the new driver. We do this even if the
   * block-special file is linked to another mounted file system, merely
   * because it is more work to check for that case.
   */
  if (found) {
        if (OK != req_newdriver(ROOT_FS_E, makedev(maj, 0), label))
                printf("VFS: bdev_up: error sending new driver label to %d\n",
                        ROOT_FS_E);
  }
}
/*===========================================================================*
 *                              cdev_generic_reply                           *
 *===========================================================================*/
static void cdev_generic_reply(message *m_ptr)
{
/* A character driver has results for an open, close, read, write, or ioctl
 * call (i.e., everything except select). There may be a thread waiting for
 * these results as part of an ongoing open, close, or (for read/write/ioctl)
 * cancel call. If so, wake up that thread; if not, send a reply to the
 * requesting process. This function MUST NOT block its calling thread.
 */
  struct fproc *rfp;
  struct worker_thread *wp;
  endpoint_t proc_e;
  int slot, r;

  proc_e = m_ptr->m_lchardriver_vfs_reply.id;

  if (m_ptr->m_lchardriver_vfs_reply.status == SUSPEND) {
        printf("VFS: got SUSPEND from %d, not reviving\n", m_ptr->m_source);
        return;
  }

  if (isokendpt(proc_e, &slot) != OK) {
        printf("VFS: proc %d from %d not found\n", proc_e, m_ptr->m_source);
        return;
  }
  rfp = &fproc[slot];
  wp = rfp->fp_worker;
  if (wp != NULL && wp->w_task == who_e) {
        assert(!fp_is_blocked(rfp));
        *wp->w_drv_sendrec = *m_ptr;
        worker_signal(wp);      /* Continue open/close/cancel */
  } else if (rfp->fp_blocked_on != FP_BLOCKED_ON_OTHER ||
                rfp->fp_task != m_ptr->m_source) {
        /* This would typically be caused by a protocol error, i.e. a driver
         * not properly following the character driver protocol rules.
         */
        printf("VFS: proc %d not blocked on %d\n", proc_e, m_ptr->m_source);
  } else {
        /* Some services (inet) use the same infrastructure for nonblocking
         * and cancelled requests, resulting in one of EINTR or EAGAIN when the
         * other is really the appropriate code. Thus, cdev_cancel converts
         * EAGAIN into EINTR, and we convert EINTR into EAGAIN here.
         */
        r = m_ptr->m_lchardriver_vfs_reply.status;
        revive(proc_e, (r == EINTR) ? EAGAIN : r);
  }
}
/*===========================================================================*
 *                              cdev_reply                                   *
 *===========================================================================*/
void cdev_reply(void)
{
/* A character driver has results for us. */

  if (get_dmap(who_e) == NULL) {
        printf("VFS: ignoring char dev reply from unknown driver %d\n", who_e);
        return;
  }

  switch (call_nr) {
  case CDEV_REPLY:
        cdev_generic_reply(&m_in);
        break;
  case CDEV_SEL1_REPLY:
        select_reply1(m_in.m_source, m_in.m_lchardriver_vfs_sel1.minor,
                m_in.m_lchardriver_vfs_sel1.status);
        break;
  case CDEV_SEL2_REPLY:
        select_reply2(m_in.m_source, m_in.m_lchardriver_vfs_sel2.minor,
                m_in.m_lchardriver_vfs_sel2.status);
        break;
  default:
        printf("VFS: char driver %u sent unknown reply %x\n", who_e, call_nr);
  }
}
/*===========================================================================*
 *                              bdev_reply                                   *
 *===========================================================================*/
void bdev_reply(void)
{
/* A block driver has results for a call. There must be a thread waiting for
 * these results - wake it up. This function MUST NOT block its calling thread.
 */
  struct worker_thread *wp;
  struct dmap *dp;

  if ((dp = get_dmap(who_e)) == NULL) {
        printf("VFS: ignoring block dev reply from unknown driver %d\n",
                who_e);
        return;
  }

  if (dp->dmap_servicing == INVALID_THREAD) {
        printf("VFS: ignoring spurious block dev reply from %d\n", who_e);
        return;
  }

  wp = worker_get(dp->dmap_servicing);
  if (wp == NULL || wp->w_task != who_e) {
        printf("VFS: no worker thread waiting for a reply from %d\n", who_e);
        return;
  }

  assert(wp->w_drv_sendrec != NULL);
  *wp->w_drv_sendrec = m_in;
  wp->w_drv_sendrec = NULL;
  worker_signal(wp);
}