servers/vfs/device.c
1 /* When a needed block is not in the cache, it must be fetched from the disk.
2 * Special character files also require I/O. The routines for these are here.
4 * The entry points in this file are:
5 * dev_open: FS opens a device
6 * dev_close: FS closes a device
7 * dev_io: FS does a read or write on a device
8 * dev_status: FS processes callback request alert
9 * gen_opcl: generic call to a task to perform an open/close
10 * gen_io: generic call to a task to perform an I/O operation
11 * no_dev: open/close processing for devices that don't exist
12 * no_dev_io: i/o processing for devices that don't exist
13 * tty_opcl: perform tty-specific processing for open/close
14 * ctty_opcl: perform controlling-tty-specific processing for open/close
15 * ctty_io: perform controlling-tty-specific processing for I/O
16 * do_ioctl: perform the IOCTL system call
17  * pm_setsid: perform the SETSID system call (FS side)
20 #include "fs.h"
21 #include <fcntl.h>
22 #include <assert.h>
23 #include <minix/callnr.h>
24 #include <minix/com.h>
25 #include <minix/endpoint.h>
26 #include <minix/ioctl.h>
27 #include <minix/u64.h>
28 #include "file.h"
29 #include "fproc.h"
30 #include <minix/vfsif.h>
31 #include "vnode.h"
32 #include "vmnt.h"
33 #include "param.h"
35 #define ELEMENTS(a) (sizeof(a)/sizeof((a)[0]))
37 FORWARD _PROTOTYPE( int safe_io_conversion, (endpoint_t, cp_grant_id_t *,
38 int *, cp_grant_id_t *, int,
39 endpoint_t *, void **, int *,
40 vir_bytes, u32_t *) );
41 FORWARD _PROTOTYPE( void safe_io_cleanup, (cp_grant_id_t, cp_grant_id_t *,
42 int) );
43 FORWARD _PROTOTYPE( void restart_reopen, (int maj) );
45 extern int dmap_size;
46 PRIVATE int dummyproc;
49 /*===========================================================================*
50 * dev_open *
51 *===========================================================================*/
52 PUBLIC int dev_open(
53 dev_t dev, /* device to open */
54 int proc, /* process to open for */
55 int flags /* mode bits and flags */
58 int major, r;
59 struct dmap *dp;
61 /* Determine the major device number and call the device class specific
62 * open/close routine. (This is the only routine that must check the
63 * device number for being in range. All others can trust this check.)
65 major = (dev >> MAJOR) & BYTE;
66 if (major >= NR_DEVICES) major = 0;
67 dp = &dmap[major];
68 if (dp->dmap_driver == NONE) return(ENXIO);
69 r = (*dp->dmap_opcl)(DEV_OPEN, dev, proc, flags);
70 return(r);
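/* Illustrative sketch: the device-number convention that dev_open() and the
 * routines below rely on.  MAJOR, MINOR and BYTE are the standard MINIX
 * shift/mask macros; the helper name is hypothetical and the block is kept
 * disabled, as a reference only.
 */
#if 0
static void example_split_dev(dev_t dev)
{
  int major = (dev >> MAJOR) & BYTE;	/* selects the driver: dmap[major] */
  int minor = (dev >> MINOR) & BYTE;	/* passed to that driver as DEVICE */
  printf("dev 0x%x -> major %d, minor %d\n", dev, major, minor);
}
#endif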
74 /*===========================================================================*
75 * dev_reopen *
76 *===========================================================================*/
77 PUBLIC int dev_reopen(
78 dev_t dev, /* device to open */
79 int filp_no, /* filp to reopen for */
80 int flags /* mode bits and flags */
83 int major, r;
84 struct dmap *dp;
86 /* Determine the major device number and call the device class specific
87 * open/close routine. (This is the only routine that must check the
88 * device number for being in range. All others can trust this check.)
90 major = (dev >> MAJOR) & BYTE;
91 if (major >= NR_DEVICES) major = 0;
92 dp = &dmap[major];
93 if (dp->dmap_driver == NONE) return(ENXIO);
94 r = (*dp->dmap_opcl)(DEV_REOPEN, dev, filp_no, flags);
95 if (r == OK) panic("OK on reopen from: %d", dp->dmap_driver);
96 if (r == SUSPEND) r = OK;
97 return(r);
101 /*===========================================================================*
102 * dev_close *
103 *===========================================================================*/
104 PUBLIC int dev_close(
105 dev_t dev, /* device to close */
106 int filp_no
109 int r;
111 /* See if driver is roughly valid. */
112 if (dmap[(dev >> MAJOR) & BYTE].dmap_driver == NONE) return(ENXIO);
113 r = (*dmap[(dev >> MAJOR) & BYTE].dmap_opcl)(DEV_CLOSE, dev, filp_no, 0);
114 return(r);
118 /*===========================================================================*
119 * suspended_ep *
120 *===========================================================================*/
121 endpoint_t suspended_ep(endpoint_t driver, cp_grant_id_t g)
123 /* A process is suspended on a driver for which FS issued
124 * a grant. Find out which process it was.
126 struct fproc *rfp;
127 for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
128 if(rfp->fp_pid == PID_FREE) continue;
129 if(rfp->fp_blocked_on == FP_BLOCKED_ON_OTHER &&
130 rfp->fp_task == driver && rfp->fp_grant == g)
131 return rfp->fp_endpoint;
134 return(NONE);
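/* Illustrative sketch: how a driver's DEV_REVIVE that names FS as the
 * endpoint is mapped back to the blocked user process by matching the
 * grant, exactly as dev_status() below does with suspended_ep().  The
 * variables (driver_e, st) are hypothetical stand-ins for the driver
 * endpoint and its status message.
 */
#if 0
  endpoint_t who;
  who = suspended_ep(driver_e, (cp_grant_id_t) st.REP_IO_GRANT);
  if (who != NONE) revive(who, st.REP_STATUS);
#endif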
138 /*===========================================================================*
139 * dev_status *
140 *===========================================================================*/
141 PUBLIC void dev_status(message *m)
143 message st;
144 int d, get_more = 1;
145 endpoint_t endpt;
147 for(d = 0; d < NR_DEVICES; d++)
148 if (dmap[d].dmap_driver != NONE && dmap[d].dmap_driver == m->m_source)
149 break;
151 if (d >= NR_DEVICES) return;
152 if (dmap[d].dmap_style == STYLE_DEVA) {
153 printf("dev_status: not doing dev_status for async driver %d\n",
154 m->m_source);
155 return;
158 do {
159 int r;
160 st.m_type = DEV_STATUS;
161 r = sendrec(m->m_source, &st);
162 if(r == OK && st.REP_STATUS == ERESTART) r = EDEADSRCDST;
163 if (r != OK) {
164 printf("DEV_STATUS failed to %d: %d\n", m->m_source, r);
165 if (r == EDEADSRCDST) return;
166 panic("couldn't sendrec for DEV_STATUS: %d", r);
169 switch(st.m_type) {
170 case DEV_REVIVE:
171 endpt = st.REP_ENDPT;
172 if(endpt == FS_PROC_NR) {
173 endpt = suspended_ep(m->m_source,
174 st.REP_IO_GRANT);
175 if(endpt == NONE) {
176 printf("FS: proc with grant %d"
177 " from %d not found (revive)\n",
178 st.REP_IO_GRANT, st.m_source);
179 continue;
182 revive(endpt, st.REP_STATUS);
183 break;
184 case DEV_IO_READY:
185 select_notified(d, st.DEV_MINOR,
186 st.DEV_SEL_OPS);
187 break;
188 default:
189 printf("FS: unrecognized reply %d to "
190 "DEV_STATUS\n", st.m_type);
191 /* Fall through. */
192 case DEV_NO_STATUS:
193 get_more = 0;
194 break;
196 } while(get_more);
198 return;
202 /*===========================================================================*
203 * safe_io_conversion *
204 *===========================================================================*/
205 PRIVATE int safe_io_conversion(driver, gid, op, gids, gids_size,
206 io_ept, buf, vec_grants, bytes, pos_lo)
207 endpoint_t driver;
208 cp_grant_id_t *gid;
209 int *op;
210 cp_grant_id_t *gids;
211 int gids_size;
212 endpoint_t *io_ept;
213 void **buf;
214 int *vec_grants;
215 vir_bytes bytes;
216 u32_t *pos_lo;
218 int access = 0, size, j;
219 iovec_t *v;
220 static iovec_t new_iovec[NR_IOREQS];
222 /* Number of grants allocated in vector I/O. */
223 *vec_grants = 0;
225 /* Driver can handle it - change request to a safe one. */
226 *gid = GRANT_INVALID;
228 switch(*op) {
229 case VFS_DEV_READ:
230 case VFS_DEV_WRITE:
231 /* Change to safe op. */
232 *op = *op == VFS_DEV_READ ? DEV_READ_S : DEV_WRITE_S;
234 *gid = cpf_grant_magic(driver, *io_ept, (vir_bytes) *buf, bytes,
235 *op == DEV_READ_S ? CPF_WRITE : CPF_READ);
236 if (*gid < 0)
237 panic("cpf_grant_magic of buffer failed");
238 break;
239 case VFS_DEV_GATHER:
240 case VFS_DEV_SCATTER:
241 /* Change to safe op. */
242 *op = *op == VFS_DEV_GATHER ? DEV_GATHER_S : DEV_SCATTER_S;
244 /* Grant access to my new i/o vector. */
245 *gid = cpf_grant_direct(driver, (vir_bytes) new_iovec,
246 bytes * sizeof(iovec_t), CPF_READ|CPF_WRITE);
247 if (*gid < 0)
248 panic("cpf_grant_direct of vector failed");
250 v = (iovec_t *) *buf;
251 /* Grant access to i/o buffers. */
252 for(j = 0; j < bytes; j++) {
253 if(j >= NR_IOREQS) panic("vec too big: %d", bytes);
255 new_iovec[j].iov_addr =
256 gids[j] =
257 cpf_grant_direct(driver, (vir_bytes) v[j].iov_addr, v[j].iov_size,
258 *op == DEV_GATHER_S ? CPF_WRITE : CPF_READ);
260 if(!GRANT_VALID(gids[j]))
261 panic("grant to iovec buf failed");
263 new_iovec[j].iov_size = v[j].iov_size;
264 (*vec_grants)++;
267 /* Set user's vector to the new one. */
268 *buf = new_iovec;
269 break;
270 case VFS_DEV_IOCTL:
271 *pos_lo = *io_ept; /* Old endpoint in POSITION field. */
272 *op = DEV_IOCTL_S;
273 if(_MINIX_IOCTL_IOR(m_in.REQUEST)) access |= CPF_WRITE;
274 if(_MINIX_IOCTL_IOW(m_in.REQUEST)) access |= CPF_READ;
275 if(_MINIX_IOCTL_BIG(m_in.REQUEST))
276 size = _MINIX_IOCTL_SIZE_BIG(m_in.REQUEST);
277 else
278 size = _MINIX_IOCTL_SIZE(m_in.REQUEST);
281 /* Do this even if no I/O happens with the ioctl, in
282 * order to disambiguate requests with DEV_IOCTL_S.
284 *gid = cpf_grant_magic(driver, *io_ept, (vir_bytes) *buf, size,
285 access);
286 if (*gid < 0)
287 panic("cpf_grant_magic failed (ioctl)");
289 break;
290 case VFS_DEV_SELECT:
291 *op = DEV_SELECT;
292 break;
293 default:
294 panic("safe_io_conversion: unknown operation: %d", *op);
297 /* If we have converted to a safe operation, I/O
298 * endpoint becomes FS if it wasn't already.
300 if(GRANT_VALID(*gid)) {
301 *io_ept = FS_PROC_NR;
302 return 1;
305 /* Not converted to a safe operation (because there is no
306 * copying involved in this operation).
308 return 0;
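/* Illustrative sketch of the read case handled above: a plain VFS_DEV_READ
 * on a user buffer becomes a DEV_READ_S whose grant lets the driver write
 * directly into the caller's memory.  The variable names are hypothetical;
 * the grant call mirrors the one in safe_io_conversion().
 */
#if 0
  cp_grant_id_t gid;
  gid = cpf_grant_magic(driver_e,		/* grantee: the device driver */
	user_e,					/* owner of the buffer        */
	(vir_bytes) user_buf, nbytes,		/* region the driver may use  */
	CPF_WRITE);				/* a read fills the buffer    */
  /* The request then carries m_type = DEV_READ_S, IO_ENDPT = FS_PROC_NR and
   * IO_GRANT = gid; safe_io_cleanup() revokes the grant when I/O completes.
   */
#endif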
311 /*===========================================================================*
312 * safe_io_cleanup *
313 *===========================================================================*/
314 PRIVATE void safe_io_cleanup(gid, gids, gids_size)
315 cp_grant_id_t gid;
316 cp_grant_id_t *gids;
317 int gids_size;
319 /* Free resources (specifically, grants) allocated by safe_io_conversion(). */
320 int j;
322 cpf_revoke(gid);
323 for(j = 0; j < gids_size; j++)
324 cpf_revoke(gids[j]);
328 /*===========================================================================*
329 * dev_io *
330 *===========================================================================*/
331 PUBLIC int dev_io(
332 int op, /* DEV_READ, DEV_WRITE, DEV_IOCTL, etc. */
333 dev_t dev, /* major-minor device number */
334 int proc_e, /* in whose address space is buf? */
335 void *buf, /* virtual address of the buffer */
336 u64_t pos, /* byte position */
337 int bytes, /* how many bytes to transfer */
338 int flags, /* special flags, like O_NONBLOCK */
339 int suspend_reopen /* Just suspend the process */
342 /* Read or write from a device. The parameter 'dev' tells which one. */
343 struct dmap *dp;
344 u32_t pos_lo, pos_high;
345 message dev_mess;
346 cp_grant_id_t gid = GRANT_INVALID;
347 static cp_grant_id_t gids[NR_IOREQS];
348 int vec_grants = 0, safe;
349 void *buf_used;
350 endpoint_t ioproc;
352 pos_lo= ex64lo(pos);
353 pos_high= ex64hi(pos);
355 /* Determine task dmap. */
356 dp = &dmap[(dev >> MAJOR) & BYTE];
358 /* See if driver is roughly valid. */
359 if (dp->dmap_driver == NONE) {
360 printf("FS: dev_io: no driver for dev %x\n", dev);
361 return(ENXIO);
364 if (suspend_reopen) {
365 /* Suspend user. */
366 fp->fp_grant = GRANT_INVALID;
367 fp->fp_ioproc = NONE;
368 wait_for(dp->dmap_driver);
369 fp->fp_flags |= SUSP_REOPEN;
370 return(SUSPEND);
373 if(isokendpt(dp->dmap_driver, &dummyproc) != OK) {
374 printf("FS: dev_io: old driver for dev %x (%d)\n",dev,dp->dmap_driver);
375 return(ENXIO);
378 /* By default, these are right. */
379 dev_mess.IO_ENDPT = proc_e;
380 dev_mess.ADDRESS = buf;
382 /* Convert DEV_* to DEV_*_S variants. */
383 buf_used = buf;
384 safe = safe_io_conversion(dp->dmap_driver, &gid, &op, gids, NR_IOREQS,
385 (endpoint_t*) &dev_mess.IO_ENDPT, &buf_used,
386 &vec_grants, bytes, &pos_lo);
388 if(buf != buf_used)
389 panic("dev_io: safe_io_conversion changed buffer");
391 /* If the safe conversion was done, set the ADDRESS to
392 * the grant id.
394 if(safe) dev_mess.IO_GRANT = (char *) gid;
396 /* Set up the rest of the message passed to task. */
397 dev_mess.m_type = op;
398 dev_mess.DEVICE = (dev >> MINOR) & BYTE;
399 dev_mess.POSITION = pos_lo;
400 dev_mess.COUNT = bytes;
401 dev_mess.HIGHPOS = pos_high;
403 /* This will be used if the i/o is suspended. */
404 ioproc = dev_mess.IO_ENDPT;
406 /* Call the task. */
407 (*dp->dmap_io)(dp->dmap_driver, &dev_mess);
409 if(dp->dmap_driver == NONE) {
410 /* Driver has vanished. */
411 printf("Driver gone?\n");
412 if(safe) safe_io_cleanup(gid, gids, vec_grants);
413 return(EIO);
416 /* Task has completed. See if call completed. */
417 if (dev_mess.REP_STATUS == SUSPEND) {
418 if(vec_grants > 0) panic("SUSPEND on vectored i/o");
420 /* fp is uninitialized at init time. */
421 if(!fp) panic("SUSPEND on NULL fp");
423 if ((flags & O_NONBLOCK) && !(dp->dmap_style == STYLE_DEVA)) {
424 /* Not supposed to block. */
425 dev_mess.m_type = CANCEL;
426 dev_mess.IO_ENDPT = ioproc;
427 dev_mess.IO_GRANT = (char *) gid;
429 /* This R_BIT/W_BIT check taken from suspend()/unpause()
430 * logic. Mode is expected in the COUNT field.
432 dev_mess.COUNT = 0;
433 if (call_nr == READ) dev_mess.COUNT = R_BIT;
434 else if (call_nr == WRITE) dev_mess.COUNT = W_BIT;
435 dev_mess.DEVICE = (dev >> MINOR) & BYTE;
436 (*dp->dmap_io)(dp->dmap_driver, &dev_mess);
437 if (dev_mess.REP_STATUS == EINTR) dev_mess.REP_STATUS = EAGAIN;
438 } else {
439 /* select() will do suspending itself. */
440 if(op != DEV_SELECT) {
441 /* Suspend user. */
442 wait_for(dp->dmap_driver);
444 assert(!GRANT_VALID(fp->fp_grant));
445 fp->fp_grant = gid; /* revoke this when unsuspended. */
446 fp->fp_ioproc = ioproc;
448 if (flags & O_NONBLOCK) {
449 /* Not supposed to block, send cancel message */
450 dev_mess.m_type = CANCEL;
451 dev_mess.IO_ENDPT = ioproc;
452 dev_mess.IO_GRANT = (char *) gid;
454 /* This R_BIT/W_BIT check taken from suspend()/unpause()
455 * logic. Mode is expected in the COUNT field.
457 dev_mess.COUNT = 0;
458 if(call_nr == READ) dev_mess.COUNT = R_BIT;
459 else if(call_nr == WRITE) dev_mess.COUNT = W_BIT;
460 dev_mess.DEVICE = (dev >> MINOR) & BYTE;
461 (*dp->dmap_io)(dp->dmap_driver, &dev_mess);
463 /* Should do something about EINTR -> EAGAIN mapping */
465 return(SUSPEND);
469 /* No suspend, or cancelled suspend, so I/O is over and can be cleaned up. */
470 if(safe) safe_io_cleanup(gid, gids, vec_grants);
472 return(dev_mess.REP_STATUS);
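/* Illustrative caller sketch: a read on a character special file reaches
 * dev_io() roughly as below (compare do_ioctl() further down).  The
 * surrounding variables (vp, f, who_e, m_in fields) are assumed from the
 * usual VFS request context and are not defined here.
 */
#if 0
  r = dev_io(VFS_DEV_READ, (dev_t) vp->v_sdev, who_e, m_in.buffer,
	cvu64(0) /* position */, m_in.nbytes, f->filp_flags,
	0 /* suspend_reopen */);
  if (r == SUSPEND) {
	/* Caller is blocked on the driver; the reply arrives later through
	 * dev_status()/revive().
	 */
  }
#endif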
475 /*===========================================================================*
476 * gen_opcl *
477 *===========================================================================*/
478 PUBLIC int gen_opcl(
479 int op, /* operation, DEV_OPEN or DEV_CLOSE */
480 dev_t dev, /* device to open or close */
481 int proc_e, /* process to open/close for */
482 int flags /* mode bits and flags */
485 /* Called from the dmap struct in table.c on opens & closes of special files.*/
486 int r;
487 struct dmap *dp;
488 message dev_mess;
490 /* Determine task dmap. */
491 dp = &dmap[(dev >> MAJOR) & BYTE];
493 dev_mess.m_type = op;
494 dev_mess.DEVICE = (dev >> MINOR) & BYTE;
495 dev_mess.IO_ENDPT = proc_e;
496 dev_mess.COUNT = flags;
498 if (dp->dmap_driver == NONE) {
499 printf("FS: gen_opcl: no driver for dev %x\n", dev);
500 return(ENXIO);
503 /* Call the task. */
504 r= (*dp->dmap_io)(dp->dmap_driver, &dev_mess);
505 if (r != OK) return(r);
507 return(dev_mess.REP_STATUS);
510 /*===========================================================================*
511 * tty_opcl *
512 *===========================================================================*/
513 PUBLIC int tty_opcl(
514 int op, /* operation, DEV_OPEN or DEV_CLOSE */
515 dev_t dev, /* device to open or close */
516 int proc_e, /* process to open/close for */
517 int flags /* mode bits and flags */
520 /* This procedure is called from the dmap struct on tty open/close. */
522 int r;
523 register struct fproc *rfp;
525 /* Add O_NOCTTY to the flags if this process is not a session leader, or
526  * if it already has a controlling tty, or if it is someone else's
527 * controlling tty.
529 if (!fp->fp_sesldr || fp->fp_tty != 0) {
530 flags |= O_NOCTTY;
531 } else {
532 for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
533 if(rfp->fp_pid == PID_FREE) continue;
534 if (rfp->fp_tty == dev) flags |= O_NOCTTY;
538 r = gen_opcl(op, dev, proc_e, flags);
540 /* Did this call make the tty the controlling tty? */
541 if (r == 1) {
542 fp->fp_tty = dev;
543 r = OK;
545 return(r);
549 /*===========================================================================*
550 * ctty_opcl *
551 *===========================================================================*/
552 PUBLIC int ctty_opcl(
553 int op, /* operation, DEV_OPEN or DEV_CLOSE */
554 dev_t dev, /* device to open or close */
555 int proc_e, /* process to open/close for */
556 int flags /* mode bits and flags */
559 /* This procedure is called from the dmap struct in table.c on opening/closing
560 * /dev/tty, the magic device that translates to the controlling tty.
563 return(fp->fp_tty == 0 ? ENXIO : OK);
567 /*===========================================================================*
568 * pm_setsid *
569 *===========================================================================*/
570 PUBLIC void pm_setsid(proc_e)
571 int proc_e;
573 /* Perform the FS side of the SETSID call, i.e. get rid of the controlling
574 * terminal of a process, and make the process a session leader.
576 register struct fproc *rfp;
577 int slot;
579 /* Make the process a session leader with no controlling tty. */
580 okendpt(proc_e, &slot);
581 rfp = &fproc[slot];
582 rfp->fp_sesldr = TRUE;
583 rfp->fp_tty = 0;
587 /*===========================================================================*
588 * do_ioctl *
589 *===========================================================================*/
590 PUBLIC int do_ioctl()
592 /* Perform the ioctl(ls_fd, request, argx) system call (uses m2 fmt). */
594 int suspend_reopen;
595 struct filp *f;
596 register struct vnode *vp;
597 dev_t dev;
599 if ((f = get_filp(m_in.ls_fd)) == NIL_FILP) return(err_code);
600 vp = f->filp_vno; /* get vnode pointer */
601 if ((vp->v_mode & I_TYPE) != I_CHAR_SPECIAL &&
602 (vp->v_mode & I_TYPE) != I_BLOCK_SPECIAL) return(ENOTTY);
603 suspend_reopen= (f->filp_state != FS_NORMAL);
604 dev = (dev_t) vp->v_sdev;
606 return dev_io(VFS_DEV_IOCTL, dev, who_e, m_in.ADDRESS, cvu64(0),
607 m_in.REQUEST, f->filp_flags, suspend_reopen);
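/* Illustrative sketch: the ioctl request code itself encodes the direction
 * and size bits that safe_io_conversion() reads back with _MINIX_IOCTL_IOR(),
 * _MINIX_IOCTL_IOW() and _MINIX_IOCTL_SIZE().  The request name and struct
 * below are hypothetical, built with _IOR() from <minix/ioctl.h>.
 */
#if 0
  struct example_state { int flags; };
  #define EXAMPLE_GETSTATE _IOR('x', 1, struct example_state)
  /* _MINIX_IOCTL_IOR(EXAMPLE_GETSTATE)  != 0 -> grant gets CPF_WRITE       */
  /* _MINIX_IOCTL_SIZE(EXAMPLE_GETSTATE) == sizeof(struct example_state)    */
#endif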
611 /*===========================================================================*
612 * gen_io *
613 *===========================================================================*/
614 PUBLIC int gen_io(task_nr, mess_ptr)
615 int task_nr; /* which task to call */
616 message *mess_ptr; /* pointer to message for task */
618 /* All file system I/O ultimately comes down to I/O on major/minor device
619 * pairs. These lead to calls on the following routines via the dmap table.
622 int r, proc_e;
624 if(task_nr == SYSTEM) printf("VFS: sending %d to SYSTEM\n", mess_ptr->m_type);
626 proc_e = mess_ptr->IO_ENDPT;
627 r = sendrec(task_nr, mess_ptr);
628 if(r == OK && mess_ptr->REP_STATUS == ERESTART) r = EDEADSRCDST;
629 if (r != OK) {
630 if (r == EDEADSRCDST) {
631 printf("fs: dead driver %d\n", task_nr);
632 dmap_unmap_by_endpt(task_nr);
633 return(r);
635 if (r == ELOCKED) {
636 printf("fs: ELOCKED talking to %d\n", task_nr);
637 return(r);
639 panic("call_task: can't send/receive: %d", r);
642 /* Did the process we did the sendrec() for get a result? */
643 if (mess_ptr->REP_ENDPT != proc_e) {
644 printf("fs: strange device reply from %d, type = %d, proc = %d "
645 "(not %d) (2) ignored\n", mess_ptr->m_source, mess_ptr->m_type,
646 proc_e, mess_ptr->REP_ENDPT);
647 return(EIO);
650 return(OK);
654 /*===========================================================================*
655 * asyn_io *
656 *===========================================================================*/
657 PUBLIC int asyn_io(task_nr, mess_ptr)
658 int task_nr; /* which task to call */
659 message *mess_ptr; /* pointer to message for task */
661 /* All file system I/O ultimately comes down to I/O on major/minor device
662 * pairs. These lead to calls on the following routines via the dmap table.
665 int r;
667 r = asynsend(task_nr, mess_ptr);
668 if (r != OK) panic("asyn_io: asynsend failed: %d", r);
670 /* Fake a SUSPEND */
671 mess_ptr->REP_STATUS = SUSPEND;
672 return(OK);
676 /*===========================================================================*
677 * ctty_io *
678 *===========================================================================*/
679 PUBLIC int ctty_io(task_nr, mess_ptr)
680 int task_nr; /* not used - for compatibility with dmap_t */
681 message *mess_ptr; /* pointer to message for task */
683 /* This routine is only called for one device, namely /dev/tty. Its job
684 * is to change the message to use the controlling terminal, instead of the
685 * major/minor pair for /dev/tty itself.
688 struct dmap *dp;
690 if (fp->fp_tty == 0) {
691 /* No controlling tty present anymore, return an I/O error. */
692 mess_ptr->REP_STATUS = EIO;
693 } else {
694 /* Substitute the controlling terminal device. */
695 dp = &dmap[(fp->fp_tty >> MAJOR) & BYTE];
696 mess_ptr->DEVICE = (fp->fp_tty >> MINOR) & BYTE;
698 if (dp->dmap_driver == NONE) {
699 printf("FS: ctty_io: no driver for dev\n");
700 return(EIO);
703 if(isokendpt(dp->dmap_driver, &dummyproc) != OK) {
704 printf("FS: ctty_io: old driver %d\n", dp->dmap_driver);
705 return(EIO);
708 (*dp->dmap_io)(dp->dmap_driver, mess_ptr);
710 return(OK);
714 /*===========================================================================*
715 * no_dev *
716 *===========================================================================*/
717 PUBLIC int no_dev(
718 int UNUSED(op), /* operation, DEV_OPEN or DEV_CLOSE */
719 dev_t UNUSED(dev), /* device to open or close */
720 int UNUSED(proc), /* process to open/close for */
721 int UNUSED(flags) /* mode bits and flags */
724 /* Called when opening a nonexistent device. */
725 return(ENODEV);
728 /*===========================================================================*
729 * no_dev_io *
730 *===========================================================================*/
731 PUBLIC int no_dev_io(int proc, message *m)
733 /* Called when doing i/o on a nonexistent device. */
734 printf("VFS: I/O on unmapped device number\n");
735 return(EIO);
739 /*===========================================================================*
740 * clone_opcl *
741 *===========================================================================*/
742 PUBLIC int clone_opcl(
743 int op, /* operation, DEV_OPEN or DEV_CLOSE */
744 dev_t dev, /* device to open or close */
745 int proc_e, /* process to open/close for */
746 int flags /* mode bits and flags */
749 /* Some devices need special processing upon open. Such a device is "cloned",
750  * i.e. on a successful open it is replaced by a new device with a new unique
751 * minor device number. This new device number identifies a new object (such
752 * as a new network connection) that has been allocated within a task.
754 struct dmap *dp;
755 int r, minor;
756 message dev_mess;
758 /* Determine task dmap. */
759 dp = &dmap[(dev >> MAJOR) & BYTE];
760 minor = (dev >> MINOR) & BYTE;
762 dev_mess.m_type = op;
763 dev_mess.DEVICE = minor;
764 dev_mess.IO_ENDPT = proc_e;
765 dev_mess.COUNT = flags;
768 if (dp->dmap_driver == NONE) {
769 printf("VFS clone_opcl: no driver for dev %x\n", dev);
770 return(ENXIO);
773 if(isokendpt(dp->dmap_driver, &dummyproc) != OK) {
774 printf("VFS clone_opcl: bad driver endpoint for dev %x (%d)\n", dev,
775 dp->dmap_driver);
776 return(ENXIO);
779 /* Call the task. */
780 r = (*dp->dmap_io)(dp->dmap_driver, &dev_mess);
781 if (r != OK) return(r);
783 if (op == DEV_OPEN && dev_mess.REP_STATUS >= 0) {
784 if (dev_mess.REP_STATUS != minor) {
785 struct vnode *vp;
786 struct node_details res;
788 /* A new minor device number has been returned.
789 * Request PFS to create a temporary device file to hold it.
792 /* Device number of the new device. */
793 dev = (dev & ~(BYTE << MINOR)) | (dev_mess.REP_STATUS << MINOR);
795 /* Issue request */
796 r = req_newnode(PFS_PROC_NR, fp->fp_effuid, fp->fp_effgid,
797 ALL_MODES | I_CHAR_SPECIAL, dev, &res);
798 if (r != OK) {
799 (void) clone_opcl(DEV_CLOSE, dev, proc_e, 0);
800 return r;
803 /* Drop old node and use the new values */
804 vp = fp->fp_filp[m_in.fd]->filp_vno;
806 put_vnode(vp);
807 if ((vp = get_free_vnode()) == NIL_VNODE)
808 vp = fp->fp_filp[m_in.fd]->filp_vno;
810 vp->v_fs_e = res.fs_e;
811 vp->v_vmnt = NIL_VMNT;
812 vp->v_dev = NO_DEV;
813 vp->v_fs_e = res.fs_e;
814 vp->v_inode_nr = res.inode_nr;
815 vp->v_mode = res.fmode;
816 vp->v_sdev = dev;
817 vp->v_fs_count = 1;
818 vp->v_ref_count = 1;
819 fp->fp_filp[m_in.fd]->filp_vno = vp;
821 dev_mess.REP_STATUS = OK;
823 return(dev_mess.REP_STATUS);
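/* Illustrative sketch of the clone protocol from the driver's side: on
 * DEV_OPEN the driver may reply with a freshly allocated minor number as a
 * non-negative status instead of OK, and clone_opcl() above then rebinds
 * the filp to that minor through PFS.  The names below (alloc_connection,
 * reply_status, new_minor) are hypothetical.
 */
#if 0
  case DEV_OPEN:
	new_minor = alloc_connection();	/* driver-private bookkeeping */
	reply_status = (new_minor >= 0) ? new_minor : ENFILE;
	break;
#endif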
827 /*===========================================================================*
828 * dev_up *
829 *===========================================================================*/
830 PUBLIC void dev_up(int maj)
832 /* A new device driver has been mapped in. This function
833 * checks if any filesystems are mounted on it, and if so,
834 * dev_open()s them so the filesystem can be reused.
836 int r, new_driver_e, needs_reopen, fd_nr;
837 struct filp *fp;
838 struct vmnt *vmp;
839 struct fproc *rfp;
840 struct vnode *vp;
842 /* Open a device once for every filp that's opened on it,
843 * and once for every filesystem mounted from it.
845 new_driver_e = dmap[maj].dmap_driver;
847 for (vmp = &vmnt[0]; vmp < &vmnt[NR_MNTS]; ++vmp) {
848 int minor;
849 if (vmp->m_dev == NO_DEV) continue;
850 if ( ((vmp->m_dev >> MAJOR) & BYTE) != maj) continue;
851 minor = ((vmp->m_dev >> MINOR) & BYTE);
853 if ((r = dev_open(vmp->m_dev, FS_PROC_NR,
854 vmp->m_flags ? R_BIT : (R_BIT|W_BIT))) != OK) {
855 printf("VFS: mounted dev %d/%d re-open failed: %d.\n",
856 maj, minor, r);
859 /* Send new driver endpoint */
860 if (OK != req_newdriver(vmp->m_fs_e, vmp->m_dev, new_driver_e))
861 printf("VFS dev_up: error sending new driver endpoint."
862 " FS_e: %d req_nr: %d\n", vmp->m_fs_e, REQ_NEW_DRIVER);
865 /* Look for processes that are suspended in an OPEN call. Set SUSP_REOPEN
866 * to indicate that this process was suspended before the call to dev_up.
868 for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
869 if(rfp->fp_pid == PID_FREE) continue;
870 if(rfp->fp_blocked_on != FP_BLOCKED_ON_DOPEN) continue;
872 printf("dev_up: found process in FP_BLOCKED_ON_DOPEN, fd %d\n",
873 rfp->fp_fd >> 8);
874 fd_nr = (rfp->fp_fd >> 8);
875 fp = rfp->fp_filp[fd_nr];
876 vp = fp->filp_vno;
877 if (!vp) panic("restart_reopen: no vp");
878 if ((vp->v_mode & I_TYPE) != I_CHAR_SPECIAL) continue;
879 if (((vp->v_sdev >> MAJOR) & BYTE) != maj) continue;
881 rfp->fp_flags |= SUSP_REOPEN;
884 needs_reopen= FALSE;
885 for (fp = filp; fp < &filp[NR_FILPS]; fp++) {
886 struct vnode *vp;
888 if(fp->filp_count < 1 || !(vp = fp->filp_vno)) continue;
889 if(((vp->v_sdev >> MAJOR) & BYTE) != maj) continue;
890 if(!(vp->v_mode & (I_BLOCK_SPECIAL|I_CHAR_SPECIAL))) continue;
892 fp->filp_state = FS_NEEDS_REOPEN;
893 needs_reopen = TRUE;
896 if (needs_reopen)
897 restart_reopen(maj);
902 /*===========================================================================*
903 * restart_reopen *
904 *===========================================================================*/
905 PRIVATE void restart_reopen(maj)
906 int maj;
908 int n, r, minor, fd_nr;
909 endpoint_t driver_e;
910 struct vnode *vp;
911 struct filp *fp;
912 struct fproc *rfp;
914 for (fp = filp; fp < &filp[NR_FILPS]; fp++) {
915 if (fp->filp_count < 1 || !(vp = fp->filp_vno)) continue;
916 if (fp->filp_state != FS_NEEDS_REOPEN) continue;
917 if (((vp->v_sdev >> MAJOR) & BYTE) != maj) continue;
918 if ((vp->v_mode & I_TYPE) != I_CHAR_SPECIAL) continue;
919 minor = ((vp->v_sdev >> MINOR) & BYTE);
921 if (!(fp->filp_flags & O_REOPEN)) {
922 /* File descriptor is to be closed when driver restarts. */
923 n = invalidate(fp);
924 if (n != fp->filp_count) {
925 printf("VFS: warning: invalidate/count "
926 "discrepancy (%d, %d)\n", n, fp->filp_count);
928 fp->filp_count = 0;
929 continue;
932 r = dev_reopen(vp->v_sdev, fp-filp, vp->v_mode & (R_BIT|W_BIT));
933 if (r == OK) return;
935 /* Device could not be reopened. Invalidate all filps on that device.*/
936 n = invalidate(fp);
937 if (n != fp->filp_count) {
938 printf("VFS: warning: invalidate/count "
939 "discrepancy (%d, %d)\n", n, fp->filp_count);
941 fp->filp_count = 0;
942 printf("VFS: file on dev %d/%d re-open failed: %d; "
943 "invalidated %d fd's.\n", maj, minor, r, n);
946 /* Nothing more to re-open. Restart suspended processes */
947 driver_e= dmap[maj].dmap_driver;
949 for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
950 if(rfp->fp_pid == PID_FREE) continue;
951 if(rfp->fp_blocked_on == FP_BLOCKED_ON_OTHER &&
952 rfp->fp_task == driver_e && (rfp->fp_flags & SUSP_REOPEN)) {
953 rfp->fp_flags &= ~SUSP_REOPEN;
954 rfp->fp_blocked_on = FP_BLOCKED_ON_NONE;
955 reply(rfp->fp_endpoint, ERESTART);
959 /* Look for processes that are suspended in an OPEN call */
960 for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
961 if (rfp->fp_pid == PID_FREE) continue;
962 if (rfp->fp_blocked_on != FP_BLOCKED_ON_DOPEN ||
963 !(rfp->fp_flags & SUSP_REOPEN)) continue;
965 printf("restart_reopen: found process in FP_BLOCKED_ON_DOPEN, fd %d\n",
966 rfp->fp_fd >> 8);
967 fd_nr = (rfp->fp_fd >> 8);
968 fp = rfp->fp_filp[fd_nr];
970 if (!fp) {
971 /* Open failed, and automatic reopen was not requested */
972 rfp->fp_blocked_on = FP_BLOCKED_ON_NONE;
973 FD_CLR(fd_nr, &rfp->fp_filp_inuse);
974 reply(rfp->fp_endpoint, EIO);
975 continue;
978 vp = fp->filp_vno;
979 if (!vp) panic("restart_reopen: no vp");
980 if ((vp->v_mode & I_TYPE) != I_CHAR_SPECIAL) continue;
981 if (((vp->v_sdev >> MAJOR) & BYTE) != maj) continue;
983 rfp->fp_blocked_on = FP_BLOCKED_ON_NONE;
984 reply(rfp->fp_endpoint, fd_nr);
989 /*===========================================================================*
990 * reopen_reply *
991 *===========================================================================*/
992 PUBLIC void reopen_reply()
994 endpoint_t driver_e;
995 int filp_no, status, maj;
996 struct filp *fp;
997 struct vnode *vp;
998 struct dmap *dp;
1000 driver_e = m_in.m_source;
1001 filp_no = m_in.REP_ENDPT;
1002 status = m_in.REP_STATUS;
1004 if (filp_no < 0 || filp_no >= NR_FILPS) {
1005 printf("reopen_reply: bad filp number %d from driver %d\n", filp_no,
1006 driver_e);
1007 return;
1010 fp = &filp[filp_no];
1011 if (fp->filp_count < 1) {
1012 printf("reopen_reply: filp number %d not in use (from driver %d)\n",
1013 filp_no, driver_e);
1014 return;
1017 vp = fp->filp_vno;
1018 if (!vp) {
1019 printf("reopen_reply: no vnode for filp number %d (from driver %d)\n",
1020 filp_no, driver_e);
1021 return;
1024 if (fp->filp_state != FS_NEEDS_REOPEN) {
1025 printf("reopen_reply: bad state %d for filp number %d"
1026 " (from driver %d)\n", fp->filp_state, filp_no, driver_e);
1027 return;
1030 if ((vp->v_mode & I_TYPE) != I_CHAR_SPECIAL) {
1031 printf("reopen_reply: bad mode 0%o for filp number %d"
1032 " (from driver %d)\n", vp->v_mode, filp_no, driver_e);
1033 return;
1036 maj = ((vp->v_sdev >> MAJOR) & BYTE);
1037 dp = &dmap[maj];
1038 if (dp->dmap_driver != driver_e) {
1039 printf("reopen_reply: bad major %d for filp number %d "
1040 "(from driver %d, current driver is %d)\n", maj, filp_no,
1041 driver_e, dp->dmap_driver);
1042 return;
1045 if (status == OK) {
1046 fp->filp_state= FS_NORMAL;
1047 } else {
1048 printf("reopen_reply: should handle error status\n");
1049 return;
1052 restart_reopen(maj);
1055 #if 0
1056 #define ASYN_NR 100
1057 PRIVATE asynmsg_t msgtable[ASYN_NR];
1058 PRIVATE int first_slot= 0, next_slot= 0;
1060 PUBLIC int asynsend(dst, mp)
1061 endpoint_t dst;
1062 message *mp;
1064 int r, src_ind, dst_ind;
1065 unsigned flags;
1067 /* Update first_slot */
1068 for (; first_slot < next_slot; first_slot++)
1070 flags= msgtable[first_slot].flags;
1071 if ((flags & (AMF_VALID|AMF_DONE)) == (AMF_VALID|AMF_DONE))
1073 if (msgtable[first_slot].result != OK)
1075 printf(
1076 "asynsend: found completed entry %d with error %d\n",
1077 first_slot,
1078 msgtable[first_slot].result);
1080 continue;
1082 if (flags != AMF_EMPTY)
1083 break;
1086 if (first_slot >= next_slot)
1088 /* Reset first_slot and next_slot */
1089 next_slot= first_slot= 0;
1092 if (next_slot >= ASYN_NR)
1094 /* Tell the kernel to stop processing */
1095 r= senda(NULL, 0);
1096 if (r != OK)
1097 panic("asynsend: senda failed: %d", r);
1099 dst_ind= 0;
1100 for (src_ind= first_slot; src_ind<next_slot; src_ind++)
1102 flags= msgtable[src_ind].flags;
1103 if ((flags & (AMF_VALID|AMF_DONE)) ==
1104 (AMF_VALID|AMF_DONE))
1106 if (msgtable[src_ind].result != OK)
1108 printf(
1109 "asynsend: found completed entry %d with error %d\n",
1110 src_ind,
1111 msgtable[src_ind].result);
1113 continue;
1115 if (flags == AMF_EMPTY)
1116 continue;
1117 #if 0
1118 printf("asynsend: copying entry %d to %d\n",
1119 src_ind, dst_ind);
1120 #endif
1121 if (src_ind != dst_ind)
1122 msgtable[dst_ind]= msgtable[src_ind];
1123 dst_ind++;
1125 first_slot= 0;
1126 next_slot= dst_ind;
1127 if (next_slot >= ASYN_NR)
1128 panic("asynsend: msgtable full");
1131 msgtable[next_slot].dst= dst;
1132 msgtable[next_slot].msg= *mp;
1133 msgtable[next_slot].flags= AMF_VALID; /* Has to be last. The kernel
1134 * scans this table while we
1135 * are sleeping.
1137 next_slot++;
1139 /* Tell the kernel to rescan the table */
1140 return senda(msgtable+first_slot, next_slot-first_slot);
1142 #endif