/*
 * servers/vfs/main.c — MINIX 3 Virtual File System server, main loop.
 * (Snapshot of minix.git commit "coverity appeasement",
 *  blob 3e4eb3dad1c1ec72ad5828b5f1ad8344ca779aae.)
 */
/* This file contains the main program of the Virtual File System: a loop that
 * gets messages requesting work, carries out the work, and sends replies.
 *
 * The entry points into this file are:
 *   main:	main program of the Virtual File System
 *   reply:	send a reply to a process after the requested work is done
 */
11 #include "fs.h"
12 #include <fcntl.h>
13 #include <string.h>
14 #include <stdio.h>
15 #include <signal.h>
16 #include <assert.h>
17 #include <stdlib.h>
18 #include <sys/ioc_memory.h>
19 #include <sys/svrctl.h>
20 #include <sys/select.h>
21 #include <minix/callnr.h>
22 #include <minix/com.h>
23 #include <minix/keymap.h>
24 #include <minix/const.h>
25 #include <minix/endpoint.h>
26 #include <minix/safecopies.h>
27 #include <minix/debug.h>
28 #include <minix/vfsif.h>
29 #include "file.h"
30 #include "dmap.h"
31 #include "fproc.h"
32 #include "scratchpad.h"
33 #include "vmnt.h"
34 #include "vnode.h"
35 #include "job.h"
36 #include "param.h"
38 #if ENABLE_SYSCALL_STATS
39 EXTERN unsigned long calls_stats[NCALLS];
40 #endif
42 /* Thread related prototypes */
43 static void *do_async_dev_result(void *arg);
44 static void *do_control_msgs(void *arg);
45 static void *do_fs_reply(struct job *job);
46 static void *do_work(void *arg);
47 static void *do_pm(void *arg);
48 static void *do_init_root(void *arg);
49 static void handle_work(void *(*func)(void *arg));
51 static void get_work(void);
52 static void lock_pm(void);
53 static void unlock_pm(void);
54 static void service_pm(void);
55 static void service_pm_postponed(void);
56 static int unblock(struct fproc *rfp);
58 /* SEF functions and variables. */
59 static void sef_local_startup(void);
60 static int sef_cb_init_fresh(int type, sef_init_info_t *info);
61 static mutex_t pm_lock;
62 static endpoint_t receive_from;
64 /*===========================================================================*
65 * main *
66 *===========================================================================*/
67 int main(void)
69 /* This is the main program of the file system. The main loop consists of
70 * three major activities: getting new work, processing the work, and sending
71 * the reply. This loop never terminates as long as the file system runs.
73 int transid;
74 struct job *job;
76 /* SEF local startup. */
77 sef_local_startup();
79 printf("Started VFS: %d worker thread(s)\n", NR_WTHREADS);
81 if (OK != (sys_getkinfo(&kinfo)))
82 panic("couldn't get kernel kinfo");
84 /* This is the main loop that gets work, processes it, and sends replies. */
85 while (TRUE) {
86 yield_all(); /* let other threads run */
87 self = NULL;
88 job = NULL;
89 send_work();
90 get_work();
92 transid = TRNS_GET_ID(m_in.m_type);
93 if (IS_VFS_FS_TRANSID(transid)) {
94 job = worker_getjob( (thread_t) transid - VFS_TRANSID);
95 if (job == NULL) {
96 printf("VFS: spurious message %d from endpoint %d\n",
97 m_in.m_type, m_in.m_source);
98 continue;
100 m_in.m_type = TRNS_DEL_ID(m_in.m_type);
103 if (job != NULL) {
104 do_fs_reply(job);
105 continue;
106 } else if (who_e == PM_PROC_NR) { /* Calls from PM */
107 /* Special control messages from PM */
108 sys_worker_start(do_pm);
109 continue;
110 } else if (is_notify(call_nr)) {
111 /* A task notify()ed us */
112 if (who_e == DS_PROC_NR)
113 worker_start(ds_event);
114 else
115 sys_worker_start(do_control_msgs);
116 continue;
117 } else if (who_p < 0) { /* i.e., message comes from a task */
118 /* We're going to ignore this message. Tasks should
119 * send notify()s only.
121 printf("VFS: ignoring message from %d (%d)\n", who_e, call_nr);
122 continue;
125 /* At this point we either have results from an asynchronous device
126 * or a new system call. In both cases a new worker thread has to be
127 * started and there might not be one available from the pool. This is
128 * not a problem (requests/replies are simply queued), except when
129 * they're from an FS endpoint, because these can cause a deadlock.
130 * handle_work() takes care of the details. */
131 if (IS_DRV_REPLY(call_nr)) {
132 /* We've got results for a device request */
134 struct dmap *dp;
136 dp = get_dmap(who_e);
137 if (dp != NULL) {
138 if (dev_style_asyn(dp->dmap_style)) {
139 handle_work(do_async_dev_result);
141 } else {
142 if (dp->dmap_servicing == NONE) {
143 printf("Got spurious dev reply from %d",
144 who_e);
145 } else {
146 dev_reply(dp);
149 continue;
151 printf("VFS: ignoring dev reply from unknown driver %d\n",
152 who_e);
153 } else {
154 /* Normal syscall. */
155 handle_work(do_work);
158 return(OK); /* shouldn't come here */
161 /*===========================================================================*
162 * handle_work *
163 *===========================================================================*/
164 static void handle_work(void *(*func)(void *arg))
166 /* Handle asynchronous device replies and new system calls. If the originating
167 * endpoint is an FS endpoint, take extra care not to get in deadlock. */
168 struct vmnt *vmp = NULL;
169 endpoint_t proc_e;
171 proc_e = m_in.m_source;
173 if (fp->fp_flags & FP_SYS_PROC) {
174 if (worker_available() == 0) {
175 if (!deadlock_resolving) {
176 if ((vmp = find_vmnt(proc_e)) != NULL) {
177 /* A call back or dev result from an FS
178 * endpoint. Set call back flag. Can do only
179 * one call back at a time.
181 if (vmp->m_flags & VMNT_CALLBACK) {
182 reply(proc_e, EAGAIN);
183 return;
185 vmp->m_flags |= VMNT_CALLBACK;
187 /* When an FS endpoint has to make a call back
188 * in order to mount, force its device to a
189 * "none device" so block reads/writes will be
190 * handled by ROOT_FS_E.
192 if (vmp->m_flags & VMNT_MOUNTING)
193 vmp->m_flags |= VMNT_FORCEROOTBSF;
195 deadlock_resolving = 1;
196 dl_worker_start(func);
197 return;
199 /* Already trying to resolve a deadlock, can't
200 * handle more, sorry */
202 reply(proc_e, EAGAIN);
203 return;
207 worker_start(func);
210 /*===========================================================================*
211 * do_async_dev_result *
212 *===========================================================================*/
213 static void *do_async_dev_result(void *arg)
215 endpoint_t endpt;
216 struct job my_job;
218 my_job = *((struct job *) arg);
219 fp = my_job.j_fp;
221 /* An asynchronous character driver has results for us */
222 if (job_call_nr == DEV_REVIVE) {
223 endpt = job_m_in.REP_ENDPT;
224 if (endpt == VFS_PROC_NR)
225 endpt = find_suspended_ep(job_m_in.m_source,
226 job_m_in.REP_IO_GRANT);
228 if (endpt == NONE) {
229 printf("VFS: proc with grant %d from %d not found\n",
230 job_m_in.REP_IO_GRANT, job_m_in.m_source);
231 } else if (job_m_in.REP_STATUS == SUSPEND) {
232 printf("VFS: got SUSPEND on DEV_REVIVE: not reviving proc\n");
233 } else
234 revive(endpt, job_m_in.REP_STATUS);
236 else if (job_call_nr == DEV_OPEN_REPL) open_reply();
237 else if (job_call_nr == DEV_REOPEN_REPL) reopen_reply();
238 else if (job_call_nr == DEV_CLOSE_REPL) close_reply();
239 else if (job_call_nr == DEV_SEL_REPL1)
240 select_reply1(job_m_in.m_source, job_m_in.DEV_MINOR,
241 job_m_in.DEV_SEL_OPS);
242 else if (job_call_nr == DEV_SEL_REPL2)
243 select_reply2(job_m_in.m_source, job_m_in.DEV_MINOR,
244 job_m_in.DEV_SEL_OPS);
246 if (deadlock_resolving) {
247 if (fp != NULL && fp->fp_wtid == dl_worker.w_tid)
248 deadlock_resolving = 0;
251 if (fp != NULL && (fp->fp_flags & FP_SYS_PROC)) {
252 struct vmnt *vmp;
254 if ((vmp = find_vmnt(fp->fp_endpoint)) != NULL)
255 vmp->m_flags &= ~VMNT_CALLBACK;
258 thread_cleanup(NULL);
259 return(NULL);
262 /*===========================================================================*
263 * do_control_msgs *
264 *===========================================================================*/
265 static void *do_control_msgs(void *arg)
267 struct job my_job;
269 my_job = *((struct job *) arg);
270 fp = my_job.j_fp;
272 /* Check for special control messages. */
273 if (job_m_in.m_source == CLOCK) {
274 /* Alarm timer expired. Used only for select(). Check it. */
275 expire_timers(job_m_in.NOTIFY_TIMESTAMP);
276 } else {
277 /* Device notifies us of an event. */
278 dev_status(job_m_in.m_source);
281 thread_cleanup(NULL);
282 return(NULL);
285 /*===========================================================================*
286 * do_fs_reply *
287 *===========================================================================*/
288 static void *do_fs_reply(struct job *job)
290 struct vmnt *vmp;
291 struct worker_thread *wp;
293 if ((vmp = find_vmnt(who_e)) == NULL)
294 panic("Couldn't find vmnt for endpoint %d", who_e);
296 wp = worker_get(job->j_fp->fp_wtid);
298 if (wp == NULL) {
299 printf("VFS: spurious reply from %d\n", who_e);
300 return(NULL);
303 if (wp->w_task != who_e) {
304 printf("VFS: expected %d to reply, not %d\n", wp->w_task, who_e);
305 return(NULL);
307 *wp->w_fs_sendrec = m_in;
308 wp->w_task = NONE;
309 vmp->m_comm.c_cur_reqs--; /* We've got our reply, make room for others */
310 worker_signal(wp); /* Continue this thread */
311 return(NULL);
314 /*===========================================================================*
315 * lock_pm *
316 *===========================================================================*/
317 static void lock_pm(void)
319 struct fproc *org_fp;
320 struct worker_thread *org_self;
322 /* First try to get it right off the bat */
323 if (mutex_trylock(&pm_lock) == 0)
324 return;
326 org_fp = fp;
327 org_self = self;
329 if (mutex_lock(&pm_lock) != 0)
330 panic("Could not obtain lock on pm\n");
332 fp = org_fp;
333 self = org_self;
336 /*===========================================================================*
337 * unlock_pm *
338 *===========================================================================*/
339 static void unlock_pm(void)
341 if (mutex_unlock(&pm_lock) != 0)
342 panic("Could not release lock on pm");
345 /*===========================================================================*
346 * do_pm *
347 *===========================================================================*/
348 static void *do_pm(void *arg __unused)
350 lock_pm();
351 service_pm();
352 unlock_pm();
354 thread_cleanup(NULL);
355 return(NULL);
358 /*===========================================================================*
359 * do_pending_pipe *
360 *===========================================================================*/
361 static void *do_pending_pipe(void *arg)
363 int r, op;
364 struct job my_job;
365 struct filp *f;
366 tll_access_t locktype;
368 my_job = *((struct job *) arg);
369 fp = my_job.j_fp;
371 lock_proc(fp, 1 /* force lock */);
373 f = scratch(fp).file.filp;
374 assert(f != NULL);
375 scratch(fp).file.filp = NULL;
377 locktype = (job_call_nr == READ) ? VNODE_READ : VNODE_WRITE;
378 op = (job_call_nr == READ) ? READING : WRITING;
379 lock_filp(f, locktype);
381 r = rw_pipe(op, who_e, f, scratch(fp).io.io_buffer, scratch(fp).io.io_nbytes);
383 if (r != SUSPEND) /* Do we have results to report? */
384 reply(fp->fp_endpoint, r);
386 unlock_filp(f);
388 thread_cleanup(fp);
389 return(NULL);
392 /*===========================================================================*
393 * do_dummy *
394 *===========================================================================*/
395 void *do_dummy(void *arg)
397 struct job my_job;
398 int r;
400 my_job = *((struct job *) arg);
401 fp = my_job.j_fp;
403 if ((r = mutex_trylock(&fp->fp_lock)) == 0) {
404 thread_cleanup(fp);
405 } else {
406 /* Proc is busy, let that worker thread carry out the work */
407 thread_cleanup(NULL);
409 return(NULL);
412 /*===========================================================================*
413 * do_work *
414 *===========================================================================*/
415 static void *do_work(void *arg)
417 int error;
418 struct job my_job;
420 my_job = *((struct job *) arg);
421 fp = my_job.j_fp;
423 lock_proc(fp, 0); /* This proc is busy */
425 if (job_call_nr == MAPDRIVER) {
426 error = do_mapdriver();
427 } else if (job_call_nr == COMMON_GETSYSINFO) {
428 error = do_getsysinfo();
429 } else if (IS_PFS_VFS_RQ(job_call_nr)) {
430 if (who_e != PFS_PROC_NR) {
431 printf("VFS: only PFS is allowed to make nested VFS calls\n");
432 error = ENOSYS;
433 } else if (job_call_nr <= PFS_BASE ||
434 job_call_nr >= PFS_BASE + PFS_NREQS) {
435 error = ENOSYS;
436 } else {
437 job_call_nr -= PFS_BASE;
438 error = (*pfs_call_vec[job_call_nr])();
440 } else {
441 /* We're dealing with a POSIX system call from a normal
442 * process. Call the internal function that does the work.
444 if (job_call_nr < 0 || job_call_nr >= NCALLS) {
445 error = ENOSYS;
446 } else if (fp->fp_pid == PID_FREE) {
447 /* Process vanished before we were able to handle request.
448 * Replying has no use. Just drop it. */
449 error = SUSPEND;
450 } else {
451 #if ENABLE_SYSCALL_STATS
452 calls_stats[job_call_nr]++;
453 #endif
454 error = (*call_vec[job_call_nr])();
458 /* Copy the results back to the user and send reply. */
459 if (error != SUSPEND) {
461 if ((fp->fp_flags & FP_SYS_PROC)) {
462 struct vmnt *vmp;
464 if ((vmp = find_vmnt(fp->fp_endpoint)) != NULL)
465 vmp->m_flags &= ~VMNT_CALLBACK;
468 if (deadlock_resolving) {
469 if (fp->fp_wtid == dl_worker.w_tid)
470 deadlock_resolving = 0;
473 reply(fp->fp_endpoint, error);
476 thread_cleanup(fp);
477 return(NULL);
480 /*===========================================================================*
481 * sef_local_startup *
482 *===========================================================================*/
483 static void sef_local_startup()
485 /* Register init callbacks. */
486 sef_setcb_init_fresh(sef_cb_init_fresh);
487 sef_setcb_init_restart(sef_cb_init_fail);
489 /* No live update support for now. */
491 /* Let SEF perform startup. */
492 sef_startup();
495 /*===========================================================================*
496 * sef_cb_init_fresh *
497 *===========================================================================*/
498 static int sef_cb_init_fresh(int UNUSED(type), sef_init_info_t *info)
500 /* Initialize the virtual file server. */
501 int s, i;
502 struct fproc *rfp;
503 message mess;
504 struct rprocpub rprocpub[NR_BOOT_PROCS];
506 force_sync = 0;
507 receive_from = ANY;
508 self = NULL;
509 verbose = 0;
511 /* Initialize proc endpoints to NONE */
512 for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
513 rfp->fp_endpoint = NONE;
514 rfp->fp_pid = PID_FREE;
517 /* Initialize the process table with help of the process manager messages.
518 * Expect one message for each system process with its slot number and pid.
519 * When no more processes follow, the magic process number NONE is sent.
520 * Then, stop and synchronize with the PM.
522 do {
523 if ((s = sef_receive(PM_PROC_NR, &mess)) != OK)
524 panic("VFS: couldn't receive from PM: %d", s);
526 if (mess.m_type != PM_INIT)
527 panic("unexpected message from PM: %d", mess.m_type);
529 if (NONE == mess.PM_PROC) break;
531 rfp = &fproc[mess.PM_SLOT];
532 rfp->fp_flags = FP_NOFLAGS;
533 rfp->fp_pid = mess.PM_PID;
534 rfp->fp_endpoint = mess.PM_PROC;
535 rfp->fp_grant = GRANT_INVALID;
536 rfp->fp_blocked_on = FP_BLOCKED_ON_NONE;
537 rfp->fp_realuid = (uid_t) SYS_UID;
538 rfp->fp_effuid = (uid_t) SYS_UID;
539 rfp->fp_realgid = (gid_t) SYS_GID;
540 rfp->fp_effgid = (gid_t) SYS_GID;
541 rfp->fp_umask = ~0;
542 } while (TRUE); /* continue until process NONE */
543 mess.m_type = OK; /* tell PM that we succeeded */
544 s = send(PM_PROC_NR, &mess); /* send synchronization message */
546 /* All process table entries have been set. Continue with initialization. */
547 fp = &fproc[_ENDPOINT_P(VFS_PROC_NR)];/* During init all communication with
548 * FSes is on behalf of myself */
549 init_dmap(); /* Initialize device table. */
550 system_hz = sys_hz();
552 /* Map all the services in the boot image. */
553 if ((s = sys_safecopyfrom(RS_PROC_NR, info->rproctab_gid, 0,
554 (vir_bytes) rprocpub, sizeof(rprocpub))) != OK){
555 panic("sys_safecopyfrom failed: %d", s);
557 for (i = 0; i < NR_BOOT_PROCS; i++) {
558 if (rprocpub[i].in_use) {
559 if ((s = map_service(&rprocpub[i])) != OK) {
560 panic("VFS: unable to map service: %d", s);
565 /* Subscribe to block and character driver events. */
566 s = ds_subscribe("drv\\.[bc]..\\..*", DSF_INITIAL | DSF_OVERWRITE);
567 if (s != OK) panic("VFS: can't subscribe to driver events (%d)", s);
569 /* Initialize worker threads */
570 for (i = 0; i < NR_WTHREADS; i++) {
571 worker_init(&workers[i]);
573 worker_init(&sys_worker); /* exclusive system worker thread */
574 worker_init(&dl_worker); /* exclusive worker thread to resolve deadlocks */
576 /* Initialize global locks */
577 if (mthread_mutex_init(&pm_lock, NULL) != 0)
578 panic("VFS: couldn't initialize pm lock mutex");
579 if (mthread_mutex_init(&exec_lock, NULL) != 0)
580 panic("VFS: couldn't initialize exec lock");
581 if (mthread_mutex_init(&bsf_lock, NULL) != 0)
582 panic("VFS: couldn't initialize block special file lock");
584 /* Initialize event resources for boot procs and locks for all procs */
585 for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
586 if (mutex_init(&rfp->fp_lock, NULL) != 0)
587 panic("unable to initialize fproc lock");
588 #if LOCK_DEBUG
589 rfp->fp_vp_rdlocks = 0;
590 rfp->fp_vmnt_rdlocks = 0;
591 #endif
594 init_dmap_locks(); /* init dmap locks */
595 init_vnodes(); /* init vnodes */
596 init_vmnts(); /* init vmnt structures */
597 init_select(); /* init select() structures */
598 init_filps(); /* Init filp structures */
599 mount_pfs(); /* mount Pipe File Server */
600 worker_start(do_init_root); /* mount initial ramdisk as file system root */
601 yield(); /* force do_init_root to start */
602 self = NULL;
604 return(OK);
607 /*===========================================================================*
608 * do_init_root *
609 *===========================================================================*/
610 static void *do_init_root(void *arg)
612 struct fproc *rfp;
613 struct job my_job;
614 int r;
615 char *mount_label = "fs_imgrd"; /* FIXME: obtain this from RS */
617 my_job = *((struct job *) arg);
618 fp = my_job.j_fp;
620 lock_proc(fp, 1 /* force lock */); /* This proc is busy */
621 lock_pm();
623 /* Initialize process directories. mount_fs will set them to the correct
624 * values */
625 for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
626 FD_ZERO(&(rfp->fp_filp_inuse));
627 rfp->fp_rd = NULL;
628 rfp->fp_wd = NULL;
631 receive_from = MFS_PROC_NR;
632 if ((r = mount_fs(DEV_IMGRD, "/", MFS_PROC_NR, 0, mount_label)) != OK)
633 panic("Failed to initialize root");
634 receive_from = ANY;
636 unlock_pm();
637 thread_cleanup(fp);
638 return(NULL);
641 /*===========================================================================*
642 * lock_proc *
643 *===========================================================================*/
644 void lock_proc(struct fproc *rfp, int force_lock)
646 int r;
647 struct fproc *org_fp;
648 struct worker_thread *org_self;
650 r = mutex_trylock(&rfp->fp_lock);
652 /* Were we supposed to obtain this lock immediately? */
653 if (force_lock) {
654 assert(r == 0);
655 return;
658 if (r == 0) return;
660 org_fp = fp;
661 org_self = self;
663 if ((r = mutex_lock(&rfp->fp_lock)) != 0)
664 panic("unable to lock fproc lock: %d", r);
666 fp = org_fp;
667 self = org_self;
670 /*===========================================================================*
671 * unlock_proc *
672 *===========================================================================*/
673 void unlock_proc(struct fproc *rfp)
675 int r;
677 if ((r = mutex_unlock(&rfp->fp_lock)) != 0)
678 panic("Failed to unlock: %d", r);
681 /*===========================================================================*
682 * thread_cleanup *
683 *===========================================================================*/
684 void thread_cleanup(struct fproc *rfp)
686 /* Clean up worker thread. Skip parts if this thread is not associated
687 * with a particular process (i.e., rfp is NULL) */
689 #if LOCK_DEBUG
690 if (rfp != NULL) {
691 check_filp_locks_by_me();
692 check_vnode_locks_by_me(rfp);
693 check_vmnt_locks_by_me(rfp);
695 #endif
697 if (rfp != NULL && rfp->fp_flags & FP_PM_PENDING) { /* Postponed PM call */
698 job_m_in = rfp->fp_job.j_m_in;
699 rfp->fp_flags &= ~FP_PM_PENDING;
700 service_pm_postponed();
703 #if LOCK_DEBUG
704 if (rfp != NULL) {
705 check_filp_locks_by_me();
706 check_vnode_locks_by_me(rfp);
707 check_vmnt_locks_by_me(rfp);
709 #endif
711 if (rfp != NULL) {
712 rfp->fp_flags &= ~FP_DROP_WORK;
713 unlock_proc(rfp);
717 /*===========================================================================*
718 * get_work *
719 *===========================================================================*/
720 static void get_work()
722 /* Normally wait for new input. However, if 'reviving' is
723 * nonzero, a suspended process must be awakened.
725 int r, found_one, proc_p;
726 register struct fproc *rp;
728 while (reviving != 0) {
729 found_one = FALSE;
731 /* Find a suspended process. */
732 for (rp = &fproc[0]; rp < &fproc[NR_PROCS]; rp++)
733 if (rp->fp_pid != PID_FREE && (rp->fp_flags & FP_REVIVED)) {
734 found_one = TRUE; /* Found a suspended process */
735 if (unblock(rp))
736 return; /* So main loop can process job */
737 send_work();
740 if (!found_one) /* Consistency error */
741 panic("VFS: get_work couldn't revive anyone");
744 for(;;) {
745 /* Normal case. No one to revive. Get a useful request. */
746 if ((r = sef_receive(receive_from, &m_in)) != OK) {
747 panic("VFS: sef_receive error: %d", r);
750 proc_p = _ENDPOINT_P(m_in.m_source);
751 if (proc_p < 0 || proc_p >= NR_PROCS) fp = NULL;
752 else fp = &fproc[proc_p];
754 if (m_in.m_type == EDEADSRCDST) return; /* Failed 'sendrec' */
756 /* Negative who_p is never used to access the fproc array. Negative
757 * numbers (kernel tasks) are treated in a special way.
759 if (who_p >= (int)(sizeof(fproc) / sizeof(struct fproc)))
760 panic("receive process out of range: %d", who_p);
761 if (who_p >= 0 && fproc[who_p].fp_endpoint == NONE) {
762 printf("VFS: ignoring request from %d: NONE endpoint %d (%d)\n",
763 m_in.m_source, who_p, m_in.m_type);
764 continue;
767 /* Internal consistency check; our mental image of process numbers and
768 * endpoints must match with how the rest of the system thinks of them.
770 if (who_p >= 0 && fproc[who_p].fp_endpoint != who_e) {
771 if (fproc[who_p].fp_endpoint == NONE)
772 printf("slot unknown even\n");
774 printf("VFS: receive endpoint inconsistent (source %d, who_p "
775 "%d, stored ep %d, who_e %d).\n", m_in.m_source, who_p,
776 fproc[who_p].fp_endpoint, who_e);
777 panic("VFS: inconsistent endpoint ");
780 return;
784 /*===========================================================================*
785 * reply *
786 *===========================================================================*/
787 void reply(endpoint_t whom, int result)
789 /* Send a reply to a user process. If the send fails, just ignore it. */
790 int r;
792 m_out.reply_type = result;
793 r = sendnb(whom, &m_out);
794 if (r != OK) {
795 printf("VFS: %d couldn't send reply %d to %d: %d\n", mthread_self(),
796 result, whom, r);
797 util_stacktrace();
801 /*===========================================================================*
802 * service_pm_postponed *
803 *===========================================================================*/
804 static void service_pm_postponed(void)
806 int r;
807 vir_bytes pc, newsp;
809 switch(job_call_nr) {
810 case PM_EXEC:
812 endpoint_t proc_e;
813 vir_bytes exec_path, stack_frame;
814 size_t exec_path_len, stack_frame_len;
816 proc_e = job_m_in.PM_PROC;
817 exec_path = (vir_bytes) job_m_in.PM_PATH;
818 exec_path_len = (size_t) job_m_in.PM_PATH_LEN;
819 stack_frame = (vir_bytes) job_m_in.PM_FRAME;
820 stack_frame_len = (size_t) job_m_in.PM_FRAME_LEN;
822 r = pm_exec(proc_e, exec_path, exec_path_len, stack_frame,
823 stack_frame_len, &pc, &newsp, job_m_in.PM_EXECFLAGS);
825 /* Reply status to PM */
826 m_out.m_type = PM_EXEC_REPLY;
827 m_out.PM_PROC = proc_e;
828 m_out.PM_PC = (void*) pc;
829 m_out.PM_STATUS = r;
830 m_out.PM_NEWSP = (void *) newsp;
832 break;
834 case PM_EXIT:
836 endpoint_t proc_e;
837 proc_e = job_m_in.PM_PROC;
839 pm_exit(proc_e);
841 /* Reply dummy status to PM for synchronization */
842 m_out.m_type = PM_EXIT_REPLY;
843 m_out.PM_PROC = proc_e;
845 break;
847 case PM_DUMPCORE:
849 endpoint_t proc_e, traced_proc_e;
850 int term_signal;
851 vir_bytes core_path;
853 proc_e = job_m_in.PM_PROC;
854 traced_proc_e = job_m_in.PM_TRACED_PROC;
855 if(job_m_in.PM_PROC != job_m_in.PM_TRACED_PROC) {
856 /* dumpcore request */
857 term_signal = 0;
858 } else {
859 /* dumpcore on exit */
860 term_signal = job_m_in.PM_TERM_SIG;
862 core_path = (vir_bytes) job_m_in.PM_PATH;
864 r = pm_dumpcore(proc_e, term_signal, core_path);
866 /* Reply status to PM */
867 m_out.m_type = PM_CORE_REPLY;
868 m_out.PM_PROC = proc_e;
869 m_out.PM_TRACED_PROC = traced_proc_e;
870 m_out.PM_STATUS = r;
872 break;
874 default:
875 panic("Unhandled postponed PM call %d", job_m_in.m_type);
878 r = send(PM_PROC_NR, &m_out);
879 if (r != OK)
880 panic("service_pm_postponed: send failed: %d", r);
883 /*===========================================================================*
884 * service_pm *
885 *===========================================================================*/
886 static void service_pm()
888 int r, slot;
890 switch (job_call_nr) {
891 case PM_SETUID:
893 endpoint_t proc_e;
894 uid_t euid, ruid;
896 proc_e = job_m_in.PM_PROC;
897 euid = job_m_in.PM_EID;
898 ruid = job_m_in.PM_RID;
900 pm_setuid(proc_e, euid, ruid);
902 m_out.m_type = PM_SETUID_REPLY;
903 m_out.PM_PROC = proc_e;
905 break;
907 case PM_SETGID:
909 endpoint_t proc_e;
910 gid_t egid, rgid;
912 proc_e = job_m_in.PM_PROC;
913 egid = job_m_in.PM_EID;
914 rgid = job_m_in.PM_RID;
916 pm_setgid(proc_e, egid, rgid);
918 m_out.m_type = PM_SETGID_REPLY;
919 m_out.PM_PROC = proc_e;
921 break;
923 case PM_SETSID:
925 endpoint_t proc_e;
927 proc_e = job_m_in.PM_PROC;
928 pm_setsid(proc_e);
930 m_out.m_type = PM_SETSID_REPLY;
931 m_out.PM_PROC = proc_e;
933 break;
935 case PM_EXEC:
936 case PM_EXIT:
937 case PM_DUMPCORE:
939 endpoint_t proc_e = job_m_in.PM_PROC;
941 if(isokendpt(proc_e, &slot) != OK) {
942 printf("VFS: proc ep %d not ok\n", proc_e);
943 return;
946 fp = &fproc[slot];
948 if (fp->fp_flags & FP_PENDING) {
949 /* This process has a request pending, but PM wants it
950 * gone. Forget about the pending request and satisfy
951 * PM's request instead. Note that a pending request
952 * AND an EXEC request are mutually exclusive. Also, PM
953 * should send only one request/process at a time.
955 assert(fp->fp_job.j_m_in.m_source != PM_PROC_NR);
958 /* PM requests on behalf of a proc are handled after the
959 * system call that might be in progress for that proc has
960 * finished. If the proc is not busy, we start a dummy call.
962 if (!(fp->fp_flags & FP_PENDING) &&
963 mutex_trylock(&fp->fp_lock) == 0) {
964 mutex_unlock(&fp->fp_lock);
965 worker_start(do_dummy);
966 fp->fp_flags |= FP_DROP_WORK;
969 fp->fp_job.j_m_in = job_m_in;
970 fp->fp_flags |= FP_PM_PENDING;
972 return;
974 case PM_FORK:
975 case PM_SRV_FORK:
977 endpoint_t pproc_e, proc_e;
978 pid_t child_pid;
979 uid_t reuid;
980 gid_t regid;
982 pproc_e = job_m_in.PM_PPROC;
983 proc_e = job_m_in.PM_PROC;
984 child_pid = job_m_in.PM_CPID;
985 reuid = job_m_in.PM_REUID;
986 regid = job_m_in.PM_REGID;
988 pm_fork(pproc_e, proc_e, child_pid);
989 m_out.m_type = PM_FORK_REPLY;
991 if (job_call_nr == PM_SRV_FORK) {
992 m_out.m_type = PM_SRV_FORK_REPLY;
993 pm_setuid(proc_e, reuid, reuid);
994 pm_setgid(proc_e, regid, regid);
997 m_out.PM_PROC = proc_e;
999 break;
1000 case PM_SETGROUPS:
1002 endpoint_t proc_e;
1003 int group_no;
1004 gid_t *group_addr;
1006 proc_e = job_m_in.PM_PROC;
1007 group_no = job_m_in.PM_GROUP_NO;
1008 group_addr = (gid_t *) job_m_in.PM_GROUP_ADDR;
1010 pm_setgroups(proc_e, group_no, group_addr);
1012 m_out.m_type = PM_SETGROUPS_REPLY;
1013 m_out.PM_PROC = proc_e;
1015 break;
1017 case PM_UNPAUSE:
1019 endpoint_t proc_e;
1021 proc_e = job_m_in.PM_PROC;
1023 unpause(proc_e);
1025 m_out.m_type = PM_UNPAUSE_REPLY;
1026 m_out.PM_PROC = proc_e;
1028 break;
1030 case PM_REBOOT:
1031 pm_reboot();
1033 /* Reply dummy status to PM for synchronization */
1034 m_out.m_type = PM_REBOOT_REPLY;
1036 break;
1038 default:
1039 printf("VFS: don't know how to handle PM request %d\n", job_call_nr);
1041 return;
1044 r = send(PM_PROC_NR, &m_out);
1045 if (r != OK)
1046 panic("service_pm: send failed: %d", r);
1051 /*===========================================================================*
1052 * unblock *
1053 *===========================================================================*/
1054 static int unblock(rfp)
1055 struct fproc *rfp;
1057 int blocked_on;
1059 fp = rfp;
1060 blocked_on = rfp->fp_blocked_on;
1061 m_in.m_source = rfp->fp_endpoint;
1062 m_in.m_type = rfp->fp_block_callnr;
1063 m_in.fd = scratch(fp).file.fd_nr;
1064 m_in.buffer = scratch(fp).io.io_buffer;
1065 m_in.nbytes = scratch(fp).io.io_nbytes;
1067 rfp->fp_blocked_on = FP_BLOCKED_ON_NONE; /* no longer blocked */
1068 rfp->fp_flags &= ~FP_REVIVED;
1069 reviving--;
1070 assert(reviving >= 0);
1072 /* This should be a pipe I/O, not a device I/O. If it is, it'll 'leak'
1073 * grants.
1075 assert(!GRANT_VALID(rfp->fp_grant));
1077 /* Pending pipe reads/writes can be handled directly */
1078 if (blocked_on == FP_BLOCKED_ON_PIPE) {
1079 worker_start(do_pending_pipe);
1080 yield(); /* Give thread a chance to run */
1081 self = NULL;
1082 return(0); /* Retrieve more work */
1085 return(1); /* We've unblocked a process */