various warning/error fixes for gcc47
[minix3.git] / servers / vfs / main.c
blob7a4beb86fd67e05474c0fac0b17c1c9fec6b461d
1 /*
2 * a loop that gets messages requesting work, carries out the work, and sends
3 * replies.
5 * The entry points into this file are:
6 * main: main program of the Virtual File System
7 * reply: send a reply to a process after the requested work is done
9 */
11 #include "fs.h"
12 #include <fcntl.h>
13 #include <string.h>
14 #include <stdio.h>
15 #include <signal.h>
16 #include <assert.h>
17 #include <stdlib.h>
18 #include <sys/ioc_memory.h>
19 #include <sys/svrctl.h>
20 #include <sys/select.h>
21 #include <minix/callnr.h>
22 #include <minix/com.h>
23 #include <minix/keymap.h>
24 #include <minix/const.h>
25 #include <minix/endpoint.h>
26 #include <minix/safecopies.h>
27 #include <minix/debug.h>
28 #include <minix/vfsif.h>
29 #include "file.h"
30 #include "dmap.h"
31 #include "fproc.h"
32 #include "scratchpad.h"
33 #include "vmnt.h"
34 #include "vnode.h"
35 #include "job.h"
36 #include "param.h"
38 #if ENABLE_SYSCALL_STATS
39 EXTERN unsigned long calls_stats[NCALLS];
40 #endif
42 /* Thread related prototypes */
43 static void thread_cleanup(struct fproc *rfp);
44 static void *do_async_dev_result(void *arg);
45 static void *do_control_msgs(void *arg);
46 static void *do_fs_reply(struct job *job);
47 static void *do_work(void *arg);
48 static void *do_pm(void *arg);
49 static void *do_init_root(void *arg);
50 static void handle_work(void *(*func)(void *arg));
52 static void get_work(void);
53 static void lock_pm(void);
54 static void unlock_pm(void);
55 static void service_pm(void);
56 static void service_pm_postponed(void);
57 static int unblock(struct fproc *rfp);
59 /* SEF functions and variables. */
60 static void sef_local_startup(void);
61 static int sef_cb_init_fresh(int type, sef_init_info_t *info);
62 static mutex_t pm_lock;
63 static endpoint_t receive_from;
65 /*===========================================================================*
66 * main *
67 *===========================================================================*/
68 int main(void)
70 /* This is the main program of the file system. The main loop consists of
71 * three major activities: getting new work, processing the work, and sending
72 * the reply. This loop never terminates as long as the file system runs.
74 int transid;
75 struct job *job;
77 /* SEF local startup. */
78 sef_local_startup();
80 printf("Started VFS: %d worker thread(s)\n", NR_WTHREADS);
82 if (OK != (sys_getkinfo(&kinfo)))
83 panic("couldn't get kernel kinfo");
85 /* This is the main loop that gets work, processes it, and sends replies. */
86 while (TRUE) {
87 yield_all(); /* let other threads run */
88 self = NULL;
89 job = NULL;
90 send_work();
91 get_work();
93 transid = TRNS_GET_ID(m_in.m_type);
94 if (IS_VFS_FS_TRANSID(transid)) {
95 job = worker_getjob( (thread_t) transid - VFS_TRANSID);
96 if (job == NULL) {
97 printf("VFS: spurious message %d from endpoint %d\n",
98 m_in.m_type, m_in.m_source);
99 continue;
101 m_in.m_type = TRNS_DEL_ID(m_in.m_type);
104 if (job != NULL) {
105 do_fs_reply(job);
106 continue;
107 } else if (who_e == PM_PROC_NR) { /* Calls from PM */
108 /* Special control messages from PM */
109 sys_worker_start(do_pm);
110 continue;
111 } else if (is_notify(call_nr)) {
112 /* A task notify()ed us */
113 sys_worker_start(do_control_msgs);
114 continue;
115 } else if (who_p < 0) { /* i.e., message comes from a task */
116 /* We're going to ignore this message. Tasks should
117 * send notify()s only.
119 printf("VFS: ignoring message from %d (%d)\n", who_e, call_nr);
120 continue;
123 /* At this point we either have results from an asynchronous device
124 * or a new system call. In both cases a new worker thread has to be
125 * started and there might not be one available from the pool. This is
126 * not a problem (requests/replies are simply queued), except when
127 * they're from an FS endpoint, because these can cause a deadlock.
128 * handle_work() takes care of the details. */
129 if (IS_DEV_RS(call_nr)) {
130 /* We've got results for a device request */
131 handle_work(do_async_dev_result);
132 continue;
133 } else {
134 /* Normal syscall. */
135 handle_work(do_work);
138 return(OK); /* shouldn't come here */
141 /*===========================================================================*
142 * handle_work *
143 *===========================================================================*/
144 static void handle_work(void *(*func)(void *arg))
146 /* Handle asynchronous device replies and new system calls. If the originating
147 * endpoint is an FS endpoint, take extra care not to get in deadlock. */
148 struct vmnt *vmp = NULL;
149 endpoint_t proc_e;
151 proc_e = m_in.m_source;
153 if (fp->fp_flags & FP_SYS_PROC) {
154 if (worker_available() == 0) {
155 if (!deadlock_resolving) {
156 if ((vmp = find_vmnt(proc_e)) != NULL) {
157 /* A call back or dev result from an FS
158 * endpoint. Set call back flag. Can do only
159 * one call back at a time.
161 if (vmp->m_flags & VMNT_CALLBACK) {
162 reply(proc_e, EAGAIN);
163 return;
165 vmp->m_flags |= VMNT_CALLBACK;
167 /* When an FS endpoint has to make a call back
168 * in order to mount, force its device to a
169 * "none device" so block reads/writes will be
170 * handled by ROOT_FS_E.
172 if (vmp->m_flags & VMNT_MOUNTING)
173 vmp->m_flags |= VMNT_FORCEROOTBSF;
175 deadlock_resolving = 1;
176 dl_worker_start(func);
177 return;
179 /* Already trying to resolve a deadlock, can't
180 * handle more, sorry */
182 reply(proc_e, EAGAIN);
183 return;
187 worker_start(func);
190 /*===========================================================================*
191 * do_async_dev_result *
192 *===========================================================================*/
193 static void *do_async_dev_result(void *arg)
195 endpoint_t endpt;
196 struct job my_job;
198 my_job = *((struct job *) arg);
199 fp = my_job.j_fp;
201 /* An asynchronous character driver has results for us */
202 if (job_call_nr == DEV_REVIVE) {
203 endpt = job_m_in.REP_ENDPT;
204 if (endpt == VFS_PROC_NR)
205 endpt = find_suspended_ep(job_m_in.m_source,
206 job_m_in.REP_IO_GRANT);
208 if (endpt == NONE) {
209 printf("VFS: proc with grant %d from %d not found\n",
210 job_m_in.REP_IO_GRANT, job_m_in.m_source);
211 } else if (job_m_in.REP_STATUS == SUSPEND) {
212 printf("VFS: got SUSPEND on DEV_REVIVE: not reviving proc\n");
213 } else
214 revive(endpt, job_m_in.REP_STATUS);
216 else if (job_call_nr == DEV_OPEN_REPL) open_reply();
217 else if (job_call_nr == DEV_REOPEN_REPL) reopen_reply();
218 else if (job_call_nr == DEV_CLOSE_REPL) close_reply();
219 else if (job_call_nr == DEV_SEL_REPL1)
220 select_reply1(job_m_in.m_source, job_m_in.DEV_MINOR,
221 job_m_in.DEV_SEL_OPS);
222 else if (job_call_nr == DEV_SEL_REPL2)
223 select_reply2(job_m_in.m_source, job_m_in.DEV_MINOR,
224 job_m_in.DEV_SEL_OPS);
226 if (deadlock_resolving) {
227 if (fp != NULL && fp->fp_wtid == dl_worker.w_tid)
228 deadlock_resolving = 0;
231 if (fp != NULL && (fp->fp_flags & FP_SYS_PROC)) {
232 struct vmnt *vmp;
234 if ((vmp = find_vmnt(fp->fp_endpoint)) != NULL)
235 vmp->m_flags &= ~VMNT_CALLBACK;
238 thread_cleanup(NULL);
239 return(NULL);
242 /*===========================================================================*
243 * do_control_msgs *
244 *===========================================================================*/
245 static void *do_control_msgs(void *arg)
247 struct job my_job;
249 my_job = *((struct job *) arg);
250 fp = my_job.j_fp;
252 /* Check for special control messages. */
253 if (job_m_in.m_source == CLOCK) {
254 /* Alarm timer expired. Used only for select(). Check it. */
255 expire_timers(job_m_in.NOTIFY_TIMESTAMP);
256 } else if (job_m_in.m_source == DS_PROC_NR) {
257 /* DS notifies us of an event. */
258 ds_event();
259 } else {
260 /* Device notifies us of an event. */
261 dev_status(&job_m_in);
264 thread_cleanup(NULL);
265 return(NULL);
268 /*===========================================================================*
269 * do_fs_reply *
270 *===========================================================================*/
271 static void *do_fs_reply(struct job *job)
273 struct vmnt *vmp;
274 struct fproc *rfp;
276 if ((vmp = find_vmnt(who_e)) == NULL)
277 panic("Couldn't find vmnt for endpoint %d", who_e);
279 rfp = job->j_fp;
281 if (rfp == NULL || rfp->fp_endpoint == NONE) {
282 printf("VFS: spurious reply from %d\n", who_e);
283 return(NULL);
286 if (rfp->fp_task != who_e)
287 printf("VFS: expected %d to reply, not %d\n", rfp->fp_task, who_e);
288 *rfp->fp_sendrec = m_in;
289 rfp->fp_task = NONE;
290 vmp->m_comm.c_cur_reqs--; /* We've got our reply, make room for others */
291 if (rfp->fp_wtid != invalid_thread_id)
292 worker_signal(worker_get(rfp->fp_wtid)); /* Continue this thread */
293 else
294 printf("VFS: consistency error: reply for finished job\n");
296 return(NULL);
299 /*===========================================================================*
300 * lock_pm *
301 *===========================================================================*/
302 static void lock_pm(void)
304 struct fproc *org_fp;
305 struct worker_thread *org_self;
307 /* First try to get it right off the bat */
308 if (mutex_trylock(&pm_lock) == 0)
309 return;
311 org_fp = fp;
312 org_self = self;
314 if (mutex_lock(&pm_lock) != 0)
315 panic("Could not obtain lock on pm\n");
317 fp = org_fp;
318 self = org_self;
321 /*===========================================================================*
322 * unlock_pm *
323 *===========================================================================*/
324 static void unlock_pm(void)
326 if (mutex_unlock(&pm_lock) != 0)
327 panic("Could not release lock on pm");
330 /*===========================================================================*
331 * do_pm *
332 *===========================================================================*/
333 static void *do_pm(void *arg __unused)
335 lock_pm();
336 service_pm();
337 unlock_pm();
339 thread_cleanup(NULL);
340 return(NULL);
343 /*===========================================================================*
344 * do_pending_pipe *
345 *===========================================================================*/
346 static void *do_pending_pipe(void *arg)
348 int r, op;
349 struct job my_job;
350 struct filp *f;
351 tll_access_t locktype;
353 my_job = *((struct job *) arg);
354 fp = my_job.j_fp;
356 lock_proc(fp, 1 /* force lock */);
358 f = scratch(fp).file.filp;
359 assert(f != NULL);
360 scratch(fp).file.filp = NULL;
362 locktype = (job_call_nr == READ) ? VNODE_READ : VNODE_WRITE;
363 op = (job_call_nr == READ) ? READING : WRITING;
364 lock_filp(f, locktype);
366 r = rw_pipe(op, who_e, f, scratch(fp).io.io_buffer, scratch(fp).io.io_nbytes);
368 if (r != SUSPEND) /* Do we have results to report? */
369 reply(fp->fp_endpoint, r);
371 unlock_filp(f);
373 thread_cleanup(fp);
374 return(NULL);
377 /*===========================================================================*
378 * do_dummy *
379 *===========================================================================*/
380 void *do_dummy(void *arg)
382 struct job my_job;
383 int r;
385 my_job = *((struct job *) arg);
386 fp = my_job.j_fp;
388 if ((r = mutex_trylock(&fp->fp_lock)) == 0) {
389 thread_cleanup(fp);
390 } else {
391 /* Proc is busy, let that worker thread carry out the work */
392 thread_cleanup(NULL);
394 return(NULL);
397 /*===========================================================================*
398 * do_work *
399 *===========================================================================*/
400 static void *do_work(void *arg)
402 int error;
403 struct job my_job;
405 my_job = *((struct job *) arg);
406 fp = my_job.j_fp;
408 lock_proc(fp, 0); /* This proc is busy */
410 if (job_call_nr == MAPDRIVER) {
411 error = do_mapdriver();
412 } else if (job_call_nr == COMMON_GETSYSINFO) {
413 error = do_getsysinfo();
414 } else if (IS_PFS_VFS_RQ(job_call_nr)) {
415 if (who_e != PFS_PROC_NR) {
416 printf("VFS: only PFS is allowed to make nested VFS calls\n");
417 error = ENOSYS;
418 } else if (job_call_nr <= PFS_BASE ||
419 job_call_nr >= PFS_BASE + PFS_NREQS) {
420 error = ENOSYS;
421 } else {
422 job_call_nr -= PFS_BASE;
423 error = (*pfs_call_vec[job_call_nr])();
425 } else {
426 /* We're dealing with a POSIX system call from a normal
427 * process. Call the internal function that does the work.
429 if (job_call_nr < 0 || job_call_nr >= NCALLS) {
430 error = ENOSYS;
431 } else if (fp->fp_pid == PID_FREE) {
432 /* Process vanished before we were able to handle request.
433 * Replying has no use. Just drop it. */
434 error = SUSPEND;
435 } else {
436 #if ENABLE_SYSCALL_STATS
437 calls_stats[job_call_nr]++;
438 #endif
439 error = (*call_vec[job_call_nr])();
443 /* Copy the results back to the user and send reply. */
444 if (error != SUSPEND) {
446 if ((fp->fp_flags & FP_SYS_PROC)) {
447 struct vmnt *vmp;
449 if ((vmp = find_vmnt(fp->fp_endpoint)) != NULL)
450 vmp->m_flags &= ~VMNT_CALLBACK;
453 if (deadlock_resolving) {
454 if (fp->fp_wtid == dl_worker.w_tid)
455 deadlock_resolving = 0;
458 reply(fp->fp_endpoint, error);
461 thread_cleanup(fp);
462 return(NULL);
465 /*===========================================================================*
466 * sef_local_startup *
467 *===========================================================================*/
468 static void sef_local_startup()
470 /* Register init callbacks. */
471 sef_setcb_init_fresh(sef_cb_init_fresh);
472 sef_setcb_init_restart(sef_cb_init_fail);
474 /* No live update support for now. */
476 /* Let SEF perform startup. */
477 sef_startup();
480 /*===========================================================================*
481 * sef_cb_init_fresh *
482 *===========================================================================*/
483 static int sef_cb_init_fresh(int UNUSED(type), sef_init_info_t *info)
485 /* Initialize the virtual file server. */
486 int s, i;
487 struct fproc *rfp;
488 message mess;
489 struct rprocpub rprocpub[NR_BOOT_PROCS];
491 force_sync = 0;
492 receive_from = ANY;
493 self = NULL;
494 verbose = 0;
496 /* Initialize proc endpoints to NONE */
497 for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
498 rfp->fp_endpoint = NONE;
499 rfp->fp_pid = PID_FREE;
502 /* Initialize the process table with help of the process manager messages.
503 * Expect one message for each system process with its slot number and pid.
504 * When no more processes follow, the magic process number NONE is sent.
505 * Then, stop and synchronize with the PM.
507 do {
508 if ((s = sef_receive(PM_PROC_NR, &mess)) != OK)
509 panic("VFS: couldn't receive from PM: %d", s);
511 if (mess.m_type != PM_INIT)
512 panic("unexpected message from PM: %d", mess.m_type);
514 if (NONE == mess.PM_PROC) break;
516 rfp = &fproc[mess.PM_SLOT];
517 rfp->fp_flags = FP_NOFLAGS;
518 rfp->fp_pid = mess.PM_PID;
519 rfp->fp_endpoint = mess.PM_PROC;
520 rfp->fp_grant = GRANT_INVALID;
521 rfp->fp_blocked_on = FP_BLOCKED_ON_NONE;
522 rfp->fp_realuid = (uid_t) SYS_UID;
523 rfp->fp_effuid = (uid_t) SYS_UID;
524 rfp->fp_realgid = (gid_t) SYS_GID;
525 rfp->fp_effgid = (gid_t) SYS_GID;
526 rfp->fp_umask = ~0;
527 } while (TRUE); /* continue until process NONE */
528 mess.m_type = OK; /* tell PM that we succeeded */
529 s = send(PM_PROC_NR, &mess); /* send synchronization message */
531 /* All process table entries have been set. Continue with initialization. */
532 fp = &fproc[_ENDPOINT_P(VFS_PROC_NR)];/* During init all communication with
533 * FSes is on behalf of myself */
534 init_dmap(); /* Initialize device table. */
535 system_hz = sys_hz();
537 /* Map all the services in the boot image. */
538 if ((s = sys_safecopyfrom(RS_PROC_NR, info->rproctab_gid, 0,
539 (vir_bytes) rprocpub, sizeof(rprocpub))) != OK){
540 panic("sys_safecopyfrom failed: %d", s);
542 for (i = 0; i < NR_BOOT_PROCS; i++) {
543 if (rprocpub[i].in_use) {
544 if ((s = map_service(&rprocpub[i])) != OK) {
545 panic("VFS: unable to map service: %d", s);
550 /* Subscribe to block and character driver events. */
551 s = ds_subscribe("drv\\.[bc]..\\..*", DSF_INITIAL | DSF_OVERWRITE);
552 if (s != OK) panic("VFS: can't subscribe to driver events (%d)", s);
554 /* Initialize worker threads */
555 for (i = 0; i < NR_WTHREADS; i++) {
556 worker_init(&workers[i]);
558 worker_init(&sys_worker); /* exclusive system worker thread */
559 worker_init(&dl_worker); /* exclusive worker thread to resolve deadlocks */
561 /* Initialize global locks */
562 if (mthread_mutex_init(&pm_lock, NULL) != 0)
563 panic("VFS: couldn't initialize pm lock mutex");
564 if (mthread_mutex_init(&exec_lock, NULL) != 0)
565 panic("VFS: couldn't initialize exec lock");
566 if (mthread_mutex_init(&bsf_lock, NULL) != 0)
567 panic("VFS: couldn't initialize block special file lock");
569 /* Initialize event resources for boot procs and locks for all procs */
570 for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
571 if (mutex_init(&rfp->fp_lock, NULL) != 0)
572 panic("unable to initialize fproc lock");
573 #if LOCK_DEBUG
574 rfp->fp_vp_rdlocks = 0;
575 rfp->fp_vmnt_rdlocks = 0;
576 #endif
579 init_vnodes(); /* init vnodes */
580 init_vmnts(); /* init vmnt structures */
581 init_select(); /* init select() structures */
582 init_filps(); /* Init filp structures */
583 mount_pfs(); /* mount Pipe File Server */
584 worker_start(do_init_root); /* mount initial ramdisk as file system root */
585 yield(); /* force do_init_root to start */
586 self = NULL;
588 return(OK);
591 /*===========================================================================*
592 * do_init_root *
593 *===========================================================================*/
594 static void *do_init_root(void *arg)
596 struct fproc *rfp;
597 struct job my_job;
598 int r;
599 char *mount_label = "fs_imgrd"; /* FIXME: obtain this from RS */
601 my_job = *((struct job *) arg);
602 fp = my_job.j_fp;
604 lock_proc(fp, 1 /* force lock */); /* This proc is busy */
605 lock_pm();
607 /* Initialize process directories. mount_fs will set them to the correct
608 * values */
609 for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
610 FD_ZERO(&(rfp->fp_filp_inuse));
611 rfp->fp_rd = NULL;
612 rfp->fp_wd = NULL;
615 receive_from = MFS_PROC_NR;
616 if ((r = mount_fs(DEV_IMGRD, "/", MFS_PROC_NR, 0, mount_label)) != OK)
617 panic("Failed to initialize root");
618 receive_from = ANY;
620 unlock_pm();
621 thread_cleanup(fp);
622 return(NULL);
625 /*===========================================================================*
626 * lock_proc *
627 *===========================================================================*/
628 void lock_proc(struct fproc *rfp, int force_lock)
630 int r;
631 struct fproc *org_fp;
632 struct worker_thread *org_self;
634 r = mutex_trylock(&rfp->fp_lock);
636 /* Were we supposed to obtain this lock immediately? */
637 if (force_lock) {
638 assert(r == 0);
639 return;
642 if (r == 0) return;
644 org_fp = fp;
645 org_self = self;
647 if ((r = mutex_lock(&rfp->fp_lock)) != 0)
648 panic("unable to lock fproc lock: %d", r);
650 fp = org_fp;
651 self = org_self;
654 /*===========================================================================*
655 * unlock_proc *
656 *===========================================================================*/
657 void unlock_proc(struct fproc *rfp)
659 int r;
661 if ((r = mutex_unlock(&rfp->fp_lock)) != 0)
662 panic("Failed to unlock: %d", r);
665 /*===========================================================================*
666 * thread_cleanup *
667 *===========================================================================*/
668 static void thread_cleanup(struct fproc *rfp)
670 /* Clean up worker thread. Skip parts if this thread is not associated
671 * with a particular process (i.e., rfp is NULL) */
673 #if LOCK_DEBUG
674 if (rfp != NULL) {
675 check_filp_locks_by_me();
676 check_vnode_locks_by_me(rfp);
677 check_vmnt_locks_by_me(rfp);
679 #endif
681 if (rfp != NULL && rfp->fp_flags & FP_PM_PENDING) { /* Postponed PM call */
682 job_m_in = rfp->fp_job.j_m_in;
683 rfp->fp_flags &= ~FP_PM_PENDING;
684 service_pm_postponed();
687 #if LOCK_DEBUG
688 if (rfp != NULL) {
689 check_filp_locks_by_me();
690 check_vnode_locks_by_me(rfp);
691 check_vmnt_locks_by_me(rfp);
693 #endif
695 if (rfp != NULL) {
696 rfp->fp_flags &= ~FP_DROP_WORK;
697 unlock_proc(rfp);
701 /*===========================================================================*
702 * get_work *
703 *===========================================================================*/
704 static void get_work()
706 /* Normally wait for new input. However, if 'reviving' is
707 * nonzero, a suspended process must be awakened.
709 int r, found_one, proc_p;
710 register struct fproc *rp;
712 while (reviving != 0) {
713 found_one = FALSE;
715 /* Find a suspended process. */
716 for (rp = &fproc[0]; rp < &fproc[NR_PROCS]; rp++)
717 if (rp->fp_pid != PID_FREE && (rp->fp_flags & FP_REVIVED)) {
718 found_one = TRUE; /* Found a suspended process */
719 if (unblock(rp))
720 return; /* So main loop can process job */
721 send_work();
724 if (!found_one) /* Consistency error */
725 panic("VFS: get_work couldn't revive anyone");
728 for(;;) {
729 /* Normal case. No one to revive. Get a useful request. */
730 if ((r = sef_receive(receive_from, &m_in)) != OK) {
731 panic("VFS: sef_receive error: %d", r);
734 proc_p = _ENDPOINT_P(m_in.m_source);
735 if (proc_p < 0 || proc_p >= NR_PROCS) fp = NULL;
736 else fp = &fproc[proc_p];
738 if (m_in.m_type == EDEADSRCDST) return; /* Failed 'sendrec' */
740 /* Negative who_p is never used to access the fproc array. Negative
741 * numbers (kernel tasks) are treated in a special way.
743 if (who_p >= (int)(sizeof(fproc) / sizeof(struct fproc)))
744 panic("receive process out of range: %d", who_p);
745 if (who_p >= 0 && fproc[who_p].fp_endpoint == NONE) {
746 printf("VFS: ignoring request from %d: NONE endpoint %d (%d)\n",
747 m_in.m_source, who_p, m_in.m_type);
748 continue;
751 /* Internal consistency check; our mental image of process numbers and
752 * endpoints must match with how the rest of the system thinks of them.
754 if (who_p >= 0 && fproc[who_p].fp_endpoint != who_e) {
755 if (fproc[who_p].fp_endpoint == NONE)
756 printf("slot unknown even\n");
758 printf("VFS: receive endpoint inconsistent (source %d, who_p "
759 "%d, stored ep %d, who_e %d).\n", m_in.m_source, who_p,
760 fproc[who_p].fp_endpoint, who_e);
761 panic("VFS: inconsistent endpoint ");
764 return;
768 /*===========================================================================*
769 * reply *
770 *===========================================================================*/
771 void reply(endpoint_t whom, int result)
773 /* Send a reply to a user process. If the send fails, just ignore it. */
774 int r;
776 m_out.reply_type = result;
777 r = sendnb(whom, &m_out);
778 if (r != OK) {
779 printf("VFS: %d couldn't send reply %d to %d: %d\n", mthread_self(),
780 result, whom, r);
781 util_stacktrace();
785 /*===========================================================================*
786 * service_pm_postponed *
787 *===========================================================================*/
788 static void service_pm_postponed(void)
790 int r;
791 vir_bytes pc, newsp;
793 switch(job_call_nr) {
794 case PM_EXEC:
796 endpoint_t proc_e;
797 vir_bytes exec_path, stack_frame;
798 size_t exec_path_len, stack_frame_len;
800 proc_e = job_m_in.PM_PROC;
801 exec_path = (vir_bytes) job_m_in.PM_PATH;
802 exec_path_len = (size_t) job_m_in.PM_PATH_LEN;
803 stack_frame = (vir_bytes) job_m_in.PM_FRAME;
804 stack_frame_len = (size_t) job_m_in.PM_FRAME_LEN;
806 r = pm_exec(proc_e, exec_path, exec_path_len, stack_frame,
807 stack_frame_len, &pc, &newsp, job_m_in.PM_EXECFLAGS);
809 /* Reply status to PM */
810 m_out.m_type = PM_EXEC_REPLY;
811 m_out.PM_PROC = proc_e;
812 m_out.PM_PC = (void*) pc;
813 m_out.PM_STATUS = r;
814 m_out.PM_NEWSP = (void *) newsp;
816 break;
818 case PM_EXIT:
820 endpoint_t proc_e;
821 proc_e = job_m_in.PM_PROC;
823 pm_exit(proc_e);
825 /* Reply dummy status to PM for synchronization */
826 m_out.m_type = PM_EXIT_REPLY;
827 m_out.PM_PROC = proc_e;
829 break;
831 case PM_DUMPCORE:
833 endpoint_t proc_e, traced_proc_e;
834 int term_signal;
835 vir_bytes core_path;
837 proc_e = job_m_in.PM_PROC;
838 traced_proc_e = job_m_in.PM_TRACED_PROC;
839 if(job_m_in.PM_PROC != job_m_in.PM_TRACED_PROC) {
840 /* dumpcore request */
841 term_signal = 0;
842 } else {
843 /* dumpcore on exit */
844 term_signal = job_m_in.PM_TERM_SIG;
846 core_path = (vir_bytes) job_m_in.PM_PATH;
848 r = pm_dumpcore(proc_e, term_signal, core_path);
850 /* Reply status to PM */
851 m_out.m_type = PM_CORE_REPLY;
852 m_out.PM_PROC = proc_e;
853 m_out.PM_TRACED_PROC = traced_proc_e;
854 m_out.PM_STATUS = r;
856 break;
858 default:
859 panic("Unhandled postponed PM call %d", job_m_in.m_type);
862 r = send(PM_PROC_NR, &m_out);
863 if (r != OK)
864 panic("service_pm_postponed: send failed: %d", r);
867 /*===========================================================================*
868 * service_pm *
869 *===========================================================================*/
870 static void service_pm()
872 int r, slot;
874 switch (job_call_nr) {
875 case PM_SETUID:
877 endpoint_t proc_e;
878 uid_t euid, ruid;
880 proc_e = job_m_in.PM_PROC;
881 euid = job_m_in.PM_EID;
882 ruid = job_m_in.PM_RID;
884 pm_setuid(proc_e, euid, ruid);
886 m_out.m_type = PM_SETUID_REPLY;
887 m_out.PM_PROC = proc_e;
889 break;
891 case PM_SETGID:
893 endpoint_t proc_e;
894 gid_t egid, rgid;
896 proc_e = job_m_in.PM_PROC;
897 egid = job_m_in.PM_EID;
898 rgid = job_m_in.PM_RID;
900 pm_setgid(proc_e, egid, rgid);
902 m_out.m_type = PM_SETGID_REPLY;
903 m_out.PM_PROC = proc_e;
905 break;
907 case PM_SETSID:
909 endpoint_t proc_e;
911 proc_e = job_m_in.PM_PROC;
912 pm_setsid(proc_e);
914 m_out.m_type = PM_SETSID_REPLY;
915 m_out.PM_PROC = proc_e;
917 break;
919 case PM_EXEC:
920 case PM_EXIT:
921 case PM_DUMPCORE:
923 endpoint_t proc_e = job_m_in.PM_PROC;
925 if(isokendpt(proc_e, &slot) != OK) {
926 printf("VFS: proc ep %d not ok\n", proc_e);
927 return;
930 fp = &fproc[slot];
932 if (fp->fp_flags & FP_PENDING) {
933 /* This process has a request pending, but PM wants it
934 * gone. Forget about the pending request and satisfy
935 * PM's request instead. Note that a pending request
936 * AND an EXEC request are mutually exclusive. Also, PM
937 * should send only one request/process at a time.
939 assert(fp->fp_job.j_m_in.m_source != PM_PROC_NR);
942 /* PM requests on behalf of a proc are handled after the
943 * system call that might be in progress for that proc has
944 * finished. If the proc is not busy, we start a dummy call.
946 if (!(fp->fp_flags & FP_PENDING) &&
947 mutex_trylock(&fp->fp_lock) == 0) {
948 mutex_unlock(&fp->fp_lock);
949 worker_start(do_dummy);
950 fp->fp_flags |= FP_DROP_WORK;
953 fp->fp_job.j_m_in = job_m_in;
954 fp->fp_flags |= FP_PM_PENDING;
956 return;
958 case PM_FORK:
959 case PM_SRV_FORK:
961 endpoint_t pproc_e, proc_e;
962 pid_t child_pid;
963 uid_t reuid;
964 gid_t regid;
966 pproc_e = job_m_in.PM_PPROC;
967 proc_e = job_m_in.PM_PROC;
968 child_pid = job_m_in.PM_CPID;
969 reuid = job_m_in.PM_REUID;
970 regid = job_m_in.PM_REGID;
972 pm_fork(pproc_e, proc_e, child_pid);
973 m_out.m_type = PM_FORK_REPLY;
975 if (job_call_nr == PM_SRV_FORK) {
976 m_out.m_type = PM_SRV_FORK_REPLY;
977 pm_setuid(proc_e, reuid, reuid);
978 pm_setgid(proc_e, regid, regid);
981 m_out.PM_PROC = proc_e;
983 break;
984 case PM_SETGROUPS:
986 endpoint_t proc_e;
987 int group_no;
988 gid_t *group_addr;
990 proc_e = job_m_in.PM_PROC;
991 group_no = job_m_in.PM_GROUP_NO;
992 group_addr = (gid_t *) job_m_in.PM_GROUP_ADDR;
994 pm_setgroups(proc_e, group_no, group_addr);
996 m_out.m_type = PM_SETGROUPS_REPLY;
997 m_out.PM_PROC = proc_e;
999 break;
1001 case PM_UNPAUSE:
1003 endpoint_t proc_e;
1005 proc_e = job_m_in.PM_PROC;
1007 unpause(proc_e);
1009 m_out.m_type = PM_UNPAUSE_REPLY;
1010 m_out.PM_PROC = proc_e;
1012 break;
1014 case PM_REBOOT:
1015 pm_reboot();
1017 /* Reply dummy status to PM for synchronization */
1018 m_out.m_type = PM_REBOOT_REPLY;
1020 break;
1022 default:
1023 printf("VFS: don't know how to handle PM request %d\n", job_call_nr);
1025 return;
1028 r = send(PM_PROC_NR, &m_out);
1029 if (r != OK)
1030 panic("service_pm: send failed: %d", r);
1035 /*===========================================================================*
1036 * unblock *
1037 *===========================================================================*/
1038 static int unblock(rfp)
1039 struct fproc *rfp;
1041 int blocked_on;
1043 fp = rfp;
1044 blocked_on = rfp->fp_blocked_on;
1045 m_in.m_source = rfp->fp_endpoint;
1046 m_in.m_type = rfp->fp_block_callnr;
1047 m_in.fd = scratch(fp).file.fd_nr;
1048 m_in.buffer = scratch(fp).io.io_buffer;
1049 m_in.nbytes = scratch(fp).io.io_nbytes;
1051 rfp->fp_blocked_on = FP_BLOCKED_ON_NONE; /* no longer blocked */
1052 rfp->fp_flags &= ~FP_REVIVED;
1053 reviving--;
1054 assert(reviving >= 0);
1056 /* This should be a pipe I/O, not a device I/O. If it is, it'll 'leak'
1057 * grants.
1059 assert(!GRANT_VALID(rfp->fp_grant));
1061 /* Pending pipe reads/writes can be handled directly */
1062 if (blocked_on == FP_BLOCKED_ON_PIPE) {
1063 worker_start(do_pending_pipe);
1064 yield(); /* Give thread a chance to run */
1065 self = NULL;
1066 return(0); /* Retrieve more work */
1069 return(1); /* We've unblocked a process */