minix/servers/vfs/main.c
/*
 * This file contains the main program of the Virtual File System: a loop that
 * gets messages requesting work, carries out the work, and sends replies.
 *
 * The entry points into this file are:
 *   main:   main program of the Virtual File System
 *   reply:  send a reply to a process after the requested work is done
 */
#include "fs.h"
#include <fcntl.h>
#include <string.h>
#include <stdio.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <sys/ioc_memory.h>
#include <sys/svrctl.h>
#include <sys/select.h>
#include <minix/callnr.h>
#include <minix/com.h>
#include <minix/const.h>
#include <minix/endpoint.h>
#include <minix/safecopies.h>
#include <minix/debug.h>
#include <minix/vfsif.h>
#include "file.h"
#include "scratchpad.h"
#include "vmnt.h"
#include "vnode.h"

#if ENABLE_SYSCALL_STATS
EXTERN unsigned long calls_stats[NR_VFS_CALLS];
#endif

/* Thread related prototypes */
static void do_reply(struct worker_thread *wp);
static void do_work(void);
static void do_init_root(void);
static void handle_work(void (*func)(void));
static void reply(message *m_out, endpoint_t whom, int result);

static void get_work(void);
static void service_pm(void);
static int unblock(struct fproc *rfp);

/* SEF functions and variables. */
static void sef_local_startup(void);
static int sef_cb_init_fresh(int type, sef_init_info_t *info);
static endpoint_t receive_from;

/*===========================================================================*
 *                                  main                                     *
 *===========================================================================*/
int main(void)
{
/* This is the main program of the file system. The main loop consists of
 * three major activities: getting new work, processing the work, and sending
 * the reply. This loop never terminates as long as the file system runs.
 */
  int transid;
  struct worker_thread *wp;

  /* SEF local startup. */
  sef_local_startup();

  printf("Started VFS: %d worker thread(s)\n", NR_WTHREADS);

  if (OK != (sys_getkinfo(&kinfo)))
      panic("couldn't get kernel kinfo");

  /* This is the main loop that gets work, processes it, and sends replies. */
  while (TRUE) {
      yield_all();    /* let other threads run */
      self = NULL;
      send_work();
      get_work();

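      /* Replies from FS endpoints carry a transaction ID that identifies the
       * worker thread waiting for them; route such messages straight to that
       * worker instead of treating them as new requests.
       */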
      transid = TRNS_GET_ID(m_in.m_type);
      if (IS_VFS_FS_TRANSID(transid)) {
          wp = worker_get((thread_t) transid - VFS_TRANSID);
          if (wp == NULL || wp->w_fp == NULL) {
              printf("VFS: spurious message %d from endpoint %d\n",
                  m_in.m_type, m_in.m_source);
              continue;
          }
          m_in.m_type = TRNS_DEL_ID(m_in.m_type);
          do_reply(wp);
          continue;
      } else if (who_e == PM_PROC_NR) {    /* Calls from PM */
          /* Special control messages from PM */
          service_pm();
          continue;
      } else if (is_notify(call_nr)) {
          /* A task ipc_notify()ed us */
          switch (who_e) {
          case DS_PROC_NR:
              /* Start a thread to handle DS events, if no thread
               * is pending or active for it already. DS is not
               * supposed to issue calls to VFS or be the subject of
               * postponed PM requests, so this should be no problem.
               */
              if (worker_can_start(fp))
                  handle_work(ds_event);
              break;
          case KERNEL:
              mthread_stacktraces();
              break;
          case CLOCK:
              /* Timer expired. Used only for select(). Check it. */
              expire_timers(m_in.m_notify.timestamp);
              break;
          default:
              printf("VFS: ignoring notification from %d\n", who_e);
          }
          continue;
      } else if (who_p < 0) {    /* i.e., message comes from a task */
          /* We're going to ignore this message. Tasks should
           * send ipc_notify()s only.
           */
          printf("VFS: ignoring message from %d (%d)\n", who_e, call_nr);
          continue;
      }

      if (IS_BDEV_RS(call_nr)) {
          /* We've got results for a block device request. */
          bdev_reply();
      } else if (IS_CDEV_RS(call_nr)) {
          /* We've got results for a character device request. */
          cdev_reply();
      } else {
          /* Normal syscall. This spawns a new thread. */
          handle_work(do_work);
      }
  }
  return(OK);    /* shouldn't come here */
}

/*===========================================================================*
 *                               handle_work                                 *
 *===========================================================================*/
static void handle_work(void (*func)(void))
{
/* Handle asynchronous device replies and new system calls. If the originating
 * endpoint is an FS endpoint, take extra care not to get in deadlock. */
  struct vmnt *vmp = NULL;
  endpoint_t proc_e;
  int use_spare = FALSE;

  proc_e = m_in.m_source;

  if (fp->fp_flags & FP_SRV_PROC) {
      vmp = find_vmnt(proc_e);
      if (vmp != NULL) {
          /* A callback from an FS endpoint. Can do only one at once. */
          if (vmp->m_flags & VMNT_CALLBACK) {
              replycode(proc_e, EAGAIN);
              return;
          }
          /* Already trying to resolve a deadlock? Can't handle more. */
          if (worker_available() == 0) {
              replycode(proc_e, EAGAIN);
              return;
          }
          /* A thread is available. Set callback flag. */
          vmp->m_flags |= VMNT_CALLBACK;
          if (vmp->m_flags & VMNT_MOUNTING) {
              vmp->m_flags |= VMNT_FORCEROOTBSF;
          }
      }

      /* Use the spare thread to handle this request if needed. */
      use_spare = TRUE;
  }

  worker_start(fp, func, &m_in, use_spare);
}

/*===========================================================================*
 *                                do_reply                                   *
 *===========================================================================*/
static void do_reply(struct worker_thread *wp)
{
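/* A reply has arrived for a request that a worker thread sent to an FS
 * endpoint (or to VM). Hand the reply to that worker and let it continue.
 */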
  struct vmnt *vmp = NULL;

  if (who_e != VM_PROC_NR && (vmp = find_vmnt(who_e)) == NULL)
      panic("Couldn't find vmnt for endpoint %d", who_e);

  if (wp->w_task != who_e) {
      printf("VFS: tid %d: expected %d to reply, not %d\n",
          wp->w_tid, wp->w_task, who_e);
  }

  *wp->w_sendrec = m_in;
  wp->w_task = NONE;
  if (vmp) vmp->m_comm.c_cur_reqs--;    /* We've got our reply, make room for others */
  worker_signal(wp);    /* Continue this thread */
}

/*===========================================================================*
 *                             do_pending_pipe                               *
 *===========================================================================*/
static void do_pending_pipe(void)
{
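/* A previously suspended pipe read or write can now be resumed: retrieve the
 * saved file pointer and I/O parameters and retry the transfer.
 */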
  int r, op;
  struct filp *f;
  tll_access_t locktype;

  f = scratch(fp).file.filp;
  assert(f != NULL);
  scratch(fp).file.filp = NULL;

  locktype = (job_call_nr == VFS_READ) ? VNODE_READ : VNODE_WRITE;
  op = (job_call_nr == VFS_READ) ? READING : WRITING;
  lock_filp(f, locktype);

  r = rw_pipe(op, who_e, f, scratch(fp).io.io_buffer, scratch(fp).io.io_nbytes);

  if (r != SUSPEND)    /* Do we have results to report? */
      replycode(fp->fp_endpoint, r);

  unlock_filp(f);
}

/*===========================================================================*
 *                                 do_work                                   *
 *===========================================================================*/
static void do_work(void)
{
  unsigned int call_index;
  int error;

  if (fp->fp_pid == PID_FREE) {
      /* Process vanished before we were able to handle request.
       * Replying has no use. Just drop it.
       */
      return;
  }

  memset(&job_m_out, 0, sizeof(job_m_out));

  /* At this point we assume that we're dealing with a call that has been
   * made specifically to VFS. Typically it will be a POSIX call from a
   * normal process, but we also handle a few calls made by drivers such
   * as UDS and VND through here. Call the internal function that does
   * the work.
   */
  if (IS_VFS_CALL(job_call_nr)) {
      call_index = (unsigned int) (job_call_nr - VFS_BASE);

      if (call_index < NR_VFS_CALLS && call_vec[call_index] != NULL) {
#if ENABLE_SYSCALL_STATS
          calls_stats[call_index]++;
#endif
          error = (*call_vec[call_index])();
      } else
          error = ENOSYS;
  } else
      error = ENOSYS;

  /* Copy the results back to the user and send reply. */
  if (error != SUSPEND) reply(&job_m_out, fp->fp_endpoint, error);
}

/*===========================================================================*
 *                            sef_local_startup                              *
 *===========================================================================*/
static void sef_local_startup()
{
  /* Register init callbacks. */
  sef_setcb_init_fresh(sef_cb_init_fresh);
  sef_setcb_init_restart(sef_cb_init_fail);

  /* No live update support for now. */

  /* Let SEF perform startup. */
  sef_startup();
}

/*===========================================================================*
 *                            sef_cb_init_fresh                              *
 *===========================================================================*/
static int sef_cb_init_fresh(int UNUSED(type), sef_init_info_t *info)
{
/* Initialize the virtual file server. */
  int s, i;
  struct fproc *rfp;
  message mess;
  struct rprocpub rprocpub[NR_BOOT_PROCS];

  receive_from = ANY;
  self = NULL;
  verbose = 0;

  /* Initialize proc endpoints to NONE */
  for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
      rfp->fp_endpoint = NONE;
      rfp->fp_pid = PID_FREE;
  }

  /* Initialize the process table with help of the process manager messages.
   * Expect one message for each system process with its slot number and pid.
   * When no more processes follow, the magic process number NONE is sent.
   * Then, stop and synchronize with the PM.
   */
  do {
      if ((s = sef_receive(PM_PROC_NR, &mess)) != OK)
          panic("VFS: couldn't receive from PM: %d", s);

      if (mess.m_type != VFS_PM_INIT)
          panic("unexpected message from PM: %d", mess.m_type);

      if (NONE == mess.VFS_PM_ENDPT) break;

      rfp = &fproc[mess.VFS_PM_SLOT];
      rfp->fp_flags = FP_NOFLAGS;
      rfp->fp_pid = mess.VFS_PM_PID;
      rfp->fp_endpoint = mess.VFS_PM_ENDPT;
      rfp->fp_grant = GRANT_INVALID;
      rfp->fp_blocked_on = FP_BLOCKED_ON_NONE;
      rfp->fp_realuid = (uid_t) SYS_UID;
      rfp->fp_effuid = (uid_t) SYS_UID;
      rfp->fp_realgid = (gid_t) SYS_GID;
      rfp->fp_effgid = (gid_t) SYS_GID;
      rfp->fp_umask = ~0;
  } while (TRUE);                      /* continue until process NONE */
  mess.m_type = OK;                    /* tell PM that we succeeded */
  s = ipc_send(PM_PROC_NR, &mess);     /* send synchronization message */

  system_hz = sys_hz();

  /* Subscribe to block and character driver events. */
  s = ds_subscribe("drv\\.[bc]..\\..*", DSF_INITIAL | DSF_OVERWRITE);
  if (s != OK) panic("VFS: can't subscribe to driver events (%d)", s);

  /* Initialize worker threads */
  worker_init();

  /* Initialize global locks */
  if (mthread_mutex_init(&bsf_lock, NULL) != 0)
      panic("VFS: couldn't initialize block special file lock");

  init_dmap();        /* Initialize device table. */

  /* Map all the services in the boot image. */
  if ((s = sys_safecopyfrom(RS_PROC_NR, info->rproctab_gid, 0,
          (vir_bytes) rprocpub, sizeof(rprocpub))) != OK) {
      panic("sys_safecopyfrom failed: %d", s);
  }
  for (i = 0; i < NR_BOOT_PROCS; i++) {
      if (rprocpub[i].in_use) {
          if ((s = map_service(&rprocpub[i])) != OK) {
              panic("VFS: unable to map service: %d", s);
          }
      }
  }

  /* Initialize locks and initial values for all processes. */
  for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
      if (mutex_init(&rfp->fp_lock, NULL) != 0)
          panic("unable to initialize fproc lock");
      rfp->fp_worker = NULL;
#if LOCK_DEBUG
      rfp->fp_vp_rdlocks = 0;
      rfp->fp_vmnt_rdlocks = 0;
#endif

      /* Initialize process directories. mount_fs will set them to the
       * correct values.
       */
      for (i = 0; i < OPEN_MAX; i++)
          rfp->fp_filp[i] = NULL;
      rfp->fp_rd = NULL;
      rfp->fp_wd = NULL;
  }

  init_vnodes();      /* init vnodes */
  init_vmnts();       /* init vmnt structures */
  init_select();      /* init select() structures */
  init_filps();       /* Init filp structures */
  mount_pfs();        /* mount Pipe File Server */

  /* Mount initial ramdisk as file system root. */
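  /* Until the root file system is mounted, accept messages only from MFS;
   * do_init_root() resets receive_from to ANY once mounting has completed.
   */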
  receive_from = MFS_PROC_NR;
  worker_start(fproc_addr(VFS_PROC_NR), do_init_root, &mess /*unused*/,
      FALSE /*use_spare*/);

  return(OK);
}

/*===========================================================================*
 *                              do_init_root                                 *
 *===========================================================================*/
static void do_init_root(void)
{
  int r;
  char *mount_type = "mfs";          /* FIXME: use boot image process name instead */
  char *mount_label = "fs_imgrd";    /* FIXME: obtain this from RS */

  r = mount_fs(DEV_IMGRD, "bootramdisk", "/", MFS_PROC_NR, 0, mount_type,
      mount_label);
  if (r != OK)
      panic("Failed to initialize root");
  receive_from = ANY;
}

/*===========================================================================*
 *                                lock_proc                                  *
 *===========================================================================*/
void lock_proc(struct fproc *rfp)
{
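/* Lock the given process slot. If the lock is already held, suspend this
 * worker thread while waiting for it, so that other workers can keep running.
 */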
  int r;
  struct worker_thread *org_self;

  r = mutex_trylock(&rfp->fp_lock);
  if (r == 0) return;

  org_self = worker_suspend();

  if ((r = mutex_lock(&rfp->fp_lock)) != 0)
      panic("unable to lock fproc lock: %d", r);

  worker_resume(org_self);
}

/*===========================================================================*
 *                               unlock_proc                                 *
 *===========================================================================*/
void unlock_proc(struct fproc *rfp)
{
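/* Release the process slot lock taken by lock_proc(). */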
  int r;

  if ((r = mutex_unlock(&rfp->fp_lock)) != 0)
      panic("Failed to unlock: %d", r);
}

/*===========================================================================*
 *                             thread_cleanup                                *
 *===========================================================================*/
void thread_cleanup(void)
{
/* Perform cleanup actions for a worker thread. */

#if LOCK_DEBUG
  check_filp_locks_by_me();
  check_vnode_locks_by_me(fp);
  check_vmnt_locks_by_me(fp);
#endif

  if (fp->fp_flags & FP_SRV_PROC) {
      struct vmnt *vmp;

      if ((vmp = find_vmnt(fp->fp_endpoint)) != NULL) {
          vmp->m_flags &= ~VMNT_CALLBACK;
      }
  }
}

/*===========================================================================*
 *                                get_work                                   *
 *===========================================================================*/
static void get_work()
{
  /* Normally wait for new input. However, if 'reviving' is
   * nonzero, a suspended process must be awakened.
   */
  int r, found_one, proc_p;
  register struct fproc *rp;

  while (reviving != 0) {
      found_one = FALSE;

      /* Find a suspended process. */
      for (rp = &fproc[0]; rp < &fproc[NR_PROCS]; rp++)
          if (rp->fp_pid != PID_FREE && (rp->fp_flags & FP_REVIVED)) {
              found_one = TRUE;    /* Found a suspended process */
              if (unblock(rp))
                  return;    /* So main loop can process job */
              send_work();
          }

      if (!found_one)    /* Consistency error */
          panic("VFS: get_work couldn't revive anyone");
  }

  for (;;) {
      /* Normal case. No one to revive. Get a useful request. */
      if ((r = sef_receive(receive_from, &m_in)) != OK) {
          panic("VFS: sef_receive error: %d", r);
      }

      proc_p = _ENDPOINT_P(m_in.m_source);
      if (proc_p < 0 || proc_p >= NR_PROCS) fp = NULL;
      else fp = &fproc[proc_p];

      if (m_in.m_type == EDEADSRCDST) {
          printf("VFS: failed ipc_sendrec\n");
          return;    /* Failed 'ipc_sendrec' */
      }

      /* Negative who_p is never used to access the fproc array. Negative
       * numbers (kernel tasks) are treated in a special way.
       */
      if (fp && fp->fp_endpoint == NONE) {
          printf("VFS: ignoring request from %d: NONE endpoint %d (%d)\n",
              m_in.m_source, who_p, m_in.m_type);
          continue;
      }

      /* Internal consistency check; our mental image of process numbers and
       * endpoints must match with how the rest of the system thinks of them.
       */
      if (fp && fp->fp_endpoint != who_e) {
          if (fproc[who_p].fp_endpoint == NONE)
              printf("slot unknown even\n");

          panic("VFS: receive endpoint inconsistent (source %d, who_p "
              "%d, stored ep %d, who_e %d).\n", m_in.m_source, who_p,
              fproc[who_p].fp_endpoint, who_e);
      }

      return;
  }
}

/*===========================================================================*
 *                                  reply                                    *
 *===========================================================================*/
static void reply(message *m_out, endpoint_t whom, int result)
{
/* Send a reply to a user process. If the send fails, just ignore it. */
  int r;

  m_out->m_type = result;
  r = ipc_sendnb(whom, m_out);
  if (r != OK) {
      printf("VFS: %d couldn't send reply %d to %d: %d\n", mthread_self(),
          result, whom, r);
      util_stacktrace();
  }
}

/*===========================================================================*
 *                                replycode                                  *
 *===========================================================================*/
void replycode(endpoint_t whom, int result)
{
/* Send a reply to a user process. If the send fails, just ignore it. */
  message m_out;

  memset(&m_out, 0, sizeof(m_out));

  reply(&m_out, whom, result);
}

/*===========================================================================*
 *                           service_pm_postponed                            *
 *===========================================================================*/
void service_pm_postponed(void)
{
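/* Perform a PM request (exec, exit, core dump, or unpause) that was postponed
 * until the target process was no longer busy. This runs in a worker thread
 * associated with that process; the result is sent back to PM.
 */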
  int r, term_signal;
  vir_bytes core_path;
  vir_bytes exec_path, stack_frame, pc, newsp, ps_str;
  size_t exec_path_len, stack_frame_len;
  endpoint_t proc_e;
  message m_out;

  memset(&m_out, 0, sizeof(m_out));

  switch (job_call_nr) {
  case VFS_PM_EXEC:
      proc_e = job_m_in.VFS_PM_ENDPT;
      exec_path = (vir_bytes) job_m_in.VFS_PM_PATH;
      exec_path_len = (size_t) job_m_in.VFS_PM_PATH_LEN;
      stack_frame = (vir_bytes) job_m_in.VFS_PM_FRAME;
      stack_frame_len = (size_t) job_m_in.VFS_PM_FRAME_LEN;
      ps_str = (vir_bytes) job_m_in.VFS_PM_PS_STR;

      assert(proc_e == fp->fp_endpoint);

      r = pm_exec(exec_path, exec_path_len, stack_frame, stack_frame_len,
          &pc, &newsp, &ps_str);

      /* Reply status to PM */
      m_out.m_type = VFS_PM_EXEC_REPLY;
      m_out.VFS_PM_ENDPT = proc_e;
      m_out.VFS_PM_PC = (void *) pc;
      m_out.VFS_PM_STATUS = r;
      m_out.VFS_PM_NEWSP = (void *) newsp;
      m_out.VFS_PM_NEWPS_STR = ps_str;

      break;

  case VFS_PM_EXIT:
      proc_e = job_m_in.VFS_PM_ENDPT;

      assert(proc_e == fp->fp_endpoint);

      pm_exit();

      /* Reply dummy status to PM for synchronization */
      m_out.m_type = VFS_PM_EXIT_REPLY;
      m_out.VFS_PM_ENDPT = proc_e;

      break;

  case VFS_PM_DUMPCORE:
      proc_e = job_m_in.VFS_PM_ENDPT;
      term_signal = job_m_in.VFS_PM_TERM_SIG;
      core_path = (vir_bytes) job_m_in.VFS_PM_PATH;

      assert(proc_e == fp->fp_endpoint);

      r = pm_dumpcore(term_signal, core_path);

      /* Reply status to PM */
      m_out.m_type = VFS_PM_CORE_REPLY;
      m_out.VFS_PM_ENDPT = proc_e;
      m_out.VFS_PM_STATUS = r;

      break;

  case VFS_PM_UNPAUSE:
      proc_e = job_m_in.VFS_PM_ENDPT;

      assert(proc_e == fp->fp_endpoint);

      unpause();

      m_out.m_type = VFS_PM_UNPAUSE_REPLY;
      m_out.VFS_PM_ENDPT = proc_e;

      break;

  default:
      panic("Unhandled postponed PM call %d", job_m_in.m_type);
  }

  r = ipc_send(PM_PROC_NR, &m_out);
  if (r != OK)
      panic("service_pm_postponed: ipc_send failed: %d", r);
}

/*===========================================================================*
 *                                service_pm                                 *
 *===========================================================================*/
static void service_pm(void)
{
/* Process a request from PM. This function is called from the main thread, and
 * may therefore not block. Any requests that may require blocking the calling
 * thread must be executed in a separate thread. Aside from VFS_PM_REBOOT, all
 * requests from PM involve another, target process: for example, PM tells VFS
 * that a process is performing a setuid() call. For some requests, however,
 * that other process may not be idle, and in that case VFS must serialize the
 * PM request handling with any operation it is handling for that target
 * process. As it happens, the requests that may require blocking are also the
 * ones where the target process may not be idle. For both these reasons, such
 * requests are run in worker threads associated with the target process.
 */
  struct fproc *rfp;
  int r, slot;
  message m_out;

  memset(&m_out, 0, sizeof(m_out));

  switch (call_nr) {
  case VFS_PM_SETUID:
      {
          endpoint_t proc_e;
          uid_t euid, ruid;

          proc_e = m_in.VFS_PM_ENDPT;
          euid = m_in.VFS_PM_EID;
          ruid = m_in.VFS_PM_RID;

          pm_setuid(proc_e, euid, ruid);

          m_out.m_type = VFS_PM_SETUID_REPLY;
          m_out.VFS_PM_ENDPT = proc_e;
      }
      break;

  case VFS_PM_SETGID:
      {
          endpoint_t proc_e;
          gid_t egid, rgid;

          proc_e = m_in.VFS_PM_ENDPT;
          egid = m_in.VFS_PM_EID;
          rgid = m_in.VFS_PM_RID;

          pm_setgid(proc_e, egid, rgid);

          m_out.m_type = VFS_PM_SETGID_REPLY;
          m_out.VFS_PM_ENDPT = proc_e;
      }
      break;

  case VFS_PM_SETSID:
      {
          endpoint_t proc_e;

          proc_e = m_in.VFS_PM_ENDPT;
          pm_setsid(proc_e);

          m_out.m_type = VFS_PM_SETSID_REPLY;
          m_out.VFS_PM_ENDPT = proc_e;
      }
      break;

  case VFS_PM_EXEC:
  case VFS_PM_EXIT:
  case VFS_PM_DUMPCORE:
  case VFS_PM_UNPAUSE:
      {
          endpoint_t proc_e = m_in.VFS_PM_ENDPT;

          if (isokendpt(proc_e, &slot) != OK) {
              printf("VFS: proc ep %d not ok\n", proc_e);
              return;
          }

          rfp = &fproc[slot];

          /* PM requests on behalf of a proc are handled after the
           * system call that might be in progress for that proc has
           * finished. If the proc is not busy, we start a new thread.
           */
          worker_start(rfp, NULL, &m_in, FALSE /*use_spare*/);

          return;
      }
  case VFS_PM_FORK:
  case VFS_PM_SRV_FORK:
      {
          endpoint_t pproc_e, proc_e;
          pid_t child_pid;
          uid_t reuid;
          gid_t regid;

          pproc_e = m_in.VFS_PM_PENDPT;
          proc_e = m_in.VFS_PM_ENDPT;
          child_pid = m_in.VFS_PM_CPID;
          reuid = m_in.VFS_PM_REUID;
          regid = m_in.VFS_PM_REGID;

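          /* Let pm_fork() set up the child's VFS administration from its
           * parent's. For a server fork, additionally adopt the real and
           * effective user and group IDs supplied by PM.
           */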
          pm_fork(pproc_e, proc_e, child_pid);
          m_out.m_type = VFS_PM_FORK_REPLY;

          if (call_nr == VFS_PM_SRV_FORK) {
              m_out.m_type = VFS_PM_SRV_FORK_REPLY;
              pm_setuid(proc_e, reuid, reuid);
              pm_setgid(proc_e, regid, regid);
          }

          m_out.VFS_PM_ENDPT = proc_e;
      }
      break;

  case VFS_PM_SETGROUPS:
      {
          endpoint_t proc_e;
          int group_no;
          gid_t *group_addr;

          proc_e = m_in.VFS_PM_ENDPT;
          group_no = m_in.VFS_PM_GROUP_NO;
          group_addr = (gid_t *) m_in.VFS_PM_GROUP_ADDR;

          pm_setgroups(proc_e, group_no, group_addr);

          m_out.m_type = VFS_PM_SETGROUPS_REPLY;
          m_out.VFS_PM_ENDPT = proc_e;
      }
      break;

  case VFS_PM_REBOOT:
      /* Reboot requests are not considered postponed PM work and are instead
       * handled from a separate worker thread that is associated with PM's
       * process. PM makes no regular VFS calls, and thus, from VFS's
       * perspective, PM is always idle. Therefore, we can safely do this.
       * We do assume that PM sends us only one VFS_PM_REBOOT message at
       * once, or ever for that matter. :)
       */
      worker_start(fproc_addr(PM_PROC_NR), pm_reboot, &m_in,
          FALSE /*use_spare*/);

      return;

  default:
      printf("VFS: don't know how to handle PM request %d\n", call_nr);

      return;
  }

  r = ipc_send(PM_PROC_NR, &m_out);
  if (r != OK)
      panic("service_pm: ipc_send failed: %d", r);
}

/*===========================================================================*
 *                                 unblock                                   *
 *===========================================================================*/
static int unblock(struct fproc *rfp)
{
/* Unblock a process that was previously blocked on a pipe or a lock. This is
 * done by reconstructing the original request and continuing/repeating it.
 * This function returns TRUE when it has restored a request for execution, and
 * FALSE if the caller should continue looking for work to do.
 */
  int blocked_on;

  blocked_on = rfp->fp_blocked_on;

  /* Reconstruct the original request from the saved data. */
  memset(&m_in, 0, sizeof(m_in));
  m_in.m_source = rfp->fp_endpoint;
  m_in.m_type = rfp->fp_block_callnr;
  switch (m_in.m_type) {
  case VFS_READ:
  case VFS_WRITE:
      assert(blocked_on == FP_BLOCKED_ON_PIPE);
      m_in.m_lc_vfs_readwrite.fd = scratch(rfp).file.fd_nr;
      m_in.m_lc_vfs_readwrite.buf = scratch(rfp).io.io_buffer;
      m_in.m_lc_vfs_readwrite.len = scratch(rfp).io.io_nbytes;
      break;
  case VFS_FCNTL:
      assert(blocked_on == FP_BLOCKED_ON_LOCK);
      m_in.m_lc_vfs_fcntl.fd = scratch(rfp).file.fd_nr;
      m_in.m_lc_vfs_fcntl.cmd = scratch(rfp).io.io_nbytes;
      m_in.m_lc_vfs_fcntl.arg_ptr = scratch(rfp).io.io_buffer;
      assert(m_in.m_lc_vfs_fcntl.cmd == F_SETLKW);
      break;
  default:
      panic("unblocking call %d blocked on %d ??", m_in.m_type, blocked_on);
  }

  rfp->fp_blocked_on = FP_BLOCKED_ON_NONE;    /* no longer blocked */
  rfp->fp_flags &= ~FP_REVIVED;
  reviving--;
  assert(reviving >= 0);

  /* This should not be device I/O. If it is, it'll 'leak' grants. */
  assert(!GRANT_VALID(rfp->fp_grant));

  /* Pending pipe reads/writes cannot be repeated as is, and thus require a
   * special resumption procedure.
   */
  if (blocked_on == FP_BLOCKED_ON_PIPE) {
      worker_start(rfp, do_pending_pipe, &m_in, FALSE /*use_spare*/);
      return(FALSE);    /* Retrieve more work */
  }

  /* A lock request. Repeat the original request as though it just came in. */
  fp = rfp;
  return(TRUE);    /* We've unblocked a process */
}