2 * a loop that gets messages requesting work, carries out the work, and sends
5 * The entry points into this file are:
6 * main: main program of the Virtual File System
7 * reply: send a reply to a process after the requested work is done
18 #include <sys/ioc_memory.h>
19 #include <sys/svrctl.h>
20 #include <sys/select.h>
21 #include <minix/callnr.h>
22 #include <minix/com.h>
23 #include <minix/keymap.h>
24 #include <minix/const.h>
25 #include <minix/endpoint.h>
26 #include <minix/safecopies.h>
27 #include <minix/debug.h>
28 #include <minix/vfsif.h>
32 #include "scratchpad.h"
38 #if ENABLE_SYSCALL_STATS
39 EXTERN
unsigned long calls_stats
[NCALLS
];
/* Per-syscall invocation counters, only compiled in when ENABLE_SYSCALL_STATS
 * is set; indexed by call number and bumped in do_work(). */
42 /* Thread related prototypes */
43 static void *do_async_dev_result(void *arg
);
44 static void *do_control_msgs(void *arg
);
45 static void *do_fs_reply(struct job
*job
);
46 static void *do_work(void *arg
);
47 static void *do_pm(void *arg
);
48 static void *do_init_root(void *arg
);
49 static void handle_work(void *(*func
)(void *arg
));
51 static void get_work(void);
52 static void lock_pm(void);
53 static void unlock_pm(void);
54 static void service_pm(void);
55 static void service_pm_postponed(void);
56 static int unblock(struct fproc
*rfp
);
58 /* SEF functions and variables. */
59 static void sef_local_startup(void);
60 static int sef_cb_init_fresh(int type
, sef_init_info_t
*info
);
/* Mutex serializing handling of PM requests across worker threads
 * (see lock_pm()/unlock_pm()). */
61 static mutex_t pm_lock
;
/* Endpoint get_work() passes to sef_receive(); do_init_root() narrows it to
 * MFS_PROC_NR while the root file system is being mounted. */
62 static endpoint_t receive_from
;
64 /*===========================================================================*
66 *===========================================================================*/
/* main: VFS entry point. After SEF startup it loops forever: get_work()
 * fetches a message, then the sender class decides which worker entry point
 * handles it (FS transaction reply, PM request, notify, driver reply, or a
 * normal system call).
 * NOTE(review): the extraction dropped several original lines (including the
 * function signature and loop scaffolding); annotate only what is visible. */
69 /* This is the main program of the file system. The main loop consists of
70 * three major activities: getting new work, processing the work, and sending
71 * the reply. This loop never terminates as long as the file system runs.
76 /* SEF local startup. */
79 printf("Started VFS: %d worker thread(s)\n", NR_WTHREADS
);
81 if (OK
!= (sys_getkinfo(&kinfo
)))
82 panic("couldn't get kernel kinfo");
84 /* This is the main loop that gets work, processes it, and sends replies. */
86 yield_all(); /* let other threads run */
/* Transaction IDs encoded in m_type identify replies from FS servers;
 * the ID maps back to the worker thread that issued the request. */
92 transid
= TRNS_GET_ID(m_in
.m_type
);
93 if (IS_VFS_FS_TRANSID(transid
)) {
94 job
= worker_getjob( (thread_t
) transid
- VFS_TRANSID
);
96 printf("VFS: spurious message %d from endpoint %d\n",
97 m_in
.m_type
, m_in
.m_source
);
100 m_in
.m_type
= TRNS_DEL_ID(m_in
.m_type
);
106 } else if (who_e
== PM_PROC_NR
) { /* Calls from PM */
107 /* Special control messages from PM */
108 sys_worker_start(do_pm
);
110 } else if (is_notify(call_nr
)) {
111 /* A task notify()ed us */
112 if (who_e
== DS_PROC_NR
)
113 worker_start(ds_event
);
115 sys_worker_start(do_control_msgs
);
117 } else if (who_p
< 0) { /* i.e., message comes from a task */
118 /* We're going to ignore this message. Tasks should
119 * send notify()s only.
121 printf("VFS: ignoring message from %d (%d)\n", who_e
, call_nr
);
125 /* At this point we either have results from an asynchronous device
126 * or a new system call. In both cases a new worker thread has to be
127 * started and there might not be one available from the pool. This is
128 * not a problem (requests/replies are simply queued), except when
129 * they're from an FS endpoint, because these can cause a deadlock.
130 * handle_work() takes care of the details. */
131 if (IS_DRV_REPLY(call_nr
)) {
132 /* We've got results for a device request */
136 dp
= get_dmap(who_e
);
138 if (dev_style_asyn(dp
->dmap_style
)) {
139 handle_work(do_async_dev_result
);
142 if (dp
->dmap_servicing
== NONE
) {
143 printf("Got spurious dev reply from %d",
151 printf("VFS: ignoring dev reply from unknown driver %d\n",
154 /* Normal syscall. */
155 handle_work(do_work
);
158 return(OK
); /* shouldn't come here */
161 /*===========================================================================*
163 *===========================================================================*/
164 static void handle_work(void *(*func
)(void *arg
))
/* handle_work: start a worker thread running 'func'. For system processes
 * (FS endpoints) with no worker available, it uses the dedicated deadlock
 * worker (dl_worker) and marks the vmnt with VMNT_CALLBACK so only one FS
 * call-back is in flight at a time; a second concurrent attempt is refused
 * with EAGAIN. NOTE(review): the non-FS fall-through path was dropped by the
 * extraction. */
166 /* Handle asynchronous device replies and new system calls. If the originating
167 * endpoint is an FS endpoint, take extra care not to get in deadlock. */
168 struct vmnt
*vmp
= NULL
;
171 proc_e
= m_in
.m_source
;
173 if (fp
->fp_flags
& FP_SYS_PROC
) {
174 if (worker_available() == 0) {
175 if (!deadlock_resolving
) {
176 if ((vmp
= find_vmnt(proc_e
)) != NULL
) {
177 /* A call back or dev result from an FS
178 * endpoint. Set call back flag. Can do only
179 * one call back at a time.
181 if (vmp
->m_flags
& VMNT_CALLBACK
) {
182 reply(proc_e
, EAGAIN
);
185 vmp
->m_flags
|= VMNT_CALLBACK
;
187 /* When an FS endpoint has to make a call back
188 * in order to mount, force its device to a
189 * "none device" so block reads/writes will be
190 * handled by ROOT_FS_E.
192 if (vmp
->m_flags
& VMNT_MOUNTING
)
193 vmp
->m_flags
|= VMNT_FORCEROOTBSF
;
195 deadlock_resolving
= 1;
196 dl_worker_start(func
);
199 /* Already trying to resolve a deadlock, can't
200 * handle more, sorry */
202 reply(proc_e
, EAGAIN
);
210 /*===========================================================================*
211 * do_async_dev_result *
212 *===========================================================================*/
213 static void *do_async_dev_result(void *arg
)
/* Worker-thread entry: process one reply from an asynchronous character
 * driver (revive, open/reopen/close reply, or select replies), then clear
 * the deadlock/callback bookkeeping this job may have set and clean up. */
218 my_job
= *((struct job
*) arg
);
221 /* An asynchronous character driver has results for us */
222 if (job_call_nr
== DEV_REVIVE
) {
223 endpt
= job_m_in
.REP_ENDPT
;
/* A reply addressed to VFS itself refers to a suspended user process;
 * map it back via the grant it was suspended on. */
224 if (endpt
== VFS_PROC_NR
)
225 endpt
= find_suspended_ep(job_m_in
.m_source
,
226 job_m_in
.REP_IO_GRANT
);
229 printf("VFS: proc with grant %d from %d not found\n",
230 job_m_in
.REP_IO_GRANT
, job_m_in
.m_source
);
231 } else if (job_m_in
.REP_STATUS
== SUSPEND
) {
232 printf("VFS: got SUSPEND on DEV_REVIVE: not reviving proc\n");
234 revive(endpt
, job_m_in
.REP_STATUS
);
236 else if (job_call_nr
== DEV_OPEN_REPL
) open_reply();
237 else if (job_call_nr
== DEV_REOPEN_REPL
) reopen_reply();
238 else if (job_call_nr
== DEV_CLOSE_REPL
) close_reply();
239 else if (job_call_nr
== DEV_SEL_REPL1
)
240 select_reply1(job_m_in
.m_source
, job_m_in
.DEV_MINOR
,
241 job_m_in
.DEV_SEL_OPS
);
242 else if (job_call_nr
== DEV_SEL_REPL2
)
243 select_reply2(job_m_in
.m_source
, job_m_in
.DEV_MINOR
,
244 job_m_in
.DEV_SEL_OPS
);
/* If this job ran on the deadlock worker, the resolution is now done. */
246 if (deadlock_resolving
) {
247 if (fp
!= NULL
&& fp
->fp_wtid
== dl_worker
.w_tid
)
248 deadlock_resolving
= 0;
/* FS endpoints get their one-call-back-at-a-time flag cleared here. */
251 if (fp
!= NULL
&& (fp
->fp_flags
& FP_SYS_PROC
)) {
254 if ((vmp
= find_vmnt(fp
->fp_endpoint
)) != NULL
)
255 vmp
->m_flags
&= ~VMNT_CALLBACK
;
258 thread_cleanup(NULL
);
262 /*===========================================================================*
264 *===========================================================================*/
265 static void *do_control_msgs(void *arg
)
/* Worker-thread entry for notify() messages: CLOCK notifications drive
 * select() timers, anything else is treated as a device status event. */
269 my_job
= *((struct job
*) arg
);
272 /* Check for special control messages. */
273 if (job_m_in
.m_source
== CLOCK
) {
274 /* Alarm timer expired. Used only for select(). Check it. */
275 expire_timers(job_m_in
.NOTIFY_TIMESTAMP
);
277 /* Device notifies us of an event. */
278 dev_status(job_m_in
.m_source
);
281 thread_cleanup(NULL
);
285 /*===========================================================================*
287 *===========================================================================*/
288 static void *do_fs_reply(struct job
*job
)
/* Route a reply from an FS server back to the worker thread that is blocked
 * in a sendrec to that server: copy the message into the worker's sendrec
 * buffer, decrement the vmnt's in-flight request count, and wake the worker. */
291 struct worker_thread
*wp
;
293 if ((vmp
= find_vmnt(who_e
)) == NULL
)
294 panic("Couldn't find vmnt for endpoint %d", who_e
);
296 wp
= worker_get(job
->j_fp
->fp_wtid
);
299 printf("VFS: spurious reply from %d\n", who_e
);
/* Sanity check: the worker must actually be waiting on this FS endpoint. */
303 if (wp
->w_task
!= who_e
) {
304 printf("VFS: expected %d to reply, not %d\n", wp
->w_task
, who_e
);
307 *wp
->w_fs_sendrec
= m_in
;
309 vmp
->m_comm
.c_cur_reqs
--; /* We've got our reply, make room for others */
310 worker_signal(wp
); /* Continue this thread */
314 /*===========================================================================*
316 *===========================================================================*/
317 static void lock_pm(void)
/* Acquire the global PM lock. Tries a non-blocking trylock first; only if
 * that fails does it fall back to a blocking mutex_lock.
 * NOTE(review): the extraction dropped the lines between the trylock and the
 * blocking lock (presumably saving/restoring fp/self around the block —
 * org_fp/org_self are declared for that); confirm against full source. */
319 struct fproc
*org_fp
;
320 struct worker_thread
*org_self
;
322 /* First try to get it right off the bat */
323 if (mutex_trylock(&pm_lock
) == 0)
329 if (mutex_lock(&pm_lock
) != 0)
330 panic("Could not obtain lock on pm\n");
336 /*===========================================================================*
338 *===========================================================================*/
339 static void unlock_pm(void)
341 if (mutex_unlock(&pm_lock
) != 0)
342 panic("Could not release lock on pm");
345 /*===========================================================================*
347 *===========================================================================*/
348 static void *do_pm(void *arg __unused
)
/* Worker-thread entry for PM requests.
 * NOTE(review): the body was almost entirely dropped by the extraction; only
 * the final cleanup call is visible. Presumably it takes pm_lock and calls
 * service_pm() — confirm against full source. */
354 thread_cleanup(NULL
);
358 /*===========================================================================*
360 *===========================================================================*/
361 static void *do_pending_pipe(void *arg
)
/* Worker-thread entry to complete a pipe read/write that was previously
 * suspended: take the filp stashed in the process's scratchpad, lock it for
 * read or write depending on the blocked call, retry the pipe I/O, and reply
 * unless the process suspends again. */
366 tll_access_t locktype
;
368 my_job
= *((struct job
*) arg
);
371 lock_proc(fp
, 1 /* force lock */);
/* The suspended filp was parked in the scratchpad; consume and clear it. */
373 f
= scratch(fp
).file
.filp
;
375 scratch(fp
).file
.filp
= NULL
;
377 locktype
= (job_call_nr
== READ
) ? VNODE_READ
: VNODE_WRITE
;
378 op
= (job_call_nr
== READ
) ? READING
: WRITING
;
379 lock_filp(f
, locktype
);
381 r
= rw_pipe(op
, who_e
, f
, scratch(fp
).io
.io_buffer
, scratch(fp
).io
.io_nbytes
);
383 if (r
!= SUSPEND
) /* Do we have results to report? */
384 reply(fp
->fp_endpoint
, r
);
392 /*===========================================================================*
394 *===========================================================================*/
395 void *do_dummy(void *arg
)
/* Worker-thread entry that does no real work: started by service_pm() so a
 * postponed PM request gets handled via the normal worker/cleanup path.
 * If the process lock is free we got it; otherwise the worker already
 * holding it will pick up the pending PM work. */
400 my_job
= *((struct job
*) arg
);
403 if ((r
= mutex_trylock(&fp
->fp_lock
)) == 0) {
406 /* Proc is busy, let that worker thread carry out the work */
407 thread_cleanup(NULL
);
412 /*===========================================================================*
414 *===========================================================================*/
415 static void *do_work(void *arg
)
/* Worker-thread entry for a normal system call: validate the call number,
 * dispatch through pfs_call_vec (nested calls from PFS only) or call_vec
 * (POSIX calls), then clear call-back/deadlock bookkeeping and reply to the
 * caller unless the call suspended. */
420 my_job
= *((struct job
*) arg
);
423 lock_proc(fp
, 0); /* This proc is busy */
425 if (job_call_nr
== MAPDRIVER
) {
426 error
= do_mapdriver();
427 } else if (job_call_nr
== COMMON_GETSYSINFO
) {
428 error
= do_getsysinfo();
429 } else if (IS_PFS_VFS_RQ(job_call_nr
)) {
430 if (who_e
!= PFS_PROC_NR
) {
431 printf("VFS: only PFS is allowed to make nested VFS calls\n");
433 } else if (job_call_nr
<= PFS_BASE
||
434 job_call_nr
>= PFS_BASE
+ PFS_NREQS
) {
437 job_call_nr
-= PFS_BASE
;
438 error
= (*pfs_call_vec
[job_call_nr
])();
441 /* We're dealing with a POSIX system call from a normal
442 * process. Call the internal function that does the work.
444 if (job_call_nr
< 0 || job_call_nr
>= NCALLS
) {
446 } else if (fp
->fp_pid
== PID_FREE
) {
447 /* Process vanished before we were able to handle request.
448 * Replying has no use. Just drop it. */
451 #if ENABLE_SYSCALL_STATS
452 calls_stats
[job_call_nr
]++;
454 error
= (*call_vec
[job_call_nr
])();
458 /* Copy the results back to the user and send reply. */
459 if (error
!= SUSPEND
) {
/* FS endpoints get their one-call-back-at-a-time flag cleared here. */
461 if ((fp
->fp_flags
& FP_SYS_PROC
)) {
464 if ((vmp
= find_vmnt(fp
->fp_endpoint
)) != NULL
)
465 vmp
->m_flags
&= ~VMNT_CALLBACK
;
468 if (deadlock_resolving
) {
469 if (fp
->fp_wtid
== dl_worker
.w_tid
)
470 deadlock_resolving
= 0;
473 reply(fp
->fp_endpoint
, error
);
480 /*===========================================================================*
481 * sef_local_startup *
482 *===========================================================================*/
483 static void sef_local_startup()
/* Register SEF callbacks (fresh init, and fail-on-restart since VFS cannot
 * be restarted statefully), then hand control to SEF startup.
 * NOTE(review): the sef_startup() call itself was dropped by the extraction. */
485 /* Register init callbacks. */
486 sef_setcb_init_fresh(sef_cb_init_fresh
);
487 sef_setcb_init_restart(sef_cb_init_fail
);
489 /* No live update support for now. */
491 /* Let SEF perform startup. */
495 /*===========================================================================*
496 * sef_cb_init_fresh *
497 *===========================================================================*/
498 static int sef_cb_init_fresh(int UNUSED(type
), sef_init_info_t
*info
)
/* One-time VFS initialization: clear the fproc table, populate it from PM's
 * PM_INIT message stream, map boot-image services, subscribe to driver
 * events via DS, spin up worker threads, initialize global locks and all
 * per-process/vnode/vmnt/filp/select structures, then kick off do_init_root
 * to mount the boot ramdisk as root. */
500 /* Initialize the virtual file server. */
504 struct rprocpub rprocpub
[NR_BOOT_PROCS
];
511 /* Initialize proc endpoints to NONE */
512 for (rfp
= &fproc
[0]; rfp
< &fproc
[NR_PROCS
]; rfp
++) {
513 rfp
->fp_endpoint
= NONE
;
514 rfp
->fp_pid
= PID_FREE
;
517 /* Initialize the process table with help of the process manager messages.
518 * Expect one message for each system process with its slot number and pid.
519 * When no more processes follow, the magic process number NONE is sent.
520 * Then, stop and synchronize with the PM.
523 if ((s
= sef_receive(PM_PROC_NR
, &mess
)) != OK
)
524 panic("VFS: couldn't receive from PM: %d", s
);
526 if (mess
.m_type
!= PM_INIT
)
527 panic("unexpected message from PM: %d", mess
.m_type
);
529 if (NONE
== mess
.PM_PROC
) break;
531 rfp
= &fproc
[mess
.PM_SLOT
];
532 rfp
->fp_flags
= FP_NOFLAGS
;
533 rfp
->fp_pid
= mess
.PM_PID
;
534 rfp
->fp_endpoint
= mess
.PM_PROC
;
535 rfp
->fp_grant
= GRANT_INVALID
;
536 rfp
->fp_blocked_on
= FP_BLOCKED_ON_NONE
;
/* Boot-time system processes run with system credentials. */
537 rfp
->fp_realuid
= (uid_t
) SYS_UID
;
538 rfp
->fp_effuid
= (uid_t
) SYS_UID
;
539 rfp
->fp_realgid
= (gid_t
) SYS_GID
;
540 rfp
->fp_effgid
= (gid_t
) SYS_GID
;
542 } while (TRUE
); /* continue until process NONE */
543 mess
.m_type
= OK
; /* tell PM that we succeeded */
544 s
= send(PM_PROC_NR
, &mess
); /* send synchronization message */
546 /* All process table entries have been set. Continue with initialization. */
547 fp
= &fproc
[_ENDPOINT_P(VFS_PROC_NR
)];/* During init all communication with
548 * FSes is on behalf of myself */
549 init_dmap(); /* Initialize device table. */
550 system_hz
= sys_hz();
552 /* Map all the services in the boot image. */
553 if ((s
= sys_safecopyfrom(RS_PROC_NR
, info
->rproctab_gid
, 0,
554 (vir_bytes
) rprocpub
, sizeof(rprocpub
))) != OK
){
555 panic("sys_safecopyfrom failed: %d", s
);
557 for (i
= 0; i
< NR_BOOT_PROCS
; i
++) {
558 if (rprocpub
[i
].in_use
) {
559 if ((s
= map_service(&rprocpub
[i
])) != OK
) {
560 panic("VFS: unable to map service: %d", s
);
565 /* Subscribe to block and character driver events. */
566 s
= ds_subscribe("drv\\.[bc]..\\..*", DSF_INITIAL
| DSF_OVERWRITE
);
567 if (s
!= OK
) panic("VFS: can't subscribe to driver events (%d)", s
);
569 /* Initialize worker threads */
570 for (i
= 0; i
< NR_WTHREADS
; i
++) {
571 worker_init(&workers
[i
]);
573 worker_init(&sys_worker
); /* exclusive system worker thread */
574 worker_init(&dl_worker
); /* exclusive worker thread to resolve deadlocks */
576 /* Initialize global locks */
577 if (mthread_mutex_init(&pm_lock
, NULL
) != 0)
578 panic("VFS: couldn't initialize pm lock mutex");
579 if (mthread_mutex_init(&exec_lock
, NULL
) != 0)
580 panic("VFS: couldn't initialize exec lock");
581 if (mthread_mutex_init(&bsf_lock
, NULL
) != 0)
582 panic("VFS: couldn't initialize block special file lock");
584 /* Initialize event resources for boot procs and locks for all procs */
585 for (rfp
= &fproc
[0]; rfp
< &fproc
[NR_PROCS
]; rfp
++) {
586 if (mutex_init(&rfp
->fp_lock
, NULL
) != 0)
587 panic("unable to initialize fproc lock");
589 rfp
->fp_vp_rdlocks
= 0;
590 rfp
->fp_vmnt_rdlocks
= 0;
594 init_dmap_locks(); /* init dmap locks */
595 init_vnodes(); /* init vnodes */
596 init_vmnts(); /* init vmnt structures */
597 init_select(); /* init select() structures */
598 init_filps(); /* Init filp structures */
599 mount_pfs(); /* mount Pipe File Server */
600 worker_start(do_init_root
); /* mount initial ramdisk as file system root */
601 yield(); /* force do_init_root to start */
607 /*===========================================================================*
609 *===========================================================================*/
610 static void *do_init_root(void *arg
)
/* Worker-thread entry that mounts the boot ramdisk (DEV_IMGRD) on "/" as the
 * file system root. While mounting, receive_from is narrowed to MFS_PROC_NR
 * so the main loop only accepts messages from the root FS server. */
615 char *mount_label
= "fs_imgrd"; /* FIXME: obtain this from RS */
617 my_job
= *((struct job
*) arg
);
620 lock_proc(fp
, 1 /* force lock */); /* This proc is busy */
623 /* Initialize process directories. mount_fs will set them to the correct
625 for (rfp
= &fproc
[0]; rfp
< &fproc
[NR_PROCS
]; rfp
++) {
626 FD_ZERO(&(rfp
->fp_filp_inuse
));
631 receive_from
= MFS_PROC_NR
;
632 if ((r
= mount_fs(DEV_IMGRD
, "/", MFS_PROC_NR
, 0, mount_label
)) != OK
)
633 panic("Failed to initialize root");
641 /*===========================================================================*
643 *===========================================================================*/
644 void lock_proc(struct fproc
*rfp
, int force_lock
)
/* Acquire the per-process lock for 'rfp'. A trylock is attempted first; on
 * contention it falls back to a blocking mutex_lock.
 * NOTE(review): the extraction dropped the middle of this function (the
 * force_lock handling and the save/restore of fp/self that org_fp/org_self
 * are declared for); confirm against full source before relying on details. */
647 struct fproc
*org_fp
;
648 struct worker_thread
*org_self
;
650 r
= mutex_trylock(&rfp
->fp_lock
);
652 /* Were we supposed to obtain this lock immediately? */
663 if ((r
= mutex_lock(&rfp
->fp_lock
)) != 0)
664 panic("unable to lock fproc lock: %d", r
);
670 /*===========================================================================*
672 *===========================================================================*/
673 void unlock_proc(struct fproc
*rfp
)
677 if ((r
= mutex_unlock(&rfp
->fp_lock
)) != 0)
678 panic("Failed to unlock: %d", r
);
681 /*===========================================================================*
683 *===========================================================================*/
684 void thread_cleanup(struct fproc
*rfp
)
/* End-of-job cleanup for a worker thread: verify this thread holds no filp,
 * vnode, or vmnt locks; run any PM request that was postponed while the
 * process was busy (FP_PM_PENDING); and clear FP_DROP_WORK.
 * NOTE(review): the extraction dropped lines between these steps (likely
 * unlock_proc and conditional guards); treat this fragment as incomplete. */
686 /* Clean up worker thread. Skip parts if this thread is not associated
687 * with a particular process (i.e., rfp is NULL) */
691 check_filp_locks_by_me();
692 check_vnode_locks_by_me(rfp
);
693 check_vmnt_locks_by_me(rfp
);
697 if (rfp
!= NULL
&& rfp
->fp_flags
& FP_PM_PENDING
) { /* Postponed PM call */
698 job_m_in
= rfp
->fp_job
.j_m_in
;
699 rfp
->fp_flags
&= ~FP_PM_PENDING
;
700 service_pm_postponed();
705 check_filp_locks_by_me();
706 check_vnode_locks_by_me(rfp
);
707 check_vmnt_locks_by_me(rfp
);
712 rfp
->fp_flags
&= ~FP_DROP_WORK
;
717 /*===========================================================================*
719 *===========================================================================*/
720 static void get_work()
/* Fetch the next unit of work into m_in and set fp/who_p/who_e for it.
 * Revived (previously suspended) processes take priority over new messages;
 * otherwise block in sef_receive(receive_from, ...). Performs consistency
 * checks between the received endpoint and the fproc table. */
722 /* Normally wait for new input. However, if 'reviving' is
723 * nonzero, a suspended process must be awakened.
725 int r
, found_one
, proc_p
;
726 register struct fproc
*rp
;
728 while (reviving
!= 0) {
731 /* Find a suspended process. */
732 for (rp
= &fproc
[0]; rp
< &fproc
[NR_PROCS
]; rp
++)
733 if (rp
->fp_pid
!= PID_FREE
&& (rp
->fp_flags
& FP_REVIVED
)) {
734 found_one
= TRUE
; /* Found a suspended process */
736 return; /* So main loop can process job */
740 if (!found_one
) /* Consistency error */
741 panic("VFS: get_work couldn't revive anyone");
745 /* Normal case. No one to revive. Get a useful request. */
746 if ((r
= sef_receive(receive_from
, &m_in
)) != OK
) {
747 panic("VFS: sef_receive error: %d", r
);
750 proc_p
= _ENDPOINT_P(m_in
.m_source
);
751 if (proc_p
< 0 || proc_p
>= NR_PROCS
) fp
= NULL
;
752 else fp
= &fproc
[proc_p
];
754 if (m_in
.m_type
== EDEADSRCDST
) return; /* Failed 'sendrec' */
756 /* Negative who_p is never used to access the fproc array. Negative
757 * numbers (kernel tasks) are treated in a special way.
759 if (who_p
>= (int)(sizeof(fproc
) / sizeof(struct fproc
)))
760 panic("receive process out of range: %d", who_p
);
761 if (who_p
>= 0 && fproc
[who_p
].fp_endpoint
== NONE
) {
762 printf("VFS: ignoring request from %d: NONE endpoint %d (%d)\n",
763 m_in
.m_source
, who_p
, m_in
.m_type
);
767 /* Internal consistency check; our mental image of process numbers and
768 * endpoints must match with how the rest of the system thinks of them.
770 if (who_p
>= 0 && fproc
[who_p
].fp_endpoint
!= who_e
) {
771 if (fproc
[who_p
].fp_endpoint
== NONE
)
772 printf("slot unknown even\n");
774 printf("VFS: receive endpoint inconsistent (source %d, who_p "
775 "%d, stored ep %d, who_e %d).\n", m_in
.m_source
, who_p
,
776 fproc
[who_p
].fp_endpoint
, who_e
);
777 panic("VFS: inconsistent endpoint ");
784 /*===========================================================================*
786 *===========================================================================*/
787 void reply(endpoint_t whom
, int result
)
/* Send 'result' to endpoint 'whom' via a non-blocking send (sendnb); a
 * failed send is only logged, never fatal.
 * NOTE(review): the extraction dropped the tail of this function (the
 * error check around the printf and its remaining arguments). */
789 /* Send a reply to a user process. If the send fails, just ignore it. */
792 m_out
.reply_type
= result
;
793 r
= sendnb(whom
, &m_out
);
795 printf("VFS: %d couldn't send reply %d to %d: %d\n", mthread_self(),
801 /*===========================================================================*
802 * service_pm_postponed *
803 *===========================================================================*/
804 static void service_pm_postponed(void)
/* Execute a PM request that had to wait for the target process to become
 * idle (exec, exit, dumpcore — the visible cases), then send the matching
 * *_REPLY message back to PM. Fatal if the reply cannot be sent. */
809 switch(job_call_nr
) {
813 vir_bytes exec_path
, stack_frame
;
814 size_t exec_path_len
, stack_frame_len
;
816 proc_e
= job_m_in
.PM_PROC
;
817 exec_path
= (vir_bytes
) job_m_in
.PM_PATH
;
818 exec_path_len
= (size_t) job_m_in
.PM_PATH_LEN
;
819 stack_frame
= (vir_bytes
) job_m_in
.PM_FRAME
;
820 stack_frame_len
= (size_t) job_m_in
.PM_FRAME_LEN
;
822 r
= pm_exec(proc_e
, exec_path
, exec_path_len
, stack_frame
,
823 stack_frame_len
, &pc
, &newsp
, job_m_in
.PM_EXECFLAGS
);
825 /* Reply status to PM */
826 m_out
.m_type
= PM_EXEC_REPLY
;
827 m_out
.PM_PROC
= proc_e
;
828 m_out
.PM_PC
= (void*) pc
;
830 m_out
.PM_NEWSP
= (void *) newsp
;
837 proc_e
= job_m_in
.PM_PROC
;
841 /* Reply dummy status to PM for synchronization */
842 m_out
.m_type
= PM_EXIT_REPLY
;
843 m_out
.PM_PROC
= proc_e
;
849 endpoint_t proc_e
, traced_proc_e
;
853 proc_e
= job_m_in
.PM_PROC
;
854 traced_proc_e
= job_m_in
.PM_TRACED_PROC
;
/* PM_PROC != PM_TRACED_PROC distinguishes an explicit dumpcore request
 * from a dump triggered by process exit. */
855 if(job_m_in
.PM_PROC
!= job_m_in
.PM_TRACED_PROC
) {
856 /* dumpcore request */
859 /* dumpcore on exit */
860 term_signal
= job_m_in
.PM_TERM_SIG
;
862 core_path
= (vir_bytes
) job_m_in
.PM_PATH
;
864 r
= pm_dumpcore(proc_e
, term_signal
, core_path
);
866 /* Reply status to PM */
867 m_out
.m_type
= PM_CORE_REPLY
;
868 m_out
.PM_PROC
= proc_e
;
869 m_out
.PM_TRACED_PROC
= traced_proc_e
;
875 panic("Unhandled postponed PM call %d", job_m_in
.m_type
);
878 r
= send(PM_PROC_NR
, &m_out
);
880 panic("service_pm_postponed: send failed: %d", r
);
883 /*===========================================================================*
885 *===========================================================================*/
886 static void service_pm()
/* Dispatch a PM request by call number. Simple credential/session requests
 * (setuid/setgid/setsid/setgroups/fork/unpause/reboot) are handled inline
 * and answered immediately; exec/exit/dumpcore-style requests targeting a
 * possibly-busy process are parked on the target's fproc (FP_PM_PENDING)
 * and later run by service_pm_postponed(), with do_dummy() started when the
 * target is idle so the normal worker path picks the request up. */
890 switch (job_call_nr
) {
896 proc_e
= job_m_in
.PM_PROC
;
897 euid
= job_m_in
.PM_EID
;
898 ruid
= job_m_in
.PM_RID
;
900 pm_setuid(proc_e
, euid
, ruid
);
902 m_out
.m_type
= PM_SETUID_REPLY
;
903 m_out
.PM_PROC
= proc_e
;
912 proc_e
= job_m_in
.PM_PROC
;
913 egid
= job_m_in
.PM_EID
;
914 rgid
= job_m_in
.PM_RID
;
916 pm_setgid(proc_e
, egid
, rgid
);
918 m_out
.m_type
= PM_SETGID_REPLY
;
919 m_out
.PM_PROC
= proc_e
;
927 proc_e
= job_m_in
.PM_PROC
;
930 m_out
.m_type
= PM_SETSID_REPLY
;
931 m_out
.PM_PROC
= proc_e
;
939 endpoint_t proc_e
= job_m_in
.PM_PROC
;
941 if(isokendpt(proc_e
, &slot
) != OK
) {
942 printf("VFS: proc ep %d not ok\n", proc_e
);
948 if (fp
->fp_flags
& FP_PENDING
) {
949 /* This process has a request pending, but PM wants it
950 * gone. Forget about the pending request and satisfy
951 * PM's request instead. Note that a pending request
952 * AND an EXEC request are mutually exclusive. Also, PM
953 * should send only one request/process at a time.
955 assert(fp
->fp_job
.j_m_in
.m_source
!= PM_PROC_NR
);
958 /* PM requests on behalf of a proc are handled after the
959 * system call that might be in progress for that proc has
960 * finished. If the proc is not busy, we start a dummy call.
962 if (!(fp
->fp_flags
& FP_PENDING
) &&
963 mutex_trylock(&fp
->fp_lock
) == 0) {
964 mutex_unlock(&fp
->fp_lock
);
965 worker_start(do_dummy
);
966 fp
->fp_flags
|= FP_DROP_WORK
;
/* Park the PM request on the target process; thread_cleanup() will
 * pick it up via FP_PM_PENDING and run service_pm_postponed(). */
969 fp
->fp_job
.j_m_in
= job_m_in
;
970 fp
->fp_flags
|= FP_PM_PENDING
;
977 endpoint_t pproc_e
, proc_e
;
982 pproc_e
= job_m_in
.PM_PPROC
;
983 proc_e
= job_m_in
.PM_PROC
;
984 child_pid
= job_m_in
.PM_CPID
;
985 reuid
= job_m_in
.PM_REUID
;
986 regid
= job_m_in
.PM_REGID
;
988 pm_fork(pproc_e
, proc_e
, child_pid
);
989 m_out
.m_type
= PM_FORK_REPLY
;
/* A fork of a system service additionally applies the supplied
 * credentials to the child. */
991 if (job_call_nr
== PM_SRV_FORK
) {
992 m_out
.m_type
= PM_SRV_FORK_REPLY
;
993 pm_setuid(proc_e
, reuid
, reuid
);
994 pm_setgid(proc_e
, regid
, regid
);
997 m_out
.PM_PROC
= proc_e
;
1006 proc_e
= job_m_in
.PM_PROC
;
1007 group_no
= job_m_in
.PM_GROUP_NO
;
1008 group_addr
= (gid_t
*) job_m_in
.PM_GROUP_ADDR
;
1010 pm_setgroups(proc_e
, group_no
, group_addr
);
1012 m_out
.m_type
= PM_SETGROUPS_REPLY
;
1013 m_out
.PM_PROC
= proc_e
;
1021 proc_e
= job_m_in
.PM_PROC
;
1025 m_out
.m_type
= PM_UNPAUSE_REPLY
;
1026 m_out
.PM_PROC
= proc_e
;
1033 /* Reply dummy status to PM for synchronization */
1034 m_out
.m_type
= PM_REBOOT_REPLY
;
1039 printf("VFS: don't know how to handle PM request %d\n", job_call_nr
);
1044 r
= send(PM_PROC_NR
, &m_out
);
1046 panic("service_pm: send failed: %d", r
);
1051 /*===========================================================================*
1053 *===========================================================================*/
1054 static int unblock(rfp
)
/* Revive a previously suspended process: rebuild m_in from the state saved
 * in its fproc slot and scratchpad, clear its blocked/revived state, and —
 * for pipe I/O — start do_pending_pipe directly, returning 0 so the main
 * loop fetches more work; otherwise return 1 so the rebuilt call is
 * re-processed. NOTE(review): old-style (K&R) definition; the parameter
 * declaration and local declarations were dropped by the extraction. */
1060 blocked_on
= rfp
->fp_blocked_on
;
1061 m_in
.m_source
= rfp
->fp_endpoint
;
1062 m_in
.m_type
= rfp
->fp_block_callnr
;
1063 m_in
.fd
= scratch(fp
).file
.fd_nr
;
1064 m_in
.buffer
= scratch(fp
).io
.io_buffer
;
1065 m_in
.nbytes
= scratch(fp
).io
.io_nbytes
;
1067 rfp
->fp_blocked_on
= FP_BLOCKED_ON_NONE
; /* no longer blocked */
1068 rfp
->fp_flags
&= ~FP_REVIVED
;
1070 assert(reviving
>= 0);
1072 /* This should be a pipe I/O, not a device I/O. If it is, it'll 'leak'
1075 assert(!GRANT_VALID(rfp
->fp_grant
));
1077 /* Pending pipe reads/writes can be handled directly */
1078 if (blocked_on
== FP_BLOCKED_ON_PIPE
) {
1079 worker_start(do_pending_pipe
);
1080 yield(); /* Give thread a chance to run */
1082 return(0); /* Retrieve more work */
1085 return(1); /* We've unblocked a process */