2 * a loop that gets messages requesting work, carries out the work, and sends
5 * The entry points into this file are:
6 * main: main program of the Virtual File System
7 * reply: send a reply to a process after the requested work is done
18 #include <sys/ioc_memory.h>
19 #include <sys/svrctl.h>
20 #include <sys/select.h>
21 #include <minix/callnr.h>
22 #include <minix/com.h>
23 #include <minix/keymap.h>
24 #include <minix/const.h>
25 #include <minix/endpoint.h>
26 #include <minix/safecopies.h>
27 #include <minix/debug.h>
28 #include <minix/vfsif.h>
32 #include "scratchpad.h"
/* Per-syscall invocation counters, compiled in only when
 * ENABLE_SYSCALL_STATS is set; indexed by call number (see do_work). */
38 #if ENABLE_SYSCALL_STATS
39 EXTERN
unsigned long calls_stats
[NCALLS
];
42 /* Thread related prototypes */
/* Each do_* routine below runs as the body of a worker thread; the
 * void *arg carries the job (request message + process) to handle. */
43 static void *do_async_dev_result(void *arg
);
44 static void *do_control_msgs(void *arg
);
45 static void *do_dev_event(void *arg
);
46 static void *do_fs_reply(struct job
*job
);
47 static void *do_work(void *arg
);
48 static void *do_pm(void *arg
);
49 static void *do_init_root(void *arg
);
50 static void handle_work(void *(*func
)(void *arg
));
/* Main-loop helpers: message retrieval, PM serialization, and revival
 * of processes blocked on pipe I/O. */
52 static void get_work(void);
53 static void lock_pm(void);
54 static void unlock_pm(void);
55 static void service_pm(void);
56 static void service_pm_postponed(void);
57 static int unblock(struct fproc
*rfp
);
59 /* SEF functions and variables. */
60 static void sef_local_startup(void);
61 static int sef_cb_init_fresh(int type
, sef_init_info_t
*info
);
/* Serializes handling of PM requests across worker threads
 * (taken via lock_pm/unlock_pm). */
62 static mutex_t pm_lock
;
/* Restricts which endpoint get_work()'s sef_receive() accepts messages
 * from; set to MFS_PROC_NR during root mount in do_init_root(). */
63 static endpoint_t receive_from
;
65 /*===========================================================================*
67 *===========================================================================*/
/* Main program of VFS: after SEF startup, loop forever — receive a
 * message, classify it (FS transaction reply, PM control message,
 * notification, driver reply, or normal syscall) and dispatch it to the
 * appropriate worker routine.
 * NOTE(review): fragmentary excerpt — the signature line and several
 * original source lines are elided from this view. */
70 /* This is the main program of the file system. The main loop consists of
71 * three major activities: getting new work, processing the work, and sending
72 * the reply. This loop never terminates as long as the file system runs.
77 /* SEF local startup. */
80 printf("Started VFS: %d worker thread(s)\n", NR_WTHREADS
);
82 if (OK
!= (sys_getkinfo(&kinfo
)))
83 panic("couldn't get kernel kinfo");
85 /* This is the main loop that gets work, processes it, and sends replies. */
87 yield_all(); /* let other threads run */
/* FS replies carry a transaction id encoded in m_type; a valid id
 * identifies the worker thread that issued the request. */
93 transid
= TRNS_GET_ID(m_in
.m_type
);
94 if (IS_VFS_FS_TRANSID(transid
)) {
95 job
= worker_getjob( (thread_t
) transid
- VFS_TRANSID
);
97 printf("VFS: spurious message %d from endpoint %d\n",
98 m_in
.m_type
, m_in
.m_source
);
/* Strip the transaction id so the plain request type remains. */
101 m_in
.m_type
= TRNS_DEL_ID(m_in
.m_type
);
107 } else if (who_e
== PM_PROC_NR
) { /* Calls from PM */
108 /* Special control messages from PM */
109 sys_worker_start(do_pm
);
111 } else if (is_notify(call_nr
)) {
112 /* A task notify()ed us */
113 if (who_e
== DS_PROC_NR
)
114 handle_work(ds_event
);
115 else if (who_e
== KERNEL
)
116 mthread_stacktraces();
117 else if (fp
!= NULL
&& (fp
->fp_flags
& FP_SRV_PROC
))
118 handle_work(do_dev_event
);
120 sys_worker_start(do_control_msgs
);
122 } else if (who_p
< 0) { /* i.e., message comes from a task */
123 /* We're going to ignore this message. Tasks should
124 * send notify()s only.
126 printf("VFS: ignoring message from %d (%d)\n", who_e
, call_nr
);
130 /* At this point we either have results from an asynchronous device
131 * or a new system call. In both cases a new worker thread has to be
132 * started and there might not be one available from the pool. This is
133 * not a problem (requests/replies are simply queued), except when
134 * they're from an FS endpoint, because these can cause a deadlock.
135 * handle_work() takes care of the details. */
136 if (IS_DRV_REPLY(call_nr
)) {
137 /* We've got results for a device request */
141 dp
= get_dmap(who_e
);
143 if (dev_style_asyn(dp
->dmap_style
)) {
144 handle_work(do_async_dev_result
);
147 if (dp
->dmap_servicing
== NONE
) {
148 printf("Got spurious dev reply from %d",
156 printf("VFS: ignoring dev reply from unknown driver %d\n",
159 /* Normal syscall. */
160 handle_work(do_work
);
/* Not reached: the work loop above never terminates. */
163 return(OK
); /* shouldn't come here */
166 /*===========================================================================*
168 *===========================================================================*/
/* Dispatch one unit of work to a worker thread. Requests originating
 * from an FS (server) endpoint get the vmnt's VMNT_CALLBACK flag set —
 * only one call-back per vmnt is allowed at a time (others get EAGAIN).
 * If no regular worker is free, the dedicated deadlock-resolving worker
 * is used, and only one deadlock resolution may be in flight at once. */
169 static void handle_work(void *(*func
)(void *arg
))
171 /* Handle asynchronous device replies and new system calls. If the originating
172 * endpoint is an FS endpoint, take extra care not to get in deadlock. */
173 struct vmnt
*vmp
= NULL
;
176 proc_e
= m_in
.m_source
;
178 if (fp
->fp_flags
& FP_SRV_PROC
) {
179 vmp
= find_vmnt(proc_e
);
181 /* A call back or dev result from an FS
182 * endpoint. Set call back flag. Can do only
183 * one call back at a time.
185 if (vmp
->m_flags
& VMNT_CALLBACK
) {
186 reply(proc_e
, EAGAIN
);
189 vmp
->m_flags
|= VMNT_CALLBACK
;
/* While the FS is still being mounted, force use of the boot
 * ramdisk root for block-special files on it. */
190 if (vmp
->m_flags
& VMNT_MOUNTING
) {
191 vmp
->m_flags
|= VMNT_FORCEROOTBSF
;
195 if (worker_available() == 0) {
196 if (!deadlock_resolving
) {
197 deadlock_resolving
= 1;
198 dl_worker_start(func
);
203 /* Already trying to resolve a deadlock, can't
204 * handle more, sorry */
206 reply(proc_e
, EAGAIN
);
215 /*===========================================================================*
216 * do_async_dev_result *
217 *===========================================================================*/
/* Worker-thread body: process the reply of an asynchronous character
 * driver — revive the suspended process on DEV_REVIVE, or forward
 * open/reopen/close/select replies to the respective handlers.
 * NOTE(review): fragmentary excerpt — some original lines are elided. */
218 static void *do_async_dev_result(void *arg
)
223 my_job
= *((struct job
*) arg
);
226 /* An asynchronous character driver has results for us */
227 if (job_call_nr
== DEV_REVIVE
) {
228 endpt
= job_m_in
.REP_ENDPT
;
/* A reply addressed to VFS itself refers to a suspended process,
 * identified by the driver's source endpoint and the I/O grant. */
229 if (endpt
== VFS_PROC_NR
)
230 endpt
= find_suspended_ep(job_m_in
.m_source
,
231 job_m_in
.REP_IO_GRANT
);
234 printf("VFS: proc with grant %d from %d not found\n",
235 job_m_in
.REP_IO_GRANT
, job_m_in
.m_source
);
236 } else if (job_m_in
.REP_STATUS
== SUSPEND
) {
237 printf("VFS: got SUSPEND on DEV_REVIVE: not reviving proc\n");
239 revive(endpt
, job_m_in
.REP_STATUS
);
241 else if (job_call_nr
== DEV_OPEN_REPL
) open_reply();
242 else if (job_call_nr
== DEV_REOPEN_REPL
) reopen_reply();
243 else if (job_call_nr
== DEV_CLOSE_REPL
) close_reply();
244 else if (job_call_nr
== DEV_SEL_REPL1
)
245 select_reply1(job_m_in
.m_source
, job_m_in
.DEV_MINOR
,
246 job_m_in
.DEV_SEL_OPS
);
247 else if (job_call_nr
== DEV_SEL_REPL2
)
248 select_reply2(job_m_in
.m_source
, job_m_in
.DEV_MINOR
,
249 job_m_in
.DEV_SEL_OPS
);
255 /*===========================================================================*
257 *===========================================================================*/
/* Worker-thread body: handle special control messages; currently only
 * CLOCK notifications, which drive select() timeout expiry. */
258 static void *do_control_msgs(void *arg
)
262 my_job
= *((struct job
*) arg
);
265 /* Check for special control messages. */
266 if (job_m_in
.m_source
== CLOCK
) {
267 /* Alarm timer expired. Used only for select(). Check it. */
268 expire_timers(job_m_in
.NOTIFY_TIMESTAMP
);
/* NULL: this worker is not associated with a particular process. */
271 thread_cleanup(NULL
);
275 /*===========================================================================*
277 *===========================================================================*/
/* Worker-thread body: a device driver notified us of an event; let the
 * device layer inspect the driver's status. */
278 static void *do_dev_event(void *arg
)
280 /* Device notifies us of an event. */
283 my_job
= *((struct job
*) arg
);
286 dev_status(job_m_in
.m_source
);
292 /*===========================================================================*
294 *===========================================================================*/
/* Match an incoming FS reply to the worker thread that issued the
 * request (via the job's fproc worker-thread id), hand over the reply
 * message, and wake that worker. Spurious or mismatched replies are
 * reported and dropped. */
295 static void *do_fs_reply(struct job
*job
)
298 struct worker_thread
*wp
;
300 if ((vmp
= find_vmnt(who_e
)) == NULL
)
301 panic("Couldn't find vmnt for endpoint %d", who_e
);
303 wp
= worker_get(job
->j_fp
->fp_wtid
);
306 printf("VFS: spurious reply from %d\n", who_e
);
/* Sanity: the reply must come from the FS the worker sent to. */
310 if (wp
->w_task
!= who_e
) {
311 printf("VFS: expected %d to reply, not %d\n", wp
->w_task
, who_e
);
/* Deliver the reply message into the worker's sendrec buffer. */
314 *wp
->w_fs_sendrec
= m_in
;
316 vmp
->m_comm
.c_cur_reqs
--; /* We've got our reply, make room for others */
317 worker_signal(wp
); /* Continue this thread */
321 /*===========================================================================*
323 *===========================================================================*/
/* Acquire the PM serialization lock: try without blocking first, and
 * fall back to a blocking mutex_lock if the fast path fails.
 * NOTE(review): lines between the trylock and the blocking lock are
 * elided in this excerpt. */
324 static void lock_pm(void)
326 struct fproc
*org_fp
;
327 struct worker_thread
*org_self
;
329 /* First try to get it right off the bat */
330 if (mutex_trylock(&pm_lock
) == 0)
336 if (mutex_lock(&pm_lock
) != 0)
337 panic("Could not obtain lock on pm\n");
343 /*===========================================================================*
345 *===========================================================================*/
/* Release the PM serialization lock taken by lock_pm(). */
346 static void unlock_pm(void)
348 if (mutex_unlock(&pm_lock
) != 0)
349 panic("Could not release lock on pm");
352 /*===========================================================================*
354 *===========================================================================*/
/* Worker-thread body for PM control messages.
 * NOTE(review): the body between the signature and the cleanup call is
 * elided here — presumably it serializes via lock_pm() and dispatches
 * to service_pm(); confirm against the full source. */
355 static void *do_pm(void *arg __unused
)
361 thread_cleanup(NULL
);
365 /*===========================================================================*
367 *===========================================================================*/
/* Worker-thread body: resume a pipe read or write for a process that
 * was blocked on the pipe (see unblock()). The filp and I/O parameters
 * were stashed in the process's scratchpad. */
368 static void *do_pending_pipe(void *arg
)
373 tll_access_t locktype
;
375 my_job
= *((struct job
*) arg
);
378 lock_proc(fp
, 1 /* force lock */);
/* Retrieve and clear the stashed filp so it is consumed only once. */
380 f
= scratch(fp
).file
.filp
;
382 scratch(fp
).file
.filp
= NULL
;
/* Lock mode and operation follow from whether this was a READ. */
384 locktype
= (job_call_nr
== READ
) ? VNODE_READ
: VNODE_WRITE
;
385 op
= (job_call_nr
== READ
) ? READING
: WRITING
;
386 lock_filp(f
, locktype
);
388 r
= rw_pipe(op
, who_e
, f
, scratch(fp
).io
.io_buffer
, scratch(fp
).io
.io_nbytes
);
390 if (r
!= SUSPEND
) /* Do we have results to report? */
391 reply(fp
->fp_endpoint
, r
);
399 /*===========================================================================*
401 *===========================================================================*/
/* Worker-thread body started by service_pm() for an idle process: if we
 * can take the process lock ourselves, run the (postponed) work here;
 * otherwise the worker already holding the lock will pick it up.
 * NOTE(review): fragmentary excerpt — some original lines are elided. */
402 void *do_dummy(void *arg
)
407 my_job
= *((struct job
*) arg
);
410 if ((r
= mutex_trylock(&fp
->fp_lock
)) == 0) {
414 /* Proc is busy, let that worker thread carry out the work */
415 thread_cleanup(NULL
);
420 /*===========================================================================*
422 *===========================================================================*/
/* Worker-thread body for a normal system call: lock the calling
 * process, route the call (MAPDRIVER, GETSYSINFO, nested PFS request,
 * or plain POSIX call via call_vec), and reply unless suspended. */
423 static void *do_work(void *arg
)
428 my_job
= *((struct job
*) arg
);
431 lock_proc(fp
, 0); /* This proc is busy */
433 if (job_call_nr
== MAPDRIVER
) {
434 error
= do_mapdriver();
435 } else if (job_call_nr
== COMMON_GETSYSINFO
) {
436 error
= do_getsysinfo();
437 } else if (IS_PFS_VFS_RQ(job_call_nr
)) {
/* Nested VFS calls are a PFS privilege; reject all others. */
438 if (who_e
!= PFS_PROC_NR
) {
439 printf("VFS: only PFS is allowed to make nested VFS calls\n");
441 } else if (job_call_nr
<= PFS_BASE
||
442 job_call_nr
>= PFS_BASE
+ PFS_NREQS
) {
445 job_call_nr
-= PFS_BASE
;
446 error
= (*pfs_call_vec
[job_call_nr
])();
449 /* We're dealing with a POSIX system call from a normal
450 * process. Call the internal function that does the work.
452 if (job_call_nr
< 0 || job_call_nr
>= NCALLS
) {
454 } else if (fp
->fp_pid
== PID_FREE
) {
455 /* Process vanished before we were able to handle request.
456 * Replying has no use. Just drop it. */
459 #if ENABLE_SYSCALL_STATS
460 calls_stats
[job_call_nr
]++;
462 error
= (*call_vec
[job_call_nr
])();
466 /* Copy the results back to the user and send reply. */
467 if (error
!= SUSPEND
) reply(fp
->fp_endpoint
, error
);
474 /*===========================================================================*
475 * sef_local_startup *
476 *===========================================================================*/
/* Register SEF callbacks (fresh init, and failure on restart since VFS
 * cannot be restarted in place) and hand control to SEF startup. */
477 static void sef_local_startup()
479 /* Register init callbacks. */
480 sef_setcb_init_fresh(sef_cb_init_fresh
);
481 sef_setcb_init_restart(sef_cb_init_fail
);
483 /* No live update support for now. */
485 /* Let SEF perform startup. */
489 /*===========================================================================*
490 * sef_cb_init_fresh *
491 *===========================================================================*/
/* SEF fresh-init callback: build the fproc table from PM's boot-time
 * messages, map boot-image services, subscribe to driver events, set up
 * worker threads, locks, and all VFS subsystems, then kick off root
 * mounting in a worker thread.
 * NOTE(review): fragmentary excerpt — several original lines (including
 * some local declarations and the return) are elided from this view. */
492 static int sef_cb_init_fresh(int UNUSED(type
), sef_init_info_t
*info
)
494 /* Initialize the virtual file server. */
498 struct rprocpub rprocpub
[NR_BOOT_PROCS
];
505 /* Initialize proc endpoints to NONE */
506 for (rfp
= &fproc
[0]; rfp
< &fproc
[NR_PROCS
]; rfp
++) {
507 rfp
->fp_endpoint
= NONE
;
508 rfp
->fp_pid
= PID_FREE
;
511 /* Initialize the process table with help of the process manager messages.
512 * Expect one message for each system process with its slot number and pid.
513 * When no more processes follow, the magic process number NONE is sent.
514 * Then, stop and synchronize with the PM.
517 if ((s
= sef_receive(PM_PROC_NR
, &mess
)) != OK
)
518 panic("VFS: couldn't receive from PM: %d", s
);
520 if (mess
.m_type
!= PM_INIT
)
521 panic("unexpected message from PM: %d", mess
.m_type
);
523 if (NONE
== mess
.PM_PROC
) break;
/* Populate the slot PM told us about with sane defaults. */
525 rfp
= &fproc
[mess
.PM_SLOT
];
526 rfp
->fp_flags
= FP_NOFLAGS
;
527 rfp
->fp_pid
= mess
.PM_PID
;
528 rfp
->fp_endpoint
= mess
.PM_PROC
;
529 rfp
->fp_grant
= GRANT_INVALID
;
530 rfp
->fp_blocked_on
= FP_BLOCKED_ON_NONE
;
531 rfp
->fp_realuid
= (uid_t
) SYS_UID
;
532 rfp
->fp_effuid
= (uid_t
) SYS_UID
;
533 rfp
->fp_realgid
= (gid_t
) SYS_GID
;
534 rfp
->fp_effgid
= (gid_t
) SYS_GID
;
536 } while (TRUE
); /* continue until process NONE */
537 mess
.m_type
= OK
; /* tell PM that we succeeded */
538 s
= send(PM_PROC_NR
, &mess
); /* send synchronization message */
540 /* All process table entries have been set. Continue with initialization. */
541 fp
= &fproc
[_ENDPOINT_P(VFS_PROC_NR
)];/* During init all communication with
542 * FSes is on behalf of myself */
543 init_dmap(); /* Initialize device table. */
544 system_hz
= sys_hz();
546 /* Map all the services in the boot image. */
547 if ((s
= sys_safecopyfrom(RS_PROC_NR
, info
->rproctab_gid
, 0,
548 (vir_bytes
) rprocpub
, sizeof(rprocpub
))) != OK
){
549 panic("sys_safecopyfrom failed: %d", s
);
551 for (i
= 0; i
< NR_BOOT_PROCS
; i
++) {
552 if (rprocpub
[i
].in_use
) {
553 if ((s
= map_service(&rprocpub
[i
])) != OK
) {
554 panic("VFS: unable to map service: %d", s
);
559 /* Subscribe to block and character driver events. */
560 s
= ds_subscribe("drv\\.[bc]..\\..*", DSF_INITIAL
| DSF_OVERWRITE
);
561 if (s
!= OK
) panic("VFS: can't subscribe to driver events (%d)", s
);
563 /* Initialize worker threads */
564 for (i
= 0; i
< NR_WTHREADS
; i
++) {
565 worker_init(&workers
[i
]);
567 worker_init(&sys_worker
); /* exclusive system worker thread */
568 worker_init(&dl_worker
); /* exclusive worker thread to resolve deadlocks */
570 /* Initialize global locks */
571 if (mthread_mutex_init(&pm_lock
, NULL
) != 0)
572 panic("VFS: couldn't initialize pm lock mutex");
573 if (mthread_mutex_init(&exec_lock
, NULL
) != 0)
574 panic("VFS: couldn't initialize exec lock");
575 if (mthread_mutex_init(&bsf_lock
, NULL
) != 0)
576 panic("VFS: couldn't initialize block special file lock");
578 /* Initialize event resources for boot procs and locks for all procs */
579 for (rfp
= &fproc
[0]; rfp
< &fproc
[NR_PROCS
]; rfp
++) {
580 if (mutex_init(&rfp
->fp_lock
, NULL
) != 0)
581 panic("unable to initialize fproc lock");
583 rfp
->fp_vp_rdlocks
= 0;
584 rfp
->fp_vmnt_rdlocks
= 0;
/* Bring up the remaining VFS subsystems before mounting root. */
588 init_dmap_locks(); /* init dmap locks */
589 init_vnodes(); /* init vnodes */
590 init_vmnts(); /* init vmnt structures */
591 init_select(); /* init select() structures */
592 init_filps(); /* Init filp structures */
593 mount_pfs(); /* mount Pipe File Server */
594 worker_start(do_init_root
); /* mount initial ramdisk as file system root */
595 yield(); /* force do_init_root to start */
601 /*===========================================================================*
603 *===========================================================================*/
/* Worker-thread body: clear every process's open-file bitmap and mount
 * the boot ramdisk (served by MFS) as the file system root. While the
 * mount is in progress, only messages from MFS are accepted. */
604 static void *do_init_root(void *arg
)
609 char *mount_label
= "fs_imgrd"; /* FIXME: obtain this from RS */
611 my_job
= *((struct job
*) arg
);
614 lock_proc(fp
, 1 /* force lock */); /* This proc is busy */
617 /* Initialize process directories. mount_fs will set them to the correct
619 for (rfp
= &fproc
[0]; rfp
< &fproc
[NR_PROCS
]; rfp
++) {
620 FD_ZERO(&(rfp
->fp_filp_inuse
));
/* Restrict get_work() to MFS so nothing interferes with the mount. */
625 receive_from
= MFS_PROC_NR
;
626 r
= mount_fs(DEV_IMGRD
, "bootramdisk", "/", MFS_PROC_NR
, 0, mount_label
);
628 panic("Failed to initialize root");
637 /*===========================================================================*
639 *===========================================================================*/
/* Acquire a process's fproc lock: try without blocking first, then fall
 * back to a blocking lock. The force_lock flag and the code between the
 * two attempts are elided in this excerpt — confirm its semantics
 * against the full source. */
640 void lock_proc(struct fproc
*rfp
, int force_lock
)
643 struct fproc
*org_fp
;
644 struct worker_thread
*org_self
;
646 r
= mutex_trylock(&rfp
->fp_lock
);
648 /* Were we supposed to obtain this lock immediately? */
659 if ((r
= mutex_lock(&rfp
->fp_lock
)) != 0)
660 panic("unable to lock fproc lock: %d", r
);
666 /*===========================================================================*
668 *===========================================================================*/
/* Release a process's fproc lock taken by lock_proc(). */
669 void unlock_proc(struct fproc
*rfp
)
673 if ((r
= mutex_unlock(&rfp
->fp_lock
)) != 0)
674 panic("Failed to unlock: %d", r
);
677 /*===========================================================================*
679 *===========================================================================*/
/* End-of-job cleanup for a worker thread: run any PM call that was
 * postponed while the process was busy, verify no filp/vnode/vmnt locks
 * are still held by this thread, clear per-process service flags, and
 * release the deadlock-resolution slot if this was the dl worker. */
680 void thread_cleanup(struct fproc
*rfp
)
682 /* Clean up worker thread. Skip parts if this thread is not associated
683 * with a particular process (i.e., rfp is NULL) */
687 check_filp_locks_by_me();
688 check_vnode_locks_by_me(rfp
);
689 check_vmnt_locks_by_me(rfp
);
693 if (rfp
!= NULL
&& rfp
->fp_flags
& FP_PM_PENDING
) { /* Postponed PM call */
694 job_m_in
= rfp
->fp_job
.j_m_in
;
695 rfp
->fp_flags
&= ~FP_PM_PENDING
;
696 service_pm_postponed();
/* Re-check lock hygiene after the postponed PM work as well. */
701 check_filp_locks_by_me();
702 check_vnode_locks_by_me(rfp
);
703 check_vmnt_locks_by_me(rfp
);
708 rfp
->fp_flags
&= ~FP_DROP_WORK
;
/* An FS endpoint's call-back has completed: clear its vmnt flag so
 * handle_work() will accept the next call-back from it. */
709 if (rfp
->fp_flags
& FP_SRV_PROC
) {
712 if ((vmp
= find_vmnt(rfp
->fp_endpoint
)) != NULL
) {
713 vmp
->m_flags
&= ~VMNT_CALLBACK
;
718 if (deadlock_resolving
) {
719 if (self
->w_tid
== dl_worker
.w_tid
)
720 deadlock_resolving
= 0;
724 /*===========================================================================*
726 *===========================================================================*/
/* Obtain the next unit of work: first revive any process flagged
 * FP_REVIVED, otherwise receive a fresh message and validate that its
 * source endpoint matches our fproc table before the main loop uses it.
 * NOTE(review): fragmentary excerpt — some original lines are elided. */
727 static void get_work()
729 /* Normally wait for new input. However, if 'reviving' is
730 * nonzero, a suspended process must be awakened.
732 int r
, found_one
, proc_p
;
733 register struct fproc
*rp
;
735 while (reviving
!= 0) {
738 /* Find a suspended process. */
739 for (rp
= &fproc
[0]; rp
< &fproc
[NR_PROCS
]; rp
++)
740 if (rp
->fp_pid
!= PID_FREE
&& (rp
->fp_flags
& FP_REVIVED
)) {
741 found_one
= TRUE
; /* Found a suspended process */
743 return; /* So main loop can process job */
747 if (!found_one
) /* Consistency error */
748 panic("VFS: get_work couldn't revive anyone");
752 /* Normal case. No one to revive. Get a useful request. */
753 if ((r
= sef_receive(receive_from
, &m_in
)) != OK
) {
754 panic("VFS: sef_receive error: %d", r
);
/* Map the source endpoint to an fproc slot; out-of-range sources
 * (e.g. kernel tasks) leave fp NULL. */
757 proc_p
= _ENDPOINT_P(m_in
.m_source
);
758 if (proc_p
< 0 || proc_p
>= NR_PROCS
) fp
= NULL
;
759 else fp
= &fproc
[proc_p
];
761 if (m_in
.m_type
== EDEADSRCDST
) return; /* Failed 'sendrec' */
763 /* Negative who_p is never used to access the fproc array. Negative
764 * numbers (kernel tasks) are treated in a special way.
766 if (who_p
>= (int)(sizeof(fproc
) / sizeof(struct fproc
)))
767 panic("receive process out of range: %d", who_p
);
768 if (who_p
>= 0 && fproc
[who_p
].fp_endpoint
== NONE
) {
769 printf("VFS: ignoring request from %d: NONE endpoint %d (%d)\n",
770 m_in
.m_source
, who_p
, m_in
.m_type
);
774 /* Internal consistency check; our mental image of process numbers and
775 * endpoints must match with how the rest of the system thinks of them.
777 if (who_p
>= 0 && fproc
[who_p
].fp_endpoint
!= who_e
) {
778 if (fproc
[who_p
].fp_endpoint
== NONE
)
779 printf("slot unknown even\n");
781 printf("VFS: receive endpoint inconsistent (source %d, who_p "
782 "%d, stored ep %d, who_e %d).\n", m_in
.m_source
, who_p
,
783 fproc
[who_p
].fp_endpoint
, who_e
);
784 panic("VFS: inconsistent endpoint ");
791 /*===========================================================================*
793 *===========================================================================*/
/* Send a non-blocking reply carrying 'result' to endpoint 'whom';
 * failures are only logged, never fatal. */
794 void reply(endpoint_t whom
, int result
)
796 /* Send a reply to a user process. If the send fails, just ignore it. */
799 m_out
.reply_type
= result
;
800 r
= sendnb(whom
, &m_out
);
802 printf("VFS: %d couldn't send reply %d to %d: %d\n", mthread_self(),
808 /*===========================================================================*
809 * service_pm_postponed *
810 *===========================================================================*/
/* Carry out a PM request that had to wait for the target process's
 * in-flight system call to finish (exec, exit, dumpcore), then send the
 * corresponding *_REPLY back to PM.
 * NOTE(review): fragmentary excerpt — the case labels and some lines
 * are elided; branches are inferred from the visible reply types. */
811 static void service_pm_postponed(void)
816 switch(job_call_nr
) {
/* exec: copy path/frame parameters out of the request message. */
820 vir_bytes exec_path
, stack_frame
;
821 size_t exec_path_len
, stack_frame_len
;
823 proc_e
= job_m_in
.PM_PROC
;
824 exec_path
= (vir_bytes
) job_m_in
.PM_PATH
;
825 exec_path_len
= (size_t) job_m_in
.PM_PATH_LEN
;
826 stack_frame
= (vir_bytes
) job_m_in
.PM_FRAME
;
827 stack_frame_len
= (size_t) job_m_in
.PM_FRAME_LEN
;
829 r
= pm_exec(proc_e
, exec_path
, exec_path_len
, stack_frame
,
830 stack_frame_len
, &pc
, &newsp
, job_m_in
.PM_EXECFLAGS
);
832 /* Reply status to PM */
833 m_out
.m_type
= PM_EXEC_REPLY
;
834 m_out
.PM_PROC
= proc_e
;
835 m_out
.PM_PC
= (void*) pc
;
837 m_out
.PM_NEWSP
= (void *) newsp
;
/* exit: the actual pm_exit call is elided in this excerpt. */
844 proc_e
= job_m_in
.PM_PROC
;
848 /* Reply dummy status to PM for synchronization */
849 m_out
.m_type
= PM_EXIT_REPLY
;
850 m_out
.PM_PROC
= proc_e
;
/* dumpcore: distinguish an explicit dumpcore request from a core
 * dump on (signal) exit. */
856 endpoint_t proc_e
, traced_proc_e
;
860 proc_e
= job_m_in
.PM_PROC
;
861 traced_proc_e
= job_m_in
.PM_TRACED_PROC
;
862 if(job_m_in
.PM_PROC
!= job_m_in
.PM_TRACED_PROC
) {
863 /* dumpcore request */
866 /* dumpcore on exit */
867 term_signal
= job_m_in
.PM_TERM_SIG
;
869 core_path
= (vir_bytes
) job_m_in
.PM_PATH
;
871 r
= pm_dumpcore(proc_e
, term_signal
, core_path
);
873 /* Reply status to PM */
874 m_out
.m_type
= PM_CORE_REPLY
;
875 m_out
.PM_PROC
= proc_e
;
876 m_out
.PM_TRACED_PROC
= traced_proc_e
;
882 panic("Unhandled postponed PM call %d", job_m_in
.m_type
);
885 r
= send(PM_PROC_NR
, &m_out
);
887 panic("service_pm_postponed: send failed: %d", r
);
890 /*===========================================================================*
892 *===========================================================================*/
/* Handle a control request from PM: setuid/setgid/setsid/setgroups/
 * fork/unpause/reboot are serviced directly and answered with the
 * matching *_REPLY; exec/exit/dumpcore are postponed until the target
 * process's in-flight syscall finishes (see service_pm_postponed).
 * NOTE(review): fragmentary excerpt — case labels and some lines are
 * elided; branches are inferred from the visible reply types. */
893 static void service_pm()
897 switch (job_call_nr
) {
/* setuid: install new effective/real uid for the process. */
903 proc_e
= job_m_in
.PM_PROC
;
904 euid
= job_m_in
.PM_EID
;
905 ruid
= job_m_in
.PM_RID
;
907 pm_setuid(proc_e
, euid
, ruid
);
909 m_out
.m_type
= PM_SETUID_REPLY
;
910 m_out
.PM_PROC
= proc_e
;
/* setgid: install new effective/real gid for the process. */
919 proc_e
= job_m_in
.PM_PROC
;
920 egid
= job_m_in
.PM_EID
;
921 rgid
= job_m_in
.PM_RID
;
923 pm_setgid(proc_e
, egid
, rgid
);
925 m_out
.m_type
= PM_SETGID_REPLY
;
926 m_out
.PM_PROC
= proc_e
;
/* setsid: session-leader bookkeeping (pm_setsid call elided here). */
934 proc_e
= job_m_in
.PM_PROC
;
937 m_out
.m_type
= PM_SETSID_REPLY
;
938 m_out
.PM_PROC
= proc_e
;
/* exec/exit/dumpcore: must wait until the process is idle. */
946 endpoint_t proc_e
= job_m_in
.PM_PROC
;
948 if(isokendpt(proc_e
, &slot
) != OK
) {
949 printf("VFS: proc ep %d not ok\n", proc_e
);
955 if (fp
->fp_flags
& FP_PENDING
) {
956 /* This process has a request pending, but PM wants it
957 * gone. Forget about the pending request and satisfy
958 * PM's request instead. Note that a pending request
959 * AND an EXEC request are mutually exclusive. Also, PM
960 * should send only one request/process at a time.
962 assert(fp
->fp_job
.j_m_in
.m_source
!= PM_PROC_NR
);
965 /* PM requests on behalf of a proc are handled after the
966 * system call that might be in progress for that proc has
967 * finished. If the proc is not busy, we start a dummy call.
969 if (!(fp
->fp_flags
& FP_PENDING
) &&
970 mutex_trylock(&fp
->fp_lock
) == 0) {
971 mutex_unlock(&fp
->fp_lock
);
972 worker_start(do_dummy
);
973 fp
->fp_flags
|= FP_DROP_WORK
;
/* Stash the request; thread_cleanup() runs it when the proc idles. */
976 fp
->fp_job
.j_m_in
= job_m_in
;
977 fp
->fp_flags
|= FP_PM_PENDING
;
/* fork / srv_fork: duplicate parent state into the child slot. */
984 endpoint_t pproc_e
, proc_e
;
989 pproc_e
= job_m_in
.PM_PPROC
;
990 proc_e
= job_m_in
.PM_PROC
;
991 child_pid
= job_m_in
.PM_CPID
;
992 reuid
= job_m_in
.PM_REUID
;
993 regid
= job_m_in
.PM_REGID
;
995 pm_fork(pproc_e
, proc_e
, child_pid
);
996 m_out
.m_type
= PM_FORK_REPLY
;
/* A service fork additionally sets the child's ids from PM. */
998 if (job_call_nr
== PM_SRV_FORK
) {
999 m_out
.m_type
= PM_SRV_FORK_REPLY
;
1000 pm_setuid(proc_e
, reuid
, reuid
);
1001 pm_setgid(proc_e
, regid
, regid
);
1004 m_out
.PM_PROC
= proc_e
;
/* setgroups: install the supplemental group list. */
1013 proc_e
= job_m_in
.PM_PROC
;
1014 group_no
= job_m_in
.PM_GROUP_NO
;
1015 group_addr
= (gid_t
*) job_m_in
.PM_GROUP_ADDR
;
1017 pm_setgroups(proc_e
, group_no
, group_addr
);
1019 m_out
.m_type
= PM_SETGROUPS_REPLY
;
1020 m_out
.PM_PROC
= proc_e
;
/* unpause: the unpause call itself is elided in this excerpt. */
1028 proc_e
= job_m_in
.PM_PROC
;
1032 m_out
.m_type
= PM_UNPAUSE_REPLY
;
1033 m_out
.PM_PROC
= proc_e
;
1040 /* Reply dummy status to PM for synchronization */
1041 m_out
.m_type
= PM_REBOOT_REPLY
;
1046 printf("VFS: don't know how to handle PM request %d\n", job_call_nr
);
1051 r
= send(PM_PROC_NR
, &m_out
);
1053 panic("service_pm: send failed: %d", r
);
1058 /*===========================================================================*
1060 *===========================================================================*/
/* Revive a process flagged FP_REVIVED: reconstruct its original request
 * in m_in from the saved blocking state and scratchpad, clear the
 * blocked/revived markers, and either resume a pending pipe transfer in
 * a worker (return 0: caller should fetch more work) or report that a
 * process was unblocked (return 1).
 * NOTE(review): K&R-style definition; the parameter declaration line
 * and the closing lines are elided from this excerpt. */
1061 static int unblock(rfp
)
1067 blocked_on
= rfp
->fp_blocked_on
;
1068 m_in
.m_source
= rfp
->fp_endpoint
;
1069 m_in
.m_type
= rfp
->fp_block_callnr
;
1070 m_in
.fd
= scratch(fp
).file
.fd_nr
;
1071 m_in
.buffer
= scratch(fp
).io
.io_buffer
;
1072 m_in
.nbytes
= scratch(fp
).io
.io_nbytes
;
1074 rfp
->fp_blocked_on
= FP_BLOCKED_ON_NONE
; /* no longer blocked */
1075 rfp
->fp_flags
&= ~FP_REVIVED
;
1077 assert(reviving
>= 0);
1079 /* This should be a pipe I/O, not a device I/O. If it is, it'll 'leak'
1082 assert(!GRANT_VALID(rfp
->fp_grant
));
1084 /* Pending pipe reads/writes can be handled directly */
1085 if (blocked_on
== FP_BLOCKED_ON_PIPE
) {
1086 worker_start(do_pending_pipe
);
1087 yield(); /* Give thread a chance to run */
1089 return(0); /* Retrieve more work */
1092 return(1); /* We've unblocked a process */