/* This file contains the main program of the Virtual File System.  It consists
 * of a loop that gets messages requesting work, carries out the work, and sends
 * replies.
 *
 * The entry points into this file are:
 *   main:	main program of the Virtual File System
 *   reply:	send a reply to a process after the requested work is done
 */
#include <sys/ioc_memory.h>
#include <sys/svrctl.h>
#include <sys/select.h>
#include <minix/callnr.h>
#include <minix/com.h>
#include <minix/keymap.h>
#include <minix/const.h>
#include <minix/endpoint.h>
#include <minix/safecopies.h>
#include <minix/debug.h>
#include <minix/vfsif.h>
#include "scratchpad.h"
#if ENABLE_SYSCALL_STATS
EXTERN unsigned long calls_stats[NCALLS];
#endif
/* Thread related prototypes */
static void thread_cleanup(struct fproc *rfp);
static void *do_async_dev_result(void *arg);
static void *do_control_msgs(void *arg);
static void *do_fs_reply(struct job *job);
static void *do_work(void *arg);
static void *do_pm(void *arg);
static void *do_init_root(void *arg);
static void handle_work(void *(*func)(void *arg));
static void get_work(void);
static void lock_pm(void);
static void unlock_pm(void);
static void service_pm(void);
static void service_pm_postponed(void);
static int unblock(struct fproc *rfp);
/* SEF functions and variables. */
static void sef_local_startup(void);
static int sef_cb_init_fresh(int type, sef_init_info_t *info);

static mutex_t pm_lock;
static endpoint_t receive_from;
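/* Worker dispatch sketch (illustrative, not in the original source): each
 * do_*() handler below runs on a worker thread and is handed a pointer to a
 * struct job.  Assuming job.h lays the job out as a process-slot pointer
 * (j_fp) plus a copy of the request message (j_m_in), every handler starts
 * with roughly:
 *
 *	my_job = *((struct job *) arg);		take a private copy of the job
 *	fp = my_job.j_fp;			bind this thread to the caller
 *
 * after which job_m_in/job_call_nr name the request being handled by this
 * worker, independently of the global m_in that the main thread keeps reusing.
 */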
/*===========================================================================*
 *				main					     *
 *===========================================================================*/
int main(void)
{
/* This is the main program of the file system.  The main loop consists of
 * three major activities: getting new work, processing the work, and sending
 * the reply.  This loop never terminates as long as the file system runs.
 */
  int transid;
  struct job *job;

  /* SEF local startup. */
  sef_local_startup();

  printf("Started VFS: %d worker thread(s)\n", NR_WTHREADS);

  if (OK != (sys_getkinfo(&kinfo)))
	panic("couldn't get kernel kinfo");

  /* This is the main loop that gets work, processes it, and sends replies. */
  while (TRUE) {
	yield_all();	/* let other threads run */
	job = NULL;
	get_work();

	transid = TRNS_GET_ID(m_in.m_type);
	if (IS_VFS_FS_TRANSID(transid)) {
		job = worker_getjob( (thread_t) transid - VFS_TRANSID);
		if (job == NULL) {
			printf("VFS: spurious message %d from endpoint %d\n",
				m_in.m_type, m_in.m_source);
			continue;
		}
		m_in.m_type = TRNS_DEL_ID(m_in.m_type);
	}

	if (job != NULL) {
		do_fs_reply(job);
		continue;
	} else if (who_e == PM_PROC_NR) {	/* Calls from PM */
		/* Special control messages from PM */
		sys_worker_start(do_pm);
		continue;
	} else if (is_notify(call_nr)) {
		/* A task notify()ed us */
		sys_worker_start(do_control_msgs);
		continue;
	} else if (who_p < 0) {		/* i.e., message comes from a task */
		/* We're going to ignore this message. Tasks should
		 * send notify()s only.
		 */
		printf("VFS: ignoring message from %d (%d)\n", who_e, call_nr);
		continue;
	}

	/* At this point we either have results from an asynchronous device
	 * or a new system call. In both cases a new worker thread has to be
	 * started and there might not be one available from the pool. This is
	 * not a problem (requests/replies are simply queued), except when
	 * they're from an FS endpoint, because these can cause a deadlock.
	 * handle_work() takes care of the details. */
	if (IS_DEV_RS(call_nr)) {
		/* We've got results for a device request */
		handle_work(do_async_dev_result);
		continue;
	}

	/* Normal syscall. */
	handle_work(do_work);
  }
  return(OK);				/* shouldn't come here */
}
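/* Threading note (illustrative): only main() above ever receives messages;
 * every piece of actual work, including calls that may block on an FS
 * process, is handed off to a worker thread via one of the do_*() handlers
 * below, so the receive loop itself only blocks inside sef_receive() in
 * get_work().
 */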
/*===========================================================================*
 *			       handle_work				     *
 *===========================================================================*/
static void handle_work(void *(*func)(void *arg))
{
/* Handle asynchronous device replies and new system calls. If the originating
 * endpoint is an FS endpoint, take extra care not to get in deadlock. */
  struct vmnt *vmp = NULL;
  endpoint_t proc_e;

  proc_e = m_in.m_source;

  if (fp->fp_flags & FP_SYS_PROC) {
	if (worker_available() == 0) {
		if (!deadlock_resolving) {
			if ((vmp = find_vmnt(proc_e)) != NULL) {
				/* A call back or dev result from an FS
				 * endpoint. Set call back flag. Can do only
				 * one call back at a time.
				 */
				if (vmp->m_flags & VMNT_CALLBACK) {
					reply(proc_e, EAGAIN);
					return;
				}
				vmp->m_flags |= VMNT_CALLBACK;

				/* When an FS endpoint has to make a call back
				 * in order to mount, force its device to a
				 * "none device" so block reads/writes will be
				 * handled by ROOT_FS_E.
				 */
				if (vmp->m_flags & VMNT_MOUNTING)
					vmp->m_flags |= VMNT_FORCEROOTBSF;
			}

			deadlock_resolving = 1;
			dl_worker_start(func);
			return;
		}

		/* Already trying to resolve a deadlock, can't
		 * handle more, sorry */
		reply(proc_e, EAGAIN);
		return;
	}
  }

  worker_start(func);
}
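/* Example deadlock scenario this guards against (illustrative): if every
 * worker in the pool is blocked waiting on replies from an FS endpoint, a new
 * request coming *from* that FS endpoint could never be scheduled, and the FS
 * in turn would never answer the blocked workers.  The dedicated deadlock
 * worker plus the VMNT_CALLBACK flag break that cycle by serving at most one
 * such call back at a time and answering any further ones with EAGAIN.
 */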
/*===========================================================================*
 *			       do_async_dev_result			     *
 *===========================================================================*/
static void *do_async_dev_result(void *arg)
{
  endpoint_t endpt;
  struct job my_job;

  my_job = *((struct job *) arg);
  fp = my_job.j_fp;

  /* An asynchronous character driver has results for us */
  if (job_call_nr == DEV_REVIVE) {
	endpt = job_m_in.REP_ENDPT;
	if (endpt == VFS_PROC_NR)
		endpt = find_suspended_ep(job_m_in.m_source,
					  job_m_in.REP_IO_GRANT);

	if (endpt == NONE) {
		printf("VFS: proc with grant %d from %d not found\n",
			job_m_in.REP_IO_GRANT, job_m_in.m_source);
	} else if (job_m_in.REP_STATUS == SUSPEND) {
		printf("VFS: got SUSPEND on DEV_REVIVE: not reviving proc\n");
	} else
		revive(endpt, job_m_in.REP_STATUS);
  }
  else if (job_call_nr == DEV_OPEN_REPL) open_reply();
  else if (job_call_nr == DEV_REOPEN_REPL) reopen_reply();
  else if (job_call_nr == DEV_CLOSE_REPL) close_reply();
  else if (job_call_nr == DEV_SEL_REPL1)
	select_reply1(job_m_in.m_source, job_m_in.DEV_MINOR,
		      job_m_in.DEV_SEL_OPS);
  else if (job_call_nr == DEV_SEL_REPL2)
	select_reply2(job_m_in.m_source, job_m_in.DEV_MINOR,
		      job_m_in.DEV_SEL_OPS);

  if (deadlock_resolving) {
	if (fp != NULL && fp->fp_wtid == dl_worker.w_tid)
		deadlock_resolving = 0;
  }

  if (fp != NULL && (fp->fp_flags & FP_SYS_PROC)) {
	struct vmnt *vmp;

	if ((vmp = find_vmnt(fp->fp_endpoint)) != NULL)
		vmp->m_flags &= ~VMNT_CALLBACK;
  }

  thread_cleanup(NULL);
  return(NULL);
}
/*===========================================================================*
 *			       do_control_msgs				     *
 *===========================================================================*/
static void *do_control_msgs(void *arg)
{
  struct job my_job;

  my_job = *((struct job *) arg);
  fp = my_job.j_fp;

  /* Check for special control messages. */
  if (job_m_in.m_source == CLOCK) {
	/* Alarm timer expired. Used only for select(). Check it. */
	expire_timers(job_m_in.NOTIFY_TIMESTAMP);
  } else if (job_m_in.m_source == DS_PROC_NR) {
	/* DS notifies us of an event. */
	ds_event();
  } else {
	/* Device notifies us of an event. */
	dev_status(&job_m_in);
  }

  thread_cleanup(NULL);
  return(NULL);
}
/*===========================================================================*
 *			       do_fs_reply				     *
 *===========================================================================*/
static void *do_fs_reply(struct job *job)
{
  struct vmnt *vmp;
  struct fproc *rfp;

  if ((vmp = find_vmnt(who_e)) == NULL)
	panic("Couldn't find vmnt for endpoint %d", who_e);

  rfp = job->j_fp;

  if (rfp == NULL || rfp->fp_endpoint == NONE) {
	printf("VFS: spurious reply from %d\n", who_e);
	return(NULL);
  }

  if (rfp->fp_task != who_e)
	printf("VFS: expected %d to reply, not %d\n", rfp->fp_task, who_e);
  *rfp->fp_sendrec = m_in;
  vmp->m_comm.c_cur_reqs--;	/* We've got our reply, make room for others */
  if (rfp->fp_wtid != invalid_thread_id)
	worker_signal(worker_get(rfp->fp_wtid)); /* Continue this thread */
  else
	printf("VFS: consistency error: reply for finished job\n");

  return(NULL);
}
/*===========================================================================*
 *				lock_pm					     *
 *===========================================================================*/
static void lock_pm(void)
{
  struct fproc *org_fp;
  struct worker_thread *org_self;

  /* First try to get it right off the bat */
  if (mutex_trylock(&pm_lock) == 0)
	return;

  org_fp = fp;
  org_self = self;

  if (mutex_lock(&pm_lock) != 0)
	panic("Could not obtain lock on pm\n");

  fp = org_fp;
  self = org_self;
}
/*===========================================================================*
 *				unlock_pm				     *
 *===========================================================================*/
static void unlock_pm(void)
{
  if (mutex_unlock(&pm_lock) != 0)
	panic("Could not release lock on pm");
}
/*===========================================================================*
 *				do_pm					     *
 *===========================================================================*/
static void *do_pm(void *arg __unused)
{
  lock_pm();
  service_pm();
  unlock_pm();

  thread_cleanup(NULL);
  return(NULL);
}
/*===========================================================================*
 *			       do_pending_pipe				     *
 *===========================================================================*/
static void *do_pending_pipe(void *arg)
{
  int r, op;
  struct job my_job;
  struct filp *f;
  tll_access_t locktype;

  my_job = *((struct job *) arg);
  fp = my_job.j_fp;

  lock_proc(fp, 1 /* force lock */);

  f = scratch(fp).file.filp;
  assert(f != NULL);
  scratch(fp).file.filp = NULL;

  locktype = (job_call_nr == READ) ? VNODE_READ : VNODE_WRITE;
  op = (job_call_nr == READ) ? READING : WRITING;
  lock_filp(f, locktype);

  r = rw_pipe(op, who_e, f, scratch(fp).io.io_buffer, scratch(fp).io.io_nbytes);

  if (r != SUSPEND)	/* Do we have results to report? */
	reply(fp->fp_endpoint, r);

  unlock_filp(f);

  thread_cleanup(fp);
  return(NULL);
}
/*===========================================================================*
 *			       do_dummy					     *
 *===========================================================================*/
void *do_dummy(void *arg)
{
  struct job my_job;
  int r;

  my_job = *((struct job *) arg);
  fp = my_job.j_fp;

  if ((r = mutex_trylock(&fp->fp_lock)) == 0) {
	thread_cleanup(fp);
  } else {
	/* Proc is busy, let that worker thread carry out the work */
	thread_cleanup(NULL);
  }
  return(NULL);
}
/*===========================================================================*
 *				do_work					     *
 *===========================================================================*/
static void *do_work(void *arg)
{
  int error;
  struct job my_job;

  my_job = *((struct job *) arg);
  fp = my_job.j_fp;

  lock_proc(fp, 0); /* This proc is busy */

  if (job_call_nr == MAPDRIVER) {
	error = do_mapdriver();
  } else if (job_call_nr == COMMON_GETSYSINFO) {
	error = do_getsysinfo();
  } else if (IS_PFS_VFS_RQ(job_call_nr)) {
	if (who_e != PFS_PROC_NR) {
		printf("VFS: only PFS is allowed to make nested VFS calls\n");
		error = ENOSYS;
	} else if (job_call_nr <= PFS_BASE ||
		   job_call_nr >= PFS_BASE + PFS_NREQS) {
		error = ENOSYS;
	} else {
		job_call_nr -= PFS_BASE;
		error = (*pfs_call_vec[job_call_nr])();
	}
  } else {
	/* We're dealing with a POSIX system call from a normal
	 * process. Call the internal function that does the work.
	 */
	if (job_call_nr < 0 || job_call_nr >= NCALLS) {
		error = ENOSYS;
	} else if (fp->fp_pid == PID_FREE) {
		/* Process vanished before we were able to handle request.
		 * Replying has no use. Just drop it. */
		error = SUSPEND;
	} else {
#if ENABLE_SYSCALL_STATS
		calls_stats[job_call_nr]++;
#endif
		error = (*call_vec[job_call_nr])();
	}
  }

  /* Copy the results back to the user and send reply. */
  if (error != SUSPEND) {
	if ((fp->fp_flags & FP_SYS_PROC)) {
		struct vmnt *vmp;

		if ((vmp = find_vmnt(fp->fp_endpoint)) != NULL)
			vmp->m_flags &= ~VMNT_CALLBACK;
	}

	if (deadlock_resolving) {
		if (fp->fp_wtid == dl_worker.w_tid)
			deadlock_resolving = 0;
	}

	reply(fp->fp_endpoint, error);
  }

  thread_cleanup(fp);
  return(NULL);
}
/*===========================================================================*
 *			       sef_local_startup			     *
 *===========================================================================*/
static void sef_local_startup(void)
{
  /* Register init callbacks. */
  sef_setcb_init_fresh(sef_cb_init_fresh);
  sef_setcb_init_restart(sef_cb_init_fail);

  /* No live update support for now. */

  /* Let SEF perform startup. */
  sef_startup();
}
/*===========================================================================*
 *			       sef_cb_init_fresh			     *
 *===========================================================================*/
static int sef_cb_init_fresh(int UNUSED(type), sef_init_info_t *info)
{
/* Initialize the virtual file server. */
  int s, i;
  struct fproc *rfp;
  message mess;
  struct rprocpub rprocpub[NR_BOOT_PROCS];

  /* Initialize proc endpoints to NONE */
  for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
	rfp->fp_endpoint = NONE;
	rfp->fp_pid = PID_FREE;
  }
  /* Initialize the process table with help of the process manager messages.
   * Expect one message for each system process with its slot number and pid.
   * When no more processes follow, the magic process number NONE is sent.
   * Then, stop and synchronize with the PM.
   */
  do {
	if ((s = sef_receive(PM_PROC_NR, &mess)) != OK)
		panic("VFS: couldn't receive from PM: %d", s);

	if (mess.m_type != PM_INIT)
		panic("unexpected message from PM: %d", mess.m_type);

	if (NONE == mess.PM_PROC) break;

	rfp = &fproc[mess.PM_SLOT];
	rfp->fp_flags = FP_NOFLAGS;
	rfp->fp_pid = mess.PM_PID;
	rfp->fp_endpoint = mess.PM_PROC;
	rfp->fp_grant = GRANT_INVALID;
	rfp->fp_blocked_on = FP_BLOCKED_ON_NONE;
	rfp->fp_realuid = (uid_t) SYS_UID;
	rfp->fp_effuid = (uid_t) SYS_UID;
	rfp->fp_realgid = (gid_t) SYS_GID;
	rfp->fp_effgid = (gid_t) SYS_GID;
  } while (TRUE);			/* continue until process NONE */
  mess.m_type = OK;			/* tell PM that we succeeded */
  s = send(PM_PROC_NR, &mess);		/* send synchronization message */
  /* All process table entries have been set. Continue with initialization. */
  fp = &fproc[_ENDPOINT_P(VFS_PROC_NR)];/* During init all communication with
					 * FSes is on behalf of myself */
  init_dmap();			/* Initialize device table. */
  system_hz = sys_hz();
  /* Map all the services in the boot image. */
  if ((s = sys_safecopyfrom(RS_PROC_NR, info->rproctab_gid, 0,
			    (vir_bytes) rprocpub, sizeof(rprocpub))) != OK) {
	panic("sys_safecopyfrom failed: %d", s);
  }
  for (i = 0; i < NR_BOOT_PROCS; i++) {
	if (rprocpub[i].in_use) {
		if ((s = map_service(&rprocpub[i])) != OK) {
			panic("VFS: unable to map service: %d", s);
		}
	}
  }
  /* Subscribe to block and character driver events. */
  s = ds_subscribe("drv\\.[bc]..\\..*", DSF_INITIAL | DSF_OVERWRITE);
  if (s != OK) panic("VFS: can't subscribe to driver events (%d)", s);
  /* Initialize worker threads */
  for (i = 0; i < NR_WTHREADS; i++) {
	worker_init(&workers[i]);
  }
  worker_init(&sys_worker); /* exclusive system worker thread */
  worker_init(&dl_worker); /* exclusive worker thread to resolve deadlocks */
  /* Initialize global locks */
  if (mthread_mutex_init(&pm_lock, NULL) != 0)
	panic("VFS: couldn't initialize pm lock mutex");
  if (mthread_mutex_init(&exec_lock, NULL) != 0)
	panic("VFS: couldn't initialize exec lock");
  if (mthread_mutex_init(&bsf_lock, NULL) != 0)
	panic("VFS: couldn't initialize block special file lock");
  /* Initialize event resources for boot procs and locks for all procs */
  for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
	if (mutex_init(&rfp->fp_lock, NULL) != 0)
		panic("unable to initialize fproc lock");
#if LOCK_DEBUG
	rfp->fp_vp_rdlocks = 0;
	rfp->fp_vmnt_rdlocks = 0;
#endif
  }
  init_vnodes();		/* init vnodes */
  init_vmnts();			/* init vmnt structures */
  init_select();		/* init select() structures */
  init_filps();			/* Init filp structures */
  mount_pfs();			/* mount Pipe File Server */
  worker_start(do_init_root);	/* mount initial ramdisk as file system root */
  yield();			/* force do_init_root to start */

  return(OK);
}
/*===========================================================================*
 *			       do_init_root				     *
 *===========================================================================*/
static void *do_init_root(void *arg)
{
  struct fproc *rfp;
  struct job my_job;
  int r;
  char *mount_label = "fs_imgrd"; /* FIXME: obtain this from RS */

  my_job = *((struct job *) arg);
  fp = my_job.j_fp;

  lock_proc(fp, 1 /* force lock */); /* This proc is busy */

  /* Initialize process directories. mount_fs will set them to the correct
   * values */
  for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
	FD_ZERO(&(rfp->fp_filp_inuse));
	rfp->fp_rd = NULL;
	rfp->fp_wd = NULL;
  }

  receive_from = MFS_PROC_NR;
  if ((r = mount_fs(DEV_IMGRD, "/", MFS_PROC_NR, 0, mount_label)) != OK)
	panic("Failed to initialize root");
  receive_from = ANY;

  thread_cleanup(fp);
  return(NULL);
}
/*===========================================================================*
 *				lock_proc				     *
 *===========================================================================*/
void lock_proc(struct fproc *rfp, int force_lock)
{
  int r;
  struct fproc *org_fp;
  struct worker_thread *org_self;

  r = mutex_trylock(&rfp->fp_lock);

  /* Were we supposed to obtain this lock immediately? */
  if (force_lock) {
	assert(r == 0);
	return;
  }

  if (r == 0) return;

  org_fp = fp;
  org_self = self;

  if ((r = mutex_lock(&rfp->fp_lock)) != 0)
	panic("unable to lock fproc lock: %d", r);

  fp = org_fp;
  self = org_self;
}

/*===========================================================================*
 *				unlock_proc				     *
 *===========================================================================*/
void unlock_proc(struct fproc *rfp)
{
  int r;

  if ((r = mutex_unlock(&rfp->fp_lock)) != 0)
	panic("Failed to unlock: %d", r);
}
/*===========================================================================*
 *				thread_cleanup				     *
 *===========================================================================*/
static void thread_cleanup(struct fproc *rfp)
{
/* Clean up worker thread. Skip parts if this thread is not associated
 * with a particular process (i.e., rfp is NULL) */

#if LOCK_DEBUG
  if (rfp != NULL) {
	check_filp_locks_by_me();
	check_vnode_locks_by_me(rfp);
	check_vmnt_locks_by_me(rfp);
  }
#endif

  if (rfp != NULL && rfp->fp_flags & FP_PM_PENDING) {	/* Postponed PM call */
	job_m_in = rfp->fp_job.j_m_in;
	rfp->fp_flags &= ~FP_PM_PENDING;
	service_pm_postponed();
  }

#if LOCK_DEBUG
  if (rfp != NULL) {
	check_filp_locks_by_me();
	check_vnode_locks_by_me(rfp);
	check_vmnt_locks_by_me(rfp);
  }
#endif

  if (rfp != NULL) {
	rfp->fp_flags &= ~FP_DROP_WORK;
	unlock_proc(rfp);
  }
}
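/* Ordering note (illustrative): PM requests that target a process still busy
 * with a system call are parked on that process by service_pm() below
 * (FP_PM_PENDING).  They are carried out here in thread_cleanup(), via
 * service_pm_postponed(), once the worker thread serving that process is done.
 */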
/*===========================================================================*
 *				get_work				     *
 *===========================================================================*/
static void get_work(void)
{
  /* Normally wait for new input.  However, if 'reviving' is
   * nonzero, a suspended process must be awakened.
   */
  int r, found_one, proc_p;
  register struct fproc *rp;

  while (reviving != 0) {
	found_one = FALSE;

	/* Find a suspended process. */
	for (rp = &fproc[0]; rp < &fproc[NR_PROCS]; rp++)
		if (rp->fp_pid != PID_FREE && (rp->fp_flags & FP_REVIVED)) {
			found_one = TRUE; /* Found a suspended process */
			if (unblock(rp))
				return;	/* So main loop can process job */
		}

	if (!found_one)	/* Consistency error */
		panic("VFS: get_work couldn't revive anyone");
  }

  for (;;) {
	/* Normal case.  No one to revive. Get a useful request. */
	if ((r = sef_receive(receive_from, &m_in)) != OK) {
		panic("VFS: sef_receive error: %d", r);
	}

	proc_p = _ENDPOINT_P(m_in.m_source);
	if (proc_p < 0 || proc_p >= NR_PROCS) fp = NULL;
	else fp = &fproc[proc_p];

	if (m_in.m_type == EDEADSRCDST) return;	/* Failed 'sendrec' */

	/* Negative who_p is never used to access the fproc array. Negative
	 * numbers (kernel tasks) are treated in a special way.
	 */
	if (who_p >= (int)(sizeof(fproc) / sizeof(struct fproc)))
		panic("receive process out of range: %d", who_p);
	if (who_p >= 0 && fproc[who_p].fp_endpoint == NONE) {
		printf("VFS: ignoring request from %d: NONE endpoint %d (%d)\n",
			m_in.m_source, who_p, m_in.m_type);
		continue;
	}

	/* Internal consistency check; our mental image of process numbers and
	 * endpoints must match with how the rest of the system thinks of them.
	 */
	if (who_p >= 0 && fproc[who_p].fp_endpoint != who_e) {
		if (fproc[who_p].fp_endpoint == NONE)
			printf("slot unknown even\n");

		printf("VFS: receive endpoint inconsistent (source %d, who_p "
			"%d, stored ep %d, who_e %d).\n", m_in.m_source, who_p,
			fproc[who_p].fp_endpoint, who_e);
		panic("VFS: inconsistent endpoint ");
	}

	return;
  }
}
/*===========================================================================*
 *				reply					     *
 *===========================================================================*/
void reply(endpoint_t whom, int result)
{
/* Send a reply to a user process.  If the send fails, just ignore it. */
  int r;

  m_out.reply_type = result;
  r = sendnb(whom, &m_out);
  if (r != OK) {
	printf("VFS: %d couldn't send reply %d to %d: %d\n", mthread_self(),
		result, whom, r);
  }
}
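/* Typical use (illustrative): a worker thread that has finished a system call
 * reports the result with reply(fp->fp_endpoint, error).  The non-blocking
 * sendnb() keeps an unresponsive or already exited caller from ever stalling
 * VFS itself.
 */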
/*===========================================================================*
 *				service_pm_postponed			     *
 *===========================================================================*/
static void service_pm_postponed(void)
{
  int r;
  vir_bytes pc, newsp;

  switch(job_call_nr) {
    case PM_EXEC:
	{
		endpoint_t proc_e;
		vir_bytes exec_path, stack_frame;
		size_t exec_path_len, stack_frame_len;

		proc_e = job_m_in.PM_PROC;
		exec_path = (vir_bytes) job_m_in.PM_PATH;
		exec_path_len = (size_t) job_m_in.PM_PATH_LEN;
		stack_frame = (vir_bytes) job_m_in.PM_FRAME;
		stack_frame_len = (size_t) job_m_in.PM_FRAME_LEN;

		r = pm_exec(proc_e, exec_path, exec_path_len, stack_frame,
			    stack_frame_len, &pc, &newsp, job_m_in.PM_EXECFLAGS);

		/* Reply status to PM */
		m_out.m_type = PM_EXEC_REPLY;
		m_out.PM_PROC = proc_e;
		m_out.PM_PC = (void*) pc;
		m_out.PM_STATUS = r;
		m_out.PM_NEWSP = (void *) newsp;
	}
	break;

    case PM_EXIT:
	{
		endpoint_t proc_e;

		proc_e = job_m_in.PM_PROC;

		pm_exit(proc_e);

		/* Reply dummy status to PM for synchronization */
		m_out.m_type = PM_EXIT_REPLY;
		m_out.PM_PROC = proc_e;
	}
	break;

    case PM_DUMPCORE:
	{
		endpoint_t proc_e, traced_proc_e;
		int term_signal;
		vir_bytes core_path;

		proc_e = job_m_in.PM_PROC;
		traced_proc_e = job_m_in.PM_TRACED_PROC;
		if (job_m_in.PM_PROC != job_m_in.PM_TRACED_PROC) {
			/* dumpcore request */
			term_signal = 0;
		} else {
			/* dumpcore on exit */
			term_signal = job_m_in.PM_TERM_SIG;
		}

		core_path = (vir_bytes) job_m_in.PM_PATH;

		r = pm_dumpcore(proc_e, term_signal, core_path);

		/* Reply status to PM */
		m_out.m_type = PM_CORE_REPLY;
		m_out.PM_PROC = proc_e;
		m_out.PM_TRACED_PROC = traced_proc_e;
		m_out.PM_STATUS = r;
	}
	break;

    default:
	panic("Unhandled postponed PM call %d", job_m_in.m_type);
  }

  r = send(PM_PROC_NR, &m_out);
  if (r != OK)
	panic("service_pm_postponed: send failed: %d", r);
}
/*===========================================================================*
 *				service_pm				     *
 *===========================================================================*/
static void service_pm(void)
{
  int r, slot;

  switch (job_call_nr) {
    case PM_SETUID:
	{
		endpoint_t proc_e;
		uid_t euid, ruid;

		proc_e = job_m_in.PM_PROC;
		euid = job_m_in.PM_EID;
		ruid = job_m_in.PM_RID;

		pm_setuid(proc_e, euid, ruid);

		m_out.m_type = PM_SETUID_REPLY;
		m_out.PM_PROC = proc_e;
	}
	break;

    case PM_SETGID:
	{
		endpoint_t proc_e;
		gid_t egid, rgid;

		proc_e = job_m_in.PM_PROC;
		egid = job_m_in.PM_EID;
		rgid = job_m_in.PM_RID;

		pm_setgid(proc_e, egid, rgid);

		m_out.m_type = PM_SETGID_REPLY;
		m_out.PM_PROC = proc_e;
	}
	break;

    case PM_SETSID:
	{
		endpoint_t proc_e;

		proc_e = job_m_in.PM_PROC;
		pm_setsid(proc_e);

		m_out.m_type = PM_SETSID_REPLY;
		m_out.PM_PROC = proc_e;
	}
	break;

    case PM_EXEC:
    case PM_EXIT:
    case PM_DUMPCORE:
	{
		endpoint_t proc_e = job_m_in.PM_PROC;

		if (isokendpt(proc_e, &slot) != OK) {
			printf("VFS: proc ep %d not ok\n", proc_e);
			return;
		}

		fp = &fproc[slot];

		if (fp->fp_flags & FP_PENDING) {
			/* This process has a request pending, but PM wants it
			 * gone. Forget about the pending request and satisfy
			 * PM's request instead. Note that a pending request
			 * AND an EXEC request are mutually exclusive. Also, PM
			 * should send only one request/process at a time.
			 */
			assert(fp->fp_job.j_m_in.m_source != PM_PROC_NR);
		}

		/* PM requests on behalf of a proc are handled after the
		 * system call that might be in progress for that proc has
		 * finished. If the proc is not busy, we start a dummy call.
		 */
		if (!(fp->fp_flags & FP_PENDING) &&
				mutex_trylock(&fp->fp_lock) == 0) {
			mutex_unlock(&fp->fp_lock);
			worker_start(do_dummy);
			fp->fp_flags |= FP_DROP_WORK;
		}

		fp->fp_job.j_m_in = job_m_in;
		fp->fp_flags |= FP_PM_PENDING;

		return;
	}

    case PM_FORK:
    case PM_SRV_FORK:
	{
		endpoint_t pproc_e, proc_e;
		pid_t child_pid;
		uid_t reuid;
		gid_t regid;

		pproc_e = job_m_in.PM_PPROC;
		proc_e = job_m_in.PM_PROC;
		child_pid = job_m_in.PM_CPID;
		reuid = job_m_in.PM_REUID;
		regid = job_m_in.PM_REGID;

		pm_fork(pproc_e, proc_e, child_pid);
		m_out.m_type = PM_FORK_REPLY;

		if (job_call_nr == PM_SRV_FORK) {
			m_out.m_type = PM_SRV_FORK_REPLY;
			pm_setuid(proc_e, reuid, reuid);
			pm_setgid(proc_e, regid, regid);
		}

		m_out.PM_PROC = proc_e;
	}
	break;

    case PM_SETGROUPS:
	{
		endpoint_t proc_e;
		int group_no;
		gid_t *group_addr;

		proc_e = job_m_in.PM_PROC;
		group_no = job_m_in.PM_GROUP_NO;
		group_addr = (gid_t *) job_m_in.PM_GROUP_ADDR;

		pm_setgroups(proc_e, group_no, group_addr);

		m_out.m_type = PM_SETGROUPS_REPLY;
		m_out.PM_PROC = proc_e;
	}
	break;

    case PM_UNPAUSE:
	{
		endpoint_t proc_e;

		proc_e = job_m_in.PM_PROC;
		unpause(proc_e);

		m_out.m_type = PM_UNPAUSE_REPLY;
		m_out.PM_PROC = proc_e;
	}
	break;

    case PM_REBOOT:
	pm_reboot();

	/* Reply dummy status to PM for synchronization */
	m_out.m_type = PM_REBOOT_REPLY;
	break;

    default:
	printf("VFS: don't know how to handle PM request %d\n", job_call_nr);
	return;
  }

  r = send(PM_PROC_NR, &m_out);
  if (r != OK)
	panic("service_pm: send failed: %d", r);
}
/*===========================================================================*
 *				unblock					     *
 *===========================================================================*/
static int unblock(struct fproc *rfp)
{
  int blocked_on;

  blocked_on = rfp->fp_blocked_on;
  m_in.m_source = rfp->fp_endpoint;
  m_in.m_type = rfp->fp_block_callnr;
  m_in.fd = scratch(fp).file.fd_nr;
  m_in.buffer = scratch(fp).io.io_buffer;
  m_in.nbytes = scratch(fp).io.io_nbytes;

  rfp->fp_blocked_on = FP_BLOCKED_ON_NONE;	/* no longer blocked */
  rfp->fp_flags &= ~FP_REVIVED;
  reviving--;
  assert(reviving >= 0);

  /* This should be a pipe I/O, not a device I/O. If it is, it'll 'leak'
   * grants.
   */
  assert(!GRANT_VALID(rfp->fp_grant));

  /* Pending pipe reads/writes can be handled directly */
  if (blocked_on == FP_BLOCKED_ON_PIPE) {
	worker_start(do_pending_pipe);
	yield();	/* Give thread a chance to run */
	return(0);	/* Retrieve more work */
  }

  return(1);	/* We've unblocked a process */
}
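/* Revival path (illustrative summary): when a process suspended on a pipe can
 * make progress again, it is marked FP_REVIVED and 'reviving' is incremented
 * elsewhere in VFS.  get_work() then calls unblock(), which reconstructs the
 * original request in m_in and starts do_pending_pipe() on a worker thread,
 * so the blocked read() or write() completes as if it had just arrived.
 */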