2 * a loop that gets messages requesting work, carries out the work, and sends
5 * The entry points into this file are:
6 * main: main program of the Virtual File System
7 * reply: send a reply to a process after the requested work is done
10 * Jul 2006 (Balazs Gerofi)
20 #include <sys/ioc_memory.h>
21 #include <sys/svrctl.h>
22 #include <sys/select.h>
23 #include <minix/callnr.h>
24 #include <minix/com.h>
25 #include <minix/keymap.h>
26 #include <minix/const.h>
27 #include <minix/endpoint.h>
28 #include <minix/safecopies.h>
29 #include <minix/debug.h>
34 #include <minix/vfsif.h>
38 #if ENABLE_SYSCALL_STATS
39 EXTERN
unsigned long calls_stats
[NCALLS
];
42 FORWARD
_PROTOTYPE( void get_work
, (void) );
43 FORWARD
_PROTOTYPE( void init_root
, (void) );
44 FORWARD
_PROTOTYPE( void service_pm
, (void) );
46 /* SEF functions and variables. */
47 FORWARD
_PROTOTYPE( void sef_local_startup
, (void) );
48 FORWARD
_PROTOTYPE( int sef_cb_init_fresh
, (int type
, sef_init_info_t
*info
) );
50 /*===========================================================================*
52 *===========================================================================*/
/* NOTE(review): extraction fragment — the function header (original line ~53,
 * presumably `PUBLIC int main(void)`) and many braces/statements fall in the
 * gaps between the surviving original line numbers. Restore the missing lines
 * from the complete file before compiling; comments below document only what
 * is visible here. */
55 /* This is the main program of the file system. The main loop consists of
56 * three major activities: getting new work, processing the work, and sending
57 * the reply. This loop never terminates as long as the file system runs.
61 /* SEF local startup. */
64 /* This is the main loop that gets work, processes it, and sends replies. */
67 get_work(); /* sets who and call_nr */
/* Driver replies are recognized by message type before normal dispatch.
 * DEV_REVIVE: a driver finished I/O for a process we suspended. */
69 if (call_nr
== DEV_REVIVE
)
73 endpt
= m_in
.REP_ENDPT
;
/* REP_ENDPT == FS_PROC_NR means the grant was issued on behalf of a
 * suspended process; look it up by (source, grant) instead. */
74 if(endpt
== FS_PROC_NR
) {
75 endpt
= suspended_ep(m_in
.m_source
, m_in
.REP_IO_GRANT
);
/* Lookup failure path: log and (presumably, in a gap) skip the revive. */
77 printf("FS: proc with "
78 "grant %d from %d not found (revive)\n",
79 m_in
.REP_IO_GRANT
, m_in
.m_source
);
/* Wake the suspended process with the driver's status. */
83 revive(endpt
, m_in
.REP_STATUS
);
/* Further driver reply types (reopen/close/select); their handler calls
 * fall in the extraction gaps after each condition. */
86 if (call_nr
== DEV_REOPEN_REPL
)
91 if (call_nr
== DEV_CLOSE_REPL
)
96 if (call_nr
== DEV_SEL_REPL1
)
101 if (call_nr
== DEV_SEL_REPL2
)
107 /* Check for special control messages first. */
108 if (is_notify(call_nr
)) {
111 /* Alarm timer expired. Used only for select().
114 fs_expire_timers(m_in
.NOTIFY_TIMESTAMP
);
116 else if(who_e
== DS_PROC_NR
)
118 /* DS notifies us of an event. */
123 /* Device notifies us of an event. */
130 /* We only expect notify()s from tasks. */
132 printf("FS: ignoring message from %d (%d)\n",
137 /* Now it's safe to set and check fp. */
138 fp
= &fproc
[who_p
]; /* pointer to proc table struct */
139 super_user
= (fp
->fp_effuid
== SU_UID
? TRUE
: FALSE
); /* su? */
/* A requester that is still marked blocked should be impossible here. */
142 if(fp_is_blocked(fp
)) {
143 printf("VFS: requester %d call %d: suspended\n",
145 panic("requester suspended");
/* Requests from PM get their own dispatch — presumably service_pm(),
 * whose call falls in a gap (TODO confirm against the full file). */
150 if (who_e
== PM_PROC_NR
) {
162 error
= do_mapdriver();
163 if (error
!= SUSPEND
) reply(who_e
, error
);
167 /* Call the internal function that does the work. */
/* Range-check call_nr before indexing the call_vec dispatch table. */
168 if (call_nr
< 0 || call_nr
>= NCALLS
) {
170 /* Not supposed to happen. */
171 printf("VFS: illegal %d system call by %d\n",
173 } else if (fp
->fp_pid
== PID_FREE
) {
176 "FS, bad process, who = %d, call_nr = %d, endpt1 = %d\n",
177 who_e
, call_nr
, m_in
.endpt1
);
179 #if ENABLE_SYSCALL_STATS
180 calls_stats
[call_nr
]++;
/* Dispatch through the system-call table. */
183 error
= (*call_vec
[call_nr
])();
187 /* Copy the results back to the user and send reply. */
188 if (error
!= SUSPEND
) { reply(who_e
, error
); }
192 return(OK
); /* shouldn't come here */
195 /*===========================================================================*
196 * sef_local_startup *
197 *===========================================================================*/
/* Register this server's SEF callbacks and hand control to SEF startup.
 * NOTE(review): opening/closing braces and the final sef_startup() call
 * (original lines ~199, ~207-208) fall in extraction gaps. */
198 PRIVATE
void sef_local_startup()
200 /* Register init callbacks. */
201 sef_setcb_init_fresh(sef_cb_init_fresh
);
/* On restart, fail hard rather than attempt stateful recovery. */
202 sef_setcb_init_restart(sef_cb_init_fail
);
204 /* No live update support for now. */
206 /* Let SEF perform startup. */
210 /*===========================================================================*
211 * sef_cb_init_fresh *
212 *===========================================================================*/
/* SEF fresh-init callback: populate the fproc table from PM, map boot-image
 * services from RS, mount the root filesystem, and subscribe to driver
 * events. NOTE(review): extraction fragment — several declarations, braces
 * and the `do {` that opens the PM message loop fall in gaps between the
 * surviving original line numbers. */
213 PRIVATE
int sef_cb_init_fresh(int type
, sef_init_info_t
*info
)
215 /* Initialize the virtual file server. */
217 register struct fproc
*rfp
;
219 struct vnode
*root_vp
;
221 struct rprocpub rprocpub
[NR_BOOT_PROCS
];
223 /* Clear endpoint field */
224 last_login_fs_e
= NONE
;
225 mount_m_in
.m1_p3
= (char *) NONE
;
227 /* Initialize the process table with help of the process manager messages.
228 * Expect one message for each system process with its slot number and pid.
229 * When no more processes follow, the magic process number NONE is sent.
230 * Then, stop and synchronize with the PM.
/* Loop body (the `do {` falls in a gap): one PM_INIT message per process. */
233 if (OK
!= (s
=sef_receive(PM_PROC_NR
, &mess
)))
234 panic("FS couldn't receive from PM: %d", s
);
236 if (mess
.m_type
!= PM_INIT
)
237 panic("unexpected message from PM: %d", mess
.m_type
);
/* PM_PROC == NONE is the end-of-table sentinel. */
239 if (NONE
== mess
.PM_PROC
) break;
241 rfp
= &fproc
[mess
.PM_SLOT
];
242 rfp
->fp_pid
= mess
.PM_PID
;
243 rfp
->fp_endpoint
= mess
.PM_PROC
;
/* Boot-time system processes start with system credentials. */
244 rfp
->fp_realuid
= (uid_t
) SYS_UID
;
245 rfp
->fp_effuid
= (uid_t
) SYS_UID
;
246 rfp
->fp_realgid
= (gid_t
) SYS_GID
;
247 rfp
->fp_effgid
= (gid_t
) SYS_GID
;
249 rfp
->fp_grant
= GRANT_INVALID
;
250 rfp
->fp_blocked_on
= FP_BLOCKED_ON_NONE
;
251 rfp
->fp_revived
= NOT_REVIVING
;
253 } while (TRUE
); /* continue until process NONE */
254 mess
.m_type
= OK
; /* tell PM that we succeeded */
255 s
= send(PM_PROC_NR
, &mess
); /* send synchronization message */
257 /* All process table entries have been set. Continue with FS initialization.
258 * Certain relations must hold for the file system to work at all. Some
259 * extra block_size requirements are checked at super-block-read-in time.
261 if (OPEN_MAX
> 127) panic("OPEN_MAX > 127");
263 /* The following initializations are needed to let dev_opcl succeed .*/
264 fp
= (struct fproc
*) NULL
;
265 who_e
= who_p
= FS_PROC_NR
;
267 /* Initialize device table. */
270 /* Map all the services in the boot image. */
/* Copy RS's public process table via the grant handed to us in `info`. */
271 if((s
= sys_safecopyfrom(RS_PROC_NR
, info
->rproctab_gid
, 0,
272 (vir_bytes
) rprocpub
, sizeof(rprocpub
), S
)) != OK
) {
273 panic("sys_safecopyfrom failed: %d", s
);
275 for(i
=0;i
< NR_BOOT_PROCS
;i
++) {
276 if(rprocpub
[i
].in_use
) {
277 if((s
= map_service(&rprocpub
[i
])) != OK
) {
278 panic("unable to map service: %d", s
);
283 init_root(); /* init root device and load super block */
284 init_select(); /* init select() structures */
287 vmp
= &vmnt
[0]; /* Should be the root filesystem */
288 if (vmp
->m_dev
== NO_DEV
)
289 panic("vfs: no root filesystem");
290 root_vp
= vmp
->m_root_node
;
292 /* The root device can now be accessed; set process directories. */
293 for (rfp
=&fproc
[0]; rfp
< &fproc
[NR_PROCS
]; rfp
++) {
294 FD_ZERO(&(rfp
->fp_filp_inuse
));
295 if (rfp
->fp_pid
!= PID_FREE
) {
/* Live slots get root as both root and working directory; reference
 * counting presumably happens in the gap lines (TODO confirm). */
298 rfp
->fp_rd
= root_vp
;
300 rfp
->fp_wd
= root_vp
;
302 } else rfp
->fp_endpoint
= NONE
;
305 system_hz
= sys_hz();
307 /* Subscribe to driver events for VFS drivers. */
308 s
= ds_subscribe("drv\\.vfs\\..*", DSF_INITIAL
| DSF_OVERWRITE
);
310 panic("vfs: can't subscribe to driver events");
316 FIXME("VFS: DO_SANITYCHECKS is on");
322 /*===========================================================================*
324 *===========================================================================*/
/* Fetch the next unit of work: first drain any pending revivals of
 * suspended processes (rebuilding m_in from the saved fproc fields),
 * otherwise block in sef_receive() for a new request and validate its
 * sender. NOTE(review): extraction fragment — loop/if braces and several
 * statements fall in gaps between the surviving original line numbers. */
325 PRIVATE
void get_work()
327 /* Normally wait for new input. However, if 'reviving' is
328 * nonzero, a suspended process must be awakened.
330 int r
, found_one
, fd_nr
;
332 struct fproc
*rp
;
334 while (reviving
!= 0) {
337 /* Revive a suspended process. */
338 for (rp
= &fproc
[0]; rp
< &fproc
[NR_PROCS
]; rp
++)
339 if (rp
->fp_pid
!= PID_FREE
&& rp
->fp_revived
== REVIVING
) {
340 int blocked_on
= rp
->fp_blocked_on
;
/* Reconstruct the request context from the saved fproc fields:
 * fp_fd packs the call number (low byte) and fd (next byte). */
342 who_p
= (int)(rp
- fproc
);
343 who_e
= rp
->fp_endpoint
;
344 call_nr
= rp
->fp_fd
& BYTE
;
346 m_in
.fd
= (rp
->fp_fd
>>8) & BYTE
;
347 m_in
.buffer
= rp
->fp_buffer
;
348 m_in
.nbytes
= rp
->fp_nbytes
;
349 /*no longer hanging*/
350 rp
->fp_blocked_on
= FP_BLOCKED_ON_NONE
;
351 rp
->fp_revived
= NOT_REVIVING
;
353 /* This should be a pipe I/O, not a device I/O.
354 * If it is, it'll 'leak' grants.
356 assert(!GRANT_VALID(rp
->fp_grant
));
358 if (blocked_on
== FP_BLOCKED_ON_PIPE
)
361 fd_nr
= (rp
->fp_fd
>> 8);
/* `f` is presumably the filp fetched in the gap before this call
 * (original lines ~362-363) — TODO confirm against the full file. */
364 r
= rw_pipe((call_nr
== READ
) ? READING
:
365 WRITING
, who_e
, fd_nr
, f
,
366 rp
->fp_buffer
, rp
->fp_nbytes
);
/* Reaching here with reviving != 0 but no REVIVING slot is fatal. */
375 panic("get_work couldn't revive anyone");
380 /* Normal case. No one to revive. */
381 if ((r
=sef_receive(ANY
, &m_in
)) != OK
)
382 panic("fs sef_receive error: %d", r
);
383 who_e
= m_in
.m_source
;
384 who_p
= _ENDPOINT_P(who_e
);
387 * negative who_p is never used to access the fproc array. Negative numbers
388 * (kernel tasks) are treated in a special way
/* Sanity checks on the sender's slot before it is used as an index. */
390 if(who_p
>= (int)(sizeof(fproc
) / sizeof(struct fproc
)))
391 panic("receive process out of range: %d", who_p
);
392 if(who_p
>= 0 && fproc
[who_p
].fp_endpoint
== NONE
) {
393 printf("FS: ignoring request from %d, endpointless slot %d (%d)\n",
394 m_in
.m_source
, who_p
, m_in
.m_type
);
/* Stored endpoint must match the message source, else the table and
 * the kernel disagree about who lives in this slot. */
397 if(who_p
>= 0 && fproc
[who_p
].fp_endpoint
!= who_e
) {
398 if(fproc
[who_p
].fp_endpoint
== NONE
) {
399 printf("slot unknown even\n");
401 printf("FS: receive endpoint inconsistent (source %d, who_p %d, stored ep %d, who_e %d).\n",
402 m_in
.m_source
, who_p
, fproc
[who_p
].fp_endpoint
, who_e
);
404 panic("FS: inconsistent endpoint ");
408 call_nr
= m_in
.m_type
;
414 /*===========================================================================*
416 *===========================================================================*/
/* Send a reply message to process `whom` carrying `result`, using the
 * non-blocking sendnb() so an unreceptive destination cannot wedge VFS;
 * a failed send is merely logged. NOTE(review): the printf argument list
 * on the last visible line and the closing brace fall in extraction gaps. */
417 PUBLIC
void reply(whom
, result
)
418 int whom
; /* process to reply to */
419 int result
; /* result of the call (usually OK or error #) */
421 /* Send a reply to a user process. If the send fails, just ignore it. */
/* Debug trace for SYMLINK replies (presumably compiled conditionally —
 * the surrounding #if lines fall in a gap; TODO confirm). */
425 if (call_nr
== SYMLINK
)
426 printf("vfs:reply: replying %d for call %d\n", result
, call_nr
);
429 m_out
.reply_type
= result
;
430 s
= sendnb(whom
, &m_out
);
431 if (s
!= OK
) printf("VFS: couldn't send reply %d to %d: %d\n",
435 /*===========================================================================*
437 *===========================================================================*/
/* Mount the boot-image ramdisk (DEV_IMGRD, served by MFS) as the root
 * filesystem: wait for the FS login, read its superblock, and wire up the
 * root vnode and the first vmnt slot. NOTE(review): extraction fragment —
 * several declarations, braces and statements fall in gaps between the
 * surviving original line numbers. */
438 PRIVATE
void init_root()
442 struct vnode
*root_node
;
446 struct node_details res
;
448 /* Open the root device. */
449 root_dev
= DEV_IMGRD
;
450 ROOT_FS_E
= MFS_PROC_NR
;
452 /* Wait FS login message */
453 if (last_login_fs_e
!= ROOT_FS_E
) {
454 /* Wait FS login message */
455 if (sef_receive(ROOT_FS_E
, &m
) != OK
) {
456 printf("VFS: Error receiving login request from FS_e %d\n",
458 panic("Error receiving login request from root filesystem: %d", ROOT_FS_E
);
460 if (m
.m_type
!= FS_READY
) {
461 printf("VFS: Invalid login request from FS_e %d\n",
463 panic("Error receiving login request from root filesystem: %d", ROOT_FS_E
);
/* Login consumed; clear it so it is not reused. */
466 last_login_fs_e
= NONE
;
468 /* Initialize vmnt table */
469 for (vmp
= &vmnt
[0]; vmp
< &vmnt
[NR_MNTS
]; ++vmp
)
474 /* We'll need a vnode for the root inode, check whether there is one */
475 if ((root_node
= get_free_vnode()) == NIL_VNODE
)
476 panic("Cannot get free vnode: %d", r
);
479 /* Get driver process' endpoint */
/* dmap is indexed by the major device number of root_dev. */
480 dp
= &dmap
[(root_dev
>> MAJOR
) & BYTE
];
481 if (dp
->dmap_driver
== NONE
) {
482 panic("No driver for root device: %d", r
);
485 label
= dp
->dmap_label
;
486 if (strlen(label
) == 0)
488 panic("vfs:init_root: no label for major: %d", root_dev
>> MAJOR
);
/* Ask the root FS to read its superblock; `res` receives the root
 * inode's details (remaining arguments fall in a gap). */
492 r
= req_readsuper(ROOT_FS_E
, label
, root_dev
, 0 /*!readonly*/,
495 panic("Cannot read superblock from root: %d", r
);
498 /* Fill in root node's fields */
499 root_node
->v_fs_e
= res
.fs_e
;
500 root_node
->v_inode_nr
= res
.inode_nr
;
501 root_node
->v_mode
= res
.fmode
;
502 root_node
->v_size
= res
.fsize
;
503 root_node
->v_sdev
= NO_DEV
;
504 root_node
->v_fs_count
= 1;
505 root_node
->v_ref_count
= 1;
507 /* Fill in max file size and blocksize for the vmnt */
508 vmp
->m_fs_e
= res
.fs_e
;
509 vmp
->m_dev
= root_dev
;
512 /* Root node is indeed on the partition */
513 root_node
->v_vmnt
= vmp
;
514 root_node
->v_dev
= vmp
->m_dev
;
516 /* Root directory is not mounted on a vnode. */
517 vmp
->m_mounted_on
= NULL
;
518 vmp
->m_root_node
= root_node
;
519 strcpy(vmp
->m_label
, "fs_imgrd"); /* FIXME: obtain this from RS */
522 /*===========================================================================*
524 *===========================================================================*/
/* Handle a request from the process manager: perform the requested
 * credential/process operation and send a typed reply back to PM.
 * NOTE(review): extraction fragment — this appears to be a switch on the
 * PM call number whose `switch`, `case` labels and `break` statements all
 * fall in gaps between the surviving original line numbers (TODO confirm
 * against the full file); the function also runs past the end of this view. */
525 PRIVATE
void service_pm()
/* PM_SETUID: apply new real/effective uid to the target process. */
531 pm_setuid(m_in
.PM_PROC
, m_in
.PM_EID
, m_in
.PM_RID
);
533 m_out
.m_type
= PM_SETUID_REPLY
;
534 m_out
.PM_PROC
= m_in
.PM_PROC
;
/* PM_SETGID: apply new real/effective gid to the target process. */
539 pm_setgid(m_in
.PM_PROC
, m_in
.PM_EID
, m_in
.PM_RID
);
541 m_out
.m_type
= PM_SETGID_REPLY
;
542 m_out
.PM_PROC
= m_in
.PM_PROC
;
/* PM_SETSID: make the target process a session leader. */
547 pm_setsid(m_in
.PM_PROC
);
549 m_out
.m_type
= PM_SETSID_REPLY
;
550 m_out
.PM_PROC
= m_in
.PM_PROC
;
/* PM_EXEC: perform the VFS side of exec(); status is returned to PM. */
555 r
= pm_exec(m_in
.PM_PROC
, m_in
.PM_PATH
, m_in
.PM_PATH_LEN
,
556 m_in
.PM_FRAME
, m_in
.PM_FRAME_LEN
);
558 /* Reply status to PM */
559 m_out
.m_type
= PM_EXEC_REPLY
;
560 m_out
.PM_PROC
= m_in
.PM_PROC
;
/* PM_EXIT: release the process's VFS state. */
566 pm_exit(m_in
.PM_PROC
);
568 /* Reply dummy status to PM for synchronization */
569 m_out
.m_type
= PM_EXIT_REPLY
;
570 m_out
.PM_PROC
= m_in
.PM_PROC
;
/* PM_DUMPCORE: write a core file for the target process. */
575 r
= pm_dumpcore(m_in
.PM_PROC
,
576 NULL
/* (struct mem_map *) m_in.PM_SEGPTR */);
578 /* Reply status to PM */
579 m_out
.m_type
= PM_CORE_REPLY
;
580 m_out
.PM_PROC
= m_in
.PM_PROC
;
/* PM_FORK / PM_SRV_FORK: duplicate the parent's VFS state for the child;
 * the reply type distinguishes the two variants. */
587 pm_fork(m_in
.PM_PPROC
, m_in
.PM_PROC
, m_in
.PM_CPID
);
589 m_out
.m_type
= (call_nr
== PM_FORK
) ? PM_FORK_REPLY
: PM_SRV_FORK_REPLY
;
590 m_out
.PM_PROC
= m_in
.PM_PROC
;
/* PM_SETGROUPS: install the supplementary group list. */
594 pm_setgroups(m_in
.PM_PROC
, m_in
.PM_GROUP_NO
, m_in
.PM_GROUP_ADDR
);
596 m_out
.m_type
= PM_SETGROUPS_REPLY
;
597 m_out
.PM_PROC
= m_in
.PM_PROC
;
/* PM_UNPAUSE: interrupt a blocked call on behalf of a signal. */
602 unpause(m_in
.PM_PROC
);
604 m_out
.m_type
= PM_UNPAUSE_REPLY
;
605 m_out
.PM_PROC
= m_in
.PM_PROC
;
/* PM_REBOOT: acknowledge only; any shutdown work falls in a gap. */
612 /* Reply dummy status to PM for synchronization */
613 m_out
.m_type
= PM_REBOOT_REPLY
;
/* Default: unknown PM request. */
618 printf("VFS: don't know how to handle PM request %x\n", call_nr
);
/* Common exit: every handled request is answered with a send to PM. */
623 r
= send(PM_PROC_NR
, &m_out
);
625 panic("service_pm: send failed: %d", r
);