make vfs & filesystems use failable copying
[minix3.git] / servers / vfs / misc.c
blob d9548cbda17089c5e36844cde7a5ec068b1da70b
/* This file contains a collection of miscellaneous procedures.  Some of them
 * perform simple system calls.  Some others do a little part of system calls
 * that are mostly performed by the Process Manager (PM).
 *
 * The entry points into this file are:
 *   do_fcntl:		perform the FCNTL system call
 *   do_sync:		perform the SYNC system call
 *   do_fsync:		perform the FSYNC system call
 *   pm_setsid:		perform VFS's side of the setsid system call
 *   pm_reboot:		sync disks and prepare for shutdown
 *   pm_fork:		adjust the tables after PM has performed a FORK system call
 *   do_exec:		handle files with FD_CLOEXEC on after PM has done an EXEC
 *   do_exit:		a process has exited; note that in the tables
 *   do_set:		set uid or gid for some process
 *   do_revive:		revive a process that was waiting for something (e.g. TTY)
 *   do_svrctl:		file system control
 *   do_getsysinfo:	request copy of FS data structure
 *   pm_dumpcore:	create a core dump
 */
#include "fs.h"
#include <fcntl.h>
#include <assert.h>
#include <unistd.h>
#include <string.h>
#include <minix/callnr.h>
#include <minix/safecopies.h>
#include <minix/endpoint.h>
#include <minix/com.h>
#include <minix/sysinfo.h>
#include <minix/u64.h>
#include <sys/ptrace.h>
#include <sys/svrctl.h>
#include <sys/resource.h>
#include "file.h"
#include "scratchpad.h"
#include <minix/vfsif.h>
#include "vnode.h"
#include "vmnt.h"

#define CORE_NAME	"core"
#define CORE_MODE	0777	/* mode to use on core image files */

#if ENABLE_SYSCALL_STATS
unsigned long calls_stats[NR_VFS_CALLS];
#endif

static void free_proc(int flags);
/*===========================================================================*
 *				do_getsysinfo				     *
 *===========================================================================*/
int do_getsysinfo(void)
{
  vir_bytes src_addr, dst_addr;
  size_t len, buf_size;
  int what;

  what = job_m_in.SI_WHAT;
  dst_addr = (vir_bytes) job_m_in.SI_WHERE;
  buf_size = (size_t) job_m_in.SI_SIZE;

  /* Only su may call do_getsysinfo. This call may leak information (and is
   * not stable enough to be part of the API/ABI). In the future, requests
   * from non-system processes should be denied.
   */
  if (!super_user) return(EPERM);

  switch(what) {
    case SI_PROC_TAB:
	src_addr = (vir_bytes) fproc;
	len = sizeof(struct fproc) * NR_PROCS;
	break;
    case SI_DMAP_TAB:
	src_addr = (vir_bytes) dmap;
	len = sizeof(struct dmap) * NR_DEVICES;
	break;
#if ENABLE_SYSCALL_STATS
    case SI_CALL_STATS:
	src_addr = (vir_bytes) calls_stats;
	len = sizeof(calls_stats);
	break;
#endif
    default:
	return(EINVAL);
  }

  if (len != buf_size)
	return(EINVAL);

  return sys_datacopy_wrapper(SELF, src_addr, who_e, dst_addr, len);
}
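
/* do_getsysinfo copies out a table only when the caller's buffer size matches
 * the table size exactly; a caller built against mismatched headers therefore
 * gets EINVAL rather than a truncated or overrun copy.
 */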
/*===========================================================================*
 *				do_fcntl				     *
 *===========================================================================*/
int do_fcntl(void)
{
/* Perform the fcntl(fd, cmd, ...) system call. */

  register struct filp *f;
  int new_fd, fl, r = OK, fcntl_req, fcntl_argx;
  tll_access_t locktype;

  scratch(fp).file.fd_nr = job_m_in.VFS_FCNTL_FD;
  scratch(fp).io.io_buffer = job_m_in.VFS_FCNTL_ARG_PTR;
  scratch(fp).io.io_nbytes = job_m_in.VFS_FCNTL_CMD;
  fcntl_req = job_m_in.VFS_FCNTL_CMD;
  fcntl_argx = job_m_in.VFS_FCNTL_ARG_INT;

  /* Is the file descriptor valid? */
  locktype = (fcntl_req == F_FREESP) ? VNODE_WRITE : VNODE_READ;
  if ((f = get_filp(scratch(fp).file.fd_nr, locktype)) == NULL)
	return(err_code);

  switch (fcntl_req) {
    case F_DUPFD:
	/* This replaces the old dup() system call. */
	if (fcntl_argx < 0 || fcntl_argx >= OPEN_MAX) r = EINVAL;
	else if ((r = get_fd(fp, fcntl_argx, 0, &new_fd, NULL)) == OK) {
		f->filp_count++;
		fp->fp_filp[new_fd] = f;
		r = new_fd;
	}
	break;

    case F_GETFD:
	/* Get close-on-exec flag (FD_CLOEXEC in POSIX Table 6-2). */
	r = 0;
	if (FD_ISSET(scratch(fp).file.fd_nr, &fp->fp_cloexec_set))
		r = FD_CLOEXEC;
	break;

    case F_SETFD:
	/* Set close-on-exec flag (FD_CLOEXEC in POSIX Table 6-2). */
	if (fcntl_argx & FD_CLOEXEC)
		FD_SET(scratch(fp).file.fd_nr, &fp->fp_cloexec_set);
	else
		FD_CLR(scratch(fp).file.fd_nr, &fp->fp_cloexec_set);
	break;

    case F_GETFL:
	/* Get file status flags (O_NONBLOCK and O_APPEND). */
	fl = f->filp_flags & (O_NONBLOCK | O_APPEND | O_ACCMODE);
	r = fl;
	break;

    case F_SETFL:
	/* Set file status flags (O_NONBLOCK and O_APPEND). */
	fl = O_NONBLOCK | O_APPEND;
	f->filp_flags = (f->filp_flags & ~fl) | (fcntl_argx & fl);
	break;

    case F_GETLK:
    case F_SETLK:
    case F_SETLKW:
	/* Set or clear a file lock. */
	r = lock_op(f, fcntl_req);
	break;

    case F_FREESP:
     {
	/* Free a section of a file. */
	off_t start, end, offset;
	struct flock flock_arg;

	/* Check if it's a regular file. */
	if (!S_ISREG(f->filp_vno->v_mode)) r = EINVAL;
	else if (!(f->filp_mode & W_BIT)) r = EBADF;
	else {
		/* Copy flock data from userspace. */
		r = sys_datacopy_wrapper(who_e,
			(vir_bytes) scratch(fp).io.io_buffer,
			SELF, (vir_bytes) &flock_arg, sizeof(flock_arg));

		if (r != OK) break;

		/* Convert starting offset to signed. */
		offset = (off_t) flock_arg.l_start;

		/* Figure out starting position base. */
		switch(flock_arg.l_whence) {
		  case SEEK_SET: start = 0; break;
		  case SEEK_CUR: start = f->filp_pos; break;
		  case SEEK_END: start = f->filp_vno->v_size; break;
		  default: r = EINVAL;
		}
		if (r != OK) break;

		/* Check for overflow or underflow. */
		if (offset > 0 && start + offset < start) r = EINVAL;
		else if (offset < 0 && start + offset > start) r = EINVAL;
		else {
			start += offset;
			if (start < 0) r = EINVAL;
		}
		if (r != OK) break;

		if (flock_arg.l_len != 0) {
			if (start >= f->filp_vno->v_size) r = EINVAL;
			else if ((end = start + flock_arg.l_len) <= start)
				r = EINVAL;
			else if (end > f->filp_vno->v_size)
				end = f->filp_vno->v_size;
		} else {
			end = 0;
		}
		if (r != OK) break;

		r = req_ftrunc(f->filp_vno->v_fs_e, f->filp_vno->v_inode_nr,
			start, end);

		if (r == OK && flock_arg.l_len == 0)
			f->filp_vno->v_size = start;
	}
	break;
     }
    case F_GETNOSIGPIPE:
	/* POSIX: return a value other than -1 if the flag is set, else -1. */
	r = -1;
	if (f->filp_flags & O_NOSIGPIPE)
		r = 0;
	break;
    case F_SETNOSIGPIPE:
	fl = (O_NOSIGPIPE);
	f->filp_flags = (f->filp_flags & ~fl) | (fcntl_argx & fl);
	break;
    default:
	r = EINVAL;
  }

  unlock_filp(f);
  return(r);
}
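
/* Note on F_FREESP: the range to free is resolved against l_whence/l_start
 * and clamped to the current file size, and the actual work is delegated to
 * the file server through req_ftrunc(). With l_len == 0 the request runs to
 * end of file, which is why the vnode's cached v_size is reset to 'start' on
 * success; in that case the call behaves like ftruncate().
 */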
/*===========================================================================*
 *				do_sync					     *
 *===========================================================================*/
int do_sync(void)
{
  struct vmnt *vmp;
  int r = OK;

  for (vmp = &vmnt[0]; vmp < &vmnt[NR_MNTS]; ++vmp) {
	if ((r = lock_vmnt(vmp, VMNT_READ)) != OK)
		break;
	if (vmp->m_dev != NO_DEV && vmp->m_fs_e != NONE &&
		vmp->m_root_node != NULL) {
		req_sync(vmp->m_fs_e);
	}
	unlock_vmnt(vmp);
  }

  return(r);
}
/*===========================================================================*
 *				do_fsync				     *
 *===========================================================================*/
int do_fsync(void)
{
/* Perform the fsync() system call. */
  struct filp *rfilp;
  struct vmnt *vmp;
  dev_t dev;
  int r = OK;

  scratch(fp).file.fd_nr = job_m_in.VFS_FSYNC_FD;

  if ((rfilp = get_filp(scratch(fp).file.fd_nr, VNODE_READ)) == NULL)
	return(err_code);

  dev = rfilp->filp_vno->v_dev;
  unlock_filp(rfilp);

  for (vmp = &vmnt[0]; vmp < &vmnt[NR_MNTS]; ++vmp) {
	if (vmp->m_dev != dev) continue;
	if ((r = lock_vmnt(vmp, VMNT_READ)) != OK)
		break;
	if (vmp->m_dev != NO_DEV && vmp->m_dev == dev &&
		vmp->m_fs_e != NONE && vmp->m_root_node != NULL) {
		req_sync(vmp->m_fs_e);
	}
	unlock_vmnt(vmp);
  }

  return(r);
}
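
/* Note on do_fsync: REQ_SYNC works at file-system granularity, so fsync() on
 * a descriptor flushes every file system mounted on the same device as the
 * file, not just the one file the caller named.
 */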
int dupvm(struct fproc *rfp, int pfd, int *vmfd, struct filp **newfilp)
{
  int result, procfd;
  struct filp *f = NULL;
  struct fproc *vmf = fproc_addr(VM_PROC_NR);

  *newfilp = NULL;

  if ((f = get_filp2(rfp, pfd, VNODE_READ)) == NULL) {
	printf("VFS dupvm: get_filp2 failed\n");
	return EBADF;
  }

  if (!(f->filp_vno->v_vmnt->m_fs_flags & RES_HASPEEK)) {
	unlock_filp(f);
#if 0	/* Noisy diagnostic for mmap() by ld.so */
	printf("VFS dupvm: no peek available\n");
#endif
	return EINVAL;
  }

  assert(f->filp_vno);
  assert(f->filp_vno->v_vmnt);

  if (!S_ISREG(f->filp_vno->v_mode) && !S_ISBLK(f->filp_vno->v_mode)) {
	printf("VFS: mmap regular/blockdev only; dev 0x%llx ino %llu has mode 0%o\n",
		f->filp_vno->v_dev, f->filp_vno->v_inode_nr,
		f->filp_vno->v_mode);
	unlock_filp(f);
	return EINVAL;
  }

  /* get free FD in VM */
  if ((result = get_fd(vmf, 0, 0, &procfd, NULL)) != OK) {
	unlock_filp(f);
	printf("VFS dupvm: getfd failed\n");
	return result;
  }

  *vmfd = procfd;

  f->filp_count++;
  assert(f->filp_count > 0);
  vmf->fp_filp[procfd] = f;

  *newfilp = f;

  return OK;
}
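
/* dupvm() leaves the filp shared between the original owner and VM: the
 * reference count is bumped and the filp is installed in VM's fproc slot, so
 * VM releases it later through a VMVFSREQ_FDCLOSE request handled in
 * do_vm_call() below.
 */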
/*===========================================================================*
 *				do_vm_call				     *
 *===========================================================================*/
int do_vm_call(void)
{
/* A call that VM does to VFS.
 * We must reply with the fixed type VM_VFS_REPLY (and put our result info
 * in the rest of the message) so VM can tell the difference between a
 * request from VFS and a reply to this call.
 */
  int req = job_m_in.VFS_VMCALL_REQ;
  int req_fd = job_m_in.VFS_VMCALL_FD;
  u32_t req_id = job_m_in.VFS_VMCALL_REQID;
  endpoint_t ep = job_m_in.VFS_VMCALL_ENDPOINT;
  u64_t offset = job_m_in.VFS_VMCALL_OFFSET;
  u32_t length = job_m_in.VFS_VMCALL_LENGTH;
  int result = OK;
  int slot;
  struct fproc *rfp, *vmf;
  struct filp *f = NULL;
  int r;

  if (job_m_in.m_source != VM_PROC_NR)
	return ENOSYS;

  if (isokendpt(ep, &slot) != OK) rfp = NULL;
  else rfp = &fproc[slot];

  vmf = fproc_addr(VM_PROC_NR);
  assert(fp == vmf);
  assert(rfp != vmf);

  switch(req) {
	case VMVFSREQ_FDLOOKUP:
	{
		int procfd;

		/* Lookup fd in referenced process. */

		if (!rfp) {
			printf("VFS: why isn't ep %d here?!\n", ep);
			result = ESRCH;
			goto reqdone;
		}

		if ((result = dupvm(rfp, req_fd, &procfd, &f)) != OK) {
#if 0	/* Noisy diagnostic for mmap() by ld.so */
			printf("vfs: dupvm failed\n");
#endif
			goto reqdone;
		}

		if (S_ISBLK(f->filp_vno->v_mode)) {
			assert(f->filp_vno->v_sdev != NO_DEV);
			job_m_out.VMV_DEV = f->filp_vno->v_sdev;
			job_m_out.VMV_INO = VMC_NO_INODE;
			job_m_out.VMV_SIZE_PAGES = LONG_MAX;
		} else {
			job_m_out.VMV_DEV = f->filp_vno->v_dev;
			job_m_out.VMV_INO = f->filp_vno->v_inode_nr;
			job_m_out.VMV_SIZE_PAGES =
				roundup(f->filp_vno->v_size,
					PAGE_SIZE)/PAGE_SIZE;
		}

		job_m_out.VMV_FD = procfd;

		result = OK;

		break;
	}
	case VMVFSREQ_FDCLOSE:
	{
		result = close_fd(fp, req_fd);
		if (result != OK) {
			printf("VFS: VM fd close for fd %d, %d (%d)\n",
				req_fd, fp->fp_endpoint, result);
		}
		break;
	}
	case VMVFSREQ_FDIO:
	{
		result = actual_lseek(fp, req_fd, SEEK_SET, offset,
			NULL);

		if (result == OK) {
			result = actual_read_write_peek(fp, PEEKING,
				req_fd, NULL, length);
		}

		break;
	}
	default:
		panic("VFS: bad request code from VM\n");
		break;
  }

reqdone:
  if (f)
	unlock_filp(f);

  /* fp is VM still. */
  assert(fp == vmf);
  job_m_out.VMV_ENDPOINT = ep;
  job_m_out.VMV_RESULT = result;
  job_m_out.VMV_REQID = req_id;

  /* Reply asynchronously as VM may not be able to receive
   * an ipc_sendnb() message.
   */
  job_m_out.m_type = VM_VFS_REPLY;
  r = asynsend3(VM_PROC_NR, &job_m_out, 0);
  if (r != OK) printf("VFS: couldn't asynsend3() to VM\n");

  /* VFS does not reply any further. */
  return SUSPEND;
}
/*===========================================================================*
 *				pm_reboot				     *
 *===========================================================================*/
void pm_reboot()
{
/* Perform the VFS side of the reboot call. This call is performed from the PM
 * process context.
 */
  message m_out;
  int i, r;
  struct fproc *rfp, *pmfp;

  pmfp = fp;

  do_sync();

  /* Do exit processing for all leftover processes and servers, but don't
   * actually exit them (if they were really gone, PM will tell us about it).
   * Skip processes that handle parts of the file system; we first need to
   * give them the chance to unmount (which should be possible as all normal
   * processes have no open files anymore).
   */
  /* This is the only place where we allow special modification of "fp". The
   * reboot procedure should really be implemented as a PM message broadcasted
   * to all processes, so that each process will be shut down cleanly by a
   * thread operating on its behalf. Doing everything here is simpler, but it
   * requires an exception to the strict model of having "fp" be the process
   * that owns the current worker thread.
   */
  for (i = 0; i < NR_PROCS; i++) {
	rfp = &fproc[i];

	/* Don't just free the proc right away, but let it finish what it was
	 * doing first */
	if (rfp != fp) lock_proc(rfp);
	if (rfp->fp_endpoint != NONE && find_vmnt(rfp->fp_endpoint) == NULL) {
		worker_set_proc(rfp);	/* temporarily fake process context */
		free_proc(0);
		worker_set_proc(pmfp);	/* restore original process context */
	}
	if (rfp != fp) unlock_proc(rfp);
  }

  do_sync();
  unmount_all(0 /* Don't force */);

  /* Try to exit all processes again including File Servers */
  for (i = 0; i < NR_PROCS; i++) {
	rfp = &fproc[i];

	/* Don't just free the proc right away, but let it finish what it was
	 * doing first */
	if (rfp != fp) lock_proc(rfp);
	if (rfp->fp_endpoint != NONE) {
		worker_set_proc(rfp);	/* temporarily fake process context */
		free_proc(0);
		worker_set_proc(pmfp);	/* restore original process context */
	}
	if (rfp != fp) unlock_proc(rfp);
  }

  do_sync();
  unmount_all(1 /* Force */);

  /* Reply to PM for synchronization */
  memset(&m_out, 0, sizeof(m_out));

  m_out.m_type = VFS_PM_REBOOT_REPLY;

  if ((r = ipc_send(PM_PROC_NR, &m_out)) != OK)
	panic("pm_reboot: ipc_send failed: %d", r);
}
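
/* The shutdown sequence above is deliberately two-pass: the first pass frees
 * every process that is not backing a mounted file system, so that the
 * subsequent unmount_all(0) can still talk to the File Servers; only then are
 * the File Servers themselves freed and the remaining mounts forced off with
 * unmount_all(1).
 */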
/*===========================================================================*
 *				pm_fork					     *
 *===========================================================================*/
void pm_fork(endpoint_t pproc, endpoint_t cproc, pid_t cpid)
{
/* Perform those aspects of the fork() system call that relate to files.
 * In particular, let the child inherit its parent's file descriptors.
 * The parent and child parameters tell who forked off whom. The file
 * system uses the same slot numbers as the kernel. Only PM makes this call.
 */
  struct fproc *cp, *pp;
  int i, parentno, childno;
  mutex_t c_fp_lock;

  /* Check up-to-dateness of fproc. */
  okendpt(pproc, &parentno);

  /* PM gives child endpoint, which implies process slot information.
   * Don't call isokendpt, because that will verify if the endpoint
   * number is correct in fproc, which it won't be.
   */
  childno = _ENDPOINT_P(cproc);
  if (childno < 0 || childno >= NR_PROCS)
	panic("VFS: bogus child for forking: %d", cproc);
  if (fproc[childno].fp_pid != PID_FREE)
	panic("VFS: forking on top of in-use child: %d", childno);

  /* Copy the parent's fproc struct to the child. */
  /* However, the mutex variables belong to a slot and must stay the same. */
  c_fp_lock = fproc[childno].fp_lock;
  fproc[childno] = fproc[parentno];
  fproc[childno].fp_lock = c_fp_lock;

  /* Increase the counters in the 'filp' table. */
  cp = &fproc[childno];
  pp = &fproc[parentno];

  for (i = 0; i < OPEN_MAX; i++)
	if (cp->fp_filp[i] != NULL) cp->fp_filp[i]->filp_count++;

  /* Fill in new process and endpoint id. */
  cp->fp_pid = cpid;
  cp->fp_endpoint = cproc;

  /* A forking process never has an outstanding grant, as it isn't blocking on
   * I/O. */
  if (GRANT_VALID(pp->fp_grant)) {
	panic("VFS: fork: pp (endpoint %d) has grant %d\n", pp->fp_endpoint,
		pp->fp_grant);
  }
  if (GRANT_VALID(cp->fp_grant)) {
	panic("VFS: fork: cp (endpoint %d) has grant %d\n", cp->fp_endpoint,
		cp->fp_grant);
  }

  /* A child is not a process leader, not being revived, etc. */
  cp->fp_flags = FP_NOFLAGS;

  /* Record the fact that both root and working dir have another user. */
  if (cp->fp_rd) dup_vnode(cp->fp_rd);
  if (cp->fp_wd) dup_vnode(cp->fp_wd);
}
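
/* Because the child ends up pointing at the very same filp objects as its
 * parent (only the reference counts are bumped), parent and child share file
 * offsets after fork, which matches the usual POSIX semantics for inherited
 * descriptors.
 */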
/*===========================================================================*
 *				free_proc				     *
 *===========================================================================*/
static void free_proc(int flags)
{
  int i;
  register struct fproc *rfp;
  register struct filp *rfilp;
  register struct vnode *vp;
  dev_t dev;

  if (fp->fp_endpoint == NONE)
	panic("free_proc: already free");

  if (fp_is_blocked(fp))
	unpause();

  /* Loop on file descriptors, closing any that are open. */
  for (i = 0; i < OPEN_MAX; i++) {
	(void) close_fd(fp, i);
  }

  /* Release root and working directories. */
  if (fp->fp_rd) { put_vnode(fp->fp_rd); fp->fp_rd = NULL; }
  if (fp->fp_wd) { put_vnode(fp->fp_wd); fp->fp_wd = NULL; }

  /* The rest of these actions are only done when processes actually exit. */
  if (!(flags & FP_EXITING)) return;

  fp->fp_flags |= FP_EXITING;

  /* Check if any process is SUSPENDed on this driver.
   * If a driver exits, unmap its entries in the dmap table.
   * (unmapping has to be done after the first step, because the
   * dmap table is used in the first step.)
   */
  unsuspend_by_endpt(fp->fp_endpoint);
  dmap_unmap_by_endpt(fp->fp_endpoint);

  worker_stop_by_endpt(fp->fp_endpoint);	/* Unblock waiting threads */
  vmnt_unmap_by_endpt(fp->fp_endpoint);		/* Invalidate open files if this
						 * was an active FS */

  /* If a session leader exits and it has a controlling tty, then revoke
   * access to its controlling tty from all other processes using it.
   */
  if ((fp->fp_flags & FP_SESLDR) && fp->fp_tty != 0) {
	dev = fp->fp_tty;
	for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
		if (rfp->fp_pid == PID_FREE) continue;
		if (rfp->fp_tty == dev) rfp->fp_tty = 0;

		for (i = 0; i < OPEN_MAX; i++) {
			if ((rfilp = rfp->fp_filp[i]) == NULL) continue;
			if (rfilp->filp_mode == FILP_CLOSED) continue;
			vp = rfilp->filp_vno;
			if (!S_ISCHR(vp->v_mode)) continue;
			if (vp->v_sdev != dev) continue;
			lock_filp(rfilp, VNODE_READ);
			(void) cdev_close(dev);	/* Ignore any errors. */

			rfilp->filp_mode = FILP_CLOSED;
			unlock_filp(rfilp);
		}
	}
  }

  /* Exit done. Mark slot as free. */
  fp->fp_endpoint = NONE;
  fp->fp_pid = PID_FREE;
  fp->fp_flags = FP_NOFLAGS;
}
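
/* The controlling-tty revocation walk above clears fp_tty for every process
 * on the same terminal, closes the character device for each open filp that
 * still refers to it, and marks those filps FILP_CLOSED so the revoked
 * descriptors can no longer reach the departed session leader's terminal.
 */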
/*===========================================================================*
 *				pm_exit					     *
 *===========================================================================*/
void pm_exit(void)
{
/* Perform the file system portion of the exit(status) system call.
 * This function is called from the context of the exiting process.
 */
  free_proc(FP_EXITING);
}
/*===========================================================================*
 *				pm_setgid				     *
 *===========================================================================*/
void pm_setgid(proc_e, egid, rgid)
endpoint_t proc_e;
int egid;
int rgid;
{
  register struct fproc *tfp;
  int slot;

  okendpt(proc_e, &slot);
  tfp = &fproc[slot];

  tfp->fp_effgid = egid;
  tfp->fp_realgid = rgid;
}
/*===========================================================================*
 *				pm_setgroups				     *
 *===========================================================================*/
void pm_setgroups(proc_e, ngroups, groups)
endpoint_t proc_e;
int ngroups;
gid_t *groups;
{
  struct fproc *rfp;
  int slot;

  okendpt(proc_e, &slot);
  rfp = &fproc[slot];
  if (ngroups * sizeof(gid_t) > sizeof(rfp->fp_sgroups))
	panic("VFS: pm_setgroups: too much data to copy");
  if (sys_datacopy_wrapper(who_e, (vir_bytes) groups, SELF,
	(vir_bytes) rfp->fp_sgroups, ngroups * sizeof(gid_t)) == OK) {
	rfp->fp_ngroups = ngroups;
  } else
	panic("VFS: pm_setgroups: datacopy failed");
}
/*===========================================================================*
 *				pm_setuid				     *
 *===========================================================================*/
void pm_setuid(proc_e, euid, ruid)
endpoint_t proc_e;
int euid;
int ruid;
{
  struct fproc *tfp;
  int slot;

  okendpt(proc_e, &slot);
  tfp = &fproc[slot];

  tfp->fp_effuid = euid;
  tfp->fp_realuid = ruid;
}
/*===========================================================================*
 *				pm_setsid				     *
 *===========================================================================*/
void pm_setsid(endpoint_t proc_e)
{
/* Perform the VFS side of the SETSID call, i.e. get rid of the controlling
 * terminal of a process, and make the process a session leader.
 */
  struct fproc *rfp;
  int slot;

  /* Make the process a session leader with no controlling tty. */
  okendpt(proc_e, &slot);
  rfp = &fproc[slot];
  rfp->fp_flags |= FP_SESLDR;
  rfp->fp_tty = 0;
}
/*===========================================================================*
 *				do_svrctl				     *
 *===========================================================================*/
int do_svrctl(void)
{
  unsigned int svrctl;
  vir_bytes ptr;

  svrctl = job_m_in.SVRCTL_REQ;
  ptr = (vir_bytes) job_m_in.SVRCTL_ARG;
  if (((svrctl >> 8) & 0xFF) != 'M') return(EINVAL);

  switch (svrctl) {
    case VFSSETPARAM:
    case VFSGETPARAM:
	{
		struct sysgetenv sysgetenv;
		char search_key[64];
		char val[64];
		int r, s;

		/* Copy sysgetenv structure to VFS */
		if (sys_datacopy_wrapper(who_e, ptr, SELF,
			(vir_bytes) &sysgetenv, sizeof(sysgetenv)) != OK)
			return(EFAULT);

		/* Basic sanity checking */
		if (svrctl == VFSSETPARAM) {
			if (sysgetenv.keylen <= 0 ||
			    sysgetenv.keylen > (sizeof(search_key) - 1) ||
			    sysgetenv.vallen <= 0 ||
			    sysgetenv.vallen >= sizeof(val)) {
				return(EINVAL);
			}
		}

		/* Copy parameter "key" */
		if ((s = sys_datacopy_wrapper(who_e, (vir_bytes) sysgetenv.key,
			SELF, (vir_bytes) search_key,
			sysgetenv.keylen)) != OK)
			return(s);
		search_key[sysgetenv.keylen] = '\0';	/* Limit string */

		/* Is it a parameter we know? */
		if (svrctl == VFSSETPARAM) {
			if (!strcmp(search_key, "verbose")) {
				int verbose_val;
				if ((s = sys_datacopy_wrapper(who_e,
					(vir_bytes) sysgetenv.val, SELF,
					(vir_bytes) &val,
					sysgetenv.vallen)) != OK)
					return(s);
				val[sysgetenv.vallen] = '\0'; /* Limit string */
				verbose_val = atoi(val);
				if (verbose_val < 0 || verbose_val > 4) {
					return(EINVAL);
				}
				verbose = verbose_val;
				r = OK;
			} else {
				r = ESRCH;
			}
		} else { /* VFSGETPARAM */
			char small_buf[60];

			r = ESRCH;
			if (!strcmp(search_key, "print_traces")) {
				mthread_stacktraces();
				sysgetenv.val = 0;
				sysgetenv.vallen = 0;
				r = OK;
			} else if (!strcmp(search_key, "active_threads")) {
				int active = NR_WTHREADS -
					worker_available();
				snprintf(small_buf, sizeof(small_buf) - 1,
					"%d", active);
				sysgetenv.vallen = strlen(small_buf);
				r = OK;
			}

			if (r == OK) {
				if ((s = sys_datacopy_wrapper(SELF,
					(vir_bytes) &sysgetenv, who_e, ptr,
					sizeof(sysgetenv))) != OK)
					return(s);
				if (sysgetenv.val != 0) {
					if ((s = sys_datacopy_wrapper(SELF,
						(vir_bytes) small_buf, who_e,
						(vir_bytes) sysgetenv.val,
						sysgetenv.vallen)) != OK)
						return(s);
				}
			}
		}

		return(r);
	}
    default:
	return(EINVAL);
  }
}
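
/* The only parameters do_svrctl currently understands are "verbose"
 * (VFSSETPARAM, accepting levels 0..4) and the read-only keys "print_traces"
 * and "active_threads" (VFSGETPARAM); any other key yields ESRCH.
 */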
/*===========================================================================*
 *				pm_dumpcore				     *
 *===========================================================================*/
int pm_dumpcore(int csig, vir_bytes exe_name)
{
  int r = OK, core_fd;
  struct filp *f;
  char core_path[PATH_MAX];
  char proc_name[PROC_NAME_LEN];

  /* if a process is blocked, scratch(fp).file.fd_nr holds the fd it's blocked
   * on. free it up for use by common_open().
   */
  if (fp_is_blocked(fp))
	unpause();

  /* open core file */
  snprintf(core_path, PATH_MAX, "%s.%d", CORE_NAME, fp->fp_pid);
  core_fd = common_open(core_path, O_WRONLY | O_CREAT | O_TRUNC, CORE_MODE);
  if (core_fd < 0) { r = core_fd; goto core_exit; }

  /* get process' name */
  r = sys_datacopy_wrapper(PM_PROC_NR, exe_name, VFS_PROC_NR,
	(vir_bytes) proc_name, PROC_NAME_LEN);
  if (r != OK) goto core_exit;
  proc_name[PROC_NAME_LEN - 1] = '\0';

  if ((f = get_filp(core_fd, VNODE_WRITE)) == NULL) { r = EBADF; goto core_exit; }
  write_elf_core_file(f, csig, proc_name);
  unlock_filp(f);
  (void) close_fd(fp, core_fd);		/* ignore failure, we're exiting anyway */

core_exit:
  if (csig)
	free_proc(FP_EXITING);
  return(r);
}
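
/* The core image is written to "core.<pid>", resolved like any relative path
 * against the dumping process's working directory, with mode CORE_MODE.
 * free_proc(FP_EXITING) runs only when csig is nonzero, i.e. when the dump is
 * being made because the process is terminating on a signal.
 */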
/*===========================================================================*
 *				ds_event				     *
 *===========================================================================*/
void
ds_event(void)
{
  char key[DS_MAX_KEYLEN];
  char *blkdrv_prefix = "drv.blk.";
  char *chrdrv_prefix = "drv.chr.";
  u32_t value;
  int type, r, is_blk;
  endpoint_t owner_endpoint;

  /* Get the event and the owner from DS. */
  while ((r = ds_check(key, &type, &owner_endpoint)) == OK) {
	/* Only check for block and character driver up events. */
	if (!strncmp(key, blkdrv_prefix, strlen(blkdrv_prefix))) {
		is_blk = TRUE;
	} else if (!strncmp(key, chrdrv_prefix, strlen(chrdrv_prefix))) {
		is_blk = FALSE;
	} else {
		continue;
	}

	if ((r = ds_retrieve_u32(key, &value)) != OK) {
		printf("VFS: ds_event: ds_retrieve_u32 failed\n");
		break;
	}
	if (value != DS_DRIVER_UP) continue;

	/* Perform up. */
	dmap_endpt_up(owner_endpoint, is_blk);
  }

  if (r != ENOENT) printf("VFS: ds_event: ds_check failed: %d\n", r);
}
/* A function to be called on panic(). */
void panic_hook(void)
{
  printf("VFS mthread stacktraces:\n");
  mthread_stacktraces();
}
/*===========================================================================*
 *				do_getrusage				     *
 *===========================================================================*/
int do_getrusage(void)
{
  int res;
  struct rusage r_usage;

  if ((res = sys_datacopy_wrapper(who_e, (vir_bytes) m_in.RU_RUSAGE_ADDR,
	SELF, (vir_bytes) &r_usage, (vir_bytes) sizeof(r_usage))) < 0)
	return res;

  r_usage.ru_inblock = 0;
  r_usage.ru_oublock = 0;
  r_usage.ru_ixrss = fp->text_size;
  r_usage.ru_idrss = fp->data_size;
  r_usage.ru_isrss = DEFAULT_STACK_LIMIT;

  return sys_datacopy_wrapper(SELF, (vir_bytes) &r_usage, who_e,
	(vir_bytes) m_in.RU_RUSAGE_ADDR, (phys_bytes) sizeof(r_usage));
}
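
/* do_getrusage only fills in the memory-size fields (ru_ixrss, ru_idrss,
 * ru_isrss); VFS does not track block I/O per process, so ru_inblock and
 * ru_oublock are reported as zero, and the remaining fields are whatever the
 * caller's struct already contained, since it is copied in before being
 * updated and copied back.
 */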