tools/llvm: Do not build with symbols
[minix3.git] / minix / servers / vfs / misc.c
blob65d4b8630b7603e64b2ff783cd1a3a014cdf65f9
1 /* This file contains a collection of miscellaneous procedures. Some of them
2 * perform simple system calls. Some others do a little part of system calls
3 * that are mostly performed by the Memory Manager.
5 * The entry points into this file are
6 * do_fcntl: perform the FCNTL system call
7 * do_sync: perform the SYNC system call
8 * do_fsync: perform the FSYNC system call
9 * pm_setsid: perform VFS's side of setsid system call
10 * pm_reboot: sync disks and prepare for shutdown
11 * pm_fork: adjust the tables after PM has performed a FORK system call
12 * pm_exit: a process has exited; note that in the tables
13 * pm_setuid, pm_setgid, pm_setgroups: set credentials for some process
14 * do_vm_call: handle a request made by VM
15 * ds_event: handle a driver-up event from the Data Store
16 * do_svrctl: file system control
17 * do_getsysinfo: request copy of FS data structure
18 * pm_dumpcore: create a core dump
21 #include "fs.h"
22 #include <fcntl.h>
23 #include <assert.h>
24 #include <unistd.h>
25 #include <string.h>
26 #include <minix/callnr.h>
27 #include <minix/safecopies.h>
28 #include <minix/endpoint.h>
29 #include <minix/com.h>
30 #include <minix/sysinfo.h>
31 #include <minix/u64.h>
32 #include <sys/ptrace.h>
33 #include <sys/svrctl.h>
34 #include <sys/resource.h>
35 #include "file.h"
36 #include "scratchpad.h"
37 #include <minix/vfsif.h>
38 #include "vnode.h"
39 #include "vmnt.h"
41 #define CORE_NAME "core"
42 #define CORE_MODE 0777 /* mode to use on core image files */
44 #if ENABLE_SYSCALL_STATS
45 unsigned long calls_stats[NR_VFS_CALLS];
46 #endif
48 static void free_proc(int flags);
50 /*===========================================================================*
51 * do_getsysinfo *
52 *===========================================================================*/
53 int do_getsysinfo(void)
55 vir_bytes src_addr, dst_addr;
56 size_t len, buf_size;
57 int what;
59 what = job_m_in.m_lsys_getsysinfo.what;
60 dst_addr = job_m_in.m_lsys_getsysinfo.where;
61 buf_size = job_m_in.m_lsys_getsysinfo.size;
63 /* Only su may call do_getsysinfo. This call may leak information (and is not
64 * stable enough to be part of the API/ABI). In the future, requests from
65 * non-system processes should be denied.
68 if (!super_user) return(EPERM);
70 switch(what) {
71 case SI_PROC_TAB:
72 src_addr = (vir_bytes) fproc;
73 len = sizeof(struct fproc) * NR_PROCS;
74 break;
75 case SI_DMAP_TAB:
76 src_addr = (vir_bytes) dmap;
77 len = sizeof(struct dmap) * NR_DEVICES;
78 break;
79 #if ENABLE_SYSCALL_STATS
80 case SI_CALL_STATS:
81 src_addr = (vir_bytes) calls_stats;
82 len = sizeof(calls_stats);
83 break;
84 #endif
85 default:
86 return(EINVAL);
89 if (len != buf_size)
90 return(EINVAL);
92 return sys_datacopy_wrapper(SELF, src_addr, who_e, dst_addr, len);
95 /*===========================================================================*
96 * do_fcntl *
97 *===========================================================================*/
98 int do_fcntl(void)
100 /* Perform the fcntl(fd, cmd, ...) system call. */
102 register struct filp *f;
103 int new_fd, fl, r = OK, fcntl_req, fcntl_argx;
104 tll_access_t locktype;
/* Stash the call arguments in this process' scratch area.
 * NOTE(review): io_nbytes is loaded with the command code, not a byte
 * count -- presumably the field is simply reused as scratch storage here;
 * confirm against other users of scratch(fp).io.
 */
106 scratch(fp).file.fd_nr = job_m_in.m_lc_vfs_fcntl.fd;
107 scratch(fp).io.io_buffer = job_m_in.m_lc_vfs_fcntl.arg_ptr;
108 scratch(fp).io.io_nbytes = job_m_in.m_lc_vfs_fcntl.cmd;
109 fcntl_req = job_m_in.m_lc_vfs_fcntl.cmd;
110 fcntl_argx = job_m_in.m_lc_vfs_fcntl.arg_int;
112 /* Is the file descriptor valid? */
/* F_FREESP modifies file contents, so it needs a write lock on the vnode;
 * every other request only reads through the filp.
 */
113 locktype = (fcntl_req == F_FREESP) ? VNODE_WRITE : VNODE_READ;
114 if ((f = get_filp(scratch(fp).file.fd_nr, locktype)) == NULL)
115 return(err_code);
117 switch (fcntl_req) {
118 case F_DUPFD:
119 /* This replaces the old dup() system call. */
120 if (fcntl_argx < 0 || fcntl_argx >= OPEN_MAX) r = EINVAL;
121 else if ((r = get_fd(fp, fcntl_argx, 0, &new_fd, NULL)) == OK) {
122 f->filp_count++;
123 fp->fp_filp[new_fd] = f;
124 r = new_fd;
126 break;
128 case F_GETFD:
129 /* Get close-on-exec flag (FD_CLOEXEC in POSIX Table 6-2). */
130 r = 0;
131 if (FD_ISSET(scratch(fp).file.fd_nr, &fp->fp_cloexec_set))
132 r = FD_CLOEXEC;
133 break;
135 case F_SETFD:
136 /* Set close-on-exec flag (FD_CLOEXEC in POSIX Table 6-2). */
137 if (fcntl_argx & FD_CLOEXEC)
138 FD_SET(scratch(fp).file.fd_nr, &fp->fp_cloexec_set);
139 else
140 FD_CLR(scratch(fp).file.fd_nr, &fp->fp_cloexec_set);
141 break;
143 case F_GETFL:
144 /* Get file status flags (O_NONBLOCK and O_APPEND). */
145 fl = f->filp_flags & (O_NONBLOCK | O_APPEND | O_ACCMODE);
146 r = fl;
147 break;
149 case F_SETFL:
150 /* Set file status flags (O_NONBLOCK and O_APPEND). */
151 fl = O_NONBLOCK | O_APPEND;
152 f->filp_flags = (f->filp_flags & ~fl) | (fcntl_argx & fl);
153 break;
155 case F_GETLK:
156 case F_SETLK:
157 case F_SETLKW:
158 /* Set or clear a file lock. */
159 r = lock_op(f, fcntl_req);
160 break;
162 case F_FREESP:
164 /* Free a section of a file */
165 off_t start, end, offset;
166 struct flock flock_arg;
168 /* Check if it's a regular file. */
169 if (!S_ISREG(f->filp_vno->v_mode)) r = EINVAL;
170 else if (!(f->filp_mode & W_BIT)) r = EBADF;
171 else {
172 /* Copy flock data from userspace. */
173 r = sys_datacopy_wrapper(who_e, scratch(fp).io.io_buffer,
174 SELF, (vir_bytes) &flock_arg, sizeof(flock_arg));
177 if (r != OK) break;
179 /* Convert starting offset to signed. */
180 offset = (off_t) flock_arg.l_start;
182 /* Figure out starting position base. */
183 switch(flock_arg.l_whence) {
184 case SEEK_SET: start = 0; break;
185 case SEEK_CUR: start = f->filp_pos; break;
186 case SEEK_END: start = f->filp_vno->v_size; break;
187 default: r = EINVAL;
189 if (r != OK) break;
191 /* Check for overflow or underflow. */
192 if (offset > 0 && start + offset < start) r = EINVAL;
193 else if (offset < 0 && start + offset > start) r = EINVAL;
194 else {
195 start += offset;
196 if (start < 0) r = EINVAL;
198 if (r != OK) break;
/* Zero l_len means: truncate the file at 'start'. Nonzero l_len frees
 * the byte range [start, end), clamped to the current file size.
 */
200 if (flock_arg.l_len != 0) {
201 if (start >= f->filp_vno->v_size) r = EINVAL;
202 else if ((end = start + flock_arg.l_len) <= start) r = EINVAL;
203 else if (end > f->filp_vno->v_size) end = f->filp_vno->v_size;
204 } else {
205 end = 0;
207 if (r != OK) break;
209 r = req_ftrunc(f->filp_vno->v_fs_e, f->filp_vno->v_inode_nr,start,end);
/* On a successful truncate (l_len == 0) keep the cached size in sync. */
211 if (r == OK && flock_arg.l_len == 0)
212 f->filp_vno->v_size = start;
214 break;
216 case F_GETNOSIGPIPE:
217 /* POSIX: return value other than -1 if flag is set, else -1 */
218 r = -1;
219 if (f->filp_flags & O_NOSIGPIPE)
220 r = 0;
221 break;
222 case F_SETNOSIGPIPE:
223 fl = (O_NOSIGPIPE);
224 f->filp_flags = (f->filp_flags & ~fl) | (fcntl_argx & fl);
225 break;
226 case F_FLUSH_FS_CACHE:
228 struct vnode *vn = f->filp_vno;
229 mode_t mode = f->filp_vno->v_mode;
230 if (!super_user) {
231 r = EPERM;
232 } else if (S_ISBLK(mode)) {
233 /* Block device; flush corresponding device blocks. */
234 r = req_flush(vn->v_bfs_e, vn->v_sdev);
235 } else if (S_ISREG(mode) || S_ISDIR(mode)) {
236 /* Directory or regular file; flush hosting FS blocks. */
237 r = req_flush(vn->v_fs_e, vn->v_dev);
238 } else {
239 /* Remaining cases.. Meaning unclear. */
240 r = ENODEV;
242 break;
244 default:
245 r = EINVAL;
/* The filp was locked by get_filp() above; always release it. */
248 unlock_filp(f);
249 return(r);
252 /*===========================================================================*
253 * do_sync *
254 *===========================================================================*/
255 int do_sync(void)
257 struct vmnt *vmp;
258 int r = OK;
260 for (vmp = &vmnt[0]; vmp < &vmnt[NR_MNTS]; ++vmp) {
261 if ((r = lock_vmnt(vmp, VMNT_READ)) != OK)
262 break;
263 if (vmp->m_dev != NO_DEV && vmp->m_fs_e != NONE &&
264 vmp->m_root_node != NULL) {
265 req_sync(vmp->m_fs_e);
267 unlock_vmnt(vmp);
270 return(r);
273 /*===========================================================================*
274 * do_fsync *
275 *===========================================================================*/
276 int do_fsync(void)
278 /* Perform the fsync() system call. */
279 struct filp *rfilp;
280 struct vmnt *vmp;
281 dev_t dev;
282 int r = OK;
284 scratch(fp).file.fd_nr = job_m_in.m_lc_vfs_fsync.fd;
286 if ((rfilp = get_filp(scratch(fp).file.fd_nr, VNODE_READ)) == NULL)
287 return(err_code);
289 dev = rfilp->filp_vno->v_dev;
290 unlock_filp(rfilp);
292 for (vmp = &vmnt[0]; vmp < &vmnt[NR_MNTS]; ++vmp) {
293 if (vmp->m_dev != dev) continue;
294 if ((r = lock_vmnt(vmp, VMNT_READ)) != OK)
295 break;
296 if (vmp->m_dev != NO_DEV && vmp->m_dev == dev &&
297 vmp->m_fs_e != NONE && vmp->m_root_node != NULL) {
299 req_sync(vmp->m_fs_e);
301 unlock_vmnt(vmp);
304 return(r);
/* Duplicate a file descriptor of process 'rfp' into VM's descriptor table,
 * for use by VM's mmap machinery. On success, *vmfd holds VM's new fd and
 * *newfilp the shared filp, which is returned LOCKED (read); the caller is
 * responsible for unlocking it. Only regular files and block devices on
 * file systems supporting peek are accepted.
 */
307 int dupvm(struct fproc *rfp, int pfd, int *vmfd, struct filp **newfilp)
309 int result, procfd;
310 struct filp *f = NULL;
311 struct fproc *vmf = fproc_addr(VM_PROC_NR);
313 *newfilp = NULL;
315 if ((f = get_filp2(rfp, pfd, VNODE_READ)) == NULL) {
316 printf("VFS dupvm: get_filp2 failed\n");
317 return EBADF;
/* The hosting file system must support the peek request for mmap. */
320 if(!(f->filp_vno->v_vmnt->m_fs_flags & RES_HASPEEK)) {
321 unlock_filp(f);
322 #if 0 /* Noisy diagnostic for mmap() by ld.so */
323 printf("VFS dupvm: no peek available\n");
324 #endif
325 return EINVAL;
328 assert(f->filp_vno);
329 assert(f->filp_vno->v_vmnt);
331 if (!S_ISREG(f->filp_vno->v_mode) && !S_ISBLK(f->filp_vno->v_mode)) {
332 printf("VFS: mmap regular/blockdev only; dev 0x%llx ino %llu has mode 0%o\n",
333 f->filp_vno->v_dev, f->filp_vno->v_inode_nr, f->filp_vno->v_mode);
334 unlock_filp(f);
335 return EINVAL;
338 /* get free FD in VM */
339 if((result=get_fd(vmf, 0, 0, &procfd, NULL)) != OK) {
340 unlock_filp(f);
341 printf("VFS dupvm: getfd failed\n");
342 return result;
345 *vmfd = procfd;
/* Share the filp with VM: bump the reference count and install it. */
347 f->filp_count++;
348 assert(f->filp_count > 0);
349 vmf->fp_filp[procfd] = f;
351 *newfilp = f;
353 return OK;
356 /*===========================================================================*
357 * do_vm_call *
358 *===========================================================================*/
359 int do_vm_call(void)
361 /* A call that VM does to VFS.
362 * We must reply with the fixed type VM_VFS_REPLY (and put our result info
363 * in the rest of the message) so VM can tell the difference between a
364 * request from VFS and a reply to this call.
366 int req = job_m_in.VFS_VMCALL_REQ;
367 int req_fd = job_m_in.VFS_VMCALL_FD;
368 u32_t req_id = job_m_in.VFS_VMCALL_REQID;
369 endpoint_t ep = job_m_in.VFS_VMCALL_ENDPOINT;
370 u64_t offset = job_m_in.VFS_VMCALL_OFFSET;
371 u32_t length = job_m_in.VFS_VMCALL_LENGTH;
372 int result = OK;
373 int slot;
374 struct fproc *rfp, *vmf;
375 struct filp *f = NULL;
376 int r;
/* Only VM itself may make this call. */
378 if(job_m_in.m_source != VM_PROC_NR)
379 return ENOSYS;
381 if(isokendpt(ep, &slot) != OK) rfp = NULL;
382 else rfp = &fproc[slot];
384 vmf = fproc_addr(VM_PROC_NR);
385 assert(fp == vmf);
386 assert(rfp != vmf);
388 switch(req) {
389 case VMVFSREQ_FDLOOKUP:
391 int procfd;
393 /* Lookup fd in referenced process. */
395 if(!rfp) {
396 printf("VFS: why isn't ep %d here?!\n", ep);
397 result = ESRCH;
398 goto reqdone;
/* On success, dupvm leaves 'f' locked; released at reqdone. */
401 if((result = dupvm(rfp, req_fd, &procfd, &f)) != OK) {
402 #if 0 /* Noisy diagnostic for mmap() by ld.so */
403 printf("vfs: dupvm failed\n");
404 #endif
405 goto reqdone;
/* Block devices have no meaningful inode/size for VM; regular files
 * report their size rounded up to whole pages.
 */
408 if(S_ISBLK(f->filp_vno->v_mode)) {
409 assert(f->filp_vno->v_sdev != NO_DEV);
410 job_m_out.VMV_DEV = f->filp_vno->v_sdev;
411 job_m_out.VMV_INO = VMC_NO_INODE;
412 job_m_out.VMV_SIZE_PAGES = LONG_MAX;
413 } else {
414 job_m_out.VMV_DEV = f->filp_vno->v_dev;
415 job_m_out.VMV_INO = f->filp_vno->v_inode_nr;
416 job_m_out.VMV_SIZE_PAGES =
417 roundup(f->filp_vno->v_size,
418 PAGE_SIZE)/PAGE_SIZE;
421 job_m_out.VMV_FD = procfd;
423 result = OK;
425 break;
427 case VMVFSREQ_FDCLOSE:
429 result = close_fd(fp, req_fd);
430 if(result != OK) {
431 printf("VFS: VM fd close for fd %d, %d (%d)\n",
432 req_fd, fp->fp_endpoint, result);
434 break;
436 case VMVFSREQ_FDIO:
/* Seek to the requested offset, then peek 'length' bytes to fault the
 * pages in on VM's behalf.
 */
438 result = actual_lseek(fp, req_fd, SEEK_SET, offset,
439 NULL);
441 if(result == OK) {
442 result = actual_read_write_peek(fp, PEEKING,
443 req_fd, /* vir_bytes */ 0, length);
446 break;
448 default:
449 panic("VFS: bad request code from VM\n");
450 break;
453 reqdone:
454 if(f)
455 unlock_filp(f);
457 /* fp is VM still. */
458 assert(fp == vmf);
459 job_m_out.VMV_ENDPOINT = ep;
460 job_m_out.VMV_RESULT = result;
461 job_m_out.VMV_REQID = req_id;
463 /* Reply asynchronously as VM may not be able to receive
464 * an ipc_sendnb() message.
466 job_m_out.m_type = VM_VFS_REPLY;
467 r = asynsend3(VM_PROC_NR, &job_m_out, 0);
468 if(r != OK) printf("VFS: couldn't asynsend3() to VM\n");
470 /* VFS does not reply any further */
471 return SUSPEND;
474 /*===========================================================================*
475 * pm_reboot *
476 *===========================================================================*/
477 void pm_reboot()
479 /* Perform the VFS side of the reboot call. This call is performed from the PM
480 * process context.
482 message m_out;
483 int i, r;
484 struct fproc *rfp, *pmfp;
486 pmfp = fp;
488 do_sync();
490 /* Do exit processing for all leftover processes and servers, but don't
491 * actually exit them (if they were really gone, PM will tell us about it).
492 * Skip processes that handle parts of the file system; we first need to give
493 * them the chance to unmount (which should be possible as all normal
494 * processes have no open files anymore).
496 /* This is the only place where we allow special modification of "fp". The
497 * reboot procedure should really be implemented as a PM message broadcasted
498 * to all processes, so that each process will be shut down cleanly by a
499 * thread operating on its behalf. Doing everything here is simpler, but it
500 * requires an exception to the strict model of having "fp" be the process
501 * that owns the current worker thread.
503 for (i = 0; i < NR_PROCS; i++) {
504 rfp = &fproc[i];
506 /* Don't just free the proc right away, but let it finish what it was
507 * doing first */
508 if (rfp != fp) lock_proc(rfp);
/* First pass: only non-file-server processes (no vmnt attached). */
509 if (rfp->fp_endpoint != NONE && find_vmnt(rfp->fp_endpoint) == NULL) {
510 worker_set_proc(rfp); /* temporarily fake process context */
511 free_proc(0);
512 worker_set_proc(pmfp); /* restore original process context */
514 if (rfp != fp) unlock_proc(rfp);
517 do_sync();
518 unmount_all(0 /* Don't force */);
520 /* Try to exit all processes again including File Servers */
521 for (i = 0; i < NR_PROCS; i++) {
522 rfp = &fproc[i];
524 /* Don't just free the proc right away, but let it finish what it was
525 * doing first */
526 if (rfp != fp) lock_proc(rfp);
527 if (rfp->fp_endpoint != NONE) {
528 worker_set_proc(rfp); /* temporarily fake process context */
529 free_proc(0);
530 worker_set_proc(pmfp); /* restore original process context */
532 if (rfp != fp) unlock_proc(rfp);
535 do_sync();
/* Second unmount pass may force: file servers have now been cleaned up. */
536 unmount_all(1 /* Force */);
538 /* Reply to PM for synchronization */
539 memset(&m_out, 0, sizeof(m_out));
541 m_out.m_type = VFS_PM_REBOOT_REPLY;
543 if ((r = ipc_send(PM_PROC_NR, &m_out)) != OK)
544 panic("pm_reboot: ipc_send failed: %d", r);
547 /*===========================================================================*
548 * pm_fork *
549 *===========================================================================*/
550 void pm_fork(endpoint_t pproc, endpoint_t cproc, pid_t cpid)
552 /* Perform those aspects of the fork() system call that relate to files.
553 * In particular, let the child inherit its parent's file descriptors.
554 * The parent and child parameters tell who forked off whom. The file
555 * system uses the same slot numbers as the kernel. Only PM makes this call.
558 struct fproc *cp, *pp;
559 int i, parentno, childno;
560 mutex_t c_fp_lock;
562 /* Check up-to-dateness of fproc. */
563 okendpt(pproc, &parentno);
565 /* PM gives child endpoint, which implies process slot information.
566 * Don't call isokendpt, because that will verify if the endpoint
567 * number is correct in fproc, which it won't be.
569 childno = _ENDPOINT_P(cproc);
570 if (childno < 0 || childno >= NR_PROCS)
571 panic("VFS: bogus child for forking: %d", cproc);
572 if (fproc[childno].fp_pid != PID_FREE)
573 panic("VFS: forking on top of in-use child: %d", childno);
575 /* Copy the parent's fproc struct to the child. */
576 /* However, the mutex variables belong to a slot and must stay the same. */
577 c_fp_lock = fproc[childno].fp_lock;
578 fproc[childno] = fproc[parentno];
579 fproc[childno].fp_lock = c_fp_lock;
581 /* Increase the counters in the 'filp' table. */
582 cp = &fproc[childno];
583 pp = &fproc[parentno];
/* Every open descriptor the child inherited now has one more user. */
585 for (i = 0; i < OPEN_MAX; i++)
586 if (cp->fp_filp[i] != NULL) cp->fp_filp[i]->filp_count++;
588 /* Fill in new process and endpoint id. */
589 cp->fp_pid = cpid;
590 cp->fp_endpoint = cproc;
592 /* A forking process never has an outstanding grant, as it isn't blocking on
593 * I/O. */
594 if (GRANT_VALID(pp->fp_grant)) {
595 panic("VFS: fork: pp (endpoint %d) has grant %d\n", pp->fp_endpoint,
596 pp->fp_grant);
598 if (GRANT_VALID(cp->fp_grant)) {
599 panic("VFS: fork: cp (endpoint %d) has grant %d\n", cp->fp_endpoint,
600 cp->fp_grant);
603 /* A child is not a process leader, not being revived, etc. */
604 cp->fp_flags = FP_NOFLAGS;
606 /* Record the fact that both root and working dir have another user. */
607 if (cp->fp_rd) dup_vnode(cp->fp_rd);
608 if (cp->fp_wd) dup_vnode(cp->fp_wd);
611 /*===========================================================================*
612 * free_proc *
613 *===========================================================================*/
/* Release the VFS resources of the current process (fp): close all open
 * descriptors and drop the root/working directory references. If FP_EXITING
 * is set in 'flags', additionally perform full exit processing: unmap driver
 * entries, unblock waiters, revoke the controlling tty if this was a session
 * leader, and free the process slot.
 */
614 static void free_proc(int flags)
616 int i;
617 register struct fproc *rfp;
618 register struct filp *rfilp;
619 register struct vnode *vp;
620 dev_t dev;
622 if (fp->fp_endpoint == NONE)
623 panic("free_proc: already free");
/* If the process was blocked on a device or pipe, cancel that first. */
625 if (fp_is_blocked(fp))
626 unpause();
628 /* Loop on file descriptors, closing any that are open. */
629 for (i = 0; i < OPEN_MAX; i++) {
630 (void) close_fd(fp, i);
633 /* Release root and working directories. */
634 if (fp->fp_rd) { put_vnode(fp->fp_rd); fp->fp_rd = NULL; }
635 if (fp->fp_wd) { put_vnode(fp->fp_wd); fp->fp_wd = NULL; }
637 /* The rest of these actions is only done when processes actually exit. */
638 if (!(flags & FP_EXITING)) return;
640 fp->fp_flags |= FP_EXITING;
642 /* Check if any process is SUSPENDed on this driver.
643 * If a driver exits, unmap its entries in the dmap table.
644 * (unmapping has to be done after the first step, because the
645 * dmap table is used in the first step.)
647 unsuspend_by_endpt(fp->fp_endpoint);
648 dmap_unmap_by_endpt(fp->fp_endpoint);
650 worker_stop_by_endpt(fp->fp_endpoint); /* Unblock waiting threads */
651 vmnt_unmap_by_endpt(fp->fp_endpoint); /* Invalidate open files if this
652 * was an active FS */
654 /* If a session leader exits and it has a controlling tty, then revoke
655 * access to its controlling tty from all other processes using it.
657 if ((fp->fp_flags & FP_SESLDR) && fp->fp_tty != 0) {
658 dev = fp->fp_tty;
659 for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
660 if(rfp->fp_pid == PID_FREE) continue;
661 if (rfp->fp_tty == dev) rfp->fp_tty = 0;
/* Close every open filp of this process that refers to the tty's
 * character device, marking it FILP_CLOSED so later use fails.
 */
663 for (i = 0; i < OPEN_MAX; i++) {
664 if ((rfilp = rfp->fp_filp[i]) == NULL) continue;
665 if (rfilp->filp_mode == FILP_CLOSED) continue;
666 vp = rfilp->filp_vno;
667 if (!S_ISCHR(vp->v_mode)) continue;
668 if (vp->v_sdev != dev) continue;
669 lock_filp(rfilp, VNODE_READ);
670 (void) cdev_close(dev); /* Ignore any errors. */
671 /* FIXME: missing select check */
672 rfilp->filp_mode = FILP_CLOSED;
673 unlock_filp(rfilp);
678 /* Exit done. Mark slot as free. */
679 fp->fp_endpoint = NONE;
680 fp->fp_pid = PID_FREE;
681 fp->fp_flags = FP_NOFLAGS;
684 /*===========================================================================*
685 * pm_exit *
686 *===========================================================================*/
687 void pm_exit(void)
689 /* Perform the file system portion of the exit(status) system call.
690 * This function is called from the context of the exiting process.
693 free_proc(FP_EXITING);
696 /*===========================================================================*
697 * pm_setgid *
698 *===========================================================================*/
699 void pm_setgid(proc_e, egid, rgid)
700 endpoint_t proc_e;
701 int egid;
702 int rgid;
704 register struct fproc *tfp;
705 int slot;
707 okendpt(proc_e, &slot);
708 tfp = &fproc[slot];
710 tfp->fp_effgid = egid;
711 tfp->fp_realgid = rgid;
715 /*===========================================================================*
716 * pm_setgroups *
717 *===========================================================================*/
718 void pm_setgroups(proc_e, ngroups, groups)
719 endpoint_t proc_e;
720 int ngroups;
721 gid_t *groups;
723 struct fproc *rfp;
724 int slot;
726 okendpt(proc_e, &slot);
727 rfp = &fproc[slot];
728 if (ngroups * sizeof(gid_t) > sizeof(rfp->fp_sgroups))
729 panic("VFS: pm_setgroups: too much data to copy");
730 if (sys_datacopy_wrapper(who_e, (vir_bytes) groups, SELF, (vir_bytes) rfp->fp_sgroups,
731 ngroups * sizeof(gid_t)) == OK) {
732 rfp->fp_ngroups = ngroups;
733 } else
734 panic("VFS: pm_setgroups: datacopy failed");
738 /*===========================================================================*
739 * pm_setuid *
740 *===========================================================================*/
741 void pm_setuid(proc_e, euid, ruid)
742 endpoint_t proc_e;
743 int euid;
744 int ruid;
746 struct fproc *tfp;
747 int slot;
749 okendpt(proc_e, &slot);
750 tfp = &fproc[slot];
752 tfp->fp_effuid = euid;
753 tfp->fp_realuid = ruid;
756 /*===========================================================================*
757 * pm_setsid *
758 *===========================================================================*/
759 void pm_setsid(endpoint_t proc_e)
761 /* Perform the VFS side of the SETSID call, i.e. get rid of the controlling
762 * terminal of a process, and make the process a session leader.
764 struct fproc *rfp;
765 int slot;
767 /* Make the process a session leader with no controlling tty. */
768 okendpt(proc_e, &slot);
769 rfp = &fproc[slot];
770 rfp->fp_flags |= FP_SESLDR;
771 rfp->fp_tty = 0;
774 /*===========================================================================*
775 * do_svrctl *
776 *===========================================================================*/
/* Handle the svrctl(2) server-control call: get or set a named VFS runtime
 * parameter. Only requests in the 'M' (MINIX) group are accepted.
 */
777 int do_svrctl(void)
779 unsigned int svrctl;
780 vir_bytes ptr;
782 svrctl = job_m_in.m_lsys_svrctl.request;
783 ptr = job_m_in.m_lsys_svrctl.arg;
/* Requests are encoded with a group letter in bits 8..15. */
784 if (((svrctl >> 8) & 0xFF) != 'M') return(EINVAL);
786 switch (svrctl) {
787 case VFSSETPARAM:
788 case VFSGETPARAM:
790 struct sysgetenv sysgetenv;
791 char search_key[64];
792 char val[64];
793 int r, s;
795 /* Copy sysgetenv structure to VFS */
796 if (sys_datacopy_wrapper(who_e, ptr, SELF, (vir_bytes) &sysgetenv,
797 sizeof(sysgetenv)) != OK)
798 return(EFAULT);
800 /* Basic sanity checking */
/* Lengths must leave room for the NUL terminators added below. */
801 if (svrctl == VFSSETPARAM) {
802 if (sysgetenv.keylen <= 0 ||
803 sysgetenv.keylen > (sizeof(search_key) - 1) ||
804 sysgetenv.vallen <= 0 ||
805 sysgetenv.vallen >= sizeof(val)) {
806 return(EINVAL);
810 /* Copy parameter "key" */
811 if ((s = sys_datacopy_wrapper(who_e, (vir_bytes) sysgetenv.key,
812 SELF, (vir_bytes) search_key,
813 sysgetenv.keylen)) != OK)
814 return(s);
815 search_key[sysgetenv.keylen] = '\0'; /* Limit string */
817 /* Is it a parameter we know? */
818 if (svrctl == VFSSETPARAM) {
819 if (!strcmp(search_key, "verbose")) {
820 int verbose_val;
821 if ((s = sys_datacopy_wrapper(who_e,
822 (vir_bytes) sysgetenv.val, SELF,
823 (vir_bytes) &val, sysgetenv.vallen)) != OK)
824 return(s);
825 val[sysgetenv.vallen] = '\0'; /* Limit string */
826 verbose_val = atoi(val);
/* Verbosity levels 0..4 are accepted. */
827 if (verbose_val < 0 || verbose_val > 4) {
828 return(EINVAL);
830 verbose = verbose_val;
831 r = OK;
832 } else {
833 r = ESRCH;
835 } else { /* VFSGETPARAM */
836 char small_buf[60];
838 r = ESRCH;
839 if (!strcmp(search_key, "print_traces")) {
/* Side effect only: dump thread stacktraces to the console. */
840 mthread_stacktraces();
841 sysgetenv.val = 0;
842 sysgetenv.vallen = 0;
843 r = OK;
844 } else if (!strcmp(search_key, "active_threads")) {
845 int active = NR_WTHREADS - worker_available();
846 snprintf(small_buf, sizeof(small_buf) - 1,
847 "%d", active);
848 sysgetenv.vallen = strlen(small_buf);
849 r = OK;
/* Copy the (possibly updated) sysgetenv struct back, then the value
 * string into the caller-supplied val buffer when one is set.
 */
852 if (r == OK) {
853 if ((s = sys_datacopy_wrapper(SELF,
854 (vir_bytes) &sysgetenv, who_e, ptr,
855 sizeof(sysgetenv))) != OK)
856 return(s);
857 if (sysgetenv.val != 0) {
858 if ((s = sys_datacopy_wrapper(SELF,
859 (vir_bytes) small_buf, who_e,
860 (vir_bytes) sysgetenv.val,
861 sysgetenv.vallen)) != OK)
862 return(s);
867 return(r);
869 default:
870 return(EINVAL);
874 /*===========================================================================*
875 * pm_dumpcore *
876 *===========================================================================*/
/* Create a core dump of the current process (fp), on behalf of PM.
 * 'csig' is the causing signal (nonzero means the process is terminating,
 * in which case exit processing is performed here as well); 'exe_name' is
 * the address, in PM, of the process' executable name. Returns OK or a
 * negative errno from opening/writing the core file.
 */
877 int pm_dumpcore(int csig, vir_bytes exe_name)
879 int r = OK, core_fd;
880 struct filp *f;
881 char core_path[PATH_MAX];
882 char proc_name[PROC_NAME_LEN];
884 /* if a process is blocked, scratch(fp).file.fd_nr holds the fd it's blocked
885 * on. free it up for use by common_open().
887 if (fp_is_blocked(fp))
888 unpause();
890 /* open core file */
891 snprintf(core_path, PATH_MAX, "%s.%d", CORE_NAME, fp->fp_pid);
892 core_fd = common_open(core_path, O_WRONLY | O_CREAT | O_TRUNC, CORE_MODE);
893 if (core_fd < 0) { r = core_fd; goto core_exit; }
895 /* get process' name */
896 r = sys_datacopy_wrapper(PM_PROC_NR, exe_name, VFS_PROC_NR, (vir_bytes) proc_name,
897 PROC_NAME_LEN);
898 if (r != OK) goto core_exit;
899 proc_name[PROC_NAME_LEN - 1] = '\0';
901 if ((f = get_filp(core_fd, VNODE_WRITE)) == NULL) { r=EBADF; goto core_exit; }
902 write_elf_core_file(f, csig, proc_name);
903 unlock_filp(f);
904 (void) close_fd(fp, core_fd); /* ignore failure, we're exiting anyway */
906 core_exit:
/* A nonzero signal means the process dies: do full exit processing now. */
907 if(csig)
908 free_proc(FP_EXITING);
909 return(r);
912 /*===========================================================================*
913 * ds_event *
914 *===========================================================================*/
915 void
916 ds_event(void)
918 char key[DS_MAX_KEYLEN];
919 char *blkdrv_prefix = "drv.blk.";
920 char *chrdrv_prefix = "drv.chr.";
921 u32_t value;
922 int type, r, is_blk;
923 endpoint_t owner_endpoint;
925 /* Get the event and the owner from DS. */
926 while ((r = ds_check(key, &type, &owner_endpoint)) == OK) {
927 /* Only check for block and character driver up events. */
928 if (!strncmp(key, blkdrv_prefix, strlen(blkdrv_prefix))) {
929 is_blk = TRUE;
930 } else if (!strncmp(key, chrdrv_prefix, strlen(chrdrv_prefix))) {
931 is_blk = FALSE;
932 } else {
933 continue;
936 if ((r = ds_retrieve_u32(key, &value)) != OK) {
937 printf("VFS: ds_event: ds_retrieve_u32 failed\n");
938 break;
940 if (value != DS_DRIVER_UP) continue;
942 /* Perform up. */
943 dmap_endpt_up(owner_endpoint, is_blk);
946 if (r != ENOENT) printf("VFS: ds_event: ds_check failed: %d\n", r);
/* Called on panic(): dump the stack traces of all VFS worker threads to
 * the console to aid post-mortem debugging.
 */
void panic_hook(void)
{
  printf("VFS mthread stacktraces:\n");
  mthread_stacktraces();
}
956 /*===========================================================================*
957 * do_getrusage *
958 *===========================================================================*/
/* Fill in the VFS-maintained fields of the caller's struct rusage. */
959 int do_getrusage(void)
961 int res;
962 struct rusage r_usage;
/* Copy the caller's struct in first so that fields filled in elsewhere
 * (presumably by PM/kernel before this call; confirm against the getrusage
 * call path) are preserved rather than zeroed.
 */
964 if ((res = sys_datacopy_wrapper(who_e, m_in.m_lc_vfs_rusage.addr, SELF,
965 (vir_bytes) &r_usage, (vir_bytes) sizeof(r_usage))) < 0)
966 return res;
/* VFS does not account block I/O counts; report zero. */
968 r_usage.ru_inblock = 0;
969 r_usage.ru_oublock = 0;
970 r_usage.ru_ixrss = fp->text_size;
971 r_usage.ru_idrss = fp->data_size;
972 r_usage.ru_isrss = DEFAULT_STACK_LIMIT;
974 return sys_datacopy_wrapper(SELF, (vir_bytes) &r_usage, who_e,
975 m_in.m_lc_vfs_rusage.addr, (phys_bytes) sizeof(r_usage));