kernel: kill proc with bogus ipc address
[minix.git] / kernel / system / do_vmctl.c
blob ebc51fdc88975eb53fbcd4e3f34d38485b116f04
/* The kernel call implemented in this file:
 *   m_type:	SYS_VMCTL
 *
 * The parameters for this kernel call are:
 *	SVMCTL_WHO	which process
 *	SVMCTL_PARAM	set this setting (VMCTL_*)
 *	SVMCTL_VALUE	to this value
 */
#include "kernel/system.h"
#include "kernel/vm.h"
#include "kernel/debug.h"
#include <assert.h>
#include <minix/type.h>
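
#if 0	/* Illustrative sketch only; not part of the original file. */
/*
 * Roughly how a caller such as the VM server might fill in a SYS_VMCTL
 * request using the SVMCTL_* message fields documented in the header
 * comment above.  The helper name is hypothetical and the _kernel_call()
 * entry point is assumed here for illustration.
 */
static int example_vmctl(endpoint_t who, int param, int value)
{
	message m;

	m.SVMCTL_WHO = who;		/* which process the setting applies to */
	m.SVMCTL_PARAM = param;		/* which setting (VMCTL_*) */
	m.SVMCTL_VALUE = value;		/* value for the setting */

	return _kernel_call(SYS_VMCTL, &m);
}
#endif
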
/*===========================================================================*
 *				do_vmctl				     *
 *===========================================================================*/
int do_vmctl(struct proc * caller, message * m_ptr)
{
  int proc_nr;
  endpoint_t ep = m_ptr->SVMCTL_WHO;
  struct proc *p, *rp, *target;

  if(ep == SELF) { ep = caller->p_endpoint; }

  if(!isokendpt(ep, &proc_nr)) {
	printf("do_vmctl: unexpected endpoint %d from VM\n", ep);
	return EINVAL;
  }

  p = proc_addr(proc_nr);

  switch(m_ptr->SVMCTL_PARAM) {
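	/* VM has handled the process's page fault; clear the flag so the
	 * process can be scheduled again.
	 */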
	case VMCTL_CLEAR_PAGEFAULT:
		assert(RTS_ISSET(p,RTS_PAGEFAULT));
		RTS_UNSET(p, RTS_PAGEFAULT);
		return OK;
	case VMCTL_MEMREQ_GET:
		/* Send VM the information about the memory request. */
		if(!(rp = vmrequest))
			return ESRCH;
		assert(RTS_ISSET(rp, RTS_VMREQUEST));

		okendpt(rp->p_vmrequest.target, &proc_nr);
		target = proc_addr(proc_nr);

		/* Reply with request fields. */
		switch(rp->p_vmrequest.req_type) {
		case VMPTYPE_CHECK:
			m_ptr->SVMCTL_MRG_TARGET =
				rp->p_vmrequest.target;
			m_ptr->SVMCTL_MRG_ADDR =
				rp->p_vmrequest.params.check.start;
			m_ptr->SVMCTL_MRG_LENGTH =
				rp->p_vmrequest.params.check.length;
			m_ptr->SVMCTL_MRG_FLAG =
				rp->p_vmrequest.params.check.writeflag;
			m_ptr->SVMCTL_MRG_REQUESTOR =
				(void *) rp->p_endpoint;
			break;
		default:
			panic("VMREQUEST wrong type");
		}

		rp->p_vmrequest.vmresult = VMSUSPEND;

		/* Remove from request chain. */
		vmrequest = vmrequest->p_vmrequest.nextrequestor;

		return rp->p_vmrequest.req_type;
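	/* VM reports the outcome of a memory request it picked up through
	 * VMCTL_MEMREQ_GET; record the result and arrange for the suspended
	 * operation to be resumed.
	 */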
	case VMCTL_MEMREQ_REPLY:
		assert(RTS_ISSET(p, RTS_VMREQUEST));
		assert(p->p_vmrequest.vmresult == VMSUSPEND);
		okendpt(p->p_vmrequest.target, &proc_nr);
		target = proc_addr(proc_nr);
		p->p_vmrequest.vmresult = m_ptr->SVMCTL_VALUE;
		assert(p->p_vmrequest.vmresult != VMSUSPEND);

		switch(p->p_vmrequest.type) {
		case VMSTYPE_KERNELCALL:
			/*
			 * We will have to resume execution of the kernel call
			 * as soon as the scheduler picks up this process again.
			 */
			p->p_misc_flags |= MF_KCALL_RESUME;
			break;
		case VMSTYPE_DELIVERMSG:
			assert(p->p_misc_flags & MF_DELIVERMSG);
			assert(p == target);
			assert(RTS_ISSET(p, RTS_VMREQUEST));
			break;
		case VMSTYPE_MAP:
			assert(RTS_ISSET(p, RTS_VMREQUEST));
			break;
		default:
			panic("strange request type: %d",p->p_vmrequest.type);
		}

		RTS_UNSET(p, RTS_VMREQUEST);
		return OK;
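	/* Hand VM the physical address, length and flags of kernel physical
	 * mapping number SVMCTL_VALUE, so VM can take care of the mapping.
	 */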
	case VMCTL_KERN_PHYSMAP:
	{
		int i = m_ptr->SVMCTL_VALUE;
		return arch_phys_map(i,
			(phys_bytes *) &m_ptr->SVMCTL_MAP_PHYS_ADDR,
			(phys_bytes *) &m_ptr->SVMCTL_MAP_PHYS_LEN,
			&m_ptr->SVMCTL_MAP_FLAGS);
	}
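	/* VM replies with the virtual address at which it has placed kernel
	 * mapping number SVMCTL_VALUE.
	 */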
	case VMCTL_KERN_MAP_REPLY:
	{
		return arch_phys_map_reply(m_ptr->SVMCTL_VALUE,
			(vir_bytes) m_ptr->SVMCTL_MAP_VIR_ADDR);
	}
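	/* Keep the target process from running while VM is changing its
	 * address space.
	 */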
	case VMCTL_VMINHIBIT_SET:
		/* check if we must stop a process on a different CPU */
#if CONFIG_SMP
		if (p->p_cpu != cpuid) {
			smp_schedule_vminhibit(p);
		} else
#endif
			RTS_SET(p, RTS_VMINHIBIT);
#if CONFIG_SMP
		p->p_misc_flags |= MF_FLUSH_TLB;
#endif
		return OK;
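	/* VM is done changing the address space; allow the process to run
	 * again.
	 */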
	case VMCTL_VMINHIBIT_CLEAR:
		assert(RTS_ISSET(p, RTS_VMINHIBIT));
		/*
		 * The process is certainly not runnable, so there is no need
		 * to tell its cpu.
		 */
		RTS_UNSET(p, RTS_VMINHIBIT);
#ifdef CONFIG_SMP
		if (p->p_misc_flags & MF_SENDA_VM_MISS) {
			struct priv *privp;
			p->p_misc_flags &= ~MF_SENDA_VM_MISS;
			privp = priv(p);
			try_deliver_senda(p, (asynmsg_t *) privp->s_asyntab,
					privp->s_asynsize);
		}
		/*
		 * We don't know whether the kernel has the changed mapping
		 * installed to access userspace memory, and if so, on what
		 * CPU.  Moreover, we don't know which mapping has changed and
		 * how, so we must invalidate all mappings we have anywhere.
		 * Next time we map memory, we map it fresh.
		 */
		bits_fill(p->p_stale_tlb, CONFIG_MAX_CPUS);
#endif
		return OK;
	case VMCTL_CLEARMAPCACHE:
		/* VM says: forget about old mappings we have cached. */
		mem_clear_mapcache();
		return OK;
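	/* Let a process that was held back at boot (RTS_BOOTINHIBIT) run. */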
	case VMCTL_BOOTINHIBIT_CLEAR:
		RTS_UNSET(p, RTS_BOOTINHIBIT);
		return OK;
  }

  /* Try architecture-specific vmctls. */
  return arch_do_vmctl(m_ptr, p);
}