memory: use sys_safememset() for /dev/zero
[minix.git] / kernel / system / do_vmctl.c
blob7ecc5e30b783144147ed56c43be94f1c055b3f3b
1 /* The kernel call implemented in this file:
2 * m_type: SYS_VMCTL
4 * The parameters for this kernel call are:
5 * SVMCTL_WHO which process
6 * SVMCTL_PARAM set this setting (VMCTL_*)
7 * SVMCTL_VALUE to this value
8 */
10 #include "kernel/system.h"
11 #include "kernel/vm.h"
12 #include "kernel/debug.h"
13 #include <assert.h>
14 #include <minix/type.h>
16 /*===========================================================================*
17 * do_vmctl *
18 *===========================================================================*/
19 int do_vmctl(struct proc * caller, message * m_ptr)
/* Handle the SYS_VMCTL kernel call issued by the VM server: page-fault
 * clearing, memory-request hand-off/reply, kernel physmap setup, and
 * (SMP) inhibit/TLB controls.
 * NOTE(review): this text is a lossy extraction. The leading numbers on
 * each line are the original file's line numbers; lines absent from that
 * numbering (blank lines, brace-only lines such as this function's opening
 * '{' at original line 20 and its final '}', and several comment
 * delimiters) were dropped by the scraper. Code tokens below are left
 * byte-identical; recover the dropped lines from the upstream repository
 * before compiling. */
21 int proc_nr;
22 endpoint_t ep = m_ptr->SVMCTL_WHO;
23 struct proc *p, *rp, *target;
/* SELF means the caller is asking about itself. */
25 if(ep == SELF) { ep = caller->p_endpoint; }
/* Validate the endpoint and translate it to a process-table slot.
 * (The closing '}' of this if, original lines 30-31, was dropped.) */
27 if(!isokendpt(ep, &proc_nr)) {
28 printf("do_vmctl: unexpected endpoint %d from VM\n", ep);
29 return EINVAL;
32 p = proc_addr(proc_nr);
/* Dispatch on the requested VMCTL_* operation. */
34 switch(m_ptr->SVMCTL_PARAM) {
35 case VMCTL_CLEAR_PAGEFAULT:
/* VM finished servicing the fault; unblock the faulting process. */
36 assert(RTS_ISSET(p,RTS_PAGEFAULT));
37 RTS_UNSET(p, RTS_PAGEFAULT);
38 return OK;
39 case VMCTL_MEMREQ_GET:
40 /* Send VM the information about the memory request. */
41 if(!(rp = vmrequest))
42 return ESRCH;
43 assert(RTS_ISSET(rp, RTS_VMREQUEST));
/* Resolve the process the request is about (may differ from rp). */
45 okendpt(rp->p_vmrequest.target, &proc_nr);
46 target = proc_addr(proc_nr);
48 /* Reply with request fields. */
49 switch(rp->p_vmrequest.req_type) {
50 case VMPTYPE_CHECK:
/* Simple address-range check request: copy range + writeflag. */
51 m_ptr->SVMCTL_MRG_TARGET =
52 rp->p_vmrequest.target;
53 m_ptr->SVMCTL_MRG_ADDR =
54 rp->p_vmrequest.params.check.start;
55 m_ptr->SVMCTL_MRG_LENGTH =
56 rp->p_vmrequest.params.check.length;
57 m_ptr->SVMCTL_MRG_FLAG =
58 rp->p_vmrequest.params.check.writeflag;
59 m_ptr->SVMCTL_MRG_REQUESTOR =
60 (void *) rp->p_endpoint;
61 break;
62 case VMPTYPE_SMAP:
63 case VMPTYPE_SUNMAP:
64 case VMPTYPE_COWMAP:
/* Map-type requests additionally release the request target and
 * pass both endpoints plus source/destination addresses. */
65 assert(RTS_ISSET(target,RTS_VMREQTARGET));
66 RTS_UNSET(target, RTS_VMREQTARGET);
67 m_ptr->SVMCTL_MRG_TARGET =
68 rp->p_vmrequest.target;
69 m_ptr->SVMCTL_MRG_ADDR =
70 rp->p_vmrequest.params.map.vir_d;
71 m_ptr->SVMCTL_MRG_EP2 =
72 rp->p_vmrequest.params.map.ep_s;
73 m_ptr->SVMCTL_MRG_ADDR2 =
74 rp->p_vmrequest.params.map.vir_s;
75 m_ptr->SVMCTL_MRG_LENGTH =
76 rp->p_vmrequest.params.map.length;
77 m_ptr->SVMCTL_MRG_FLAG =
78 rp->p_vmrequest.params.map.writeflag;
79 m_ptr->SVMCTL_MRG_REQUESTOR =
80 (void *) rp->p_endpoint;
81 break;
82 default:
83 panic("VMREQUEST wrong type");
/* Mark the request as suspended until VM replies.
 * (Closing '}' of the inner switch, originals 84-85, was dropped.) */
86 rp->p_vmrequest.vmresult = VMSUSPEND;
88 /* Remove from request chain. */
89 vmrequest = vmrequest->p_vmrequest.nextrequestor;
/* Return value tells VM which request type it just received. */
91 return rp->p_vmrequest.req_type;
92 case VMCTL_MEMREQ_REPLY:
/* VM replies to a previously fetched request: record the result
 * in the requestor's pending-request state. */
93 assert(RTS_ISSET(p, RTS_VMREQUEST));
94 assert(p->p_vmrequest.vmresult == VMSUSPEND);
95 okendpt(p->p_vmrequest.target, &proc_nr);
96 target = proc_addr(proc_nr);
97 p->p_vmrequest.vmresult = m_ptr->SVMCTL_VALUE;
98 assert(p->p_vmrequest.vmresult != VMSUSPEND);
/* Resume differently depending on what got suspended. */
100 switch(p->p_vmrequest.type) {
101 case VMSTYPE_KERNELCALL:
/* (Original comment truncated by extraction; originals 102/105
 * held its delimiters.) */
103 * we will have to resume execution of the kernel call
104 * as soon the scheduler picks up this process again
106 p->p_misc_flags |= MF_KCALL_RESUME;
107 break;
108 case VMSTYPE_DELIVERMSG:
/* Pending message delivery retries once the process runs. */
109 assert(p->p_misc_flags & MF_DELIVERMSG);
110 assert(p == target);
111 assert(RTS_ISSET(p, RTS_VMREQUEST));
112 break;
113 case VMSTYPE_MAP:
114 assert(RTS_ISSET(p, RTS_VMREQUEST));
115 break;
116 default:
117 panic("strange request type: %d",p->p_vmrequest.type);
/* Unblock the requestor now that the result is recorded.
 * (Closing '}' of this switch, originals 118-119, was dropped.) */
120 RTS_UNSET(p, RTS_VMREQUEST);
121 return OK;
123 case VMCTL_KERN_PHYSMAP:
/* Hand out the i-th kernel physical mapping to VM. */
125 int i = m_ptr->SVMCTL_VALUE;
126 return arch_phys_map(i,
127 (phys_bytes *) &m_ptr->SVMCTL_MAP_PHYS_ADDR,
128 (phys_bytes *) &m_ptr->SVMCTL_MAP_PHYS_LEN,
129 &m_ptr->SVMCTL_MAP_FLAGS);
131 case VMCTL_KERN_MAP_REPLY:
/* VM tells the kernel where a requested mapping ended up. */
133 return arch_phys_map_reply(m_ptr->SVMCTL_VALUE,
134 (vir_bytes) m_ptr->SVMCTL_MAP_VIR_ADDR);
136 case VMCTL_VMINHIBIT_SET:
137 /* check if we must stop a process on a different CPU */
138 #if CONFIG_SMP
139 if (p->p_cpu != cpuid) {
140 smp_schedule_vminhibit(p);
141 } else
142 #endif
143 RTS_SET(p, RTS_VMINHIBIT);
144 #if CONFIG_SMP
145 p->p_misc_flags |= MF_FLUSH_TLB;
146 #endif
147 return OK;
148 case VMCTL_VMINHIBIT_CLEAR:
149 assert(RTS_ISSET(p, RTS_VMINHIBIT));
/* (Original comment truncated; delimiters on 150/153 dropped.) */
151 * the processes is certainly not runnable, no need to tell its
152 * cpu
154 RTS_UNSET(p, RTS_VMINHIBIT);
155 #ifdef CONFIG_SMP
/* Retry any asynchronous send that missed while VM was busy. */
156 if (p->p_misc_flags & MF_SENDA_VM_MISS) {
157 struct priv *privp;
158 p->p_misc_flags &= ~MF_SENDA_VM_MISS;
159 privp = priv(p);
160 try_deliver_senda(p, (asynmsg_t *) privp->s_asyntab,
161 privp->s_asynsize);
/* (Closing '}' of this if plus the '/'-asterisk opener of the
 * comment below, originals 162-163/169, were dropped.) */
164 * We don't know whether kernel has the changed mapping
165 * installed to access userspace memory. And if so, on what CPU.
166 * More over we don't know what mapping has changed and how and
167 * therefore we must invalidate all mappings we have anywhere.
168 * Next time we map memory, we map it fresh.
170 bits_fill(p->p_stale_tlb, CONFIG_MAX_CPUS);
171 #endif
172 return OK;
173 case VMCTL_CLEARMAPCACHE:
174 /* VM says: forget about old mappings we have cached. */
175 mem_clear_mapcache();
176 return OK;
177 case VMCTL_BOOTINHIBIT_CLEAR:
178 RTS_UNSET(p, RTS_BOOTINHIBIT);
179 return OK;
/* Unknown VMCTL_* values fall through to the architecture layer.
 * (Closing '}' of the outer switch, originals 180-181, was dropped,
 * as was the function's final '}' after line 184.) */
182 /* Try architecture-specific vmctls. */
183 return arch_do_vmctl(m_ptr, p);