/* The kernel call implemented in this file:
 *   m_type:	SYS_VMCTL
 *
 * The parameters for this kernel call are:
 *	SVMCTL_WHO	which process
 *	SVMCTL_PARAM	set this setting (VMCTL_*)
 *	SVMCTL_VALUE	to this value
 */
#include "kernel/system.h"
#include "kernel/vm.h"
#include "kernel/debug.h"

#include <assert.h>
#include <minix/type.h>
16 /*===========================================================================*
18 *===========================================================================*/
19 int do_vmctl(struct proc
* caller
, message
* m_ptr
)
22 endpoint_t ep
= m_ptr
->SVMCTL_WHO
;
23 struct proc
*p
, *rp
, *target
;
25 if(ep
== SELF
) { ep
= caller
->p_endpoint
; }
27 if(!isokendpt(ep
, &proc_nr
)) {
28 printf("do_vmctl: unexpected endpoint %d from VM\n", ep
);
32 p
= proc_addr(proc_nr
);
34 switch(m_ptr
->SVMCTL_PARAM
) {
35 case VMCTL_CLEAR_PAGEFAULT
:
36 assert(RTS_ISSET(p
,RTS_PAGEFAULT
));
37 RTS_UNSET(p
, RTS_PAGEFAULT
);
39 case VMCTL_MEMREQ_GET
:
40 /* Send VM the information about the memory request. */
43 assert(RTS_ISSET(rp
, RTS_VMREQUEST
));
45 okendpt(rp
->p_vmrequest
.target
, &proc_nr
);
46 target
= proc_addr(proc_nr
);
48 /* Reply with request fields. */
49 switch(rp
->p_vmrequest
.req_type
) {
51 m_ptr
->SVMCTL_MRG_TARGET
=
52 rp
->p_vmrequest
.target
;
53 m_ptr
->SVMCTL_MRG_ADDR
=
54 rp
->p_vmrequest
.params
.check
.start
;
55 m_ptr
->SVMCTL_MRG_LENGTH
=
56 rp
->p_vmrequest
.params
.check
.length
;
57 m_ptr
->SVMCTL_MRG_FLAG
=
58 rp
->p_vmrequest
.params
.check
.writeflag
;
59 m_ptr
->SVMCTL_MRG_REQUESTOR
=
60 (void *) rp
->p_endpoint
;
65 assert(RTS_ISSET(target
,RTS_VMREQTARGET
));
66 RTS_UNSET(target
, RTS_VMREQTARGET
);
67 m_ptr
->SVMCTL_MRG_TARGET
=
68 rp
->p_vmrequest
.target
;
69 m_ptr
->SVMCTL_MRG_ADDR
=
70 rp
->p_vmrequest
.params
.map
.vir_d
;
71 m_ptr
->SVMCTL_MRG_EP2
=
72 rp
->p_vmrequest
.params
.map
.ep_s
;
73 m_ptr
->SVMCTL_MRG_ADDR2
=
74 rp
->p_vmrequest
.params
.map
.vir_s
;
75 m_ptr
->SVMCTL_MRG_LENGTH
=
76 rp
->p_vmrequest
.params
.map
.length
;
77 m_ptr
->SVMCTL_MRG_FLAG
=
78 rp
->p_vmrequest
.params
.map
.writeflag
;
79 m_ptr
->SVMCTL_MRG_REQUESTOR
=
80 (void *) rp
->p_endpoint
;
83 panic("VMREQUEST wrong type");
86 rp
->p_vmrequest
.vmresult
= VMSUSPEND
;
88 /* Remove from request chain. */
89 vmrequest
= vmrequest
->p_vmrequest
.nextrequestor
;
91 return rp
->p_vmrequest
.req_type
;
92 case VMCTL_MEMREQ_REPLY
:
93 assert(RTS_ISSET(p
, RTS_VMREQUEST
));
94 assert(p
->p_vmrequest
.vmresult
== VMSUSPEND
);
95 okendpt(p
->p_vmrequest
.target
, &proc_nr
);
96 target
= proc_addr(proc_nr
);
97 p
->p_vmrequest
.vmresult
= m_ptr
->SVMCTL_VALUE
;
98 assert(p
->p_vmrequest
.vmresult
!= VMSUSPEND
);
100 switch(p
->p_vmrequest
.type
) {
101 case VMSTYPE_KERNELCALL
:
103 * we will have to resume execution of the kernel call
104 * as soon the scheduler picks up this process again
106 p
->p_misc_flags
|= MF_KCALL_RESUME
;
108 case VMSTYPE_DELIVERMSG
:
109 assert(p
->p_misc_flags
& MF_DELIVERMSG
);
111 assert(RTS_ISSET(p
, RTS_VMREQUEST
));
114 assert(RTS_ISSET(p
, RTS_VMREQUEST
));
117 panic("strange request type: %d",p
->p_vmrequest
.type
);
120 RTS_UNSET(p
, RTS_VMREQUEST
);
123 case VMCTL_KERN_PHYSMAP
:
125 int i
= m_ptr
->SVMCTL_VALUE
;
126 return arch_phys_map(i
,
127 (phys_bytes
*) &m_ptr
->SVMCTL_MAP_PHYS_ADDR
,
128 (phys_bytes
*) &m_ptr
->SVMCTL_MAP_PHYS_LEN
,
129 &m_ptr
->SVMCTL_MAP_FLAGS
);
131 case VMCTL_KERN_MAP_REPLY
:
133 return arch_phys_map_reply(m_ptr
->SVMCTL_VALUE
,
134 (vir_bytes
) m_ptr
->SVMCTL_MAP_VIR_ADDR
);
136 case VMCTL_VMINHIBIT_SET
:
137 /* check if we must stop a process on a different CPU */
139 if (p
->p_cpu
!= cpuid
) {
140 smp_schedule_vminhibit(p
);
143 RTS_SET(p
, RTS_VMINHIBIT
);
145 p
->p_misc_flags
|= MF_FLUSH_TLB
;
148 case VMCTL_VMINHIBIT_CLEAR
:
149 assert(RTS_ISSET(p
, RTS_VMINHIBIT
));
151 * the processes is certainly not runnable, no need to tell its
154 RTS_UNSET(p
, RTS_VMINHIBIT
);
156 if (p
->p_misc_flags
& MF_SENDA_VM_MISS
) {
158 p
->p_misc_flags
&= ~MF_SENDA_VM_MISS
;
160 try_deliver_senda(p
, (asynmsg_t
*) privp
->s_asyntab
,
164 * We don't know whether kernel has the changed mapping
165 * installed to access userspace memory. And if so, on what CPU.
166 * More over we don't know what mapping has changed and how and
167 * therefore we must invalidate all mappings we have anywhere.
168 * Next time we map memory, we map it fresh.
170 bits_fill(p
->p_stale_tlb
, CONFIG_MAX_CPUS
);
173 case VMCTL_CLEARMAPCACHE
:
174 /* VM says: forget about old mappings we have cached. */
175 mem_clear_mapcache();
177 case VMCTL_BOOTINHIBIT_CLEAR
:
178 RTS_UNSET(p
, RTS_BOOTINHIBIT
);
182 /* Try architecture-specific vmctls. */
183 return arch_do_vmctl(m_ptr
, p
);