/* The kernel call implemented in this file:
 *   m_type:	SYS_VMCTL
 *
 * The parameters for this kernel call are:
 *	SVMCTL_WHO	which process
 *	SVMCTL_PARAM	set this setting (VMCTL_*)
 *	SVMCTL_VALUE	to this value
 */
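/*
 * Illustrative sketch (not part of this file): VM would typically reach this
 * handler through the generic kernel-call stub, roughly as below. The stub
 * name and exact message setup are assumptions, shown only to clarify the
 * parameter layout above.
 *
 *	message m;
 *	m.SVMCTL_WHO   = proc_ep;			/+ which process (hypothetical endpoint) +/
 *	m.SVMCTL_PARAM = VMCTL_CLEAR_PAGEFAULT;		/+ which setting +/
 *	m.SVMCTL_VALUE = 0;				/+ value; unused for this setting +/
 *	r = _kernel_call(SYS_VMCTL, &m);
 */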
#include "kernel/system.h"
#include "kernel/vm.h"

#include <assert.h>
/*===========================================================================*
 *				do_vmctl				     *
 *===========================================================================*/
int do_vmctl(struct proc * caller, message * m_ptr)
{
  int proc_nr;
  endpoint_t ep = m_ptr->SVMCTL_WHO;
  struct proc *p, *rp, **rpp, *target;
  if(ep == SELF) { ep = caller->p_endpoint; }
  if(!isokendpt(ep, &proc_nr)) {
	printf("do_vmctl: unexpected endpoint %d from VM\n", ep);
	return EINVAL;
  }
  p = proc_addr(proc_nr);
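  /* Dispatch on the requested VMCTL_* operation. */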
  switch(m_ptr->SVMCTL_PARAM) {
	case VMCTL_CLEAR_PAGEFAULT:
		assert(RTS_ISSET(p, RTS_PAGEFAULT));
		RTS_UNSET(p, RTS_PAGEFAULT);
		return OK;
	case VMCTL_MEMREQ_GET:
		/* Send VM the information about the memory request. We
		 * cannot simply send the first request on the list, because
		 * IPC filters may forbid VM from getting requests for
		 * particular sources. However, IPC filters are used only in
		 * rare cases.
		 */
		for (rpp = &vmrequest; *rpp != NULL;
		    rpp = &(*rpp)->p_vmrequest.nextrequestor) {
			rp = *rpp;
			assert(RTS_ISSET(rp, RTS_VMREQUEST));

			okendpt(rp->p_vmrequest.target, &proc_nr);
			target = proc_addr(proc_nr);

			/* Check against IPC filters. */
			if (!allow_ipc_filtered_memreq(rp, target))
				continue;
			/* Reply with request fields. */
			if (rp->p_vmrequest.req_type != VMPTYPE_CHECK)
				panic("VMREQUEST wrong type");
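			/* Copy the check-request parameters into VM's reply
			 * message.
			 */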
			m_ptr->SVMCTL_MRG_TARGET =
				rp->p_vmrequest.target;
			m_ptr->SVMCTL_MRG_ADDR =
				rp->p_vmrequest.params.check.start;
			m_ptr->SVMCTL_MRG_LENGTH =
				rp->p_vmrequest.params.check.length;
			m_ptr->SVMCTL_MRG_FLAG =
				rp->p_vmrequest.params.check.writeflag;
			m_ptr->SVMCTL_MRG_REQUESTOR =
				(void *) rp->p_endpoint;
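			/* The requestor stays suspended until VM answers with
			 * VMCTL_MEMREQ_REPLY.
			 */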
			rp->p_vmrequest.vmresult = VMSUSPEND;

			/* Remove from request chain. */
			*rpp = rp->p_vmrequest.nextrequestor;

			return rp->p_vmrequest.req_type;
		}

		return ENOENT;
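	/* VM reports back the result of a memory request it handled. */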
	case VMCTL_MEMREQ_REPLY:
		assert(RTS_ISSET(p, RTS_VMREQUEST));
		assert(p->p_vmrequest.vmresult == VMSUSPEND);
		okendpt(p->p_vmrequest.target, &proc_nr);
		target = proc_addr(proc_nr);
		p->p_vmrequest.vmresult = m_ptr->SVMCTL_VALUE;
		assert(p->p_vmrequest.vmresult != VMSUSPEND);
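		/* Resume the requestor according to how it was suspended. */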
		switch(p->p_vmrequest.type) {
		case VMSTYPE_KERNELCALL:
			/*
			 * we will have to resume execution of the kernel call
			 * as soon as the scheduler picks up this process again
			 */
			p->p_misc_flags |= MF_KCALL_RESUME;
			break;
		case VMSTYPE_DELIVERMSG:
			assert(p->p_misc_flags & MF_DELIVERMSG);
			assert(p == target);
			assert(RTS_ISSET(p, RTS_VMREQUEST));
			break;
		case VMSTYPE_MAP:
			assert(RTS_ISSET(p, RTS_VMREQUEST));
			break;
		default:
			panic("strange request type: %d", p->p_vmrequest.type);
		}
		RTS_UNSET(p, RTS_VMREQUEST);
		return OK;
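	/* VM queries kernel physical mapping number SVMCTL_VALUE; the
	 * architecture code fills in its address, length and flags.
	 */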
	case VMCTL_KERN_PHYSMAP:
	{
		int i = m_ptr->SVMCTL_VALUE;

		return arch_phys_map(i,
			(phys_bytes *) &m_ptr->SVMCTL_MAP_PHYS_ADDR,
			(phys_bytes *) &m_ptr->SVMCTL_MAP_PHYS_LEN,
			&m_ptr->SVMCTL_MAP_FLAGS);
	}
	case VMCTL_KERN_MAP_REPLY:
	{
		return arch_phys_map_reply(m_ptr->SVMCTL_VALUE,
			(vir_bytes) m_ptr->SVMCTL_MAP_VIR_ADDR);
	}
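	/* VM wants the process stopped, e.g. while it changes the process's
	 * address space.
	 */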
	case VMCTL_VMINHIBIT_SET:
		/* check if we must stop a process on a different CPU */
#ifdef CONFIG_SMP
		if (p->p_cpu != cpuid) {
			smp_schedule_vminhibit(p);
		} else
#endif
			RTS_SET(p, RTS_VMINHIBIT);
#ifdef CONFIG_SMP
		p->p_misc_flags |= MF_FLUSH_TLB;
#endif
		return OK;
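	/* VM is done; allow the process to run again. */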
	case VMCTL_VMINHIBIT_CLEAR:
		assert(RTS_ISSET(p, RTS_VMINHIBIT));
		/*
		 * the process is certainly not runnable, no need to tell its
		 * cpu
		 */
		RTS_UNSET(p, RTS_VMINHIBIT);
#ifdef CONFIG_SMP
		if (p->p_misc_flags & MF_SENDA_VM_MISS) {
			struct priv *privp;

			p->p_misc_flags &= ~MF_SENDA_VM_MISS;
			privp = priv(p);
			try_deliver_senda(p, (asynmsg_t *) privp->s_asyntab,
					privp->s_asynsize);
		}
		/*
		 * We don't know whether the kernel has the changed mapping
		 * installed to access userspace memory, and if so, on what
		 * CPU. Moreover, we don't know which mapping has changed and
		 * how, and therefore we must invalidate all mappings we have
		 * anywhere. Next time we map memory, we map it fresh.
		 */
		bits_fill(p->p_stale_tlb, CONFIG_MAX_CPUS);
#endif
		return OK;
	case VMCTL_CLEARMAPCACHE:
		/* VM says: forget about old mappings we have cached. */
		mem_clear_mapcache();
		return OK;
	case VMCTL_BOOTINHIBIT_CLEAR:
		RTS_UNSET(p, RTS_BOOTINHIBIT);
		return OK;
  }
  /* Try architecture-specific vmctls. */
  return arch_do_vmctl(m_ptr, p);
}