/* minix/servers/vm/rs.c */
#define _SYSTEM 1

#include <minix/callnr.h>
#include <minix/com.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/ds.h>
#include <minix/endpoint.h>
#include <minix/minlib.h>
#include <minix/type.h>
#include <minix/ipc.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/safecopies.h>
#include <minix/bitmap.h>
#include <minix/rs.h>

#include <sys/mman.h>

#include <errno.h>
#include <string.h>
#include <env.h>
#include <stdio.h>
#include <assert.h>

#include "glo.h"
#include "proto.h"
#include "util.h"
#include "region.h"
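
/*
 * This file implements VM's handlers for requests from RS, the Reincarnation
 * Server: setting a process's VM call privileges, preparing processes for a
 * live update that includes VM itself, performing that update, and a set of
 * memory-control operations (pinning, VM instance creation, and heap and
 * mmap preallocation).
 */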
/*===========================================================================*
 *				do_rs_set_priv				     *
 *===========================================================================*/
int do_rs_set_priv(message *m)
{
	int r, n, nr;
	struct vmproc *vmp;
	bitchunk_t call_mask[VM_CALL_MASK_SIZE], *call_mask_p;

	nr = m->VM_RS_NR;

	if ((r = vm_isokendpt(nr, &n)) != OK) {
		printf("do_rs_set_priv: bad endpoint %d\n", nr);
		return EINVAL;
	}

	vmp = &vmproc[n];

	if (m->VM_RS_BUF) {
		r = sys_datacopy(m->m_source, (vir_bytes) m->VM_RS_BUF, SELF,
			(vir_bytes) call_mask, sizeof(call_mask));
		if (r != OK)
			return r;
		call_mask_p = call_mask;
	} else {
		if (m->VM_RS_SYS) {
			printf("VM: do_rs_set_priv: sys procs don't share!\n");
			return EINVAL;
		}
		call_mask_p = NULL;
	}

	acl_set(vmp, call_mask_p, m->VM_RS_SYS);

	return OK;
}
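
/*
 * A minimal sketch of the caller side of this request, assuming RS fills in
 * the message fields used above. The variable names are illustrative, not a
 * verbatim copy of the RS code:
 *
 *	message m;
 *
 *	memset(&m, 0, sizeof(m));
 *	m.VM_RS_NR = endpoint;		// process whose call mask is set
 *	m.VM_RS_BUF = (char *) mask;	// bitmap of allowed VM calls, or NULL
 *	m.VM_RS_SYS = sys_proc;		// nonzero for a system process
 *	r = _taskcall(VM_PROC_NR, VM_RS_SET_PRIV, &m);
 */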
/*===========================================================================*
 *				do_rs_prepare				     *
 *===========================================================================*/
int do_rs_prepare(message *m_ptr)
{
	/* Prepare a new instance of a service for an upcoming live-update
	 * switch, based on the old instance of this service. This call is
	 * used only by RS and only for a multicomponent live update which
	 * includes VM. In this case, all processes need to be prepared such
	 * that they don't require the new VM instance to perform actions
	 * during live update that cannot be undone in the case of a rollback.
	 */
	endpoint_t src_e, dst_e;
	int src_p, dst_p;
	struct vmproc *src_vmp, *dst_vmp;
	struct vir_region *src_data_vr, *dst_data_vr;
	vir_bytes src_addr, dst_addr;
	int sys_upd_flags;

	src_e = m_ptr->m_lsys_vm_update.src;
	dst_e = m_ptr->m_lsys_vm_update.dst;
	sys_upd_flags = m_ptr->m_lsys_vm_update.flags;

	/* Lookup slots for source and destination process. */
	if(vm_isokendpt(src_e, &src_p) != OK) {
		printf("VM: do_rs_prepare: bad src endpoint %d\n", src_e);
		return EINVAL;
	}
	src_vmp = &vmproc[src_p];
	if(vm_isokendpt(dst_e, &dst_p) != OK) {
		printf("VM: do_rs_prepare: bad dst endpoint %d\n", dst_e);
		return EINVAL;
	}
	dst_vmp = &vmproc[dst_p];

	/* Pin memory for the source process. */
	map_pin_memory(src_vmp);

	/* See if the source process has a larger heap than the destination
	 * process. If so, extend the heap of the destination process to
	 * match the source's. While this may end up wasting quite some
	 * memory, it is absolutely essential that the destination process
	 * does not run out of heap memory during the live update window,
	 * and since most processes will be doing an identity transfer, they
	 * are likely to require as much heap as their previous instances.
	 * Better safe than sorry. TODO: prevent wasting memory somehow;
	 * this seems particularly relevant for RS.
	 */
	src_data_vr = region_search(&src_vmp->vm_regions_avl, VM_MMAPBASE,
	    AVL_LESS);
	assert(src_data_vr);
	dst_data_vr = region_search(&dst_vmp->vm_regions_avl, VM_MMAPBASE,
	    AVL_LESS);
	assert(dst_data_vr);

	src_addr = src_data_vr->vaddr + src_data_vr->length;
	dst_addr = dst_data_vr->vaddr + dst_data_vr->length;
	if (src_addr > dst_addr)
		real_brk(dst_vmp, src_addr);
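
	/*
	 * Illustration (hypothetical numbers): if the old instance's data
	 * region ends at 0xa00000 and the new instance's ends at 0x800000,
	 * the real_brk() call above grows the new instance's heap by 2 MiB
	 * so that it too ends at 0xa00000, guaranteeing the new instance at
	 * least as much heap as the old one without involving VM again.
	 */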
	/* Now also pin memory for the destination process. */
	map_pin_memory(dst_vmp);

	/* Finally, map the source process's memory-mapped regions into the
	 * destination process. This needs to happen now, because VM may not
	 * allocate any objects during the live update window, since this
	 * would prevent successful rollback of VM afterwards. The
	 * destination may not actually touch these regions during the live
	 * update window either, because they are mapped copy-on-write and a
	 * pagefault would also cause object allocation. Objects are pages,
	 * slab objects, anything in the new VM instance to which changes are
	 * visible in the old VM basically.
	 */
	if (!(sys_upd_flags & SF_VM_NOMMAP))
		map_proc_dyn_data(src_vmp, dst_vmp);

	return OK;
}
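
/*
 * Note that do_rs_prepare() only sets the stage: the actual switch between
 * the old and new instances is performed by do_rs_update() below, after the
 * kernel has completed its part of the update.
 */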
/*===========================================================================*
 *				do_rs_update				     *
 *===========================================================================*/
int do_rs_update(message *m_ptr)
{
	endpoint_t src_e, dst_e, reply_e;
	int src_p, dst_p;
	struct vmproc *src_vmp, *dst_vmp;
	int r, sys_upd_flags;

	src_e = m_ptr->m_lsys_vm_update.src;
	dst_e = m_ptr->m_lsys_vm_update.dst;
	sys_upd_flags = m_ptr->m_lsys_vm_update.flags;
	reply_e = m_ptr->m_source;

	/* Lookup slots for source and destination process. */
	if(vm_isokendpt(src_e, &src_p) != OK) {
		printf("do_rs_update: bad src endpoint %d\n", src_e);
		return EINVAL;
	}
	src_vmp = &vmproc[src_p];
	if(vm_isokendpt(dst_e, &dst_p) != OK) {
		printf("do_rs_update: bad dst endpoint %d\n", dst_e);
		return EINVAL;
	}
	dst_vmp = &vmproc[dst_p];

	/* Check flags. */
	if((sys_upd_flags & (SF_VM_ROLLBACK|SF_VM_NOMMAP)) == 0) {
		/* Can't preallocate when transferring mmapped regions. */
		if(map_region_lookup_type(dst_vmp, VR_PREALLOC_MAP)) {
			return ENOSYS;
		}
	}

	/* Let the kernel do the update first. */
	r = sys_update(src_e, dst_e,
	    sys_upd_flags & SF_VM_ROLLBACK ? SYS_UPD_ROLLBACK : 0);
	if(r != OK) {
		return r;
	}

	/* Do the update in VM now. */
	r = swap_proc_slot(src_vmp, dst_vmp);
	if(r != OK) {
		return r;
	}
	r = swap_proc_dyn_data(src_vmp, dst_vmp, sys_upd_flags);
	if(r != OK) {
		return r;
	}
	pt_bind(&src_vmp->vm_pt, src_vmp);
	pt_bind(&dst_vmp->vm_pt, dst_vmp);
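
	/*
	 * Note that swap_proc_slot() exchanged the two vmproc slots, so the
	 * pt_bind() calls above re-associate each page table with the slot
	 * it now belongs to.
	 */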
	/* Reply in case of an external request. The caller may itself be one
	 * of the two updated processes, in which case it is now known by the
	 * other endpoint, so redirect the reply accordingly.
	 */
	if(reply_e != VM_PROC_NR) {
		if(reply_e == src_e) reply_e = dst_e;
		else if(reply_e == dst_e) reply_e = src_e;
		m_ptr->m_type = OK;
		r = ipc_send(reply_e, m_ptr);
		if(r != OK) {
			panic("ipc_send() error");
		}
	}

	/* Any reply was sent explicitly above; suppress the default reply. */
	return SUSPEND;
}
/*===========================================================================*
 *			rs_memctl_make_vm_instance			     *
 *===========================================================================*/
static int rs_memctl_make_vm_instance(struct vmproc *new_vm_vmp)
{
	int r;
	u32_t flags;
	int verify;
	struct vmproc *this_vm_vmp;

	this_vm_vmp = &vmproc[VM_PROC_NR];

	pt_assert(&this_vm_vmp->vm_pt);

	/* Check if the operation is allowed. */
	assert(num_vm_instances == 1 || num_vm_instances == 2);
	if(num_vm_instances == 2) {
		printf("VM can currently support no more than 2 VM instances at a time.\n");
		return EPERM;
	}

	/* Copy settings from current VM. */
	new_vm_vmp->vm_flags |= VMF_VM_INSTANCE;
	num_vm_instances++;

	/* Pin memory for the new VM instance. */
	r = map_pin_memory(new_vm_vmp);
	if(r != OK) {
		return r;
	}

	/* Preallocate page tables for the entire address space for both
	 * VM and the new VM instance.
	 */
	flags = 0;
	verify = FALSE;
	r = pt_ptalloc_in_range(&this_vm_vmp->vm_pt,
		VM_OWN_HEAPBASE, VM_DATATOP, flags, verify);
	if(r != OK) {
		return r;
	}
	r = pt_ptalloc_in_range(&new_vm_vmp->vm_pt,
		VM_OWN_HEAPBASE, VM_DATATOP, flags, verify);
	if(r != OK) {
		return r;
	}

	/* Let the new VM instance map VM's page tables and its own. */
	r = pt_ptmap(this_vm_vmp, new_vm_vmp);
	if(r != OK) {
		return r;
	}
	r = pt_ptmap(new_vm_vmp, new_vm_vmp);
	if(r != OK) {
		return r;
	}

	pt_assert(&this_vm_vmp->vm_pt);
	pt_assert(&new_vm_vmp->vm_pt);

	return OK;
}
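
/*
 * Preallocating all page tables up front matters for the same reason given
 * in do_rs_prepare(): once the live-update window opens, neither VM instance
 * may allocate memory, including page-table pages, or a clean rollback of VM
 * would no longer be possible.
 */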
/*===========================================================================*
 *			rs_memctl_heap_prealloc				     *
 *===========================================================================*/
static int rs_memctl_heap_prealloc(struct vmproc *vmp,
	vir_bytes *addr, size_t *len)
{
	struct vir_region *data_vr;
	vir_bytes bytes;

	if(*len <= 0) {
		return EINVAL;
	}
	data_vr = region_search(&vmp->vm_regions_avl, VM_MMAPBASE, AVL_LESS);
	*addr = data_vr->vaddr + data_vr->length;
	bytes = *addr + *len;

	return real_brk(vmp, bytes);
}
/*===========================================================================*
 *			rs_memctl_map_prealloc				     *
 *===========================================================================*/
static int rs_memctl_map_prealloc(struct vmproc *vmp,
	vir_bytes *addr, size_t *len)
{
	struct vir_region *vr;
	vir_bytes base, top;
	int is_vm;

	if(*len <= 0) {
		return EINVAL;
	}
	*len = CLICK_CEIL(*len);

	is_vm = (vmp->vm_endpoint == VM_PROC_NR);
	base = is_vm ? VM_OWN_MMAPBASE : VM_MMAPBASE;
	top = is_vm ? VM_OWN_MMAPTOP : VM_MMAPTOP;

	if (!(vr = map_page_region(vmp, base, top, *len,
	    VR_ANON|VR_WRITABLE|VR_UNINITIALIZED, MF_PREALLOC,
	    &mem_type_anon))) {
		return ENOMEM;
	}
	vr->flags |= VR_PREALLOC_MAP;
	*addr = vr->vaddr;
	return OK;
}
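
/*
 * The VR_PREALLOC_MAP flag set above is what do_rs_update() checks for:
 * a preallocated mapping is incompatible with transferring memory-mapped
 * regions, so an update without SF_VM_ROLLBACK or SF_VM_NOMMAP is refused
 * with ENOSYS when such a region exists.
 */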
/*===========================================================================*
 *			rs_memctl_get_prealloc_map			     *
 *===========================================================================*/
static int rs_memctl_get_prealloc_map(struct vmproc *vmp,
	vir_bytes *addr, size_t *len)
{
	struct vir_region *vr;

	vr = map_region_lookup_type(vmp, VR_PREALLOC_MAP);
	if(!vr) {
		*addr = 0;
		*len = 0;
	}
	else {
		*addr = vr->vaddr;
		*len = vr->length;
	}
	return OK;
}
/*===========================================================================*
 *				do_rs_memctl				     *
 *===========================================================================*/
int do_rs_memctl(message *m_ptr)
{
	endpoint_t ep;
	int req, r, proc_nr;
	struct vmproc *vmp;

	ep = m_ptr->VM_RS_CTL_ENDPT;
	req = m_ptr->VM_RS_CTL_REQ;

	/* Lookup endpoint. */
	if ((r = vm_isokendpt(ep, &proc_nr)) != OK) {
		printf("do_rs_memctl: bad endpoint %d\n", ep);
		return EINVAL;
	}
	vmp = &vmproc[proc_nr];

	/* Process request. */
	switch(req)
	{
	case VM_RS_MEM_PIN:
		/* Only actually pin RS memory if VM can recover from
		 * crashes (saves memory).
		 */
		if (num_vm_instances <= 1)
			return OK;
		r = map_pin_memory(vmp);
		return r;
	case VM_RS_MEM_MAKE_VM:
		r = rs_memctl_make_vm_instance(vmp);
		return r;
	case VM_RS_MEM_HEAP_PREALLOC:
		r = rs_memctl_heap_prealloc(vmp,
			(vir_bytes*) &m_ptr->VM_RS_CTL_ADDR,
			(size_t*) &m_ptr->VM_RS_CTL_LEN);
		return r;
	case VM_RS_MEM_MAP_PREALLOC:
		r = rs_memctl_map_prealloc(vmp,
			(vir_bytes*) &m_ptr->VM_RS_CTL_ADDR,
			(size_t*) &m_ptr->VM_RS_CTL_LEN);
		return r;
	case VM_RS_MEM_GET_PREALLOC_MAP:
		r = rs_memctl_get_prealloc_map(vmp,
			(vir_bytes*) &m_ptr->VM_RS_CTL_ADDR,
			(size_t*) &m_ptr->VM_RS_CTL_LEN);
		return r;
	default:
		printf("do_rs_memctl: bad request %d\n", req);
		return EINVAL;
	}
}
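
/*
 * A minimal sketch of how a caller such as RS might issue one of the memctl
 * requests above. The direct _taskcall() usage and the variable names are
 * illustrative, not a verbatim copy of the RS code; in the real system this
 * is wrapped by a libsys routine:
 *
 *	message m;
 *
 *	memset(&m, 0, sizeof(m));
 *	m.VM_RS_CTL_ENDPT = endpoint;		// process to operate on
 *	m.VM_RS_CTL_REQ = VM_RS_MEM_PIN;	// one of the request codes
 *	r = _taskcall(VM_PROC_NR, VM_RS_MEMCTL, &m);
 */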