4 #include <minix/callnr.h>
6 #include <minix/config.h>
7 #include <minix/const.h>
9 #include <minix/endpoint.h>
10 #include <minix/minlib.h>
11 #include <minix/type.h>
12 #include <minix/ipc.h>
13 #include <minix/sysutil.h>
14 #include <minix/syslib.h>
15 #include <minix/safecopies.h>
16 #include <minix/bitmap.h>
32 /*===========================================================================*
34 *===========================================================================*/
/*
 * NOTE(review): this whole chunk is a damaged extraction — the original
 * file's line numbers are fused into the text and many lines are missing
 * (opening braces, local declarations such as r/n/vmp, return statements).
 * Comments below describe only the logic that is visible here.
 *
 * do_rs_set_priv: handle an RS request to install a VM call mask (ACL)
 * for a given process. Visible steps: validate the endpoint, copy the
 * call-mask bitmap from the caller's address space, then apply it via
 * acl_set(). Assumes locals r, n, nr and vmp are declared in the elided
 * lines — TODO confirm against the original source.
 */
35 int do_rs_set_priv(message
*m
)
/* Per-process bitmap of allowed VM calls; call_mask_p points at it. */
39 bitchunk_t call_mask
[VM_CALL_MASK_SIZE
], *call_mask_p
;
/* Reject requests naming an invalid endpoint. */
43 if ((r
= vm_isokendpt(nr
, &n
)) != OK
) {
44 printf("do_rs_set_priv: bad endpoint %d\n", nr
);
/* Copy the caller-supplied call mask into our local buffer.
 * sizeof(call_mask) is the full local array here, as intended. */
51 r
= sys_datacopy(m
->m_source
, (vir_bytes
) m
->VM_RS_BUF
, SELF
,
52 (vir_bytes
) call_mask
, sizeof(call_mask
));
55 call_mask_p
= call_mask
;
/* Diagnostic for an error branch whose condition is in the elided lines;
 * presumably rejects shared call masks for system processes — verify. */
58 printf("VM: do_rs_set_priv: sys procs don't share!\n");
/* Install the validated mask for the target process. */
64 acl_set(vmp
, call_mask_p
, m
->VM_RS_SYS
);
69 /*===========================================================================*
71 *===========================================================================*/
/*
 * NOTE(review): extraction is truncated — braces, declarations of
 * sys_upd_flags/src_p/dst_p, error returns and some region_search()
 * arguments are in elided lines. Comments describe visible logic only.
 */
72 int do_rs_prepare(message
*m_ptr
)
74 /* Prepare a new instance of a service for an upcoming live-update
75 * switch, based on the old instance of this service. This call is
76 * used only by RS and only for a multicomponent live update which
77 * includes VM. In this case, all processes need to be prepared such
78 * that they don't require the new VM instance to perform actions
79 * during live update that cannot be undone in the case of a rollback.
/* Endpoints of the old (src) and new (dst) service instances. */
81 endpoint_t src_e
, dst_e
;
83 struct vmproc
*src_vmp
, *dst_vmp
;
/* Data (heap) regions of both instances, used to compare heap sizes. */
84 struct vir_region
*src_data_vr
, *dst_data_vr
;
85 vir_bytes src_addr
, dst_addr
;
/* Unpack the live-update request message. */
88 src_e
= m_ptr
->m_lsys_vm_update
.src
;
89 dst_e
= m_ptr
->m_lsys_vm_update
.dst
;
90 sys_upd_flags
= m_ptr
->m_lsys_vm_update
.flags
;
92 /* Lookup slots for source and destination process. */
93 if(vm_isokendpt(src_e
, &src_p
) != OK
) {
94 printf("VM: do_rs_prepare: bad src endpoint %d\n", src_e
);
96 src_vmp
97 src_vmp
= &vmproc
[src_p
];
98 if(vm_isokendpt(dst_e
, &dst_p
) != OK
) {
99 printf("VM: do_rs_prepare: bad dst endpoint %d\n", dst_e
);
102 dst_vmp
= &vmproc
[dst_p
];
104 /* Pin memory for the source process. */
105 map_pin_memory(src_vmp
);
107 /* See if the source process has a larger heap than the destination
108 * process. If so, extend the heap of the destination process to
109 * match the source's. While this may end up wasting quite some
110 * memory, it is absolutely essential that the destination process
111 * does not run out of heap memory during the live update window,
112 * and since most processes will be doing an identity transfer, they
113 * are likely to require as much heap as their previous instances.
114 * Better safe than sorry. TODO: prevent wasting memory somehow;
115 * this seems particularly relevant for RS.
/* Locate each instance's data region just below VM_MMAPBASE;
 * the final region_search() argument (search direction) is elided. */
117 src_data_vr
= region_search(&src_vmp
->vm_regions_avl
, VM_MMAPBASE
,
120 dst_data_vr
= region_search(&dst_vmp
->vm_regions_avl
, VM_MMAPBASE
,
/* Compute the heap end (break) of each instance. */
124 src_addr
= src_data_vr
->vaddr
+ src_data_vr
->length
;
125 dst_addr
= dst_data_vr
->vaddr
+ dst_data_vr
->length
;
/* Grow the destination heap to at least match the source's. */
126 if (src_addr
> dst_addr
)
127 real_brk(dst_vmp
, src_addr
);
129 /* Now also pin memory for the destination process. */
130 map_pin_memory(dst_vmp
);
132 /* Finally, map the source process's memory-mapped regions into the
133 * destination process. This needs to happen now, because VM may not
134 * allocate any objects during the live update window, since this
135 * would prevent successful rollback of VM afterwards. The
136 * destination may not actually touch these regions during the live
137 * update window either, because they are mapped copy-on-write and a
138 * pagefault would also cause object allocation. Objects are pages,
139 * slab objects, anything in the new VM instance to which changes are
140 * visible in the old VM basically.
/* Skip the mmap transfer when the flags say no mmap regions move. */
142 if (!(sys_upd_flags
& SF_VM_NOMMAP
))
143 map_proc_dyn_data(src_vmp
, dst_vmp
);
148 /*===========================================================================*
150 *===========================================================================*/
/*
 * NOTE(review): truncated extraction — braces, error-handling lines,
 * declarations of src_p/dst_p and all return statements are elided.
 * Comments describe only the visible logic.
 *
 * do_rs_update: perform the actual live-update switch between an old
 * (src) and new (dst) service instance: kernel-side update first, then
 * VM-side slot/dynamic-data swap, then rebind page tables, and finally
 * reply to the requester (translating its endpoint if it was itself
 * updated).
 */
151 int do_rs_update(message
*m_ptr
)
153 endpoint_t src_e
, dst_e
, reply_e
;
155 struct vmproc
*src_vmp
, *dst_vmp
;
156 int r
, sys_upd_flags
;
/* Unpack the update request. */
158 src_e
= m_ptr
->m_lsys_vm_update
.src
;
159 dst_e
= m_ptr
->m_lsys_vm_update
.dst
;
160 sys_upd_flags
= m_ptr
->m_lsys_vm_update
.flags
;
/* Remember who to reply to; the caller's slot may be swapped below. */
161 reply_e
= m_ptr
->m_source
;
163 /* Lookup slots for source and destination process. */
164 if(vm_isokendpt(src_e
, &src_p
) != OK
) {
165 printf("do_rs_update: bad src endpoint %d\n", src_e
);
168 src_vmp
= &vmproc
[src_p
];
169 if(vm_isokendpt(dst_e
, &dst_p
) != OK
) {
170 printf("do_rs_update: bad dst endpoint %d\n", dst_e
);
173 dst_vmp
= &vmproc
[dst_p
];
/* Sanity check: a preallocated mmap region on the destination is
 * incompatible with a non-rollback, mmap-transferring update. */
176 if((sys_upd_flags
& (SF_VM_ROLLBACK
|SF_VM_NOMMAP
)) == 0) {
177 /* Can't preallocate when transfering mmapped regions. */
178 if(map_region_lookup_type(dst_vmp
, VR_PREALLOC_MAP
)) {
183 /* Let the kernel do the update first. */
184 r
= sys_update(src_e
, dst_e
,
185 sys_upd_flags
& SF_VM_ROLLBACK
? SYS_UPD_ROLLBACK
: 0);
190 /* Do the update in VM now. */
191 r
= swap_proc_slot(src_vmp
, dst_vmp
);
/* Swap dynamic data (regions etc.) between the two slots. */
195 r
= swap_proc_dyn_data(src_vmp
, dst_vmp
, sys_upd_flags
);
/* Rebind both processes to their (now swapped) page tables. */
199 pt_bind(&src_vmp
->vm_pt
, src_vmp
);
200 pt_bind(&dst_vmp
->vm_pt
, dst_vmp
);
202 /* Reply in case of external request, update-aware. */
203 if(reply_e
!= VM_PROC_NR
) {
/* If the requester was one of the updated endpoints, its endpoint
 * changed during the swap — redirect the reply accordingly. */
204 if(reply_e
== src_e
) reply_e
= dst_e
;
205 else if(reply_e
== dst_e
) reply_e
= src_e
;
207 r
= ipc_send(reply_e
, m_ptr
);
/* Presumably guarded by a check on r in an elided line — verify. */
209 panic("ipc_send() error");
216 /*===========================================================================*
217 * rs_memctl_make_vm_instance *
218 *===========================================================================*/
/*
 * NOTE(review): truncated extraction — declarations of r/flags/verify,
 * error checks after each call, and return statements are in elided
 * lines. Comments describe visible logic only.
 *
 * rs_memctl_make_vm_instance: set up a second VM instance (for live
 * update of VM itself): pin its memory, preallocate page tables for
 * both the current and the new instance over the VM address range, and
 * let the new instance map both sets of page tables.
 */
219 static int rs_memctl_make_vm_instance(struct vmproc
*new_vm_vmp
)
224 struct vmproc
*this_vm_vmp
;
/* Slot of the currently running VM instance. */
226 this_vm_vmp
= &vmproc
[VM_PROC_NR
];
228 pt_assert(&this_vm_vmp
->vm_pt
);
230 /* Check if the operation is allowed. */
231 assert(num_vm_instances
== 1 || num_vm_instances
== 2);
232 if(num_vm_instances
== 2) {
233 printf("VM can currently support no more than 2 VM instances at the time.");
237 /* Copy settings from current VM. */
238 new_vm_vmp
->vm_flags
|= VMF_VM_INSTANCE
;
241 /* Pin memory for the new VM instance. */
242 r
= map_pin_memory(new_vm_vmp
);
247 /* Preallocate page tables for the entire address space for both
248 * VM and the new VM instance.
/* flags/verify are set in elided lines — TODO confirm their values. */
252 r
= pt_ptalloc_in_range(&this_vm_vmp
->vm_pt
,
253 VM_OWN_HEAPBASE
, VM_DATATOP
, flags
, verify
);
257 r
= pt_ptalloc_in_range(&new_vm_vmp
->vm_pt
,
258 VM_OWN_HEAPBASE
, VM_DATATOP
, flags
, verify
);
263 /* Let the new VM instance map VM's page tables and its own. */
264 r
= pt_ptmap(this_vm_vmp
, new_vm_vmp
);
268 r
= pt_ptmap(new_vm_vmp
, new_vm_vmp
);
/* Both page tables must still be consistent after the mapping. */
273 pt_assert(&this_vm_vmp
->vm_pt
);
274 pt_assert(&new_vm_vmp
->vm_pt
);
279 /*===========================================================================*
280 * rs_memctl_heap_prealloc *
281 *===========================================================================*/
/*
 * NOTE(review): truncated — the declaration of `bytes` (presumably
 * vir_bytes), braces, and any validation of *len are in elided lines.
 *
 * rs_memctl_heap_prealloc: grow a process's heap by *len bytes past the
 * current break. Finds the data region just below VM_MMAPBASE, reports
 * the current break through *addr, and moves the break to *addr + *len
 * via real_brk(). Returns real_brk()'s status.
 */
282 static int rs_memctl_heap_prealloc(struct vmproc
*vmp
,
283 vir_bytes
*addr
, size_t *len
)
285 struct vir_region
*data_vr
;
/* Data region is the last region below the mmap area. */
291 data_vr
= region_search(&vmp
->vm_regions_avl
, VM_MMAPBASE
, AVL_LESS
);
/* Current break = end of the data region; reported to the caller. */
292 *addr
= data_vr
->vaddr
+ data_vr
->length
;
293 bytes
= *addr
+ *len
;
295 return real_brk(vmp
, bytes
);
298 /*===========================================================================*
299 * rs_memctl_map_prealloc *
300 *===========================================================================*/
/*
 * NOTE(review): truncated — declarations of is_vm/base/top, the tail of
 * the map_page_region() argument list, the error branch, and the code
 * reporting *addr back to the caller are in elided lines.
 *
 * rs_memctl_map_prealloc: preallocate an anonymous, writable,
 * page-pinned mapping of *len bytes (rounded up to click granularity)
 * in the process's mmap range, and tag it VR_PREALLOC_MAP so it can be
 * found later by rs_memctl_get_prealloc_map().
 */
301 static int rs_memctl_map_prealloc(struct vmproc
*vmp
,
302 vir_bytes
*addr
, size_t *len
)
304 struct vir_region
*vr
;
/* Round the requested length up to a whole number of clicks. */
311 *len
= CLICK_CEIL(*len
);
/* VM itself uses its own private mmap window. */
313 is_vm
= (vmp
->vm_endpoint
== VM_PROC_NR
);
314 base
= is_vm
? VM_OWN_MMAPBASE
: VM_MMAPBASE
;
315 top
= is_vm
? VM_OWN_MMAPTOP
: VM_MMAPTOP
;
/* MF_PREALLOC forces the pages to be allocated up front; trailing
 * arguments of this call are elided. */
317 if (!(vr
= map_page_region(vmp
, base
, top
, *len
,
318 VR_ANON
|VR_WRITABLE
|VR_UNINITIALIZED
, MF_PREALLOC
,
/* Tag the region so it is discoverable as the prealloc map. */
322 vr
->flags
|= VR_PREALLOC_MAP
;
327 /*===========================================================================*
328 * rs_memctl_get_prealloc_map *
329 *===========================================================================*/
/*
 * NOTE(review): body after the lookup is elided; presumably it reports
 * the region's address/length via *addr and *len — verify against the
 * original source.
 *
 * rs_memctl_get_prealloc_map: find the region previously created by
 * rs_memctl_map_prealloc() (tagged VR_PREALLOC_MAP) for this process.
 */
330 static int rs_memctl_get_prealloc_map(struct vmproc
*vmp
,
331 vir_bytes
*addr
, size_t *len
)
333 struct vir_region
*vr
;
335 vr
= map_region_lookup_type(vmp
, VR_PREALLOC_MAP
);
347 /*===========================================================================*
349 *===========================================================================*/
/*
 * NOTE(review): truncated — the local declarations (r/ep/req/proc_nr/
 * vmp), the switch(req) header, the break statements, and the returns
 * are all in elided lines. The case labels below make the dispatch
 * structure clear regardless.
 *
 * do_rs_memctl: dispatch RS memory-control requests for a target
 * endpoint: pin memory, create a second VM instance, or pre-allocate /
 * query heap and mmap regions ahead of a live update.
 */
350 int do_rs_memctl(message
*m_ptr
)
/* Unpack target endpoint and requested operation. */
356 ep
= m_ptr
->VM_RS_CTL_ENDPT
;
357 req
= m_ptr
->VM_RS_CTL_REQ
;
359 /* Lookup endpoint. */
360 if ((r
= vm_isokendpt(ep
, &proc_nr
)) != OK
) {
361 printf("do_rs_memctl: bad endpoint %d\n", ep
);
364 vmp
= &vmproc
[proc_nr
];
366 /* Process request. */
370 /* Only actually pin RS memory if VM can recover from crashes (saves memory). */
371 if (num_vm_instances
<= 1)
373 r
= map_pin_memory(vmp
);
375 case VM_RS_MEM_MAKE_VM
:
376 r
= rs_memctl_make_vm_instance(vmp
);
/* The prealloc handlers read and write the ADDR/LEN message fields
 * in place, hence the pointer casts into the message body. */
378 case VM_RS_MEM_HEAP_PREALLOC
:
379 r
= rs_memctl_heap_prealloc(vmp
, (vir_bytes
*) &m_ptr
->VM_RS_CTL_ADDR
, (size_t*) &m_ptr
->VM_RS_CTL_LEN
);
381 case VM_RS_MEM_MAP_PREALLOC
:
382 r
= rs_memctl_map_prealloc(vmp
, (vir_bytes
*) &m_ptr
->VM_RS_CTL_ADDR
, (size_t*) &m_ptr
->VM_RS_CTL_LEN
);
384 case VM_RS_MEM_GET_PREALLOC_MAP
:
385 r
= rs_memctl_get_prealloc_map(vmp
, (vir_bytes
*) &m_ptr
->VM_RS_CTL_ADDR
, (size_t*) &m_ptr
->VM_RS_CTL_LEN
);
/* Unknown request code: diagnostic in the (elided) default branch. */
388 printf("do_rs_memctl: bad request %d\n", req
);