4 #include <minix/callnr.h>
6 #include <minix/config.h>
7 #include <minix/const.h>
9 #include <minix/endpoint.h>
10 #include <minix/minlib.h>
11 #include <minix/type.h>
12 #include <minix/ipc.h>
13 #include <minix/sysutil.h>
14 #include <minix/syslib.h>
15 #include <minix/safecopies.h>
16 #include <minix/bitmap.h>
31 /*===========================================================================*
/* NOTE(review): the banner's function-name line (original line 32,
 * presumably "do_rs_set_priv") is missing from this extract. */
33 *===========================================================================*/
/*
 * do_rs_set_priv: install an IPC call mask / privilege setting for a
 * process on behalf of RS.
 *
 * NOTE(review): this extract is elided — the embedded original line
 * numbers skip (34, 38, 42, 43, 50, ...), so the opening brace, the
 * declarations of r/n/nr/vmp, the error-return paths and the final
 * return statement are not visible here. Comments below describe only
 * what the visible fragments show.
 */
34 int do_rs_set_priv(message
*m
)
/* Local buffer for the caller-supplied VM call mask, plus a pointer that
 * is later aimed at it. */
38 bitchunk_t call_mask
[VM_CALL_MASK_SIZE
], *call_mask_p
;
/* Validate the target endpoint; on failure, log the bad endpoint.
 * (The body of this error branch is elided — presumably returns.) */
42 if ((r
= vm_isokendpt(nr
, &n
)) != OK
) {
43 printf("do_rs_set_priv: bad endpoint %d\n", nr
);
/* Copy the call mask from the caller's (m->m_source) address space at
 * m->VM_RS_BUF into the local call_mask buffer in VM (SELF). */
50 r
= sys_datacopy(m
->m_source
, (vir_bytes
) m
->VM_RS_BUF
, SELF
,
51 (vir_bytes
) call_mask
, sizeof(call_mask
));
54 call_mask_p
= call_mask
;
/* Diagnostic for an error branch whose guarding condition is elided in
 * this extract. */
57 printf("VM: do_rs_set_priv: sys procs don't share!\n");
/* Apply the ACL / call mask to the target process slot. NOTE(review):
 * vmp's initialization is not visible here — presumably &vmproc[n]. */
63 acl_set(vmp
, call_mask_p
, m
->VM_RS_SYS
);
68 /*===========================================================================*
/* NOTE(review): the banner's function-name line (original line 69,
 * presumably "do_rs_prepare") is missing from this extract. */
70 *===========================================================================*/
/* NOTE(review): this extract is elided (embedded original line numbers
 * skip), so the opening brace, the declarations of src_p/dst_p/
 * sys_upd_flags, the error returns inside the endpoint checks, the
 * trailing region_search() arguments, and the final return are not
 * visible here. */
71 int do_rs_prepare(message
*m_ptr
)
73 /* Prepare a new instance of a service for an upcoming live-update
74 * switch, based on the old instance of this service. This call is
75 * used only by RS and only for a multicomponent live update which
76 * includes VM. In this case, all processes need to be prepared such
77 * that they don't require the new VM instance to perform actions
78 * during live update that cannot be undone in the case of a rollback.
*/ /* NOTE(review): comment terminator restored; original line 79 is
    * elided from this extract. */
80 endpoint_t src_e
, dst_e
;
82 struct vmproc
*src_vmp
, *dst_vmp
;
83 struct vir_region
*src_data_vr
, *dst_data_vr
;
84 vir_bytes src_addr
, dst_addr
;
/* Unpack the live-update request: source/destination endpoints and the
 * update flags from the m_lsys_vm_update message fields. */
87 src_e
= m_ptr
->m_lsys_vm_update
.src
;
88 dst_e
= m_ptr
->m_lsys_vm_update
.dst
;
89 sys_upd_flags
= m_ptr
->m_lsys_vm_update
.flags
;
91 /* Lookup slots for source and destination process. */
92 if(vm_isokendpt(src_e
, &src_p
) != OK
) {
93 printf("VM: do_rs_prepare: bad src endpoint %d\n", src_e
);
96 src_vmp
= &vmproc
[src_p
];
97 if(vm_isokendpt(dst_e
, &dst_p
) != OK
) {
98 printf("VM: do_rs_prepare: bad dst endpoint %d\n", dst_e
);
101 dst_vmp
= &vmproc
[dst_p
];
103 /* Pin memory for the source process. */
104 map_pin_memory(src_vmp
);
106 /* See if the source process has a larger heap than the destination
107 * process. If so, extend the heap of the destination process to
108 * match the source's. While this may end up wasting quite some
109 * memory, it is absolutely essential that the destination process
110 * does not run out of heap memory during the live update window,
111 * and since most processes will be doing an identity transfer, they
112 * are likely to require as much heap as their previous instances.
113 * Better safe than sorry. TODO: prevent wasting memory somehow;
114 * this seems particularly relevant for RS.
*/ /* NOTE(review): comment terminator restored; original line 115 is
    * elided from this extract. */
/* Find the data (heap) region just below VM_MMAPBASE in each process.
 * NOTE(review): the final region_search() argument (presumably AVL_LESS,
 * on elided original lines 117/120) is missing from this extract. */
116 src_data_vr
= region_search(&src_vmp
->vm_regions_avl
, VM_MMAPBASE
,
119 dst_data_vr
= region_search(&dst_vmp
->vm_regions_avl
, VM_MMAPBASE
,
/* Compute the heap end (vaddr + length) of each process and grow the
 * destination's break to match the source's if it is smaller. */
123 src_addr
= src_data_vr
->vaddr
+ src_data_vr
->length
;
124 dst_addr
= dst_data_vr
->vaddr
+ dst_data_vr
->length
;
125 if (src_addr
> dst_addr
)
126 real_brk(dst_vmp
, src_addr
);
128 /* Now also pin memory for the destination process. */
129 map_pin_memory(dst_vmp
);
131 /* Finally, map the source process's memory-mapped regions into the
132 * destination process. This needs to happen now, because VM may not
133 * allocate any objects during the live update window, since this
134 * would prevent successful rollback of VM afterwards. The
135 * destination may not actually touch these regions during the live
136 * update window either, because they are mapped copy-on-write and a
137 * pagefault would also cause object allocation. Objects are pages,
138 * slab objects, anything in the new VM instance to which changes are
139 * visible in the old VM basically.
*/ /* NOTE(review): comment terminator restored; original line 140 is
    * elided from this extract. */
141 if (!(sys_upd_flags
& SF_VM_NOMMAP
))
142 map_proc_dyn_data(src_vmp
, dst_vmp
);
147 /*===========================================================================*
/* NOTE(review): the banner's function-name line (original line 148,
 * presumably "do_rs_update") is missing from this extract. */
149 *===========================================================================*/
/*
 * do_rs_update: perform the actual live-update switch between a source
 * and destination process on behalf of RS — kernel-side update first,
 * then VM's own slot/data swap and page-table rebinding, then a reply
 * to the external requester.
 *
 * NOTE(review): this extract is elided — the opening brace, the
 * declarations of src_p/dst_p, error returns, several branch bodies
 * and the final return are not visible here.
 */
150 int do_rs_update(message
*m_ptr
)
152 endpoint_t src_e
, dst_e
, reply_e
;
154 struct vmproc
*src_vmp
, *dst_vmp
;
155 int r
, sys_upd_flags
;
/* Unpack the request: endpoints, flags, and who to reply to. */
157 src_e
= m_ptr
->m_lsys_vm_update
.src
;
158 dst_e
= m_ptr
->m_lsys_vm_update
.dst
;
159 sys_upd_flags
= m_ptr
->m_lsys_vm_update
.flags
;
160 reply_e
= m_ptr
->m_source
;
162 /* Lookup slots for source and destination process. */
163 if(vm_isokendpt(src_e
, &src_p
) != OK
) {
164 printf("do_rs_update: bad src endpoint %d\n", src_e
);
167 src_vmp
= &vmproc
[src_p
];
168 if(vm_isokendpt(dst_e
, &dst_p
) != OK
) {
169 printf("do_rs_update: bad dst endpoint %d\n", dst_e
);
172 dst_vmp
= &vmproc
[dst_p
];
/* Unless this is a rollback or an mmap-free update, reject a
 * destination that still has a preallocated map region. NOTE(review):
 * the body of the inner branch is elided in this extract. */
175 if((sys_upd_flags
& (SF_VM_ROLLBACK
|SF_VM_NOMMAP
)) == 0) {
176 /* Can't preallocate when transfering mmapped regions. */
177 if(map_region_lookup_type(dst_vmp
, VR_PREALLOC_MAP
)) {
182 /* Let the kernel do the update first. */
183 r
= sys_update(src_e
, dst_e
,
184 sys_upd_flags
& SF_VM_ROLLBACK
? SYS_UPD_ROLLBACK
: 0);
189 /* Do the update in VM now. */
190 r
= swap_proc_slot(src_vmp
, dst_vmp
);
/* Swap the dynamically allocated data between the two slots as well.
 * NOTE(review): error handling between these calls is elided. */
194 r
= swap_proc_dyn_data(src_vmp
, dst_vmp
, sys_upd_flags
);
/* Rebind both processes' page tables after the swap. */
198 pt_bind(&src_vmp
->vm_pt
, src_vmp
);
199 pt_bind(&dst_vmp
->vm_pt
, dst_vmp
);
201 /* Reply in case of external request, update-aware. */
202 if(reply_e
!= VM_PROC_NR
) {
/* The endpoints have just been swapped, so flip the reply target to
 * still reach the original requester. */
203 if(reply_e
== src_e
) reply_e
= dst_e
;
204 else if(reply_e
== dst_e
) reply_e
= src_e
;
206 r
= ipc_send(reply_e
, m_ptr
);
/* NOTE(review): the guard on r (presumably "if(r != OK)", original
 * line 207) is elided in this extract. */
208 panic("ipc_send() error");
215 /*===========================================================================*
216 * rs_memctl_make_vm_instance *
217 *===========================================================================*/
/*
 * Turn new_vm_vmp into a second, pre-prepared VM instance: pin its
 * memory, preallocate page tables for both the current VM and the new
 * instance, and let the new instance map both sets of page tables.
 *
 * NOTE(review): this extract is elided — the opening brace, the
 * declarations of r/flags/verify, num_vm_instances' origin, error
 * returns and the final return are not visible here.
 */
218 static int rs_memctl_make_vm_instance(struct vmproc
*new_vm_vmp
)
223 struct vmproc
*this_vm_vmp
;
/* The currently running VM's own process slot. */
225 this_vm_vmp
= &vmproc
[VM_PROC_NR
];
227 pt_assert(&this_vm_vmp
->vm_pt
);
229 /* Check if the operation is allowed. */
230 assert(num_vm_instances
== 1 || num_vm_instances
== 2);
231 if(num_vm_instances
== 2) {
232 printf("VM can currently support no more than 2 VM instances at the time.");
236 /* Copy settings from current VM. */
237 new_vm_vmp
->vm_flags
|= VMF_VM_INSTANCE
;
240 /* Pin memory for the new VM instance. */
241 r
= map_pin_memory(new_vm_vmp
);
246 /* Preallocate page tables for the entire address space for both
247 * VM and the new VM instance.
*/ /* NOTE(review): comment terminator restored; the original closing
    * line is elided from this extract, as are the definitions of the
    * flags/verify arguments used below. */
251 r
= pt_ptalloc_in_range(&this_vm_vmp
->vm_pt
,
252 VM_OWN_HEAPBASE
, VM_DATATOP
, flags
, verify
);
256 r
= pt_ptalloc_in_range(&new_vm_vmp
->vm_pt
,
257 VM_OWN_HEAPBASE
, VM_DATATOP
, flags
, verify
);
262 /* Let the new VM instance map VM's page tables and its own. */
263 r
= pt_ptmap(this_vm_vmp
, new_vm_vmp
);
267 r
= pt_ptmap(new_vm_vmp
, new_vm_vmp
);
/* Sanity-check both page tables after the mapping work. */
272 pt_assert(&this_vm_vmp
->vm_pt
);
273 pt_assert(&new_vm_vmp
->vm_pt
);
278 /*===========================================================================*
279 * rs_memctl_heap_prealloc *
280 *===========================================================================*/
/*
 * Grow vmp's heap by *len bytes beyond its current end, reporting the
 * old heap end back through *addr. Returns the result of real_brk().
 *
 * NOTE(review): this extract is elided — the opening brace, the
 * declaration of 'bytes', and any argument validation are not visible
 * here.
 */
281 static int rs_memctl_heap_prealloc(struct vmproc
*vmp
,
282 vir_bytes
*addr
, size_t *len
)
284 struct vir_region
*data_vr
;
/* Locate the data (heap) region: the region just below VM_MMAPBASE. */
290 data_vr
= region_search(&vmp
->vm_regions_avl
, VM_MMAPBASE
, AVL_LESS
);
/* Report the current heap end to the caller and compute the new break:
 * current end plus the requested length. */
291 *addr
= data_vr
->vaddr
+ data_vr
->length
;
292 bytes
= *addr
+ *len
;
294 return real_brk(vmp
, bytes
);
297 /*===========================================================================*
298 * rs_memctl_map_prealloc *
299 *===========================================================================*/
/*
 * Preallocate an anonymous, writable, uninitialized mapping of *len
 * (click-rounded) bytes in vmp's mmap range, and tag the region with
 * VR_PREALLOC_MAP so it can be found later.
 *
 * NOTE(review): this extract is elided — the opening brace, the
 * declarations of is_vm/base/top, the trailing map_page_region()
 * argument(s), the failure branch, the write to *addr and the return
 * are not visible here.
 */
300 static int rs_memctl_map_prealloc(struct vmproc
*vmp
,
301 vir_bytes
*addr
, size_t *len
)
303 struct vir_region
*vr
;
/* Round the requested length up to a whole number of clicks. */
310 *len
= CLICK_CEIL(*len
);
/* VM itself uses its own dedicated mmap window; everyone else uses the
 * regular one. */
312 is_vm
= (vmp
->vm_endpoint
== VM_PROC_NR
);
313 base
= is_vm
? VM_OWN_MMAPBASE
: VM_MMAPBASE
;
314 top
= is_vm
? VM_OWN_MMAPTOP
: VM_MMAPTOP
;
/* Create the preallocated anonymous mapping. */
316 if (!(vr
= map_page_region(vmp
, base
, top
, *len
,
317 VR_ANON
|VR_WRITABLE
|VR_UNINITIALIZED
, MF_PREALLOC
,
/* Mark the new region so rs_memctl_get_prealloc_map() /
 * do_rs_update() can identify it later. */
321 vr
->flags
|= VR_PREALLOC_MAP
;
326 /*===========================================================================*
327 * rs_memctl_get_prealloc_map *
328 *===========================================================================*/
/*
 * Look up the region previously tagged VR_PREALLOC_MAP in vmp (by
 * rs_memctl_map_prealloc) — presumably to report its address/length
 * through *addr/*len, though that code is not visible in this extract.
 *
 * NOTE(review): this extract is elided — the opening brace, the writes
 * to *addr/*len and the return statement are missing here.
 */
329 static int rs_memctl_get_prealloc_map(struct vmproc
*vmp
,
330 vir_bytes
*addr
, size_t *len
)
332 struct vir_region
*vr
;
334 vr
= map_region_lookup_type(vmp
, VR_PREALLOC_MAP
);
346 /*===========================================================================*
/* NOTE(review): the banner's function-name line (original line 347,
 * presumably "do_rs_memctl") is missing from this extract. */
348 *===========================================================================*/
/*
 * do_rs_memctl: dispatch RS memory-control requests (pin memory, make a
 * VM instance, heap/map preallocation and lookup) for the endpoint
 * named in the message.
 *
 * NOTE(review): this extract is elided — the opening brace, the
 * declarations of ep/req/r/proc_nr/vmp, the "switch(req)" line, the
 * break statements, error returns and the final return are not visible
 * here; the case labels below imply the missing switch.
 */
349 int do_rs_memctl(message
*m_ptr
)
/* Unpack the request: target endpoint and request code. */
355 ep
= m_ptr
->VM_RS_CTL_ENDPT
;
356 req
= m_ptr
->VM_RS_CTL_REQ
;
358 /* Lookup endpoint. */
359 if ((r
= vm_isokendpt(ep
, &proc_nr
)) != OK
) {
360 printf("do_rs_memctl: bad endpoint %d\n", ep
);
363 vmp
= &vmproc
[proc_nr
];
365 /* Process request. */
/* NOTE(review): the case label for this first arm (presumably
 * VM_RS_MEM_PIN) is elided from this extract. */
369 /* Only actually pin RS memory if VM can recover from crashes (saves memory). */
370 if (num_vm_instances
<= 1)
/* NOTE(review): the branch between lines 370 and 372 (presumably
 * "r = OK; else") is elided here. */
372 r
= map_pin_memory(vmp
);
374 case VM_RS_MEM_MAKE_VM
:
375 r
= rs_memctl_make_vm_instance(vmp
);
377 case VM_RS_MEM_HEAP_PREALLOC
:
/* Pass pointers into the message so the helper can return the
 * resulting address/length in-place. */
378 r
= rs_memctl_heap_prealloc(vmp
, (vir_bytes
*) &m_ptr
->VM_RS_CTL_ADDR
, (size_t*) &m_ptr
->VM_RS_CTL_LEN
);
380 case VM_RS_MEM_MAP_PREALLOC
:
381 r
= rs_memctl_map_prealloc(vmp
, (vir_bytes
*) &m_ptr
->VM_RS_CTL_ADDR
, (size_t*) &m_ptr
->VM_RS_CTL_LEN
);
383 case VM_RS_MEM_GET_PREALLOC_MAP
:
384 r
= rs_memctl_get_prealloc_map(vmp
, (vir_bytes
*) &m_ptr
->VM_RS_CTL_ADDR
, (size_t*) &m_ptr
->VM_RS_CTL_LEN
);
/* NOTE(review): the "default:" label (original line 386) is elided;
 * this is the unknown-request diagnostic. */
387 printf("do_rs_memctl: bad request %d\n", req
);