VM: only single page chunks
[minix.git] / servers / vm / utility.c
blob 8b6e64f9f71573bf1e9f3e9791fafbff4c5fbee6
/* This file contains some utility routines for VM. */

#define _SYSTEM		1

#define _MINIX		1	/* To get the brk() prototype (as _brk()). */

#include <minix/callnr.h>
#include <minix/com.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/ds.h>
#include <minix/endpoint.h>
#include <minix/minlib.h>
#include <minix/type.h>
#include <minix/ipc.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/bitmap.h>
#include <string.h>
#include <errno.h>
#include <env.h>
#include <unistd.h>
#include <memory.h>
#include <assert.h>
#include <sys/param.h>

#include "proto.h"
#include "glo.h"
#include "util.h"
#include "region.h"
#include "sanitycheck.h"

#include <machine/archtypes.h>
#include "kernel/const.h"
#include "kernel/config.h"
#include "kernel/type.h"
#include "kernel/proc.h"

/*===========================================================================*
 *				get_mem_chunks				     *
 *===========================================================================*/
void get_mem_chunks(struct memory *mem_chunks)	/* store mem chunks here */
{
/* Initialize the free memory list from the 'memory' boot variable. Translate
 * the byte offsets and sizes in this list to clicks, properly truncated.
 */
  phys_bytes base, size, limit;
  int i;
  struct memory *memp;

  /* Obtain and parse memory from system environment. */
  if(env_memory_parse(mem_chunks, NR_MEMS) != OK)
	panic("couldn't obtain memory chunks");

  /* Round physical memory to clicks. Round start up, round end down. */
  for (i = 0; i < NR_MEMS; i++) {
	memp = &mem_chunks[i];		/* next mem chunk is stored here */
	base = mem_chunks[i].base;
	size = mem_chunks[i].size;
	limit = base + size;
	base = (phys_bytes) (CLICK_CEIL(base));
	limit = (phys_bytes) (CLICK_FLOOR(limit));
	if (limit <= base) {
		/* Chunk too small to contain even one whole click. */
		memp->base = memp->size = 0;
	} else {
		memp->base = base >> CLICK_SHIFT;
		memp->size = (limit - base) >> CLICK_SHIFT;
	}
  }
}
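
/* Worked example of the rounding above (illustrative only; assumes 4 KiB
 * clicks, i.e. CLICK_SHIFT == 12, which is architecture-dependent):
 *
 *	base  = 0x12345, size = 0x10000  =>  limit = 0x22345
 *	CLICK_CEIL(base)   = 0x13000	(start rounded up)
 *	CLICK_FLOOR(limit) = 0x22000	(end rounded down)
 *	memp->base = 0x13000 >> 12 = 0x13 clicks
 *	memp->size = (0x22000 - 0x13000) >> 12 = 0xf clicks
 *
 * Any partial click at either edge of a chunk is discarded.
 */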

#if 0
/*===========================================================================*
 *				reserve_proc_mem			     *
 *===========================================================================*/
void reserve_proc_mem(struct memory *mem_chunks,	/* free mem chunk list */
	struct mem_map *map_ptr)			/* memory to remove */
{
/* Remove server memory from the free memory list.
 */
  struct memory *memp;
  for (memp = mem_chunks; memp < &mem_chunks[NR_MEMS]; memp++) {
	if(memp->base <= map_ptr[T].mem_phys
		&& memp->base+memp->size >= map_ptr[T].mem_phys)
	{
		phys_bytes progsz = map_ptr[S].mem_phys
			- map_ptr[T].mem_phys;
		phys_bytes progend = map_ptr[S].mem_phys;

		if (memp->base == map_ptr[T].mem_phys) {
			memp->base += progsz;
			memp->size -= progsz;
		} else {
			struct memory *mempr;

			/* have to split mem_chunks */
			if(mem_chunks[NR_MEMS-1].size>0)
				panic("reserve_proc_mem: can't find free mem_chunks to map: 0x%lx",
					map_ptr[T].mem_phys);
			for(mempr=&mem_chunks[NR_MEMS-1];mempr>memp;mempr--) {
				*mempr=*(mempr-1);
			}
			assert(memp < &mem_chunks[NR_MEMS-1]);
			(memp+1)->base = progend;
			(memp+1)->size = memp->base + memp->size
				- progend;
			memp->size = map_ptr[T].mem_phys - memp->base;
		}
		break;
	}
  }
  if (memp >= &mem_chunks[NR_MEMS]) {
	panic("reserve_proc_mem: can't find map in mem_chunks: 0x%lx",
		map_ptr[T].mem_phys);
  }
}
#endif

/*===========================================================================*
 *				vm_isokendpt				     *
 *===========================================================================*/
int vm_isokendpt(endpoint_t endpoint, int *proc)
{
	*proc = _ENDPOINT_P(endpoint);
	if(*proc < 0 || *proc >= NR_PROCS)
		return EINVAL;
	/* The slot index is in range here, so only check that the endpoint
	 * generation matches and that the slot is actually in use.
	 */
	if(endpoint != vmproc[*proc].vm_endpoint)
		return EDEADEPT;
	if(!(vmproc[*proc].vm_flags & VMF_INUSE))
		return EDEADEPT;
	return OK;
}
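
/* Typical usage (a sketch; do_info() below is a real caller): translate a
 * message source endpoint into a process table slot, rejecting endpoints
 * that are out of range or stale, before touching vmproc[]:
 *
 *	int slot;
 *	if(vm_isokendpt(m->m_source, &slot) != OK)
 *		return EINVAL;
 *	vmp = &vmproc[slot];
 */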

/*===========================================================================*
 *				do_info					     *
 *===========================================================================*/
int do_info(message *m)
{
	struct vm_stats_info vsi;
	struct vm_usage_info vui;
	static struct vm_region_info vri[MAX_VRI_COUNT];
	struct vmproc *vmp;
	vir_bytes addr, size, next, ptr;
	int r, pr, dummy, count, free_pages, largest_contig;

	if (vm_isokendpt(m->m_source, &pr) != OK)
		return EINVAL;
	vmp = &vmproc[pr];

	ptr = (vir_bytes) m->VMI_PTR;

	switch(m->VMI_WHAT) {
	case VMIW_STATS:
		vsi.vsi_pagesize = VM_PAGE_SIZE;
		vsi.vsi_total = total_pages;
		memstats(&dummy, &free_pages, &largest_contig);
		vsi.vsi_free = free_pages;
		vsi.vsi_largest = largest_contig;

		get_stats_info(&vsi);

		addr = (vir_bytes) &vsi;
		size = sizeof(vsi);

		break;

	case VMIW_USAGE:
		if (vm_isokendpt(m->VMI_EP, &pr) != OK)
			return EINVAL;

		get_usage_info(&vmproc[pr], &vui);

		addr = (vir_bytes) &vui;
		size = sizeof(vui);

		break;

	case VMIW_REGION:
		if (vm_isokendpt(m->VMI_EP, &pr) != OK)
			return EINVAL;

		count = MIN(m->VMI_COUNT, MAX_VRI_COUNT);
		next = m->VMI_NEXT;

		count = get_region_info(&vmproc[pr], vri, count, &next);

		m->VMI_COUNT = count;
		m->VMI_NEXT = next;

		addr = (vir_bytes) vri;
		size = sizeof(vri[0]) * count;

		break;

	default:
		return EINVAL;
	}

	if (size == 0)
		return OK;

	/* Make sure that no page faults can occur while copying out. A page
	 * fault would cause the kernel to send a notify to us, while we would
	 * be waiting for the result of the copy system call, resulting in a
	 * deadlock. Note that no memory mapping can be undone without the
	 * involvement of VM, so we are safe until we're done.
	 */
	r = handle_memory(vmp, ptr, size, 1 /*wrflag*/);
	if (r != OK) return r;

	/* Now that we know the copy out will succeed, perform the actual copy
	 * operation.
	 */
	return sys_datacopy(SELF, addr,
		(vir_bytes) vmp->vm_endpoint, ptr, size);
}
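
/* Hypothetical caller sketch (not part of this file; assumes the libsys
 * _taskcall() helper and the VM_INFO request type that is dispatched to
 * do_info()):
 *
 *	message m;
 *	struct vm_stats_info vsi;
 *
 *	m.VMI_WHAT = VMIW_STATS;
 *	m.VMI_PTR = (char *) &vsi;
 *	if(_taskcall(VM_PROC_NR, VM_INFO, &m) == OK)
 *		printf("%lu of %lu pages free\n",
 *			(unsigned long) vsi.vsi_free,
 *			(unsigned long) vsi.vsi_total);
 */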

/*===========================================================================*
 *				swap_proc_slot				     *
 *===========================================================================*/
int swap_proc_slot(struct vmproc *src_vmp, struct vmproc *dst_vmp)
{
	struct vmproc orig_src_vmproc, orig_dst_vmproc;

#if LU_DEBUG
	printf("VM: swap_proc: swapping %d (%d) and %d (%d)\n",
	    src_vmp->vm_endpoint, src_vmp->vm_slot,
	    dst_vmp->vm_endpoint, dst_vmp->vm_slot);
#endif

	/* Save existing data. */
	orig_src_vmproc = *src_vmp;
	orig_dst_vmproc = *dst_vmp;

	/* Swap slots. */
	*src_vmp = orig_dst_vmproc;
	*dst_vmp = orig_src_vmproc;

	/* Preserve endpoints and slot numbers. */
	src_vmp->vm_endpoint = orig_src_vmproc.vm_endpoint;
	src_vmp->vm_slot = orig_src_vmproc.vm_slot;
	dst_vmp->vm_endpoint = orig_dst_vmproc.vm_endpoint;
	dst_vmp->vm_slot = orig_dst_vmproc.vm_slot;

#if LU_DEBUG
	printf("VM: swap_proc: swapped %d (%d) and %d (%d)\n",
	    src_vmp->vm_endpoint, src_vmp->vm_slot,
	    dst_vmp->vm_endpoint, dst_vmp->vm_slot);
#endif

	return OK;
}

/*===========================================================================*
 *			      swap_proc_dyn_data			     *
 *===========================================================================*/
int swap_proc_dyn_data(struct vmproc *src_vmp, struct vmproc *dst_vmp)
{
	int is_vm;
	int r;

	is_vm = (dst_vmp->vm_endpoint == VM_PROC_NR);

	/* For VM, transfer memory regions above the stack first. */
	if(is_vm) {
#if LU_DEBUG
		printf("VM: swap_proc_dyn_data: transferring regions above the stack from old VM (%d) to new VM (%d)\n",
			src_vmp->vm_endpoint, dst_vmp->vm_endpoint);
#endif
		r = pt_map_in_range(src_vmp, dst_vmp, VM_STACKTOP, 0);
		if(r != OK) {
			printf("swap_proc_dyn_data: pt_map_in_range failed\n");
			return r;
		}
	}

#if LU_DEBUG
	printf("VM: swap_proc_dyn_data: swapping regions' parents for %d (%d) and %d (%d)\n",
	    src_vmp->vm_endpoint, src_vmp->vm_slot,
	    dst_vmp->vm_endpoint, dst_vmp->vm_slot);
#endif

	/* Swap vir_regions' parents. */
	map_setparent(src_vmp);
	map_setparent(dst_vmp);

	/* For regular processes, transfer regions above the stack now.
	 * In case of rollback, we need to skip this step. To sandbox the
	 * new instance and prevent state corruption on rollback, we share all
	 * the regions between the two instances as COW.
	 */
	if(!is_vm) {
		struct vir_region *vr;
		vr = map_lookup(dst_vmp, VM_STACKTOP);
		if(vr && !map_lookup(src_vmp, VM_STACKTOP)) {
#if LU_DEBUG
			printf("VM: swap_proc_dyn_data: transferring regions above the stack from %d to %d\n",
				src_vmp->vm_endpoint, dst_vmp->vm_endpoint);
#endif
			r = map_proc_copy_from(src_vmp, dst_vmp, vr);
			if(r != OK) {
				return r;
			}
		}
	}

	return OK;
}
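
/* Illustrative live-update sequence (a sketch, not part of this file; the
 * actual update path elsewhere in VM drives these calls): the two routines
 * above are meant to be used back to back, first exchanging the static slot
 * state, then the dynamic data:
 *
 *	int r;
 *	if((r = swap_proc_slot(src_vmp, dst_vmp)) != OK)
 *		return r;
 *	if((r = swap_proc_dyn_data(src_vmp, dst_vmp)) != OK)
 *		return r;
 */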