vm: merge i386 and arm pagetable code
[minix.git] / servers / vm / utility.c
blob a23a1996c43885c9ecfc051a8cb33a66f22d6c77
2 /* This file contains some utility routines for VM. */
4 #define _SYSTEM 1
6 #define _MINIX 1 /* To get the brk() prototype (as _brk()). */
8 #include <minix/callnr.h>
9 #include <minix/com.h>
10 #include <minix/config.h>
11 #include <minix/const.h>
12 #include <minix/ds.h>
13 #include <minix/endpoint.h>
14 #include <minix/minlib.h>
15 #include <minix/type.h>
16 #include <minix/ipc.h>
17 #include <minix/sysutil.h>
18 #include <minix/syslib.h>
19 #include <minix/type.h>
20 #include <minix/bitmap.h>
21 #include <string.h>
22 #include <errno.h>
23 #include <env.h>
24 #include <unistd.h>
25 #include <assert.h>
26 #include <sys/param.h>
28 #include "proto.h"
29 #include "glo.h"
30 #include "util.h"
31 #include "region.h"
32 #include "sanitycheck.h"
34 #include <machine/archtypes.h>
35 #include "kernel/const.h"
36 #include "kernel/config.h"
37 #include "kernel/type.h"
38 #include "kernel/proc.h"
40 /*===========================================================================*
41 * get_mem_chunks *
42 *===========================================================================*/
43 void get_mem_chunks(mem_chunks)
44 struct memory *mem_chunks; /* store mem chunks here */
46 /* Initialize the free memory list from the 'memory' boot variable. Translate
47 * the byte offsets and sizes in this list to clicks, properly truncated.
49 phys_bytes base, size, limit;
50 int i;
51 struct memory *memp;
53 /* Obtain and parse memory from system environment. */
54 if(env_memory_parse(mem_chunks, NR_MEMS) != OK)
55 panic("couldn't obtain memory chunks");
57 /* Round physical memory to clicks. Round start up, round end down. */
58 for (i = 0; i < NR_MEMS; i++) {
59 memp = &mem_chunks[i]; /* next mem chunk is stored here */
60 base = mem_chunks[i].base;
61 size = mem_chunks[i].size;
62 limit = base + size;
63 base = (phys_bytes) (CLICK_CEIL(base));
64 limit = (phys_bytes) (CLICK_FLOOR(limit));
65 if (limit <= base) {
66 memp->base = memp->size = 0;
67 } else {
68 memp->base = base >> CLICK_SHIFT;
69 memp->size = (limit - base) >> CLICK_SHIFT;
74 /*===========================================================================*
75 * vm_isokendpt *
76 *===========================================================================*/
77 int vm_isokendpt(endpoint_t endpoint, int *proc)
79 *proc = _ENDPOINT_P(endpoint);
80 if(*proc < 0 || *proc >= NR_PROCS)
81 return EINVAL;
82 if(*proc >= 0 && endpoint != vmproc[*proc].vm_endpoint)
83 return EDEADEPT;
84 if(*proc >= 0 && !(vmproc[*proc].vm_flags & VMF_INUSE))
85 return EDEADEPT;
86 return OK;
90 /*===========================================================================*
91 * do_info *
92 *===========================================================================*/
93 int do_info(message *m)
95 struct vm_stats_info vsi;
96 struct vm_usage_info vui;
97 static struct vm_region_info vri[MAX_VRI_COUNT];
98 struct vmproc *vmp;
99 vir_bytes addr, size, next, ptr;
100 int r, pr, dummy, count, free_pages, largest_contig;
102 if (vm_isokendpt(m->m_source, &pr) != OK)
103 return EINVAL;
104 vmp = &vmproc[pr];
106 ptr = (vir_bytes) m->VMI_PTR;
108 switch(m->VMI_WHAT) {
109 case VMIW_STATS:
110 vsi.vsi_pagesize = VM_PAGE_SIZE;
111 vsi.vsi_total = total_pages;
112 memstats(&dummy, &free_pages, &largest_contig);
113 vsi.vsi_free = free_pages;
114 vsi.vsi_largest = largest_contig;
116 get_stats_info(&vsi);
118 addr = (vir_bytes) &vsi;
119 size = sizeof(vsi);
121 break;
123 case VMIW_USAGE:
124 if(m->VMI_EP < 0)
125 get_usage_info_kernel(&vui);
126 else if (vm_isokendpt(m->VMI_EP, &pr) != OK)
127 return EINVAL;
128 else get_usage_info(&vmproc[pr], &vui);
130 addr = (vir_bytes) &vui;
131 size = sizeof(vui);
133 break;
135 case VMIW_REGION:
136 if (vm_isokendpt(m->VMI_EP, &pr) != OK)
137 return EINVAL;
139 count = MIN(m->VMI_COUNT, MAX_VRI_COUNT);
140 next = m->VMI_NEXT;
142 count = get_region_info(&vmproc[pr], vri, count, &next);
144 m->VMI_COUNT = count;
145 m->VMI_NEXT = next;
147 addr = (vir_bytes) vri;
148 size = sizeof(vri[0]) * count;
150 break;
152 default:
153 return EINVAL;
156 if (size == 0)
157 return OK;
159 /* Make sure that no page faults can occur while copying out. A page
160 * fault would cause the kernel to send a notify to us, while we would
161 * be waiting for the result of the copy system call, resulting in a
162 * deadlock. Note that no memory mapping can be undone without the
163 * involvement of VM, so we are safe until we're done.
165 r = handle_memory(vmp, ptr, size, 1 /*wrflag*/);
166 if (r != OK) return r;
168 /* Now that we know the copy out will succeed, perform the actual copy
169 * operation.
171 return sys_datacopy(SELF, addr,
172 (vir_bytes) vmp->vm_endpoint, ptr, size);
175 /*===========================================================================*
176 * swap_proc_slot *
177 *===========================================================================*/
178 int swap_proc_slot(struct vmproc *src_vmp, struct vmproc *dst_vmp)
180 struct vmproc orig_src_vmproc, orig_dst_vmproc;
182 #if LU_DEBUG
183 printf("VM: swap_proc: swapping %d (%d) and %d (%d)\n",
184 src_vmp->vm_endpoint, src_vmp->vm_slot,
185 dst_vmp->vm_endpoint, dst_vmp->vm_slot);
186 #endif
188 /* Save existing data. */
189 orig_src_vmproc = *src_vmp;
190 orig_dst_vmproc = *dst_vmp;
192 /* Swap slots. */
193 *src_vmp = orig_dst_vmproc;
194 *dst_vmp = orig_src_vmproc;
196 /* Preserve endpoints and slot numbers. */
197 src_vmp->vm_endpoint = orig_src_vmproc.vm_endpoint;
198 src_vmp->vm_slot = orig_src_vmproc.vm_slot;
199 dst_vmp->vm_endpoint = orig_dst_vmproc.vm_endpoint;
200 dst_vmp->vm_slot = orig_dst_vmproc.vm_slot;
202 #if LU_DEBUG
203 printf("VM: swap_proc: swapped %d (%d) and %d (%d)\n",
204 src_vmp->vm_endpoint, src_vmp->vm_slot,
205 dst_vmp->vm_endpoint, dst_vmp->vm_slot);
206 #endif
208 return OK;
211 /*===========================================================================*
212 * swap_proc_dyn_data *
213 *===========================================================================*/
214 int swap_proc_dyn_data(struct vmproc *src_vmp, struct vmproc *dst_vmp)
216 int is_vm;
217 int r;
219 is_vm = (dst_vmp->vm_endpoint == VM_PROC_NR);
221 /* For VM, transfer memory regions above the stack first. */
222 if(is_vm) {
223 #if LU_DEBUG
224 printf("VM: swap_proc_dyn_data: tranferring regions above the stack from old VM (%d) to new VM (%d)\n",
225 src_vmp->vm_endpoint, dst_vmp->vm_endpoint);
226 #endif
227 r = pt_map_in_range(src_vmp, dst_vmp, VM_STACKTOP, 0);
228 if(r != OK) {
229 printf("swap_proc_dyn_data: pt_map_in_range failed\n");
230 return r;
234 #if LU_DEBUG
235 printf("VM: swap_proc_dyn_data: swapping regions' parents for %d (%d) and %d (%d)\n",
236 src_vmp->vm_endpoint, src_vmp->vm_slot,
237 dst_vmp->vm_endpoint, dst_vmp->vm_slot);
238 #endif
240 /* Swap vir_regions' parents. */
241 map_setparent(src_vmp);
242 map_setparent(dst_vmp);
244 /* For regular processes, transfer regions above the stack now.
245 * In case of rollback, we need to skip this step. To sandbox the
246 * new instance and prevent state corruption on rollback, we share all
247 * the regions between the two instances as COW.
249 if(!is_vm) {
250 struct vir_region *vr;
251 vr = map_lookup(dst_vmp, VM_STACKTOP, NULL);
252 if(vr && !map_lookup(src_vmp, VM_STACKTOP, NULL)) {
253 #if LU_DEBUG
254 printf("VM: swap_proc_dyn_data: tranferring regions above the stack from %d to %d\n",
255 src_vmp->vm_endpoint, dst_vmp->vm_endpoint);
256 #endif
257 r = map_proc_copy_from(src_vmp, dst_vmp, vr);
258 if(r != OK) {
259 return r;
264 return OK;