/* minix3.git: servers/vm/utility.c ("VM: memtype fix",
 * blob 4c835b85fc1f2c6a7f38723d7732de566a335dc3)
 */
2 /* This file contains some utility routines for VM. */
4 #define _SYSTEM 1
6 #include <minix/callnr.h>
7 #include <minix/com.h>
8 #include <minix/config.h>
9 #include <minix/const.h>
10 #include <minix/ds.h>
11 #include <minix/endpoint.h>
12 #include <minix/minlib.h>
13 #include <minix/type.h>
14 #include <minix/ipc.h>
15 #include <minix/sysutil.h>
16 #include <minix/syslib.h>
17 #include <minix/type.h>
18 #include <minix/bitmap.h>
19 #include <string.h>
20 #include <errno.h>
21 #include <env.h>
22 #include <unistd.h>
23 #include <assert.h>
24 #include <sys/param.h>
25 #include <sys/mman.h>
27 #include "proto.h"
28 #include "glo.h"
29 #include "util.h"
30 #include "region.h"
31 #include "sanitycheck.h"
33 #include <machine/archtypes.h>
34 #include "kernel/const.h"
35 #include "kernel/config.h"
36 #include "kernel/type.h"
37 #include "kernel/proc.h"
39 /*===========================================================================*
40 * get_mem_chunks *
41 *===========================================================================*/
42 void get_mem_chunks(mem_chunks)
43 struct memory *mem_chunks; /* store mem chunks here */
45 /* Initialize the free memory list from the 'memory' boot variable. Translate
46 * the byte offsets and sizes in this list to clicks, properly truncated.
48 phys_bytes base, size, limit;
49 int i;
50 struct memory *memp;
52 /* Obtain and parse memory from system environment. */
53 if(env_memory_parse(mem_chunks, NR_MEMS) != OK)
54 panic("couldn't obtain memory chunks");
56 /* Round physical memory to clicks. Round start up, round end down. */
57 for (i = 0; i < NR_MEMS; i++) {
58 memp = &mem_chunks[i]; /* next mem chunk is stored here */
59 base = mem_chunks[i].base;
60 size = mem_chunks[i].size;
61 limit = base + size;
62 base = (phys_bytes) (CLICK_CEIL(base));
63 limit = (phys_bytes) (CLICK_FLOOR(limit));
64 if (limit <= base) {
65 memp->base = memp->size = 0;
66 } else {
67 memp->base = base >> CLICK_SHIFT;
68 memp->size = (limit - base) >> CLICK_SHIFT;
73 /*===========================================================================*
74 * vm_isokendpt *
75 *===========================================================================*/
76 int vm_isokendpt(endpoint_t endpoint, int *proc)
78 *proc = _ENDPOINT_P(endpoint);
79 if(*proc < 0 || *proc >= NR_PROCS)
80 return EINVAL;
81 if(*proc >= 0 && endpoint != vmproc[*proc].vm_endpoint)
82 return EDEADEPT;
83 if(*proc >= 0 && !(vmproc[*proc].vm_flags & VMF_INUSE))
84 return EDEADEPT;
85 return OK;
89 /*===========================================================================*
90 * do_info *
91 *===========================================================================*/
92 int do_info(message *m)
94 struct vm_stats_info vsi;
95 struct vm_usage_info vui;
96 static struct vm_region_info vri[MAX_VRI_COUNT];
97 struct vmproc *vmp;
98 vir_bytes addr, size, next, ptr;
99 int r, pr, dummy, count, free_pages, largest_contig;
101 if (vm_isokendpt(m->m_source, &pr) != OK)
102 return EINVAL;
103 vmp = &vmproc[pr];
105 ptr = (vir_bytes) m->VMI_PTR;
107 switch(m->VMI_WHAT) {
108 case VMIW_STATS:
109 vsi.vsi_pagesize = VM_PAGE_SIZE;
110 vsi.vsi_total = total_pages;
111 memstats(&dummy, &free_pages, &largest_contig);
112 vsi.vsi_free = free_pages;
113 vsi.vsi_largest = largest_contig;
115 get_stats_info(&vsi);
117 addr = (vir_bytes) &vsi;
118 size = sizeof(vsi);
120 break;
122 case VMIW_USAGE:
123 if(m->VMI_EP < 0)
124 get_usage_info_kernel(&vui);
125 else if (vm_isokendpt(m->VMI_EP, &pr) != OK)
126 return EINVAL;
127 else get_usage_info(&vmproc[pr], &vui);
129 addr = (vir_bytes) &vui;
130 size = sizeof(vui);
132 break;
134 case VMIW_REGION:
135 if (vm_isokendpt(m->VMI_EP, &pr) != OK)
136 return EINVAL;
138 count = MIN(m->VMI_COUNT, MAX_VRI_COUNT);
139 next = m->VMI_NEXT;
141 count = get_region_info(&vmproc[pr], vri, count, &next);
143 m->VMI_COUNT = count;
144 m->VMI_NEXT = next;
146 addr = (vir_bytes) vri;
147 size = sizeof(vri[0]) * count;
149 break;
151 default:
152 return EINVAL;
155 if (size == 0)
156 return OK;
158 /* Make sure that no page faults can occur while copying out. A page
159 * fault would cause the kernel to send a notify to us, while we would
160 * be waiting for the result of the copy system call, resulting in a
161 * deadlock. Note that no memory mapping can be undone without the
162 * involvement of VM, so we are safe until we're done.
164 r = handle_memory(vmp, ptr, size, 1 /*wrflag*/);
165 if (r != OK) return r;
167 /* Now that we know the copy out will succeed, perform the actual copy
168 * operation.
170 return sys_datacopy(SELF, addr,
171 (vir_bytes) vmp->vm_endpoint, ptr, size);
174 /*===========================================================================*
175 * swap_proc_slot *
176 *===========================================================================*/
177 int swap_proc_slot(struct vmproc *src_vmp, struct vmproc *dst_vmp)
179 struct vmproc orig_src_vmproc, orig_dst_vmproc;
181 #if LU_DEBUG
182 printf("VM: swap_proc: swapping %d (%d) and %d (%d)\n",
183 src_vmp->vm_endpoint, src_vmp->vm_slot,
184 dst_vmp->vm_endpoint, dst_vmp->vm_slot);
185 #endif
187 /* Save existing data. */
188 orig_src_vmproc = *src_vmp;
189 orig_dst_vmproc = *dst_vmp;
191 /* Swap slots. */
192 *src_vmp = orig_dst_vmproc;
193 *dst_vmp = orig_src_vmproc;
195 /* Preserve endpoints and slot numbers. */
196 src_vmp->vm_endpoint = orig_src_vmproc.vm_endpoint;
197 src_vmp->vm_slot = orig_src_vmproc.vm_slot;
198 dst_vmp->vm_endpoint = orig_dst_vmproc.vm_endpoint;
199 dst_vmp->vm_slot = orig_dst_vmproc.vm_slot;
201 #if LU_DEBUG
202 printf("VM: swap_proc: swapped %d (%d) and %d (%d)\n",
203 src_vmp->vm_endpoint, src_vmp->vm_slot,
204 dst_vmp->vm_endpoint, dst_vmp->vm_slot);
205 #endif
207 return OK;
210 /*===========================================================================*
211 * swap_proc_dyn_data *
212 *===========================================================================*/
213 int swap_proc_dyn_data(struct vmproc *src_vmp, struct vmproc *dst_vmp)
215 int is_vm;
216 int r;
218 is_vm = (dst_vmp->vm_endpoint == VM_PROC_NR);
220 /* For VM, transfer memory regions above the stack first. */
221 if(is_vm) {
222 #if LU_DEBUG
223 printf("VM: swap_proc_dyn_data: tranferring regions above the stack from old VM (%d) to new VM (%d)\n",
224 src_vmp->vm_endpoint, dst_vmp->vm_endpoint);
225 #endif
226 r = pt_map_in_range(src_vmp, dst_vmp, VM_STACKTOP, 0);
227 if(r != OK) {
228 printf("swap_proc_dyn_data: pt_map_in_range failed\n");
229 return r;
233 #if LU_DEBUG
234 printf("VM: swap_proc_dyn_data: swapping regions' parents for %d (%d) and %d (%d)\n",
235 src_vmp->vm_endpoint, src_vmp->vm_slot,
236 dst_vmp->vm_endpoint, dst_vmp->vm_slot);
237 #endif
239 /* Swap vir_regions' parents. */
240 map_setparent(src_vmp);
241 map_setparent(dst_vmp);
243 /* For regular processes, transfer regions above the stack now.
244 * In case of rollback, we need to skip this step. To sandbox the
245 * new instance and prevent state corruption on rollback, we share all
246 * the regions between the two instances as COW.
248 if(!is_vm) {
249 struct vir_region *vr;
250 vr = map_lookup(dst_vmp, VM_STACKTOP, NULL);
251 if(vr && !map_lookup(src_vmp, VM_STACKTOP, NULL)) {
252 #if LU_DEBUG
253 printf("VM: swap_proc_dyn_data: tranferring regions above the stack from %d to %d\n",
254 src_vmp->vm_endpoint, dst_vmp->vm_endpoint);
255 #endif
256 r = map_proc_copy_from(src_vmp, dst_vmp, vr);
257 if(r != OK) {
258 return r;
263 return OK;
266 void *minix_mmap(void *addr, size_t len, int f, int f2, int f3, off_t o)
268 void *ret;
269 phys_bytes p;
271 assert(!addr);
272 assert(!(len % VM_PAGE_SIZE));
274 ret = vm_allocpages(&p, VMP_SLAB, len/VM_PAGE_SIZE);
276 if(!ret) return MAP_FAILED;
277 memset(ret, 0, len);
278 return ret;
281 int minix_munmap(void * addr, size_t len)
283 vm_freepages((vir_bytes) addr, roundup(len, VM_PAGE_SIZE)/VM_PAGE_SIZE);
284 return 0;
287 int _brk(void *addr)
289 vir_bytes target = roundup((vir_bytes)addr, VM_PAGE_SIZE), v;
290 extern char _end;
291 extern char *_brksize;
292 static vir_bytes prevbrk = (vir_bytes) &_end;
293 struct vmproc *vmprocess = &vmproc[VM_PROC_NR];
295 for(v = roundup(prevbrk, VM_PAGE_SIZE); v < target;
296 v += VM_PAGE_SIZE) {
297 phys_bytes mem, newpage = alloc_mem(1, 0);
298 if(newpage == NO_MEM) return -1;
299 mem = CLICK2ABS(newpage);
300 if(pt_writemap(vmprocess, &vmprocess->vm_pt,
301 v, mem, VM_PAGE_SIZE,
302 ARCH_VM_PTE_PRESENT
303 | ARCH_VM_PTE_USER
304 | ARCH_VM_PTE_RW
305 #if defined(__arm__)
306 | ARM_VM_PTE_WB
307 #endif
308 , 0) != OK) {
309 free_mem(newpage, 1);
310 return -1;
312 prevbrk = v + VM_PAGE_SIZE;
315 _brksize = (char *) addr;
317 if(sys_vmctl(SELF, VMCTL_FLUSHTLB, 0) != OK)
318 panic("flushtlb failed");
320 return 0;