Make VM fix up memory for kernel that crosses region boundaries
[minix.git] / servers / vm / mmap.c

#define _SYSTEM 1

#define VERBOSE 0

#include <minix/callnr.h>
#include <minix/com.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/ds.h>
#include <minix/endpoint.h>
#include <minix/keymap.h>
#include <minix/minlib.h>
#include <minix/type.h>
#include <minix/ipc.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/safecopies.h>
#include <minix/bitmap.h>

#include <sys/mman.h>

#include <errno.h>
#include <string.h>
#include <env.h>
#include <stdio.h>
#include <fcntl.h>
#include <memory.h>

#include "glo.h"
#include "proto.h"
#include "util.h"
#include "region.h"

/*===========================================================================*
 *				do_mmap					     *
 *===========================================================================*/
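/* Handle an mmap() request. Only anonymous memory (VMM_FD == -1 or
 * MAP_ANON) is supported here; file-backed mappings return ENOSYS.
 */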
PUBLIC int do_mmap(message *m)
{
	int r, n;
	struct vmproc *vmp;
	int mfflags = 0;
	struct vir_region *vr = NULL;

	if((r=vm_isokendpt(m->m_source, &n)) != OK) {
		vm_panic("do_mmap: message from strange source", m->m_source);
	}

	vmp = &vmproc[n];

	if(!(vmp->vm_flags & VMF_HASPT))
		return ENXIO;

	if(m->VMM_FD == -1 || (m->VMM_FLAGS & MAP_ANON)) {
		int s;
		vir_bytes v;
		u32_t vrflags = VR_ANON | VR_WRITABLE;
		size_t len = (vir_bytes) m->VMM_LEN;

		if(m->VMM_FD != -1) {
			return EINVAL;
		}

		if(m->VMM_FLAGS & MAP_CONTIG) mfflags |= MF_CONTIG;
		if(m->VMM_FLAGS & MAP_PREALLOC) mfflags |= MF_PREALLOC;
		if(m->VMM_FLAGS & MAP_LOWER16M) vrflags |= VR_LOWER16MB;
		if(m->VMM_FLAGS & MAP_LOWER1M) vrflags |= VR_LOWER1MB;
		if(m->VMM_FLAGS & MAP_ALIGN64K) vrflags |= VR_PHYS64K;
		if(m->VMM_FLAGS & MAP_SHARED) vrflags |= VR_SHARED;
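
		/* Round the length up to a whole number of pages; e.g. with
		 * 4096-byte pages, a request for 5000 bytes becomes 8192
		 * bytes (two pages).
		 */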
		if(len % VM_PAGE_SIZE)
			len += VM_PAGE_SIZE - (len % VM_PAGE_SIZE);

		if(!(vr = map_page_region(vmp,
			arch_vir2map(vmp,
				m->VMM_ADDR ? m->VMM_ADDR : vmp->vm_stacktop),
			VM_DATATOP, len, MAP_NONE, vrflags, mfflags))) {
			return ENOMEM;
		}
	} else {
		return ENOSYS;
	}

	/* Return mapping, as seen from process. */
	vm_assert(vr);
	m->VMM_RETADDR = arch_map2vir(vmp, vr->vaddr);

	return OK;
}

/*===========================================================================*
 *				map_perm_check				     *
 *===========================================================================*/
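/* Check whether 'caller' may have the physical range [physaddr,
 * physaddr+len) mapped into 'target'.
 */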
PUBLIC int map_perm_check(endpoint_t caller, endpoint_t target,
	phys_bytes physaddr, phys_bytes len)
{
	int r;

	/* TTY and memory are allowed to do anything.
	 * They have to be special cases as they have to be able to do
	 * anything; TTY even on behalf of anyone for the TIOCMAPMEM
	 * ioctl. MEM just for itself.
	 */
	if(caller == TTY_PROC_NR)
		return OK;
	if(caller != target)
		return EPERM;
	if(caller == MEM_PROC_NR)
		return OK;

	/* Anyone else needs explicit permission from the kernel (ultimately
	 * set by PCI).
	 */
	r = sys_privquery_mem(caller, physaddr, len);

	return r;
}

/*===========================================================================*
 *				do_map_phys				     *
 *===========================================================================*/
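/* Map a range of physical memory into the target process, subject to
 * map_perm_check(). The virtual address of the new mapping is returned
 * in the reply message.
 */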
PUBLIC int do_map_phys(message *m)
{
	int r, n;
	struct vmproc *vmp;
	endpoint_t target;
	struct vir_region *vr;
	vir_bytes len;
	phys_bytes startaddr;

	target = m->VMMP_EP;
	len = m->VMMP_LEN;

	if(target == SELF)
		target = m->m_source;

	if((r=vm_isokendpt(target, &n)) != OK)
		return EINVAL;

	startaddr = (vir_bytes)m->VMMP_PHADDR;

	/* First check permission, then round range down/up. Caller can't
	 * help it if we can't map in lower than page granularity.
	 */
	if(map_perm_check(m->m_source, target, startaddr, len) != OK) {
		printf("VM: unauthorized mapping of 0x%lx by %d\n",
			startaddr, m->m_source);
		return EPERM;
	}

	vmp = &vmproc[n];

	if(!(vmp->vm_flags & VMF_HASPT))
		return ENXIO;

	if(len % VM_PAGE_SIZE)
		len += VM_PAGE_SIZE - (len % VM_PAGE_SIZE);

	if(!(vr = map_page_region(vmp, arch_vir2map(vmp, vmp->vm_stacktop),
		VM_DATATOP, len, startaddr,
		VR_DIRECT | VR_NOPF | VR_WRITABLE, 0))) {
		return ENOMEM;
	}

	m->VMMP_VADDR_REPLY = (void *) arch_map2vir(vmp, vr->vaddr);

	return OK;
}

/*===========================================================================*
 *				do_unmap_phys				     *
 *===========================================================================*/
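/* Remove a physically mapped (VR_DIRECT) region, as set up by
 * do_map_phys(), from the target process.
 */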
PUBLIC int do_unmap_phys(message *m)
{
	int r, n;
	struct vmproc *vmp;
	endpoint_t target;
	struct vir_region *region;

	target = m->VMUP_EP;
	if(target == SELF)
		target = m->m_source;

	if((r=vm_isokendpt(target, &n)) != OK)
		return EINVAL;

	vmp = &vmproc[n];

	if(!(region = map_lookup(vmp,
		arch_vir2map(vmp, (vir_bytes) m->VMUM_ADDR)))) {
		return EINVAL;
	}

	if(!(region->flags & VR_DIRECT)) {
		return EINVAL;
	}

	if(map_unmap_region(vmp, region, region->length) != OK) {
		return EINVAL;
	}

	return OK;
}

/*===========================================================================*
 *				do_remap				     *
 *===========================================================================*/
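/* Map a region that exists in the source process into the destination
 * process as well, and reply with the address at which it was mapped.
 */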
PUBLIC int do_remap(message *m)
{
	int d, dn, s, sn;
	vir_bytes da, sa, startv;
	size_t size;
	struct vir_region *vr, *region;
	struct vmproc *dvmp, *svmp;
	int r;

	d = m->VMRE_D;
	s = m->VMRE_S;
	da = (vir_bytes) m->VMRE_DA;
	sa = (vir_bytes) m->VMRE_SA;
	size = m->VMRE_SIZE;

	if ((r = vm_isokendpt(d, &dn)) != OK)
		return EINVAL;
	if ((r = vm_isokendpt(s, &sn)) != OK)
		return EINVAL;

	dvmp = &vmproc[dn];
	svmp = &vmproc[sn];

	/* da is not translated by arch_vir2map(); it is handled a little
	 * differently, since map_remap() has to know whether the user
	 * needs to bind to THAT address or have one chosen by the system.
	 */
	sa = arch_vir2map(svmp, sa);

	if (!(region = map_lookup(svmp, sa)))
		return EINVAL;

	if ((r = map_remap(dvmp, da, size, region, &startv)) != OK)
		return r;

	m->VMRE_RETA = (char *) arch_map2vir(dvmp, startv);
	return OK;
}

/*===========================================================================*
 *				do_shared_unmap				     *
 *===========================================================================*/
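/* Unmap a shared (VR_SHARED) region from the target process. The given
 * address must be the start of the region.
 */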
PUBLIC int do_shared_unmap(message *m)
{
	int r, n;
	struct vmproc *vmp;
	endpoint_t target;
	struct vir_region *vr;
	vir_bytes addr;

	target = m->VMUN_ENDPT;

	if ((r = vm_isokendpt(target, &n)) != OK)
		return EINVAL;

	vmp = &vmproc[n];

	addr = arch_vir2map(vmp, m->VMUN_ADDR);

	if(!(vr = map_lookup(vmp, addr))) {
		printf("VM: addr 0x%lx not found.\n", m->VMUN_ADDR);
		return EFAULT;
	}

	if(vr->vaddr != addr) {
		printf("VM: wrong address for shared_unmap.\n");
		return EFAULT;
	}

	if(!(vr->flags & VR_SHARED)) {
		printf("VM: address does not point to shared region.\n");
		return EFAULT;
	}

	if(map_unmap_region(vmp, vr, vr->length) != OK)
		vm_panic("do_shared_unmap: map_unmap_region failed", NO_NUM);

	return OK;
}

/*===========================================================================*
 *				do_get_phys				     *
 *===========================================================================*/
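/* Return, via map_get_phys(), the physical address behind a virtual
 * address in the target process.
 */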
PUBLIC int do_get_phys(message *m)
{
	int r, n;
	struct vmproc *vmp;
	endpoint_t target;
	phys_bytes ret;
	vir_bytes addr;

	target = m->VMPHYS_ENDPT;
	addr = m->VMPHYS_ADDR;

	if ((r = vm_isokendpt(target, &n)) != OK)
		return EINVAL;

	vmp = &vmproc[n];
	addr = arch_vir2map(vmp, addr);

	r = map_get_phys(vmp, addr, &ret);

	m->VMPHYS_RETA = ret;
	return r;
}

/*===========================================================================*
 *				do_get_refcount				     *
 *===========================================================================*/
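/* Return, via map_get_ref(), the reference count associated with a
 * virtual address in the target process.
 */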
PUBLIC int do_get_refcount(message *m)
{
	int r, n;
	struct vmproc *vmp;
	endpoint_t target;
	u8_t cnt;
	vir_bytes addr;

	target = m->VMREFCNT_ENDPT;
	addr = m->VMREFCNT_ADDR;

	if ((r = vm_isokendpt(target, &n)) != OK)
		return EINVAL;

	vmp = &vmproc[n];
	addr = arch_vir2map(vmp, addr);

	r = map_get_ref(vmp, addr, &cnt);

	m->VMREFCNT_RETC = cnt;
	return r;
}

/*===========================================================================*
 *				do_munmap				     *
 *===========================================================================*/
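/* Handle munmap() (VM_MUNMAP) and its text variant (VM_MUNMAP_TEXT).
 * The address must be the start of a mapped region and the rounded-down
 * length must cover at least one page.
 */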
PUBLIC int do_munmap(message *m)
{
	int r, n;
	struct vmproc *vmp;
	vir_bytes addr, len;
	struct vir_region *vr;

	if((r=vm_isokendpt(m->m_source, &n)) != OK) {
		vm_panic("do_munmap: message from strange source", m->m_source);
	}

	vmp = &vmproc[n];

	if(!(vmp->vm_flags & VMF_HASPT))
		return ENXIO;

	if(m->m_type == VM_MUNMAP) {
		addr = (vir_bytes) arch_vir2map(vmp, (vir_bytes) m->VMUM_ADDR);
	} else if(m->m_type == VM_MUNMAP_TEXT) {
		addr = (vir_bytes) arch_vir2map_text(vmp, (vir_bytes) m->VMUM_ADDR);
	} else {
		vm_panic("do_munmap: strange type", NO_NUM);
	}

	if(!(vr = map_lookup(vmp, addr))) {
		printf("VM: unmap: virtual address 0x%lx not found in %d\n",
			m->VMUM_ADDR, vmp->vm_endpoint);
		return EFAULT;
	}

	/* Round the length down to whole pages. */
	len = m->VMUM_LEN;
	len -= len % VM_PAGE_SIZE;

	if(addr != vr->vaddr || len > vr->length || len < VM_PAGE_SIZE) {
		return EFAULT;
	}

	if(map_unmap_region(vmp, vr, len) != OK)
		vm_panic("do_munmap: map_unmap_region failed", NO_NUM);

	return OK;
}
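
/* unmap_ok gates the munmap()/munmap_text() overrides below: while it is
 * zero they return ENOSYS. It is presumably set elsewhere once VM may
 * safely modify its own page table.
 */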
int unmap_ok = 0;

/*===========================================================================*
 *			munmap_lin (used for overrides for VM)		     *
 *===========================================================================*/
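/* Unmap the page-aligned linear (already arch-translated) range
 * [addr, addr+len) from VM's own page table.
 */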
PRIVATE int munmap_lin(vir_bytes addr, size_t len)
{
	if(addr % VM_PAGE_SIZE) {
		printf("munmap_lin: offset not page aligned\n");
		return EFAULT;
	}

	if(len % VM_PAGE_SIZE) {
		printf("munmap_lin: len not page aligned\n");
		return EFAULT;
	}

	if(pt_writemap(&vmproc[VM_PROC_NR].vm_pt, addr, MAP_NONE, len, 0,
		WMF_OVERWRITE | WMF_FREE) != OK) {
		printf("munmap_lin: pt_writemap failed\n");
		return EFAULT;
	}

	return OK;
}

/*===========================================================================*
 *			munmap (override for VM)			     *
 *===========================================================================*/
PUBLIC int munmap(void *addr, size_t len)
{
	vir_bytes laddr;
	if(!unmap_ok)
		return ENOSYS;
	laddr = (vir_bytes) arch_vir2map(&vmproc[VM_PROC_NR], (vir_bytes) addr);
	return munmap_lin(laddr, len);
}

/*===========================================================================*
 *			munmap_text (override for VM)			     *
 *===========================================================================*/
PUBLIC int munmap_text(void *addr, size_t len)
{
	vir_bytes laddr;
	if(!unmap_ok)
		return ENOSYS;
	laddr = (vir_bytes) arch_vir2map_text(&vmproc[VM_PROC_NR],
		(vir_bytes) addr);
	return munmap_lin(laddr, len);
}