[minix.git] / servers / vm / mmap.c
#define _SYSTEM 1

#include <minix/callnr.h>
#include <minix/com.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/ds.h>
#include <minix/endpoint.h>
#include <minix/keymap.h>
#include <minix/minlib.h>
#include <minix/type.h>
#include <minix/ipc.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/safecopies.h>
#include <minix/bitmap.h>
#include <minix/debug.h>

#include <sys/mman.h>

#include <errno.h>
#include <assert.h>
#include <string.h>
#include <env.h>
#include <stdio.h>
#include <fcntl.h>
#include <memory.h>

#include "glo.h"
#include "proto.h"
#include "util.h"
#include "region.h"
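/* do_mmap: handle an mmap request from the calling process (m_source).
 * Only anonymous memory is supported here: the request must either carry
 * VMM_FD == -1 or have MAP_ANON set in VMM_FLAGS, and combining MAP_ANON
 * with a real file descriptor yields EINVAL. VMM_LEN is rounded up to a
 * whole number of pages, the MAP_ request flags are translated into
 * region (VR_) and mapping (MF_) flags, and the address of the new region,
 * as seen by the process, is returned in VMM_RETADDR.
 *
 * A minimal caller-side sketch; how the message is actually delivered to
 * VM (request type, library wrapper) is an assumption, not shown in this
 * file:
 *
 *	message m;
 *	m.VMM_FD = -1;			(anonymous mapping)
 *	m.VMM_FLAGS = MAP_ANON | MAP_PREALLOC;
 *	m.VMM_LEN = 3000;		(VM rounds this up to a page multiple)
 *	m.VMM_ADDR = 0;			(0: VM picks an address at or above
 *					 the process' stack top)
 *	...send to VM; the mapped address comes back in m.VMM_RETADDR...
 */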
/*===========================================================================*
 *				do_mmap					     *
 *===========================================================================*/
PUBLIC int do_mmap(message *m)
{
	int r, n;
	struct vmproc *vmp;
	int mfflags = 0;
	struct vir_region *vr = NULL;

	if((r=vm_isokendpt(m->m_source, &n)) != OK) {
		panic("do_mmap: message from strange source: %d", m->m_source);
	}

	vmp = &vmproc[n];

	if(!(vmp->vm_flags & VMF_HASPT))
		return ENXIO;

	if(m->VMM_FD == -1 || (m->VMM_FLAGS & MAP_ANON)) {
		int s;
		vir_bytes v;
		u32_t vrflags = VR_ANON | VR_WRITABLE;
		size_t len = (vir_bytes) m->VMM_LEN;

		if(m->VMM_FD != -1) {
			return EINVAL;
		}

		if(m->VMM_FLAGS & MAP_PREALLOC) mfflags |= MF_PREALLOC;
		if(m->VMM_FLAGS & MAP_LOWER16M) vrflags |= VR_LOWER16MB;
		if(m->VMM_FLAGS & MAP_LOWER1M) vrflags |= VR_LOWER1MB;
		if(m->VMM_FLAGS & MAP_ALIGN64K) vrflags |= VR_PHYS64K;
		if(m->VMM_FLAGS & MAP_SHARED) vrflags |= VR_SHARED;
		if(m->VMM_FLAGS & MAP_CONTIG) vrflags |= VR_CONTIG;

		if(len % VM_PAGE_SIZE)
			len += VM_PAGE_SIZE - (len % VM_PAGE_SIZE);

		if(!(vr = map_page_region(vmp,
			arch_vir2map(vmp,
				m->VMM_ADDR ? m->VMM_ADDR : vmp->vm_stacktop),
			VM_DATATOP, len, MAP_NONE, vrflags, mfflags))) {
			return ENOMEM;
		}
	} else {
		return ENOSYS;
	}

	/* Return mapping, as seen from process. */
	assert(vr);
	m->VMM_RETADDR = arch_map2vir(vmp, vr->vaddr);

	return OK;
}
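/* map_perm_check: decide whether 'caller' may map the physical range
 * [physaddr, physaddr+len) on behalf of 'target'. TTY may map anything
 * for anyone, MEM may map anything for itself, and any other caller may
 * only map for itself and only if the kernel grants permission for the
 * range via sys_privquery_mem(). Returns OK or an error code. */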
/*===========================================================================*
 *				map_perm_check				     *
 *===========================================================================*/
PUBLIC int map_perm_check(endpoint_t caller, endpoint_t target,
	phys_bytes physaddr, phys_bytes len)
{
	int r;

	/* TTY and memory are allowed to do anything.
	 * They have to be special cases as they have to be able to do
	 * anything; TTY even on behalf of anyone for the TIOCMAPMEM
	 * ioctl. MEM just for itself.
	 */
	if(caller == TTY_PROC_NR)
		return OK;
	if(caller != target)
		return EPERM;
	if(caller == MEM_PROC_NR)
		return OK;

	/* Anyone else needs explicit permission from the kernel (ultimately
	 * set by PCI).
	 */
	r = sys_privquery_mem(caller, physaddr, len);

	return r;
}
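/* do_map_phys: map the physical range of VMMP_LEN bytes starting at
 * VMMP_PHADDR into the address space of VMMP_EP (or of the caller when
 * VMMP_EP is SELF). The permission check uses the original, unrounded
 * range; the length is then rounded up to a page multiple and the range
 * is mapped as a direct, non-pagefaulting, writable region. The
 * process-visible address is returned in VMMP_VADDR_REPLY. */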
/*===========================================================================*
 *				do_map_phys				     *
 *===========================================================================*/
PUBLIC int do_map_phys(message *m)
{
	int r, n;
	struct vmproc *vmp;
	endpoint_t target;
	struct vir_region *vr;
	vir_bytes len;
	phys_bytes startaddr;

	target = m->VMMP_EP;
	len = m->VMMP_LEN;

	if(target == SELF)
		target = m->m_source;

	if((r=vm_isokendpt(target, &n)) != OK)
		return EINVAL;

	startaddr = (vir_bytes)m->VMMP_PHADDR;

	/* First check permission, then round range down/up. Caller can't
	 * help it if we can't map in lower than page granularity.
	 */
	if(map_perm_check(m->m_source, target, startaddr, len) != OK) {
		printf("VM: unauthorized mapping of 0x%lx by %d\n",
			startaddr, m->m_source);
		return EPERM;
	}

	vmp = &vmproc[n];

	if(!(vmp->vm_flags & VMF_HASPT))
		return ENXIO;

	if(len % VM_PAGE_SIZE)
		len += VM_PAGE_SIZE - (len % VM_PAGE_SIZE);

	if(!(vr = map_page_region(vmp, arch_vir2map(vmp, vmp->vm_stacktop),
		VM_DATATOP, len, startaddr,
		VR_DIRECT | VR_NOPF | VR_WRITABLE, 0))) {
		return ENOMEM;
	}

	m->VMMP_VADDR_REPLY = (void *) arch_map2vir(vmp, vr->vaddr);

	return OK;
}
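/* do_unmap_phys: undo a do_map_phys() mapping in VMUP_EP (or the caller
 * for SELF). The region at VMUM_ADDR must exist and must be a VR_DIRECT
 * region; it is then unmapped in its entirety. */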
/*===========================================================================*
 *				do_unmap_phys				     *
 *===========================================================================*/
PUBLIC int do_unmap_phys(message *m)
{
	int r, n;
	struct vmproc *vmp;
	endpoint_t target;
	struct vir_region *region;

	target = m->VMUP_EP;
	if(target == SELF)
		target = m->m_source;

	if((r=vm_isokendpt(target, &n)) != OK)
		return EINVAL;

	vmp = &vmproc[n];

	if(!(region = map_lookup(vmp,
		arch_vir2map(vmp, (vir_bytes) m->VMUM_ADDR)))) {
		return EINVAL;
	}

	if(!(region->flags & VR_DIRECT)) {
		return EINVAL;
	}

	if(map_unmap_region(vmp, region, region->length) != OK) {
		return EINVAL;
	}

	return OK;
}
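/* do_remap: map the shared region starting at VMRE_SA in process VMRE_S
 * into process VMRE_D (at VMRE_DA, or at an address chosen by the system;
 * see map_remap()). The source address must be the exact start of a
 * VR_SHARED region, and VMRE_SIZE (rounded up to a page multiple) must
 * equal the region's length. The address chosen in the destination is
 * returned in VMRE_RETA. */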
/*===========================================================================*
 *				do_remap				     *
 *===========================================================================*/
PUBLIC int do_remap(message *m)
{
	int d, dn, s, sn;
	vir_bytes da, sa, startv;
	size_t size;
	struct vir_region *vr, *region;
	struct vmproc *dvmp, *svmp;
	int r;

	d = m->VMRE_D;
	s = m->VMRE_S;
	da = (vir_bytes) m->VMRE_DA;
	sa = (vir_bytes) m->VMRE_SA;
	size = m->VMRE_SIZE;

	if ((r = vm_isokendpt(d, &dn)) != OK)
		return EINVAL;
	if ((r = vm_isokendpt(s, &sn)) != OK)
		return EINVAL;

	dvmp = &vmproc[dn];
	svmp = &vmproc[sn];

	/* da is not translated by arch_vir2map(); it is handled a little
	 * differently, since map_remap() has to know whether the user wants
	 * to bind to that exact address or have the system choose one.
	 */
	sa = arch_vir2map(svmp, sa);

	if (!(region = map_lookup(svmp, sa)))
		return EINVAL;

	if(region->vaddr != sa) {
		printf("VM: do_remap: not start of region.\n");
		return EFAULT;
	}

	if(!(region->flags & VR_SHARED)) {
		printf("VM: do_remap: not shared.\n");
		return EFAULT;
	}

	if (size % VM_PAGE_SIZE)
		size += VM_PAGE_SIZE - size % VM_PAGE_SIZE;

	if(size != region->length) {
		printf("VM: do_remap: not size of region.\n");
		return EFAULT;
	}

	if ((r = map_remap(dvmp, da, size, region, &startv)) != OK)
		return r;

	m->VMRE_RETA = (char *) arch_map2vir(dvmp, startv);
	return OK;
}
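/* do_shared_unmap: unmap a shared region from process VMUN_ENDPT.
 * VMUN_ADDR must be the exact start of a VR_SHARED region; the whole
 * region is removed. */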
/*===========================================================================*
 *				do_shared_unmap				     *
 *===========================================================================*/
PUBLIC int do_shared_unmap(message *m)
{
	int r, n;
	struct vmproc *vmp;
	endpoint_t target;
	struct vir_region *vr;
	vir_bytes addr;

	target = m->VMUN_ENDPT;

	if ((r = vm_isokendpt(target, &n)) != OK)
		return EINVAL;

	vmp = &vmproc[n];

	addr = arch_vir2map(vmp, m->VMUN_ADDR);

	if(!(vr = map_lookup(vmp, addr))) {
		printf("VM: addr 0x%lx not found.\n", m->VMUN_ADDR);
		return EFAULT;
	}

	if(vr->vaddr != addr) {
		printf("VM: wrong address for shared_unmap.\n");
		return EFAULT;
	}

	if(!(vr->flags & VR_SHARED)) {
		printf("VM: address does not point to shared region.\n");
		return EFAULT;
	}

	if(map_unmap_region(vmp, vr, vr->length) != OK)
		panic("do_shared_unmap: map_unmap_region failed");

	return OK;
}
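/* do_get_phys: return, in VMPHYS_RETA, the physical address backing
 * virtual address VMPHYS_ADDR in process VMPHYS_ENDPT, as reported by
 * map_get_phys(). */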
/*===========================================================================*
 *				do_get_phys				     *
 *===========================================================================*/
PUBLIC int do_get_phys(message *m)
{
	int r, n;
	struct vmproc *vmp;
	endpoint_t target;
	phys_bytes ret;
	vir_bytes addr;

	target = m->VMPHYS_ENDPT;
	addr = m->VMPHYS_ADDR;

	if ((r = vm_isokendpt(target, &n)) != OK)
		return EINVAL;

	vmp = &vmproc[n];
	addr = arch_vir2map(vmp, addr);

	r = map_get_phys(vmp, addr, &ret);

	m->VMPHYS_RETA = ret;
	return r;
}
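/* do_get_refcount: return, in VMREFCNT_RETC, the reference count of the
 * memory backing virtual address VMREFCNT_ADDR in process VMREFCNT_ENDPT,
 * as reported by map_get_ref(). */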
/*===========================================================================*
 *				do_get_refcount				     *
 *===========================================================================*/
PUBLIC int do_get_refcount(message *m)
{
	int r, n;
	struct vmproc *vmp;
	endpoint_t target;
	u8_t cnt;
	vir_bytes addr;

	target = m->VMREFCNT_ENDPT;
	addr = m->VMREFCNT_ADDR;

	if ((r = vm_isokendpt(target, &n)) != OK)
		return EINVAL;

	vmp = &vmproc[n];
	addr = arch_vir2map(vmp, addr);

	r = map_get_ref(vmp, addr, &cnt);

	m->VMREFCNT_RETC = cnt;
	return r;
}
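/* do_munmap: handle VM_MUNMAP and VM_MUNMAP_TEXT requests from a process
 * for its own address space. VMUM_ADDR must be the start of an existing
 * region, and VMUM_LEN (rounded up to a page multiple) must be at least
 * one page and no larger than the region; that many bytes are unmapped. */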
/*===========================================================================*
 *				do_munmap				     *
 *===========================================================================*/
PUBLIC int do_munmap(message *m)
{
	int r, n;
	struct vmproc *vmp;
	vir_bytes addr, len;
	struct vir_region *vr;

	if((r=vm_isokendpt(m->m_source, &n)) != OK) {
		panic("do_munmap: message from strange source: %d", m->m_source);
	}

	vmp = &vmproc[n];

	if(!(vmp->vm_flags & VMF_HASPT))
		return ENXIO;

	if(m->m_type == VM_MUNMAP) {
		addr = (vir_bytes) arch_vir2map(vmp, (vir_bytes) m->VMUM_ADDR);
	} else if(m->m_type == VM_MUNMAP_TEXT) {
		addr = (vir_bytes) arch_vir2map_text(vmp, (vir_bytes) m->VMUM_ADDR);
	} else {
		panic("do_munmap: strange type");
	}

	if(!(vr = map_lookup(vmp, addr))) {
		printf("VM: unmap: virtual address 0x%lx not found in %d\n",
			m->VMUM_ADDR, vmp->vm_endpoint);
		return EFAULT;
	}

	len = m->VMUM_LEN;
	if (len % VM_PAGE_SIZE)
		len += VM_PAGE_SIZE - (len % VM_PAGE_SIZE);

	if(addr != vr->vaddr || len > vr->length || len < VM_PAGE_SIZE) {
		return EFAULT;
	}

	if(map_unmap_region(vmp, vr, len) != OK)
		panic("do_munmap: map_unmap_region failed");

	return OK;
}
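/* The routines below let VM unmap pages from its own address space: the
 * munmap()/munmap_text() calls are overridden for the VM process itself
 * so that they rewrite VM's own page table directly through pt_writemap().
 * They return ENOSYS as long as unmap_ok is zero. */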
int unmap_ok = 0;
/*===========================================================================*
 *			munmap_lin (used for overrides for VM)		     *
 *===========================================================================*/
PRIVATE int munmap_lin(vir_bytes addr, size_t len)
{
	if(addr % VM_PAGE_SIZE) {
		printf("munmap_lin: offset not page aligned\n");
		return EFAULT;
	}

	if(len % VM_PAGE_SIZE) {
		printf("munmap_lin: len not page aligned\n");
		return EFAULT;
	}

	if(pt_writemap(&vmproc[VM_PROC_NR].vm_pt, addr, MAP_NONE, len, 0,
		WMF_OVERWRITE | WMF_FREE) != OK) {
		printf("munmap_lin: pt_writemap failed\n");
		return EFAULT;
	}

	return OK;
}
/*===========================================================================*
 *			munmap (override for VM)			     *
 *===========================================================================*/
PUBLIC int munmap(void *addr, size_t len)
{
	vir_bytes laddr;
	if(!unmap_ok)
		return ENOSYS;
	laddr = (vir_bytes) arch_vir2map(&vmproc[VM_PROC_NR], (vir_bytes) addr);
	return munmap_lin(laddr, len);
}
/*===========================================================================*
 *			munmap_text (override for VM)			     *
 *===========================================================================*/
PUBLIC int munmap_text(void *addr, size_t len)
{
	vir_bytes laddr;
	if(!unmap_ok)
		return ENOSYS;
	laddr = (vir_bytes) arch_vir2map_text(&vmproc[VM_PROC_NR],
		(vir_bytes) addr);
	return munmap_lin(laddr, len);
}