VM: restore >4k secondary cache functionality
[minix.git] / servers/vm/mmap.c

#define _SYSTEM 1

#include <minix/callnr.h>
#include <minix/com.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/ds.h>
#include <minix/endpoint.h>
#include <minix/keymap.h>
#include <minix/minlib.h>
#include <minix/type.h>
#include <minix/ipc.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/safecopies.h>
#include <minix/bitmap.h>
#include <minix/debug.h>

#include <sys/mman.h>
#include <sys/param.h>

#include <errno.h>
#include <assert.h>
#include <string.h>
#include <env.h>
#include <stdio.h>
#include <fcntl.h>
#include <memory.h>

#include "glo.h"
#include "proto.h"
#include "util.h"
#include "region.h"

/*===========================================================================*
 *				do_mmap					     *
 *===========================================================================*/
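/* Handle an mmap request: create a new anonymous, writable memory region in
 * the caller's address space, or, with MAP_THIRDPARTY (allowed only for VFS
 * and RS), in another process's address space. File-backed mappings are not
 * handled here and are rejected with ENOSYS. The virtual address of the new
 * region is returned in VMM_RETADDR.
 */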
int do_mmap(message *m)
{
	int r, n;
	struct vmproc *vmp;
	int mfflags = 0;
	vir_bytes addr;
	struct vir_region *vr = NULL;
	int execpriv = 0;

	/* RS and VFS can do slightly more special mmap() things */
	if(m->m_source == VFS_PROC_NR || m->m_source == RS_PROC_NR)
		execpriv = 1;

	if(m->VMM_FLAGS & MAP_THIRDPARTY) {
		if(!execpriv) return EPERM;
		if((r=vm_isokendpt(m->VMM_FORWHOM, &n)) != OK)
			return ESRCH;
	} else {
		/* regular mmap, i.e. for caller */
		if((r=vm_isokendpt(m->m_source, &n)) != OK) {
			panic("do_mmap: message from strange source: %d",
				m->m_source);
		}
	}

	vmp = &vmproc[n];

	if(m->VMM_FD == -1 || (m->VMM_FLAGS & MAP_ANON)) {
		u32_t vrflags = VR_ANON | VR_WRITABLE;
		size_t len = (vir_bytes) m->VMM_LEN;

		if(m->VMM_FD != -1 || len <= 0) {
			return EINVAL;
		}

		/* Contiguous phys memory has to be preallocated. */
		if((m->VMM_FLAGS & (MAP_CONTIG|MAP_PREALLOC)) == MAP_CONTIG) {
			return EINVAL;
		}

		if(m->VMM_FLAGS & MAP_PREALLOC) mfflags |= MF_PREALLOC;
		if(m->VMM_FLAGS & MAP_LOWER16M) vrflags |= VR_LOWER16MB;
		if(m->VMM_FLAGS & MAP_LOWER1M) vrflags |= VR_LOWER1MB;
		if(m->VMM_FLAGS & MAP_ALIGN64K) vrflags |= VR_PHYS64K;
		if(m->VMM_FLAGS & MAP_UNINITIALIZED) {
			if(!execpriv) return EPERM;
			vrflags |= VR_UNINITIALIZED;
		}
		if(m->VMM_FLAGS & MAP_IPC_SHARED) {
			vrflags |= VR_SHARED;
			/* Shared memory has to be preallocated. */
			if((m->VMM_FLAGS & (MAP_PREALLOC|MAP_ANON)) !=
				(MAP_PREALLOC|MAP_ANON)) {
				return EINVAL;
			}
		}
		if(m->VMM_FLAGS & MAP_CONTIG) vrflags |= VR_CONTIG;

		if(len % VM_PAGE_SIZE)
			len += VM_PAGE_SIZE - (len % VM_PAGE_SIZE);

		vr = NULL;
		if (m->VMM_ADDR || (m->VMM_FLAGS & MAP_FIXED)) {
			/* An address is given, first try at that address. */
			addr = m->VMM_ADDR;
			vr = map_page_region(vmp, addr, 0, len, MAP_NONE,
				vrflags, mfflags);
			if(!vr && (m->VMM_FLAGS & MAP_FIXED))
				return ENOMEM;
		}
		if (!vr) {
			/* No address given or address already in use. */
			vr = map_page_region(vmp, 0, VM_DATATOP, len,
				MAP_NONE, vrflags, mfflags);
		}
		if (!vr) {
			return ENOMEM;
		}
	} else {
		return ENOSYS;
	}

	/* Return mapping, as seen from process. */
	assert(vr);
	m->VMM_RETADDR = vr->vaddr;

	return OK;
}

/*===========================================================================*
 *			      map_perm_check				     *
 *===========================================================================*/
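/* Check whether 'caller' may map the physical memory range
 * [physaddr, physaddr+len) into the address space of 'target'.
 */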
int map_perm_check(endpoint_t caller, endpoint_t target,
	phys_bytes physaddr, phys_bytes len)
{
	int r;

	/* TTY and memory are allowed to do anything.
	 * They have to be special cases as they have to be able to do
	 * anything; TTY even on behalf of anyone for the TIOCMAPMEM
	 * ioctl. MEM just for itself.
	 */
	if(caller == TTY_PROC_NR)
		return OK;
	if(caller != target)
		return EPERM;
	if(caller == MEM_PROC_NR)
		return OK;

	/* Anyone else needs explicit permission from the kernel (ultimately
	 * set by PCI).
	 */
	r = sys_privquery_mem(caller, physaddr, len);

	return r;
}

/*===========================================================================*
 *				do_map_phys				     *
 *===========================================================================*/
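/* Map the physical memory range given by VMMP_PHADDR and VMMP_LEN into the
 * address space of the target process (VMMP_EP), after checking permission
 * with map_perm_check(). The resulting virtual address is returned in
 * VMMP_VADDR_REPLY.
 */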
int do_map_phys(message *m)
{
	int r, n;
	struct vmproc *vmp;
	endpoint_t target;
	struct vir_region *vr;
	vir_bytes len;
	phys_bytes startaddr;
	size_t offset;

	target = m->VMMP_EP;
	len = m->VMMP_LEN;

	if (len <= 0) return EINVAL;

	if(target == SELF)
		target = m->m_source;

	if((r=vm_isokendpt(target, &n)) != OK)
		return EINVAL;

	startaddr = (vir_bytes)m->VMMP_PHADDR;

	/* First check permission, then round range down/up. Caller can't
	 * help it if we can't map in lower than page granularity.
	 */
	if(map_perm_check(m->m_source, target, startaddr, len) != OK) {
		printf("VM: unauthorized mapping of 0x%lx by %d\n",
			startaddr, m->m_source);
		return EPERM;
	}

	vmp = &vmproc[n];

	offset = startaddr % VM_PAGE_SIZE;
	len += offset;
	startaddr -= offset;

	if(len % VM_PAGE_SIZE)
		len += VM_PAGE_SIZE - (len % VM_PAGE_SIZE);

	if(!(vr = map_page_region(vmp, 0, VM_DATATOP, len, startaddr,
		VR_DIRECT | VR_NOPF | VR_WRITABLE, 0))) {
		return ENOMEM;
	}

	m->VMMP_VADDR_REPLY = (void *) (vr->vaddr + offset);

	return OK;
}

/*===========================================================================*
 *				do_unmap_phys				     *
 *===========================================================================*/
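/* Undo a mapping made by do_map_phys(): look up the region at VMUM_ADDR in
 * the target process (VMUP_EP), verify that it is a direct physical mapping
 * (VR_DIRECT), and unmap it.
 */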
int do_unmap_phys(message *m)
{
	int r, n;
	struct vmproc *vmp;
	endpoint_t target;
	struct vir_region *region;

	target = m->VMUP_EP;
	if(target == SELF)
		target = m->m_source;

	if((r=vm_isokendpt(target, &n)) != OK)
		return EINVAL;

	vmp = &vmproc[n];

	if(!(region = map_lookup(vmp, (vir_bytes) m->VMUM_ADDR, NULL))) {
		return EINVAL;
	}

	if(!(region->flags & VR_DIRECT)) {
		return EINVAL;
	}

	if(map_unmap_region(vmp, region, 0, region->length) != OK) {
		return EINVAL;
	}

	return OK;
}

/*===========================================================================*
 *				do_remap				     *
 *===========================================================================*/
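/* Handle VM_REMAP and VM_REMAP_RO: map an entire shared (VR_SHARED) region
 * of the source process into the destination process, writable or read-only
 * depending on the request type, and return the new virtual address in
 * VMRE_RETA.
 */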
int do_remap(message *m)
{
	int dn, sn;
	vir_bytes da, sa, startv;
	size_t size;
	struct vir_region *region;
	struct vmproc *dvmp, *svmp;
	int r;
	int readonly;

	if(m->m_type == VM_REMAP)
		readonly = 0;
	else if(m->m_type == VM_REMAP_RO)
		readonly = 1;
	else panic("do_remap: can't be");

	da = (vir_bytes) m->VMRE_DA;
	sa = (vir_bytes) m->VMRE_SA;
	size = m->VMRE_SIZE;

	if (size <= 0) return EINVAL;

	if ((r = vm_isokendpt((endpoint_t) m->VMRE_D, &dn)) != OK)
		return EINVAL;
	if ((r = vm_isokendpt((endpoint_t) m->VMRE_S, &sn)) != OK)
		return EINVAL;

	dvmp = &vmproc[dn];
	svmp = &vmproc[sn];

	/* da is not translated by arch_vir2map(),
	 * it's handled a little differently,
	 * since in map_remap(), we have to know
	 * about whether the user needs to bind to
	 * THAT address or be chosen by the system.
	 */
	if (!(region = map_lookup(svmp, sa, NULL)))
		return EINVAL;

	if(region->vaddr != sa) {
		printf("VM: do_remap: not start of region.\n");
		return EFAULT;
	}

	if(!(region->flags & VR_SHARED)) {
		printf("VM: do_remap: not shared.\n");
		return EFAULT;
	}

	if (size % VM_PAGE_SIZE)
		size += VM_PAGE_SIZE - size % VM_PAGE_SIZE;

	if(size != region->length) {
		printf("VM: do_remap: not size of region.\n");
		return EFAULT;
	}

	if ((r = map_remap(dvmp, da, size, region, &startv, readonly)) != OK)
		return r;

	m->VMRE_RETA = (char *) startv;
	return OK;
}

/*===========================================================================*
 *				do_shared_unmap				     *
 *===========================================================================*/
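/* Unmap a shared region: look up the region starting at VMUN_ADDR in the
 * target process (VMUN_ENDPT), verify that it is shared (VR_SHARED), and
 * unmap it in its entirety.
 */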
int do_shared_unmap(message *m)
{
	int r, n;
	struct vmproc *vmp;
	endpoint_t target;
	struct vir_region *vr;
	vir_bytes addr;

	target = m->VMUN_ENDPT;
	if (target == SELF)
		target = m->m_source;

	if ((r = vm_isokendpt(target, &n)) != OK)
		return EINVAL;

	vmp = &vmproc[n];

	addr = m->VMUN_ADDR;

	if(!(vr = map_lookup(vmp, addr, NULL))) {
		printf("VM: addr 0x%lx not found.\n", m->VMUN_ADDR);
		return EFAULT;
	}

	if(vr->vaddr != addr) {
		printf("VM: wrong address for shared_unmap.\n");
		return EFAULT;
	}

	if(!(vr->flags & VR_SHARED)) {
		printf("VM: address does not point to shared region.\n");
		return EFAULT;
	}

	if(map_unmap_region(vmp, vr, 0, vr->length) != OK)
		panic("do_shared_unmap: map_unmap_region failed");

	return OK;
}

/*===========================================================================*
 *				do_get_phys				     *
 *===========================================================================*/
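/* Return, in VMPHYS_RETA, the physical address backing virtual address
 * VMPHYS_ADDR in the target process (VMPHYS_ENDPT).
 */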
int do_get_phys(message *m)
{
	int r, n;
	struct vmproc *vmp;
	endpoint_t target;
	phys_bytes ret;
	vir_bytes addr;

	target = m->VMPHYS_ENDPT;
	addr = m->VMPHYS_ADDR;

	if ((r = vm_isokendpt(target, &n)) != OK)
		return EINVAL;

	vmp = &vmproc[n];

	r = map_get_phys(vmp, addr, &ret);

	m->VMPHYS_RETA = ret;
	return r;
}

/*===========================================================================*
 *				do_get_refcount				     *
 *===========================================================================*/
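/* Return, in VMREFCNT_RETC, the reference count of the physical memory
 * backing virtual address VMREFCNT_ADDR in the target process
 * (VMREFCNT_ENDPT).
 */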
int do_get_refcount(message *m)
{
	int r, n;
	struct vmproc *vmp;
	endpoint_t target;
	u8_t cnt;
	vir_bytes addr;

	target = m->VMREFCNT_ENDPT;
	addr = m->VMREFCNT_ADDR;

	if ((r = vm_isokendpt(target, &n)) != OK)
		return EINVAL;

	vmp = &vmproc[n];

	r = map_get_ref(vmp, addr, &cnt);

	m->VMREFCNT_RETC = cnt;
	return r;
}

/*===========================================================================*
 *				do_munmap				     *
 *===========================================================================*/
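/* Handle a VM_MUNMAP request from the caller itself: unmap the page-aligned
 * range of VMUM_LEN bytes (rounded up to whole pages) starting at VMUM_ADDR
 * from the caller's address space. The range must fall within a single
 * region.
 */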
int do_munmap(message *m)
{
	int r, n;
	struct vmproc *vmp;
	vir_bytes addr, len, offset;
	struct vir_region *vr;

	if((r=vm_isokendpt(m->m_source, &n)) != OK) {
		panic("do_munmap: message from strange source: %d",
			m->m_source);
	}

	vmp = &vmproc[n];

	assert(m->m_type == VM_MUNMAP);
	addr = (vir_bytes) m->VMUM_ADDR;

	if(!(vr = map_lookup(vmp, addr, NULL))) {
		printf("VM: unmap: virtual address %p not found in %d\n",
			m->VMUM_ADDR, vmp->vm_endpoint);
		return EFAULT;
	}

	if(addr % VM_PAGE_SIZE)
		return EFAULT;

	len = roundup(m->VMUM_LEN, VM_PAGE_SIZE);

	offset = addr - vr->vaddr;

	if(offset + len > vr->length) {
		printf("munmap: addr 0x%lx len 0x%lx spills out of region\n",
			addr, len);
		return EFAULT;
	}

	if(map_unmap_region(vmp, vr, offset, len) != OK)
		panic("do_munmap: map_unmap_region failed");

	return OK;
}
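
/* Guard for minix_munmap() below: while it is zero, attempts by VM to unmap
 * its own memory are refused with ENOSYS. It is expected to be set elsewhere
 * once VM is far enough initialized that unmapping is safe.
 */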
int unmap_ok = 0;

/*===========================================================================*
 *		    munmap_lin (used for overrides for VM)		     *
 *===========================================================================*/
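/* Unmap a page-aligned range of VM's own address space by writing an empty
 * (MAP_NONE) mapping over it in VM's page table, freeing the underlying
 * memory.
 */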
static int munmap_lin(vir_bytes addr, size_t len)
{
	if(addr % VM_PAGE_SIZE) {
		printf("munmap_lin: offset not page aligned\n");
		return EFAULT;
	}

	if(len % VM_PAGE_SIZE) {
		printf("munmap_lin: len not page aligned\n");
		return EFAULT;
	}

	if(pt_writemap(NULL, &vmproc[VM_PROC_NR].vm_pt, addr, MAP_NONE, len, 0,
		WMF_OVERWRITE | WMF_FREE) != OK) {
		printf("munmap_lin: pt_writemap failed\n");
		return EFAULT;
	}

	return OK;
}

/*===========================================================================*
 *			   munmap (override for VM)			     *
 *===========================================================================*/
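/* munmap() as used within VM itself: refuse with ENOSYS until unmap_ok is
 * set, otherwise unmap the range directly from VM's own page table via
 * munmap_lin().
 */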
int minix_munmap(void *addr, size_t len)
{
	vir_bytes laddr;

	if(!unmap_ok)
		return ENOSYS;

	laddr = (vir_bytes) addr;
	return munmap_lin(laddr, len);
}