/* minix/servers/vm/mmap.c */
#define _SYSTEM 1

#include <minix/callnr.h>
#include <minix/com.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/ds.h>
#include <minix/endpoint.h>
#include <minix/minlib.h>
#include <minix/type.h>
#include <minix/ipc.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/safecopies.h>
#include <minix/bitmap.h>
#include <minix/debug.h>

#include <machine/vmparam.h>

#include <sys/mman.h>
#include <sys/param.h>

#include <errno.h>
#include <assert.h>
#include <string.h>
#include <env.h>
#include <stdio.h>
#include <fcntl.h>

#include "glo.h"
#include "proto.h"
#include "util.h"
#include "region.h"
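
/* mmap_region: create a virtual region of at least len bytes in process vmp.
 * Translates the MINIX-specific mmap() flags into region (VR_*) and mapping
 * (MF_*) flags; MAP_UNINITIALIZED is honored only for privileged callers
 * (execpriv). For MAP_FIXED requests, any existing mappings in the target
 * range are unmapped first. Returns the new region, or NULL on failure.
 */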
static struct vir_region *mmap_region(struct vmproc *vmp, vir_bytes addr,
        u32_t vmm_flags, size_t len, u32_t vrflags,
        mem_type_t *mt, int execpriv)
{
        u32_t mfflags = 0;
        struct vir_region *vr = NULL;

        if(vmm_flags & MAP_LOWER16M) vrflags |= VR_LOWER16MB;
        if(vmm_flags & MAP_LOWER1M) vrflags |= VR_LOWER1MB;
        if(vmm_flags & MAP_ALIGNMENT_64KB) vrflags |= VR_PHYS64K;
        if(vmm_flags & MAP_PREALLOC) mfflags |= MF_PREALLOC;
        if(vmm_flags & MAP_UNINITIALIZED) {
                if(!execpriv) return NULL;
                vrflags |= VR_UNINITIALIZED;
        }

        if(len <= 0) {
                return NULL;
        }

        if(len % VM_PAGE_SIZE)
                len += VM_PAGE_SIZE - (len % VM_PAGE_SIZE);

        if (addr && (vmm_flags & MAP_FIXED)) {
                int r = map_unmap_range(vmp, addr, len);
                if(r != OK) {
                        printf("mmap_region: map_unmap_range failed (%d)\n", r);
                        return NULL;
                }
        }

        if (addr || (vmm_flags & MAP_FIXED)) {
                /* An address is given, first try at that address. */
                vr = map_page_region(vmp, addr, 0, len,
                        vrflags, mfflags, mt);
                if(!vr && (vmm_flags & MAP_FIXED))
                        return NULL;
        }

        if (!vr) {
                /* No address given or address already in use. */
                vr = map_page_region(vmp, VM_PAGE_SIZE, VM_DATATOP, len,
                        vrflags, mfflags, mt);
        }

        return vr;
}
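
/* mmap_file: set up a file-backed mapping in process vmp, using the file
 * descriptor, device, inode and size that VFS resolved for us. The offset
 * and length are page-aligned here; on success the address as seen by the
 * process is returned through retaddr.
 */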
static int mmap_file(struct vmproc *vmp,
        int vmfd, off_t file_offset, int flags,
        ino_t ino, dev_t dev, u64_t filesize, vir_bytes addr, vir_bytes len,
        vir_bytes *retaddr, u16_t clearend, int writable, int mayclosefd)
{
/* VFS has replied to a VMVFSREQ_FDLOOKUP request. */
        struct vir_region *vr;
        u64_t page_offset;
        int result = OK;
        u32_t vrflags = 0;

        if(writable) vrflags |= VR_WRITABLE;

        /* Do some page alignments. */
        if((page_offset = (file_offset % VM_PAGE_SIZE))) {
                file_offset -= page_offset;
                len += page_offset;
        }

        len = roundup(len, VM_PAGE_SIZE);

        /* All numbers should be page-aligned now. */
        assert(!(len % VM_PAGE_SIZE));
        assert(!(filesize % VM_PAGE_SIZE));
        assert(!(file_offset % VM_PAGE_SIZE));

#if 0
        /* XXX ld.so relies on longer-than-file mapping */
        if((u64_t) len + file_offset > filesize) {
                printf("VM: truncating mmap dev 0x%x ino %d beyond file size in %d; offset %llu, len %lu, size %llu; ",
                        dev, ino, vmp->vm_endpoint,
                        file_offset, len, filesize);
                len = filesize - file_offset;
                return EINVAL;
        }
#endif

        if(!(vr = mmap_region(vmp, addr, flags, len,
                vrflags, &mem_type_mappedfile, 0))) {
                result = ENOMEM;
        } else {
                *retaddr = vr->vaddr + page_offset;
                result = OK;

                mappedfile_setfile(vmp, vr, vmfd,
                        file_offset, dev, ino, clearend, 1, mayclosefd);
        }

        return result;
}
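
/* do_vfs_mmap: handle a mapping request coming from VFS itself; map the file
 * range described in m_vm_vfs_mmap into the given process as a private
 * mapping at a fixed address.
 */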
int do_vfs_mmap(message *m)
{
        vir_bytes v;
        struct vmproc *vmp;
        int r, n;
        u16_t clearend, flags = 0;

        /* It might be disabled */
        if(!enable_filemap) return ENXIO;

        clearend = m->m_vm_vfs_mmap.clearend;
        flags = m->m_vm_vfs_mmap.flags;

        if((r=vm_isokendpt(m->m_vm_vfs_mmap.who, &n)) != OK)
                panic("bad ep %d from vfs", m->m_vm_vfs_mmap.who);
        vmp = &vmproc[n];

        return mmap_file(vmp, m->m_vm_vfs_mmap.fd, m->m_vm_vfs_mmap.offset,
                MAP_PRIVATE | MAP_FIXED,
                m->m_vm_vfs_mmap.ino, m->m_vm_vfs_mmap.dev,
                (u64_t) LONG_MAX * VM_PAGE_SIZE,
                m->m_vm_vfs_mmap.vaddr, m->m_vm_vfs_mmap.len, &v,
                clearend, flags, 0);
}
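
/* mmap_file_cont: continuation of a file-backed mmap() call. VFS has replied
 * to our VMVFSREQ_FDLOOKUP request; finish the mapping and unblock the
 * requesting process by sending it the mmap reply.
 */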
static void mmap_file_cont(struct vmproc *vmp, message *replymsg, void *cbarg,
        void *origmsg_v)
{
        message *origmsg = (message *) origmsg_v;
        message mmap_reply;
        int result;
        int writable = 0;
        vir_bytes v = (vir_bytes) MAP_FAILED;

        if(origmsg->m_mmap.prot & PROT_WRITE)
                writable = 1;

        if(replymsg->VMV_RESULT != OK) {
#if 0   /* Noisy diagnostic for mmap() by ld.so */
                printf("VM: VFS reply failed (%d)\n", replymsg->VMV_RESULT);
                sys_diagctl_stacktrace(vmp->vm_endpoint);
#endif
                /* Propagate the error that VFS reported for the lookup. */
                result = replymsg->VMV_RESULT;
        } else {
                /* Finish mmap */
                result = mmap_file(vmp, replymsg->VMV_FD, origmsg->m_mmap.offset,
                        origmsg->m_mmap.flags,
                        replymsg->VMV_INO, replymsg->VMV_DEV,
                        (u64_t) replymsg->VMV_SIZE_PAGES*PAGE_SIZE,
                        (vir_bytes) origmsg->m_mmap.addr,
                        origmsg->m_mmap.len, &v, 0, writable, 1);
        }

        /* Unblock requesting process. */
        memset(&mmap_reply, 0, sizeof(mmap_reply));
        mmap_reply.m_type = result;
        mmap_reply.m_mmap.retaddr = (void *) v;

        if(ipc_send(vmp->vm_endpoint, &mmap_reply) != OK)
                panic("VM: mmap_file_cont: ipc_send() failed");
}

/*===========================================================================*
 *                              do_mmap                                      *
 *===========================================================================*/
int do_mmap(message *m)
{
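/* Perform the mmap(2) call on behalf of the caller (or, with MAP_THIRDPARTY,
 * on behalf of another process). Anonymous mappings are set up directly;
 * file mappings are forwarded to VFS for a file descriptor lookup and
 * completed in mmap_file_cont().
 */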
        int r, n;
        struct vmproc *vmp;
        vir_bytes addr = (vir_bytes) m->m_mmap.addr;
        struct vir_region *vr = NULL;
        int execpriv = 0;
        size_t len = (vir_bytes) m->m_mmap.len;

        /* RS and VFS can do slightly more special mmap() things */
        if(m->m_source == VFS_PROC_NR || m->m_source == RS_PROC_NR)
                execpriv = 1;

        if(m->m_mmap.flags & MAP_THIRDPARTY) {
                if(!execpriv) return EPERM;
                if((r=vm_isokendpt(m->m_mmap.forwhom, &n)) != OK)
                        return ESRCH;
        } else {
                /* regular mmap, i.e. for the caller itself */
                if((r=vm_isokendpt(m->m_source, &n)) != OK) {
                        panic("do_mmap: message from strange source: %d",
                                m->m_source);
                }
        }

        vmp = &vmproc[n];

        /* SUSv3 specifies that mmap() should fail if length is 0. */
        if(len <= 0) {
                return EINVAL;
        }

        if(m->m_mmap.fd == -1 || (m->m_mmap.flags & MAP_ANON)) {
                /* anonymous memory in some form */
                mem_type_t *mt = NULL;

                if(m->m_mmap.fd != -1) {
                        printf("VM: mmap: fd %d, len 0x%x\n", m->m_mmap.fd, len);
                        return EINVAL;
                }

                /* Contiguous physical memory has to be preallocated. */
                if((m->m_mmap.flags & (MAP_CONTIG|MAP_PREALLOC)) == MAP_CONTIG) {
                        return EINVAL;
                }

                if(m->m_mmap.flags & MAP_CONTIG) {
                        mt = &mem_type_anon_contig;
                } else  mt = &mem_type_anon;

                if(!(vr = mmap_region(vmp, addr, m->m_mmap.flags, len,
                        VR_WRITABLE | VR_ANON, mt, execpriv))) {
                        return ENOMEM;
                }
        } else {
                /* File mapping might be disabled */
                if(!enable_filemap) return ENXIO;

                /* For files, the only thing we cannot accept is a writable
                 * MAP_SHARED mapping.
                 */
                if((m->m_mmap.flags & MAP_SHARED) && (m->m_mmap.prot & PROT_WRITE)) {
                        return ENXIO;
                }

                if(vfs_request(VMVFSREQ_FDLOOKUP, m->m_mmap.fd, vmp, 0, 0,
                        mmap_file_cont, NULL, m, sizeof(*m)) != OK) {
                        printf("VM: vfs_request for mmap failed\n");
                        return ENXIO;
                }

                /* Request queued; don't reply yet. */
                return SUSPEND;
        }

        /* Return mapping, as seen from the process. */
        m->m_mmap.retaddr = (void *) vr->vaddr;

        return OK;
}

/*===========================================================================*
 *                              map_perm_check                               *
 *===========================================================================*/
static int map_perm_check(endpoint_t caller, endpoint_t target,
        phys_bytes physaddr, phys_bytes len)
{
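/* Check whether 'caller' is allowed to map the given physical memory range
 * on behalf of 'target'.
 */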
        int r;

        /* TTY and memory are allowed to do anything.
         * They have to be special cases as they have to be able to do
         * anything; TTY even on behalf of anyone for the TIOCMAPMEM
         * ioctl. MEM just for itself.
         */
        if(caller == TTY_PROC_NR)
                return OK;
        if(caller != target)
                return EPERM;
        if(caller == MEM_PROC_NR)
                return OK;

        /* Anyone else needs explicit permission from the kernel (ultimately
         * set by PCI).
         */
        r = sys_privquery_mem(caller, physaddr, len);

        return r;
}

/*===========================================================================*
 *                              do_map_phys                                  *
 *===========================================================================*/
int do_map_phys(message *m)
{
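/* Map a range of physical memory into the virtual address space of the
 * target process, after a permission check through map_perm_check().
 */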
        int r, n;
        struct vmproc *vmp;
        endpoint_t target;
        struct vir_region *vr;
        vir_bytes len;
        phys_bytes startaddr;
        size_t offset;

        target = m->m_lsys_vm_map_phys.ep;
        len = m->m_lsys_vm_map_phys.len;

        if (len <= 0) return EINVAL;

        if(target == SELF)
                target = m->m_source;

        if((r=vm_isokendpt(target, &n)) != OK)
                return EINVAL;

        startaddr = (vir_bytes)m->m_lsys_vm_map_phys.phaddr;

        /* First check permission, then round the range down/up. The caller
         * can't help it if we can't map in at lower than page granularity.
         */
        if(map_perm_check(m->m_source, target, startaddr, len) != OK) {
                printf("VM: unauthorized mapping of 0x%lx by %d\n",
                        startaddr, m->m_source);
                return EPERM;
        }

        vmp = &vmproc[n];

        offset = startaddr % VM_PAGE_SIZE;
        len += offset;
        startaddr -= offset;

        if(len % VM_PAGE_SIZE)
                len += VM_PAGE_SIZE - (len % VM_PAGE_SIZE);

        if(!(vr = map_page_region(vmp, 0, VM_DATATOP, len,
                VR_DIRECT | VR_WRITABLE, 0, &mem_type_directphys))) {
                return ENOMEM;
        }

        phys_setphys(vr, startaddr);

        m->m_lsys_vm_map_phys.reply = (void *) (vr->vaddr + offset);

        return OK;
}

/*===========================================================================*
 *                              do_remap                                     *
 *===========================================================================*/
int do_remap(message *m)
{
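/* Map an existing region of the source process into the destination process
 * as a shared region, read-only when the request is VM_REMAP_RO.
 */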
        int dn, sn;
        vir_bytes da, sa;
        size_t size;
        u32_t flags;
        struct vir_region *src_region, *vr;
        struct vmproc *dvmp, *svmp;
        int r;
        int readonly;

        if(m->m_type == VM_REMAP)
                readonly = 0;
        else if(m->m_type == VM_REMAP_RO)
                readonly = 1;
        else panic("do_remap: can't be");

        da = (vir_bytes) m->m_lsys_vm_vmremap.dest_addr;
        sa = (vir_bytes) m->m_lsys_vm_vmremap.src_addr;
        size = m->m_lsys_vm_vmremap.size;

        if (size <= 0) return EINVAL;

        if ((r = vm_isokendpt((endpoint_t) m->m_lsys_vm_vmremap.destination, &dn)) != OK)
                return EINVAL;
        if ((r = vm_isokendpt((endpoint_t) m->m_lsys_vm_vmremap.source, &sn)) != OK)
                return EINVAL;

        dvmp = &vmproc[dn];
        svmp = &vmproc[sn];

        if (!(src_region = map_lookup(svmp, sa, NULL)))
                return EINVAL;

        if(src_region->vaddr != sa) {
                printf("VM: do_remap: not start of region.\n");
                return EFAULT;
        }

        if (size % VM_PAGE_SIZE)
                size += VM_PAGE_SIZE - size % VM_PAGE_SIZE;

        if(size != src_region->length) {
                printf("VM: do_remap: not size of region.\n");
                return EFAULT;
        }

        flags = VR_SHARED;
        if(!readonly)
                flags |= VR_WRITABLE;

        if(da)
                vr = map_page_region(dvmp, da, 0, size, flags, 0,
                        &mem_type_shared);
        else
                vr = map_page_region(dvmp, 0, VM_DATATOP, size, flags, 0,
                        &mem_type_shared);

        if(!vr) {
                printf("VM: re-map of shared area failed\n");
                return ENOMEM;
        }

        shared_setsource(vr, svmp->vm_endpoint, src_region);

        m->m_lsys_vm_vmremap.ret_addr = (void *) vr->vaddr;
        return OK;
}

/*===========================================================================*
 *                              do_get_phys                                  *
 *===========================================================================*/
int do_get_phys(message *m)
{
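/* Look up the physical address backing the given virtual address in the
 * target process's address space.
 */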
        int r, n;
        struct vmproc *vmp;
        endpoint_t target;
        phys_bytes ret;
        vir_bytes addr;

        target = m->m_lc_vm_getphys.endpt;
        addr = (vir_bytes) m->m_lc_vm_getphys.addr;

        if ((r = vm_isokendpt(target, &n)) != OK)
                return EINVAL;

        vmp = &vmproc[n];

        r = map_get_phys(vmp, addr, &ret);

        m->m_lc_vm_getphys.ret_addr = (void *) ret;
        return r;
}

/*===========================================================================*
 *                              do_get_refcount                              *
 *===========================================================================*/
int do_get_refcount(message *m)
{
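/* Return the reference count of the memory mapped at the given virtual
 * address in the target process.
 */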
        int r, n;
        struct vmproc *vmp;
        endpoint_t target;
        u8_t cnt;
        vir_bytes addr;

        target = m->m_lsys_vm_getref.endpt;
        addr = (vir_bytes) m->m_lsys_vm_getref.addr;

        if ((r = vm_isokendpt(target, &n)) != OK)
                return EINVAL;

        vmp = &vmproc[n];

        r = map_get_ref(vmp, addr, &cnt);

        m->m_lsys_vm_getref.retc = cnt;
        return r;
}

/*===========================================================================*
 *                              do_munmap                                    *
 *===========================================================================*/
int do_munmap(message *m)
{
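/* Unmap a range of a process's address space. Handles munmap(2) as well as
 * the VM_UNMAP_PHYS and VM_SHM_UNMAP requests, which unmap the whole region
 * found at the given address.
 */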
        int r, n;
        struct vmproc *vmp;
        vir_bytes addr, len;
        endpoint_t target = SELF;

        if(m->m_type == VM_UNMAP_PHYS) {
                target = m->m_lsys_vm_unmap_phys.ep;
        } else if(m->m_type == VM_SHM_UNMAP) {
                target = m->m_lc_vm_shm_unmap.forwhom;
        }

        if(target == SELF)
                target = m->m_source;

        if((r=vm_isokendpt(target, &n)) != OK) {
                panic("do_munmap: message from strange source: %d", m->m_source);
        }

        vmp = &vmproc[n];

        if(m->m_type == VM_UNMAP_PHYS) {
                addr = (vir_bytes) m->m_lsys_vm_unmap_phys.vaddr;
        } else if(m->m_type == VM_SHM_UNMAP) {
                addr = (vir_bytes) m->m_lc_vm_shm_unmap.addr;
        } else  addr = (vir_bytes) m->VMUM_ADDR;

        if(addr % VM_PAGE_SIZE)
                return EFAULT;

        if(m->m_type == VM_UNMAP_PHYS || m->m_type == VM_SHM_UNMAP) {
                struct vir_region *vr;
                if(!(vr = map_lookup(vmp, addr, NULL))) {
                        printf("VM: unmap: address 0x%lx not found in %d\n",
                                addr, target);
                        sys_diagctl_stacktrace(target);
                        return EFAULT;
                }
                len = vr->length;
        } else  len = roundup(m->VMUM_LEN, VM_PAGE_SIZE);

        return map_unmap_range(vmp, addr, len);
}