4 #include <minix/callnr.h>
6 #include <minix/config.h>
7 #include <minix/const.h>
9 #include <minix/endpoint.h>
10 #include <minix/minlib.h>
11 #include <minix/type.h>
12 #include <minix/ipc.h>
13 #include <minix/sysutil.h>
14 #include <minix/syslib.h>
15 #include <minix/safecopies.h>
16 #include <minix/bitmap.h>
17 #include <minix/debug.h>
19 #include <machine/vmparam.h>
22 #include <sys/param.h>
37 static struct vir_region
*mmap_region(struct vmproc
*vmp
, vir_bytes addr
,
38 u32_t vmm_flags
, size_t len
, u32_t vrflags
,
39 mem_type_t
*mt
, int execpriv
)
42 struct vir_region
*vr
= NULL
;
44 if(vmm_flags
& MAP_LOWER16M
) vrflags
|= VR_LOWER16MB
;
45 if(vmm_flags
& MAP_LOWER1M
) vrflags
|= VR_LOWER1MB
;
46 if(vmm_flags
& MAP_ALIGNMENT_64KB
) vrflags
|= VR_PHYS64K
;
47 if(vmm_flags
& MAP_PREALLOC
) mfflags
|= MF_PREALLOC
;
48 if(vmm_flags
& MAP_UNINITIALIZED
) {
49 if(!execpriv
) return NULL
;
50 vrflags
|= VR_UNINITIALIZED
;
57 if(len
% VM_PAGE_SIZE
)
58 len
+= VM_PAGE_SIZE
- (len
% VM_PAGE_SIZE
);
60 if (addr
&& (vmm_flags
& MAP_FIXED
)) {
61 int r
= map_unmap_range(vmp
, addr
, len
);
63 printf("mmap_region: map_unmap_range failed (%d)\n", r
);
68 if (addr
|| (vmm_flags
& MAP_FIXED
)) {
69 /* An address is given, first try at that address. */
70 vr
= map_page_region(vmp
, addr
, 0, len
,
71 vrflags
, mfflags
, mt
);
72 if(!vr
&& (vmm_flags
& MAP_FIXED
))
77 /* No address given or address already in use. */
78 vr
= map_page_region(vmp
, VM_PAGE_SIZE
, VM_DATATOP
, len
,
79 vrflags
, mfflags
, mt
);
/* mmap_file: establish a file-backed mapping in process 'vmp', called once
 * VFS has resolved the file descriptor (VMVFSREQ_FDLOOKUP reply).
 * On success the user-visible mapped address is written to *retaddr.
 *
 * NOTE(review): this span is a mangled extraction -- interior lines
 * (declarations of vrflags/page_offset/result, braces, error paths, the
 * final return) are missing from view. Comments annotate visible fragments
 * only; do not treat this text as compilable.
 */
85 static int mmap_file(struct vmproc
*vmp
,
86 int vmfd
, off_t file_offset
, int flags
,
87 ino_t ino
, dev_t dev
, u64_t filesize
, vir_bytes addr
, vir_bytes len
,
88 vir_bytes
*retaddr
, u16_t clearend
, int writable
, int mayclosefd
)
90 /* VFS has replied to a VMVFSREQ_FDLOOKUP request. */
91 struct vir_region
*vr
;
/* Writable mappings get the VR_WRITABLE region flag. */
96 if(writable
) vrflags
|= VR_WRITABLE
;
98 /* Do some page alignments. */
/* Align file_offset down to a page boundary; page_offset keeps the
 * sub-page remainder so *retaddr can be adjusted back below. */
99 if((page_offset
= (file_offset
% VM_PAGE_SIZE
))) {
100 file_offset
-= page_offset
;
104 len
= roundup(len
, VM_PAGE_SIZE
);
106 /* All numbers should be page-aligned now. */
107 assert(!(len
% VM_PAGE_SIZE
));
108 assert(!(filesize
% VM_PAGE_SIZE
));
109 assert(!(file_offset
% VM_PAGE_SIZE
));
112 /* XXX ld.so relies on longer-than-file mapping */
/* Truncate the mapping if it extends past the end of the file.
 * NOTE(review): the format string uses %x for dev and %d for ino; dev_t
 * and ino_t may be wider than int on some configurations -- verify the
 * specifiers match the actual type widths. */
113 if((u64_t
) len
+ file_offset
> filesize
) {
114 printf("VM: truncating mmap dev 0x%x ino %d beyond file size in %d; offset %llu, len %lu, size %llu; ",
115 dev
, ino
, vmp
->vm_endpoint
,
116 file_offset
, len
, filesize
);
117 len
= filesize
- file_offset
;
/* Allocate the region backed by mem_type_mappedfile. execpriv is 0:
 * MAP_UNINITIALIZED is never permitted for file mappings. */
122 if(!(vr
= mmap_region(vmp
, addr
, flags
, len
,
123 vrflags
, &mem_type_mappedfile
, 0))) {
/* Report the mapped address, re-adding the sub-page offset. */
126 *retaddr
= vr
->vaddr
+ page_offset
;
/* Attach the file (fd/offset/dev/ino) to the region; mayclosefd tells
 * the helper whether it owns the fd. */
129 mappedfile_setfile(vmp
, vr
, vmfd
,
130 file_offset
, dev
, ino
, clearend
, 1, mayclosefd
);
/* do_vfs_mmap: handle a mapping request issued by VFS itself, mapping a
 * file privately and at a fixed address into the requesting process.
 *
 * NOTE(review): mangled extraction -- declarations (r, n, v, vmp), the
 * vmp = &vmproc[n] assignment and the trailing arguments of the mmap_file()
 * call are missing from view.
 */
136 int do_vfs_mmap(message
*m
)
141 u16_t clearend
, flags
= 0;
143 /* It might be disabled */
144 if(!enable_filemap
) return ENXIO
;
146 clearend
= m
->m_vm_vfs_mmap
.clearend
;
147 flags
= m
->m_vm_vfs_mmap
.flags
;
/* VFS is trusted: a bad endpoint from it is a fatal inconsistency. */
149 if((r
=vm_isokendpt(m
->m_vm_vfs_mmap
.who
, &n
)) != OK
)
150 panic("bad ep %d from vfs", m
->m_vm_vfs_mmap
.who
);
/* Map the file MAP_PRIVATE|MAP_FIXED at the requested vaddr. The huge
 * (u64_t) LONG_MAX * VM_PAGE_SIZE pseudo-filesize disables the
 * beyond-end-of-file truncation in mmap_file(). */
153 return mmap_file(vmp
, m
->m_vm_vfs_mmap
.fd
, m
->m_vm_vfs_mmap
.offset
,
154 MAP_PRIVATE
| MAP_FIXED
,
155 m
->m_vm_vfs_mmap
.ino
, m
->m_vm_vfs_mmap
.dev
,
156 (u64_t
) LONG_MAX
* VM_PAGE_SIZE
,
157 m
->m_vm_vfs_mmap
.vaddr
, m
->m_vm_vfs_mmap
.len
, &v
,
/* mmap_file_cont: continuation invoked when VFS answers the FDLOOKUP that
 * do_mmap() queued for a file mapping. Finishes the mapping (or records the
 * error) and unblocks the requesting process with a reply message.
 *
 * NOTE(review): mangled extraction -- declarations (mmap_reply, result,
 * writable), the writable=1 assignment, #endif and else-branch braces are
 * missing from view.
 */
161 static void mmap_file_cont(struct vmproc
*vmp
, message
*replymsg
, void *cbarg
,
/* origmsg_v carries a copy of the original user mmap request. */
164 message
*origmsg
= (message
*) origmsg_v
;
/* Default reply address is MAP_FAILED until the mapping succeeds. */
168 vir_bytes v
= (vir_bytes
) MAP_FAILED
;
170 if(origmsg
->m_mmap
.prot
& PROT_WRITE
)
173 if(replymsg
->VMV_RESULT
!= OK
) {
174 #if 0 /* Noisy diagnostic for mmap() by ld.so */
175 printf("VM: VFS reply failed (%d)\n", replymsg
->VMV_RESULT
);
176 sys_diagctl_stacktrace(vmp
->vm_endpoint
);
/* NOTE(review): the failure code is taken from origmsg, yet the failure
 * was detected in replymsg->VMV_RESULT above -- looks like it should be
 * replymsg->VMV_RESULT; confirm against the upstream source. */
178 result
= origmsg
->VMV_RESULT
;
/* Lookup succeeded: perform the actual file mapping with the fd/dev/ino
 * and size (in pages) that VFS reported. */
181 result
= mmap_file(vmp
, replymsg
->VMV_FD
, origmsg
->m_mmap
.offset
,
182 origmsg
->m_mmap
.flags
,
183 replymsg
->VMV_INO
, replymsg
->VMV_DEV
,
184 (u64_t
) replymsg
->VMV_SIZE_PAGES
*PAGE_SIZE
,
185 (vir_bytes
) origmsg
->m_mmap
.addr
,
186 origmsg
->m_mmap
.len
, &v
, 0, writable
, 1);
189 /* Unblock requesting process. */
190 memset(&mmap_reply
, 0, sizeof(mmap_reply
));
191 mmap_reply
.m_type
= result
;
192 mmap_reply
.m_mmap
.retaddr
= (void *) v
;
194 if(ipc_send(vmp
->vm_endpoint
, &mmap_reply
) != OK
)
195 panic("VM: mmap_file_cont: ipc_send() failed");
198 /*===========================================================================*
200 *===========================================================================*/
/* do_mmap: entry point for the VM_MMAP request. Handles anonymous mappings
 * inline and forwards file-backed mappings to VFS (completion happens in
 * mmap_file_cont).
 *
 * NOTE(review): mangled extraction -- declarations (r, n, execpriv, vmp),
 * several returns/braces, the length-zero check body, the MAP_THIRDPARTY
 * target selection, and the SUSPEND/return paths are missing from view.
 */
201 int do_mmap(message
*m
)
205 vir_bytes addr
= (vir_bytes
) m
->m_mmap
.addr
;
206 struct vir_region
*vr
= NULL
;
208 size_t len
= (vir_bytes
) m
->m_mmap
.len
;
210 /* RS and VFS can do slightly more special mmap() things */
211 if(m
->m_source
== VFS_PROC_NR
|| m
->m_source
== RS_PROC_NR
)
/* MAP_THIRDPARTY: map on behalf of another process; privileged only. */
214 if(m
->m_mmap
.flags
& MAP_THIRDPARTY
) {
215 if(!execpriv
) return EPERM
;
216 if((r
=vm_isokendpt(m
->m_mmap
.forwhom
, &n
)) != OK
)
219 /* regular mmap, i.e. for caller */
220 if((r
=vm_isokendpt(m
->m_source
, &n
)) != OK
) {
221 panic("do_mmap: message from strange source: %d",
228 /* "SUSv3 specifies that mmap() should fail if length is 0" */
/* Anonymous branch: no fd, or MAP_ANON explicitly requested. */
233 if(m
->m_mmap
.fd
== -1 || (m
->m_mmap
.flags
& MAP_ANON
)) {
234 /* actual memory in some form */
235 mem_type_t
*mt
= NULL
;
/* MAP_ANON with a real fd is contradictory; diagnose it. */
237 if(m
->m_mmap
.fd
!= -1) {
238 printf("VM: mmap: fd %d, len 0x%x\n", m
->m_mmap
.fd
, len
);
242 /* Contiguous phys memory has to be preallocated. */
243 if((m
->m_mmap
.flags
& (MAP_CONTIG
|MAP_PREALLOC
)) == MAP_CONTIG
) {
/* Select the backing memory type for the anonymous region. */
247 if(m
->m_mmap
.flags
& MAP_CONTIG
) {
248 mt
= &mem_type_anon_contig
;
249 } else mt
= &mem_type_anon
;
251 if(!(vr
= mmap_region(vmp
, addr
, m
->m_mmap
.flags
, len
,
252 VR_WRITABLE
| VR_ANON
, mt
, execpriv
))) {
256 /* File mapping might be disabled */
257 if(!enable_filemap
) return ENXIO
;
259 /* For files, we only can't accept writable MAP_SHARED
262 if((m
->m_mmap
.flags
& MAP_SHARED
) && (m
->m_mmap
.prot
& PROT_WRITE
)) {
/* Ask VFS to look up the fd; mmap_file_cont finishes the mapping when
 * the reply arrives. A copy of *m travels along as the callback arg. */
266 if(vfs_request(VMVFSREQ_FDLOOKUP
, m
->m_mmap
.fd
, vmp
, 0, 0,
267 mmap_file_cont
, NULL
, m
, sizeof(*m
)) != OK
) {
268 printf("VM: vfs_request for mmap failed\n");
272 /* request queued; don't reply. */
276 /* Return mapping, as seen from process. */
277 m
->m_mmap
.retaddr
= (void *) vr
->vaddr
;
282 /*===========================================================================*
284 *===========================================================================*/
285 static int map_perm_check(endpoint_t caller
, endpoint_t target
,
286 phys_bytes physaddr
, phys_bytes len
)
290 /* TTY and memory are allowed to do anything.
291 * They have to be special cases as they have to be able to do
292 * anything; TTY even on behalf of anyone for the TIOCMAPMEM
293 * ioctl. MEM just for itself.
295 if(caller
== TTY_PROC_NR
)
299 if(caller
== MEM_PROC_NR
)
302 /* Anyone else needs explicit permission from the kernel (ultimately
305 r
= sys_privquery_mem(caller
, physaddr
, len
);
310 /*===========================================================================*
312 *===========================================================================*/
/* do_map_phys: map a physical memory range into a process, after a
 * permission check via map_perm_check(). Replies with the virtual address.
 *
 * NOTE(review): mangled extraction -- declarations (r, n, offset, len, vmp,
 * target), the SELF-target branch condition, vmp = &vmproc[n], the EPERM
 * return, startaddr page rounding, and the final return are missing from
 * view.
 */
313 int do_map_phys(message
*m
)
318 struct vir_region
*vr
;
320 phys_bytes startaddr
;
/* Target process: taken from the message... */
323 target
= m
->m_lsys_vm_map_phys
.ep
;
324 len
= m
->m_lsys_vm_map_phys
.len
;
326 if (len
<= 0) return EINVAL
;
/* ...or the caller itself (SELF case; condition missing from view). */
329 target
= m
->m_source
;
331 if((r
=vm_isokendpt(target
, &n
)) != OK
)
334 startaddr
= (vir_bytes
)m
->m_lsys_vm_map_phys
.phaddr
;
336 /* First check permission, then round range down/up. Caller can't
337 * help it if we can't map in lower than page granularity.
339 if(map_perm_check(m
->m_source
, target
, startaddr
, len
) != OK
) {
340 printf("VM: unauthorized mapping of 0x%lx by %d\n",
341 startaddr
, m
->m_source
);
/* Keep the sub-page offset so the reply address matches the request. */
347 offset
= startaddr
% VM_PAGE_SIZE
;
351 if(len
% VM_PAGE_SIZE
)
352 len
+= VM_PAGE_SIZE
- (len
% VM_PAGE_SIZE
);
/* Direct-phys, writable region anywhere below VM_DATATOP. */
354 if(!(vr
= map_page_region(vmp
, 0, VM_DATATOP
, len
,
355 VR_DIRECT
| VR_WRITABLE
, 0, &mem_type_directphys
))) {
/* Bind the region to the requested physical base address. */
359 phys_setphys(vr
, startaddr
);
361 m
->m_lsys_vm_map_phys
.reply
= (void *) (vr
->vaddr
+ offset
);
366 /*===========================================================================*
368 *===========================================================================*/
/* do_remap: map an existing (shared) region of a source process into a
 * destination process, read-write (VM_REMAP) or read-only (VM_REMAP_RO).
 *
 * NOTE(review): mangled extraction -- declarations (da, sa, size, r, dn, sn,
 * flags, readonly), the readonly assignments, error returns after the
 * endpoint/lookup checks, dvmp/svmp assignments, and the if/else around the
 * two map_page_region calls are missing from view.
 */
369 int do_remap(message
*m
)
375 struct vir_region
*src_region
, *vr
;
376 struct vmproc
*dvmp
, *svmp
;
/* Message type selects read-write vs read-only remap. */
380 if(m
->m_type
== VM_REMAP
)
382 else if(m
->m_type
== VM_REMAP_RO
)
384 else panic("do_remap: can't be");
386 da
= (vir_bytes
) m
->m_lsys_vm_vmremap
.dest_addr
;
387 sa
= (vir_bytes
) m
->m_lsys_vm_vmremap
.src_addr
;
388 size
= m
->m_lsys_vm_vmremap
.size
;
390 if (size
<= 0) return EINVAL
;
/* Validate both endpoints and resolve their process slots. */
392 if ((r
= vm_isokendpt((endpoint_t
) m
->m_lsys_vm_vmremap
.destination
, &dn
)) != OK
)
394 if ((r
= vm_isokendpt((endpoint_t
) m
->m_lsys_vm_vmremap
.source
, &sn
)) != OK
)
/* The source address must name an existing region... */
400 if (!(src_region
= map_lookup(svmp
, sa
, NULL
)))
/* ...and must be exactly its start. */
403 if(src_region
->vaddr
!= sa
) {
404 printf("VM: do_remap: not start of region.\n");
408 if (size
% VM_PAGE_SIZE
)
409 size
+= VM_PAGE_SIZE
- size
% VM_PAGE_SIZE
;
/* The remap must cover the whole source region. */
411 if(size
!= src_region
->length
) {
412 printf("VM: do_remap: not size of region.\n");
418 flags
|= VR_WRITABLE
;
/* Destination address given: map exactly there; otherwise let VM pick
 * any address below VM_DATATOP. */
421 vr
= map_page_region(dvmp
, da
, 0, size
, flags
, 0,
424 vr
= map_page_region(dvmp
, 0, VM_DATATOP
, size
, flags
, 0,
428 printf("VM: re-map of shared area failed\n");
/* Link the new region to its source region/process. */
432 shared_setsource(vr
, svmp
->vm_endpoint
, src_region
);
434 m
->m_lsys_vm_vmremap
.ret_addr
= (void *) vr
->vaddr
;
438 /*===========================================================================*
440 *===========================================================================*/
441 int do_get_phys(message
*m
)
449 target
= m
->m_lc_vm_getphys
.endpt
;
450 addr
= (vir_bytes
) m
->m_lc_vm_getphys
.addr
;
452 if ((r
= vm_isokendpt(target
, &n
)) != OK
)
457 r
= map_get_phys(vmp
, addr
, &ret
);
459 m
->m_lc_vm_getphys
.ret_addr
= (void *) ret
;
463 /*===========================================================================*
465 *===========================================================================*/
466 int do_get_refcount(message
*m
)
474 target
= m
->m_lsys_vm_getref
.endpt
;
475 addr
= (vir_bytes
) m
->m_lsys_vm_getref
.addr
;
477 if ((r
= vm_isokendpt(target
, &n
)) != OK
)
482 r
= map_get_ref(vmp
, addr
, &cnt
);
484 m
->m_lsys_vm_getref
.retc
= cnt
;
488 /*===========================================================================*
490 *===========================================================================*/
/* do_munmap: unmap a range from a process. Serves VM_MUNMAP (len from the
 * message), VM_UNMAP_PHYS and VM_SHM_UNMAP (len taken from the region found
 * at the given address).
 *
 * NOTE(review): mangled extraction -- declarations (r, n, addr, len, vmp),
 * the else branch selecting the caller as target, vmp = &vmproc[n], the
 * misaligned-address error return, and the len = vr->length assignment in
 * the PHYS/SHM branch are missing from view.
 */
491 int do_munmap(message
*m
)
496 endpoint_t target
= SELF
;
/* Pick the target process according to the request type. */
498 if(m
->m_type
== VM_UNMAP_PHYS
) {
499 target
= m
->m_lsys_vm_unmap_phys
.ep
;
500 } else if(m
->m_type
== VM_SHM_UNMAP
) {
501 target
= m
->m_lc_vm_shm_unmap
.forwhom
;
505 target
= m
->m_source
;
507 if((r
=vm_isokendpt(target
, &n
)) != OK
) {
/* NOTE(review): panic text says "do_mmap" but this is do_munmap --
 * looks like a copy-pasted message; consider correcting upstream. */
508 panic("do_mmap: message from strange source: %d", m
->m_source
);
/* The address field also depends on the request type. */
513 if(m
->m_type
== VM_UNMAP_PHYS
) {
514 addr
= (vir_bytes
) m
->m_lsys_vm_unmap_phys
.vaddr
;
515 } else if(m
->m_type
== VM_SHM_UNMAP
) {
516 addr
= (vir_bytes
) m
->m_lc_vm_shm_unmap
.addr
;
517 } else addr
= (vir_bytes
) m
->VMUM_ADDR
;
/* The address must be page-aligned. */
519 if(addr
% VM_PAGE_SIZE
)
/* PHYS/SHM unmap: the whole region at 'addr' is removed, so its length
 * comes from the region itself rather than the message. */
522 if(m
->m_type
== VM_UNMAP_PHYS
|| m
->m_type
== VM_SHM_UNMAP
) {
523 struct vir_region
*vr
;
524 if(!(vr
= map_lookup(vmp
, addr
, NULL
))) {
525 printf("VM: unmap: address 0x%lx not found in %d\n",
527 sys_diagctl_stacktrace(target
);
/* Plain munmap: round the requested length up to whole pages. */
531 } else len
= roundup(m
->VMUM_LEN
, VM_PAGE_SIZE
);
533 return map_unmap_range(vmp
, addr
, len
);