#include <minix/callnr.h>
#include <minix/com.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/endpoint.h>
#include <minix/keymap.h>
#include <minix/minlib.h>
#include <minix/type.h>
#include <minix/ipc.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/safecopies.h>
#include <minix/bitmap.h>
#include <minix/debug.h>

#include <sys/mman.h>
#include <sys/param.h>

#include <errno.h>
#include <stdio.h>

/* VM server internals: process table, region management, prototypes. */
#include "glo.h"
#include "proto.h"
#include "util.h"
#include "region.h"
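/* Handlers for the VM server's memory-mapping calls: do_mmap() for
 * anonymous mmap(), do_map_phys()/do_munmap() for mapping and unmapping
 * physical ranges, do_remap() for sharing an existing region with another
 * process, and do_get_phys()/do_get_refcount() for querying mappings.
 */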
/*===========================================================================*
 *				do_mmap					     *
 *===========================================================================*/
int do_mmap(message *m)
{
	int r, n;
	struct vmproc *vmp;
	vir_bytes addr;
	mem_type_t *mt = NULL;
	int mfflags = 0;
	int execpriv = 0;
	struct vir_region *vr = NULL;

	/* RS and VFS can do slightly more special mmap() things */
	if(m->m_source == VFS_PROC_NR || m->m_source == RS_PROC_NR)
		execpriv = 1;

	if(m->VMM_FLAGS & MAP_THIRDPARTY) {
		if(!execpriv) return EPERM;
		if((r=vm_isokendpt(m->VMM_FORWHOM, &n)) != OK)
			return ESRCH;
	} else {
		/* regular mmap, i.e. for caller */
		if((r=vm_isokendpt(m->m_source, &n)) != OK) {
			panic("do_mmap: message from strange source: %d",
				m->m_source);
		}
	}

	vmp = &vmproc[n];

	if(m->VMM_FD == -1 || (m->VMM_FLAGS & MAP_ANON)) {
		u32_t vrflags = VR_ANON | VR_WRITABLE;
		size_t len = (vir_bytes) m->VMM_LEN;

		if(m->VMM_FD != -1 || len <= 0) {
			return EINVAL;
		}

		/* Contiguous phys memory has to be preallocated. */
		if((m->VMM_FLAGS & (MAP_CONTIG|MAP_PREALLOC)) == MAP_CONTIG) {
			return EINVAL;
		}

		if(m->VMM_FLAGS & MAP_PREALLOC) mfflags |= MF_PREALLOC;
		if(m->VMM_FLAGS & MAP_LOWER16M) vrflags |= VR_LOWER16MB;
		if(m->VMM_FLAGS & MAP_LOWER1M) vrflags |= VR_LOWER1MB;
		if(m->VMM_FLAGS & MAP_ALIGN64K) vrflags |= VR_PHYS64K;
		if(m->VMM_FLAGS & MAP_UNINITIALIZED) {
			if(!execpriv) return EPERM;
			vrflags |= VR_UNINITIALIZED;
		}
		if(m->VMM_FLAGS & MAP_CONTIG) {
			mt = &mem_type_anon_contig;
		} else	mt = &mem_type_anon;

		/* Round the length up to a whole number of pages. */
		if(len % VM_PAGE_SIZE)
			len += VM_PAGE_SIZE - (len % VM_PAGE_SIZE);

		if (m->VMM_ADDR || (m->VMM_FLAGS & MAP_FIXED)) {
			/* An address is given, first try at that address. */
			addr = (vir_bytes) m->VMM_ADDR;
			vr = map_page_region(vmp, addr, 0, len,
				vrflags, mfflags, mt);
			if(!vr && (m->VMM_FLAGS & MAP_FIXED))
				return ENOMEM;
		}

		if (!vr) {
			/* No address given or address already in use. */
			vr = map_page_region(vmp, 0, VM_DATATOP, len,
				vrflags, mfflags, mt);
		}

		if (!vr) {
			return ENOMEM;
		}
	} else {
		/* File-backed mappings are not handled by VM itself. */
		return ENXIO;
	}

	/* Return mapping, as seen from process. */
	m->VMM_RETADDR = vr->vaddr;

	return OK;
}
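/* Roughly, a caller's anonymous mmap() shows up here as a VM_MMAP message:
 * VMM_FD is -1, VMM_LEN holds the requested size, VMM_FLAGS carries the
 * MAP_* bits, VMM_ADDR an optional hint address, and the virtual address
 * that was actually chosen goes back to the caller in VMM_RETADDR.
 */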
/*===========================================================================*
 *				map_perm_check				     *
 *===========================================================================*/
int map_perm_check(endpoint_t caller, endpoint_t target,
	phys_bytes physaddr, phys_bytes len)
{
	int r;

	/* TTY and memory are allowed to do anything.
	 * They have to be special cases as they have to be able to do
	 * anything; TTY even on behalf of anyone for the TIOCMAPMEM
	 * ioctl. MEM just for itself.
	 */
	if(caller == TTY_PROC_NR)
		return OK;
	if(caller == MEM_PROC_NR)
		return OK;

	/* Anyone else needs explicit permission from the kernel (ultimately
	 * set by PCI).
	 */
	r = sys_privquery_mem(caller, physaddr, len);

	return r;
}
/*===========================================================================*
 *				do_map_phys				     *
 *===========================================================================*/
int do_map_phys(message *m)
{
	int r, n;
	struct vmproc *vmp;
	endpoint_t target;
	struct vir_region *vr;
	vir_bytes len;
	phys_bytes startaddr;
	size_t offset;

	len = m->VMMP_LEN;

	if (len <= 0) return EINVAL;

	target = m->m_source;

	if((r=vm_isokendpt(target, &n)) != OK)
		return EINVAL;

	startaddr = (vir_bytes)m->VMMP_PHADDR;

	/* First check permission, then round range down/up. Caller can't
	 * help it if we can't map in lower than page granularity.
	 */
	if(map_perm_check(m->m_source, target, startaddr, len) != OK) {
		printf("VM: unauthorized mapping of 0x%lx by %d\n",
			startaddr, m->m_source);
		return EPERM;
	}

	vmp = &vmproc[n];

	offset = startaddr % VM_PAGE_SIZE;
	len += offset;
	startaddr -= offset;

	if(len % VM_PAGE_SIZE)
		len += VM_PAGE_SIZE - (len % VM_PAGE_SIZE);

	if(!(vr = map_page_region(vmp, 0, VM_DATATOP, len,
		VR_DIRECT | VR_WRITABLE, 0, &mem_type_directphys))) {
		return ENOMEM;
	}

	phys_setphys(vr, startaddr);

	m->VMMP_VADDR_REPLY = (void *) (vr->vaddr + offset);

	return OK;
}
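/* The reply above deliberately keeps the sub-page offset of the request:
 * the region itself is page-aligned, and VMMP_VADDR_REPLY points offset
 * bytes into it, so it corresponds exactly to the physical address that
 * was asked for.
 */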
/*===========================================================================*
 *				do_remap				     *
 *===========================================================================*/
int do_remap(message *m)
{
	int dn, sn;
	vir_bytes da, sa;
	size_t size;
	u32_t flags;
	struct vir_region *src_region, *vr;
	struct vmproc *dvmp, *svmp;
	int r;
	int readonly;

	if(m->m_type == VM_REMAP)
		readonly = 0;
	else if(m->m_type == VM_REMAP_RO)
		readonly = 1;
	else panic("do_remap: can't be");

	da = (vir_bytes) m->VMRE_DA;
	sa = (vir_bytes) m->VMRE_SA;
	size = m->VMRE_SIZE;

	if (size <= 0) return EINVAL;

	if ((r = vm_isokendpt((endpoint_t) m->VMRE_D, &dn)) != OK)
		return EINVAL;
	if ((r = vm_isokendpt((endpoint_t) m->VMRE_S, &sn)) != OK)
		return EINVAL;

	dvmp = &vmproc[dn];
	svmp = &vmproc[sn];

	if (!(src_region = map_lookup(svmp, sa, NULL)))
		return EINVAL;

	if(src_region->vaddr != sa) {
		printf("VM: do_remap: not start of region.\n");
		return EFAULT;
	}

	if (size % VM_PAGE_SIZE)
		size += VM_PAGE_SIZE - size % VM_PAGE_SIZE;

	if(size != src_region->length) {
		printf("VM: do_remap: not size of region.\n");
		return EFAULT;
	}

	flags = 0;
	if(!readonly)
		flags |= VR_WRITABLE;

	if (da)
		vr = map_page_region(dvmp, da, 0, size, flags, 0,
			&mem_type_shared);
	else
		vr = map_page_region(dvmp, 0, VM_DATATOP, size, flags, 0,
			&mem_type_shared);

	if(!vr) {
		printf("VM: re-map of shared area failed\n");
		return ENOMEM;
	}

	shared_setsource(vr, svmp->vm_endpoint, src_region);

	m->VMRE_RETA = (char *) vr->vaddr;
	return OK;
}
/*===========================================================================*
 *				do_get_phys				     *
 *===========================================================================*/
int do_get_phys(message *m)
{
	int r, n;
	struct vmproc *vmp;
	endpoint_t target;
	phys_bytes ret;
	vir_bytes addr;

	target = m->VMPHYS_ENDPT;
	addr = m->VMPHYS_ADDR;

	if ((r = vm_isokendpt(target, &n)) != OK)
		return EINVAL;

	vmp = &vmproc[n];

	r = map_get_phys(vmp, addr, &ret);

	m->VMPHYS_RETA = ret;
	return r;
}
/*===========================================================================*
 *				do_get_refcount				     *
 *===========================================================================*/
int do_get_refcount(message *m)
{
	int r, n;
	struct vmproc *vmp;
	endpoint_t target;
	u8_t cnt;
	vir_bytes addr;

	target = m->VMREFCNT_ENDPT;
	addr = m->VMREFCNT_ADDR;

	if ((r = vm_isokendpt(target, &n)) != OK)
		return EINVAL;

	vmp = &vmproc[n];

	r = map_get_ref(vmp, addr, &cnt);

	m->VMREFCNT_RETC = cnt;
	return r;
}
/*===========================================================================*
 *				do_munmap				     *
 *===========================================================================*/
int do_munmap(message *m)
{
	int r, n;
	struct vmproc *vmp;
	vir_bytes addr, len, offset;
	struct vir_region *vr;
	endpoint_t target = SELF;

	if(m->m_type == VM_UNMAP_PHYS) {
		target = m->VMUP_EP;
	} else if(m->m_type == VM_SHM_UNMAP) {
		target = m->VMUN_ENDPT;
	}

	if(target == SELF)
		target = m->m_source;

	if((r=vm_isokendpt(target, &n)) != OK) {
		panic("do_munmap: message from strange source: %d",
			m->m_source);
	}

	vmp = &vmproc[n];

	if(m->m_type == VM_UNMAP_PHYS) {
		addr = (vir_bytes) m->VMUP_VADDR;
	} else if(m->m_type == VM_SHM_UNMAP) {
		addr = (vir_bytes) m->VMUN_ADDR;
	} else	addr = (vir_bytes) m->VMUM_ADDR;

	if(!(vr = map_lookup(vmp, addr, NULL))) {
		printf("VM: unmap: virtual address 0x%lx not found in %d\n",
			addr, target);
		return EFAULT;
	}

	if(addr % VM_PAGE_SIZE)
		return EFAULT;

	if(m->m_type == VM_UNMAP_PHYS || m->m_type == VM_SHM_UNMAP) {
		len = vr->length;
	} else len = roundup(m->VMUM_LEN, VM_PAGE_SIZE);

	offset = addr - vr->vaddr;

	if(offset + len > vr->length) {
		printf("munmap: addr 0x%lx len 0x%lx spills out of region\n",
			addr, len);
		return EFAULT;
	}

	if(map_unmap_region(vmp, vr, offset, len) != OK)
		panic("do_munmap: map_unmap_region failed");

	return OK;
}