/*	$NetBSD: uvm_mremap.c,v 1.14 2009/08/02 16:03:47 yamt Exp $	*/

/*-
 * Copyright (c)2006,2007,2009 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_mremap.c,v 1.14 2009/08/02 16:03:47 yamt Exp $");

#include <sys/param.h>
#include <sys/mman.h>
#include <sys/sched.h>
#include <sys/syscallargs.h>
#include <sys/proc.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>

static int
uvm_mapent_extend(struct vm_map *map, vaddr_t endva, vsize_t size)
{
	struct vm_map_entry *entry;
	struct vm_map_entry *reserved_entry;
	struct uvm_object *uobj;
	int error = 0;

	vm_map_lock(map);
	if (!uvm_map_lookup_entry(map, endva, &reserved_entry)) {
		error = ENOENT;
		goto done;
	}
	if (reserved_entry->start != endva ||
	    reserved_entry->end != endva + size ||
	    reserved_entry->object.uvm_obj != NULL ||
	    reserved_entry->aref.ar_amap != NULL ||
	    reserved_entry->protection != VM_PROT_NONE) {
		error = EINVAL;
		goto done;
	}
	entry = reserved_entry->prev;
	if (&map->header == entry || entry->end != endva) {
		error = EINVAL;
		goto done;
	}

	/*
	 * now, make reserved_entry compatible with entry, and then
	 * try to merge.
	 */
	uobj = entry->object.uvm_obj;
	if (uobj) {
		voff_t offset = entry->offset;
		voff_t newoffset;

		newoffset = offset + entry->end - entry->start;
		if (newoffset <= offset) {
			error = E2BIG; /* XXX */
			goto done;
		}
		mutex_enter(&uobj->vmobjlock);
		KASSERT(uobj->uo_refs > 0);
		atomic_inc_uint(&uobj->uo_refs);
		mutex_exit(&uobj->vmobjlock);
		reserved_entry->object.uvm_obj = uobj;
		reserved_entry->offset = newoffset;
	}
	reserved_entry->etype = entry->etype;
	if (UVM_ET_ISCOPYONWRITE(entry)) {
		reserved_entry->etype |= UVM_ET_NEEDSCOPY;
	}
	reserved_entry->flags &= ~UVM_MAP_NOMERGE;
	reserved_entry->protection = entry->protection;
	reserved_entry->max_protection = entry->max_protection;
	reserved_entry->inheritance = entry->inheritance;
	reserved_entry->advice = entry->advice;
	reserved_entry->wired_count = 0; /* XXX should inherit? */
	uvm_mapent_trymerge(map, reserved_entry, 0);
done:
	vm_map_unlock(map);

	return error;
}
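
/*
 * Illustrative note (not part of the original source): on success, the
 * reserved entry directly after "entry" has been given the same object,
 * offset, protection, and attributes, so uvm_mapent_trymerge() can
 * coalesce the two:
 *
 *	before:	[ entry )[ reserved_entry (VM_PROT_NONE) )
 *	after:	[ entry, grown by "size" bytes           )
 */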

/*
 * uvm_mremap: move and/or resize existing mappings.
 */

int
uvm_mremap(struct vm_map *oldmap, vaddr_t oldva, vsize_t oldsize,
    struct vm_map *newmap, vaddr_t *newvap, vsize_t newsize,
    struct proc *newproc, int flags)
{
	vaddr_t dstva;
	vsize_t movesize;
	vaddr_t newva;
	vaddr_t align = 0;
	int error = 0;
	const bool fixed = (flags & MAP_FIXED) != 0;
	u_int alignshift;

	if (fixed) {
		newva = *newvap;
	} else {
		newva = 0;
	}
	if ((oldva & PAGE_MASK) != 0 ||
	    (newva & PAGE_MASK) != 0 ||
	    (oldsize & PAGE_MASK) != 0 ||
	    (newsize & PAGE_MASK) != 0) {
		return EINVAL;
	}
	/* XXX zero-size should be allowed? */
	if (oldva + oldsize <= oldva || newva + newsize <= newva) {
		return EINVAL;
	}

	/*
	 * Try to see if any requested alignment can even be attempted.
	 * Make sure we can express the alignment (asking for a >= 4GB
	 * alignment on an ILP32 architecture makes no sense) and the
	 * alignment is at least for a page sized quantity.  If the
	 * request was for a fixed mapping, make sure the supplied address
	 * adheres to the requested alignment.
	 */
	alignshift = (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT;
	if (alignshift != 0) {
		if (alignshift >= sizeof(vaddr_t) * NBBY)
			return EINVAL;
		align = 1L << alignshift;
		if (align < PAGE_SIZE)
			return EINVAL;
		if (align >= vm_map_max(oldmap))
			return ENOMEM;
		if ((flags & MAP_FIXED) != 0) {
			if ((*newvap & (align - 1)) != 0)
				return EINVAL;
			/* fixed address is already aligned; drop the hint */
			align = 0;
		}
	}
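
/*
 * Illustrative example (an assumption, not part of the original file):
 * userland typically encodes the alignment request with MAP_ALIGNED(),
 * which stores log2 of the alignment in the MAP_ALIGNMENT_MASK bits.
 * E.g. MAP_ALIGNED(16) asks for a 64KB-aligned result:
 *
 *	p = mremap(oldp, oldsz, NULL, newsz, MAP_ALIGNED(16));
 */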

	/*
	 * check the easy cases first.
	 */

	if ((!fixed || newva == oldva) && newmap == oldmap &&
	    (align == 0 || (oldva & (align - 1)) == 0)) {
		vaddr_t va;
		if (newsize == oldsize) {
			/* same size: nothing to do */
			newva = oldva;
			goto done;
		}
		if (newsize < oldsize) {
			/* shrink in place: just unmap the tail */
			uvm_unmap(oldmap, oldva + newsize, oldva + oldsize);
			newva = oldva;
			goto done;
		}
		va = oldva + oldsize;
		if (uvm_map_reserve(oldmap, newsize - oldsize, 0, 0, &va,
		    UVM_FLAG_FIXED)) {
			/* grow in place: the space after us was free */
			newva = oldva;
			goto extend;
		}
		if (fixed) {
			return ENOMEM;
		}
	}

	/*
	 * we need to move mappings.
	 */

	if (!fixed) {
		KASSERT(&newproc->p_vmspace->vm_map == newmap);
		newva = newproc->p_emul->e_vm_default_addr(newproc,
		    (vaddr_t)newproc->p_vmspace->vm_daddr, newsize);
	}
	dstva = newva;
	if (!uvm_map_reserve(newmap, newsize, oldva, align, &dstva,
	    fixed ? UVM_FLAG_FIXED : 0)) {
		return ENOMEM;
	}
	KASSERT(!fixed || dstva == newva);
	newva = dstva;
	movesize = MIN(oldsize, newsize);
	error = uvm_map_extract(oldmap, oldva, movesize, newmap, &dstva,
	    UVM_EXTRACT_RESERVED);
	KASSERT(dstva == newva);
	if (error != 0) {
		/*
		 * undo uvm_map_reserve.
		 */
		uvm_unmap(newmap, newva, newva + newsize);
		return error;
	}
	if (newsize > oldsize) {
extend:
		error = uvm_mapent_extend(newmap, newva + oldsize,
		    newsize - oldsize);
		if (error != 0) {
			/*
			 * undo uvm_map_reserve and uvm_map_extract.
			 */
			if (newva == oldva && newmap == oldmap) {
				/* in-place extend: only undo the grown part */
				uvm_unmap(newmap, newva + oldsize,
				    newva + newsize);
			} else {
				uvm_unmap(newmap, newva, newva + newsize);
			}
			return error;
		}
	}

	/*
	 * remove original entries unless we did in-place extend.
	 */

	if (oldva != newva || oldmap != newmap) {
		uvm_unmap(oldmap, oldva, oldva + oldsize);
	}
done:
	*newvap = newva;
	return 0;
}
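
/*
 * Illustrative sketch (not from the original source) of the two grow
 * paths above, for a 2-page mapping grown to 3 pages:
 *
 *	in place:  [A][B]....   ->  [A][B][C].   (reserve after, extend)
 *	moved:     [A][B][X]..  ->  ..[A][B][C]  (reserve elsewhere,
 *	           extract the old pages into the reservation, extend,
 *	           then unmap the original)
 */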

/*
 * sys_mremap: mremap system call.
 */

int
sys_mremap(struct lwp *l, const struct sys_mremap_args *uap, register_t *retval)
{
	/* {
		syscallarg(void *) old_address;
		syscallarg(size_t) old_size;
		syscallarg(void *) new_address;
		syscallarg(size_t) new_size;
		syscallarg(int) flags;
	} */

	struct proc *p;
	struct vm_map *map;
	vaddr_t oldva;
	vaddr_t newva;
	size_t oldsize;
	size_t newsize;
	int flags;
	int error;

	flags = SCARG(uap, flags);
	oldva = (vaddr_t)SCARG(uap, old_address);
	oldsize = (vsize_t)(SCARG(uap, old_size));
	newva = (vaddr_t)SCARG(uap, new_address);
	newsize = (vsize_t)(SCARG(uap, new_size));

	if ((flags & ~(MAP_FIXED | MAP_ALIGNMENT_MASK)) != 0) {
		error = EINVAL;
		goto done;
	}

	oldsize = round_page(oldsize);
	newsize = round_page(newsize);

	p = l->l_proc;
	map = &p->p_vmspace->vm_map;
	error = uvm_mremap(map, oldva, oldsize, map, &newva, newsize, p,
	    flags);

done:
	*retval = (error != 0) ? 0 : (register_t)newva;
	return error;
}
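
/*
 * Minimal userland usage sketch (an illustrative assumption, not part
 * of this file): doubling an anonymous mapping with NetBSD's
 * mremap(2), which takes both the old and new address/size pairs and
 * returns the new address, or MAP_FAILED on error.
 *
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	size_t pg = (size_t)sysconf(_SC_PAGESIZE);
 *	void *p = mmap(NULL, 2 * pg, PROT_READ | PROT_WRITE,
 *	    MAP_ANON | MAP_PRIVATE, -1, 0);
 *	void *q = mremap(p, 2 * pg, NULL, 4 * pg, 0);
 */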