/*
 * (C) Copyright 1996 Linus Torvalds
 */

#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/swap.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>

extern int vm_enough_memory(long pages);

/* Walk the page tables and return the pte mapping addr, or NULL if none. */
static inline pte_t *get_one_pte(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, addr);
        if (pgd_none(*pgd))
                goto end;
        if (pgd_bad(*pgd)) {
                pgd_ERROR(*pgd);
                pgd_clear(pgd);
                goto end;
        }

        pmd = pmd_offset(pgd, addr);
        if (pmd_none(*pmd))
                goto end;
        if (pmd_bad(*pmd)) {
                pmd_ERROR(*pmd);
                pmd_clear(pmd);
                goto end;
        }

        pte = pte_offset(pmd, addr);
        if (pte_none(*pte))
                pte = NULL;
end:
        return pte;
}

/* Make sure a pte exists for the destination address, allocating
 * intermediate page-table pages as needed.  NULL on allocation failure. */
static inline pte_t *alloc_one_pte(struct mm_struct *mm, unsigned long addr)
{
        pmd_t *pmd;
        pte_t *pte = NULL;

        pmd = pmd_alloc(pgd_offset(mm, addr), addr);
        if (pmd)
                pte = pte_alloc(pmd, addr);
        return pte;
}

/* Move one pte from src to dst under the page-table lock.  Returns
 * nonzero if dst was missing and the entry had to be put back. */
static inline int copy_one_pte(struct mm_struct *mm, pte_t *src, pte_t *dst)
{
        int error = 0;
        pte_t pte;

        spin_lock(&mm->page_table_lock);
        if (!pte_none(*src)) {
                pte = ptep_get_and_clear(src);
                if (!dst) {
                        /* No dest? We must put it back. */
                        dst = src;
                        error++;
                }
                set_pte(dst, pte);
        }
        spin_unlock(&mm->page_table_lock);
        return error;
}

/* Move the pte for a single page from old_addr to new_addr. */
static int move_one_page(struct mm_struct *mm, unsigned long old_addr, unsigned long new_addr)
{
        int error = 0;
        pte_t *src;

        src = get_one_pte(mm, old_addr);
        if (src)
                error = copy_one_pte(mm, src, alloc_one_pte(mm, new_addr));
        return error;
}

static int move_page_tables(struct mm_struct *mm,
        unsigned long new_addr, unsigned long old_addr, unsigned long len)
{
        unsigned long offset = len;

        flush_cache_range(mm, old_addr, old_addr + len);

        /*
         * This is not the clever way to do this, but we're taking the
         * easy way out on the assumption that most remappings will be
         * only a few pages.. This also makes error recovery easier.
         */
        while (offset) {
                offset -= PAGE_SIZE;
                if (move_one_page(mm, old_addr + offset, new_addr + offset))
                        goto oops_we_failed;
        }
        flush_tlb_range(mm, old_addr, old_addr + len);
        return 0;

        /*
         * Ok, the move failed because we didn't have enough pages for
         * the new page table tree. This is unlikely, but we have to
         * take the possibility into account. In that case we just move
         * all the pages back (this will work, because we still have
         * the old page tables)
         */
oops_we_failed:
        flush_cache_range(mm, new_addr, new_addr + len);
        while ((offset += PAGE_SIZE) < len)
                move_one_page(mm, new_addr + offset, old_addr + offset);
        zap_page_range(mm, new_addr, len);
        flush_tlb_range(mm, new_addr, new_addr + len);
        return -1;
}

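/*
 * Illustrative walk-through, not part of the original source: suppose len
 * is four pages and move_one_page() fails at offset PAGE_SIZE.  The forward
 * loop has already moved the entries at offsets 3*PAGE_SIZE and 2*PAGE_SIZE,
 * and copy_one_pte() put the failing entry itself back in place.  The
 * recovery loop then resumes at offset + PAGE_SIZE = 2*PAGE_SIZE and moves
 * those two relocated entries back, so the old page tables are intact again
 * before zap_page_range() cleans out whatever is left in the new range.
 */
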
/* Copy the vma to its new location, move the page tables across, and
 * unmap the old range.  Returns the new address, or -ENOMEM on failure. */
static inline unsigned long move_vma(struct vm_area_struct *vma,
        unsigned long addr, unsigned long old_len, unsigned long new_len,
        unsigned long new_addr)
{
        struct vm_area_struct *new_vma;

        new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (new_vma) {
                if (!move_page_tables(current->mm, new_addr, addr, old_len)) {
                        *new_vma = *vma;
                        new_vma->vm_start = new_addr;
                        new_vma->vm_end = new_addr + new_len;
                        new_vma->vm_pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
                        new_vma->vm_raend = 0;
                        if (new_vma->vm_file)
                                get_file(new_vma->vm_file);
                        if (new_vma->vm_ops && new_vma->vm_ops->open)
                                new_vma->vm_ops->open(new_vma);
                        insert_vm_struct(current->mm, new_vma);
                        do_munmap(current->mm, addr, old_len);
                        current->mm->total_vm += new_len >> PAGE_SHIFT;
                        if (new_vma->vm_flags & VM_LOCKED) {
                                current->mm->locked_vm += new_len >> PAGE_SHIFT;
                                make_pages_present(new_vma->vm_start,
                                                   new_vma->vm_end);
                        }
                        return new_addr;
                }
                kmem_cache_free(vm_area_cachep, new_vma);
        }
        return -ENOMEM;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */

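/*
 * For illustration only, not part of the original file: a typical
 * userspace call that reaches this function through sys_mremap().
 * With glibc, _GNU_SOURCE is needed for the MREMAP_* flags:
 *
 *      void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *      void *q = mremap(p, 4096, 2 * 4096, MREMAP_MAYMOVE);
 *      if (q == MAP_FAILED)
 *              perror("mremap");
 */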
unsigned long do_mremap(unsigned long addr,
        unsigned long old_len, unsigned long new_len,
        unsigned long flags, unsigned long new_addr)
{
        struct vm_area_struct *vma;
        unsigned long ret = -EINVAL;

        if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
                goto out;

        if (addr & ~PAGE_MASK)
                goto out;

        old_len = PAGE_ALIGN(old_len);
        new_len = PAGE_ALIGN(new_len);

        /* new_addr is only valid if MREMAP_FIXED is specified */
        if (flags & MREMAP_FIXED) {
                if (new_addr & ~PAGE_MASK)
                        goto out;
                if (!(flags & MREMAP_MAYMOVE))
                        goto out;

                if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
                        goto out;

                /* Check if the location we're moving into overlaps the
                 * old location at all, and fail if it does.
                 */
                if ((new_addr <= addr) && (new_addr + new_len) > addr)
                        goto out;

                if ((addr <= new_addr) && (addr + old_len) > new_addr)
                        goto out;

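                /*
                 * Worked example for the two overlap checks above
                 * (illustrative): with addr = 0x1000, old_len = 0x3000
                 * and new_addr = 0x2000, the second test fires because
                 * addr <= new_addr and addr + old_len = 0x4000 > new_addr,
                 * so the request is rejected before any pages are touched.
                 */
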
                do_munmap(current->mm, new_addr, new_len);
        }

        /*
         * Always allow a shrinking remap: that just unmaps
         * the unnecessary pages..
         */
        ret = addr;
        if (old_len >= new_len) {
                do_munmap(current->mm, addr + new_len, old_len - new_len);
                if (!(flags & MREMAP_FIXED) || (new_addr == addr))
                        goto out;
        }

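        /*
         * Illustrative example, assuming 4 KiB pages: mremap(addr, 8192,
         * 4096, 0) takes the branch above, unmaps [addr + 4096, addr + 8192)
         * and returns addr unchanged.
         */
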
        /*
         * Ok, we need to grow..  or relocate.
         */
        ret = -EFAULT;
        vma = find_vma(current->mm, addr);
        if (!vma || vma->vm_start > addr)
                goto out;
        /* We can't remap across vm area boundaries */
        if (old_len > vma->vm_end - addr)
                goto out;
        if (vma->vm_flags & VM_DONTEXPAND) {
                if (new_len > old_len)
                        goto out;
        }
        if (vma->vm_flags & VM_LOCKED) {
                unsigned long locked = current->mm->locked_vm << PAGE_SHIFT;
                locked += new_len - old_len;
                ret = -EAGAIN;
                if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
                        goto out;
        }
        ret = -ENOMEM;
        if ((current->mm->total_vm << PAGE_SHIFT) + (new_len - old_len)
            > current->rlim[RLIMIT_AS].rlim_cur)
                goto out;
        /* Private writable mapping? Check memory availability.. */
        if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE &&
            !(flags & MAP_NORESERVE) &&
            !vm_enough_memory((new_len - old_len) >> PAGE_SHIFT))
                goto out;

        /* old_len exactly to the end of the area..
         * And we're not relocating the area.
         */
        if (old_len == vma->vm_end - addr &&
            !((flags & MREMAP_FIXED) && (addr != new_addr)) &&
            (old_len != new_len || !(flags & MREMAP_MAYMOVE))) {
                unsigned long max_addr = TASK_SIZE;
                if (vma->vm_next)
                        max_addr = vma->vm_next->vm_start;
                /* can we just expand the current mapping? */
                if (max_addr - addr >= new_len) {
                        int pages = (new_len - old_len) >> PAGE_SHIFT;
                        spin_lock(&vma->vm_mm->page_table_lock);
                        vma->vm_end = addr + new_len;
                        spin_unlock(&vma->vm_mm->page_table_lock);
                        current->mm->total_vm += pages;
                        if (vma->vm_flags & VM_LOCKED) {
                                current->mm->locked_vm += pages;
                                make_pages_present(addr + old_len,
                                                   addr + new_len);
                        }
                        ret = addr;
                        goto out;
                }
        }

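        /*
         * Illustrative example for the expansion test above: if the next
         * vma starts 0x5000 bytes past addr and new_len is 0x4000, then
         * max_addr - addr >= new_len holds and the vma is grown in place,
         * with no page-table copying at all.
         */
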
        /*
         * We weren't able to just expand or shrink the area,
         * we need to create a new one and move it..
         */
        ret = -ENOMEM;
        if (flags & MREMAP_MAYMOVE) {
                if (!(flags & MREMAP_FIXED)) {
                        new_addr = get_unmapped_area(0, new_len);
                        if (!new_addr)
                                goto out;
                }
                ret = move_vma(vma, addr, old_len, new_len, new_addr);
        }
out:
        return ret;
}

/* Syscall entry point: take the mm semaphore and let do_mremap() work. */
asmlinkage unsigned long sys_mremap(unsigned long addr,
        unsigned long old_len, unsigned long new_len,
        unsigned long flags, unsigned long new_addr)
{
        unsigned long ret;

        down(&current->mm->mmap_sem);
        ret = do_mremap(addr, old_len, new_len, flags, new_addr);
        up(&current->mm->mmap_sem);
        return ret;
}