/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/sched/sysctl.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"
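/*
 * Walk the existing page tables at the source address.  Returns NULL if
 * any level (pgd, pud, pmd) is not present, so callers can simply skip
 * ranges that were never populated.
 */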
static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	pud = pud_offset(pgd, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}
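/*
 * Allocate page-table levels down to the pmd for the destination address.
 * The pte level is allocated later, via __pte_alloc() in move_page_tables(),
 * only when it is actually needed.
 */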
static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}
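/*
 * Note: the soft-dirty bit is reported to userspace via /proc/<pid>/pagemap
 * (used e.g. by checkpoint/restore tools), so it is carried over to the new
 * pte rather than being lost when a mapping is moved.
 */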
static pte_t move_soft_dirty_pte(pte_t pte)
{
	/*
	 * Set soft dirty bit so we can notice
	 * in userspace the ptes were moved.
	 */
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (pte_present(pte))
		pte = pte_mksoft_dirty(pte);
	else if (is_swap_pte(pte))
		pte = pte_swp_mksoft_dirty(pte);
#endif
	return pte;
}
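/*
 * move_ptes() moves at most one pmd's worth of ptes from the old location
 * to the new one, holding both pte locks across the copy.  TLB flushing is
 * left to the caller, move_page_tables(), which flushes the moved source
 * range once at the end.
 */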
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr, bool need_rmap_locks)
{
	struct address_space *mapping = NULL;
	struct anon_vma *anon_vma = NULL;
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;

	/*
	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
	 * locks to ensure that rmap will always observe either the old or the
	 * new ptes. This is the easiest way to avoid races with
	 * truncate_pagecache(), page migration, etc...
	 *
	 * When need_rmap_locks is false, we use other ways to avoid
	 * such races:
	 *
	 * - During exec() shift_arg_pages(), we use a specially tagged vma
	 *   which rmap call sites look for using is_vma_temporary_stack().
	 *
	 * - During mremap(), new_vma is often known to be placed after vma
	 *   in rmap traversal order. This ensures rmap will always observe
	 *   either the old pte, or the new pte, or both (the page table locks
	 *   serialize access to individual ptes, but only rmap traversal
	 *   order guarantees that we won't miss both the old and new ptes).
	 */
	if (need_rmap_locks) {
		if (vma->vm_file) {
			mapping = vma->vm_file->f_mapping;
			i_mmap_lock_write(mapping);
		}
		if (vma->anon_vma) {
			anon_vma = vma->anon_vma;
			anon_vma_lock_write(anon_vma);
		}
	}

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_sem prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	new_pte = pte_offset_map(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;
		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		pte = move_soft_dirty_pte(pte);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (anon_vma)
		anon_vma_unlock_write(anon_vma);
	if (mapping)
		i_mmap_unlock_write(mapping);
}
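/*
 * Cap on how much is moved per move_ptes() call, so that the pte locks and
 * the lazy-MMU section are not held for too long at a time.
 */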
#define LATENCY_LIMIT	(64 * PAGE_SIZE)
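/*
 * Returns the number of bytes actually moved, which may be less than 'len'
 * if a destination page table could not be allocated.  move_vma() compares
 * the return value against old_len to detect a partial move and roll back.
 */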
unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks)
{
	unsigned long extent, next, old_end;
	pmd_t *old_pmd, *new_pmd;
	bool need_flush = false;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	old_end = old_addr + len;
	flush_cache_range(vma, old_addr, old_end);

	mmun_start = old_addr;
	mmun_end   = old_end;
	mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		next = (old_addr + PMD_SIZE) & PMD_MASK;
		/* even if next overflowed, extent below will be ok */
		extent = next - old_addr;
		if (extent > old_end - old_addr)
			extent = old_end - old_addr;
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
		if (pmd_trans_huge(*old_pmd)) {
			int err = 0;
			if (extent == HPAGE_PMD_SIZE) {
				VM_BUG_ON_VMA(vma->vm_file || !vma->anon_vma,
					      vma);
				/* See comment in move_ptes() */
				if (need_rmap_locks)
					anon_vma_lock_write(vma->anon_vma);
				err = move_huge_pmd(vma, new_vma, old_addr,
						    new_addr, old_end,
						    old_pmd, new_pmd);
				if (need_rmap_locks)
					anon_vma_unlock_write(vma->anon_vma);
			}
			if (err > 0) {
				need_flush = true;
				continue;
			} else if (!err) {
				split_huge_page_pmd(vma, old_addr, old_pmd);
			}
			VM_BUG_ON(pmd_trans_huge(*old_pmd));
		}
		if (pmd_none(*new_pmd) && __pte_alloc(new_vma->vm_mm, new_vma,
						      new_pmd, new_addr))
			break;
		next = (new_addr + PMD_SIZE) & PMD_MASK;
		if (extent > next - new_addr)
			extent = next - new_addr;
		if (extent > LATENCY_LIMIT)
			extent = LATENCY_LIMIT;
		move_ptes(vma, old_pmd, old_addr, old_addr + extent,
			  new_vma, new_pmd, new_addr, need_rmap_locks);
		need_flush = true;
	}
	if (likely(need_flush))
		flush_tlb_range(vma, old_end-len, old_addr);

	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);

	return len + old_addr - old_end;	/* how much done */
}
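/*
 * move_vma() creates the destination vma with copy_vma(), moves the page
 * tables across, and only then unmaps the old range with do_munmap().  If
 * the move fails part-way, the entries already moved are copied back and
 * the new range is unmapped instead, so the address space stays consistent.
 */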
static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr, bool *locked)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;
	int err;
	bool need_rmap_locks;

	/*
	 * We'd prefer to avoid failure later on in do_munmap:
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped.  But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
						MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
			   &need_rmap_locks);
	if (!new_vma)
		return -ENOMEM;

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
				     need_rmap_locks);
	if (moved_len < old_len) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
				 true);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = -ENOMEM;
	} else if (vma->vm_file && vma->vm_file->f_op->mremap) {
		err = vma->vm_file->f_op->mremap(vma->vm_file, new_vma);
		if (err < 0) {
			move_page_tables(new_vma, new_addr, vma, old_addr,
					 moved_len, true);
			vma = new_vma;
			old_len = new_len;
			old_addr = new_addr;
			new_addr = err;
		}
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);

	if (do_munmap(mm, old_addr, old_len) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		vm_unacct_memory(excess >> PAGE_SHIFT);
		excess = 0;
	}
	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		*locked = true;
	}

	return new_addr;
}
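/*
 * Check that [addr, addr + old_len) lies within a single, resizable vma and
 * apply the mlock and commit-accounting limits for growing it to new_len.
 * On success, *p holds the number of pages charged under VM_ACCOUNT so the
 * caller can vm_unacct_memory() them again if the remap fails later on.
 */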
static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long *p)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = find_vma(mm, addr);

	if (!vma || vma->vm_start > addr)
		return ERR_PTR(-EFAULT);

	if (is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		return ERR_PTR(-EFAULT);

	/* Need to be careful about a growing mapping */
	if (new_len > old_len) {
		unsigned long pgoff;

		if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
			return ERR_PTR(-EFAULT);
		pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
		pgoff += vma->vm_pgoff;
		if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
			return ERR_PTR(-EINVAL);
	}

	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = mm->locked_vm << PAGE_SHIFT;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		locked += new_len - old_len;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			return ERR_PTR(-EAGAIN);
	}

	if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT))
		return ERR_PTR(-ENOMEM);

	if (vma->vm_flags & VM_ACCOUNT) {
		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
		if (security_vm_enough_memory_mm(mm, charged))
			return ERR_PTR(-ENOMEM);
		*p = charged;
	}

	return vma;
}
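/*
 * MREMAP_FIXED path: move the mapping to a caller-supplied new_addr.  A
 * destination that overlaps the source is rejected, and anything already
 * mapped at the destination is unmapped first.
 */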
static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
		unsigned long new_addr, unsigned long new_len, bool *locked)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	unsigned long map_flags;

	if (new_addr & ~PAGE_MASK)
		goto out;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		goto out;

	/* Check if the location we're moving into overlaps the
	 * old location at all, and fail if it does.
	 */
	if ((new_addr <= addr) && (new_addr+new_len) > addr)
		goto out;

	if ((addr <= new_addr) && (addr+old_len) > new_addr)
		goto out;

	ret = do_munmap(mm, new_addr, new_len);
	if (ret)
		goto out;

	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	map_flags = MAP_FIXED;
	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (ret & ~PAGE_MASK)
		goto out1;

	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked);
	if (!(ret & ~PAGE_MASK))
		goto out;
out1:
	vm_unacct_memory(charged);

out:
	return ret;
}
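/*
 * Can this vma grow in place by 'delta' bytes?  Only if the extended range
 * does not wrap, does not run into the following vma, and get_unmapped_area()
 * accepts the enlarged mapping at the same start address.
 */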
static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;
	if (end < vma->vm_end) /* overflow */
		return 0;
	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}
/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	bool locked = false;

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
		return ret;

	if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
		return ret;

	if (addr & ~PAGE_MASK)
		return ret;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		return ret;

	down_write(&current->mm->mmap_sem);

	if (flags & MREMAP_FIXED) {
		ret = mremap_to(addr, old_len, new_addr, new_len,
				&locked);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * do_munmap does all the needed commit accounting
	 */
	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		ret = addr;
		goto out;
	}

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* old_len exactly to the end of the area..
	 */
	if (old_len == vma->vm_end - addr) {
		/* can we just expand the current mapping? */
		if (vma_expandable(vma, new_len - old_len)) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			if (vma_adjust(vma, vma->vm_start, addr + new_len,
				       vma->vm_pgoff, NULL)) {
				ret = -ENOMEM;
				goto out;
			}

			vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				locked = true;
				new_addr = addr;
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;
		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (new_addr & ~PAGE_MASK) {
			ret = new_addr;
			goto out;
		}

		ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
	}
out:
	if (ret & ~PAGE_MASK)
		vm_unacct_memory(charged);
	up_write(&current->mm->mmap_sem);
	if (locked && new_len > old_len)
		mm_populate(new_addr + old_len, new_len - old_len);
	return ret;
}