// SPDX-License-Identifier: GPL-2.0
/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/mm-arch-hooks.h>
#include <linux/userfaultfd_k.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"
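
/*
 * Walk the existing page tables down to the PMD covering @addr.
 * Returns NULL if any level of the walk is missing or bad, in which
 * case there is nothing to move at that address.
 */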
static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset(mm, addr);
        if (pgd_none_or_clear_bad(pgd))
                return NULL;

        p4d = p4d_offset(pgd, addr);
        if (p4d_none_or_clear_bad(p4d))
                return NULL;

        pud = pud_offset(p4d, addr);
        if (pud_none_or_clear_bad(pud))
                return NULL;

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return NULL;

        return pmd;
}
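
/*
 * Allocate any missing page table levels so that a PMD entry exists for
 * @addr in the destination area.  Returns NULL on allocation failure.
 */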
static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
                            unsigned long addr)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset(mm, addr);
        p4d = p4d_alloc(mm, pgd, addr);
        if (!p4d)
                return NULL;
        pud = pud_alloc(mm, p4d, addr);
        if (!pud)
                return NULL;

        pmd = pmd_alloc(mm, pud, addr);
        if (!pmd)
                return NULL;

        VM_BUG_ON(pmd_trans_huge(*pmd));

        return pmd;
}
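
/*
 * Take (and later drop) the rmap locks covering this VMA so that rmap
 * walkers cannot run concurrently with the page table move.
 */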
static void take_rmap_locks(struct vm_area_struct *vma)
{
        if (vma->vm_file)
                i_mmap_lock_write(vma->vm_file->f_mapping);
        if (vma->anon_vma)
                anon_vma_lock_write(vma->anon_vma);
}

static void drop_rmap_locks(struct vm_area_struct *vma)
{
        if (vma->anon_vma)
                anon_vma_unlock_write(vma->anon_vma);
        if (vma->vm_file)
                i_mmap_unlock_write(vma->vm_file->f_mapping);
}

static pte_t move_soft_dirty_pte(pte_t pte)
{
        /*
         * Set soft dirty bit so we can notice
         * in userspace the ptes were moved.
         */
#ifdef CONFIG_MEM_SOFT_DIRTY
        if (pte_present(pte))
                pte = pte_mksoft_dirty(pte);
        else if (is_swap_pte(pte))
                pte = pte_swp_mksoft_dirty(pte);
#endif
        return pte;
}
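
/*
 * Move the PTEs covering [old_addr, old_end) under @old_pmd over to
 * @new_pmd at @new_addr, with both page table locks held.
 */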
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
                unsigned long old_addr, unsigned long old_end,
                struct vm_area_struct *new_vma, pmd_t *new_pmd,
                unsigned long new_addr, bool need_rmap_locks)
{
        struct mm_struct *mm = vma->vm_mm;
        pte_t *old_pte, *new_pte, pte;
        spinlock_t *old_ptl, *new_ptl;
        bool force_flush = false;
        unsigned long len = old_end - old_addr;

        /*
         * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
         * locks to ensure that rmap will always observe either the old or the
         * new ptes. This is the easiest way to avoid races with
         * truncate_pagecache(), page migration, etc...
         *
         * When need_rmap_locks is false, we use other ways to avoid
         * such races:
         *
         * - During exec() shift_arg_pages(), we use a specially tagged vma
         *   which rmap call sites look for using is_vma_temporary_stack().
         *
         * - During mremap(), new_vma is often known to be placed after vma
         *   in rmap traversal order. This ensures rmap will always observe
         *   either the old pte, or the new pte, or both (the page table locks
         *   serialize access to individual ptes, but only rmap traversal
         *   order guarantees that we won't miss both the old and new ptes).
         */
        if (need_rmap_locks)
                take_rmap_locks(vma);

        /*
         * We don't have to worry about the ordering of src and dst
         * pte locks because exclusive mmap_sem prevents deadlock.
         */
        old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
        new_pte = pte_offset_map(new_pmd, new_addr);
        new_ptl = pte_lockptr(mm, new_pmd);
        if (new_ptl != old_ptl)
                spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
        flush_tlb_batched_pending(vma->vm_mm);
        arch_enter_lazy_mmu_mode();

        for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
                                   new_pte++, new_addr += PAGE_SIZE) {
                if (pte_none(*old_pte))
                        continue;

                pte = ptep_get_and_clear(mm, old_addr, old_pte);
                /*
                 * If we are remapping a valid PTE, make sure
                 * to flush TLB before we drop the PTL for the
                 * PTE.
                 *
                 * NOTE! Both old and new PTL matter: the old one
                 * for racing with page_mkclean(), the new one to
                 * make sure the physical page stays valid until
                 * the TLB entry for the old mapping has been
                 * flushed.
                 */
                if (pte_present(pte))
                        force_flush = true;
                pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
                pte = move_soft_dirty_pte(pte);
                set_pte_at(mm, new_addr, new_pte, pte);
        }

        arch_leave_lazy_mmu_mode();
        if (force_flush)
                flush_tlb_range(vma, old_end - len, old_end);
        if (new_ptl != old_ptl)
                spin_unlock(new_ptl);
        pte_unmap(new_pte - 1);
        pte_unmap_unlock(old_pte - 1, old_ptl);
        if (need_rmap_locks)
                drop_rmap_locks(vma);
}
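
/*
 * On architectures that select CONFIG_HAVE_MOVE_PMD, a PMD-aligned,
 * PMD-sized extent can be moved by updating a single PMD entry instead
 * of copying the PTEs one by one.
 */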
#ifdef CONFIG_HAVE_MOVE_PMD
static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
                  unsigned long new_addr, unsigned long old_end,
                  pmd_t *old_pmd, pmd_t *new_pmd)
{
        spinlock_t *old_ptl, *new_ptl;
        struct mm_struct *mm = vma->vm_mm;
        pmd_t pmd;

        if ((old_addr & ~PMD_MASK) || (new_addr & ~PMD_MASK)
            || old_end - old_addr < PMD_SIZE)
                return false;

        /*
         * The destination pmd shouldn't be established, free_pgtables()
         * should have released it.
         */
        if (WARN_ON(!pmd_none(*new_pmd)))
                return false;

        /*
         * We don't have to worry about the ordering of src and dst
         * ptlocks because exclusive mmap_sem prevents deadlock.
         */
        old_ptl = pmd_lock(vma->vm_mm, old_pmd);
        new_ptl = pmd_lockptr(mm, new_pmd);
        if (new_ptl != old_ptl)
                spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

        /* Clear the pmd */
        pmd = *old_pmd;
        pmd_clear(old_pmd);

        VM_BUG_ON(!pmd_none(*new_pmd));

        /* Set the new pmd */
        set_pmd_at(mm, new_addr, new_pmd, pmd);
        flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
        if (new_ptl != old_ptl)
                spin_unlock(new_ptl);
        spin_unlock(old_ptl);

        return true;
}
#endif
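
/*
 * Move @len bytes of page tables from @old_addr in @vma to @new_addr in
 * @new_vma, one PMD-sized extent at a time.  Returns the number of bytes
 * actually moved, which may be less than @len on allocation failure.
 */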
unsigned long move_page_tables(struct vm_area_struct *vma,
                unsigned long old_addr, struct vm_area_struct *new_vma,
                unsigned long new_addr, unsigned long len,
                bool need_rmap_locks)
{
        unsigned long extent, next, old_end;
        struct mmu_notifier_range range;
        pmd_t *old_pmd, *new_pmd;

        old_end = old_addr + len;
        flush_cache_range(vma, old_addr, old_end);

        mmu_notifier_range_init(&range, vma->vm_mm, old_addr, old_end);
        mmu_notifier_invalidate_range_start(&range);

        for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
                cond_resched();
                next = (old_addr + PMD_SIZE) & PMD_MASK;
                /* even if next overflowed, extent below will be ok */
                extent = next - old_addr;
                if (extent > old_end - old_addr)
                        extent = old_end - old_addr;
                old_pmd = get_old_pmd(vma->vm_mm, old_addr);
                if (!old_pmd)
                        continue;
                new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
                if (!new_pmd)
                        break;
                if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd)) {
                        if (extent == HPAGE_PMD_SIZE) {
                                bool moved;

                                /* See comment in move_ptes() */
                                if (need_rmap_locks)
                                        take_rmap_locks(vma);
                                moved = move_huge_pmd(vma, old_addr, new_addr,
                                                      old_end, old_pmd, new_pmd);
                                if (need_rmap_locks)
                                        drop_rmap_locks(vma);
                                if (moved)
                                        continue;
                        }
                        split_huge_pmd(vma, old_pmd, old_addr);
                        if (pmd_trans_unstable(old_pmd))
                                continue;
                } else if (extent == PMD_SIZE) {
#ifdef CONFIG_HAVE_MOVE_PMD
                        /*
                         * If the extent is PMD-sized, try to speed the move by
                         * moving at the PMD level if possible.
                         */
                        bool moved;

                        if (need_rmap_locks)
                                take_rmap_locks(vma);
                        moved = move_normal_pmd(vma, old_addr, new_addr,
                                        old_end, old_pmd, new_pmd);
                        if (need_rmap_locks)
                                drop_rmap_locks(vma);
                        if (moved)
                                continue;
#endif
                }

                if (pte_alloc(new_vma->vm_mm, new_pmd))
                        break;
                next = (new_addr + PMD_SIZE) & PMD_MASK;
                if (extent > next - new_addr)
                        extent = next - new_addr;
                move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
                          new_pmd, new_addr, need_rmap_locks);
        }

        mmu_notifier_invalidate_range_end(&range);

        return len + old_addr - old_end;        /* how much done */
}
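
/*
 * Move a mapping to a new address: set up the new VMA, move the page
 * tables across, fix up the accounting and then unmap the old range.
 */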
static unsigned long move_vma(struct vm_area_struct *vma,
                unsigned long old_addr, unsigned long old_len,
                unsigned long new_len, unsigned long new_addr,
                bool *locked, struct vm_userfaultfd_ctx *uf,
                struct list_head *uf_unmap)
{
        struct mm_struct *mm = vma->vm_mm;
        struct vm_area_struct *new_vma;
        unsigned long vm_flags = vma->vm_flags;
        unsigned long new_pgoff;
        unsigned long moved_len;
        unsigned long excess = 0;
        unsigned long hiwater_vm;
        int split = 0;
        int err;
        bool need_rmap_locks;

        /*
         * We'd prefer to avoid failure later on in do_munmap:
         * which may split one vma into three before unmapping.
         */
        if (mm->map_count >= sysctl_max_map_count - 3)
                return -ENOMEM;

        /*
         * Advise KSM to break any KSM pages in the area to be moved:
         * it would be confusing if they were to turn up at the new
         * location, where they happen to coincide with different KSM
         * pages recently unmapped.  But leave vma->vm_flags as it was,
         * so KSM can come around to merge on vma and new_vma afterwards.
         */
        err = ksm_madvise(vma, old_addr, old_addr + old_len,
                                                MADV_UNMERGEABLE, &vm_flags);
        if (err)
                return err;

        new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
        new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
                           &need_rmap_locks);
        if (!new_vma)
                return -ENOMEM;

        moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
                                     need_rmap_locks);
        if (moved_len < old_len) {
                err = -ENOMEM;
        } else if (vma->vm_ops && vma->vm_ops->mremap) {
                err = vma->vm_ops->mremap(new_vma);
        }

        if (unlikely(err)) {
                /*
                 * On error, move entries back from new area to old,
                 * which will succeed since page tables still there,
                 * and then proceed to unmap new area instead of old.
                 */
                move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
                                 true);
                vma = new_vma;
                old_len = new_len;
                old_addr = new_addr;
                new_addr = err;
        } else {
                mremap_userfaultfd_prep(new_vma, uf);
                arch_remap(mm, old_addr, old_addr + old_len,
                           new_addr, new_addr + new_len);
        }

        /* Conceal VM_ACCOUNT so old reservation is not undone */
        if (vm_flags & VM_ACCOUNT) {
                vma->vm_flags &= ~VM_ACCOUNT;
                excess = vma->vm_end - vma->vm_start - old_len;
                if (old_addr > vma->vm_start &&
                    old_addr + old_len < vma->vm_end)
                        split = 1;
        }

        /*
         * If we failed to move page tables we still do total_vm increment
         * since do_munmap() will decrement it by old_len == new_len.
         *
         * Since total_vm is about to be raised artificially high for a
         * moment, we need to restore high watermark afterwards: if stats
         * are taken meanwhile, total_vm and hiwater_vm appear too high.
         * If this were a serious issue, we'd add a flag to do_munmap().
         */
        hiwater_vm = mm->hiwater_vm;
        vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);

        /* Tell pfnmap has moved from this vma */
        if (unlikely(vma->vm_flags & VM_PFNMAP))
                untrack_pfn_moved(vma);

        if (do_munmap(mm, old_addr, old_len, uf_unmap) < 0) {
                /* OOM: unable to split vma, just get accounts right */
                vm_unacct_memory(excess >> PAGE_SHIFT);
                excess = 0;
        }
        mm->hiwater_vm = hiwater_vm;

        /* Restore VM_ACCOUNT if one or two pieces of vma left */
        if (excess) {
                vma->vm_flags |= VM_ACCOUNT;
                if (split)
                        vma->vm_next->vm_flags |= VM_ACCOUNT;
        }

        if (vm_flags & VM_LOCKED) {
                mm->locked_vm += new_len >> PAGE_SHIFT;
                *locked = true;
        }

        return new_addr;
}
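
/*
 * Look up the VMA at @addr and check that it can be remapped to @new_len,
 * charging the extra pages to *p when the mapping is VM_ACCOUNT'ed.
 */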
static struct vm_area_struct *vma_to_resize(unsigned long addr,
        unsigned long old_len, unsigned long new_len, unsigned long *p)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma = find_vma(mm, addr);
        unsigned long pgoff;

        if (!vma || vma->vm_start > addr)
                return ERR_PTR(-EFAULT);

        /*
         * !old_len is a special case where an attempt is made to 'duplicate'
         * a mapping.  This makes no sense for private mappings as it will
         * instead create a fresh/new mapping unrelated to the original.  This
         * is contrary to the basic idea of mremap which creates new mappings
         * based on the original.  There are no known use cases for this
         * behavior.  As a result, fail such attempts.
         */
        if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
                pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap. This is not supported.\n", current->comm, current->pid);
                return ERR_PTR(-EINVAL);
        }

        if (is_vm_hugetlb_page(vma))
                return ERR_PTR(-EINVAL);

        /* We can't remap across vm area boundaries */
        if (old_len > vma->vm_end - addr)
                return ERR_PTR(-EFAULT);

        if (new_len == old_len)
                return vma;

        /* Need to be careful about a growing mapping */
        pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
        pgoff += vma->vm_pgoff;
        if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
                return ERR_PTR(-EINVAL);

        if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
                return ERR_PTR(-EFAULT);

        if (vma->vm_flags & VM_LOCKED) {
                unsigned long locked, lock_limit;

                locked = mm->locked_vm << PAGE_SHIFT;
                lock_limit = rlimit(RLIMIT_MEMLOCK);
                locked += new_len - old_len;
                if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                        return ERR_PTR(-EAGAIN);
        }

        if (!may_expand_vm(mm, vma->vm_flags,
                                (new_len - old_len) >> PAGE_SHIFT))
                return ERR_PTR(-ENOMEM);

        if (vma->vm_flags & VM_ACCOUNT) {
                unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;

                if (security_vm_enough_memory_mm(mm, charged))
                        return ERR_PTR(-ENOMEM);
                *p = charged;
        }

        return vma;
}
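
/*
 * Handle MREMAP_FIXED: move the mapping to the caller-supplied @new_addr,
 * unmapping whatever was previously there.
 */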
static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
                unsigned long new_addr, unsigned long new_len, bool *locked,
                struct vm_userfaultfd_ctx *uf,
                struct list_head *uf_unmap_early,
                struct list_head *uf_unmap)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long ret = -EINVAL;
        unsigned long charged = 0;
        unsigned long map_flags;

        if (offset_in_page(new_addr))
                goto out;

        if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
                goto out;

        /* Ensure the old/new locations do not overlap */
        if (addr + old_len > new_addr && new_addr + new_len > addr)
                goto out;

        /*
         * move_vma() need us to stay 4 maps below the threshold, otherwise
         * it will bail out at the very beginning.
         * That is a problem if we have already unmaped the regions here
         * (new_addr, and old_addr), because userspace will not know the
         * state of the vma's after it gets -ENOMEM.
         * So, to avoid such scenario we can pre-compute if the whole
         * operation has high chances to success map-wise.
         * Worst-scenario case is when both vma's (new_addr and old_addr) get
         * split in 3 before unmaping it.
         * That means 2 more maps (1 for each) to the ones we already hold.
         * Check whether current map count plus 2 still leads us to 4 maps below
         * the threshold, otherwise return -ENOMEM here to be more safe.
         */
        if ((mm->map_count + 2) >= sysctl_max_map_count - 3)
                return -ENOMEM;

        ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
        if (ret)
                goto out;

        if (old_len >= new_len) {
                ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap);
                if (ret && old_len != new_len)
                        goto out;
                old_len = new_len;
        }

        vma = vma_to_resize(addr, old_len, new_len, &charged);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto out;
        }

        map_flags = MAP_FIXED;
        if (vma->vm_flags & VM_MAYSHARE)
                map_flags |= MAP_SHARED;

        ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
                                ((addr - vma->vm_start) >> PAGE_SHIFT),
                                map_flags);
        if (offset_in_page(ret))
                goto out1;

        ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, uf,
                       uf_unmap);
        if (!(offset_in_page(ret)))
                goto out;
out1:
        vm_unacct_memory(charged);

out:
        return ret;
}
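
/*
 * Check whether the VMA can be grown in place by @delta bytes without
 * overflowing or running into the next mapping.
 */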
static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
        unsigned long end = vma->vm_end + delta;

        if (end < vma->vm_end) /* overflow */
                return 0;
        if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
                return 0;
        if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
                              0, MAP_FIXED) & ~PAGE_MASK)
                return 0;
        return 1;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
                unsigned long, new_len, unsigned long, flags,
                unsigned long, new_addr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long ret = -EINVAL;
        unsigned long charged = 0;
        bool locked = false;
        bool downgraded = false;
        struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
        LIST_HEAD(uf_unmap_early);
        LIST_HEAD(uf_unmap);

        if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
                return ret;

        if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
                return ret;

        if (offset_in_page(addr))
                return ret;

        old_len = PAGE_ALIGN(old_len);
        new_len = PAGE_ALIGN(new_len);

        /*
         * We allow a zero old-len as a special case
         * for DOS-emu "duplicate shm area" thing. But
         * a zero new-len is nonsensical.
         */
        if (!new_len)
                return ret;

        if (down_write_killable(&current->mm->mmap_sem))
                return -EINTR;

        if (flags & MREMAP_FIXED) {
                ret = mremap_to(addr, old_len, new_addr, new_len,
                                &locked, &uf, &uf_unmap_early, &uf_unmap);
                goto out;
        }

        /*
         * Always allow a shrinking remap: that just unmaps
         * the unnecessary pages..
         * __do_munmap does all the needed commit accounting, and
         * downgrades mmap_sem to read if so directed.
         */
        if (old_len >= new_len) {
                int retval;

                retval = __do_munmap(mm, addr+new_len, old_len - new_len,
                                     &uf_unmap, true);
                if (retval < 0 && old_len != new_len) {
                        ret = retval;
                        goto out;
                /* Returning 1 indicates mmap_sem is downgraded to read. */
                } else if (retval == 1)
                        downgraded = true;
                ret = addr;
                goto out;
        }

        /*
         * Ok, we need to grow..
         */
        vma = vma_to_resize(addr, old_len, new_len, &charged);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto out;
        }

        /* old_len exactly to the end of the area..
         */
        if (old_len == vma->vm_end - addr) {
                /* can we just expand the current mapping? */
                if (vma_expandable(vma, new_len - old_len)) {
                        int pages = (new_len - old_len) >> PAGE_SHIFT;

                        if (vma_adjust(vma, vma->vm_start, addr + new_len,
                                       vma->vm_pgoff, NULL)) {
                                ret = -ENOMEM;
                                goto out;
                        }

                        vm_stat_account(mm, vma->vm_flags, pages);
                        if (vma->vm_flags & VM_LOCKED) {
                                mm->locked_vm += pages;
                                locked = true;
                                new_addr = addr;
                        }
                        ret = addr;
                        goto out;
                }
        }

        /*
         * We weren't able to just expand or shrink the area,
         * we need to create a new one and move it..
         */
        ret = -ENOMEM;
        if (flags & MREMAP_MAYMOVE) {
                unsigned long map_flags = 0;

                if (vma->vm_flags & VM_MAYSHARE)
                        map_flags |= MAP_SHARED;

                new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
                                        vma->vm_pgoff +
                                        ((addr - vma->vm_start) >> PAGE_SHIFT),
                                        map_flags);
                if (offset_in_page(new_addr)) {
                        ret = new_addr;
                        goto out;
                }

                ret = move_vma(vma, addr, old_len, new_len, new_addr,
                               &locked, &uf, &uf_unmap);
        }
out:
        if (offset_in_page(ret)) {
                vm_unacct_memory(charged);
                locked = false;
        }
        if (downgraded)
                up_read(&current->mm->mmap_sem);
        else
                up_write(&current->mm->mmap_sem);
        if (locked && new_len > old_len)
                mm_populate(new_addr + old_len, new_len - old_len);
        userfaultfd_unmap_complete(mm, &uf_unmap_early);
        mremap_userfaultfd_complete(&uf, addr, new_addr, old_len);
        userfaultfd_unmap_complete(mm, &uf_unmap);
        return ret;
}