// SPDX-License-Identifier: GPL-2.0
/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/userfaultfd_k.h>
#include <linux/mempolicy.h>

#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>

#include "internal.h"
static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none_or_clear_bad(p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	return pud;
}
static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = get_old_pud(mm, addr);
	if (!pud)
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}
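
/*
 * Allocators for the destination (new) side of the move: intermediate page
 * table levels are allocated on demand, down to the PUD or PMD that will
 * receive the moved entries.
 */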
static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;

	return pud_alloc(mm, p4d, addr);
}
static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = alloc_new_pud(mm, vma, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}
static void take_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->vm_file)
		i_mmap_lock_write(vma->vm_file->f_mapping);
	if (vma->anon_vma)
		anon_vma_lock_write(vma->anon_vma);
}
static void drop_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->anon_vma)
		anon_vma_unlock_write(vma->anon_vma);
	if (vma->vm_file)
		i_mmap_unlock_write(vma->vm_file->f_mapping);
}
static pte_t move_soft_dirty_pte(pte_t pte)
{
	/*
	 * Set soft dirty bit so we can notice
	 * in userspace the ptes were moved.
	 */
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (pte_present(pte))
		pte = pte_mksoft_dirty(pte);
	else if (is_swap_pte(pte))
		pte = pte_swp_mksoft_dirty(pte);
#endif
	return pte;
}
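
/*
 * Move the PTEs covering [old_addr, old_end) from old_pmd to new_pmd,
 * clearing the old entries as we go. Returns 0 on success, or -EAGAIN if a
 * PTE page disappeared under us and the caller should retry.
 */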
static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr, bool need_rmap_locks)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	pmd_t dummy_pmdval;
	spinlock_t *old_ptl, *new_ptl;
	bool force_flush = false;
	unsigned long len = old_end - old_addr;
	int err = 0;

	/*
	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
	 * locks to ensure that rmap will always observe either the old or the
	 * new ptes. This is the easiest way to avoid races with
	 * truncate_pagecache(), page migration, etc...
	 *
	 * When need_rmap_locks is false, we use other ways to avoid
	 * such races:
	 *
	 * - During exec() shift_arg_pages(), we use a specially tagged vma
	 *   which rmap call sites look for using vma_is_temporary_stack().
	 *
	 * - During mremap(), new_vma is often known to be placed after vma
	 *   in rmap traversal order. This ensures rmap will always observe
	 *   either the old pte, or the new pte, or both (the page table locks
	 *   serialize access to individual ptes, but only rmap traversal
	 *   order guarantees that we won't miss both the old and new ptes).
	 */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_lock prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	if (!old_pte) {
		err = -EAGAIN;
		goto out;
	}
	/*
	 * Now new_pte is none, so hpage_collapse_scan_file() path can not find
	 * this by traversing file->f_mapping, so there is no concurrency with
	 * retract_page_tables(). In addition, we already hold the exclusive
	 * mmap_lock, so this new_pte page is stable, so there is no need to get
	 * pmdval and do pmd_same() check.
	 */
	new_pte = pte_offset_map_rw_nolock(mm, new_pmd, new_addr, &dummy_pmdval,
					   &new_ptl);
	if (!new_pte) {
		pte_unmap_unlock(old_pte, old_ptl);
		err = -EAGAIN;
		goto out;
	}
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(ptep_get(old_pte)))
			continue;

		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		/*
		 * If we are remapping a valid PTE, make sure
		 * to flush TLB before we drop the PTL for the
		 * PTE.
		 *
		 * NOTE! Both old and new PTL matter: the old one
		 * for racing with folio_mkclean(), the new one to
		 * make sure the physical page stays valid until
		 * the TLB entry for the old mapping has been
		 * flushed.
		 */
		if (pte_present(pte))
			force_flush = true;
		pte = move_pte(pte, old_addr, new_addr);
		pte = move_soft_dirty_pte(pte);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (force_flush)
		flush_tlb_range(vma, old_end - len, old_end);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
out:
	if (need_rmap_locks)
		drop_rmap_locks(vma);
	return err;
}
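
/*
 * Architectures may provide their own arch_supports_page_table_move(); the
 * default simply follows the HAVE_MOVE_PMD/HAVE_MOVE_PUD config options.
 */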
#ifndef arch_supports_page_table_move
#define arch_supports_page_table_move arch_supports_page_table_move
static inline bool arch_supports_page_table_move(void)
{
	return IS_ENABLED(CONFIG_HAVE_MOVE_PMD) ||
		IS_ENABLED(CONFIG_HAVE_MOVE_PUD);
}
#endif
#ifdef CONFIG_HAVE_MOVE_PMD
static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	bool res = false;
	pmd_t pmd;

	if (!arch_supports_page_table_move())
		return false;
	/*
	 * The destination pmd shouldn't be established, free_pgtables()
	 * should have released it.
	 *
	 * However, there's a case during execve() where we use mremap
	 * to move the initial stack, and in that case the target area
	 * may overlap the source area (always moving down).
	 *
	 * If everything is PMD-aligned, that works fine, as moving
	 * each pmd down will clear the source pmd. But if we first
	 * have a few 4kB-only pages that get moved down, and then
	 * hit the "now the rest is PMD-aligned, let's do everything
	 * one pmd at a time", we will still have the old (now empty
	 * of any 4kB pages, but still there) PMD in the page table
	 * tree.
	 *
	 * Warn on it once - because we really should try to figure
	 * out how to do this better - but then say "I won't move
	 * this pmd".
	 *
	 * One alternative might be to just unmap the target pmd at
	 * this point, and verify that it really is empty. We'll see.
	 */
	if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pmd_lock(vma->vm_mm, old_pmd);
	new_ptl = pmd_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	pmd = *old_pmd;

	/* Racing with collapse? */
	if (unlikely(!pmd_present(pmd) || pmd_leaf(pmd)))
		goto out_unlock;
	/* Clear the pmd */
	pmd_clear(old_pmd);
	res = true;

	VM_BUG_ON(!pmd_none(*new_pmd));

	pmd_populate(mm, new_pmd, pmd_pgtable(pmd));
	flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
out_unlock:
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return res;
}
#else
static inline bool move_normal_pmd(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long new_addr, pmd_t *old_pmd,
		pmd_t *new_pmd)
{
	return false;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 2 && defined(CONFIG_HAVE_MOVE_PUD)
static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pud_t pud;

	if (!arch_supports_page_table_move())
		return false;
	/*
	 * The destination pud shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON_ONCE(!pud_none(*new_pud)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pud_lock(vma->vm_mm, old_pud);
	new_ptl = pud_lockptr(mm, new_pud);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pud */
	pud = *old_pud;
	pud_clear(old_pud);

	VM_BUG_ON(!pud_none(*new_pud));

	pud_populate(mm, new_pud, pud_pgtable(pud));
	flush_tlb_range(vma, old_addr, old_addr + PUD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#else
static inline bool move_normal_pud(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long new_addr, pud_t *old_pud,
		pud_t *new_pud)
{
	return false;
}
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
			  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pud_t pud;

	/*
	 * The destination pud shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON_ONCE(!pud_none(*new_pud)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pud_lock(vma->vm_mm, old_pud);
	new_ptl = pud_lockptr(mm, new_pud);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pud */
	pud = *old_pud;
	pud_clear(old_pud);

	VM_BUG_ON(!pud_none(*new_pud));

	/* Set the new pud */
	/* mark soft_dirty when we add pud level soft dirty support */
	set_pud_at(mm, new_addr, new_pud, pud);
	flush_pud_tlb_range(vma, old_addr, old_addr + HPAGE_PUD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#else
static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
			  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
{
	WARN_ON_ONCE(1);
	return false;
}
#endif
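
/* Page table entry levels that move_pgt_entry() knows how to move. */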
enum pgt_entry {
	NORMAL_PMD,
	HPAGE_PMD,
	NORMAL_PUD,
	HPAGE_PUD,
};
/*
 * Returns an extent of the corresponding size for the pgt_entry specified if
 * valid. Else returns a smaller extent bounded by the end of the source and
 * destination pgt_entry.
 */
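/*
 * Illustration, assuming 4K pages and 2MB PMDs: for NORMAL_PMD with
 * old_addr = 0x1ff000, old_end = 0x600000 and new_addr = 0x3ff000, the
 * extent is min(0x200000 - 0x1ff000, old_end - old_addr,
 * 0x400000 - 0x3ff000) = 0x1000, i.e. a single page up to the next PMD
 * boundary of the source and destination.
 */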
static __always_inline unsigned long get_extent(enum pgt_entry entry,
			unsigned long old_addr, unsigned long old_end,
			unsigned long new_addr)
{
	unsigned long next, extent, mask, size;

	switch (entry) {
	case HPAGE_PMD:
	case NORMAL_PMD:
		mask = PMD_MASK;
		size = PMD_SIZE;
		break;
	case HPAGE_PUD:
	case NORMAL_PUD:
		mask = PUD_MASK;
		size = PUD_SIZE;
		break;
	default:
		BUILD_BUG();
		break;
	}

	next = (old_addr + size) & mask;
	/* even if next overflowed, extent below will be ok */
	extent = next - old_addr;
	if (extent > old_end - old_addr)
		extent = old_end - old_addr;
	next = (new_addr + size) & mask;
	if (extent > next - new_addr)
		extent = next - new_addr;
	return extent;
}
/*
 * Attempts to speed up the move by moving the entry at the level
 * corresponding to pgt_entry. Returns true if the move was successful,
 * else false.
 */
static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma,
			unsigned long old_addr, unsigned long new_addr,
			void *old_entry, void *new_entry, bool need_rmap_locks)
{
	bool moved = false;

	/* See comment in move_ptes() */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	switch (entry) {
	case NORMAL_PMD:
		moved = move_normal_pmd(vma, old_addr, new_addr, old_entry,
					new_entry);
		break;
	case NORMAL_PUD:
		moved = move_normal_pud(vma, old_addr, new_addr, old_entry,
					new_entry);
		break;
	case HPAGE_PMD:
		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			move_huge_pmd(vma, old_addr, new_addr, old_entry,
				      new_entry);
		break;
	case HPAGE_PUD:
		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			move_huge_pud(vma, old_addr, new_addr, old_entry,
				      new_entry);
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}

	if (need_rmap_locks)
		drop_rmap_locks(vma);

	return moved;
}
/*
 * A helper to check if aligning down is OK. The aligned address should fall
 * on *no mapping*. For the stack moving down, that's a special move within
 * the VMA that is created to span the source and destination of the move,
 * so we make an exception for it.
 */
static bool can_align_down(struct vm_area_struct *vma, unsigned long addr_to_align,
			    unsigned long mask, bool for_stack)
{
	unsigned long addr_masked = addr_to_align & mask;

	/*
	 * If @addr_to_align of either source or destination is not the beginning
	 * of the corresponding VMA, we can't align down or we will destroy part
	 * of the current mapping.
	 */
	if (!for_stack && vma->vm_start != addr_to_align)
		return false;

	/* In the stack case we explicitly permit in-VMA alignment. */
	if (for_stack && addr_masked >= vma->vm_start)
		return true;

	/*
	 * Make sure the realignment doesn't cause the address to fall on an
	 * existing mapping.
	 */
	return find_vma_intersection(vma->vm_mm, addr_masked, vma->vm_start) == NULL;
}
/* Opportunistically realign to specified boundary for faster copy. */
static void try_realign_addr(unsigned long *old_addr, struct vm_area_struct *old_vma,
			     unsigned long *new_addr, struct vm_area_struct *new_vma,
			     unsigned long mask, bool for_stack)
{
	/* Skip if the addresses are already aligned. */
	if ((*old_addr & ~mask) == 0)
		return;

	/* Only realign if the new and old addresses are mutually aligned. */
	if ((*old_addr & ~mask) != (*new_addr & ~mask))
		return;

	/* Ensure realignment doesn't cause overlap with existing mappings. */
	if (!can_align_down(old_vma, *old_addr, mask, for_stack) ||
	    !can_align_down(new_vma, *new_addr, mask, for_stack))
		return;

	*old_addr = *old_addr & mask;
	*new_addr = *new_addr & mask;
}
unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks, bool for_stack)
{
	unsigned long extent, old_end;
	struct mmu_notifier_range range;
	pmd_t *old_pmd, *new_pmd;
	pud_t *old_pud, *new_pud;

	if (!len)
		return 0;

	old_end = old_addr + len;

	if (is_vm_hugetlb_page(vma))
		return move_hugetlb_page_tables(vma, new_vma, old_addr,
						new_addr, len);

	/*
	 * If possible, realign addresses to PMD boundary for faster copy.
	 * Only realign if the mremap copying hits a PMD boundary.
	 */
	if (len >= PMD_SIZE - (old_addr & ~PMD_MASK))
		try_realign_addr(&old_addr, vma, &new_addr, new_vma, PMD_MASK,
				 for_stack);

	flush_cache_range(vma, old_addr, old_end);
	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,
				old_addr, old_end);
	mmu_notifier_invalidate_range_start(&range);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		/*
		 * If extent is PUD-sized try to speed up the move by moving at the
		 * PUD level if possible.
		 */
		extent = get_extent(NORMAL_PUD, old_addr, old_end, new_addr);

		old_pud = get_old_pud(vma->vm_mm, old_addr);
		if (!old_pud)
			continue;
		new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr);
		if (!new_pud)
			break;
		if (pud_trans_huge(*old_pud) || pud_devmap(*old_pud)) {
			if (extent == HPAGE_PUD_SIZE) {
				move_pgt_entry(HPAGE_PUD, vma, old_addr, new_addr,
					       old_pud, new_pud, need_rmap_locks);
				/* We ignore and continue on error? */
				continue;
			}
		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) {

			if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr,
					   old_pud, new_pud, true))
				continue;
		}

		extent = get_extent(NORMAL_PMD, old_addr, old_end, new_addr);
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
again:
		if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) ||
		    pmd_devmap(*old_pmd)) {
			if (extent == HPAGE_PMD_SIZE &&
			    move_pgt_entry(HPAGE_PMD, vma, old_addr, new_addr,
					   old_pmd, new_pmd, need_rmap_locks))
				continue;
			split_huge_pmd(vma, old_pmd, old_addr);
		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PMD) &&
			   extent == PMD_SIZE) {
			/*
			 * If the extent is PMD-sized, try to speed the move by
			 * moving at the PMD level if possible.
			 */
			if (move_pgt_entry(NORMAL_PMD, vma, old_addr, new_addr,
					   old_pmd, new_pmd, true))
				continue;
		}
		if (pmd_none(*old_pmd))
			continue;
		if (pte_alloc(new_vma->vm_mm, new_pmd))
			break;
		if (move_ptes(vma, old_pmd, old_addr, old_addr + extent,
			      new_vma, new_pmd, new_addr, need_rmap_locks) < 0)
			goto again;
	}

	mmu_notifier_invalidate_range_end(&range);

	/*
	 * Prevent negative return values when {old,new}_addr was realigned
	 * but we broke out of the above loop for the first PMD itself.
	 */
	if (old_addr < old_end - len)
		return 0;

	return len + old_addr - old_end;	/* how much done */
}
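
/*
 * Move @old_len bytes of @vma starting at @old_addr to @new_addr: copy the
 * vma, move its page tables, fix up accounting, and then unmap the old
 * range unless MREMAP_DONTUNMAP was requested. Returns the new address on
 * success or a negative error code.
 */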
static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr,
		bool *locked, unsigned long flags,
		struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap)
{
	long to_account = new_len - old_len;
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long account_start = 0;
	unsigned long account_end = 0;
	unsigned long hiwater_vm;
	int err = 0;
	bool need_rmap_locks;
	struct vma_iterator vmi;

	/*
	 * We'd prefer to avoid failure later on in do_munmap:
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	if (unlikely(flags & MREMAP_DONTUNMAP))
		to_account = new_len;

	if (vma->vm_ops && vma->vm_ops->may_split) {
		if (vma->vm_start != old_addr)
			err = vma->vm_ops->may_split(vma, old_addr);
		if (!err && vma->vm_end != old_addr + old_len)
			err = vma->vm_ops->may_split(vma, old_addr + old_len);
		if (err)
			return err;
	}

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped. But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
						MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	if (vm_flags & VM_ACCOUNT) {
		if (security_vm_enough_memory_mm(mm, to_account >> PAGE_SHIFT))
			return -ENOMEM;
	}

	vma_start_write(vma);
	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
			   &need_rmap_locks);
	if (!new_vma) {
		if (vm_flags & VM_ACCOUNT)
			vm_unacct_memory(to_account >> PAGE_SHIFT);
		return -ENOMEM;
	}

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
				     need_rmap_locks, false);
	if (moved_len < old_len) {
		err = -ENOMEM;
	} else if (vma->vm_ops && vma->vm_ops->mremap) {
		err = vma->vm_ops->mremap(new_vma);
	}

	if (unlikely(err)) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
				 true, false);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = err;
	} else {
		mremap_userfaultfd_prep(new_vma, uf);
	}

	if (is_vm_hugetlb_page(vma)) {
		clear_vma_resv_huge_pages(vma);
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) {
		vm_flags_clear(vma, VM_ACCOUNT);
		if (vma->vm_start < old_addr)
			account_start = vma->vm_start;
		if (vma->vm_end > old_addr + old_len)
			account_end = vma->vm_end;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);

	/* Tell that the pfnmap has moved from this vma */
	if (unlikely(vma->vm_flags & VM_PFNMAP))
		untrack_pfn_clear(vma);

	if (unlikely(!err && (flags & MREMAP_DONTUNMAP))) {
		/* We always clear VM_LOCKED[ONFAULT] on the old vma */
		vm_flags_clear(vma, VM_LOCKED_MASK);

		/*
		 * anon_vma links of the old vma are no longer needed after its
		 * page table has been moved.
		 */
		if (new_vma != vma && vma->vm_start == old_addr &&
			vma->vm_end == (old_addr + old_len))
			unlink_anon_vmas(vma);

		/* Because we won't unmap we don't need to touch locked_vm */
		return new_addr;
	}

	vma_iter_init(&vmi, mm, old_addr);
	if (do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP))
			vm_acct_memory(old_len >> PAGE_SHIFT);
		account_start = account_end = 0;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		*locked = true;
	}

	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (account_start) {
		vma = vma_prev(&vmi);
		vm_flags_set(vma, VM_ACCOUNT);
	}

	if (account_end) {
		vma = vma_next(&vmi);
		vm_flags_set(vma, VM_ACCOUNT);
	}

	return new_addr;
}
/**
 * resize_is_valid() - Ensure the vma can be resized to the new length at the
 * given address.
 *
 * @vma: The vma to resize
 * @addr: The old address
 * @old_len: The current size
 * @new_len: The desired size
 * @flags: The vma flags
 *
 * Return 0 on success, error otherwise.
 */
static int resize_is_valid(struct vm_area_struct *vma, unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long pgoff;

	/*
	 * !old_len is a special case where an attempt is made to 'duplicate'
	 * a mapping.  This makes no sense for private mappings as it will
	 * instead create a fresh/new mapping unrelated to the original.  This
	 * is contrary to the basic idea of mremap which creates new mappings
	 * based on the original.  There are no known use cases for this
	 * behavior.  As a result, fail such attempts.
	 */
	if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
		pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap.  This is not supported.\n", current->comm, current->pid);
		return -EINVAL;
	}

	if ((flags & MREMAP_DONTUNMAP) &&
			(vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)))
		return -EINVAL;

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		return -EFAULT;

	if (new_len == old_len)
		return 0;

	/* Need to be careful about a growing mapping */
	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
		return -EINVAL;

	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
		return -EFAULT;

	if (!mlock_future_ok(mm, vma->vm_flags, new_len - old_len))
		return -EAGAIN;

	if (!may_expand_vm(mm, vma->vm_flags,
				(new_len - old_len) >> PAGE_SHIFT))
		return -ENOMEM;

	return 0;
}
/**
 * mremap_to() - remap a vma to a new location
 * @addr: The old address
 * @old_len: The old size
 * @new_addr: The target address
 * @new_len: The new size
 * @locked: If the returned vma is locked (VM_LOCKED)
 * @flags: the mremap flags
 * @uf: The mremap userfaultfd context
 * @uf_unmap_early: The userfaultfd unmap early context
 * @uf_unmap: The userfaultfd unmap context
 *
 * Returns: The new address of the vma or an error.
 */
static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
		unsigned long new_addr, unsigned long new_len, bool *locked,
		unsigned long flags, struct vm_userfaultfd_ctx *uf,
		struct list_head *uf_unmap_early,
		struct list_head *uf_unmap)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret;
	unsigned long map_flags = 0;

	if (offset_in_page(new_addr))
		return -EINVAL;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		return -EINVAL;

	/* Ensure the old/new locations do not overlap */
	if (addr + old_len > new_addr && new_addr + new_len > addr)
		return -EINVAL;

	/*
	 * move_vma() needs us to stay 4 maps below the threshold, otherwise
	 * it will bail out at the very beginning.
	 * That is a problem if we have already unmapped the regions here
	 * (new_addr, and old_addr), because userspace will not know the
	 * state of the vma's after it gets -ENOMEM.
	 * So, to avoid such a scenario we can pre-compute whether the whole
	 * operation has a high chance of succeeding map-wise.
	 * The worst-case scenario is when both vma's (new_addr and old_addr)
	 * get split in 3 before unmapping them.
	 * That means 2 more maps (1 for each) to the ones we already hold.
	 * Check whether current map count plus 2 still leads us to 4 maps below
	 * the threshold, otherwise return -ENOMEM here to be more safe.
	 */
	if ((mm->map_count + 2) >= sysctl_max_map_count - 3)
		return -ENOMEM;

	if (flags & MREMAP_FIXED) {
		/*
		 * In mremap_to() the VMA is moved to the dst address, so
		 * munmap dst first; do_munmap() will check if dst is sealed.
		 */
		ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
		if (ret)
			return ret;
	}

	if (old_len > new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap);
		if (ret)
			return ret;
		old_len = new_len;
	}

	vma = vma_lookup(mm, addr);
	if (!vma)
		return -EFAULT;

	ret = resize_is_valid(vma, addr, old_len, new_len, flags);
	if (ret)
		return ret;

	/* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */
	if (flags & MREMAP_DONTUNMAP &&
		!may_expand_vm(mm, vma->vm_flags, old_len >> PAGE_SHIFT)) {
		return -ENOMEM;
	}

	if (flags & MREMAP_FIXED)
		map_flags |= MAP_FIXED;

	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (IS_ERR_VALUE(ret))
		return ret;

	/* We got a new mapping */
	if (!(flags & MREMAP_FIXED))
		new_addr = ret;

	return move_vma(vma, addr, old_len, new_len, new_addr, locked, flags,
			uf, uf_unmap);
}
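
/*
 * Can the vma grow in place by @delta bytes? It can if the new end does not
 * overflow, no existing mapping intersects the extension, and
 * get_unmapped_area() accepts the enlarged range at the current address.
 */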
static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;

	if (end < vma->vm_end) /* overflow */
		return 0;
	if (find_vma_intersection(vma->vm_mm, vma->vm_end, end))
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}
/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
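/*
 * Illustrative userspace usage (not part of this file's code): growing an
 * anonymous mapping and letting the kernel move it if it cannot be expanded
 * in place:
 *
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	void *q = mremap(p, 4096, 8192, MREMAP_MAYMOVE);
 */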
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	bool locked = false;
	struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
	LIST_HEAD(uf_unmap_early);
	LIST_HEAD(uf_unmap);

	/*
	 * There is a deliberate asymmetry here: we strip the pointer tag
	 * from the old address but leave the new address alone. This is
	 * for consistency with mmap(), where we prevent the creation of
	 * aliasing mappings in userspace by leaving the tag bits of the
	 * mapping address intact. A non-zero tag will cause the subsequent
	 * range checks to reject the address as invalid.
	 *
	 * See Documentation/arch/arm64/tagged-address-abi.rst for more
	 * information.
	 */
	addr = untagged_addr(addr);

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP))
		return ret;

	if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
		return ret;

	/*
	 * MREMAP_DONTUNMAP is always a move and it does not allow resizing
	 * in the process.
	 */
	if (flags & MREMAP_DONTUNMAP &&
			(!(flags & MREMAP_MAYMOVE) || old_len != new_len))
		return ret;

	if (offset_in_page(addr))
		return ret;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		return ret;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;
	vma = vma_lookup(mm, addr);
	if (!vma) {
		ret = -EFAULT;
		goto out;
	}

	/* Don't allow remapping vmas when they have already been sealed */
	if (!can_modify_vma(vma)) {
		ret = -EPERM;
		goto out;
	}

	if (is_vm_hugetlb_page(vma)) {
		struct hstate *h __maybe_unused = hstate_vma(vma);

		old_len = ALIGN(old_len, huge_page_size(h));
		new_len = ALIGN(new_len, huge_page_size(h));

		/* addrs must be huge page aligned */
		if (addr & ~huge_page_mask(h))
			goto out;
		if (new_addr & ~huge_page_mask(h))
			goto out;

		/*
		 * Don't allow remap expansion, because the underlying hugetlb
		 * reservation is not yet capable of handling split reservations.
		 */
		if (new_len > old_len)
			goto out;
	}

	if (flags & (MREMAP_FIXED | MREMAP_DONTUNMAP)) {
		ret = mremap_to(addr, old_len, new_addr, new_len,
				&locked, flags, &uf, &uf_unmap_early,
				&uf_unmap);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * do_vmi_munmap does all the needed commit accounting, and
	 * unlocks the mmap_lock if so directed.
	 */
	if (old_len >= new_len) {
		VMA_ITERATOR(vmi, mm, addr + new_len);

		if (old_len == new_len) {
			ret = addr;
			goto out;
		}

		ret = do_vmi_munmap(&vmi, mm, addr + new_len, old_len - new_len,
				    &uf_unmap, true);
		if (ret)
			goto out;

		ret = addr;
		goto out_unlocked;
	}
	/*
	 * Ok, we need to grow..
	 */
	ret = resize_is_valid(vma, addr, old_len, new_len, flags);
	if (ret)
		goto out;

	/* old_len exactly to the end of the area..
	 */
	if (old_len == vma->vm_end - addr) {
		unsigned long delta = new_len - old_len;

		/* can we just expand the current mapping? */
		if (vma_expandable(vma, delta)) {
			long pages = delta >> PAGE_SHIFT;
			VMA_ITERATOR(vmi, mm, vma->vm_end);
			long charged = 0;

			if (vma->vm_flags & VM_ACCOUNT) {
				if (security_vm_enough_memory_mm(mm, pages)) {
					ret = -ENOMEM;
					goto out;
				}
				charged = pages;
			}

			/*
			 * vma_merge_extend() is called on the extension we
			 * are adding to the already existing vma; it will
			 * merge this extension with the existing vma (the
			 * expand operation itself) and possibly also with the
			 * next vma if it becomes adjacent to the expanded vma
			 * and is otherwise compatible.
			 */
			vma = vma_merge_extend(&vmi, vma, delta);
			if (!vma) {
				vm_unacct_memory(charged);
				ret = -ENOMEM;
				goto out;
			}

			vm_stat_account(mm, vma->vm_flags, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				locked = true;
				new_addr = addr;
			}
			ret = addr;
			goto out;
		}
	}
	/*
	 * We weren't able to just expand or shrink the area,
	 * so we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;
		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (IS_ERR_VALUE(new_addr)) {
			ret = new_addr;
			goto out;
		}

		ret = move_vma(vma, addr, old_len, new_len, new_addr,
			       &locked, flags, &uf, &uf_unmap);
	}
out:
	if (offset_in_page(ret))
		locked = false;
	mmap_write_unlock(current->mm);
	if (locked && new_len > old_len)
		mm_populate(new_addr + old_len, new_len - old_len);
out_unlocked:
	userfaultfd_unmap_complete(mm, &uf_unmap_early);
	mremap_userfaultfd_complete(&uf, addr, ret, old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap);
	return ret;
}