// SPDX-License-Identifier: GPL-2.0-or-later

/*
 * VMA-specific functions.
 */

#include "vma_internal.h"

struct mmap_state {
	struct mm_struct *mm;
	struct vma_iterator *vmi;

	unsigned long addr;
	unsigned long end;
	pgoff_t pgoff;
	unsigned long pglen;
	unsigned long flags;
	struct file *file;

	unsigned long charged;

	struct vm_area_struct *prev;
	struct vm_area_struct *next;

	/* Unmapping state. */
	struct vma_munmap_struct vms;
	struct ma_state mas_detach;
	struct maple_tree mt_detach;
};
#define MMAP_STATE(name, mm_, vmi_, addr_, len_, pgoff_, flags_, file_) \
	struct mmap_state name = {					\
		.mm = (mm_),						\
		.vmi = (vmi_),						\
		.addr = (addr_),					\
		.end = (addr_) + (len_),				\
		.pgoff = (pgoff_),					\
		.pglen = PHYS_PFN(len_),				\
		.flags = (flags_),					\
		.file = (file_),					\
	}

#define VMG_MMAP_STATE(name, map_, vma_)				\
	struct vma_merge_struct name = {				\
		.mm = (map_)->mm,					\
		.vmi = (map_)->vmi,					\
		.start = (map_)->addr,					\
		.end = (map_)->end,					\
		.flags = (map_)->flags,					\
		.pgoff = (map_)->pgoff,					\
		.file = (map_)->file,					\
		.prev = (map_)->prev,					\
		.vma = vma_,						\
		.next = (vma_) ? NULL : (map_)->next,			\
		.state = VMA_MERGE_START,				\
		.merge_flags = VMG_FLAG_DEFAULT,			\
	}
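/*
 * Illustrative sketch (assumption, not part of the original file): the two
 * macros above are intended to be chained by the mmap path -- first capture
 * the mapping request, then derive merge state from it once the surrounding
 * VMAs are known:
 *
 *	MMAP_STATE(map, mm, &vmi, addr, len, pgoff, flags, file);
 *	...
 *	VMG_MMAP_STATE(vmg, &map, NULL);
 *	vma = vma_merge_new_range(&vmg);
 */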
static inline bool is_mergeable_vma(struct vma_merge_struct *vmg, bool merge_next)
{
	struct vm_area_struct *vma = merge_next ? vmg->next : vmg->prev;

	if (!mpol_equal(vmg->policy, vma_policy(vma)))
		return false;
	/*
	 * VM_SOFTDIRTY should not prevent VMA merging, if we
	 * match the flags but dirty bit -- the caller should mark
	 * merged VMA as dirty. If dirty bit won't be excluded from
	 * comparison, we increase pressure on the memory system forcing
	 * the kernel to generate new VMAs when old one could be
	 * extended instead.
	 */
	if ((vma->vm_flags ^ vmg->flags) & ~VM_SOFTDIRTY)
		return false;
	if (vma->vm_file != vmg->file)
		return false;
	if (!is_mergeable_vm_userfaultfd_ctx(vma, vmg->uffd_ctx))
		return false;
	if (!anon_vma_name_eq(anon_vma_name(vma), vmg->anon_name))
		return false;
	return true;
}
static inline bool is_mergeable_anon_vma(struct anon_vma *anon_vma1,
		struct anon_vma *anon_vma2, struct vm_area_struct *vma)
{
	/*
	 * The list_is_singular() test is to avoid merging VMA cloned from
	 * parents. This can improve scalability caused by anon_vma lock.
	 */
	if ((!anon_vma1 || !anon_vma2) && (!vma ||
	     list_is_singular(&vma->anon_vma_chain)))
		return true;
	return anon_vma1 == anon_vma2;
}

/* Are the anon_vma's belonging to each VMA compatible with one another? */
static inline bool are_anon_vmas_compatible(struct vm_area_struct *vma1,
					    struct vm_area_struct *vma2)
{
	return is_mergeable_anon_vma(vma1->anon_vma, vma2->anon_vma, NULL);
}
/*
 * init_multi_vma_prep() - Initializer for struct vma_prepare
 * @vp: The vma_prepare struct
 * @vma: The vma that will be altered once locked
 * @next: The next vma if it is to be adjusted
 * @remove: The first vma to be removed
 * @remove2: The second vma to be removed
 */
static void init_multi_vma_prep(struct vma_prepare *vp,
				struct vm_area_struct *vma,
				struct vm_area_struct *next,
				struct vm_area_struct *remove,
				struct vm_area_struct *remove2)
{
	memset(vp, 0, sizeof(struct vma_prepare));
	vp->vma = vma;
	vp->anon_vma = vma->anon_vma;
	vp->remove = remove;
	vp->remove2 = remove2;
	vp->adj_next = next;
	if (!vp->anon_vma && next)
		vp->anon_vma = next->anon_vma;

	vp->file = vma->vm_file;
	if (vp->file)
		vp->mapping = vma->vm_file->f_mapping;
}
/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * in front of (at a lower virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
 *
 * We don't check here for the merged mmap wrapping around the end of pagecache
 * indices (16TB on ia32) because do_mmap() does not permit mmap's which
 * wrap, nor mmaps which cover the final page at index -1UL.
 *
 * We assume the vma may be removed as part of the merge.
 */
static bool can_vma_merge_before(struct vma_merge_struct *vmg)
{
	pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);

	if (is_mergeable_vma(vmg, /* merge_next = */ true) &&
	    is_mergeable_anon_vma(vmg->anon_vma, vmg->next->anon_vma, vmg->next)) {
		if (vmg->next->vm_pgoff == vmg->pgoff + pglen)
			return true;
	}

	return false;
}
/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * beyond (at a higher virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
 *
 * We assume that vma is not removed as part of the merge.
 */
static bool can_vma_merge_after(struct vma_merge_struct *vmg)
{
	if (is_mergeable_vma(vmg, /* merge_next = */ false) &&
	    is_mergeable_anon_vma(vmg->anon_vma, vmg->prev->anon_vma, vmg->prev)) {
		if (vmg->prev->vm_pgoff + vma_pages(vmg->prev) == vmg->pgoff)
			return true;
	}

	return false;
}
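/*
 * Illustrative diagram (not from the original source): the two helpers above
 * test opposite edges of a proposed range R.  can_vma_merge_after() asks
 * whether prev can absorb R at its end; can_vma_merge_before() asks whether
 * next can absorb R at its start:
 *
 *	   prev             R             next
 *	|---------|  |=============|  |-----------|
 *	          ^ can_vma_merge_after(vmg)
 *	                            ^ can_vma_merge_before(vmg)
 *
 * Both additionally require that the file offsets (vm_pgoff) line up and that
 * the anon_vmas involved are mergeable.
 */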
static void __vma_link_file(struct vm_area_struct *vma,
			    struct address_space *mapping)
{
	if (vma_is_shared_maywrite(vma))
		mapping_allow_writable(mapping);

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_insert(vma, &mapping->i_mmap);
	flush_dcache_mmap_unlock(mapping);
}

/*
 * Requires inode->i_mapping->i_mmap_rwsem
 */
static void __remove_shared_vm_struct(struct vm_area_struct *vma,
				      struct address_space *mapping)
{
	if (vma_is_shared_maywrite(vma))
		mapping_unmap_writable(mapping);

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_remove(vma, &mapping->i_mmap);
	flush_dcache_mmap_unlock(mapping);
}
/*
 * vma has some anon_vma assigned, and is already inserted on that
 * anon_vma's interval trees.
 *
 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
 * vma must be removed from the anon_vma's interval trees using
 * anon_vma_interval_tree_pre_update_vma().
 *
 * After the update, the vma will be reinserted using
 * anon_vma_interval_tree_post_update_vma().
 *
 * The entire update must be protected by exclusive mmap_lock and by
 * the root anon_vma's mutex.
 */
static void
anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
}

static void
anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
}
/*
 * vma_prepare() - Helper function for handling locking VMAs prior to altering
 * @vp: The initialized vma_prepare struct
 */
static void vma_prepare(struct vma_prepare *vp)
{
	if (vp->file) {
		uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);

		if (vp->adj_next)
			uprobe_munmap(vp->adj_next, vp->adj_next->vm_start,
				      vp->adj_next->vm_end);

		i_mmap_lock_write(vp->mapping);
		if (vp->insert && vp->insert->vm_file) {
			/*
			 * Put into interval tree now, so instantiated pages
			 * are visible to arm/parisc __flush_dcache_page
			 * throughout; but we cannot insert into address
			 * space until vma start or end is updated.
			 */
			__vma_link_file(vp->insert,
					vp->insert->vm_file->f_mapping);
		}
	}

	if (vp->anon_vma) {
		anon_vma_lock_write(vp->anon_vma);
		anon_vma_interval_tree_pre_update_vma(vp->vma);
		if (vp->adj_next)
			anon_vma_interval_tree_pre_update_vma(vp->adj_next);
	}

	if (vp->file) {
		flush_dcache_mmap_lock(vp->mapping);
		vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
		if (vp->adj_next)
			vma_interval_tree_remove(vp->adj_next,
						 &vp->mapping->i_mmap);
	}
}
/*
 * vma_complete() - Helper function for handling the unlocking after altering
 * VMAs, or for inserting a VMA.
 *
 * @vp: The vma_prepare struct
 * @vmi: The vma iterator
 * @mm: The mm_struct
 */
static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi,
			 struct mm_struct *mm)
{
	if (vp->file) {
		if (vp->adj_next)
			vma_interval_tree_insert(vp->adj_next,
						 &vp->mapping->i_mmap);
		vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
		flush_dcache_mmap_unlock(vp->mapping);
	}

	if (vp->remove && vp->file) {
		__remove_shared_vm_struct(vp->remove, vp->mapping);
		if (vp->remove2)
			__remove_shared_vm_struct(vp->remove2, vp->mapping);
	} else if (vp->insert) {
		/*
		 * split_vma has split insert from vma, and needs
		 * us to insert it before dropping the locks
		 * (it may either follow vma or precede it).
		 */
		vma_iter_store(vmi, vp->insert);
		mm->map_count++;
	}

	if (vp->anon_vma) {
		anon_vma_interval_tree_post_update_vma(vp->vma);
		if (vp->adj_next)
			anon_vma_interval_tree_post_update_vma(vp->adj_next);
		anon_vma_unlock_write(vp->anon_vma);
	}

	if (vp->file) {
		i_mmap_unlock_write(vp->mapping);
		uprobe_mmap(vp->vma);

		if (vp->adj_next)
			uprobe_mmap(vp->adj_next);
	}

	if (vp->remove) {
again:
		vma_mark_detached(vp->remove, true);
		if (vp->file) {
			uprobe_munmap(vp->remove, vp->remove->vm_start,
				      vp->remove->vm_end);
			fput(vp->file);
		}
		if (vp->remove->anon_vma)
			anon_vma_merge(vp->vma, vp->remove);
		mm->map_count--;
		mpol_put(vma_policy(vp->remove));
		if (!vp->remove2)
			WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end);
		vm_area_free(vp->remove);

		/*
		 * In mprotect's case 6 (see comments on vma_merge),
		 * we are removing both mid and next vmas.
		 */
		if (vp->remove2) {
			vp->remove = vp->remove2;
			vp->remove2 = NULL;
			goto again;
		}
	}
	if (vp->insert && vp->file)
		uprobe_mmap(vp->insert);
}
/*
 * init_vma_prep() - Initializer wrapper for vma_prepare struct
 * @vp: The vma_prepare struct
 * @vma: The vma that will be altered once locked
 */
static void init_vma_prep(struct vma_prepare *vp, struct vm_area_struct *vma)
{
	init_multi_vma_prep(vp, vma, NULL, NULL, NULL);
}
/*
 * Can the proposed VMA be merged with the left (previous) VMA taking into
 * account the start position of the proposed range.
 */
static bool can_vma_merge_left(struct vma_merge_struct *vmg)

{
	return vmg->prev && vmg->prev->vm_end == vmg->start &&
		can_vma_merge_after(vmg);
}

/*
 * Can the proposed VMA be merged with the right (next) VMA taking into
 * account the end position of the proposed range.
 *
 * In addition, if we can merge with the left VMA, ensure that left and right
 * anon_vma's are also compatible.
 */
static bool can_vma_merge_right(struct vma_merge_struct *vmg,
				bool can_merge_left)
{
	if (!vmg->next || vmg->end != vmg->next->vm_start ||
	    !can_vma_merge_before(vmg))
		return false;

	if (!can_merge_left)
		return true;

	/*
	 * If we can merge with prev (left) and next (right), indicating that
	 * each VMA's anon_vma is compatible with the proposed anon_vma, this
	 * does not mean prev and next are compatible with EACH OTHER.
	 *
	 * We therefore check this in addition to mergeability to either side.
	 */
	return are_anon_vmas_compatible(vmg->prev, vmg->next);
}
/*
 * Close a vm structure and free it.
 */
void remove_vma(struct vm_area_struct *vma, bool unreachable)
{
	might_sleep();
	vma_close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	mpol_put(vma_policy(vma));
	if (unreachable)
		__vm_area_free(vma);
	else
		vm_area_free(vma);
}
/*
 * Get rid of page table information in the indicated region.
 *
 * Called with the mm semaphore held.
 */
void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct vm_area_struct *next)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm);
	update_hiwater_rss(mm);
	unmap_vmas(&tlb, mas, vma, vma->vm_start, vma->vm_end, vma->vm_end,
		   /* mm_wr_locked = */ true);
	mas_set(mas, vma->vm_end);
	free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
		      next ? next->vm_start : USER_PGTABLES_CEILING,
		      /* mm_wr_locked = */ true);
	tlb_finish_mmu(&tlb);
}
/*
 * __split_vma() bypasses sysctl_max_map_count checking.  We use this where it
 * has already been checked or doesn't make sense to fail.
 * VMA Iterator will point to the original VMA.
 */
static __must_check int
__split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
	    unsigned long addr, int new_below)
{
	struct vma_prepare vp;
	struct vm_area_struct *new;
	int err;

	WARN_ON(vma->vm_start >= addr);
	WARN_ON(vma->vm_end <= addr);

	if (vma->vm_ops && vma->vm_ops->may_split) {
		err = vma->vm_ops->may_split(vma, addr);
		if (err)
			return err;
	}

	new = vm_area_dup(vma);
	if (!new)
		return -ENOMEM;

	if (new_below) {
		new->vm_end = addr;
	} else {
		new->vm_start = addr;
		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
	}

	err = -ENOMEM;
	vma_iter_config(vmi, new->vm_start, new->vm_end);
	if (vma_iter_prealloc(vmi, new))
		goto out_free_vma;

	err = vma_dup_policy(vma, new);
	if (err)
		goto out_free_vmi;

	err = anon_vma_clone(new, vma);
	if (err)
		goto out_free_mpol;

	if (new->vm_file)
		get_file(new->vm_file);

	if (new->vm_ops && new->vm_ops->open)
		new->vm_ops->open(new);

	vma_start_write(vma);
	vma_start_write(new);

	init_vma_prep(&vp, vma);
	vp.insert = new;
	vma_prepare(&vp);
	vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);

	if (new_below) {
		vma->vm_start = addr;
		vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
	} else {
		vma->vm_end = addr;
	}

	/* vma_complete stores the new vma */
	vma_complete(&vp, vmi, vma->vm_mm);
	validate_mm(vma->vm_mm);

	return 0;

out_free_mpol:
	mpol_put(vma_policy(new));
out_free_vmi:
	vma_iter_free(vmi);
out_free_vma:
	vm_area_free(new);
	return err;
}
/*
 * Split a vma into two pieces at address 'addr', a new vma is allocated
 * either for the first part or the tail.
 */
static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
		     unsigned long addr, int new_below)
{
	if (vma->vm_mm->map_count >= sysctl_max_map_count)
		return -ENOMEM;

	return __split_vma(vmi, vma, addr, new_below);
}
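/*
 * Illustrative sketch (not part of the original file): callers that modify a
 * sub-range [start, end) of an existing VMA typically split at each boundary
 * before applying the change, for example:
 *
 *	if (vma->vm_start < start)
 *		err = split_vma(vmi, vma, start, 1);	// keep the head
 *	if (!err && vma->vm_end > end)
 *		err = split_vma(vmi, vma, end, 0);	// keep the tail
 *
 * This is the pattern vma_modify() below implements.
 */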
/*
 * dup_anon_vma() - Helper function to duplicate anon_vma
 * @dst: The destination VMA
 * @src: The source VMA
 * @dup: Pointer to the destination VMA when successful.
 *
 * Returns: 0 on success.
 */
static int dup_anon_vma(struct vm_area_struct *dst,
			struct vm_area_struct *src, struct vm_area_struct **dup)
{
	/*
	 * Easily overlooked: when mprotect shifts the boundary, make sure the
	 * expanding vma has anon_vma set if the shrinking vma had, to cover any
	 * anon pages imported.
	 */
	if (src->anon_vma && !dst->anon_vma) {
		int ret;

		vma_assert_write_locked(dst);
		dst->anon_vma = src->anon_vma;
		ret = anon_vma_clone(dst, src);
		if (ret)
			return ret;

		*dup = dst;
	}

	return 0;
}
#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
void validate_mm(struct mm_struct *mm)
{
	int bug = 0;
	int i = 0;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mt_validate(&mm->mm_mt);
	for_each_vma(vmi, vma) {
#ifdef CONFIG_DEBUG_VM_RB
		struct anon_vma *anon_vma = vma->anon_vma;
		struct anon_vma_chain *avc;
#endif
		unsigned long vmi_start, vmi_end;
		bool warn = 0;

		vmi_start = vma_iter_addr(&vmi);
		vmi_end = vma_iter_end(&vmi);
		if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
			warn = 1;

		if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
			warn = 1;

		if (warn) {
			pr_emerg("issue in %s\n", current->comm);
			dump_vma(vma);
			pr_emerg("tree range: %px start %lx end %lx\n", vma,
				 vmi_start, vmi_end - 1);
			vma_iter_dump_tree(&vmi);
		}

#ifdef CONFIG_DEBUG_VM_RB
		if (anon_vma) {
			anon_vma_lock_read(anon_vma);
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				anon_vma_interval_tree_verify(avc);
			anon_vma_unlock_read(anon_vma);
		}
#endif
		/* Check for an infinite loop */
		if (++i > mm->map_count + 10) {
			bug = 1;
			break;
		}
	}
	if (i != mm->map_count) {
		pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
		bug = 1;
	}
	VM_BUG_ON_MM(bug, mm);
}
#endif /* CONFIG_DEBUG_VM_MAPLE_TREE */
/* Actually perform the VMA merge operation. */
static int commit_merge(struct vma_merge_struct *vmg,
			struct vm_area_struct *adjust,
			struct vm_area_struct *remove,
			struct vm_area_struct *remove2,
			long adj_start,
			bool expanded)
{
	struct vma_prepare vp;

	init_multi_vma_prep(&vp, vmg->vma, adjust, remove, remove2);

	VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
		   vp.anon_vma != adjust->anon_vma);

	if (expanded) {
		/* Note: vma iterator must be pointing to 'start'. */
		vma_iter_config(vmg->vmi, vmg->start, vmg->end);
	} else {
		vma_iter_config(vmg->vmi, adjust->vm_start + adj_start,
				adjust->vm_end);
	}

	if (vma_iter_prealloc(vmg->vmi, vmg->vma))
		return -ENOMEM;

	vma_prepare(&vp);
	vma_adjust_trans_huge(vmg->vma, vmg->start, vmg->end, adj_start);
	vma_set_range(vmg->vma, vmg->start, vmg->end, vmg->pgoff);

	if (expanded)
		vma_iter_store(vmg->vmi, vmg->vma);

	if (adj_start) {
		adjust->vm_start += adj_start;
		adjust->vm_pgoff += PHYS_PFN(adj_start);
		if (adj_start < 0)
			vma_iter_store(vmg->vmi, adjust);
	}

	vma_complete(&vp, vmg->vmi, vmg->vma->vm_mm);

	return 0;
}
/* We can only remove VMAs when merging if they do not have a close hook. */
static bool can_merge_remove_vma(struct vm_area_struct *vma)
{
	return !vma->vm_ops || !vma->vm_ops->close;
}
/*
 * vma_merge_existing_range - Attempt to merge VMAs based on a VMA having its
 * attributes modified.
 *
 * @vmg: Describes the modifications being made to a VMA and associated
 *       metadata.
 *
 * When the attributes of a range within a VMA change, then it might be possible
 * for immediately adjacent VMAs to be merged into that VMA due to having
 * identical properties.
 *
 * This function checks for the existence of any such mergeable VMAs and updates
 * the maple tree describing the @vmg->vma->vm_mm address space to account for
 * this, as well as any VMAs shrunk/expanded/deleted as a result of this merge.
 *
 * As part of this operation, if a merge occurs, the @vmg object will have its
 * vma, start, end, and pgoff fields modified to execute the merge. Subsequent
 * calls to this function should reset these fields.
 *
 * Returns: The merged VMA if merge succeeds, or NULL otherwise.
 *
 * ASSUMPTIONS:
 * - The caller must assign the VMA to be modified to @vmg->vma.
 * - The caller must have set @vmg->prev to the previous VMA, if there is one.
 * - The caller must not set @vmg->next, as we determine this.
 * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
 * - vmi must be positioned within [@vmg->vma->vm_start, @vmg->vma->vm_end).
 */
static __must_check struct vm_area_struct *vma_merge_existing_range(
		struct vma_merge_struct *vmg)
{
	struct vm_area_struct *vma = vmg->vma;
	struct vm_area_struct *prev = vmg->prev;
	struct vm_area_struct *next, *res;
	struct vm_area_struct *anon_dup = NULL;
	struct vm_area_struct *adjust = NULL;
	unsigned long start = vmg->start;
	unsigned long end = vmg->end;
	bool left_side = vma && start == vma->vm_start;
	bool right_side = vma && end == vma->vm_end;
	int err = 0;
	long adj_start = 0;
	bool merge_will_delete_vma, merge_will_delete_next;
	bool merge_left, merge_right, merge_both;
	bool expanded;

	mmap_assert_write_locked(vmg->mm);
	VM_WARN_ON_VMG(!vma, vmg); /* We are modifying a VMA, so caller must specify. */
	VM_WARN_ON_VMG(vmg->next, vmg); /* We set this. */
	VM_WARN_ON_VMG(prev && start <= prev->vm_start, vmg);
	VM_WARN_ON_VMG(start >= end, vmg);

	/*
	 * If vma == prev, then we are offset into a VMA. Otherwise, if we are
	 * not, we must span a portion of the VMA.
	 */
	VM_WARN_ON_VMG(vma && ((vma != prev && vmg->start != vma->vm_start) ||
			       vmg->end > vma->vm_end), vmg);
	/* The vmi must be positioned within vmg->vma. */
	VM_WARN_ON_VMG(vma && !(vma_iter_addr(vmg->vmi) >= vma->vm_start &&
				vma_iter_addr(vmg->vmi) < vma->vm_end), vmg);

	vmg->state = VMA_MERGE_NOMERGE;

	/*
	 * If a special mapping or if the range being modified is neither at the
	 * furthermost left or right side of the VMA, then we have no chance of
	 * merging and should abort.
	 */
	if (vmg->flags & VM_SPECIAL || (!left_side && !right_side))
		return NULL;

	if (left_side)
		merge_left = can_vma_merge_left(vmg);
	else
		merge_left = false;

	if (right_side) {
		next = vmg->next = vma_iter_next_range(vmg->vmi);
		vma_iter_prev_range(vmg->vmi);

		merge_right = can_vma_merge_right(vmg, merge_left);
	} else {
		merge_right = false;
		next = NULL;
	}

	if (merge_left)		/* If merging prev, position iterator there. */
		vma_prev(vmg->vmi);
	else if (!merge_right)	/* If we have nothing to merge, abort. */
		return NULL;

	merge_both = merge_left && merge_right;
	/* If we span the entire VMA, a merge implies it will be deleted. */
	merge_will_delete_vma = left_side && right_side;

	/*
	 * If we need to remove vma in its entirety but are unable to do so,
	 * we have no sensible recourse but to abort the merge.
	 */
	if (merge_will_delete_vma && !can_merge_remove_vma(vma))
		return NULL;

	/*
	 * If we merge both VMAs, then next is also deleted. This implies
	 * merge_will_delete_vma also.
	 */
	merge_will_delete_next = merge_both;

	/*
	 * If we cannot delete next, then we can reduce the operation to merging
	 * prev and vma (thereby deleting vma).
	 */
	if (merge_will_delete_next && !can_merge_remove_vma(next)) {
		merge_will_delete_next = false;
		merge_right = false;
		merge_both = false;
	}

	/* No matter what happens, we will be adjusting vma. */
	vma_start_write(vma);

	if (merge_left)
		vma_start_write(prev);

	if (merge_right)
		vma_start_write(next);

	if (merge_both) {
		/*
		 *         |<----->|
		 * |-------*********-------|
		 *   prev     vma     next
		 *  extend  delete  delete
		 */

		vmg->vma = prev;
		vmg->start = prev->vm_start;
		vmg->end = next->vm_end;
		vmg->pgoff = prev->vm_pgoff;

		/*
		 * We already ensured anon_vma compatibility above, so now it's
		 * simply a case of, if prev has no anon_vma object, which of
		 * next or vma contains the anon_vma we must duplicate.
		 */
		err = dup_anon_vma(prev, next->anon_vma ? next : vma, &anon_dup);
	} else if (merge_left) {
		/*
		 *         |<----->|
		 * |-------*************
		 *   prev       vma
		 *  extend shrink/delete
		 */

		vmg->vma = prev;
		vmg->start = prev->vm_start;
		vmg->pgoff = prev->vm_pgoff;

		if (!merge_will_delete_vma) {
			adjust = vma;
			adj_start = vmg->end - vma->vm_start;
		}

		err = dup_anon_vma(prev, vma, &anon_dup);
	} else { /* merge_right */
		/*
		 *     |<----->|
		 * *************-------|
		 *      vma       next
		 *  shrink/delete extend
		 */

		pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);

		VM_WARN_ON_VMG(!merge_right, vmg);
		/* If we are offset into a VMA, then prev must be vma. */
		VM_WARN_ON_VMG(vmg->start > vma->vm_start && prev && vma != prev, vmg);

		if (merge_will_delete_vma) {
			vmg->vma = next;
			vmg->end = next->vm_end;
			vmg->pgoff = next->vm_pgoff - pglen;
		} else {
			/*
			 * We shrink vma and expand next.
			 *
			 * IMPORTANT: This is the ONLY case where the final
			 * merged VMA is NOT vmg->vma, but rather vmg->next.
			 */

			vmg->start = vma->vm_start;
			vmg->end = start;
			vmg->pgoff = vma->vm_pgoff;

			adjust = next;
			adj_start = -(vma->vm_end - start);
		}

		err = dup_anon_vma(next, vma, &anon_dup);
	}

	if (err)
		goto abort;

	/*
	 * In nearly all cases, we expand vmg->vma. There is one exception -
	 * merge_right where we partially span the VMA. In this case we shrink
	 * the end of vmg->vma and adjust the start of vmg->next accordingly.
	 */
	expanded = !merge_right || merge_will_delete_vma;

	if (commit_merge(vmg, adjust,
			 merge_will_delete_vma ? vma : NULL,
			 merge_will_delete_next ? next : NULL,
			 adj_start, expanded)) {
		if (anon_dup)
			unlink_anon_vmas(anon_dup);

		vmg->state = VMA_MERGE_ERROR_NOMEM;
		return NULL;
	}

	res = merge_left ? prev : next;
	khugepaged_enter_vma(res, vmg->flags);

	vmg->state = VMA_MERGE_SUCCESS;
	return res;

abort:
	vma_iter_set(vmg->vmi, start);
	vma_iter_load(vmg->vmi);
	vmg->state = VMA_MERGE_ERROR_NOMEM;
	return NULL;
}
/*
 * vma_merge_new_range - Attempt to merge a new VMA into address space
 *
 * @vmg: Describes the VMA we are adding, in the range @vmg->start to @vmg->end
 *       (exclusive), which we try to merge with any adjacent VMAs if possible.
 *
 * We are about to add a VMA to the address space starting at @vmg->start and
 * ending at @vmg->end. There are three different possible scenarios:
 *
 * 1. There is a VMA with identical properties immediately adjacent to the
 *    proposed new VMA [@vmg->start, @vmg->end) either before or after it -
 *    EXPAND that VMA:
 *
 * Proposed:       |-----|  or  |-----|
 * Existing:  |----|                  |----|
 *
 * 2. There are VMAs with identical properties immediately adjacent to the
 *    proposed new VMA [@vmg->start, @vmg->end) both before AND after it -
 *    EXPAND the former and REMOVE the latter:
 *
 * Proposed:       |-----|
 * Existing:  |----|     |----|
 *
 * 3. There are no VMAs immediately adjacent to the proposed new VMA or those
 *    VMAs do not have identical attributes - NO MERGE POSSIBLE.
 *
 * In instances where we can merge, this function returns the expanded VMA which
 * will have its range adjusted accordingly and the underlying maple tree also
 * adjusted.
 *
 * Returns: In instances where no merge was possible, NULL. Otherwise, a pointer
 *          to the VMA we expanded.
 *
 * This function adjusts @vmg to provide @vmg->next if not already specified,
 * and adjusts [@vmg->start, @vmg->end) to span the expanded range.
 *
 * ASSUMPTIONS:
 * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
 * - The caller must have determined that [@vmg->start, @vmg->end) is empty,
 *   other than VMAs that will be unmapped should the operation succeed.
 * - The caller must have specified the previous vma in @vmg->prev.
 * - The caller must have specified the next vma in @vmg->next.
 * - The caller must have positioned the vmi at or before the gap.
 */
struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
{
	struct vm_area_struct *prev = vmg->prev;
	struct vm_area_struct *next = vmg->next;
	unsigned long end = vmg->end;
	bool can_merge_left, can_merge_right;
	bool just_expand = vmg->merge_flags & VMG_FLAG_JUST_EXPAND;

	mmap_assert_write_locked(vmg->mm);
	VM_WARN_ON_VMG(vmg->vma, vmg);
	/* vmi must point at or before the gap. */
	VM_WARN_ON_VMG(vma_iter_addr(vmg->vmi) > end, vmg);

	vmg->state = VMA_MERGE_NOMERGE;

	/* Special VMAs are unmergeable, also if no prev/next. */
	if ((vmg->flags & VM_SPECIAL) || (!prev && !next))
		return NULL;

	can_merge_left = can_vma_merge_left(vmg);
	can_merge_right = !just_expand && can_vma_merge_right(vmg, can_merge_left);

	/* If we can merge with the next VMA, adjust vmg accordingly. */
	if (can_merge_right) {
		vmg->end = next->vm_end;
		vmg->vma = next;
	}

	/* If we can merge with the previous VMA, adjust vmg accordingly. */
	if (can_merge_left) {
		vmg->start = prev->vm_start;
		vmg->vma = prev;
		vmg->pgoff = prev->vm_pgoff;

		/*
		 * If this merge would result in removal of the next VMA but we
		 * are not permitted to do so, reduce the operation to merging
		 * prev and vma.
		 */
		if (can_merge_right && !can_merge_remove_vma(next))
			vmg->end = end;

		/* In expand-only case we are already positioned at prev. */
		if (!just_expand) {
			/* Equivalent to going to the previous range. */
			vma_prev(vmg->vmi);
		}
	}

	/*
	 * Now try to expand adjacent VMA(s). This takes care of removing the
	 * following VMA if we have VMAs on both sides.
	 */
	if (vmg->vma && !vma_expand(vmg)) {
		khugepaged_enter_vma(vmg->vma, vmg->flags);
		vmg->state = VMA_MERGE_SUCCESS;
		return vmg->vma;
	}

	return NULL;
}
/*
 * vma_expand - Expand an existing VMA
 *
 * @vmg: Describes a VMA expansion operation.
 *
 * Expand @vma to vmg->start and vmg->end.  Can expand off the start and end.
 * Will expand over vmg->next if it's different from vmg->vma and vmg->end ==
 * vmg->next->vm_end.  Checking if the vmg->vma can expand and merge with
 * vmg->next needs to be handled by the caller.
 *
 * Returns: 0 on success.
 *
 * ASSUMPTIONS:
 * - The caller must hold a WRITE lock on vmg->vma->mm->mmap_lock.
 * - The caller must have set @vmg->vma and @vmg->next.
 */
int vma_expand(struct vma_merge_struct *vmg)
{
	struct vm_area_struct *anon_dup = NULL;
	bool remove_next = false;
	struct vm_area_struct *vma = vmg->vma;
	struct vm_area_struct *next = vmg->next;

	mmap_assert_write_locked(vmg->mm);

	vma_start_write(vma);
	if (next && (vma != next) && (vmg->end == next->vm_end)) {
		int ret;

		remove_next = true;
		/* This should already have been checked by this point. */
		VM_WARN_ON_VMG(!can_merge_remove_vma(next), vmg);
		vma_start_write(next);
		ret = dup_anon_vma(vma, next, &anon_dup);
		if (ret)
			return ret;
	}

	/* Not merging but overwriting any part of next is not handled. */
	VM_WARN_ON_VMG(next && !remove_next &&
		       next != vma && vmg->end > next->vm_start, vmg);
	/* Only handles expanding */
	VM_WARN_ON_VMG(vma->vm_start < vmg->start || vma->vm_end > vmg->end, vmg);

	if (commit_merge(vmg, NULL, remove_next ? next : NULL, NULL, 0, true))
		goto nomem;

	return 0;

nomem:
	vmg->state = VMA_MERGE_ERROR_NOMEM;
	if (anon_dup)
		unlink_anon_vmas(anon_dup);
	return -ENOMEM;
}
/*
 * vma_shrink() - Reduce an existing VMA's memory area
 * @vmi: The vma iterator
 * @vma: The VMA to modify
 * @start: The new start
 * @end: The new end
 * @pgoff: The new page offset
 *
 * Returns: 0 on success, -ENOMEM otherwise
 */
int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
	       unsigned long start, unsigned long end, pgoff_t pgoff)
{
	struct vma_prepare vp;

	WARN_ON((vma->vm_start != start) && (vma->vm_end != end));

	if (vma->vm_start < start)
		vma_iter_config(vmi, vma->vm_start, start);
	else
		vma_iter_config(vmi, end, vma->vm_end);

	if (vma_iter_prealloc(vmi, NULL))
		return -ENOMEM;

	vma_start_write(vma);

	init_vma_prep(&vp, vma);
	vma_prepare(&vp);
	vma_adjust_trans_huge(vma, start, end, 0);

	vma_iter_clear(vmi);
	vma_set_range(vma, start, end, pgoff);
	vma_complete(&vp, vmi, vma->vm_mm);
	validate_mm(vma->vm_mm);
	return 0;
}
static inline void vms_clear_ptes(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach, bool mm_wr_locked)
{
	struct mmu_gather tlb;

	if (!vms->clear_ptes) /* Nothing to do */
		return;

	/*
	 * We can free page tables without write-locking mmap_lock because VMAs
	 * were isolated before we downgraded mmap_lock.
	 */
	mas_set(mas_detach, 1);
	tlb_gather_mmu(&tlb, vms->vma->vm_mm);
	update_hiwater_rss(vms->vma->vm_mm);
	unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end,
		   vms->vma_count, mm_wr_locked);

	mas_set(mas_detach, 1);
	/* start and end may be different if there is no prev or next vma. */
	free_pgtables(&tlb, mas_detach, vms->vma, vms->unmap_start,
		      vms->unmap_end, mm_wr_locked);
	tlb_finish_mmu(&tlb);
	vms->clear_ptes = false;
}
static void vms_clean_up_area(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach)
{
	struct vm_area_struct *vma;

	vms_clear_ptes(vms, mas_detach, true);
	mas_set(mas_detach, 0);
	mas_for_each(mas_detach, vma, ULONG_MAX)
		vma_close(vma);
}
/*
 * vms_complete_munmap_vmas() - Finish the munmap() operation
 * @vms: The vma munmap struct
 * @mas_detach: The maple state of the detached vmas
 *
 * This updates the mm_struct, unmaps the region, frees the resources
 * used for the munmap() and may downgrade the lock - if requested.  Everything
 * needed to be done once the vma maple tree is updated.
 */
static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;

	mm = current->mm;
	mm->map_count -= vms->vma_count;
	mm->locked_vm -= vms->locked_vm;
	if (vms->unlock)
		mmap_write_downgrade(mm);

	vms_clear_ptes(vms, mas_detach, !vms->unlock);
	/* Update high watermark before we lower total_vm */
	update_hiwater_vm(mm);
	/* Stat accounting */
	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) - vms->nr_pages);
	/* Paranoid bookkeeping */
	VM_WARN_ON(vms->exec_vm > mm->exec_vm);
	VM_WARN_ON(vms->stack_vm > mm->stack_vm);
	VM_WARN_ON(vms->data_vm > mm->data_vm);
	mm->exec_vm -= vms->exec_vm;
	mm->stack_vm -= vms->stack_vm;
	mm->data_vm -= vms->data_vm;

	/* Remove and clean up vmas */
	mas_set(mas_detach, 0);
	mas_for_each(mas_detach, vma, ULONG_MAX)
		remove_vma(vma, /* unreachable = */ false);

	vm_unacct_memory(vms->nr_accounted);
	if (vms->unlock)
		mmap_read_unlock(mm);

	__mt_destroy(mas_detach->tree);
}
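/*
 * Sketch of the intended calling sequence (see do_vmi_align_munmap() below
 * for the in-tree user): gather the affected VMAs into a detached tree,
 * update the mm's maple tree, and only then complete (or undo) the work:
 *
 *	init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock);
 *	error = vms_gather_munmap_vmas(&vms, &mas_detach);
 *	if (!error)
 *		error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
 *	if (error)
 *		reattach_vmas(&mas_detach);		// undo
 *	else
 *		vms_complete_munmap_vmas(&vms, &mas_detach);
 */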
/*
 * reattach_vmas() - Undo any munmap work and free resources
 * @mas_detach: The maple state with the detached maple tree
 *
 * Reattach any detached vmas and free up the maple tree used to track the vmas.
 */
static void reattach_vmas(struct ma_state *mas_detach)
{
	struct vm_area_struct *vma;

	mas_set(mas_detach, 0);
	mas_for_each(mas_detach, vma, ULONG_MAX)
		vma_mark_detached(vma, false);

	__mt_destroy(mas_detach->tree);
}
/*
 * vms_gather_munmap_vmas() - Put all VMAs within a range into a maple tree
 * for removal at a later date.  Handles splitting first and last if necessary
 * and marking the vmas as isolated.
 *
 * @vms: The vma munmap struct
 * @mas_detach: The maple state tracking the detached tree
 *
 * Return: 0 on success, error otherwise
 */
static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach)
{
	struct vm_area_struct *next = NULL;
	int error;

	/*
	 * If we need to split any vma, do it now to save pain later.
	 * Does it split the first one?
	 */
	if (vms->start > vms->vma->vm_start) {

		/*
		 * Make sure that map_count on return from munmap() will
		 * not exceed its limit; but let map_count go just above
		 * its limit temporarily, to help free resources as expected.
		 */
		if (vms->end < vms->vma->vm_end &&
		    vms->vma->vm_mm->map_count >= sysctl_max_map_count) {
			error = -ENOMEM;
			goto map_count_exceeded;
		}

		/* Don't bother splitting the VMA if we can't unmap it anyway */
		if (!can_modify_vma(vms->vma)) {
			error = -EPERM;
			goto start_split_failed;
		}

		error = __split_vma(vms->vmi, vms->vma, vms->start, 1);
		if (error)
			goto start_split_failed;
	}
	vms->prev = vma_prev(vms->vmi);
	if (vms->prev)
		vms->unmap_start = vms->prev->vm_end;

	/*
	 * Detach a range of VMAs from the mm. Using next as a temp variable as
	 * it is always overwritten.
	 */
	for_each_vma_range(*(vms->vmi), next, vms->end) {
		long nrpages;

		if (!can_modify_vma(next)) {
			error = -EPERM;
			goto modify_vma_failed;
		}
		/* Does it split the end? */
		if (next->vm_end > vms->end) {
			error = __split_vma(vms->vmi, next, vms->end, 0);
			if (error)
				goto end_split_failed;
		}
		vma_start_write(next);
		mas_set(mas_detach, vms->vma_count++);
		error = mas_store_gfp(mas_detach, next, GFP_KERNEL);
		if (error)
			goto munmap_gather_failed;

		vma_mark_detached(next, true);
		nrpages = vma_pages(next);

		vms->nr_pages += nrpages;
		if (next->vm_flags & VM_LOCKED)
			vms->locked_vm += nrpages;

		if (next->vm_flags & VM_ACCOUNT)
			vms->nr_accounted += nrpages;

		if (is_exec_mapping(next->vm_flags))
			vms->exec_vm += nrpages;
		else if (is_stack_mapping(next->vm_flags))
			vms->stack_vm += nrpages;
		else if (is_data_mapping(next->vm_flags))
			vms->data_vm += nrpages;

		if (vms->uf) {
			/*
			 * If userfaultfd_unmap_prep returns an error the vmas
			 * will remain split, but userland will get a
			 * highly unexpected error anyway. This is no
			 * different than the case where the first of the two
			 * __split_vma fails, but we don't undo the first
			 * split, even though we could. This is an unlikely
			 * enough failure that it's not worth optimizing for.
			 */
			error = userfaultfd_unmap_prep(next, vms->start,
						       vms->end, vms->uf);
			if (error)
				goto userfaultfd_error;
		}
#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
		BUG_ON(next->vm_start < vms->start);
		BUG_ON(next->vm_start > vms->end);
#endif
	}

	vms->next = vma_next(vms->vmi);
	if (vms->next)
		vms->unmap_end = vms->next->vm_start;

#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	/* Make sure no VMAs are about to be lost. */
	{
		MA_STATE(test, mas_detach->tree, 0, 0);
		struct vm_area_struct *vma_mas, *vma_test;
		int test_count = 0;

		vma_iter_set(vms->vmi, vms->start);
		vma_test = mas_find(&test, vms->vma_count - 1);
		for_each_vma_range(*(vms->vmi), vma_mas, vms->end) {
			BUG_ON(vma_mas != vma_test);
			test_count++;
			vma_test = mas_next(&test, vms->vma_count - 1);
		}
		BUG_ON(vms->vma_count != test_count);
	}
#endif

	while (vma_iter_addr(vms->vmi) > vms->start)
		vma_iter_prev_range(vms->vmi);

	vms->clear_ptes = true;
	return 0;

userfaultfd_error:
munmap_gather_failed:
end_split_failed:
modify_vma_failed:
	reattach_vmas(mas_detach);
start_split_failed:
map_count_exceeded:
	return error;
}
/*
 * init_vma_munmap() - Initializer wrapper for vma_munmap_struct
 * @vms: The vma munmap struct
 * @vmi: The vma iterator
 * @vma: The first vm_area_struct to munmap
 * @start: The aligned start address to munmap
 * @end: The aligned end address to munmap
 * @uf: The userfaultfd list_head
 * @unlock: Unlock after the operation.  Only unlocked on success
 */
static void init_vma_munmap(struct vma_munmap_struct *vms,
		struct vma_iterator *vmi, struct vm_area_struct *vma,
		unsigned long start, unsigned long end, struct list_head *uf,
		bool unlock)
{
	vms->vmi = vmi;
	vms->vma = vma;
	if (vma) {
		vms->start = start;
		vms->end = end;
	} else {
		vms->start = vms->end = 0;
	}
	vms->unlock = unlock;
	vms->uf = uf;
	vms->vma_count = 0;
	vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
	vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
	vms->unmap_start = FIRST_USER_ADDRESS;
	vms->unmap_end = USER_PGTABLES_CEILING;
	vms->clear_ptes = false;
}
/*
 * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
 * @vmi: The vma iterator
 * @vma: The starting vm_area_struct
 * @mm: The mm_struct
 * @start: The aligned start address to munmap.
 * @end: The aligned end address to munmap.
 * @uf: The userfaultfd list_head
 * @unlock: Set to true to drop the mmap_lock.  Unlocking only happens on
 * success.
 *
 * Return: 0 on success and drops the lock if so directed, error and leaves the
 * lock held otherwise.
 */
int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
		struct mm_struct *mm, unsigned long start, unsigned long end,
		struct list_head *uf, bool unlock)
{
	struct maple_tree mt_detach;
	MA_STATE(mas_detach, &mt_detach, 0, 0);
	mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
	mt_on_stack(mt_detach);
	struct vma_munmap_struct vms;
	int error;

	init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock);
	error = vms_gather_munmap_vmas(&vms, &mas_detach);
	if (error)
		goto gather_failed;

	error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
	if (error)
		goto clear_tree_failed;

	/* Point of no return */
	vms_complete_munmap_vmas(&vms, &mas_detach);
	return 0;

clear_tree_failed:
	reattach_vmas(&mas_detach);
gather_failed:
	return error;
}
/*
 * do_vmi_munmap() - munmap a given range.
 * @vmi: The vma iterator
 * @mm: The mm_struct
 * @start: The start address to munmap
 * @len: The length of the range to munmap
 * @uf: The userfaultfd list_head
 * @unlock: set to true if the user wants to drop the mmap_lock on success
 *
 * This function takes a @mas that is either pointing to the previous VMA or set
 * to MA_START and sets it up to remove the mapping(s).  The @len will be
 * aligned.
 *
 * Return: 0 on success and drops the lock if so directed, error and leaves the
 * lock held otherwise.
 */
int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
		  unsigned long start, size_t len, struct list_head *uf,
		  bool unlock)
{
	unsigned long end;
	struct vm_area_struct *vma;

	if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE - start)
		return -EINVAL;

	end = start + PAGE_ALIGN(len);
	if (end == start)
		return -EINVAL;

	/* Find the first overlapping VMA */
	vma = vma_find(vmi, end);
	if (!vma) {
		if (unlock)
			mmap_write_unlock(mm);
		return 0;
	}

	return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
}
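/*
 * Illustrative sketch (assumption, not from this file): a typical caller on
 * the munmap() path takes the write lock, sets up an iterator and lets
 * do_vmi_munmap() decide whether to drop the lock:
 *
 *	VMA_ITERATOR(vmi, mm, start);
 *
 *	if (mmap_write_lock_killable(mm))
 *		return -EINTR;
 *	ret = do_vmi_munmap(&vmi, mm, start, len, &uf, true);
 *	// on success with unlock == true the lock has already been dropped
 */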
/*
 * We are about to modify one or multiple of a VMA's flags, policy, userfaultfd
 * context and anonymous VMA name within the range [start, end).
 *
 * As a result, we might be able to merge the newly modified VMA range with an
 * adjacent VMA with identical properties.
 *
 * If no merge is possible and the range does not span the entirety of the VMA,
 * we then need to split the VMA to accommodate the change.
 *
 * The function returns either the merged VMA, the original VMA if a split was
 * required instead, or an error if the split failed.
 */
static struct vm_area_struct *vma_modify(struct vma_merge_struct *vmg)
{
	struct vm_area_struct *vma = vmg->vma;
	struct vm_area_struct *merged;

	/* First, try to merge. */
	merged = vma_merge_existing_range(vmg);
	if (merged)
		return merged;

	/* Split any preceding portion of the VMA. */
	if (vma->vm_start < vmg->start) {
		int err = split_vma(vmg->vmi, vma, vmg->start, 1);

		if (err)
			return ERR_PTR(err);
	}

	/* Split any trailing portion of the VMA. */
	if (vma->vm_end > vmg->end) {
		int err = split_vma(vmg->vmi, vma, vmg->end, 0);

		if (err)
			return ERR_PTR(err);
	}

	return vma;
}
struct vm_area_struct *vma_modify_flags(
		struct vma_iterator *vmi, struct vm_area_struct *prev,
		struct vm_area_struct *vma, unsigned long start, unsigned long end,
		unsigned long new_flags)
{
	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);

	vmg.flags = new_flags;

	return vma_modify(&vmg);
}

struct vm_area_struct
*vma_modify_flags_name(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start,
		       unsigned long end,
		       unsigned long new_flags,
		       struct anon_vma_name *new_name)
{
	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);

	vmg.flags = new_flags;
	vmg.anon_name = new_name;

	return vma_modify(&vmg);
}

struct vm_area_struct
*vma_modify_policy(struct vma_iterator *vmi,
		   struct vm_area_struct *prev,
		   struct vm_area_struct *vma,
		   unsigned long start, unsigned long end,
		   struct mempolicy *new_pol)
{
	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);

	vmg.policy = new_pol;

	return vma_modify(&vmg);
}

struct vm_area_struct
*vma_modify_flags_uffd(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start, unsigned long end,
		       unsigned long new_flags,
		       struct vm_userfaultfd_ctx new_ctx)
{
	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);

	vmg.flags = new_flags;
	vmg.uffd_ctx = new_ctx;

	return vma_modify(&vmg);
}
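/*
 * Illustrative sketch (assumption, not part of this file): an mprotect-style
 * caller hands the iterator, the surrounding VMAs and the new flags to one of
 * the helpers above and then works with whatever VMA comes back:
 *
 *	vma = vma_modify_flags(&vmi, prev, vma, start, end, newflags);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	// vma is now either the merged VMA or the (possibly split) original
 */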
/*
 * Expand vma by delta bytes, potentially merging with an immediately adjacent
 * VMA with identical properties.
 */
struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
					struct vm_area_struct *vma,
					unsigned long delta)
{
	VMG_VMA_STATE(vmg, vmi, vma, vma, vma->vm_end, vma->vm_end + delta);

	vmg.next = vma_iter_next_rewind(vmi, NULL);
	vmg.vma = NULL; /* We use the VMA to populate VMG fields only. */

	return vma_merge_new_range(&vmg);
}
void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb)
{
	vb->count = 0;
}

static void unlink_file_vma_batch_process(struct unlink_vma_file_batch *vb)
{
	struct address_space *mapping;
	int i;

	mapping = vb->vmas[0]->vm_file->f_mapping;
	i_mmap_lock_write(mapping);
	for (i = 0; i < vb->count; i++) {
		VM_WARN_ON_ONCE(vb->vmas[i]->vm_file->f_mapping != mapping);
		__remove_shared_vm_struct(vb->vmas[i], mapping);
	}
	i_mmap_unlock_write(mapping);

	unlink_file_vma_batch_init(vb);
}

void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
			       struct vm_area_struct *vma)
{
	if (vma->vm_file == NULL)
		return;

	if ((vb->count > 0 && vb->vmas[0]->vm_file != vma->vm_file) ||
	    vb->count == ARRAY_SIZE(vb->vmas))
		unlink_file_vma_batch_process(vb);

	vb->vmas[vb->count] = vma;
	vb->count++;
}

void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb)
{
	if (vb->count > 0)
		unlink_file_vma_batch_process(vb);
}
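/*
 * Illustrative sketch (assumption, not from this file): the batch API is meant
 * to be driven as init / add ... add / final, so that VMAs sharing the same
 * file mapping are unlinked under a single i_mmap_rwsem acquisition:
 *
 *	struct unlink_vma_file_batch vb;
 *
 *	unlink_file_vma_batch_init(&vb);
 *	for_each_vma(vmi, vma)
 *		unlink_file_vma_batch_add(&vb, vma);
 *	unlink_file_vma_batch_final(&vb);
 */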
/*
 * Unlink a file-based vm structure from its interval tree, to hide
 * vma from rmap and vmtruncate before freeing its page tables.
 */
void unlink_file_vma(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;

	if (file) {
		struct address_space *mapping = file->f_mapping;

		i_mmap_lock_write(mapping);
		__remove_shared_vm_struct(vma, mapping);
		i_mmap_unlock_write(mapping);
	}
}

void vma_link_file(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping;

	if (file) {
		mapping = file->f_mapping;
		i_mmap_lock_write(mapping);
		__vma_link_file(vma, mapping);
		i_mmap_unlock_write(mapping);
	}
}
int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
{
	VMA_ITERATOR(vmi, mm, 0);

	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
	if (vma_iter_prealloc(&vmi, vma))
		return -ENOMEM;

	vma_start_write(vma);
	vma_iter_store(&vmi, vma);
	vma_link_file(vma);
	mm->map_count++;
	validate_mm(mm);
	return 0;
}
/*
 * Copy the vma structure to a new location in the same mm,
 * prior to moving page table entries, to effect an mremap move.
 */
struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks)
{
	struct vm_area_struct *vma = *vmap;
	unsigned long vma_start = vma->vm_start;
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	bool faulted_in_anon_vma = true;
	VMA_ITERATOR(vmi, mm, addr);
	VMG_VMA_STATE(vmg, &vmi, NULL, vma, addr, addr + len);

	/*
	 * If anonymous vma has not yet been faulted, update new pgoff
	 * to match new location, to increase its chance of merging.
	 */
	if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
		pgoff = addr >> PAGE_SHIFT;
		faulted_in_anon_vma = false;
	}

	new_vma = find_vma_prev(mm, addr, &vmg.prev);
	if (new_vma && new_vma->vm_start < addr + len)
		return NULL;	/* should never get here */

	vmg.vma = NULL; /* New VMA range. */
	vmg.pgoff = pgoff;
	vmg.next = vma_iter_next_rewind(&vmi, NULL);
	new_vma = vma_merge_new_range(&vmg);

	if (new_vma) {
		/*
		 * Source vma may have been merged into new_vma
		 */
		if (unlikely(vma_start >= new_vma->vm_start &&
			     vma_start < new_vma->vm_end)) {
			/*
			 * The only way we can get a vma_merge with
			 * self during an mremap is if the vma hasn't
			 * been faulted in yet and we were allowed to
			 * reset the dst vma->vm_pgoff to the
			 * destination address of the mremap to allow
			 * the merge to happen. mremap must change the
			 * vm_pgoff linearity between src and dst vmas
			 * (in turn preventing a vma_merge) to be
			 * safe. It is only safe to keep the vm_pgoff
			 * linear if there are no pages mapped yet.
			 */
			VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
			*vmap = vma = new_vma;
		}
		*need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
	} else {
		new_vma = vm_area_dup(vma);
		if (!new_vma)
			goto out;
		vma_set_range(new_vma, addr, addr + len, pgoff);
		if (vma_dup_policy(vma, new_vma))
			goto out_free_vma;
		if (anon_vma_clone(new_vma, vma))
			goto out_free_mempol;
		if (new_vma->vm_file)
			get_file(new_vma->vm_file);
		if (new_vma->vm_ops && new_vma->vm_ops->open)
			new_vma->vm_ops->open(new_vma);
		if (vma_link(mm, new_vma))
			goto out_vma_link;
		*need_rmap_locks = false;
	}
	return new_vma;

out_vma_link:
	if (new_vma->vm_file)
		fput(new_vma->vm_file);

	unlink_anon_vmas(new_vma);
out_free_mempol:
	mpol_put(vma_policy(new_vma));
out_free_vma:
	vm_area_free(new_vma);
out:
	return NULL;
}
/*
 * Rough compatibility check to quickly see if it's even worth looking
 * at sharing an anon_vma.
 *
 * They need to have the same vm_file, and the flags can only differ
 * in things that mprotect may change.
 *
 * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
 * we can merge the two vma's. For example, we refuse to merge a vma if
 * there is a vm_ops->close() function, because that indicates that the
 * driver is doing some kind of reference counting. But that doesn't
 * really matter for the anon_vma sharing case.
 */
static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
{
	return a->vm_end == b->vm_start &&
		mpol_equal(vma_policy(a), vma_policy(b)) &&
		a->vm_file == b->vm_file &&
		!((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) &&
		b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
}
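/*
 * Worked example (illustrative, not from the original source): two adjacent
 * anonymous VMAs [0x1000, 0x2000) and [0x2000, 0x3000) with the same policy
 * and no backing file are compatible provided their vm_pgoff values are
 * linear, i.e. b->vm_pgoff == a->vm_pgoff + 1 for the single page separating
 * their starts; a difference in VM_READ alone does not matter because
 * VM_ACCESS_FLAGS is masked out of the comparison.
 */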
/*
 * Do some basic sanity checking to see if we can re-use the anon_vma
 * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
 * the same as 'old', the other will be the new one that is trying
 * to share the anon_vma.
 *
 * NOTE! This runs with mmap_lock held for reading, so it is possible that
 * the anon_vma of 'old' is concurrently in the process of being set up
 * by another page fault trying to merge _that_. But that's ok: if it
 * is being set up, that automatically means that it will be a singleton
 * acceptable for merging, so we can do all of this optimistically. But
 * we do that READ_ONCE() to make sure that we never re-load the pointer.
 *
 * IOW: that the "list_is_singular()" test on the anon_vma_chain only
 * matters for the 'stable anon_vma' case (ie the thing we want to avoid
 * is to return an anon_vma that is "complex" due to having gone through
 * a fork).
 *
 * We also make sure that the two vma's are compatible (adjacent,
 * and with the same memory policies). That's all stable, even with just
 * a read lock on the mmap_lock.
 */
static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old,
					  struct vm_area_struct *a,
					  struct vm_area_struct *b)
{
	if (anon_vma_compatible(a, b)) {
		struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);

		if (anon_vma && list_is_singular(&old->anon_vma_chain))
			return anon_vma;
	}
	return NULL;
}
/*
 * find_mergeable_anon_vma is used by anon_vma_prepare, to check
 * neighbouring vmas for a suitable anon_vma, before it goes off
 * to allocate a new anon_vma.  It checks because a repetitive
 * sequence of mprotects and faults may otherwise lead to distinct
 * anon_vmas being allocated, preventing vma merge in subsequent
 * mprotect.
 */
struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = NULL;
	struct vm_area_struct *prev, *next;
	VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_end);

	/* Try next first. */
	next = vma_iter_load(&vmi);
	if (next) {
		anon_vma = reusable_anon_vma(next, vma, next);
		if (anon_vma)
			return anon_vma;
	}

	prev = vma_prev(&vmi);
	VM_BUG_ON_VMA(prev != vma, vma);
	prev = vma_prev(&vmi);
	/* Try prev next. */
	if (prev)
		anon_vma = reusable_anon_vma(prev, prev, vma);

	/*
	 * We might reach here with anon_vma == NULL if we can't find
	 * any reusable anon_vma.
	 * There's no absolute need to look only at touching neighbours:
	 * we could search further afield for "compatible" anon_vmas.
	 * But it would probably just be a waste of time searching,
	 * or lead to too many vmas hanging off the same anon_vma.
	 * We're trying to allow mprotect remerging later on,
	 * not trying to minimize memory used for anon_vmas.
	 */
	return anon_vma;
}
static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops)
{
	return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite);
}

static bool vma_is_shared_writable(struct vm_area_struct *vma)
{
	return (vma->vm_flags & (VM_WRITE | VM_SHARED)) ==
		(VM_WRITE | VM_SHARED);
}

static bool vma_fs_can_writeback(struct vm_area_struct *vma)
{
	/* No managed pages to writeback. */
	if (vma->vm_flags & VM_PFNMAP)
		return false;

	return vma->vm_file && vma->vm_file->f_mapping &&
		mapping_can_writeback(vma->vm_file->f_mapping);
}
/*
 * Does this VMA require the underlying folios to have their dirty state
 * tracked?
 */
bool vma_needs_dirty_tracking(struct vm_area_struct *vma)
{
	/* Only shared, writable VMAs require dirty tracking. */
	if (!vma_is_shared_writable(vma))
		return false;

	/* Does the filesystem need to be notified? */
	if (vm_ops_needs_writenotify(vma->vm_ops))
		return true;

	/*
	 * Even if the filesystem doesn't indicate a need for writenotify, if it
	 * can writeback, dirty tracking is still required.
	 */
	return vma_fs_can_writeback(vma);
}
/*
 * Some shared mappings will want the pages marked read-only
 * to track write events. If so, we'll downgrade vm_page_prot
 * to the private version (using protection_map[] without the
 * VM_SHARED bit).
 */
bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
{
	/* If it was private or non-writable, the write bit is already clear */
	if (!vma_is_shared_writable(vma))
		return false;

	/* The backer wishes to know when pages are first written to? */
	if (vm_ops_needs_writenotify(vma->vm_ops))
		return true;

	/* The open routine did something to the protections that pgprot_modify
	 * won't preserve? */
	if (pgprot_val(vm_page_prot) !=
	    pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags)))
		return false;

	/*
	 * Do we need to track softdirty? hugetlb does not support softdirty
	 * tracking yet.
	 */
	if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
		return true;

	/* Do we need write faults for uffd-wp tracking? */
	if (userfaultfd_wp(vma))
		return true;

	/* Can the mapping track the dirty pages? */
	return vma_fs_can_writeback(vma);
}
static DEFINE_MUTEX(mm_all_locks_mutex);

static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
{
	if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
		/*
		 * The LSB of head.next can't change from under us
		 * because we hold the mm_all_locks_mutex.
		 */
		down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
		/*
		 * We can safely modify head.next after taking the
		 * anon_vma->root->rwsem. If some other vma in this mm shares
		 * the same anon_vma we won't take it again.
		 *
		 * No need of atomic instructions here, head.next
		 * can't change from under us thanks to the
		 * anon_vma->root->rwsem.
		 */
		if (__test_and_set_bit(0, (unsigned long *)
				       &anon_vma->root->rb_root.rb_root.rb_node))
			BUG();
	}
}
static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
{
	if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
		/*
		 * AS_MM_ALL_LOCKS can't change from under us because
		 * we hold the mm_all_locks_mutex.
		 *
		 * Operations on ->flags have to be atomic because
		 * even if AS_MM_ALL_LOCKS is stable thanks to the
		 * mm_all_locks_mutex, there may be other cpus
		 * changing other bitflags in parallel to us.
		 */
		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
			BUG();
		down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
	}
}
/*
 * This operation locks against the VM for all pte/vma/mm related
 * operations that could ever happen on a certain mm. This includes
 * vmtruncate, try_to_unmap, and all page faults.
 *
 * The caller must take the mmap_lock in write mode before calling
 * mm_take_all_locks(). The caller isn't allowed to release the
 * mmap_lock until mm_drop_all_locks() returns.
 *
 * mmap_lock in write mode is required in order to block all operations
 * that could modify pagetables and free pages without need of
 * altering the vma layout. It's also needed in write mode to avoid new
 * anon_vmas being associated with existing vmas.
 *
 * A single task can't take more than one mm_take_all_locks() in a row
 * or it would deadlock.
 *
 * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
 * mapping->flags avoid taking the same lock twice, if more than one
 * vma in this mm is backed by the same anon_vma or address_space.
 *
 * We take locks in the following order, according to the comment at the
 * beginning of mm/rmap.c:
 *   - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
 *     hugetlb mapping);
 *   - all vmas marked locked
 *   - all i_mmap_rwsem locks;
 *   - all anon_vma->rwsem
 *
 * We can take all locks within these types randomly because the VM code
 * doesn't nest them and we are protected from parallel mm_take_all_locks() by
 * mm_all_locks_mutex.
 *
 * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
 * that may have to take thousands of locks.
 *
 * mm_take_all_locks() can fail if it's interrupted by signals.
 */
int mm_take_all_locks(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct anon_vma_chain *avc;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_assert_write_locked(mm);

	mutex_lock(&mm_all_locks_mutex);

	/*
	 * vma_start_write() does not have a complement in mm_drop_all_locks()
	 * because vma_start_write() is always asymmetrical; it marks a VMA as
	 * being written to until mmap_write_unlock() or mmap_write_downgrade()
	 * is reached.
	 */
	for_each_vma(vmi, vma) {
		if (signal_pending(current))
			goto out_unlock;
		vma_start_write(vma);
	}

	vma_iter_init(&vmi, mm, 0);
	for_each_vma(vmi, vma) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->vm_file && vma->vm_file->f_mapping &&
				is_vm_hugetlb_page(vma))
			vm_lock_mapping(mm, vma->vm_file->f_mapping);
	}

	vma_iter_init(&vmi, mm, 0);
	for_each_vma(vmi, vma) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->vm_file && vma->vm_file->f_mapping &&
				!is_vm_hugetlb_page(vma))
			vm_lock_mapping(mm, vma->vm_file->f_mapping);
	}

	vma_iter_init(&vmi, mm, 0);
	for_each_vma(vmi, vma) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->anon_vma)
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				vm_lock_anon_vma(mm, avc->anon_vma);
	}

	return 0;

out_unlock:
	mm_drop_all_locks(mm);
	return -EINTR;
}
static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
{
	if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
		/*
		 * The LSB of head.next can't change to 0 from under
		 * us because we hold the mm_all_locks_mutex.
		 *
		 * We must however clear the bitflag before unlocking
		 * the vma so the users using the anon_vma->rb_root will
		 * never see our bitflag.
		 *
		 * No need of atomic instructions here, head.next
		 * can't change from under us until we release the
		 * anon_vma->root->rwsem.
		 */
		if (!__test_and_clear_bit(0, (unsigned long *)
					  &anon_vma->root->rb_root.rb_root.rb_node))
			BUG();
		anon_vma_unlock_write(anon_vma);
	}
}
static void vm_unlock_mapping(struct address_space *mapping)
{
	if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
		/*
		 * AS_MM_ALL_LOCKS can't change to 0 from under us
		 * because we hold the mm_all_locks_mutex.
		 */
		i_mmap_unlock_write(mapping);
		if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
					&mapping->flags))
			BUG();
	}
}
/*
 * The mmap_lock cannot be released by the caller until
 * mm_drop_all_locks() returns.
 */
void mm_drop_all_locks(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct anon_vma_chain *avc;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_assert_write_locked(mm);
	BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));

	for_each_vma(vmi, vma) {
		if (vma->anon_vma)
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				vm_unlock_anon_vma(avc->anon_vma);
		if (vma->vm_file && vma->vm_file->f_mapping)
			vm_unlock_mapping(vma->vm_file->f_mapping);
	}

	mutex_unlock(&mm_all_locks_mutex);
}
/*
 * We account for memory if it's a private writable mapping,
 * not hugepages and VM_NORESERVE wasn't set.
 */
static bool accountable_mapping(struct file *file, vm_flags_t vm_flags)
{
	/*
	 * hugetlb has its own accounting separate from the core VM.
	 * VM_HUGETLB may not be set yet so we cannot check for that flag.
	 */
	if (file && is_file_hugepages(file))
		return false;

	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
}
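/*
 * Worked example (illustrative, not part of the original source): for an
 * anonymous mapping created with mmap(..., PROT_READ|PROT_WRITE,
 * MAP_PRIVATE|MAP_ANONYMOUS, ...), vm_flags contains VM_WRITE but neither
 * VM_SHARED nor VM_NORESERVE, so
 *
 *	(vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE
 *
 * holds and the mapping is charged against the commit limit. A MAP_SHARED
 * mapping, or one created with MAP_NORESERVE, fails the mask test and is
 * not accounted here.
 */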
/*
 * vms_abort_munmap_vmas() - Undo as much as possible from an aborted munmap()
 * operation.
 * @vms: The vma unmap structure
 * @mas_detach: The maple state with the detached maple tree
 *
 * Reattach any detached vmas, free up the maple tree used to track the vmas.
 * If that's not possible because the ptes are cleared (and vm_ops->close() may
 * have been called), then a NULL is written over the vmas and the vmas are
 * removed (munmap() completed).
 */
static void vms_abort_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach)
{
	struct ma_state *mas = &vms->vmi->mas;

	if (!vms->nr_pages)
		return;

	if (vms->clear_ptes)
		return reattach_vmas(mas_detach);

	/*
	 * Aborting cannot just call the vm_ops open() because they are often
	 * not symmetrical and state data has been lost. Resort to the old
	 * failure method of leaving a gap where the MAP_FIXED mapping failed.
	 */
	mas_set_range(mas, vms->start, vms->end - 1);
	mas_store_gfp(mas, NULL, GFP_KERNEL|__GFP_NOFAIL);
	/* Clean up the insertion of the unfortunate gap */
	vms_complete_munmap_vmas(vms, mas_detach);
}
/*
 * __mmap_prepare() - Prepare to gather any overlapping VMAs that need to be
 * unmapped once the map operation is completed, check limits, account mapping
 * and clean up any pre-existing VMAs.
 *
 * @map: Mapping state.
 * @uf:  Userfaultfd context list.
 *
 * Returns: 0 on success, error code otherwise.
 */
static int __mmap_prepare(struct mmap_state *map, struct list_head *uf)
{
	int error;
	struct vma_iterator *vmi = map->vmi;
	struct vma_munmap_struct *vms = &map->vms;

	/* Find the first overlapping VMA and initialise unmap state. */
	vms->vma = vma_find(vmi, map->end);
	init_vma_munmap(vms, vmi, vms->vma, map->addr, map->end, uf,
			/* unlock = */ false);

	/* OK, we have overlapping VMAs - prepare to unmap them. */
	if (vms->vma) {
		mt_init_flags(&map->mt_detach,
			      vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
		mt_on_stack(map->mt_detach);
		mas_init(&map->mas_detach, &map->mt_detach, /* addr = */ 0);
		/* Prepare to unmap any existing mapping in the area */
		error = vms_gather_munmap_vmas(vms, &map->mas_detach);
		if (error) {
			/* On error VMAs will already have been reattached. */
			vms->nr_pages = 0;
			return error;
		}

		map->next = vms->next;
		map->prev = vms->prev;
	} else {
		map->next = vma_iter_next_rewind(vmi, &map->prev);
	}

	/* Check against address space limit. */
	if (!may_expand_vm(map->mm, map->flags, map->pglen - vms->nr_pages))
		return -ENOMEM;

	/* Private writable mapping: check memory availability. */
	if (accountable_mapping(map->file, map->flags)) {
		map->charged = map->pglen;
		map->charged -= vms->nr_accounted;
		if (map->charged) {
			error = security_vm_enough_memory_mm(map->mm, map->charged);
			if (error)
				return error;
		}

		vms->nr_accounted = 0;
		map->flags |= VM_ACCOUNT;
	}

	/*
	 * Clear PTEs while the vma is still in the tree so that rmap
	 * cannot race with the freeing later in the truncate scenario.
	 * This is also needed for mmap_file(), which is why vm_ops
	 * close function is called.
	 */
	vms_clean_up_area(vms, &map->mas_detach);

	return 0;
}
static int __mmap_new_file_vma(struct mmap_state *map,
			       struct vm_area_struct *vma)
{
	struct vma_iterator *vmi = map->vmi;
	int error;

	vma->vm_file = get_file(map->file);
	error = mmap_file(vma->vm_file, vma);
	if (error) {
		fput(vma->vm_file);
		vma->vm_file = NULL;

		vma_iter_set(vmi, vma->vm_end);
		/* Undo any partial mapping done by a device driver. */
		unmap_region(&vmi->mas, vma, map->prev, map->next);

		return error;
	}

	/* Drivers cannot alter the address of the VMA. */
	WARN_ON_ONCE(map->addr != vma->vm_start);
	/*
	 * Drivers should not permit writability when previously it was
	 * disallowed.
	 */
	VM_WARN_ON_ONCE(map->flags != vma->vm_flags &&
			!(map->flags & VM_MAYWRITE) &&
			(vma->vm_flags & VM_MAYWRITE));

	/* If the flags change (and are mergeable), let's retry later. */
	map->retry_merge = vma->vm_flags != map->flags && !(vma->vm_flags & VM_SPECIAL);
	map->flags = vma->vm_flags;

	return 0;
}
/*
 * __mmap_new_vma() - Allocate a new VMA for the region, as merging was not
 * possible.
 *
 * @map:  Mapping state.
 * @vmap: Output pointer for the new VMA.
 *
 * Returns: Zero on success, or an error.
 */
static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap)
{
	struct vma_iterator *vmi = map->vmi;
	int error = 0;
	struct vm_area_struct *vma;

	/*
	 * Determine the object being mapped and call the appropriate
	 * specific mapper. The address has already been validated, but
	 * not unmapped, but the maps are removed from the list.
	 */
	vma = vm_area_alloc(map->mm);
	if (!vma)
		return -ENOMEM;

	vma_iter_config(vmi, map->addr, map->end);
	vma_set_range(vma, map->addr, map->end, map->pgoff);
	vm_flags_init(vma, map->flags);
	vma->vm_page_prot = vm_get_page_prot(map->flags);

	if (vma_iter_prealloc(vmi, vma)) {
		error = -ENOMEM;
		goto free_vma;
	}

	if (map->file)
		error = __mmap_new_file_vma(map, vma);
	else if (map->flags & VM_SHARED)
		error = shmem_zero_setup(vma);
	else
		vma_set_anonymous(vma);

	if (error)
		goto free_iter_vma;

#ifdef CONFIG_SPARC64
	/* TODO: Fix SPARC ADI! */
	WARN_ON_ONCE(!arch_validate_flags(map->flags));
#endif

	/* Lock the VMA since it is modified after insertion into VMA tree */
	vma_start_write(vma);
	vma_iter_store(vmi, vma);
	map->mm->map_count++;
	vma_link_file(vma);

	/*
	 * vma_merge_new_range() calls khugepaged_enter_vma() too, the below
	 * call covers the non-merge case.
	 */
	khugepaged_enter_vma(vma, map->flags);
	ksm_add_vma(vma);
	*vmap = vma;
	return 0;

free_iter_vma:
	vma_iter_free(vmi);
free_vma:
	vm_area_free(vma);
	return error;
}
/*
 * __mmap_complete() - Unmap any VMAs we overlap, account memory mapping
 *                     statistics, handle locking and finalise the VMA.
 *
 * @map: Mapping state.
 * @vma: Merged or newly allocated VMA for the mmap()'d region.
 */
static void __mmap_complete(struct mmap_state *map, struct vm_area_struct *vma)
{
	struct mm_struct *mm = map->mm;
	unsigned long vm_flags = vma->vm_flags;

	perf_event_mmap(vma);

	/* Unmap any existing mapping in the area. */
	vms_complete_munmap_vmas(&map->vms, &map->mas_detach);

	vm_stat_account(mm, vma->vm_flags, map->pglen);
	if (vm_flags & VM_LOCKED) {
		if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
					is_vm_hugetlb_page(vma) ||
					vma == get_gate_vma(mm))
			vm_flags_clear(vma, VM_LOCKED_MASK);
		else
			mm->locked_vm += map->pglen;
	}

	if (vma->vm_file)
		uprobe_mmap(vma);

	/*
	 * A new (or expanded) vma always gets soft dirty status.
	 * Otherwise the user-space soft-dirty page tracker won't
	 * be able to distinguish the situation when a vma area is unmapped,
	 * then newly mapped in place (which must be treated as
	 * a completely new data area).
	 */
	vm_flags_set(vma, VM_SOFTDIRTY);

	vma_set_page_prot(vma);
}
static unsigned long __mmap_region(struct file *file, unsigned long addr,
		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
		struct list_head *uf)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	int error;
	VMA_ITERATOR(vmi, mm, addr);
	MMAP_STATE(map, mm, &vmi, addr, len, pgoff, vm_flags, file);

	error = __mmap_prepare(&map, uf);
	if (error)
		goto abort_munmap;

	/* Attempt to merge with adjacent VMAs... */
	if (map.prev || map.next) {
		VMG_MMAP_STATE(vmg, &map, /* vma = */ NULL);

		vma = vma_merge_new_range(&vmg);
	}

	/* ...but if we can't, allocate a new VMA. */
	if (!vma) {
		error = __mmap_new_vma(&map, &vma);
		if (error)
			goto unacct_error;
	}

	/* If flags changed, we might be able to merge, so try again. */
	if (map.retry_merge) {
		struct vm_area_struct *merged;
		VMG_MMAP_STATE(vmg, &map, vma);

		vma_iter_config(map.vmi, map.addr, map.end);
		merged = vma_merge_existing_range(&vmg);
		if (merged)
			vma = merged;
	}

	__mmap_complete(&map, vma);

	return addr;

	/* Accounting was done by __mmap_prepare(). */
unacct_error:
	if (map.charged)
		vm_unacct_memory(map.charged);
abort_munmap:
	vms_abort_munmap_vmas(&map.vms, &map.mas_detach);
	return error;
}
/*
 * mmap_region() - Actually perform the userland mapping of a VMA into
 * current->mm with known, aligned and overflow-checked @addr and @len, and
 * correctly determined VMA flags @vm_flags and page offset @pgoff.
 *
 * This is an internal memory management function, and should not be used
 * directly.
 *
 * The caller must write-lock current->mm->mmap_lock.
 *
 * @file: If a file-backed mapping, a pointer to the struct file describing the
 * file to be mapped, otherwise NULL.
 * @addr: The page-aligned address at which to perform the mapping.
 * @len: The page-aligned, non-zero, length of the mapping.
 * @vm_flags: The VMA flags which should be applied to the mapping.
 * @pgoff: If @file is specified, the page offset into the file, if not then
 * the virtual page offset in memory of the anonymous mapping.
 * @uf: Optionally, a pointer to a list head used for tracking userfaultfd unmap
 * events.
 *
 * Returns: Either an error, or the address at which the requested mapping has
 * been performed.
 */
unsigned long mmap_region(struct file *file, unsigned long addr,
			  unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
			  struct list_head *uf)
{
	unsigned long ret;
	bool writable_file_mapping = false;

	mmap_assert_write_locked(current->mm);

	/* Check to see if MDWE is applicable. */
	if (map_deny_write_exec(vm_flags, vm_flags))
		return -EACCES;

	/* Allow architectures to sanity-check the vm_flags. */
	if (!arch_validate_flags(vm_flags))
		return -EINVAL;

	/* Map writable and ensure this isn't a sealed memfd. */
	if (file && is_shared_maywrite(vm_flags)) {
		int error = mapping_map_writable(file->f_mapping);

		if (error)
			return error;
		writable_file_mapping = true;
	}

	ret = __mmap_region(file, addr, len, vm_flags, pgoff, uf);

	/* Clear our write mapping regardless of error. */
	if (writable_file_mapping)
		mapping_unmap_writable(file->f_mapping);

	validate_mm(current->mm);
	return ret;
}
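/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * in-kernel caller that has already validated, aligned and overflow-checked
 * its arguments would invoke mmap_region() under the mmap write lock:
 *
 *	unsigned long addr;
 *
 *	mmap_write_lock(current->mm);
 *	addr = mmap_region(file, aligned_addr, aligned_len, vm_flags,
 *			   pgoff, &uf_list);
 *	mmap_write_unlock(current->mm);
 *	if (IS_ERR_VALUE(addr))
 *		return addr;
 *
 * In practice this function is reached via the do_mmap() path rather than
 * called directly, as the comment above notes; aligned_addr, aligned_len
 * and uf_list are placeholder names.
 */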
/*
 * do_brk_flags() - Increase the brk vma if the flags match.
 * @vmi: The vma iterator
 * @addr: The start address
 * @len: The length of the increase
 * @vma: The vma,
 * @flags: The VMA Flags
 *
 * Extend the brk VMA from addr to addr + len. If the VMA is NULL or the flags
 * do not match then create a new anonymous VMA. Eventually we may be able to
 * do some brk-specific accounting here.
 */
int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
		 unsigned long addr, unsigned long len, unsigned long flags)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Check against address space limits by the changed size.
	 * Note: This happens *after* clearing old mappings in some code paths.
	 */
	flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
	if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
		return -ENOMEM;

	if (mm->map_count > sysctl_max_map_count)
		return -ENOMEM;

	if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
		return -ENOMEM;

	/*
	 * Expand the existing vma if possible; Note that singular lists do not
	 * occur after forking, so the expand will only happen on new VMAs.
	 */
	if (vma && vma->vm_end == addr) {
		VMG_STATE(vmg, mm, vmi, addr, addr + len, flags, PHYS_PFN(addr));

		vmg.prev = vma;
		/* vmi is positioned at prev, which this mode expects. */
		vmg.merge_flags = VMG_FLAG_JUST_EXPAND;

		if (vma_merge_new_range(&vmg))
			goto out;
		else if (vmg_nomem(&vmg))
			goto unacct_fail;
	}

	if (vma)
		vma_iter_next_range(vmi);
	/* create a vma struct for an anonymous mapping */
	vma = vm_area_alloc(mm);
	if (!vma)
		goto unacct_fail;

	vma_set_anonymous(vma);
	vma_set_range(vma, addr, addr + len, addr >> PAGE_SHIFT);
	vm_flags_init(vma, flags);
	vma->vm_page_prot = vm_get_page_prot(flags);
	vma_start_write(vma);
	if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
		goto mas_store_fail;

	mm->map_count++;
	validate_mm(mm);
	ksm_add_vma(vma);
out:
	perf_event_mmap(vma);
	mm->total_vm += len >> PAGE_SHIFT;
	mm->data_vm += len >> PAGE_SHIFT;
	if (flags & VM_LOCKED)
		mm->locked_vm += (len >> PAGE_SHIFT);
	vm_flags_set(vma, VM_SOFTDIRTY);
	return 0;

mas_store_fail:
	vm_area_free(vma);
unacct_fail:
	vm_unacct_memory(len >> PAGE_SHIFT);
	return -ENOMEM;
}
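/*
 * Illustrative sketch (not part of the original source, simplified): a
 * hypothetical brk-style caller would position a vma_iterator at the VMA
 * preceding the new range, pass that VMA (or NULL) and let do_brk_flags()
 * either expand it or insert a fresh anonymous VMA. The real caller also
 * bounds the prev lookup so it does not walk below the brk start:
 *
 *	VMA_ITERATOR(vmi, mm, oldbrk);
 *	struct vm_area_struct *brkvma = vma_prev(&vmi);
 *
 *	if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, 0) < 0)
 *		goto out;
 */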
/*
 * unmapped_area() - Find an area between the low_limit and the high_limit with
 * the correct alignment and offset, all from @info. Note: current->mm is used
 * for the search.
 *
 * @info: The unmapped area information including the range [low_limit -
 * high_limit), the alignment offset and mask.
 *
 * Return: A memory address or -ENOMEM.
 */
unsigned long unmapped_area(struct vm_unmapped_area_info *info)
{
	unsigned long length, gap;
	unsigned long low_limit, high_limit;
	struct vm_area_struct *tmp;
	VMA_ITERATOR(vmi, current->mm, 0);

	/* Adjust search length to account for worst case alignment overhead */
	length = info->length + info->align_mask + info->start_gap;
	if (length < info->length)
		return -ENOMEM;

	low_limit = info->low_limit;
	if (low_limit < mmap_min_addr)
		low_limit = mmap_min_addr;
	high_limit = info->high_limit;
retry:
	if (vma_iter_area_lowest(&vmi, low_limit, high_limit, length))
		return -ENOMEM;

	/*
	 * Adjust for the gap first so it doesn't interfere with the
	 * later alignment. The first step is the minimum needed to
	 * fulfill the start gap, the next step is the minimum to align
	 * that. It is the minimum needed to fulfill both.
	 */
	gap = vma_iter_addr(&vmi) + info->start_gap;
	gap += (info->align_offset - gap) & info->align_mask;
	tmp = vma_next(&vmi);
	if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
		if (vm_start_gap(tmp) < gap + length - 1) {
			low_limit = tmp->vm_end;
			vma_iter_reset(&vmi);
			goto retry;
		}
	} else {
		tmp = vma_prev(&vmi);
		if (tmp && vm_end_gap(tmp) > gap) {
			low_limit = vm_end_gap(tmp);
			vma_iter_reset(&vmi);
			goto retry;
		}
	}

	return gap;
}
/*
 * unmapped_area_topdown() - Find an area between the low_limit and the
 * high_limit with the correct alignment and offset at the highest available
 * address, all from @info. Note: current->mm is used for the search.
 *
 * @info: The unmapped area information including the range [low_limit -
 * high_limit), the alignment offset and mask.
 *
 * Return: A memory address or -ENOMEM.
 */
unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
{
	unsigned long length, gap, gap_end;
	unsigned long low_limit, high_limit;
	struct vm_area_struct *tmp;
	VMA_ITERATOR(vmi, current->mm, 0);

	/* Adjust search length to account for worst case alignment overhead */
	length = info->length + info->align_mask + info->start_gap;
	if (length < info->length)
		return -ENOMEM;

	low_limit = info->low_limit;
	if (low_limit < mmap_min_addr)
		low_limit = mmap_min_addr;
	high_limit = info->high_limit;
retry:
	if (vma_iter_area_highest(&vmi, low_limit, high_limit, length))
		return -ENOMEM;

	gap = vma_iter_end(&vmi) - info->length;
	gap -= (gap - info->align_offset) & info->align_mask;
	gap_end = vma_iter_end(&vmi);
	tmp = vma_next(&vmi);
	if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
		if (vm_start_gap(tmp) < gap_end) {
			high_limit = vm_start_gap(tmp);
			vma_iter_reset(&vmi);
			goto retry;
		}
	} else {
		tmp = vma_prev(&vmi);
		if (tmp && vm_end_gap(tmp) > gap) {
			high_limit = tmp->vm_start;
			vma_iter_reset(&vmi);
			goto retry;
		}
	}

	return gap;
}
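/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * get_unmapped_area()-style helper fills a struct vm_unmapped_area_info and
 * then calls one of the two searches above depending on the mm layout:
 *
 *	struct vm_unmapped_area_info info = {};
 *
 *	info.length = len;
 *	info.low_limit = mm->mmap_base;
 *	info.high_limit = mmap_end;	// placeholder for the arch upper limit
 *	info.align_mask = 0;		// or a huge-page mask for aligned mappings
 *	info.align_offset = 0;
 *	return unmapped_area(&info);	// or unmapped_area_topdown(&info)
 */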
/*
 * Verify that the stack growth is acceptable and
 * update accounting. This is shared with both the
 * grow-up and grow-down cases.
 */
static int acct_stack_growth(struct vm_area_struct *vma,
			     unsigned long size, unsigned long grow)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long new_start;

	/* address space limit tests */
	if (!may_expand_vm(mm, vma->vm_flags, grow))
		return -ENOMEM;

	/* Stack limit test */
	if (size > rlimit(RLIMIT_STACK))
		return -ENOMEM;

	/* mlock limit tests */
	if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT))
		return -ENOMEM;

	/* Check to ensure the stack will not grow into a hugetlb-only region */
	new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
			vma->vm_end - size;
	if (is_hugepage_only_range(vma->vm_mm, new_start, size))
		return -EFAULT;

	/*
	 * Overcommit.. This must be the final test, as it will
	 * update security statistics.
	 */
	if (security_vm_enough_memory_mm(mm, grow))
		return -ENOMEM;

	return 0;
}
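/*
 * Worked example (illustrative, not part of the original source): growing a
 * stack VMA by a single page means @grow == 1 (in pages) while @size is the
 * new total extent in bytes. So an 8 MiB stack growing by one 4 KiB page is
 * checked as size = 8 MiB + 4 KiB against rlimit(RLIMIT_STACK), grow = 1
 * against the address-space limit, and grow << PAGE_SHIFT = 4096 bytes
 * against the mlock limit (relevant when the VMA is VM_LOCKED).
 */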
#if defined(CONFIG_STACK_GROWSUP)
/*
 * PA-RISC uses this for its stack.
 * vma is the last one with address > vma->vm_end. Have to extend vma.
 */
int expand_upwards(struct vm_area_struct *vma, unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *next;
	unsigned long gap_addr;
	int error = 0;
	VMA_ITERATOR(vmi, mm, vma->vm_start);

	if (!(vma->vm_flags & VM_GROWSUP))
		return -EFAULT;

	mmap_assert_write_locked(mm);

	/* Guard against exceeding limits of the address space. */
	address &= PAGE_MASK;
	if (address >= (TASK_SIZE & PAGE_MASK))
		return -ENOMEM;
	address += PAGE_SIZE;

	/* Enforce stack_guard_gap */
	gap_addr = address + stack_guard_gap;

	/* Guard against overflow */
	if (gap_addr < address || gap_addr > TASK_SIZE)
		gap_addr = TASK_SIZE;

	next = find_vma_intersection(mm, vma->vm_end, gap_addr);
	if (next && vma_is_accessible(next)) {
		if (!(next->vm_flags & VM_GROWSUP))
			return -ENOMEM;
		/* Check that both stack segments have the same anon_vma? */
	}

	if (next)
		vma_iter_prev_range_limit(&vmi, address);

	vma_iter_config(&vmi, vma->vm_start, address);
	if (vma_iter_prealloc(&vmi, vma))
		return -ENOMEM;

	/* We must make sure the anon_vma is allocated. */
	if (unlikely(anon_vma_prepare(vma))) {
		vma_iter_free(&vmi);
		return -ENOMEM;
	}

	/* Lock the VMA before expanding to prevent concurrent page faults */
	vma_start_write(vma);
	/* We update the anon VMA tree. */
	anon_vma_lock_write(vma->anon_vma);

	/* Somebody else might have raced and expanded it already */
	if (address > vma->vm_end) {
		unsigned long size, grow;

		size = address - vma->vm_start;
		grow = (address - vma->vm_end) >> PAGE_SHIFT;

		error = -ENOMEM;
		if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
			error = acct_stack_growth(vma, size, grow);
			if (!error) {
				if (vma->vm_flags & VM_LOCKED)
					mm->locked_vm += grow;
				vm_stat_account(mm, vma->vm_flags, grow);
				anon_vma_interval_tree_pre_update_vma(vma);
				vma->vm_end = address;
				/* Overwrite old entry in mtree. */
				vma_iter_store(&vmi, vma);
				anon_vma_interval_tree_post_update_vma(vma);

				perf_event_mmap(vma);
			}
		}
	}
	anon_vma_unlock_write(vma->anon_vma);
	vma_iter_free(&vmi);
	validate_mm(mm);
	return error;
}
#endif /* CONFIG_STACK_GROWSUP */
/*
 * vma is the first one with address < vma->vm_start. Have to extend vma.
 * mmap_lock held for writing.
 */
int expand_downwards(struct vm_area_struct *vma, unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *prev;
	int error = 0;
	VMA_ITERATOR(vmi, mm, vma->vm_start);

	if (!(vma->vm_flags & VM_GROWSDOWN))
		return -EFAULT;

	mmap_assert_write_locked(mm);

	address &= PAGE_MASK;
	if (address < mmap_min_addr || address < FIRST_USER_ADDRESS)
		return -EPERM;

	/* Enforce stack_guard_gap */
	prev = vma_prev(&vmi);
	/* Check that both stack segments have the same anon_vma? */
	if (prev) {
		if (!(prev->vm_flags & VM_GROWSDOWN) &&
		    vma_is_accessible(prev) &&
		    (address - prev->vm_end < stack_guard_gap))
			return -ENOMEM;
	}

	if (prev)
		vma_iter_next_range_limit(&vmi, vma->vm_start);

	vma_iter_config(&vmi, address, vma->vm_end);
	if (vma_iter_prealloc(&vmi, vma))
		return -ENOMEM;

	/* We must make sure the anon_vma is allocated. */
	if (unlikely(anon_vma_prepare(vma))) {
		vma_iter_free(&vmi);
		return -ENOMEM;
	}

	/* Lock the VMA before expanding to prevent concurrent page faults */
	vma_start_write(vma);
	/* We update the anon VMA tree. */
	anon_vma_lock_write(vma->anon_vma);

	/* Somebody else might have raced and expanded it already */
	if (address < vma->vm_start) {
		unsigned long size, grow;

		size = vma->vm_end - address;
		grow = (vma->vm_start - address) >> PAGE_SHIFT;

		error = -ENOMEM;
		if (grow <= vma->vm_pgoff) {
			error = acct_stack_growth(vma, size, grow);
			if (!error) {
				if (vma->vm_flags & VM_LOCKED)
					mm->locked_vm += grow;
				vm_stat_account(mm, vma->vm_flags, grow);
				anon_vma_interval_tree_pre_update_vma(vma);
				vma->vm_start = address;
				vma->vm_pgoff -= grow;
				/* Overwrite old entry in mtree. */
				vma_iter_store(&vmi, vma);
				anon_vma_interval_tree_post_update_vma(vma);

				perf_event_mmap(vma);
			}
		}
	}
	anon_vma_unlock_write(vma->anon_vma);
	vma_iter_free(&vmi);
	validate_mm(mm);
	return error;
}
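/*
 * Worked example (illustrative, not part of the original source): the
 * "grow <= vma->vm_pgoff" test above keeps the page offset from
 * underflowing when the start address moves down. An anonymous stack VMA
 * starting at virtual page N typically has vm_pgoff == N (anonymous
 * mappings use the virtual page number as their offset), so growing
 * downwards by 'grow' pages yields the new offset vm_pgoff - grow, which
 * must not go negative.
 */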
int __vm_munmap(unsigned long start, size_t len, bool unlock)
{
	int ret;
	struct mm_struct *mm = current->mm;
	LIST_HEAD(uf);
	VMA_ITERATOR(vmi, mm, start);

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock);
	if (ret || !unlock)
		mmap_write_unlock(mm);

	userfaultfd_unmap_complete(mm, &uf);