// SPDX-License-Identifier: GPL-2.0-or-later

/*
 * VMA-specific functions.
 */

#include "vma_internal.h"

struct mmap_state {
	struct mm_struct *mm;
	struct vma_iterator *vmi;

	unsigned long addr;
	unsigned long end;
	pgoff_t pgoff;
	unsigned long pglen;
	unsigned long flags;
	struct file *file;

	unsigned long charged;

	struct vm_area_struct *prev;
	struct vm_area_struct *next;

	/* Unmapping state. */
	struct vma_munmap_struct vms;
	struct ma_state mas_detach;
	struct maple_tree mt_detach;
};

#define MMAP_STATE(name, mm_, vmi_, addr_, len_, pgoff_, flags_, file_)	\
	struct mmap_state name = {					\
		.mm = mm_,						\
		.vmi = vmi_,						\
		.addr = addr_,						\
		.end = (addr_) + (len_),				\
		.pgoff = pgoff_,					\
		.pglen = PHYS_PFN(len_),				\
		.flags = flags_,					\
		.file = file_,						\
	}

#define VMG_MMAP_STATE(name, map_, vma_)				\
	struct vma_merge_struct name = {				\
		.mm = (map_)->mm,					\
		.vmi = (map_)->vmi,					\
		.start = (map_)->addr,					\
		.end = (map_)->end,					\
		.flags = (map_)->flags,					\
		.pgoff = (map_)->pgoff,					\
		.file = (map_)->file,					\
		.prev = (map_)->prev,					\
		.vma = vma_,						\
		.next = (vma_) ? NULL : (map_)->next,			\
		.state = VMA_MERGE_START,				\
		.merge_flags = VMG_FLAG_DEFAULT,			\
	}
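/*
 * Usage sketch (illustrative, not part of the original source): a mapping
 * helper would be expected to build the two state objects roughly as
 *
 *	MMAP_STATE(map, mm, &vmi, addr, len, pgoff, vm_flags, file);
 *	VMG_MMAP_STATE(vmg, &map, NULL);
 *
 * MMAP_STATE captures the mmap() request itself, while VMG_MMAP_STATE derives
 * a merge descriptor from it; the exact call sites are an assumption here.
 */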
static inline bool is_mergeable_vma(struct vma_merge_struct *vmg, bool merge_next)
{
	struct vm_area_struct *vma = merge_next ? vmg->next : vmg->prev;

	if (!mpol_equal(vmg->policy, vma_policy(vma)))
		return false;
	/*
	 * VM_SOFTDIRTY should not prevent from VMA merging, if we
	 * match the flags but dirty bit -- the caller should mark
	 * merged VMA as dirty. If dirty bit won't be excluded from
	 * comparison, we increase pressure on the memory system forcing
	 * the kernel to generate new VMAs when old one could be
	 * extended instead.
	 */
	if ((vma->vm_flags ^ vmg->flags) & ~VM_SOFTDIRTY)
		return false;
	if (vma->vm_file != vmg->file)
		return false;
	if (!is_mergeable_vm_userfaultfd_ctx(vma, vmg->uffd_ctx))
		return false;
	if (!anon_vma_name_eq(anon_vma_name(vma), vmg->anon_name))
		return false;

	return true;
}
static inline bool is_mergeable_anon_vma(struct anon_vma *anon_vma1,
					 struct anon_vma *anon_vma2,
					 struct vm_area_struct *vma)
{
	/*
	 * The list_is_singular() test is to avoid merging VMA cloned from
	 * parents. This can improve scalability caused by anon_vma lock.
	 */
	if ((!anon_vma1 || !anon_vma2) && (!vma ||
	     list_is_singular(&vma->anon_vma_chain)))
		return true;
	return anon_vma1 == anon_vma2;
}
/* Are the anon_vma's belonging to each VMA compatible with one another? */
static inline bool are_anon_vmas_compatible(struct vm_area_struct *vma1,
					    struct vm_area_struct *vma2)
{
	return is_mergeable_anon_vma(vma1->anon_vma, vma2->anon_vma, NULL);
}
/*
 * init_multi_vma_prep() - Initializer for struct vma_prepare
 * @vp: The vma_prepare struct
 * @vma: The vma that will be altered once locked
 * @next: The next vma if it is to be adjusted
 * @remove: The first vma to be removed
 * @remove2: The second vma to be removed
 */
static void init_multi_vma_prep(struct vma_prepare *vp,
				struct vm_area_struct *vma,
				struct vm_area_struct *next,
				struct vm_area_struct *remove,
				struct vm_area_struct *remove2)
{
	memset(vp, 0, sizeof(struct vma_prepare));
	vp->vma = vma;
	vp->anon_vma = vma->anon_vma;
	vp->remove = remove;
	vp->remove2 = remove2;
	vp->adj_next = next;
	if (!vp->anon_vma && next)
		vp->anon_vma = next->anon_vma;

	vp->file = vma->vm_file;
	if (vp->file)
		vp->mapping = vma->vm_file->f_mapping;
}
/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * in front of (at a lower virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
 *
 * We don't check here for the merged mmap wrapping around the end of pagecache
 * indices (16TB on ia32) because do_mmap() does not permit mmap's which
 * wrap, nor mmaps which cover the final page at index -1UL.
 *
 * We assume the vma may be removed as part of the merge.
 */
static bool can_vma_merge_before(struct vma_merge_struct *vmg)
{
	pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);

	if (is_mergeable_vma(vmg, /* merge_next = */ true) &&
	    is_mergeable_anon_vma(vmg->anon_vma, vmg->next->anon_vma, vmg->next)) {
		if (vmg->next->vm_pgoff == vmg->pgoff + pglen)
			return true;
	}

	return false;
}
/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * beyond (at a higher virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
 *
 * We assume that vma is not removed as part of the merge.
 */
static bool can_vma_merge_after(struct vma_merge_struct *vmg)
{
	if (is_mergeable_vma(vmg, /* merge_next = */ false) &&
	    is_mergeable_anon_vma(vmg->anon_vma, vmg->prev->anon_vma, vmg->prev)) {
		if (vmg->prev->vm_pgoff + vma_pages(vmg->prev) == vmg->pgoff)
			return true;
	}

	return false;
}
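/*
 * Worked example (illustrative only): if vmg->prev is file-backed, spans four
 * pages and has vm_pgoff == 10, then can_vma_merge_after() succeeds only when
 * vmg->pgoff == 10 + vma_pages(prev) == 14, i.e. the file offset remains
 * contiguous across the prev/new boundary; can_vma_merge_before() applies the
 * mirrored check against vmg->next.
 */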
static void __vma_link_file(struct vm_area_struct *vma,
			    struct address_space *mapping)
{
	if (vma_is_shared_maywrite(vma))
		mapping_allow_writable(mapping);

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_insert(vma, &mapping->i_mmap);
	flush_dcache_mmap_unlock(mapping);
}
/*
 * Requires inode->i_mapping->i_mmap_rwsem
 */
static void __remove_shared_vm_struct(struct vm_area_struct *vma,
				      struct address_space *mapping)
{
	if (vma_is_shared_maywrite(vma))
		mapping_unmap_writable(mapping);

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_remove(vma, &mapping->i_mmap);
	flush_dcache_mmap_unlock(mapping);
}
/*
 * vma_prepare() - Helper function for handling locking VMAs prior to altering
 * @vp: The initialized vma_prepare struct
 */
static void vma_prepare(struct vma_prepare *vp)
{
	if (vp->file) {
		uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);

		if (vp->adj_next)
			uprobe_munmap(vp->adj_next, vp->adj_next->vm_start,
				      vp->adj_next->vm_end);

		i_mmap_lock_write(vp->mapping);
		if (vp->insert && vp->insert->vm_file) {
			/*
			 * Put into interval tree now, so instantiated pages
			 * are visible to arm/parisc __flush_dcache_page
			 * throughout; but we cannot insert into address
			 * space until vma start or end is updated.
			 */
			__vma_link_file(vp->insert,
					vp->insert->vm_file->f_mapping);
		}
	}

	if (vp->anon_vma) {
		anon_vma_lock_write(vp->anon_vma);
		anon_vma_interval_tree_pre_update_vma(vp->vma);
		if (vp->adj_next)
			anon_vma_interval_tree_pre_update_vma(vp->adj_next);
	}

	if (vp->file) {
		flush_dcache_mmap_lock(vp->mapping);
		vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
		if (vp->adj_next)
			vma_interval_tree_remove(vp->adj_next,
						 &vp->mapping->i_mmap);
	}
}
/*
 * vma_complete- Helper function for handling the unlocking after altering VMAs,
 * or for inserting a VMA.
 *
 * @vp: The vma_prepare struct
 * @vmi: The vma iterator
 * @mm: The mm_struct
 */
static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi,
			 struct mm_struct *mm)
{
	if (vp->file) {
		if (vp->adj_next)
			vma_interval_tree_insert(vp->adj_next,
						 &vp->mapping->i_mmap);
		vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
		flush_dcache_mmap_unlock(vp->mapping);
	}

	if (vp->remove && vp->file) {
		__remove_shared_vm_struct(vp->remove, vp->mapping);
		if (vp->remove2)
			__remove_shared_vm_struct(vp->remove2, vp->mapping);
	} else if (vp->insert) {
		/*
		 * split_vma has split insert from vma, and needs
		 * us to insert it before dropping the locks
		 * (it may either follow vma or precede it).
		 */
		vma_iter_store(vmi, vp->insert);
		mm->map_count++;
	}

	if (vp->anon_vma) {
		anon_vma_interval_tree_post_update_vma(vp->vma);
		if (vp->adj_next)
			anon_vma_interval_tree_post_update_vma(vp->adj_next);
		anon_vma_unlock_write(vp->anon_vma);
	}

	if (vp->file) {
		i_mmap_unlock_write(vp->mapping);
		uprobe_mmap(vp->vma);

		if (vp->adj_next)
			uprobe_mmap(vp->adj_next);
	}

	if (vp->remove) {
again:
		vma_mark_detached(vp->remove, true);
		if (vp->file) {
			uprobe_munmap(vp->remove, vp->remove->vm_start,
				      vp->remove->vm_end);
			fput(vp->file);
		}
		if (vp->remove->anon_vma)
			anon_vma_merge(vp->vma, vp->remove);
		mm->map_count--;
		mpol_put(vma_policy(vp->remove));
		if (!vp->remove2)
			WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end);
		vm_area_free(vp->remove);

		/*
		 * In mprotect's case 6 (see comments on vma_merge),
		 * we are removing both mid and next vmas
		 */
		if (vp->remove2) {
			vp->remove = vp->remove2;
			vp->remove2 = NULL;
			goto again;
		}
	}
	if (vp->insert && vp->file)
		uprobe_mmap(vp->insert);
}
/*
 * init_vma_prep() - Initializer wrapper for vma_prepare struct
 * @vp: The vma_prepare struct
 * @vma: The vma that will be altered once locked
 */
static void init_vma_prep(struct vma_prepare *vp, struct vm_area_struct *vma)
{
	init_multi_vma_prep(vp, vma, NULL, NULL, NULL);
}
/*
 * Can the proposed VMA be merged with the left (previous) VMA taking into
 * account the start position of the proposed range.
 */
static bool can_vma_merge_left(struct vma_merge_struct *vmg)
{
	return vmg->prev && vmg->prev->vm_end == vmg->start &&
		can_vma_merge_after(vmg);
}
/*
 * Can the proposed VMA be merged with the right (next) VMA taking into
 * account the end position of the proposed range.
 *
 * In addition, if we can merge with the left VMA, ensure that left and right
 * anon_vma's are also compatible.
 */
static bool can_vma_merge_right(struct vma_merge_struct *vmg,
				bool can_merge_left)
{
	if (!vmg->next || vmg->end != vmg->next->vm_start ||
	    !can_vma_merge_before(vmg))
		return false;

	if (!can_merge_left)
		return true;

	/*
	 * If we can merge with prev (left) and next (right), indicating that
	 * each VMA's anon_vma is compatible with the proposed anon_vma, this
	 * does not mean prev and next are compatible with EACH OTHER.
	 *
	 * We therefore check this in addition to mergeability to either side.
	 */
	return are_anon_vmas_compatible(vmg->prev, vmg->next);
}
/*
 * Close a vm structure and free it.
 */
void remove_vma(struct vm_area_struct *vma, bool unreachable)
{
	might_sleep();
	vma_close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	mpol_put(vma_policy(vma));
	if (unreachable)
		__vm_area_free(vma);
	else
		vm_area_free(vma);
}
/*
 * Get rid of page table information in the indicated region.
 *
 * Called with the mm semaphore held.
 */
void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
		  struct vm_area_struct *prev, struct vm_area_struct *next)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	update_hiwater_rss(mm);
	unmap_vmas(&tlb, mas, vma, vma->vm_start, vma->vm_end, vma->vm_end,
		   /* mm_wr_locked = */ true);
	mas_set(mas, vma->vm_end);
	free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
		      next ? next->vm_start : USER_PGTABLES_CEILING,
		      /* mm_wr_locked = */ true);
	tlb_finish_mmu(&tlb);
}
/*
 * __split_vma() bypasses sysctl_max_map_count checking.  We use this where it
 * has already been checked or doesn't make sense to fail.
 * VMA Iterator will point to the original VMA.
 */
static int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
		       unsigned long addr, int new_below)
{
	struct vma_prepare vp;
	struct vm_area_struct *new;
	int err;

	WARN_ON(vma->vm_start >= addr);
	WARN_ON(vma->vm_end <= addr);

	if (vma->vm_ops && vma->vm_ops->may_split) {
		err = vma->vm_ops->may_split(vma, addr);
		if (err)
			return err;
	}

	new = vm_area_dup(vma);
	if (!new)
		return -ENOMEM;

	if (new_below) {
		new->vm_end = addr;
	} else {
		new->vm_start = addr;
		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
	}

	err = -ENOMEM;
	vma_iter_config(vmi, new->vm_start, new->vm_end);
	if (vma_iter_prealloc(vmi, new))
		goto out_free_vma;

	err = vma_dup_policy(vma, new);
	if (err)
		goto out_free_vmi;

	err = anon_vma_clone(new, vma);
	if (err)
		goto out_free_mpol;

	if (new->vm_file)
		get_file(new->vm_file);

	if (new->vm_ops && new->vm_ops->open)
		new->vm_ops->open(new);

	vma_start_write(vma);
	vma_start_write(new);

	init_vma_prep(&vp, vma);
	vp.insert = new;
	vma_prepare(&vp);
	vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);

	if (new_below) {
		vma->vm_start = addr;
		vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
	} else {
		vma->vm_end = addr;
	}

	/* vma_complete stores the new vma */
	vma_complete(&vp, vmi, vma->vm_mm);
	validate_mm(vma->vm_mm);

	/* Success. */
	if (new_below)
		vma_next(vmi);
	else
		vma_iter_prev_range(vmi);

	return 0;

out_free_mpol:
	mpol_put(vma_policy(new));
out_free_vmi:
	vma_iter_free(vmi);
out_free_vma:
	vm_area_free(new);
	return err;
}
/*
 * Split a vma into two pieces at address 'addr', a new vma is allocated
 * either for the first part or the tail.
 */
static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
		     unsigned long addr, int new_below)
{
	if (vma->vm_mm->map_count >= sysctl_max_map_count)
		return -ENOMEM;

	return __split_vma(vmi, vma, addr, new_below);
}
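/*
 * Illustrative example (not from the original source): splitting a VMA that
 * covers [0x1000, 0x5000) at addr == 0x3000 with new_below == 1 leaves the
 * original VMA covering [0x3000, 0x5000) and inserts a new VMA for
 * [0x1000, 0x3000); with new_below == 0 the new VMA instead takes the upper
 * half, and vm_pgoff is adjusted so file offsets stay consistent.
 */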
/*
 * vma has some anon_vma assigned, and is already inserted on that
 * anon_vma's interval trees.
 *
 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
 * vma must be removed from the anon_vma's interval trees using
 * anon_vma_interval_tree_pre_update_vma().
 *
 * After the update, the vma will be reinserted using
 * anon_vma_interval_tree_post_update_vma().
 *
 * The entire update must be protected by exclusive mmap_lock and by
 * the root anon_vma's mutex.
 */
static void
anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
}

static void
anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
}
/*
 * dup_anon_vma() - Helper function to duplicate anon_vma
 * @dst: The destination VMA
 * @src: The source VMA
 * @dup: Pointer to the destination VMA when successful.
 *
 * Returns: 0 on success.
 */
static int dup_anon_vma(struct vm_area_struct *dst,
			struct vm_area_struct *src, struct vm_area_struct **dup)
{
	/*
	 * Easily overlooked: when mprotect shifts the boundary, make sure the
	 * expanding vma has anon_vma set if the shrinking vma had, to cover any
	 * anon pages imported.
	 */
	if (src->anon_vma && !dst->anon_vma) {
		int ret;

		vma_assert_write_locked(dst);
		dst->anon_vma = src->anon_vma;
		ret = anon_vma_clone(dst, src);
		if (ret)
			return ret;

		*dup = dst;
	}

	return 0;
}
#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
void validate_mm(struct mm_struct *mm)
{
	int bug = 0;
	int i = 0;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mt_validate(&mm->mm_mt);
	for_each_vma(vmi, vma) {
#ifdef CONFIG_DEBUG_VM_RB
		struct anon_vma *anon_vma = vma->anon_vma;
		struct anon_vma_chain *avc;
#endif
		unsigned long vmi_start, vmi_end;
		bool warn = 0;

		vmi_start = vma_iter_addr(&vmi);
		vmi_end = vma_iter_end(&vmi);
		if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
			warn = 1;

		if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
			warn = 1;

		if (warn) {
			pr_emerg("issue in %s\n", current->comm);
			dump_stack();
			dump_vma(vma);
			pr_emerg("tree range: %px start %lx end %lx\n", vma,
				 vmi_start, vmi_end - 1);
			vma_iter_dump_tree(&vmi);
		}

#ifdef CONFIG_DEBUG_VM_RB
		if (anon_vma) {
			anon_vma_lock_read(anon_vma);
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				anon_vma_interval_tree_verify(avc);
			anon_vma_unlock_read(anon_vma);
		}
#endif
		/* Check for an infinite loop */
		if (++i > mm->map_count + 10) {
			bug = 1;
			break;
		}
	}
	if (i != mm->map_count) {
		pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
		bug = 1;
	}
	VM_BUG_ON_MM(bug, mm);
}
#endif /* CONFIG_DEBUG_VM_MAPLE_TREE */
/* Actually perform the VMA merge operation. */
static int commit_merge(struct vma_merge_struct *vmg,
			struct vm_area_struct *adjust,
			struct vm_area_struct *remove,
			struct vm_area_struct *remove2,
			long adj_start,
			bool expanded)
{
	struct vma_prepare vp;

	init_multi_vma_prep(&vp, vmg->vma, adjust, remove, remove2);

	VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
		   vp.anon_vma != adjust->anon_vma);

	if (expanded) {
		/* Note: vma iterator must be pointing to 'start'. */
		vma_iter_config(vmg->vmi, vmg->start, vmg->end);
	} else {
		vma_iter_config(vmg->vmi, adjust->vm_start + adj_start,
				adjust->vm_end);
	}

	if (vma_iter_prealloc(vmg->vmi, vmg->vma))
		return -ENOMEM;

	vma_prepare(&vp);
	vma_adjust_trans_huge(vmg->vma, vmg->start, vmg->end, adj_start);
	vma_set_range(vmg->vma, vmg->start, vmg->end, vmg->pgoff);

	if (expanded)
		vma_iter_store(vmg->vmi, vmg->vma);

	if (adj_start) {
		adjust->vm_start += adj_start;
		adjust->vm_pgoff += PHYS_PFN(adj_start);
		if (adj_start < 0) {
			WARN_ON(expanded);
			vma_iter_store(vmg->vmi, adjust);
		}
	}

	vma_complete(&vp, vmg->vmi, vmg->vma->vm_mm);

	return 0;
}
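/*
 * Note on the adj_start convention (clarifying comment, not original text):
 * commit_merge() shifts adjust->vm_start and adjust->vm_pgoff by adj_start.
 * A positive adj_start (e.g. when a previous VMA absorbs the head of
 * vmg->vma) moves the adjusted VMA's start up; a negative adj_start (e.g.
 * when vmg->next absorbs the tail of a partially spanned VMA) moves the
 * adjusted VMA's start down to cover the released range.
 */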
/* We can only remove VMAs when merging if they do not have a close hook. */
static bool can_merge_remove_vma(struct vm_area_struct *vma)
{
	return !vma->vm_ops || !vma->vm_ops->close;
}
/*
 * vma_merge_existing_range - Attempt to merge VMAs based on a VMA having its
 * attributes modified.
 *
 * @vmg: Describes the modifications being made to a VMA and associated
 *       metadata.
 *
 * When the attributes of a range within a VMA change, then it might be possible
 * for immediately adjacent VMAs to be merged into that VMA due to having
 * identical properties.
 *
 * This function checks for the existence of any such mergeable VMAs and updates
 * the maple tree describing the @vmg->vma->vm_mm address space to account for
 * this, as well as any VMAs shrunk/expanded/deleted as a result of this merge.
 *
 * As part of this operation, if a merge occurs, the @vmg object will have its
 * vma, start, end, and pgoff fields modified to execute the merge. Subsequent
 * calls to this function should reset these fields.
 *
 * Returns: The merged VMA if merge succeeds, or NULL otherwise.
 *
 * ASSUMPTIONS:
 * - The caller must assign the VMA to be modified to @vmg->vma.
 * - The caller must have set @vmg->prev to the previous VMA, if there is one.
 * - The caller must not set @vmg->next, as we determine this.
 * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
 * - vmi must be positioned within [@vmg->vma->vm_start, @vmg->vma->vm_end).
 */
static struct vm_area_struct *vma_merge_existing_range(struct vma_merge_struct *vmg)
{
	struct vm_area_struct *vma = vmg->vma;
	struct vm_area_struct *prev = vmg->prev;
	struct vm_area_struct *next, *res;
	struct vm_area_struct *anon_dup = NULL;
	struct vm_area_struct *adjust = NULL;
	unsigned long start = vmg->start;
	unsigned long end = vmg->end;
	bool left_side = vma && start == vma->vm_start;
	bool right_side = vma && end == vma->vm_end;
	int err = 0;
	long adj_start = 0;
	bool merge_will_delete_vma, merge_will_delete_next;
	bool merge_left, merge_right, merge_both;
	bool expanded;

	mmap_assert_write_locked(vmg->mm);
	VM_WARN_ON(!vma); /* We are modifying a VMA, so caller must specify. */
	VM_WARN_ON(vmg->next); /* We set this. */
	VM_WARN_ON(prev && start <= prev->vm_start);
	VM_WARN_ON(start >= end);
	/*
	 * If vma == prev, then we are offset into a VMA. Otherwise, if we are
	 * not, we must span a portion of the VMA.
	 */
	VM_WARN_ON(vma && ((vma != prev && vmg->start != vma->vm_start) ||
			   vmg->end > vma->vm_end));
	/* The vmi must be positioned within vmg->vma. */
	VM_WARN_ON(vma && !(vma_iter_addr(vmg->vmi) >= vma->vm_start &&
			    vma_iter_addr(vmg->vmi) < vma->vm_end));

	vmg->state = VMA_MERGE_NOMERGE;

	/*
	 * If a special mapping or if the range being modified is neither at the
	 * furthermost left or right side of the VMA, then we have no chance of
	 * merging and should abort.
	 */
	if (vmg->flags & VM_SPECIAL || (!left_side && !right_side))
		return NULL;

	if (left_side)
		merge_left = can_vma_merge_left(vmg);
	else
		merge_left = false;

	if (right_side) {
		next = vmg->next = vma_iter_next_range(vmg->vmi);
		vma_iter_prev_range(vmg->vmi);

		merge_right = can_vma_merge_right(vmg, merge_left);
	} else {
		merge_right = false;
		next = NULL;
	}

	if (merge_left)		/* If merging prev, position iterator there. */
		vma_prev(vmg->vmi);
	else if (!merge_right)	/* If we have nothing to merge, abort. */
		return NULL;

	merge_both = merge_left && merge_right;
	/* If we span the entire VMA, a merge implies it will be deleted. */
	merge_will_delete_vma = left_side && right_side;

	/*
	 * If we need to remove vma in its entirety but are unable to do so,
	 * we have no sensible recourse but to abort the merge.
	 */
	if (merge_will_delete_vma && !can_merge_remove_vma(vma))
		return NULL;

	/*
	 * If we merge both VMAs, then next is also deleted. This implies
	 * merge_will_delete_vma also.
	 */
	merge_will_delete_next = merge_both;

	/*
	 * If we cannot delete next, then we can reduce the operation to merging
	 * prev and vma (thereby deleting vma).
	 */
	if (merge_will_delete_next && !can_merge_remove_vma(next)) {
		merge_will_delete_next = false;
		merge_right = false;
		merge_both = false;
	}

	/* No matter what happens, we will be adjusting vma. */
	vma_start_write(vma);

	if (merge_left)
		vma_start_write(prev);

	if (merge_right)
		vma_start_write(next);

	if (merge_both) {
		/*
		 *         |<----->|
		 * |-------*********-------|
		 *   prev     vma     next
		 *  extend   delete  delete
		 */

		vmg->vma = prev;
		vmg->start = prev->vm_start;
		vmg->end = next->vm_end;
		vmg->pgoff = prev->vm_pgoff;

		/*
		 * We already ensured anon_vma compatibility above, so now it's
		 * simply a case of, if prev has no anon_vma object, which of
		 * next or vma contains the anon_vma we must duplicate.
		 */
		err = dup_anon_vma(prev, next->anon_vma ? next : vma, &anon_dup);
	} else if (merge_left) {
		/*
		 *         |<----->| OR
		 *         |<--------->|
		 * |-------*************
		 *   prev       vma
		 *  extend shrink/delete
		 */

		vmg->vma = prev;
		vmg->start = prev->vm_start;
		vmg->pgoff = prev->vm_pgoff;

		if (!merge_will_delete_vma) {
			adjust = vma;
			adj_start = vmg->end - vma->vm_start;
		}

		err = dup_anon_vma(prev, vma, &anon_dup);
	} else { /* merge_right */
		/*
		 *     |<----->| OR
		 * |<--------->|
		 * *************-------|
		 *      vma       next
		 *  shrink/delete extend
		 */
		pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);

		VM_WARN_ON(!merge_right);
		/* If we are offset into a VMA, then prev must be vma. */
		VM_WARN_ON(vmg->start > vma->vm_start && prev && vma != prev);

		if (merge_will_delete_vma) {
			vmg->vma = next;
			vmg->end = next->vm_end;
			vmg->pgoff = next->vm_pgoff - pglen;
		} else {
			/*
			 * We shrink vma and expand next.
			 *
			 * IMPORTANT: This is the ONLY case where the final
			 * merged VMA is NOT vmg->vma, but rather vmg->next.
			 */

			vmg->start = vma->vm_start;
			vmg->end = start;
			vmg->pgoff = vma->vm_pgoff;

			adjust = next;
			adj_start = -(vma->vm_end - start);
		}

		err = dup_anon_vma(next, vma, &anon_dup);
	}

	if (err)
		goto abort;

	/*
	 * In nearly all cases, we expand vmg->vma. There is one exception -
	 * merge_right where we partially span the VMA. In this case we shrink
	 * the end of vmg->vma and adjust the start of vmg->next accordingly.
	 */
	expanded = !merge_right || merge_will_delete_vma;

	if (commit_merge(vmg, adjust,
			 merge_will_delete_vma ? vma : NULL,
			 merge_will_delete_next ? next : NULL,
			 adj_start, expanded)) {
		if (anon_dup)
			unlink_anon_vmas(anon_dup);

		vmg->state = VMA_MERGE_ERROR_NOMEM;
		return NULL;
	}

	res = merge_left ? prev : next;
	khugepaged_enter_vma(res, vmg->flags);

	vmg->state = VMA_MERGE_SUCCESS;
	return res;

abort:
	vma_iter_set(vmg->vmi, start);
	vma_iter_load(vmg->vmi);
	vmg->state = VMA_MERGE_ERROR_NOMEM;
	return NULL;
}
/*
 * vma_merge_new_range - Attempt to merge a new VMA into address space
 *
 * @vmg: Describes the VMA we are adding, in the range @vmg->start to @vmg->end
 *       (exclusive), which we try to merge with any adjacent VMAs if possible.
 *
 * We are about to add a VMA to the address space starting at @vmg->start and
 * ending at @vmg->end. There are three different possible scenarios:
 *
 * 1. There is a VMA with identical properties immediately adjacent to the
 *    proposed new VMA [@vmg->start, @vmg->end) either before or after it -
 *    EXPAND that VMA:
 *
 * Proposed:       |-----|  or  |-----|
 * Existing:  |----|                  |----|
 *
 * 2. There are VMAs with identical properties immediately adjacent to the
 *    proposed new VMA [@vmg->start, @vmg->end) both before AND after it -
 *    EXPAND the former and REMOVE the latter:
 *
 * Proposed:       |-----|
 * Existing:  |----|     |----|
 *
 * 3. There are no VMAs immediately adjacent to the proposed new VMA or those
 *    VMAs do not have identical attributes - NO MERGE POSSIBLE.
 *
 * In instances where we can merge, this function returns the expanded VMA which
 * will have its range adjusted accordingly and the underlying maple tree also
 * adjusted.
 *
 * Returns: In instances where no merge was possible, NULL. Otherwise, a pointer
 *          to the VMA we expanded.
 *
 * This function adjusts @vmg to provide @vmg->next if not already specified,
 * and adjusts [@vmg->start, @vmg->end) to span the expanded range.
 *
 * ASSUMPTIONS:
 * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
 * - The caller must have determined that [@vmg->start, @vmg->end) is empty,
 *   other than VMAs that will be unmapped should the operation succeed.
 * - The caller must have specified the previous vma in @vmg->prev.
 * - The caller must have specified the next vma in @vmg->next.
 * - The caller must have positioned the vmi at or before the gap.
 */
struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
{
	struct vm_area_struct *prev = vmg->prev;
	struct vm_area_struct *next = vmg->next;
	unsigned long end = vmg->end;
	bool can_merge_left, can_merge_right;
	bool just_expand = vmg->merge_flags & VMG_FLAG_JUST_EXPAND;

	mmap_assert_write_locked(vmg->mm);
	VM_WARN_ON(vmg->vma);
	/* vmi must point at or before the gap. */
	VM_WARN_ON(vma_iter_addr(vmg->vmi) > end);

	vmg->state = VMA_MERGE_NOMERGE;

	/* Special VMAs are unmergeable, also if no prev/next. */
	if ((vmg->flags & VM_SPECIAL) || (!prev && !next))
		return NULL;

	can_merge_left = can_vma_merge_left(vmg);
	can_merge_right = !just_expand && can_vma_merge_right(vmg, can_merge_left);

	/* If we can merge with the next VMA, adjust vmg accordingly. */
	if (can_merge_right) {
		vmg->end = next->vm_end;
		vmg->vma = next;
	}

	/* If we can merge with the previous VMA, adjust vmg accordingly. */
	if (can_merge_left) {
		vmg->start = prev->vm_start;
		vmg->vma = prev;
		vmg->pgoff = prev->vm_pgoff;

		/*
		 * If this merge would result in removal of the next VMA but we
		 * are not permitted to do so, reduce the operation to merging
		 * prev and vma.
		 */
		if (can_merge_right && !can_merge_remove_vma(next))
			vmg->end = end;

		/* In expand-only case we are already positioned at prev. */
		if (!just_expand) {
			/* Equivalent to going to the previous range. */
			vma_prev(vmg->vmi);
		}
	}

	/*
	 * Now try to expand adjacent VMA(s). This takes care of removing the
	 * following VMA if we have VMAs on both sides.
	 */
	if (vmg->vma && !vma_expand(vmg)) {
		khugepaged_enter_vma(vmg->vma, vmg->flags);
		vmg->state = VMA_MERGE_SUCCESS;
		return vmg->vma;
	}

	return NULL;
}
/*
 * vma_expand - Expand an existing VMA
 *
 * @vmg: Describes a VMA expansion operation.
 *
 * Expand @vma to vmg->start and vmg->end.  Can expand off the start and end.
 * Will expand over vmg->next if it's different from vmg->vma and vmg->end ==
 * vmg->next->vm_end.  Checking if the vmg->vma can expand and merge with
 * vmg->next needs to be handled by the caller.
 *
 * Returns: 0 on success.
 *
 * ASSUMPTIONS:
 * - The caller must hold a WRITE lock on vmg->vma->mm->mmap_lock.
 * - The caller must have set @vmg->vma and @vmg->next.
 */
int vma_expand(struct vma_merge_struct *vmg)
{
	struct vm_area_struct *anon_dup = NULL;
	bool remove_next = false;
	struct vm_area_struct *vma = vmg->vma;
	struct vm_area_struct *next = vmg->next;

	mmap_assert_write_locked(vmg->mm);

	vma_start_write(vma);
	if (next && (vma != next) && (vmg->end == next->vm_end)) {
		int ret;

		remove_next = true;
		/* This should already have been checked by this point. */
		VM_WARN_ON(!can_merge_remove_vma(next));
		vma_start_write(next);
		ret = dup_anon_vma(vma, next, &anon_dup);
		if (ret)
			return ret;
	}

	/* Not merging but overwriting any part of next is not handled. */
	VM_WARN_ON(next && !remove_next &&
		   next != vma && vmg->end > next->vm_start);
	/* Only handles expanding */
	VM_WARN_ON(vma->vm_start < vmg->start || vma->vm_end > vmg->end);

	if (commit_merge(vmg, NULL, remove_next ? next : NULL, NULL, 0, true))
		goto nomem;

	return 0;

nomem:
	vmg->state = VMA_MERGE_ERROR_NOMEM;
	if (anon_dup)
		unlink_anon_vmas(anon_dup);
	return -ENOMEM;
}
/*
 * vma_shrink() - Reduce an existing VMAs memory area
 * @vmi: The vma iterator
 * @vma: The VMA to modify
 * @start: The new start
 * @end: The new end
 * @pgoff: The new page offset
 *
 * Returns: 0 on success, -ENOMEM otherwise
 */
int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
	       unsigned long start, unsigned long end, pgoff_t pgoff)
{
	struct vma_prepare vp;

	WARN_ON((vma->vm_start != start) && (vma->vm_end != end));

	if (vma->vm_start < start)
		vma_iter_config(vmi, vma->vm_start, start);
	else
		vma_iter_config(vmi, end, vma->vm_end);

	if (vma_iter_prealloc(vmi, NULL))
		return -ENOMEM;

	vma_start_write(vma);

	init_vma_prep(&vp, vma);
	vma_prepare(&vp);
	vma_adjust_trans_huge(vma, start, end, 0);

	vma_iter_clear(vmi);
	vma_set_range(vma, start, end, pgoff);
	vma_complete(&vp, vmi, vma->vm_mm);
	validate_mm(vma->vm_mm);
	return 0;
}
static inline void vms_clear_ptes(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach, bool mm_wr_locked)
{
	struct mmu_gather tlb;

	if (!vms->clear_ptes) /* Nothing to do */
		return;

	/*
	 * We can free page tables without write-locking mmap_lock because VMAs
	 * were isolated before we downgraded mmap_lock.
	 */
	mas_set(mas_detach, 1);
	tlb_gather_mmu(&tlb, vms->vma->vm_mm);
	update_hiwater_rss(vms->vma->vm_mm);
	unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end,
		   vms->vma_count, mm_wr_locked);

	mas_set(mas_detach, 1);
	/* start and end may be different if there is no prev or next vma. */
	free_pgtables(&tlb, mas_detach, vms->vma, vms->unmap_start,
		      vms->unmap_end, mm_wr_locked);
	tlb_finish_mmu(&tlb);
	vms->clear_ptes = false;
}
static void vms_clean_up_area(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach)
{
	struct vm_area_struct *vma;

	vms_clear_ptes(vms, mas_detach, true);
	mas_set(mas_detach, 0);
	mas_for_each(mas_detach, vma, ULONG_MAX)
		vma_close(vma);
}
/*
 * vms_complete_munmap_vmas() - Finish the munmap() operation
 * @vms: The vma munmap struct
 * @mas_detach: The maple state of the detached vmas
 *
 * This updates the mm_struct, unmaps the region, frees the resources
 * used for the munmap() and may downgrade the lock - if requested.  Everything
 * needed to be done once the vma maple tree is updated.
 */
static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;

	mm = current->mm;
	mm->map_count -= vms->vma_count;
	mm->locked_vm -= vms->locked_vm;
	if (vms->unlock)
		mmap_write_downgrade(mm);

	vms_clear_ptes(vms, mas_detach, !vms->unlock);
	/* Update high watermark before we lower total_vm */
	update_hiwater_vm(mm);
	/* Stat accounting */
	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) - vms->nr_pages);
	/* Paranoid bookkeeping */
	VM_WARN_ON(vms->exec_vm > mm->exec_vm);
	VM_WARN_ON(vms->stack_vm > mm->stack_vm);
	VM_WARN_ON(vms->data_vm > mm->data_vm);
	mm->exec_vm -= vms->exec_vm;
	mm->stack_vm -= vms->stack_vm;
	mm->data_vm -= vms->data_vm;

	/* Remove and clean up vmas */
	mas_set(mas_detach, 0);
	mas_for_each(mas_detach, vma, ULONG_MAX)
		remove_vma(vma, /* unreachable = */ false);

	vm_unacct_memory(vms->nr_accounted);
	validate_mm(mm);
	if (vms->unlock)
		mmap_read_unlock(mm);

	__mt_destroy(mas_detach->tree);
}
/*
 * reattach_vmas() - Undo any munmap work and free resources
 * @mas_detach: The maple state with the detached maple tree
 *
 * Reattach any detached vmas and free up the maple tree used to track the vmas.
 */
static void reattach_vmas(struct ma_state *mas_detach)
{
	struct vm_area_struct *vma;

	mas_set(mas_detach, 0);
	mas_for_each(mas_detach, vma, ULONG_MAX)
		vma_mark_detached(vma, false);

	__mt_destroy(mas_detach->tree);
}
/*
 * vms_gather_munmap_vmas() - Put all VMAs within a range into a maple tree
 * for removal at a later date. Handles splitting first and last if necessary
 * and marking the vmas as isolated.
 *
 * @vms: The vma munmap struct
 * @mas_detach: The maple state tracking the detached tree
 *
 * Return: 0 on success, error otherwise
 */
static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach)
{
	struct vm_area_struct *next = NULL;
	int error;

	/*
	 * If we need to split any vma, do it now to save pain later.
	 * Does it split the first one?
	 */
	if (vms->start > vms->vma->vm_start) {

		/*
		 * Make sure that map_count on return from munmap() will
		 * not exceed its limit; but let map_count go just above
		 * its limit temporarily, to help free resources as expected.
		 */
		if (vms->end < vms->vma->vm_end &&
		    vms->vma->vm_mm->map_count >= sysctl_max_map_count) {
			error = -ENOMEM;
			goto map_count_exceeded;
		}

		/* Don't bother splitting the VMA if we can't unmap it anyway */
		if (!can_modify_vma(vms->vma)) {
			error = -EPERM;
			goto start_split_failed;
		}

		error = __split_vma(vms->vmi, vms->vma, vms->start, 1);
		if (error)
			goto start_split_failed;
	}
	vms->prev = vma_prev(vms->vmi);
	if (vms->prev)
		vms->unmap_start = vms->prev->vm_end;

	/*
	 * Detach a range of VMAs from the mm. Using next as a temp variable as
	 * it is always overwritten.
	 */
	for_each_vma_range(*(vms->vmi), next, vms->end) {
		long nrpages;

		if (!can_modify_vma(next)) {
			error = -EPERM;
			goto modify_vma_failed;
		}
		/* Does it split the end? */
		if (next->vm_end > vms->end) {
			error = __split_vma(vms->vmi, next, vms->end, 0);
			if (error)
				goto end_split_failed;
		}
		vma_start_write(next);
		mas_set(mas_detach, vms->vma_count++);
		error = mas_store_gfp(mas_detach, next, GFP_KERNEL);
		if (error)
			goto munmap_gather_failed;

		vma_mark_detached(next, true);
		nrpages = vma_pages(next);

		vms->nr_pages += nrpages;
		if (next->vm_flags & VM_LOCKED)
			vms->locked_vm += nrpages;

		if (next->vm_flags & VM_ACCOUNT)
			vms->nr_accounted += nrpages;

		if (is_exec_mapping(next->vm_flags))
			vms->exec_vm += nrpages;
		else if (is_stack_mapping(next->vm_flags))
			vms->stack_vm += nrpages;
		else if (is_data_mapping(next->vm_flags))
			vms->data_vm += nrpages;

		if (vms->uf) {
			/*
			 * If userfaultfd_unmap_prep returns an error the vmas
			 * will remain split, but userland will get a
			 * highly unexpected error anyway. This is no
			 * different than the case where the first of the two
			 * __split_vma fails, but we don't undo the first
			 * split, despite we could. This is unlikely enough
			 * failure that it's not worth optimizing it for.
			 */
			error = userfaultfd_unmap_prep(next, vms->start,
						       vms->end, vms->uf);
			if (error)
				goto userfaultfd_error;
		}
#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
		BUG_ON(next->vm_start < vms->start);
		BUG_ON(next->vm_start > vms->end);
#endif
	}

	vms->next = vma_next(vms->vmi);
	if (vms->next)
		vms->unmap_end = vms->next->vm_start;

#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	/* Make sure no VMAs are about to be lost. */
	{
		MA_STATE(test, mas_detach->tree, 0, 0);
		struct vm_area_struct *vma_mas, *vma_test;
		int test_count = 0;

		vma_iter_set(vms->vmi, vms->start);
		rcu_read_lock();
		vma_test = mas_find(&test, vms->vma_count - 1);
		for_each_vma_range(*(vms->vmi), vma_mas, vms->end) {
			BUG_ON(vma_mas != vma_test);
			test_count++;
			vma_test = mas_next(&test, vms->vma_count - 1);
		}
		rcu_read_unlock();
		BUG_ON(vms->vma_count != test_count);
	}
#endif

	while (vma_iter_addr(vms->vmi) > vms->start)
		vma_iter_prev_range(vms->vmi);

	vms->clear_ptes = true;
	return 0;

userfaultfd_error:
munmap_gather_failed:
end_split_failed:
modify_vma_failed:
	reattach_vmas(mas_detach);
start_split_failed:
map_count_exceeded:
	return error;
}
/*
 * init_vma_munmap() - Initializer wrapper for vma_munmap_struct
 * @vms: The vma munmap struct
 * @vmi: The vma iterator
 * @vma: The first vm_area_struct to munmap
 * @start: The aligned start address to munmap
 * @end: The aligned end address to munmap
 * @uf: The userfaultfd list_head
 * @unlock: Unlock after the operation.  Only unlocked on success
 */
static void init_vma_munmap(struct vma_munmap_struct *vms,
		struct vma_iterator *vmi, struct vm_area_struct *vma,
		unsigned long start, unsigned long end, struct list_head *uf,
		bool unlock)
{
	vms->vmi = vmi;
	vms->vma = vma;
	if (vma) {
		vms->start = start;
		vms->end = end;
	} else {
		vms->start = vms->end = 0;
	}
	vms->unlock = unlock;
	vms->uf = uf;
	vms->vma_count = 0;
	vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
	vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
	vms->unmap_start = FIRST_USER_ADDRESS;
	vms->unmap_end = USER_PGTABLES_CEILING;
	vms->clear_ptes = false;
}
/*
 * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
 * @vmi: The vma iterator
 * @vma: The starting vm_area_struct
 * @mm: The mm_struct
 * @start: The aligned start address to munmap.
 * @end: The aligned end address to munmap.
 * @uf: The userfaultfd list_head
 * @unlock: Set to true to drop the mmap_lock.  unlocking only happens on
 * success.
 *
 * Return: 0 on success and drops the lock if so directed, error and leaves the
 * lock held otherwise.
 */
int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
		struct mm_struct *mm, unsigned long start, unsigned long end,
		struct list_head *uf, bool unlock)
{
	struct maple_tree mt_detach;
	MA_STATE(mas_detach, &mt_detach, 0, 0);
	mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
	mt_on_stack(mt_detach);
	struct vma_munmap_struct vms;
	int error;

	init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock);
	error = vms_gather_munmap_vmas(&vms, &mas_detach);
	if (error)
		goto gather_failed;

	error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
	if (error)
		goto clear_tree_failed;

	/* Point of no return */
	vms_complete_munmap_vmas(&vms, &mas_detach);
	return 0;

clear_tree_failed:
	reattach_vmas(&mas_detach);
gather_failed:
	validate_mm(mm);
	return error;
}
/*
 * do_vmi_munmap() - munmap a given range.
 * @vmi: The vma iterator
 * @mm: The mm_struct
 * @start: The start address to munmap
 * @len: The length of the range to munmap
 * @uf: The userfaultfd list_head
 * @unlock: set to true if the user wants to drop the mmap_lock on success
 *
 * This function takes a @mas that is either pointing to the previous VMA or set
 * to MA_START and sets it up to remove the mapping(s).  The @len will be
 * aligned.
 *
 * Return: 0 on success and drops the lock if so directed, error and leaves the
 * lock held otherwise.
 */
int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
		  unsigned long start, size_t len, struct list_head *uf,
		  bool unlock)
{
	unsigned long end;
	struct vm_area_struct *vma;

	if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
		return -EINVAL;

	end = start + PAGE_ALIGN(len);
	if (end == start)
		return -EINVAL;

	/* Find the first overlapping VMA */
	vma = vma_find(vmi, end);
	if (!vma) {
		if (unlock)
			mmap_write_unlock(mm);
		return 0;
	}

	return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
}
/*
 * We are about to modify one or multiple of a VMA's flags, policy, userfaultfd
 * context and anonymous VMA name within the range [start, end).
 *
 * As a result, we might be able to merge the newly modified VMA range with an
 * adjacent VMA with identical properties.
 *
 * If no merge is possible and the range does not span the entirety of the VMA,
 * we then need to split the VMA to accommodate the change.
 *
 * The function returns either the merged VMA, the original VMA if a split was
 * required instead, or an error if the split failed.
 */
static struct vm_area_struct *vma_modify(struct vma_merge_struct *vmg)
{
	struct vm_area_struct *vma = vmg->vma;
	struct vm_area_struct *merged;

	/* First, try to merge. */
	merged = vma_merge_existing_range(vmg);
	if (merged)
		return merged;

	/* Split any preceding portion of the VMA. */
	if (vma->vm_start < vmg->start) {
		int err = split_vma(vmg->vmi, vma, vmg->start, 1);

		if (err)
			return ERR_PTR(err);
	}

	/* Split any trailing portion of the VMA. */
	if (vma->vm_end > vmg->end) {
		int err = split_vma(vmg->vmi, vma, vmg->end, 0);

		if (err)
			return ERR_PTR(err);
	}

	return vma;
}
struct vm_area_struct *vma_modify_flags(
		struct vma_iterator *vmi, struct vm_area_struct *prev,
		struct vm_area_struct *vma, unsigned long start, unsigned long end,
		unsigned long new_flags)
{
	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);

	vmg.flags = new_flags;

	return vma_modify(&vmg);
}
struct vm_area_struct
*vma_modify_flags_name(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start,
		       unsigned long end,
		       unsigned long new_flags,
		       struct anon_vma_name *new_name)
{
	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);

	vmg.flags = new_flags;
	vmg.anon_name = new_name;

	return vma_modify(&vmg);
}
struct vm_area_struct
*vma_modify_policy(struct vma_iterator *vmi,
		   struct vm_area_struct *prev,
		   struct vm_area_struct *vma,
		   unsigned long start, unsigned long end,
		   struct mempolicy *new_pol)
{
	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);

	vmg.policy = new_pol;

	return vma_modify(&vmg);
}
struct vm_area_struct
*vma_modify_flags_uffd(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start, unsigned long end,
		       unsigned long new_flags,
		       struct vm_userfaultfd_ctx new_ctx)
{
	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);

	vmg.flags = new_flags;
	vmg.uffd_ctx = new_ctx;

	return vma_modify(&vmg);
}
/*
 * Expand vma by delta bytes, potentially merging with an immediately adjacent
 * VMA with identical properties.
 */
struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
					struct vm_area_struct *vma,
					unsigned long delta)
{
	VMG_VMA_STATE(vmg, vmi, vma, vma, vma->vm_end, vma->vm_end + delta);

	vmg.next = vma_iter_next_rewind(vmi, NULL);
	vmg.vma = NULL; /* We use the VMA to populate VMG fields only. */

	return vma_merge_new_range(&vmg);
}
void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb)
{
	vb->count = 0;
}

static void unlink_file_vma_batch_process(struct unlink_vma_file_batch *vb)
{
	struct address_space *mapping;
	int i;

	mapping = vb->vmas[0]->vm_file->f_mapping;
	i_mmap_lock_write(mapping);
	for (i = 0; i < vb->count; i++) {
		VM_WARN_ON_ONCE(vb->vmas[i]->vm_file->f_mapping != mapping);
		__remove_shared_vm_struct(vb->vmas[i], mapping);
	}
	i_mmap_unlock_write(mapping);

	unlink_file_vma_batch_init(vb);
}

void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
			       struct vm_area_struct *vma)
{
	if (vma->vm_file == NULL)
		return;

	if ((vb->count > 0 && vb->vmas[0]->vm_file != vma->vm_file) ||
	    vb->count == ARRAY_SIZE(vb->vmas))
		unlink_file_vma_batch_process(vb);

	vb->vmas[vb->count] = vma;
	vb->count++;
}

void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb)
{
	if (vb->count > 0)
		unlink_file_vma_batch_process(vb);
}
/*
 * Unlink a file-based vm structure from its interval tree, to hide
 * vma from rmap and vmtruncate before freeing its page tables.
 */
void unlink_file_vma(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;

	if (file) {
		struct address_space *mapping = file->f_mapping;

		i_mmap_lock_write(mapping);
		__remove_shared_vm_struct(vma, mapping);
		i_mmap_unlock_write(mapping);
	}
}
void vma_link_file(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping;

	if (file) {
		mapping = file->f_mapping;
		i_mmap_lock_write(mapping);
		__vma_link_file(vma, mapping);
		i_mmap_unlock_write(mapping);
	}
}
int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
{
	VMA_ITERATOR(vmi, mm, 0);

	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
	if (vma_iter_prealloc(&vmi, vma))
		return -ENOMEM;

	vma_start_write(vma);
	vma_iter_store(&vmi, vma);
	vma_link_file(vma);
	mm->map_count++;
	validate_mm(mm);
	return 0;
}
/*
 * Copy the vma structure to a new location in the same mm,
 * prior to moving page table entries, to effect an mremap move.
 */
struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks)
{
	struct vm_area_struct *vma = *vmap;
	unsigned long vma_start = vma->vm_start;
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	bool faulted_in_anon_vma = true;
	VMA_ITERATOR(vmi, mm, addr);
	VMG_VMA_STATE(vmg, &vmi, NULL, vma, addr, addr + len);

	/*
	 * If anonymous vma has not yet been faulted, update new pgoff
	 * to match new location, to increase its chance of merging.
	 */
	if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
		pgoff = addr >> PAGE_SHIFT;
		faulted_in_anon_vma = false;
	}

	new_vma = find_vma_prev(mm, addr, &vmg.prev);
	if (new_vma && new_vma->vm_start < addr + len)
		return NULL;	/* should never get here */

	vmg.vma = NULL; /* New VMA range. */
	vmg.pgoff = pgoff;
	vmg.next = vma_iter_next_rewind(&vmi, NULL);
	new_vma = vma_merge_new_range(&vmg);

	if (new_vma) {
		/*
		 * Source vma may have been merged into new_vma
		 */
		if (unlikely(vma_start >= new_vma->vm_start &&
			     vma_start < new_vma->vm_end)) {
			/*
			 * The only way we can get a vma_merge with
			 * self during an mremap is if the vma hasn't
			 * been faulted in yet and we were allowed to
			 * reset the dst vma->vm_pgoff to the
			 * destination address of the mremap to allow
			 * the merge to happen. mremap must change the
			 * vm_pgoff linearity between src and dst vmas
			 * (in turn preventing a vma_merge) to be
			 * safe. It is only safe to keep the vm_pgoff
			 * linear if there are no pages mapped yet.
			 */
			VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
			*vmap = vma = new_vma;
		}
		*need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
	} else {
		new_vma = vm_area_dup(vma);
		if (!new_vma)
			goto out;
		vma_set_range(new_vma, addr, addr + len, pgoff);
		if (vma_dup_policy(vma, new_vma))
			goto out_free_vma;
		if (anon_vma_clone(new_vma, vma))
			goto out_free_mempol;
		if (new_vma->vm_file)
			get_file(new_vma->vm_file);
		if (new_vma->vm_ops && new_vma->vm_ops->open)
			new_vma->vm_ops->open(new_vma);
		if (vma_link(mm, new_vma))
			goto out_vma_link;
		*need_rmap_locks = false;
	}
	return new_vma;

out_vma_link:
	vma_close(new_vma);

	if (new_vma->vm_file)
		fput(new_vma->vm_file);

	unlink_anon_vmas(new_vma);
out_free_mempol:
	mpol_put(vma_policy(new_vma));
out_free_vma:
	vm_area_free(new_vma);
out:
	return NULL;
}
/*
 * Rough compatibility check to quickly see if it's even worth looking
 * at sharing an anon_vma.
 *
 * They need to have the same vm_file, and the flags can only differ
 * in things that mprotect may change.
 *
 * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
 * we can merge the two vma's. For example, we refuse to merge a vma if
 * there is a vm_ops->close() function, because that indicates that the
 * driver is doing some kind of reference counting. But that doesn't
 * really matter for the anon_vma sharing case.
 */
static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
{
	return a->vm_end == b->vm_start &&
		mpol_equal(vma_policy(a), vma_policy(b)) &&
		a->vm_file == b->vm_file &&
		!((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) &&
		b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
}
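/*
 * Illustrative example (not part of the original source): two anonymous VMAs
 * covering [0x1000, 0x3000) and [0x3000, 0x6000) with equal policies, no
 * backing file and flags differing only in, say, PROT_WRITE pass this check,
 * because VM_ACCESS_FLAGS differences are ignored; a gap between them or a
 * differing vm_file makes them incompatible.
 */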
/*
 * Do some basic sanity checking to see if we can re-use the anon_vma
 * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
 * the same as 'old', the other will be the new one that is trying
 * to share the anon_vma.
 *
 * NOTE! This runs with mmap_lock held for reading, so it is possible that
 * the anon_vma of 'old' is concurrently in the process of being set up
 * by another page fault trying to merge _that_. But that's ok: if it
 * is being set up, that automatically means that it will be a singleton
 * acceptable for merging, so we can do all of this optimistically. But
 * we do that READ_ONCE() to make sure that we never re-load the pointer.
 *
 * IOW: that the "list_is_singular()" test on the anon_vma_chain only
 * matters for the 'stable anon_vma' case (ie the thing we want to avoid
 * is to return an anon_vma that is "complex" due to having gone through
 * a fork).
 *
 * We also make sure that the two vma's are compatible (adjacent,
 * and with the same memory policies). That's all stable, even with just
 * a read lock on the mmap_lock.
 */
static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old,
					  struct vm_area_struct *a,
					  struct vm_area_struct *b)
{
	if (anon_vma_compatible(a, b)) {
		struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);

		if (anon_vma && list_is_singular(&old->anon_vma_chain))
			return anon_vma;
	}
	return NULL;
}
/*
 * find_mergeable_anon_vma is used by anon_vma_prepare, to check
 * neighbouring vmas for a suitable anon_vma, before it goes off
 * to allocate a new anon_vma.  It checks because a repetitive
 * sequence of mprotects and faults may otherwise lead to distinct
 * anon_vmas being allocated, preventing vma merge in subsequent
 * mprotect.
 */
struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = NULL;
	struct vm_area_struct *prev, *next;
	VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_end);

	/* Try next first. */
	next = vma_iter_load(&vmi);
	if (next) {
		anon_vma = reusable_anon_vma(next, vma, next);
		if (anon_vma)
			return anon_vma;
	}

	prev = vma_prev(&vmi);
	VM_BUG_ON_VMA(prev != vma, vma);
	prev = vma_prev(&vmi);
	/* Try prev next. */
	if (prev)
		anon_vma = reusable_anon_vma(prev, prev, vma);

	/*
	 * We might reach here with anon_vma == NULL if we can't find
	 * any reusable anon_vma.
	 * There's no absolute need to look only at touching neighbours:
	 * we could search further afield for "compatible" anon_vmas.
	 * But it would probably just be a waste of time searching,
	 * or lead to too many vmas hanging off the same anon_vma.
	 * We're trying to allow mprotect remerging later on,
	 * not trying to minimize memory used for anon_vmas.
	 */
	return anon_vma;
}
static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops)
{
	return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite);
}

static bool vma_is_shared_writable(struct vm_area_struct *vma)
{
	return (vma->vm_flags & (VM_WRITE | VM_SHARED)) ==
		(VM_WRITE | VM_SHARED);
}

static bool vma_fs_can_writeback(struct vm_area_struct *vma)
{
	/* No managed pages to writeback. */
	if (vma->vm_flags & VM_PFNMAP)
		return false;

	return vma->vm_file && vma->vm_file->f_mapping &&
		mapping_can_writeback(vma->vm_file->f_mapping);
}

/*
 * Does this VMA require the underlying folios to have their dirty state
 * tracked?
 */
bool vma_needs_dirty_tracking(struct vm_area_struct *vma)
{
	/* Only shared, writable VMAs require dirty tracking. */
	if (!vma_is_shared_writable(vma))
		return false;

	/* Does the filesystem need to be notified? */
	if (vm_ops_needs_writenotify(vma->vm_ops))
		return true;

	/*
	 * Even if the filesystem doesn't indicate a need for writenotify, if it
	 * can writeback, dirty tracking is still required.
	 */
	return vma_fs_can_writeback(vma);
}
/*
 * Some shared mappings will want the pages marked read-only
 * to track write events. If so, we'll downgrade vm_page_prot
 * to the private version (using protection_map[] without the
 * VM_SHARED bit).
 */
bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
{
	/* If it was private or non-writable, the write bit is already clear */
	if (!vma_is_shared_writable(vma))
		return false;

	/* The backer wishes to know when pages are first written to? */
	if (vm_ops_needs_writenotify(vma->vm_ops))
		return true;

	/* The open routine did something to the protections that pgprot_modify
	 * won't preserve? */
	if (pgprot_val(vm_page_prot) !=
	    pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags)))
		return false;

	/*
	 * Do we need to track softdirty? hugetlb does not support softdirty
	 * tracking yet.
	 */
	if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
		return true;

	/* Do we need write faults for uffd-wp tracking? */
	if (userfaultfd_wp(vma))
		return true;

	/* Can the mapping track the dirty pages? */
	return vma_fs_can_writeback(vma);
}
static DEFINE_MUTEX(mm_all_locks_mutex);

static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
{
	if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
		/*
		 * The LSB of head.next can't change from under us
		 * because we hold the mm_all_locks_mutex.
		 */
		down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
		/*
		 * We can safely modify head.next after taking the
		 * anon_vma->root->rwsem. If some other vma in this mm shares
		 * the same anon_vma we won't take it again.
		 *
		 * No need of atomic instructions here, head.next
		 * can't change from under us thanks to the
		 * anon_vma->root->rwsem.
		 */
		if (__test_and_set_bit(0, (unsigned long *)
				       &anon_vma->root->rb_root.rb_root.rb_node))
			BUG();
	}
}
static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
{
	if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
		/*
		 * AS_MM_ALL_LOCKS can't change from under us because
		 * we hold the mm_all_locks_mutex.
		 *
		 * Operations on ->flags have to be atomic because
		 * even if AS_MM_ALL_LOCKS is stable thanks to the
		 * mm_all_locks_mutex, there may be other cpus
		 * changing other bitflags in parallel to us.
		 */
		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
			BUG();
		down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
	}
}
/*
 * This operation locks against the VM for all pte/vma/mm related
 * operations that could ever happen on a certain mm. This includes
 * vmtruncate, try_to_unmap, and all page faults.
 *
 * The caller must take the mmap_lock in write mode before calling
 * mm_take_all_locks(). The caller isn't allowed to release the
 * mmap_lock until mm_drop_all_locks() returns.
 *
 * mmap_lock in write mode is required in order to block all operations
 * that could modify pagetables and free pages without need of
 * altering the vma layout. It's also needed in write mode to avoid new
 * anon_vmas to be associated with existing vmas.
 *
 * A single task can't take more than one mm_take_all_locks() in a row
 * or it would deadlock.
 *
 * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
 * mapping->flags avoid to take the same lock twice, if more than one
 * vma in this mm is backed by the same anon_vma or address_space.
 *
 * We take locks in following order, accordingly to comment at beginning
 * of mm/rmap.c:
 *   - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
 *     hugetlb mapping);
 *   - all vmas marked locked
 *   - all i_mmap_rwsem locks;
 *   - all anon_vma->rwsem
 *
 * We can take all locks within these types randomly because the VM code
 * doesn't nest them and we are protected from parallel mm_take_all_locks() by
 * mm_all_locks_mutex.
 *
 * mm_take_all_locks() and mm_drop_all_locks are expensive operations
 * that may have to take thousands of locks.
 *
 * mm_take_all_locks() can fail if it's interrupted by signals.
 */
int mm_take_all_locks(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct anon_vma_chain *avc;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_assert_write_locked(mm);

	mutex_lock(&mm_all_locks_mutex);

	/*
	 * vma_start_write() does not have a complement in mm_drop_all_locks()
	 * because vma_start_write() is always asymmetrical; it marks a VMA as
	 * being written to until mmap_write_unlock() or mmap_write_downgrade()
	 * is reached.
	 */
	for_each_vma(vmi, vma) {
		if (signal_pending(current))
			goto out_unlock;
		vma_start_write(vma);
	}

	vma_iter_init(&vmi, mm, 0);
	for_each_vma(vmi, vma) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->vm_file && vma->vm_file->f_mapping &&
				is_vm_hugetlb_page(vma))
			vm_lock_mapping(mm, vma->vm_file->f_mapping);
	}

	vma_iter_init(&vmi, mm, 0);
	for_each_vma(vmi, vma) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->vm_file && vma->vm_file->f_mapping &&
				!is_vm_hugetlb_page(vma))
			vm_lock_mapping(mm, vma->vm_file->f_mapping);
	}

	vma_iter_init(&vmi, mm, 0);
	for_each_vma(vmi, vma) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->anon_vma)
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				vm_lock_anon_vma(mm, avc->anon_vma);
	}

	return 0;

out_unlock:
	mm_drop_all_locks(mm);
	return -EINTR;
}
static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
{
	if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
		/*
		 * The LSB of head.next can't change to 0 from under
		 * us because we hold the mm_all_locks_mutex.
		 *
		 * We must however clear the bitflag before unlocking
		 * the vma so the users using the anon_vma->rb_root will
		 * never see our bitflag.
		 *
		 * No need of atomic instructions here, head.next
		 * can't change from under us until we release the
		 * anon_vma->root->rwsem.
		 */
		if (!__test_and_clear_bit(0, (unsigned long *)
					  &anon_vma->root->rb_root.rb_root.rb_node))
			BUG();
		anon_vma_unlock_write(anon_vma);
	}
}
static void vm_unlock_mapping(struct address_space *mapping)
{
	if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
		/*
		 * AS_MM_ALL_LOCKS can't change to 0 from under us
		 * because we hold the mm_all_locks_mutex.
		 */
		i_mmap_unlock_write(mapping);
		if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
					&mapping->flags))
			BUG();
	}
}
/*
 * The mmap_lock cannot be released by the caller until
 * mm_drop_all_locks() returns.
 */
void mm_drop_all_locks(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct anon_vma_chain *avc;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_assert_write_locked(mm);
	BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));

	for_each_vma(vmi, vma) {
		if (vma->anon_vma)
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				vm_unlock_anon_vma(avc->anon_vma);
		if (vma->vm_file && vma->vm_file->f_mapping)
			vm_unlock_mapping(vma->vm_file->f_mapping);
	}

	mutex_unlock(&mm_all_locks_mutex);
}
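
/*
 * Illustrative usage sketch, not a caller that exists in this file; the
 * helper name below is hypothetical. A subsystem that needs to exclude all
 * rmap and pagecache activity for an mm would pair mm_take_all_locks() and
 * mm_drop_all_locks() roughly as follows.
 */
static int __maybe_unused freeze_mm_for_example(struct mm_struct *mm)
{
	int err;

	mmap_write_lock(mm);		/* mm_take_all_locks() asserts this */
	err = mm_take_all_locks(mm);	/* fails with -EINTR on pending signal */
	if (!err) {
		/* ... work done while every anon_vma/i_mmap lock is held ... */
		mm_drop_all_locks(mm);
	}
	mmap_write_unlock(mm);
	return err;
}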
/*
 * We account for memory if it's a private writable mapping,
 * not hugepages and VM_NORESERVE wasn't set.
 */
static bool accountable_mapping(struct file *file, vm_flags_t vm_flags)
{
	/*
	 * hugetlb has its own accounting separate from the core VM.
	 * VM_HUGETLB may not be set yet so we cannot check for that flag.
	 */
	if (file && is_file_hugepages(file))
		return false;

	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
}
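
/*
 * Worked examples for the predicate above (illustrative only; this helper is
 * hypothetical and never called):
 *   - a private writable anonymous mapping is accounted;
 *   - a shared writable mapping is not accounted here (the backing object
 *     carries its own accounting);
 *   - VM_NORESERVE suppresses accounting.
 */
static void __maybe_unused accountable_mapping_examples(void)
{
	VM_WARN_ON(!accountable_mapping(NULL, VM_READ | VM_WRITE));
	VM_WARN_ON(accountable_mapping(NULL, VM_SHARED | VM_WRITE));
	VM_WARN_ON(accountable_mapping(NULL, VM_NORESERVE | VM_WRITE));
}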
/*
 * vms_abort_munmap_vmas() - Undo as much as possible from an aborted munmap()
 * operation.
 * @vms: The vma unmap structure
 * @mas_detach: The maple state with the detached maple tree
 *
 * Reattach any detached vmas and free up the maple tree used to track the vmas.
 * If that's not possible because the ptes are cleared (and vm_ops->close() may
 * have been called), then a NULL is written over the vmas and the vmas are
 * removed (munmap() completed).
 */
static void vms_abort_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach)
{
	struct ma_state *mas = &vms->vmi->mas;

	if (!vms->nr_pages)
		return;

	if (vms->clear_ptes)
		return reattach_vmas(mas_detach);

	/*
	 * Aborting cannot just call the vm_ops open() because they are often
	 * not symmetrical and state data has been lost. Resort to the old
	 * failure method of leaving a gap where the MAP_FIXED mapping failed.
	 */
	mas_set_range(mas, vms->start, vms->end - 1);
	mas_store_gfp(mas, NULL, GFP_KERNEL|__GFP_NOFAIL);
	/* Clean up the insertion of the unfortunate gap */
	vms_complete_munmap_vmas(vms, mas_detach);
}
/*
 * __mmap_prepare() - Prepare to gather any overlapping VMAs that need to be
 * unmapped once the map operation is completed, check limits, account mapping
 * and clean up any pre-existing VMAs.
 *
 * @map: Mapping state.
 * @uf:  Userfaultfd context list.
 *
 * Returns: 0 on success, error code otherwise.
 */
static int __mmap_prepare(struct mmap_state *map, struct list_head *uf)
{
	int error;
	struct vma_iterator *vmi = map->vmi;
	struct vma_munmap_struct *vms = &map->vms;

	/* Find the first overlapping VMA and initialise unmap state. */
	vms->vma = vma_find(vmi, map->end);
	init_vma_munmap(vms, vmi, vms->vma, map->addr, map->end, uf,
			/* unlock = */ false);

	/* OK, we have overlapping VMAs - prepare to unmap them. */
	if (vms->vma) {
		mt_init_flags(&map->mt_detach,
			      vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
		mt_on_stack(map->mt_detach);
		mas_init(&map->mas_detach, &map->mt_detach, /* addr = */ 0);
		/* Prepare to unmap any existing mapping in the area */
		error = vms_gather_munmap_vmas(vms, &map->mas_detach);
		if (error) {
			/* On error VMAs will already have been reattached. */
			vms->nr_pages = 0;
			return error;
		}

		map->next = vms->next;
		map->prev = vms->prev;
	} else {
		map->next = vma_iter_next_rewind(vmi, &map->prev);
	}

	/* Check against address space limit. */
	if (!may_expand_vm(map->mm, map->flags, map->pglen - vms->nr_pages))
		return -ENOMEM;

	/* Private writable mapping: check memory availability. */
	if (accountable_mapping(map->file, map->flags)) {
		map->charged = map->pglen;
		map->charged -= vms->nr_accounted;
		if (map->charged) {
			error = security_vm_enough_memory_mm(map->mm, map->charged);
			if (error)
				return error;
		}

		vms->nr_accounted = 0;
		map->flags |= VM_ACCOUNT;
	}

	/*
	 * Clear PTEs while the vma is still in the tree so that rmap
	 * cannot race with the freeing later in the truncate scenario.
	 * This is also needed for mmap_file(), which is why the vm_ops
	 * close function is called.
	 */
	vms_clean_up_area(vms, &map->mas_detach);

	return 0;
}
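
/*
 * Worked example for the accounting above (hypothetical numbers): a 16-page
 * private writable mapping that replaces overlapping VMAs in which 4 pages
 * were already accounted charges only
 *
 *	charged = pglen - nr_accounted = 16 - 4 = 12
 *
 * pages via security_vm_enough_memory_mm(). vms->nr_accounted is then zeroed
 * so that the replaced VMAs do not give those pages back a second time when
 * they are torn down in __mmap_complete().
 */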
static int __mmap_new_file_vma(struct mmap_state *map,
			       struct vm_area_struct *vma)
{
	struct vma_iterator *vmi = map->vmi;
	int error;

	vma->vm_file = get_file(map->file);
	error = mmap_file(vma->vm_file, vma);
	if (error) {
		fput(vma->vm_file);
		vma->vm_file = NULL;

		vma_iter_set(vmi, vma->vm_end);
		/* Undo any partial mapping done by a device driver. */
		unmap_region(&vmi->mas, vma, map->prev, map->next);

		return error;
	}

	/* Drivers cannot alter the address of the VMA. */
	WARN_ON_ONCE(map->addr != vma->vm_start);
	/*
	 * Drivers should not permit writability when previously it was
	 * disallowed.
	 */
	VM_WARN_ON_ONCE(map->flags != vma->vm_flags &&
			!(map->flags & VM_MAYWRITE) &&
			(vma->vm_flags & VM_MAYWRITE));

	/* If the flags change (and are mergeable), let's retry later. */
	map->retry_merge = vma->vm_flags != map->flags && !(vma->vm_flags & VM_SPECIAL);
	map->flags = vma->vm_flags;

	return 0;
}
/*
 * __mmap_new_vma() - Allocate a new VMA for the region, as merging was not
 * possible.
 *
 * @map:  Mapping state.
 * @vmap: Output pointer for the new VMA.
 *
 * Returns: Zero on success, or an error.
 */
static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap)
{
	struct vma_iterator *vmi = map->vmi;
	int error = 0;
	struct vm_area_struct *vma;

	/*
	 * Determine the object being mapped and call the appropriate
	 * specific mapper. The address has already been validated, but
	 * not unmapped, though the maps are removed from the list.
	 */
	vma = vm_area_alloc(map->mm);
	if (!vma)
		return -ENOMEM;

	vma_iter_config(vmi, map->addr, map->end);
	vma_set_range(vma, map->addr, map->end, map->pgoff);
	vm_flags_init(vma, map->flags);
	vma->vm_page_prot = vm_get_page_prot(map->flags);

	if (vma_iter_prealloc(vmi, vma)) {
		error = -ENOMEM;
		goto free_vma;
	}

	if (map->file)
		error = __mmap_new_file_vma(map, vma);
	else if (map->flags & VM_SHARED)
		error = shmem_zero_setup(vma);
	else
		vma_set_anonymous(vma);

	if (error)
		goto free_iter_vma;

#ifdef CONFIG_SPARC64
	/* TODO: Fix SPARC ADI! */
	WARN_ON_ONCE(!arch_validate_flags(map->flags));
#endif

	/* Lock the VMA since it is modified after insertion into VMA tree */
	vma_start_write(vma);
	vma_iter_store(vmi, vma);
	map->mm->map_count++;
	vma_link_file(vma);

	/*
	 * vma_merge_new_range() calls khugepaged_enter_vma() too, the below
	 * call covers the non-merge case.
	 */
	khugepaged_enter_vma(vma, map->flags);
	ksm_add_vma(vma);
	*vmap = vma;
	return 0;

free_iter_vma:
	vma_iter_free(vmi);
free_vma:
	vm_area_free(vma);
	return error;
}
/*
 * __mmap_complete() - Unmap any VMAs we overlap, account memory mapping
 *                     statistics, handle locking and finalise the VMA.
 *
 * @map: Mapping state.
 * @vma: Merged or newly allocated VMA for the mmap()'d region.
 */
static void __mmap_complete(struct mmap_state *map, struct vm_area_struct *vma)
{
	struct mm_struct *mm = map->mm;
	unsigned long vm_flags = vma->vm_flags;

	perf_event_mmap(vma);

	/* Unmap any existing mapping in the area. */
	vms_complete_munmap_vmas(&map->vms, &map->mas_detach);

	vm_stat_account(mm, vma->vm_flags, map->pglen);
	if (vm_flags & VM_LOCKED) {
		if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
					is_vm_hugetlb_page(vma) ||
					vma == get_gate_vma(mm))
			vm_flags_clear(vma, VM_LOCKED_MASK);
		else
			mm->locked_vm += map->pglen;
	}

	if (vma->vm_file)
		uprobe_mmap(vma);

	/*
	 * A new (or expanded) vma always gets soft dirty status.
	 * Otherwise the user-space soft-dirty page tracker won't
	 * be able to distinguish the situation where a vma area is
	 * unmapped and then a new one is mapped in place (which must
	 * be treated as a completely new data area).
	 */
	vm_flags_set(vma, VM_SOFTDIRTY);

	vma_set_page_prot(vma);
}
unsigned long __mmap_region(struct file *file, unsigned long addr,
		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
		struct list_head *uf)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	int error;
	VMA_ITERATOR(vmi, mm, addr);
	MMAP_STATE(map, mm, &vmi, addr, len, pgoff, vm_flags, file);

	error = __mmap_prepare(&map, uf);
	if (error)
		goto abort_munmap;

	/* Attempt to merge with adjacent VMAs... */
	if (map.prev || map.next) {
		VMG_MMAP_STATE(vmg, &map, /* vma = */ NULL);

		vma = vma_merge_new_range(&vmg);
	}

	/* ...but if we can't, allocate a new VMA. */
	if (!vma) {
		error = __mmap_new_vma(&map, &vma);
		if (error)
			goto unacct_error;
	}

	/* If flags changed, we might be able to merge, so try again. */
	if (map.retry_merge) {
		VMG_MMAP_STATE(vmg, &map, vma);

		vma_iter_config(map.vmi, map.addr, map.end);
		vma_merge_existing_range(&vmg);
	}

	__mmap_complete(&map, vma);

	return addr;

	/* Accounting was done by __mmap_prepare(). */
unacct_error:
	if (map.charged)
		vm_unacct_memory(map.charged);
abort_munmap:
	vms_abort_munmap_vmas(&map.vms, &map.mas_detach);
	return error;
}
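
/*
 * Caller sketch (illustrative only, heavily simplified relative to the real
 * callers in mm/mmap.c; the wrapper name is hypothetical): __mmap_region()
 * assumes the mmap write lock is held and that addr/len/pgoff have already
 * been validated and page-aligned. On success it returns addr; on failure it
 * returns a negative errno cast to unsigned long.
 */
static unsigned long __maybe_unused mmap_region_sketch(struct file *file,
		unsigned long addr, unsigned long len, vm_flags_t vm_flags,
		unsigned long pgoff, struct list_head *uf)
{
	mmap_assert_write_locked(current->mm);
	return __mmap_region(file, addr, len, vm_flags, pgoff, uf);
}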