/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins 2003, 2004
 */
/*
 * Lock ordering in mm:
 *
 * inode->i_rwsem	(while writing or truncating, not reading or faulting)
 *   mapping->invalidate_lock (in filemap_fault)
 *     hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share, see hugetlbfs below)
 *       mapping->i_mmap_rwsem
 *         mm->page_table_lock or pte_lock
 *           swap_lock (in swap_duplicate, swap_info_get)
 *             mmlist_lock (in mmput, drain_mmlist and others)
 *             mapping->private_lock (in block_dirty_folio)
 *               i_pages lock (widely used)
 *                 lruvec->lru_lock (in folio_lruvec_lock_irq)
 *             inode->i_lock (in set_page_dirty's __mark_inode_dirty)
 *             bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
 *               sb_lock (within inode_lock in fs/fs-writeback.c)
 *               i_pages lock (widely used, in set_page_dirty,
 *                             in arch-dependent flush_dcache_mmap_lock,
 *                             within bdi.wb->list_lock in __sync_single_inode)
 *
 * anon_vma->rwsem,mapping->i_mmap_rwsem (memory_failure, collect_procs_anon)
 *
 * hugetlbfs PageHuge() pages take locks in this order:
 *   hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
 *     vma_lock (hugetlb specific lock for pmd_sharing)
 *       mapping->i_mmap_rwsem (also used for hugetlb pmd sharing)
 */
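/*
 * Illustrative sketch (not part of the original file; helpers assumed from
 * elsewhere in mm): a file-backed rmap walk honours the ordering above by
 * taking i_mmap_rwsem before any page table lock:
 *
 *	i_mmap_lock_read(mapping);
 *	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff_start, pgoff_end) {
 *		pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
 *		// ... inspect or clear the PTE ...
 *		pte_unmap_unlock(pte, ptl);
 *	}
 *	i_mmap_unlock_read(mapping);
 */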
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/backing-dev.h>
#include <linux/page_idle.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/mm_inline.h>
#include <linux/oom.h>

#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/tlb.h>
#include <trace/events/migrate.h>
static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;
static inline struct anon_vma *anon_vma_alloc(void)
{
	struct anon_vma *anon_vma;

	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
	if (anon_vma) {
		atomic_set(&anon_vma->refcount, 1);
		anon_vma->num_children = 0;
		anon_vma->num_active_vmas = 0;
		anon_vma->parent = anon_vma;
		/*
		 * Initialise the anon_vma root to point to itself. If called
		 * from fork, the root will be reset to the parent's anon_vma.
		 */
		anon_vma->root = anon_vma;
	}

	return anon_vma;
}
static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	VM_BUG_ON(atomic_read(&anon_vma->refcount));

	/*
	 * Synchronize against folio_lock_anon_vma_read() such that
	 * we can safely hold the lock without the anon_vma getting
	 * freed.
	 *
	 * Relies on the full mb implied by the atomic_dec_and_test() from
	 * put_anon_vma() against the acquire barrier implied by
	 * down_read_trylock() from folio_lock_anon_vma_read(). This orders:
	 *
	 * folio_lock_anon_vma_read()	VS	put_anon_vma()
	 *   down_read_trylock()		  atomic_dec_and_test()
	 *   LOCK				  MB
	 *   atomic_read()			  rwsem_is_locked()
	 *
	 * LOCK should suffice since the actual taking of the lock must
	 * happen _before_ what follows.
	 */
	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
		anon_vma_lock_write(anon_vma);
		anon_vma_unlock_write(anon_vma);
	}

	kmem_cache_free(anon_vma_cachep, anon_vma);
}
static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
{
	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
}

static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}
static void anon_vma_chain_link(struct vm_area_struct *vma,
				struct anon_vma_chain *avc,
				struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	list_add(&avc->same_vma, &vma->anon_vma_chain);
	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
}
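/*
 * Sketch of the linkage built above (illustrative only): a single
 * anon_vma_chain ties one VMA to one anon_vma in both directions,
 *
 *	vma->anon_vma_chain --list--> avc <--interval tree-- anon_vma->rb_root
 *
 * so rmap can go folio -> anon_vma -> every mapping VMA, while unmap can go
 * vma -> every anon_vma the VMA is linked into.
 */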
/**
 * __anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, which
 * is handled inline by anon_vma_prepare(). But if
 * not we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in folio_lock_anon_vma_read()
 * and that may actually touch the rwsem even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 */
int __anon_vma_prepare(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct anon_vma *anon_vma, *allocated;
	struct anon_vma_chain *avc;

	mmap_assert_locked(mm);

	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_enomem;

	anon_vma = find_mergeable_anon_vma(vma);
	allocated = NULL;
	if (!anon_vma) {
		anon_vma = anon_vma_alloc();
		if (unlikely(!anon_vma))
			goto out_enomem_free_avc;
		anon_vma->num_children++; /* self-parent link for new root */
		allocated = anon_vma;
	}

	anon_vma_lock_write(anon_vma);
	/* page_table_lock to protect against threads */
	spin_lock(&mm->page_table_lock);
	if (likely(!vma->anon_vma)) {
		vma->anon_vma = anon_vma;
		anon_vma_chain_link(vma, avc, anon_vma);
		anon_vma->num_active_vmas++;
		allocated = NULL;
		avc = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	anon_vma_unlock_write(anon_vma);

	if (unlikely(allocated))
		put_anon_vma(allocated);
	if (unlikely(avc))
		anon_vma_chain_free(avc);

	return 0;

 out_enomem_free_avc:
	anon_vma_chain_free(avc);
 out_enomem:
	return -ENOMEM;
}
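/*
 * For reference (a sketch of the fast path, defined in <linux/rmap.h>):
 * callers normally go through the inline wrapper, which only drops into
 * __anon_vma_prepare() when vma->anon_vma is still NULL:
 *
 *	static inline int anon_vma_prepare(struct vm_area_struct *vma)
 *	{
 *		if (likely(vma->anon_vma))
 *			return 0;
 *
 *		return __anon_vma_prepare(vma);
 *	}
 */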
/*
 * This is a useful helper function for locking the anon_vma root as
 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
 * have the same vma.
 *
 * Such anon_vma's should have the same root, so you'd expect to see
 * just a single mutex_lock for the whole traversal.
 */
static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
{
	struct anon_vma *new_root = anon_vma->root;
	if (new_root != root) {
		if (WARN_ON_ONCE(root))
			up_write(&root->rwsem);
		root = new_root;
		down_write(&root->rwsem);
	}
	return root;
}

static inline void unlock_anon_vma_root(struct anon_vma *root)
{
	if (root)
		up_write(&root->rwsem);
}
/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 *
 * anon_vma_clone() is called by vma_expand(), vma_merge(), __split_vma(),
 * copy_vma() and anon_vma_fork(). The first four want an exact copy of src,
 * while the last one, anon_vma_fork(), may try to reuse an existing anon_vma to
 * prevent endless growth of anon_vma. Since dst->anon_vma is set to NULL before
 * call, we can identify this case by checking (!dst->anon_vma &&
 * src->anon_vma).
 *
 * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find
 * and reuse existing anon_vma which has no vmas and only one child anon_vma.
 * This prevents degradation of anon_vma hierarchy to endless linear chain in
 * case of constantly forking task. On the other hand, an anon_vma with more
 * than one child isn't reused even if there was no alive vma, thus rmap
 * walker has a good chance of avoiding scanning the whole hierarchy when it
 * searches where page is mapped.
 */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	struct anon_vma_chain *avc, *pavc;
	struct anon_vma *root = NULL;

	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma;

		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!avc)) {
			unlock_anon_vma_root(root);
			root = NULL;
			avc = anon_vma_chain_alloc(GFP_KERNEL);
			if (!avc)
				goto enomem_failure;
		}
		anon_vma = pavc->anon_vma;
		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_chain_link(dst, avc, anon_vma);

		/*
		 * Reuse existing anon_vma if it has no vma and only one
		 * anon_vma child.
		 *
		 * Root anon_vma is never reused:
		 * it has self-parent reference and at least one child.
		 */
		if (!dst->anon_vma && src->anon_vma &&
		    anon_vma->num_children < 2 &&
		    anon_vma->num_active_vmas == 0)
			dst->anon_vma = anon_vma;
	}
	if (dst->anon_vma)
		dst->anon_vma->num_active_vmas++;
	unlock_anon_vma_root(root);
	return 0;

 enomem_failure:
	/*
	 * dst->anon_vma is dropped here otherwise its num_active_vmas can
	 * be incorrectly decremented in unlink_anon_vmas().
	 * We can safely do this because callers of anon_vma_clone() don't care
	 * about dst->anon_vma if anon_vma_clone() failed.
	 */
	dst->anon_vma = NULL;
	unlink_anon_vmas(dst);
	return -ENOMEM;
}
/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;
	int error;

	/* Don't bother if the parent process has no anon_vma here. */
	if (!pvma->anon_vma)
		return 0;

	/* Drop inherited anon_vma, we'll reuse existing or allocate new. */
	vma->anon_vma = NULL;

	/*
	 * First, attach the new VMA to the parent VMA's anon_vmas,
	 * so rmap can find non-COWed pages in child processes.
	 */
	error = anon_vma_clone(vma, pvma);
	if (error)
		return error;

	/* An existing anon_vma has been reused, all done then. */
	if (vma->anon_vma)
		return 0;

	/* Then add our own anon_vma. */
	anon_vma = anon_vma_alloc();
	if (!anon_vma)
		goto out_error;
	anon_vma->num_active_vmas++;
	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_error_free_anon_vma;

	/*
	 * The root anon_vma's rwsem is the lock actually used when we
	 * lock any of the anon_vmas in this anon_vma tree.
	 */
	anon_vma->root = pvma->anon_vma->root;
	anon_vma->parent = pvma->anon_vma;
	/*
	 * With refcounts, an anon_vma can stay around longer than the
	 * process it belongs to. The root anon_vma needs to be pinned until
	 * this anon_vma is freed, because the lock lives in the root.
	 */
	get_anon_vma(anon_vma->root);
	/* Mark this anon_vma as the one where our new (COWed) pages go. */
	vma->anon_vma = anon_vma;
	anon_vma_lock_write(anon_vma);
	anon_vma_chain_link(vma, avc, anon_vma);
	anon_vma->parent->num_children++;
	anon_vma_unlock_write(anon_vma);

	return 0;

 out_error_free_anon_vma:
	put_anon_vma(anon_vma);
 out_error:
	unlink_anon_vmas(vma);
	return -ENOMEM;
}
void unlink_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;
	struct anon_vma *root = NULL;

	/*
	 * Unlink each anon_vma chained to the VMA. This list is ordered
	 * from newest to oldest, ensuring the root anon_vma gets freed last.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);

		/*
		 * Leave empty anon_vmas on the list - we'll need
		 * to free them outside the lock.
		 */
		if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
			anon_vma->parent->num_children--;
			continue;
		}

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
	if (vma->anon_vma) {
		vma->anon_vma->num_active_vmas--;

		/*
		 * vma would still be needed after unlink, and anon_vma will be prepared
		 * when handle fault.
		 */
		vma->anon_vma = NULL;
	}
	unlock_anon_vma_root(root);

	/*
	 * Iterate the list once more, it now only contains empty and unlinked
	 * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
	 * needing to write-acquire the anon_vma->root->rwsem.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		VM_WARN_ON(anon_vma->num_children);
		VM_WARN_ON(anon_vma->num_active_vmas);
		put_anon_vma(anon_vma);

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}
static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	init_rwsem(&anon_vma->rwsem);
	atomic_set(&anon_vma->refcount, 0);
	anon_vma->rb_root = RB_ROOT_CACHED;
}
void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
			anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
			SLAB_PANIC|SLAB_ACCOUNT);
}
/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * Since there is no serialization whatsoever against folio_remove_rmap_*()
 * the best this function can do is return a refcount-increased anon_vma
 * that might have been relevant to this page.
 *
 * The page might have been remapped to a different anon_vma or the anon_vma
 * returned may already be freed (and even reused).
 *
 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 * ensure that any anon_vma obtained from the page will still be valid for as
 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
 *
 * All users of this function must be very careful when walking the anon_vma
 * chain and verify that the page in question is indeed mapped in it
 * [ something equivalent to page_mapped_in_vma() ].
 *
 * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
 * folio_remove_rmap_*() that the anon_vma pointer from page->mapping is valid
 * if there is a mapcount, we can dereference the anon_vma after observing
 * those bits.
 *
 * NOTE: the caller should normally hold folio lock when calling this. If
 * not, the caller needs to double check the anon_vma didn't change after
 * taking the anon_vma lock for either read or write (UFFDIO_MOVE can modify it
 * concurrently without folio lock protection). See folio_lock_anon_vma_read()
 * which has already covered that, and comment above remap_pages().
 */
struct anon_vma *folio_get_anon_vma(const struct folio *folio)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!folio_mapped(folio))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	/*
	 * If this folio is still mapped, then its anon_vma cannot have been
	 * freed. But if it has been unmapped, we have no security against the
	 * anon_vma structure being freed and reused (for another anon_vma:
	 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
	 * above cannot corrupt).
	 */
	if (!folio_mapped(folio)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}
out:
	rcu_read_unlock();

	return anon_vma;
}
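/*
 * Usage sketch (illustrative, not a caller in this file): pin the anon_vma,
 * then take its lock and re-check that the folio is still mapped before
 * walking the interval tree:
 *
 *	anon_vma = folio_get_anon_vma(folio);
 *	if (anon_vma) {
 *		anon_vma_lock_read(anon_vma);
 *		if (folio_mapped(folio)) {
 *			// walk anon_vma->rb_root here
 *		}
 *		anon_vma_unlock_read(anon_vma);
 *		put_anon_vma(anon_vma);
 *	}
 */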
/*
 * Similar to folio_get_anon_vma() except it locks the anon_vma.
 *
 * It's a little more complex as it tries to keep the fast path to a single
 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 * reference like with folio_get_anon_vma() and then block on the mutex
 * in the !rwc->try_lock case.
 */
543 struct anon_vma
*folio_lock_anon_vma_read(const struct folio
*folio
,
544 struct rmap_walk_control
*rwc
)
546 struct anon_vma
*anon_vma
= NULL
;
547 struct anon_vma
*root_anon_vma
;
548 unsigned long anon_mapping
;
552 anon_mapping
= (unsigned long)READ_ONCE(folio
->mapping
);
553 if ((anon_mapping
& PAGE_MAPPING_FLAGS
) != PAGE_MAPPING_ANON
)
555 if (!folio_mapped(folio
))
558 anon_vma
= (struct anon_vma
*) (anon_mapping
- PAGE_MAPPING_ANON
);
559 root_anon_vma
= READ_ONCE(anon_vma
->root
);
560 if (down_read_trylock(&root_anon_vma
->rwsem
)) {
562 * folio_move_anon_rmap() might have changed the anon_vma as we
563 * might not hold the folio lock here.
565 if (unlikely((unsigned long)READ_ONCE(folio
->mapping
) !=
567 up_read(&root_anon_vma
->rwsem
);
573 * If the folio is still mapped, then this anon_vma is still
574 * its anon_vma, and holding the mutex ensures that it will
575 * not go away, see anon_vma_free().
577 if (!folio_mapped(folio
)) {
578 up_read(&root_anon_vma
->rwsem
);
584 if (rwc
&& rwc
->try_lock
) {
586 rwc
->contended
= true;
590 /* trylock failed, we got to sleep */
591 if (!atomic_inc_not_zero(&anon_vma
->refcount
)) {
596 if (!folio_mapped(folio
)) {
598 put_anon_vma(anon_vma
);
	/* we pinned the anon_vma, it's safe to sleep */
604 anon_vma_lock_read(anon_vma
);
607 * folio_move_anon_rmap() might have changed the anon_vma as we might
608 * not hold the folio lock here.
610 if (unlikely((unsigned long)READ_ONCE(folio
->mapping
) !=
612 anon_vma_unlock_read(anon_vma
);
613 put_anon_vma(anon_vma
);
618 if (atomic_dec_and_test(&anon_vma
->refcount
)) {
620 * Oops, we held the last refcount, release the lock
621 * and bail -- can't simply use put_anon_vma() because
622 * we'll deadlock on the anon_vma_lock_write() recursion.
624 anon_vma_unlock_read(anon_vma
);
625 __put_anon_vma(anon_vma
);
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH

/*
 * Flush TLB entries for recently unmapped pages from remote CPUs. It is
 * important if a PTE was dirty when it was unmapped that it's flushed
 * before any IO is initiated on the page to prevent lost writes. Similarly,
 * it must be flushed before freeing to prevent data leakage.
 */
void try_to_unmap_flush(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (!tlb_ubc->flush_required)
		return;

	arch_tlbbatch_flush(&tlb_ubc->arch);
	tlb_ubc->flush_required = false;
	tlb_ubc->writable = false;
}
/* Flush iff there are potentially writable TLB entries that can race with IO */
void try_to_unmap_flush_dirty(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (tlb_ubc->writable)
		try_to_unmap_flush();
}
/*
 * Bits 0-14 of mm->tlb_flush_batched record pending generations.
 * Bits 16-30 of mm->tlb_flush_batched record flushed generations.
 */
#define TLB_FLUSH_BATCH_FLUSHED_SHIFT	16
#define TLB_FLUSH_BATCH_PENDING_MASK	\
	((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT - 1)) - 1)
#define TLB_FLUSH_BATCH_PENDING_LARGE	\
	(TLB_FLUSH_BATCH_PENDING_MASK / 2)
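/*
 * Worked example of the encoding above (values are illustrative): after three
 * batched unmaps and one completed flush, pending == 3 and flushed == 1, so
 * mm->tlb_flush_batched holds (1 << 16) | 3.  A reader then recovers both
 * generations and sees that a flush is still outstanding:
 *
 *	int batch   = atomic_read(&mm->tlb_flush_batched);
 *	int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK;	// 3
 *	int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT;	// 1
 */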
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
				      unsigned long uaddr)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
	int batch;
	bool writable = pte_dirty(pteval);

	if (!pte_accessible(mm, pteval))
		return;

	arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr);
	tlb_ubc->flush_required = true;

	/*
	 * Ensure compiler does not re-order the setting of tlb_flush_batched
	 * before the PTE is cleared.
	 */
	barrier();
	batch = atomic_read(&mm->tlb_flush_batched);
retry:
	if ((batch & TLB_FLUSH_BATCH_PENDING_MASK) > TLB_FLUSH_BATCH_PENDING_LARGE) {
		/*
		 * Prevent `pending' from catching up with `flushed' because of
		 * overflow. Reset `pending' and `flushed' to be 1 and 0 if
		 * `pending' becomes large.
		 */
		if (!atomic_try_cmpxchg(&mm->tlb_flush_batched, &batch, 1))
			goto retry;
	} else {
		atomic_inc(&mm->tlb_flush_batched);
	}

	/*
	 * If the PTE was dirty then it's best to assume it's writable. The
	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
	 * before the page is queued for IO.
	 */
	if (writable)
		tlb_ubc->writable = true;
}
/*
 * Returns true if the TLB flush should be deferred to the end of a batch of
 * unmap operations to reduce IPIs.
 */
static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	if (!(flags & TTU_BATCH_FLUSH))
		return false;

	return arch_tlbbatch_should_defer(mm);
}
/*
 * Reclaim unmaps pages under the PTL but do not flush the TLB prior to
 * releasing the PTL if TLB flushes are batched. It's possible for a parallel
 * operation such as mprotect or munmap to race between reclaim unmapping
 * the page and flushing the page. If this race occurs, it potentially allows
 * access to data via a stale TLB entry. Tracking all mm's that have TLB
 * batching in flight would be expensive during reclaim so instead track
 * whether TLB batching occurred in the past and if so then do a flush here
 * if required. This will cost one additional flush per reclaim cycle paid
 * by the first operation at risk such as mprotect and munmap.
 *
 * This must be called under the PTL so that an access to tlb_flush_batched
 * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
 * via the PTL.
 */
void flush_tlb_batched_pending(struct mm_struct *mm)
{
	int batch = atomic_read(&mm->tlb_flush_batched);
	int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK;
	int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT;

	if (pending != flushed) {
		arch_flush_tlb_batched_pending(mm);
		/*
		 * If the new TLB flushing is pending during flushing, leave
		 * mm->tlb_flush_batched as is, to avoid losing flushing.
		 */
		atomic_cmpxchg(&mm->tlb_flush_batched, batch,
			       pending | (pending << TLB_FLUSH_BATCH_FLUSHED_SHIFT));
	}
}
#else
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
				      unsigned long uaddr)
{
}

static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	return false;
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
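/*
 * Usage sketch (illustrative, mirroring try_to_unmap_one() below): the unmap
 * path pairs the helpers above as
 *
 *	if (should_defer_flush(mm, flags)) {
 *		pteval = ptep_get_and_clear(mm, address, pvmw.pte);
 *		set_tlb_ubc_flush_pending(mm, pteval, address);
 *	} else {
 *		pteval = ptep_clear_flush(vma, address, pvmw.pte);
 *	}
 *
 * and reclaim later calls try_to_unmap_flush_dirty() before starting IO on a
 * dirty folio, or try_to_unmap_flush() before freeing it.
 */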
/**
 * page_address_in_vma - The virtual address of a page in this VMA.
 * @folio: The folio containing the page.
 * @page: The page within the folio.
 * @vma: The VMA we need to know the address in.
 *
 * Calculates the user virtual address of this page in the specified VMA.
 * It is the caller's responsibility to check the page is actually
 * within the VMA.  There may not currently be a PTE pointing at this
 * page, but if a page fault occurs at this address, this is the page
 * which will be accessed.
 *
 * Context: Caller should hold a reference to the folio.  Caller should
 * hold a lock (eg the i_mmap_lock or the mmap_lock) which keeps the
 * VMA from being altered.
 *
 * Return: The virtual address corresponding to this page in the VMA.
 */
unsigned long page_address_in_vma(const struct folio *folio,
		const struct page *page, const struct vm_area_struct *vma)
{
	if (folio_test_anon(folio)) {
		struct anon_vma *page__anon_vma = folio_anon_vma(folio);
		/*
		 * Note: swapoff's unuse_vma() is more efficient with this
		 * check, and needs it to match anon_vma when KSM is active.
		 */
		if (!vma->anon_vma || !page__anon_vma ||
		    vma->anon_vma->root != page__anon_vma->root)
			return -EFAULT;
	} else if (!vma->vm_file) {
		return -EFAULT;
	} else if (vma->vm_file->f_mapping != folio->mapping) {
		return -EFAULT;
	}

	/* KSM folios don't reach here because of the !page__anon_vma check */
	return vma_address(vma, page_pgoff(folio, page), 1);
}
/*
 * Returns the actual pmd_t* where we expect 'address' to be mapped from, or
 * NULL if it doesn't exist.  No guarantees / checks on what the pmd_t*
 * represents.
 */
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		goto out;

	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
out:
	return pmd;
}
struct folio_referenced_arg {
	int mapcount;
	int referenced;
	unsigned long vm_flags;
	struct mem_cgroup *memcg;
};
847 * arg: folio_referenced_arg will be passed
849 static bool folio_referenced_one(struct folio
*folio
,
850 struct vm_area_struct
*vma
, unsigned long address
, void *arg
)
852 struct folio_referenced_arg
*pra
= arg
;
853 DEFINE_FOLIO_VMA_WALK(pvmw
, folio
, vma
, address
, 0);
855 unsigned long start
= address
, ptes
= 0;
857 while (page_vma_mapped_walk(&pvmw
)) {
858 address
= pvmw
.address
;
860 if (vma
->vm_flags
& VM_LOCKED
) {
861 if (!folio_test_large(folio
) || !pvmw
.pte
) {
862 /* Restore the mlock which got missed */
863 mlock_vma_folio(folio
, vma
);
864 page_vma_mapped_walk_done(&pvmw
);
865 pra
->vm_flags
|= VM_LOCKED
;
866 return false; /* To break the loop */
869 * For large folio fully mapped to VMA, will
870 * be handled after the pvmw loop.
872 * For large folio cross VMA boundaries, it's
873 * expected to be picked by page reclaim. But
874 * should skip reference of pages which are in
875 * the range of VM_LOCKED vma. As page reclaim
876 * should just count the reference of pages out
877 * the range of VM_LOCKED vma.
885 * Skip the non-shared swapbacked folio mapped solely by
886 * the exiting or OOM-reaped process. This avoids redundant
887 * swap-out followed by an immediate unmap.
889 if ((!atomic_read(&vma
->vm_mm
->mm_users
) ||
890 check_stable_address_space(vma
->vm_mm
)) &&
891 folio_test_anon(folio
) && folio_test_swapbacked(folio
) &&
892 !folio_likely_mapped_shared(folio
)) {
893 pra
->referenced
= -1;
894 page_vma_mapped_walk_done(&pvmw
);
898 if (lru_gen_enabled() && pvmw
.pte
) {
899 if (lru_gen_look_around(&pvmw
))
901 } else if (pvmw
.pte
) {
902 if (ptep_clear_flush_young_notify(vma
, address
,
905 } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE
)) {
906 if (pmdp_clear_flush_young_notify(vma
, address
,
910 /* unexpected pmd-mapped folio? */
917 if ((vma
->vm_flags
& VM_LOCKED
) &&
918 folio_test_large(folio
) &&
919 folio_within_vma(folio
, vma
)) {
920 unsigned long s_align
, e_align
;
922 s_align
= ALIGN_DOWN(start
, PMD_SIZE
);
923 e_align
= ALIGN_DOWN(start
+ folio_size(folio
) - 1, PMD_SIZE
);
925 /* folio doesn't cross page table boundary and fully mapped */
926 if ((s_align
== e_align
) && (ptes
== folio_nr_pages(folio
))) {
927 /* Restore the mlock which got missed */
928 mlock_vma_folio(folio
, vma
);
929 pra
->vm_flags
|= VM_LOCKED
;
930 return false; /* To break the loop */
935 folio_clear_idle(folio
);
936 if (folio_test_clear_young(folio
))
941 pra
->vm_flags
|= vma
->vm_flags
& ~VM_LOCKED
;
945 return false; /* To break the loop */
950 static bool invalid_folio_referenced_vma(struct vm_area_struct
*vma
, void *arg
)
952 struct folio_referenced_arg
*pra
= arg
;
953 struct mem_cgroup
*memcg
= pra
->memcg
;
956 * Ignore references from this mapping if it has no recency. If the
957 * folio has been used in another mapping, we will catch it; if this
958 * other mapping is already gone, the unmap path will have set the
959 * referenced flag or activated the folio in zap_pte_range().
961 if (!vma_has_recency(vma
))
965 * If we are reclaiming on behalf of a cgroup, skip counting on behalf
966 * of references from different cgroups.
968 if (memcg
&& !mm_match_cgroup(vma
->vm_mm
, memcg
))
/**
 * folio_referenced() - Test if the folio was referenced.
 * @folio: The folio to test.
 * @is_locked: Caller holds lock on the folio.
 * @memcg: target memory cgroup
 * @vm_flags: A combination of all the vma->vm_flags which referenced the folio.
 *
 * Quick test_and_clear_referenced for all mappings of a folio.
 *
 * Return: The number of mappings which referenced the folio. Return -1 if
 * the function bailed out due to rmap lock contention.
 */
int folio_referenced(struct folio *folio, int is_locked,
		     struct mem_cgroup *memcg, unsigned long *vm_flags)
{
	bool we_locked = false;
	struct folio_referenced_arg pra = {
		.mapcount = folio_mapcount(folio),
		.memcg = memcg,
	};
	struct rmap_walk_control rwc = {
		.rmap_one = folio_referenced_one,
		.arg = (void *)&pra,
		.anon_lock = folio_lock_anon_vma_read,
		.try_lock = true,
		.invalid_vma = invalid_folio_referenced_vma,
	};

	*vm_flags = 0;
	if (!folio_raw_mapping(folio))
		return 0;

	if (!is_locked && (!folio_test_anon(folio) || folio_test_ksm(folio))) {
		we_locked = folio_trylock(folio);
		if (!we_locked)
			return 1;
	}

	rmap_walk(folio, &rwc);
	*vm_flags = pra.vm_flags;

	if (we_locked)
		folio_unlock(folio);

	return rwc.contended ? -1 : pra.referenced;
}
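/*
 * Usage sketch (illustrative, not a caller in this file): reclaim-style code
 * typically feeds the result into an aging decision, treating -1 as "rmap
 * lock contended, keep the folio for now":
 *
 *	unsigned long vm_flags;
 *	int refs = folio_referenced(folio, 1, NULL, &vm_flags);
 *
 *	if (refs == -1 || (vm_flags & VM_LOCKED))
 *		keep_folio(folio);		// hypothetical helper
 *	else if (refs)
 *		folio_set_active(folio);	// hypothetical policy choice
 */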
1024 static int page_vma_mkclean_one(struct page_vma_mapped_walk
*pvmw
)
1027 struct vm_area_struct
*vma
= pvmw
->vma
;
1028 struct mmu_notifier_range range
;
1029 unsigned long address
= pvmw
->address
;
	 * We have to assume the worst case, i.e. pmd, for invalidation.  Note that
	 * the folio cannot be freed from this function.
1035 mmu_notifier_range_init(&range
, MMU_NOTIFY_PROTECTION_PAGE
, 0,
1036 vma
->vm_mm
, address
, vma_address_end(pvmw
));
1037 mmu_notifier_invalidate_range_start(&range
);
1039 while (page_vma_mapped_walk(pvmw
)) {
1042 address
= pvmw
->address
;
1044 pte_t
*pte
= pvmw
->pte
;
1045 pte_t entry
= ptep_get(pte
);
1047 if (!pte_dirty(entry
) && !pte_write(entry
))
1050 flush_cache_page(vma
, address
, pte_pfn(entry
));
1051 entry
= ptep_clear_flush(vma
, address
, pte
);
1052 entry
= pte_wrprotect(entry
);
1053 entry
= pte_mkclean(entry
);
1054 set_pte_at(vma
->vm_mm
, address
, pte
, entry
);
1057 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1058 pmd_t
*pmd
= pvmw
->pmd
;
1061 if (!pmd_dirty(*pmd
) && !pmd_write(*pmd
))
1064 flush_cache_range(vma
, address
,
1065 address
+ HPAGE_PMD_SIZE
);
1066 entry
= pmdp_invalidate(vma
, address
, pmd
);
1067 entry
= pmd_wrprotect(entry
);
1068 entry
= pmd_mkclean(entry
);
1069 set_pmd_at(vma
->vm_mm
, address
, pmd
, entry
);
1072 /* unexpected pmd-mapped folio? */
1081 mmu_notifier_invalidate_range_end(&range
);
1086 static bool page_mkclean_one(struct folio
*folio
, struct vm_area_struct
*vma
,
1087 unsigned long address
, void *arg
)
1089 DEFINE_FOLIO_VMA_WALK(pvmw
, folio
, vma
, address
, PVMW_SYNC
);
1092 *cleaned
+= page_vma_mkclean_one(&pvmw
);
1097 static bool invalid_mkclean_vma(struct vm_area_struct
*vma
, void *arg
)
1099 if (vma
->vm_flags
& VM_SHARED
)
int folio_mkclean(struct folio *folio)
{
	int cleaned = 0;
	struct address_space *mapping;
	struct rmap_walk_control rwc = {
		.arg = (void *)&cleaned,
		.rmap_one = page_mkclean_one,
		.invalid_vma = invalid_mkclean_vma,
	};

	BUG_ON(!folio_test_locked(folio));

	if (!folio_mapped(folio))
		return 0;

	mapping = folio_mapping(folio);
	if (!mapping)
		return 0;

	rmap_walk(folio, &rwc);

	return cleaned;
}
EXPORT_SYMBOL_GPL(folio_mkclean);
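/*
 * Usage sketch (illustrative): writeback callers such as
 * folio_clear_dirty_for_io() write-protect all mappings of a locked folio and
 * transfer any dirty bit found in the PTEs back to the folio before IO:
 *
 *	if (folio_mkclean(folio))
 *		folio_mark_dirty(folio);
 */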
/**
 * pfn_mkclean_range - Cleans the PTEs (including PMDs) mapped with range of
 *                     [@pfn, @pfn + @nr_pages) at the specific offset (@pgoff)
 *                     within the @vma of shared mappings. And since clean PTEs
 *                     should also be readonly, write protects them too.
 * @pfn: start pfn.
 * @nr_pages: number of physically contiguous pages starting with @pfn.
 * @pgoff: page offset that the @pfn mapped with.
 * @vma: vma that @pfn mapped within.
 *
 * Returns the number of cleaned PTEs (including PMDs).
 */
1142 int pfn_mkclean_range(unsigned long pfn
, unsigned long nr_pages
, pgoff_t pgoff
,
1143 struct vm_area_struct
*vma
)
1145 struct page_vma_mapped_walk pvmw
= {
1147 .nr_pages
= nr_pages
,
1153 if (invalid_mkclean_vma(vma
, NULL
))
1156 pvmw
.address
= vma_address(vma
, pgoff
, nr_pages
);
1157 VM_BUG_ON_VMA(pvmw
.address
== -EFAULT
, vma
);
1159 return page_vma_mkclean_one(&pvmw
);
1162 static __always_inline
unsigned int __folio_add_rmap(struct folio
*folio
,
1163 struct page
*page
, int nr_pages
, enum rmap_level level
,
1166 atomic_t
*mapped
= &folio
->_nr_pages_mapped
;
1167 const int orig_nr_pages
= nr_pages
;
1168 int first
= 0, nr
= 0;
1170 __folio_rmap_sanity_checks(folio
, page
, nr_pages
, level
);
1173 case RMAP_LEVEL_PTE
:
1174 if (!folio_test_large(folio
)) {
1175 nr
= atomic_inc_and_test(&folio
->_mapcount
);
1180 first
+= atomic_inc_and_test(&page
->_mapcount
);
1181 } while (page
++, --nr_pages
> 0);
1184 atomic_add_return_relaxed(first
, mapped
) < ENTIRELY_MAPPED
)
1187 atomic_add(orig_nr_pages
, &folio
->_large_mapcount
);
1189 case RMAP_LEVEL_PMD
:
1190 first
= atomic_inc_and_test(&folio
->_entire_mapcount
);
1192 nr
= atomic_add_return_relaxed(ENTIRELY_MAPPED
, mapped
);
1193 if (likely(nr
< ENTIRELY_MAPPED
+ ENTIRELY_MAPPED
)) {
1194 *nr_pmdmapped
= folio_nr_pages(folio
);
1195 nr
= *nr_pmdmapped
- (nr
& FOLIO_PAGES_MAPPED
);
1196 /* Raced ahead of a remove and another add? */
1197 if (unlikely(nr
< 0))
1200 /* Raced ahead of a remove of ENTIRELY_MAPPED */
1204 atomic_inc(&folio
->_large_mapcount
);
1211 * folio_move_anon_rmap - move a folio to our anon_vma
1212 * @folio: The folio to move to our anon_vma
1213 * @vma: The vma the folio belongs to
1215 * When a folio belongs exclusively to one process after a COW event,
1216 * that folio can be moved into the anon_vma that belongs to just that
1217 * process, so the rmap code will not search the parent or sibling processes.
1219 void folio_move_anon_rmap(struct folio
*folio
, struct vm_area_struct
*vma
)
1221 void *anon_vma
= vma
->anon_vma
;
1223 VM_BUG_ON_FOLIO(!folio_test_locked(folio
), folio
);
1224 VM_BUG_ON_VMA(!anon_vma
, vma
);
1226 anon_vma
+= PAGE_MAPPING_ANON
;
1228 * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written
1229 * simultaneously, so a concurrent reader (eg folio_referenced()'s
1230 * folio_test_anon()) will not see one without the other.
1232 WRITE_ONCE(folio
->mapping
, anon_vma
);
1236 * __folio_set_anon - set up a new anonymous rmap for a folio
1237 * @folio: The folio to set up the new anonymous rmap for.
1238 * @vma: VM area to add the folio to.
1239 * @address: User virtual address of the mapping
1240 * @exclusive: Whether the folio is exclusive to the process.
1242 static void __folio_set_anon(struct folio
*folio
, struct vm_area_struct
*vma
,
1243 unsigned long address
, bool exclusive
)
1245 struct anon_vma
*anon_vma
= vma
->anon_vma
;
1250 * If the folio isn't exclusive to this vma, we must use the _oldest_
1251 * possible anon_vma for the folio mapping!
1254 anon_vma
= anon_vma
->root
;
1257 * page_idle does a lockless/optimistic rmap scan on folio->mapping.
1258 * Make sure the compiler doesn't split the stores of anon_vma and
1259 * the PAGE_MAPPING_ANON type identifier, otherwise the rmap code
1260 * could mistake the mapping for a struct address_space and crash.
1262 anon_vma
= (void *) anon_vma
+ PAGE_MAPPING_ANON
;
1263 WRITE_ONCE(folio
->mapping
, (struct address_space
*) anon_vma
);
1264 folio
->index
= linear_page_index(vma
, address
);
1268 * __page_check_anon_rmap - sanity check anonymous rmap addition
1269 * @folio: The folio containing @page.
1270 * @page: the page to check the mapping of
1271 * @vma: the vm area in which the mapping is added
1272 * @address: the user virtual address mapped
1274 static void __page_check_anon_rmap(const struct folio
*folio
,
1275 const struct page
*page
, struct vm_area_struct
*vma
,
1276 unsigned long address
)
1279 * The page's anon-rmap details (mapping and index) are guaranteed to
1280 * be set up correctly at this point.
1282 * We have exclusion against folio_add_anon_rmap_*() because the caller
1283 * always holds the page locked.
1285 * We have exclusion against folio_add_new_anon_rmap because those pages
1286 * are initially only visible via the pagetables, and the pte is locked
1287 * over the call to folio_add_new_anon_rmap.
1289 VM_BUG_ON_FOLIO(folio_anon_vma(folio
)->root
!= vma
->anon_vma
->root
,
1291 VM_BUG_ON_PAGE(page_pgoff(folio
, page
) != linear_page_index(vma
, address
),
1295 static void __folio_mod_stat(struct folio
*folio
, int nr
, int nr_pmdmapped
)
1300 idx
= folio_test_anon(folio
) ? NR_ANON_MAPPED
: NR_FILE_MAPPED
;
1301 __lruvec_stat_mod_folio(folio
, idx
, nr
);
1304 if (folio_test_anon(folio
)) {
1306 __lruvec_stat_mod_folio(folio
, idx
, nr_pmdmapped
);
1308 /* NR_*_PMDMAPPED are not maintained per-memcg */
1309 idx
= folio_test_swapbacked(folio
) ?
1310 NR_SHMEM_PMDMAPPED
: NR_FILE_PMDMAPPED
;
1311 __mod_node_page_state(folio_pgdat(folio
), idx
,
1317 static __always_inline
void __folio_add_anon_rmap(struct folio
*folio
,
1318 struct page
*page
, int nr_pages
, struct vm_area_struct
*vma
,
1319 unsigned long address
, rmap_t flags
, enum rmap_level level
)
1321 int i
, nr
, nr_pmdmapped
= 0;
1323 VM_WARN_ON_FOLIO(!folio_test_anon(folio
), folio
);
1325 nr
= __folio_add_rmap(folio
, page
, nr_pages
, level
, &nr_pmdmapped
);
1327 if (likely(!folio_test_ksm(folio
)))
1328 __page_check_anon_rmap(folio
, page
, vma
, address
);
1330 __folio_mod_stat(folio
, nr
, nr_pmdmapped
);
1332 if (flags
& RMAP_EXCLUSIVE
) {
1334 case RMAP_LEVEL_PTE
:
1335 for (i
= 0; i
< nr_pages
; i
++)
1336 SetPageAnonExclusive(page
+ i
);
1338 case RMAP_LEVEL_PMD
:
1339 SetPageAnonExclusive(page
);
1343 for (i
= 0; i
< nr_pages
; i
++) {
1344 struct page
*cur_page
= page
+ i
;
1346 /* While PTE-mapping a THP we have a PMD and a PTE mapping. */
1347 VM_WARN_ON_FOLIO((atomic_read(&cur_page
->_mapcount
) > 0 ||
1348 (folio_test_large(folio
) &&
1349 folio_entire_mapcount(folio
) > 1)) &&
1350 PageAnonExclusive(cur_page
), folio
);
1354 * For large folio, only mlock it if it's fully mapped to VMA. It's
1355 * not easy to check whether the large folio is fully mapped to VMA
1356 * here. Only mlock normal 4K folio and leave page reclaim to handle
1359 if (!folio_test_large(folio
))
1360 mlock_vma_folio(folio
, vma
);
1364 * folio_add_anon_rmap_ptes - add PTE mappings to a page range of an anon folio
1365 * @folio: The folio to add the mappings to
1366 * @page: The first page to add
1367 * @nr_pages: The number of pages which will be mapped
1368 * @vma: The vm area in which the mappings are added
1369 * @address: The user virtual address of the first page to map
1370 * @flags: The rmap flags
1372 * The page range of folio is defined by [first_page, first_page + nr_pages)
1374 * The caller needs to hold the page table lock, and the page must be locked in
1375 * the anon_vma case: to serialize mapping,index checking after setting,
1376 * and to ensure that an anon folio is not being upgraded racily to a KSM folio
1377 * (but KSM folios are never downgraded).
1379 void folio_add_anon_rmap_ptes(struct folio
*folio
, struct page
*page
,
1380 int nr_pages
, struct vm_area_struct
*vma
, unsigned long address
,
1383 __folio_add_anon_rmap(folio
, page
, nr_pages
, vma
, address
, flags
,
1388 * folio_add_anon_rmap_pmd - add a PMD mapping to a page range of an anon folio
1389 * @folio: The folio to add the mapping to
1390 * @page: The first page to add
1391 * @vma: The vm area in which the mapping is added
1392 * @address: The user virtual address of the first page to map
1393 * @flags: The rmap flags
1395 * The page range of folio is defined by [first_page, first_page + HPAGE_PMD_NR)
1397 * The caller needs to hold the page table lock, and the page must be locked in
1398 * the anon_vma case: to serialize mapping,index checking after setting.
1400 void folio_add_anon_rmap_pmd(struct folio
*folio
, struct page
*page
,
1401 struct vm_area_struct
*vma
, unsigned long address
, rmap_t flags
)
1403 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1404 __folio_add_anon_rmap(folio
, page
, HPAGE_PMD_NR
, vma
, address
, flags
,
1412 * folio_add_new_anon_rmap - Add mapping to a new anonymous folio.
1413 * @folio: The folio to add the mapping to.
1414 * @vma: the vm area in which the mapping is added
1415 * @address: the user virtual address mapped
1416 * @flags: The rmap flags
1418 * Like folio_add_anon_rmap_*() but must only be called on *new* folios.
1419 * This means the inc-and-test can be bypassed.
1420 * The folio doesn't necessarily need to be locked while it's exclusive
1421 * unless two threads map it concurrently. However, the folio must be
1422 * locked if it's shared.
1424 * If the folio is pmd-mappable, it is accounted as a THP.
1426 void folio_add_new_anon_rmap(struct folio
*folio
, struct vm_area_struct
*vma
,
1427 unsigned long address
, rmap_t flags
)
1429 const int nr
= folio_nr_pages(folio
);
1430 const bool exclusive
= flags
& RMAP_EXCLUSIVE
;
1431 int nr_pmdmapped
= 0;
1433 VM_WARN_ON_FOLIO(folio_test_hugetlb(folio
), folio
);
1434 VM_WARN_ON_FOLIO(!exclusive
&& !folio_test_locked(folio
), folio
);
1435 VM_BUG_ON_VMA(address
< vma
->vm_start
||
1436 address
+ (nr
<< PAGE_SHIFT
) > vma
->vm_end
, vma
);
1439 * VM_DROPPABLE mappings don't swap; instead they're just dropped when
1440 * under memory pressure.
1442 if (!folio_test_swapbacked(folio
) && !(vma
->vm_flags
& VM_DROPPABLE
))
1443 __folio_set_swapbacked(folio
);
1444 __folio_set_anon(folio
, vma
, address
, exclusive
);
1446 if (likely(!folio_test_large(folio
))) {
1447 /* increment count (starts at -1) */
1448 atomic_set(&folio
->_mapcount
, 0);
1450 SetPageAnonExclusive(&folio
->page
);
1451 } else if (!folio_test_pmd_mappable(folio
)) {
1454 for (i
= 0; i
< nr
; i
++) {
1455 struct page
*page
= folio_page(folio
, i
);
1457 /* increment count (starts at -1) */
1458 atomic_set(&page
->_mapcount
, 0);
1460 SetPageAnonExclusive(page
);
1463 /* increment count (starts at -1) */
1464 atomic_set(&folio
->_large_mapcount
, nr
- 1);
1465 atomic_set(&folio
->_nr_pages_mapped
, nr
);
1467 /* increment count (starts at -1) */
1468 atomic_set(&folio
->_entire_mapcount
, 0);
1469 /* increment count (starts at -1) */
1470 atomic_set(&folio
->_large_mapcount
, 0);
1471 atomic_set(&folio
->_nr_pages_mapped
, ENTIRELY_MAPPED
);
1473 SetPageAnonExclusive(&folio
->page
);
1477 __folio_mod_stat(folio
, nr
, nr_pmdmapped
);
1478 mod_mthp_stat(folio_order(folio
), MTHP_STAT_NR_ANON
, 1);
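/*
 * Usage sketch (illustrative, assuming a do_anonymous_page()-style caller):
 * a fault path allocates a new folio, adds the exclusive anon rmap, and only
 * then installs the PTE under the page table lock:
 *
 *	folio = vma_alloc_zeroed_movable_folio(vma, addr);
 *	folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
 *	folio_add_lru_vma(folio, vma);
 *	set_pte_at(vma->vm_mm, addr, pte, mk_pte(&folio->page, vma->vm_page_prot));
 */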
1481 static __always_inline
void __folio_add_file_rmap(struct folio
*folio
,
1482 struct page
*page
, int nr_pages
, struct vm_area_struct
*vma
,
1483 enum rmap_level level
)
1485 int nr
, nr_pmdmapped
= 0;
1487 VM_WARN_ON_FOLIO(folio_test_anon(folio
), folio
);
1489 nr
= __folio_add_rmap(folio
, page
, nr_pages
, level
, &nr_pmdmapped
);
1490 __folio_mod_stat(folio
, nr
, nr_pmdmapped
);
1492 /* See comments in folio_add_anon_rmap_*() */
1493 if (!folio_test_large(folio
))
1494 mlock_vma_folio(folio
, vma
);
1498 * folio_add_file_rmap_ptes - add PTE mappings to a page range of a folio
1499 * @folio: The folio to add the mappings to
1500 * @page: The first page to add
1501 * @nr_pages: The number of pages that will be mapped using PTEs
1502 * @vma: The vm area in which the mappings are added
1504 * The page range of the folio is defined by [page, page + nr_pages)
1506 * The caller needs to hold the page table lock.
1508 void folio_add_file_rmap_ptes(struct folio
*folio
, struct page
*page
,
1509 int nr_pages
, struct vm_area_struct
*vma
)
1511 __folio_add_file_rmap(folio
, page
, nr_pages
, vma
, RMAP_LEVEL_PTE
);
1515 * folio_add_file_rmap_pmd - add a PMD mapping to a page range of a folio
1516 * @folio: The folio to add the mapping to
1517 * @page: The first page to add
1518 * @vma: The vm area in which the mapping is added
1520 * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
1522 * The caller needs to hold the page table lock.
1524 void folio_add_file_rmap_pmd(struct folio
*folio
, struct page
*page
,
1525 struct vm_area_struct
*vma
)
1527 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1528 __folio_add_file_rmap(folio
, page
, HPAGE_PMD_NR
, vma
, RMAP_LEVEL_PMD
);
1534 static __always_inline
void __folio_remove_rmap(struct folio
*folio
,
1535 struct page
*page
, int nr_pages
, struct vm_area_struct
*vma
,
1536 enum rmap_level level
)
1538 atomic_t
*mapped
= &folio
->_nr_pages_mapped
;
1539 int last
= 0, nr
= 0, nr_pmdmapped
= 0;
1540 bool partially_mapped
= false;
1542 __folio_rmap_sanity_checks(folio
, page
, nr_pages
, level
);
1545 case RMAP_LEVEL_PTE
:
1546 if (!folio_test_large(folio
)) {
1547 nr
= atomic_add_negative(-1, &folio
->_mapcount
);
1551 atomic_sub(nr_pages
, &folio
->_large_mapcount
);
1553 last
+= atomic_add_negative(-1, &page
->_mapcount
);
1554 } while (page
++, --nr_pages
> 0);
1557 atomic_sub_return_relaxed(last
, mapped
) < ENTIRELY_MAPPED
)
1560 partially_mapped
= nr
&& atomic_read(mapped
);
1562 case RMAP_LEVEL_PMD
:
1563 atomic_dec(&folio
->_large_mapcount
);
1564 last
= atomic_add_negative(-1, &folio
->_entire_mapcount
);
1566 nr
= atomic_sub_return_relaxed(ENTIRELY_MAPPED
, mapped
);
1567 if (likely(nr
< ENTIRELY_MAPPED
)) {
1568 nr_pmdmapped
= folio_nr_pages(folio
);
1569 nr
= nr_pmdmapped
- (nr
& FOLIO_PAGES_MAPPED
);
1570 /* Raced ahead of another remove and an add? */
1571 if (unlikely(nr
< 0))
1574 /* An add of ENTIRELY_MAPPED raced ahead */
1579 partially_mapped
= nr
&& nr
< nr_pmdmapped
;
1584 * Queue anon large folio for deferred split if at least one page of
1585 * the folio is unmapped and at least one page is still mapped.
1587 * Check partially_mapped first to ensure it is a large folio.
1589 if (partially_mapped
&& folio_test_anon(folio
) &&
1590 !folio_test_partially_mapped(folio
))
1591 deferred_split_folio(folio
, true);
1593 __folio_mod_stat(folio
, -nr
, -nr_pmdmapped
);
1596 * It would be tidy to reset folio_test_anon mapping when fully
1597 * unmapped, but that might overwrite a racing folio_add_anon_rmap_*()
1598 * which increments mapcount after us but sets mapping before us:
1599 * so leave the reset to free_pages_prepare, and remember that
1600 * it's only reliable while mapped.
1603 munlock_vma_folio(folio
, vma
);
1607 * folio_remove_rmap_ptes - remove PTE mappings from a page range of a folio
1608 * @folio: The folio to remove the mappings from
1609 * @page: The first page to remove
1610 * @nr_pages: The number of pages that will be removed from the mapping
1611 * @vma: The vm area from which the mappings are removed
1613 * The page range of the folio is defined by [page, page + nr_pages)
1615 * The caller needs to hold the page table lock.
1617 void folio_remove_rmap_ptes(struct folio
*folio
, struct page
*page
,
1618 int nr_pages
, struct vm_area_struct
*vma
)
1620 __folio_remove_rmap(folio
, page
, nr_pages
, vma
, RMAP_LEVEL_PTE
);
1624 * folio_remove_rmap_pmd - remove a PMD mapping from a page range of a folio
1625 * @folio: The folio to remove the mapping from
1626 * @page: The first page to remove
1627 * @vma: The vm area from which the mapping is removed
1629 * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
1631 * The caller needs to hold the page table lock.
1633 void folio_remove_rmap_pmd(struct folio
*folio
, struct page
*page
,
1634 struct vm_area_struct
*vma
)
1636 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1637 __folio_remove_rmap(folio
, page
, HPAGE_PMD_NR
, vma
, RMAP_LEVEL_PMD
);
1644 * @arg: enum ttu_flags will be passed to this argument
1646 static bool try_to_unmap_one(struct folio
*folio
, struct vm_area_struct
*vma
,
1647 unsigned long address
, void *arg
)
1649 struct mm_struct
*mm
= vma
->vm_mm
;
1650 DEFINE_FOLIO_VMA_WALK(pvmw
, folio
, vma
, address
, 0);
1652 struct page
*subpage
;
1653 bool anon_exclusive
, ret
= true;
1654 struct mmu_notifier_range range
;
1655 enum ttu_flags flags
= (enum ttu_flags
)(long)arg
;
1657 unsigned long hsz
= 0;
1660 * When racing against e.g. zap_pte_range() on another cpu,
1661 * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(),
1662 * try_to_unmap() may return before page_mapped() has become false,
1663 * if page table locking is skipped: use TTU_SYNC to wait for that.
1665 if (flags
& TTU_SYNC
)
1666 pvmw
.flags
= PVMW_SYNC
;
	 * For THP, we have to assume the worst case, i.e. pmd, for invalidation.
	 * For hugetlb, it could be much worse if we need to do pud
	 * invalidation in the case of pmd sharing.
	 *
	 * Note that the folio cannot be freed in this function, as the caller
	 * of try_to_unmap() must hold a reference on the folio.
1676 range
.end
= vma_address_end(&pvmw
);
1677 mmu_notifier_range_init(&range
, MMU_NOTIFY_CLEAR
, 0, vma
->vm_mm
,
1678 address
, range
.end
);
1679 if (folio_test_hugetlb(folio
)) {
1681 * If sharing is possible, start and end will be adjusted
1684 adjust_range_if_pmd_sharing_possible(vma
, &range
.start
,
1687 /* We need the huge page size for set_huge_pte_at() */
1688 hsz
= huge_page_size(hstate_vma(vma
));
1690 mmu_notifier_invalidate_range_start(&range
);
1692 while (page_vma_mapped_walk(&pvmw
)) {
1694 * If the folio is in an mlock()d vma, we must not swap it out.
1696 if (!(flags
& TTU_IGNORE_MLOCK
) &&
1697 (vma
->vm_flags
& VM_LOCKED
)) {
1698 /* Restore the mlock which got missed */
1699 if (!folio_test_large(folio
))
1700 mlock_vma_folio(folio
, vma
);
1705 if (unmap_huge_pmd_locked(vma
, pvmw
.address
, pvmw
.pmd
,
1709 if (flags
& TTU_SPLIT_HUGE_PMD
) {
1711 * We temporarily have to drop the PTL and
1712 * restart so we can process the PTE-mapped THP.
1714 split_huge_pmd_locked(vma
, pvmw
.address
,
1715 pvmw
.pmd
, false, folio
);
1716 flags
&= ~TTU_SPLIT_HUGE_PMD
;
1717 page_vma_mapped_walk_restart(&pvmw
);
1722 /* Unexpected PMD-mapped THP? */
1723 VM_BUG_ON_FOLIO(!pvmw
.pte
, folio
);
1725 pfn
= pte_pfn(ptep_get(pvmw
.pte
));
1726 subpage
= folio_page(folio
, pfn
- folio_pfn(folio
));
1727 address
= pvmw
.address
;
1728 anon_exclusive
= folio_test_anon(folio
) &&
1729 PageAnonExclusive(subpage
);
1731 if (folio_test_hugetlb(folio
)) {
1732 bool anon
= folio_test_anon(folio
);
1735 * The try_to_unmap() is only passed a hugetlb page
1736 * in the case where the hugetlb page is poisoned.
1738 VM_BUG_ON_PAGE(!PageHWPoison(subpage
), subpage
);
1740 * huge_pmd_unshare may unmap an entire PMD page.
1741 * There is no way of knowing exactly which PMDs may
1742 * be cached for this mm, so we must flush them all.
1743 * start/end were already adjusted above to cover this
1746 flush_cache_range(vma
, range
.start
, range
.end
);
1749 * To call huge_pmd_unshare, i_mmap_rwsem must be
1750 * held in write mode. Caller needs to explicitly
1751 * do this outside rmap routines.
1753 * We also must hold hugetlb vma_lock in write mode.
1754 * Lock order dictates acquiring vma_lock BEFORE
1755 * i_mmap_rwsem. We can only try lock here and fail
1759 VM_BUG_ON(!(flags
& TTU_RMAP_LOCKED
));
1760 if (!hugetlb_vma_trylock_write(vma
))
1762 if (huge_pmd_unshare(mm
, vma
, address
, pvmw
.pte
)) {
1763 hugetlb_vma_unlock_write(vma
);
1764 flush_tlb_range(vma
,
1765 range
.start
, range
.end
);
1767 * The ref count of the PMD page was
1768 * dropped which is part of the way map
1769 * counting is done for shared PMDs.
1770 * Return 'true' here. When there is
1771 * no other sharing, huge_pmd_unshare
1772 * returns false and we will unmap the
1773 * actual page and drop map count
1778 hugetlb_vma_unlock_write(vma
);
1780 pteval
= huge_ptep_clear_flush(vma
, address
, pvmw
.pte
);
1782 flush_cache_page(vma
, address
, pfn
);
1783 /* Nuke the page table entry. */
1784 if (should_defer_flush(mm
, flags
)) {
1786 * We clear the PTE but do not flush so potentially
1787 * a remote CPU could still be writing to the folio.
1788 * If the entry was previously clean then the
1789 * architecture must guarantee that a clear->dirty
1790 * transition on a cached TLB entry is written through
1791 * and traps if the PTE is unmapped.
1793 pteval
= ptep_get_and_clear(mm
, address
, pvmw
.pte
);
1795 set_tlb_ubc_flush_pending(mm
, pteval
, address
);
1797 pteval
= ptep_clear_flush(vma
, address
, pvmw
.pte
);
1802 * Now the pte is cleared. If this pte was uffd-wp armed,
1803 * we may want to replace a none pte with a marker pte if
1804 * it's file-backed, so we don't lose the tracking info.
1806 pte_install_uffd_wp_if_needed(vma
, address
, pvmw
.pte
, pteval
);
1808 /* Set the dirty flag on the folio now the pte is gone. */
1809 if (pte_dirty(pteval
))
1810 folio_mark_dirty(folio
);
1812 /* Update high watermark before we lower rss */
1813 update_hiwater_rss(mm
);
1815 if (PageHWPoison(subpage
) && (flags
& TTU_HWPOISON
)) {
1816 pteval
= swp_entry_to_pte(make_hwpoison_entry(subpage
));
1817 if (folio_test_hugetlb(folio
)) {
1818 hugetlb_count_sub(folio_nr_pages(folio
), mm
);
1819 set_huge_pte_at(mm
, address
, pvmw
.pte
, pteval
,
1822 dec_mm_counter(mm
, mm_counter(folio
));
1823 set_pte_at(mm
, address
, pvmw
.pte
, pteval
);
1826 } else if (pte_unused(pteval
) && !userfaultfd_armed(vma
)) {
1828 * The guest indicated that the page content is of no
1829 * interest anymore. Simply discard the pte, vmscan
1830 * will take care of the rest.
1831 * A future reference will then fault in a new zero
1832 * page. When userfaultfd is active, we must not drop
1833 * this page though, as its main user (postcopy
1834 * migration) will not expect userfaults on already
1837 dec_mm_counter(mm
, mm_counter(folio
));
1838 } else if (folio_test_anon(folio
)) {
1839 swp_entry_t entry
= page_swap_entry(subpage
);
1842 * Store the swap location in the pte.
1843 * See handle_pte_fault() ...
1845 if (unlikely(folio_test_swapbacked(folio
) !=
1846 folio_test_swapcache(folio
))) {
1851 /* MADV_FREE page check */
1852 if (!folio_test_swapbacked(folio
)) {
1853 int ref_count
, map_count
;
1856 * Synchronize with gup_pte_range():
1857 * - clear PTE; barrier; read refcount
1858 * - inc refcount; barrier; read PTE
1862 ref_count
= folio_ref_count(folio
);
1863 map_count
= folio_mapcount(folio
);
1866 * Order reads for page refcount and dirty flag
1867 * (see comments in __remove_mapping()).
1872 * The only page refs must be one from isolation
1873 * plus the rmap(s) (dropped by discard:).
1875 if (ref_count
== 1 + map_count
&&
1876 (!folio_test_dirty(folio
) ||
1878 * Unlike MADV_FREE mappings, VM_DROPPABLE
1879 * ones can be dropped even if they've
1882 (vma
->vm_flags
& VM_DROPPABLE
))) {
1883 dec_mm_counter(mm
, MM_ANONPAGES
);
1888 * If the folio was redirtied, it cannot be
1889 * discarded. Remap the page to page table.
1891 set_pte_at(mm
, address
, pvmw
.pte
, pteval
);
1893 * Unlike MADV_FREE mappings, VM_DROPPABLE ones
1894 * never get swap backed on failure to drop.
1896 if (!(vma
->vm_flags
& VM_DROPPABLE
))
1897 folio_set_swapbacked(folio
);
1901 if (swap_duplicate(entry
) < 0) {
1902 set_pte_at(mm
, address
, pvmw
.pte
, pteval
);
1905 if (arch_unmap_one(mm
, vma
, address
, pteval
) < 0) {
1907 set_pte_at(mm
, address
, pvmw
.pte
, pteval
);
1911 /* See folio_try_share_anon_rmap(): clear PTE first. */
1912 if (anon_exclusive
&&
1913 folio_try_share_anon_rmap_pte(folio
, subpage
)) {
1915 set_pte_at(mm
, address
, pvmw
.pte
, pteval
);
1918 if (list_empty(&mm
->mmlist
)) {
1919 spin_lock(&mmlist_lock
);
1920 if (list_empty(&mm
->mmlist
))
1921 list_add(&mm
->mmlist
, &init_mm
.mmlist
);
1922 spin_unlock(&mmlist_lock
);
1924 dec_mm_counter(mm
, MM_ANONPAGES
);
1925 inc_mm_counter(mm
, MM_SWAPENTS
);
1926 swp_pte
= swp_entry_to_pte(entry
);
1928 swp_pte
= pte_swp_mkexclusive(swp_pte
);
1929 if (pte_soft_dirty(pteval
))
1930 swp_pte
= pte_swp_mksoft_dirty(swp_pte
);
1931 if (pte_uffd_wp(pteval
))
1932 swp_pte
= pte_swp_mkuffd_wp(swp_pte
);
1933 set_pte_at(mm
, address
, pvmw
.pte
, swp_pte
);
1936 * This is a locked file-backed folio,
1937 * so it cannot be removed from the page
1938 * cache and replaced by a new folio before
1939 * mmu_notifier_invalidate_range_end, so no
1940 * concurrent thread might update its page table
1941 * to point at a new folio while a device is
1942 * still using this folio.
1944 * See Documentation/mm/mmu_notifier.rst
1946 dec_mm_counter(mm
, mm_counter_file(folio
));
1949 if (unlikely(folio_test_hugetlb(folio
)))
1950 hugetlb_remove_rmap(folio
);
1952 folio_remove_rmap_pte(folio
, subpage
, vma
);
1953 if (vma
->vm_flags
& VM_LOCKED
)
1954 mlock_drain_local();
1960 page_vma_mapped_walk_done(&pvmw
);
1964 mmu_notifier_invalidate_range_end(&range
);
static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
{
	return vma_is_temporary_stack(vma);
}

static int folio_not_mapped(struct folio *folio)
{
	return !folio_mapped(folio);
}
/**
 * try_to_unmap - Try to remove all page table mappings to a folio.
 * @folio: The folio to unmap.
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this
 * folio.  It is the caller's responsibility to check if the folio is
 * still mapped if needed (use TTU_SYNC to prevent accounting races).
 *
 * Context: Caller must hold the folio lock.
 */
void try_to_unmap(struct folio *folio, enum ttu_flags flags)
{
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)flags,
		.done = folio_not_mapped,
		.anon_lock = folio_lock_anon_vma_read,
	};

	if (flags & TTU_RMAP_LOCKED)
		rmap_walk_locked(folio, &rwc);
	else
		rmap_walk(folio, &rwc);
}
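/*
 * Usage sketch (illustrative): reclaim unmaps a locked folio and then
 * re-checks whether any mapping survived before it may free the folio:
 *
 *	try_to_unmap(folio, TTU_BATCH_FLUSH);
 *	if (folio_mapped(folio))
 *		goto keep_locked;	// hypothetical caller label
 */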
2006	 * @arg: enum ttu_flags will be passed to this argument.
2008	 * If TTU_SPLIT_HUGE_PMD is specified any PMD mappings will be split into PTEs
2009	 * containing migration entries.
2011	static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
2012			     unsigned long address, void *arg)
2014		struct mm_struct *mm = vma->vm_mm;
2015		DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
2017		struct page *subpage;
2018		bool anon_exclusive, ret = true;
2019		struct mmu_notifier_range range;
2020		enum ttu_flags flags = (enum ttu_flags)(long)arg;
2022		unsigned long hsz = 0;
2025		 * When racing against e.g. zap_pte_range() on another cpu,
2026		 * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(),
2027		 * try_to_migrate() may return before page_mapped() has become false,
2028		 * if page table locking is skipped: use TTU_SYNC to wait for that.
2030		if (flags & TTU_SYNC)
2031			pvmw.flags = PVMW_SYNC;
2034		 * unmap_page() in mm/huge_memory.c is the only user of migration with
2035		 * TTU_SPLIT_HUGE_PMD and it wants to freeze.
2037		if (flags & TTU_SPLIT_HUGE_PMD)
2038			split_huge_pmd_address(vma, address, true, folio);
2041		 * For THP, we have to assume the worst case, i.e. pmd, for invalidation.
2042		 * For hugetlb, it could be much worse if we need to do pud
2043		 * invalidation in the case of pmd sharing.
2045		 * Note that the page cannot be freed in this function, as the caller of
2046		 * try_to_unmap() must hold a reference on the page.
2048		range.end = vma_address_end(&pvmw);
2049		mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2050					address, range.end);
2051		if (folio_test_hugetlb(folio)) {
2053			 * If sharing is possible, start and end will be adjusted
2056			adjust_range_if_pmd_sharing_possible(vma, &range.start,
2059			/* We need the huge page size for set_huge_pte_at() */
2060			hsz = huge_page_size(hstate_vma(vma));
2062		mmu_notifier_invalidate_range_start(&range);
2064		while (page_vma_mapped_walk(&pvmw)) {
2065	#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
2066			/* PMD-mapped THP migration entry */
2068				subpage = folio_page(folio,
2069					pmd_pfn(*pvmw.pmd) - folio_pfn(folio));
2070				VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
2071						!folio_test_pmd_mappable(folio), folio);
2073				if (set_pmd_migration_entry(&pvmw, subpage)) {
2075					page_vma_mapped_walk_done(&pvmw);
2082			/* Unexpected PMD-mapped THP? */
2083			VM_BUG_ON_FOLIO(!pvmw.pte, folio);
2085			pfn = pte_pfn(ptep_get(pvmw.pte));
2087			if (folio_is_zone_device(folio)) {
2089				 * Our PTE is a non-present device exclusive entry and
2090				 * calculating the subpage as for the common case would
2091				 * result in an invalid pointer.
2093				 * Since only PAGE_SIZE pages can currently be
2094				 * migrated, just set it to page. This will need to be
2095				 * changed when hugepage migrations to device private
2096				 * memory are supported.
2098				VM_BUG_ON_FOLIO(folio_nr_pages(folio) > 1, folio);
2099				subpage = &folio->page;
2101				subpage = folio_page(folio, pfn - folio_pfn(folio));
2103			address = pvmw.address;
2104			anon_exclusive = folio_test_anon(folio) &&
2105					 PageAnonExclusive(subpage);
2107			if (folio_test_hugetlb(folio)) {
2108				bool anon = folio_test_anon(folio);
2111				 * huge_pmd_unshare may unmap an entire PMD page.
2112				 * There is no way of knowing exactly which PMDs may
2113				 * be cached for this mm, so we must flush them all.
2114				 * start/end were already adjusted above to cover this
2117				flush_cache_range(vma, range.start, range.end);
2120				 * To call huge_pmd_unshare, i_mmap_rwsem must be
2121				 * held in write mode.  Caller needs to explicitly
2122				 * do this outside rmap routines.
2124				 * We also must hold hugetlb vma_lock in write mode.
2125				 * Lock order dictates acquiring vma_lock BEFORE
2126				 * i_mmap_rwsem.  We can only try lock here and
2127				 * fail if unsuccessful.
2130					VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
2131					if (!hugetlb_vma_trylock_write(vma)) {
2132						page_vma_mapped_walk_done(&pvmw);
2136					if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
2137						hugetlb_vma_unlock_write(vma);
2138						flush_tlb_range(vma,
2139							range.start, range.end);
2142						 * The ref count of the PMD page was
2143						 * dropped which is part of the way map
2144						 * counting is done for shared PMDs.
2145						 * Return 'true' here.  When there is
2146						 * no other sharing, huge_pmd_unshare
2147						 * returns false and we will unmap the
2148						 * actual page and drop map count
2151						page_vma_mapped_walk_done(&pvmw);
2154					hugetlb_vma_unlock_write(vma);
2156				/* Nuke the hugetlb page table entry */
2157				pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
2159				flush_cache_page(vma, address, pfn);
2160				/* Nuke the page table entry. */
2161				if (should_defer_flush(mm, flags)) {
2163					 * We clear the PTE but do not flush so potentially
2164					 * a remote CPU could still be writing to the folio.
2165					 * If the entry was previously clean then the
2166					 * architecture must guarantee that a clear->dirty
2167					 * transition on a cached TLB entry is written through
2168					 * and traps if the PTE is unmapped.
2170					pteval = ptep_get_and_clear(mm, address, pvmw.pte);
2172					set_tlb_ubc_flush_pending(mm, pteval, address);
2174					pteval = ptep_clear_flush(vma, address, pvmw.pte);
2178			/* Set the dirty flag on the folio now the pte is gone. */
2179			if (pte_dirty(pteval))
2180				folio_mark_dirty(folio);
2182			/* Update high watermark before we lower rss */
2183			update_hiwater_rss(mm);
2185			if (folio_is_device_private(folio)) {
2186				unsigned long pfn = folio_pfn(folio);
2191					WARN_ON_ONCE(folio_try_share_anon_rmap_pte(folio,
2195				 * Store the pfn of the page in a special migration
2196				 * pte. do_swap_page() will wait until the migration
2197				 * pte is removed and then restart fault handling.
2199				entry = pte_to_swp_entry(pteval);
2200				if (is_writable_device_private_entry(entry))
2201					entry = make_writable_migration_entry(pfn);
2202				else if (anon_exclusive)
2203					entry = make_readable_exclusive_migration_entry(pfn);
2205					entry = make_readable_migration_entry(pfn);
2206				swp_pte = swp_entry_to_pte(entry);
2209				 * pteval maps a zone device page and is therefore
2212				if (pte_swp_soft_dirty(pteval))
2213					swp_pte = pte_swp_mksoft_dirty(swp_pte);
2214				if (pte_swp_uffd_wp(pteval))
2215					swp_pte = pte_swp_mkuffd_wp(swp_pte);
2216				set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
2217				trace_set_migration_pte(pvmw.address, pte_val(swp_pte),
2218							folio_order(folio));
2220				 * No need to invalidate here; it will synchronize
2221				 * against the special swap migration pte.
2223			} else if (PageHWPoison(subpage)) {
2224				pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
2225				if (folio_test_hugetlb(folio)) {
2226					hugetlb_count_sub(folio_nr_pages(folio), mm);
2227					set_huge_pte_at(mm, address, pvmw.pte, pteval,
2230					dec_mm_counter(mm, mm_counter(folio));
2231					set_pte_at(mm, address, pvmw.pte, pteval);
2234			} else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
2236				 * The guest indicated that the page content is of no
2237				 * interest anymore. Simply discard the pte, vmscan
2238				 * will take care of the rest.
2239				 * A future reference will then fault in a new zero
2240				 * page. When userfaultfd is active, we must not drop
2241				 * this page though, as its main user (postcopy
2242				 * migration) will not expect userfaults on already
2245				dec_mm_counter(mm, mm_counter(folio));
2250				if (arch_unmap_one(mm, vma, address, pteval) < 0) {
2251					if (folio_test_hugetlb(folio))
2252						set_huge_pte_at(mm, address, pvmw.pte,
2255						set_pte_at(mm, address, pvmw.pte, pteval);
2257					page_vma_mapped_walk_done(&pvmw);
2260				VM_BUG_ON_PAGE(pte_write(pteval) && folio_test_anon(folio) &&
2261					       !anon_exclusive, subpage);
2263				/* See folio_try_share_anon_rmap_pte(): clear PTE first. */
2264				if (folio_test_hugetlb(folio)) {
2265					if (anon_exclusive &&
2266					    hugetlb_try_share_anon_rmap(folio)) {
2267						set_huge_pte_at(mm, address, pvmw.pte,
2270						page_vma_mapped_walk_done(&pvmw);
2273				} else if (anon_exclusive &&
2274					   folio_try_share_anon_rmap_pte(folio, subpage)) {
2275					set_pte_at(mm, address, pvmw.pte, pteval);
2277					page_vma_mapped_walk_done(&pvmw);
2282				 * Store the pfn of the page in a special migration
2283				 * pte. do_swap_page() will wait until the migration
2284				 * pte is removed and then restart fault handling.
2286				if (pte_write(pteval))
2287					entry = make_writable_migration_entry(
2288								page_to_pfn(subpage));
2289				else if (anon_exclusive)
2290					entry = make_readable_exclusive_migration_entry(
2291								page_to_pfn(subpage));
2293					entry = make_readable_migration_entry(
2294								page_to_pfn(subpage));
2295				if (pte_young(pteval))
2296					entry = make_migration_entry_young(entry);
2297				if (pte_dirty(pteval))
2298					entry = make_migration_entry_dirty(entry);
2299				swp_pte = swp_entry_to_pte(entry);
2300				if (pte_soft_dirty(pteval))
2301					swp_pte = pte_swp_mksoft_dirty(swp_pte);
2302				if (pte_uffd_wp(pteval))
2303					swp_pte = pte_swp_mkuffd_wp(swp_pte);
2304				if (folio_test_hugetlb(folio))
2305					set_huge_pte_at(mm, address, pvmw.pte, swp_pte,
2308					set_pte_at(mm, address, pvmw.pte, swp_pte);
2309				trace_set_migration_pte(address, pte_val(swp_pte),
2310							folio_order(folio));
2312				 * No need to invalidate here; it will synchronize
2313				 * against the special swap migration pte.
2317			if (unlikely(folio_test_hugetlb(folio)))
2318				hugetlb_remove_rmap(folio);
2320				folio_remove_rmap_pte(folio, subpage, vma);
2321			if (vma->vm_flags & VM_LOCKED)
2322				mlock_drain_local();
2326		mmu_notifier_invalidate_range_end(&range);
2332	 * try_to_migrate - try to replace all page table mappings with swap entries
2333	 * @folio: the folio to replace page table entries for
2334	 * @flags: action and flags
2336	 * Tries to remove all the page table entries which are mapping this folio and
2337	 * replace them with special swap entries. Caller must hold the folio lock.
2339	void try_to_migrate(struct folio *folio, enum ttu_flags flags)
2341		struct rmap_walk_control rwc = {
2342			.rmap_one = try_to_migrate_one,
2343			.arg = (void *)flags,
2344			.done = folio_not_mapped,
2345			.anon_lock = folio_lock_anon_vma_read,
2349		 * Migration always ignores mlock and only supports TTU_RMAP_LOCKED and
2350		 * TTU_SPLIT_HUGE_PMD, TTU_SYNC, and TTU_BATCH_FLUSH flags.
2352		if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
2353						TTU_SYNC | TTU_BATCH_FLUSH)))
2356		if (folio_is_zone_device(folio) &&
2357		    (!folio_is_device_private(folio) && !folio_is_device_coherent(folio)))
2361		 * During exec, a temporary VMA is setup and later moved.
2362		 * The VMA is moved under the anon_vma lock but not the
2363		 * page tables leading to a race where migration cannot
2364		 * find the migration ptes. Rather than increasing the
2365		 * locking requirements of exec(), migration skips
2366		 * temporary VMAs until after exec() completes.
2368		if (!folio_test_ksm(folio) && folio_test_anon(folio))
2369			rwc.invalid_vma = invalid_migration_vma;
2371		if (flags & TTU_RMAP_LOCKED)
2372			rmap_walk_locked(folio, &rwc);
2374			rmap_walk(folio, &rwc);
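
/*
 * Illustrative sketch (not part of rmap.c): the calling pattern the migration
 * core uses around try_to_migrate().  The "example_" name is hypothetical;
 * see migrate_folio_unmap() in mm/migrate.c for the real caller.
 */
#if 0
static bool example_freeze_for_migration(struct folio *src)
{
	/* The folio lock must be held across the rmap walk. */
	VM_BUG_ON_FOLIO(!folio_test_locked(src), src);

	/*
	 * Replace every mapping of src with a migration entry.  Only the
	 * TTU_RMAP_LOCKED, TTU_SPLIT_HUGE_PMD, TTU_SYNC and TTU_BATCH_FLUSH
	 * flags are accepted (see the WARN_ON_ONCE() in try_to_migrate()).
	 */
	try_to_migrate(src, 0);

	/*
	 * Only when no mapping remains may the contents be copied; the
	 * migration entries are later torn down by remove_migration_ptes()
	 * once the destination folio is ready (or on failure).
	 */
	return !folio_mapped(src);
}
#endif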
2377	#ifdef CONFIG_DEVICE_PRIVATE
2378	struct make_exclusive_args {
2379		struct mm_struct *mm;
2380		unsigned long address;
2385	static bool page_make_device_exclusive_one(struct folio *folio,
2386			struct vm_area_struct *vma, unsigned long address, void *priv)
2388		struct mm_struct *mm = vma->vm_mm;
2389		DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
2390		struct make_exclusive_args *args = priv;
2392		struct page *subpage;
2394		struct mmu_notifier_range range;
2399		mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0,
2400					      vma->vm_mm, address, min(vma->vm_end,
2401					      address + folio_size(folio)),
2403		mmu_notifier_invalidate_range_start(&range);
2405		while (page_vma_mapped_walk(&pvmw)) {
2406			/* Unexpected PMD-mapped THP? */
2407			VM_BUG_ON_FOLIO(!pvmw.pte, folio);
2409			ptent = ptep_get(pvmw.pte);
2410			if (!pte_present(ptent)) {
2412				page_vma_mapped_walk_done(&pvmw);
2416			subpage = folio_page(folio,
2417					pte_pfn(ptent) - folio_pfn(folio));
2418			address = pvmw.address;
2420			/* Nuke the page table entry. */
2421			flush_cache_page(vma, address, pte_pfn(ptent));
2422			pteval = ptep_clear_flush(vma, address, pvmw.pte);
2424			/* Set the dirty flag on the folio now the pte is gone. */
2425			if (pte_dirty(pteval))
2426				folio_mark_dirty(folio);
2429			 * Check that our target page is still mapped at the expected
2432			if (args->mm == mm && args->address == address &&
2437			 * Store the pfn of the page in a special migration
2438			 * pte. do_swap_page() will wait until the migration
2439			 * pte is removed and then restart fault handling.
2441			if (pte_write(pteval))
2442				entry = make_writable_device_exclusive_entry(
2443							page_to_pfn(subpage));
2445				entry = make_readable_device_exclusive_entry(
2446							page_to_pfn(subpage));
2447			swp_pte = swp_entry_to_pte(entry);
2448			if (pte_soft_dirty(pteval))
2449				swp_pte = pte_swp_mksoft_dirty(swp_pte);
2450			if (pte_uffd_wp(pteval))
2451				swp_pte = pte_swp_mkuffd_wp(swp_pte);
2453			set_pte_at(mm, address, pvmw.pte, swp_pte);
2456			 * There is a reference on the page for the swap entry which has
2457			 * been removed, so shouldn't take another.
2459			folio_remove_rmap_pte(folio, subpage, vma);
2462		mmu_notifier_invalidate_range_end(&range);
2468	 * folio_make_device_exclusive - Mark the folio exclusively owned by a device.
2469	 * @folio: The folio to replace page table entries for.
2470	 * @mm: The mm_struct where the folio is expected to be mapped.
2471	 * @address: Address where the folio is expected to be mapped.
2472	 * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier callbacks
2474	 * Tries to remove all the page table entries which are mapping this
2475	 * folio and replace them with special device exclusive swap entries to
2476	 * grant a device exclusive access to the folio.
2478	 * Context: Caller must hold the folio lock.
2479	 * Return: false if the page is still mapped, or if it could not be unmapped
2480	 * from the expected address. Otherwise returns true (success).
2482	static bool folio_make_device_exclusive(struct folio *folio,
2483			struct mm_struct *mm, unsigned long address, void *owner)
2485		struct make_exclusive_args args = {
2491		struct rmap_walk_control rwc = {
2492			.rmap_one = page_make_device_exclusive_one,
2493			.done = folio_not_mapped,
2494			.anon_lock = folio_lock_anon_vma_read,
2499		 * Restrict to anonymous folios for now to avoid potential writeback
2502		if (!folio_test_anon(folio))
2505		rmap_walk(folio, &rwc);
2507		return args.valid && !folio_mapcount(folio);
2511	 * make_device_exclusive_range() - Mark a range for exclusive use by a device
2512	 * @mm: mm_struct of associated target process
2513	 * @start: start of the region to mark for exclusive device access
2514	 * @end: end address of region
2515	 * @pages: returns the pages which were successfully marked for exclusive access
2516	 * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering
2518	 * Returns: number of pages found in the range by GUP. A page is marked for
2519	 * exclusive access only if the page pointer is non-NULL.
2521	 * This function finds ptes mapping page(s) to the given address range, locks
2522	 * them and replaces mappings with special swap entries preventing userspace CPU
2523	 * access. On fault these entries are replaced with the original mapping after
2524	 * calling MMU notifiers.
2526	 * A driver using this to program access from a device must use a mmu notifier
2527	 * critical section to hold a device specific lock during programming. Once
2528	 * programming is complete it should drop the page lock and reference after
2529	 * which point CPU access to the page will revoke the exclusive access.
2531	int make_device_exclusive_range(struct mm_struct *mm, unsigned long start,
2532					unsigned long end, struct page **pages,
2535		long npages = (end - start) >> PAGE_SHIFT;
2538		npages = get_user_pages_remote(mm, start, npages,
2539					       FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD,
2544		for (i = 0; i < npages; i++, start += PAGE_SIZE) {
2545			struct folio *folio = page_folio(pages[i]);
2546			if (PageTail(pages[i]) || !folio_trylock(folio)) {
2552			if (!folio_make_device_exclusive(folio, mm, start, owner)) {
2553				folio_unlock(folio);
2561	EXPORT_SYMBOL_GPL(make_device_exclusive_range);
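
/*
 * Illustrative sketch (not part of rmap.c): a driver-side caller of
 * make_device_exclusive_range().  The "example_" name is hypothetical and the
 * error handling is simplified; nouveau's SVM code is the in-tree user.
 */
#if 0
static int example_grab_range_for_device(struct mm_struct *mm,
					 unsigned long start, unsigned long end,
					 void *owner)
{
	long npages = (end - start) >> PAGE_SHIFT;
	struct page **pages;
	int ret;
	long i;

	pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/* The GUP call inside requires the mmap lock. */
	mmap_read_lock(mm);
	ret = make_device_exclusive_range(mm, start, end, pages, owner);
	mmap_read_unlock(mm);

	for (i = 0; i < ret; i++) {
		if (!pages[i])	/* this page could not be made exclusive */
			continue;
		/*
		 * Program the device mapping here, under the driver lock that
		 * its MMU_NOTIFY_EXCLUSIVE callback also takes, then drop the
		 * page lock and reference that were taken on our behalf.
		 */
		unlock_page(pages[i]);
		put_page(pages[i]);
	}
	kfree(pages);
	return ret < 0 ? ret : 0;
}
#endif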
2564	void __put_anon_vma(struct anon_vma *anon_vma)
2566		struct anon_vma *root = anon_vma->root;
2568		anon_vma_free(anon_vma);
2569		if (root != anon_vma && atomic_dec_and_test(&root->refcount))
2570			anon_vma_free(root);
2573	static struct anon_vma *rmap_walk_anon_lock(const struct folio *folio,
2574						    struct rmap_walk_control *rwc)
2576		struct anon_vma *anon_vma;
2579			return rwc->anon_lock(folio, rwc);
2582		 * Note: remove_migration_ptes() cannot use folio_lock_anon_vma_read()
2583		 * because that depends on page_mapped(); but not all its usages
2584		 * are holding mmap_lock. Users without mmap_lock are required to
2585		 * take a reference count to prevent the anon_vma disappearing
2587		anon_vma = folio_anon_vma(folio);
2591		if (anon_vma_trylock_read(anon_vma))
2594		if (rwc->try_lock) {
2596			rwc->contended = true;
2600		anon_vma_lock_read(anon_vma);
2606	 * rmap_walk_anon - do something to anonymous page using the object-based
2608	 * @folio: the folio to be handled
2609	 * @rwc: control variable according to each walk type
2610	 * @locked: caller holds relevant rmap lock
2612	 * Find all the mappings of a folio using the mapping pointer and the vma
2613	 * chains contained in the anon_vma struct it points to.
2615	static void rmap_walk_anon(struct folio *folio,
2616			struct rmap_walk_control *rwc, bool locked)
2618		struct anon_vma *anon_vma;
2619		pgoff_t pgoff_start, pgoff_end;
2620		struct anon_vma_chain *avc;
2623			anon_vma = folio_anon_vma(folio);
2624			/* anon_vma disappear under us? */
2625			VM_BUG_ON_FOLIO(!anon_vma, folio);
2627			anon_vma = rmap_walk_anon_lock(folio, rwc);
2632		pgoff_start = folio_pgoff(folio);
2633		pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
2634		anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
2635				pgoff_start, pgoff_end) {
2636			struct vm_area_struct *vma = avc->vma;
2637			unsigned long address = vma_address(vma, pgoff_start,
2638					folio_nr_pages(folio));
2640			VM_BUG_ON_VMA(address == -EFAULT, vma);
2643			if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
2646			if (!rwc->rmap_one(folio, vma, address, rwc->arg))
2648			if (rwc->done && rwc->done(folio))
2653			anon_vma_unlock_read(anon_vma);
2657	 * rmap_walk_file - do something to file page using the object-based rmap method
2658	 * @folio: the folio to be handled
2659	 * @rwc: control variable according to each walk type
2660	 * @locked: caller holds relevant rmap lock
2662	 * Find all the mappings of a folio using the mapping pointer and the vma chains
2663	 * contained in the address_space struct it points to.
2665	static void rmap_walk_file(struct folio *folio,
2666			struct rmap_walk_control *rwc, bool locked)
2668		struct address_space *mapping = folio_mapping(folio);
2669		pgoff_t pgoff_start, pgoff_end;
2670		struct vm_area_struct *vma;
2673		 * The page lock not only makes sure that page->mapping cannot
2674		 * suddenly be NULLified by truncation, it makes sure that the
2675		 * structure at mapping cannot be freed and reused yet,
2676		 * so we can safely take mapping->i_mmap_rwsem.
2678		VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
2683		pgoff_start = folio_pgoff(folio);
2684		pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
2686			if (i_mmap_trylock_read(mapping))
2689			if (rwc->try_lock) {
2690				rwc->contended = true;
2694			i_mmap_lock_read(mapping);
2697		vma_interval_tree_foreach(vma, &mapping->i_mmap,
2698				pgoff_start, pgoff_end) {
2699			unsigned long address = vma_address(vma, pgoff_start,
2700				       folio_nr_pages(folio));
2702			VM_BUG_ON_VMA(address == -EFAULT, vma);
2705			if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
2708			if (!rwc->rmap_one(folio, vma, address, rwc->arg))
2710			if (rwc->done && rwc->done(folio))
2716			i_mmap_unlock_read(mapping);
2719	void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc)
2721		if (unlikely(folio_test_ksm(folio)))
2722			rmap_walk_ksm(folio, rwc);
2723		else if (folio_test_anon(folio))
2724			rmap_walk_anon(folio, rwc, false);
2726			rmap_walk_file(folio, rwc, false);
2729	/* Like rmap_walk, but caller holds relevant rmap lock */
2730	void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc)
2732		/* no ksm support for now */
2733		VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio);
2734		if (folio_test_anon(folio))
2735			rmap_walk_anon(folio, rwc, true);
2737			rmap_walk_file(folio, rwc, true);
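
/*
 * Illustrative sketch (not part of rmap.c): a minimal custom rmap walk built
 * on the rmap_walk_control callbacks used throughout this file.  All
 * "example_" names are hypothetical; only .rmap_one and .arg are populated,
 * the other hooks (.done, .anon_lock, .invalid_vma) stay at their defaults.
 */
#if 0
struct example_walk_state {
	unsigned long nr_vmas;
};

static bool example_count_one(struct folio *folio, struct vm_area_struct *vma,
			      unsigned long address, void *arg)
{
	struct example_walk_state *state = arg;

	state->nr_vmas++;	/* called once per VMA mapping the folio */
	return true;		/* returning false would stop the walk */
}

static unsigned long example_count_mapping_vmas(struct folio *folio)
{
	struct example_walk_state state = { };
	struct rmap_walk_control rwc = {
		.rmap_one = example_count_one,
		.arg = &state,
	};

	/* Callers in this file hold the folio lock; rmap_walk_file() asserts it. */
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	rmap_walk(folio, &rwc);
	return state.nr_vmas;
}
#endif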
2740	#ifdef CONFIG_HUGETLB_PAGE
2742	 * The following two functions are for anonymous (private mapped) hugepages.
2743	 * Unlike common anonymous pages, anonymous hugepages have no accounting code
2744	 * and no lru code, because we handle hugepages differently from common pages.
2746	void hugetlb_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
2747			unsigned long address, rmap_t flags)
2749		VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
2750		VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
2752		atomic_inc(&folio->_entire_mapcount);
2753		atomic_inc(&folio->_large_mapcount);
2754		if (flags & RMAP_EXCLUSIVE)
2755			SetPageAnonExclusive(&folio->page);
2756		VM_WARN_ON_FOLIO(folio_entire_mapcount(folio) > 1 &&
2757				 PageAnonExclusive(&folio->page), folio);
2760	void hugetlb_add_new_anon_rmap(struct folio *folio,
2761			struct vm_area_struct *vma, unsigned long address)
2763		VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
2765		BUG_ON(address < vma->vm_start || address >= vma->vm_end);
2766		/* increment count (starts at -1) */
2767		atomic_set(&folio->_entire_mapcount, 0);
2768		atomic_set(&folio->_large_mapcount, 0);
2769		folio_clear_hugetlb_restore_reserve(folio);
2770		__folio_set_anon(folio, vma, address, true);
2771		SetPageAnonExclusive(&folio->page);
2773	#endif /* CONFIG_HUGETLB_PAGE */