/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H

#include <linux/atomic.h>
#include <linux/huge_mm.h>
#include <linux/mm_types.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/userfaultfd_k.h>
#include <linux/swapops.h>
/**
 * folio_is_file_lru - Should the folio be on a file LRU or anon LRU?
 * @folio: The folio to test.
 *
 * We would like to get this info without a page flag, but the state
 * needs to survive until the folio is last deleted from the LRU, which
 * could be as far down as __page_cache_release.
 *
 * Return: An integer (not a boolean!) used to sort a folio onto the
 * right LRU list and to account folios correctly.
 * 1 if @folio is a regular filesystem backed page cache folio
 * or a lazily freed anonymous folio (e.g. via MADV_FREE).
 * 0 if @folio is a normal anonymous folio, a tmpfs folio or otherwise
 * ram or swap backed folio.
 */
static inline int folio_is_file_lru(struct folio *folio)
{
	return !folio_test_swapbacked(folio);
}

static inline int page_is_file_lru(struct page *page)
{
	return folio_is_file_lru(page_folio(page));
}
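/*
 * Illustrative note: the integer return value is typically used to offset
 * into per-type arrays, e.g. (see lru_gen_update_size() below)
 *
 *	int type = folio_is_file_lru(folio);
 *	enum lru_list lru = type * LRU_INACTIVE_FILE;
 *
 * which yields LRU_INACTIVE_ANON for anon folios and LRU_INACTIVE_FILE for
 * file-backed ones.
 */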
static __always_inline void __update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				long nr_pages)
{
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);

	lockdep_assert_held(&lruvec->lru_lock);
	WARN_ON_ONCE(nr_pages != (int)nr_pages);

	__mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
	__mod_zone_page_state(&pgdat->node_zones[zid],
				NR_ZONE_LRU_BASE + lru, nr_pages);
}
static __always_inline void update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				long nr_pages)
{
	__update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
	mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}
/**
 * __folio_clear_lru_flags - Clear page lru flags before releasing a page.
 * @folio: The folio that was on lru and now has a zero reference.
 */
static __always_inline void __folio_clear_lru_flags(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio);

	__folio_clear_lru(folio);

	/* this shouldn't happen, so leave the flags to bad_page() */
	if (folio_test_active(folio) && folio_test_unevictable(folio))
		return;

	__folio_clear_active(folio);
	__folio_clear_unevictable(folio);
}
/**
 * folio_lru_list - Which LRU list should a folio be on?
 * @folio: The folio to test.
 *
 * Return: The LRU list a folio should be on, as an index
 * into the array of LRU lists.
 */
static __always_inline enum lru_list folio_lru_list(struct folio *folio)
{
	enum lru_list lru;

	VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);

	if (folio_test_unevictable(folio))
		return LRU_UNEVICTABLE;

	lru = folio_is_file_lru(folio) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
	if (folio_test_active(folio))
		lru += LRU_ACTIVE;

	return lru;
}
#ifdef CONFIG_LRU_GEN

#ifdef CONFIG_LRU_GEN_ENABLED
static inline bool lru_gen_enabled(void)
{
	DECLARE_STATIC_KEY_TRUE(lru_gen_caps[NR_LRU_GEN_CAPS]);

	return static_branch_likely(&lru_gen_caps[LRU_GEN_CORE]);
}
#else
static inline bool lru_gen_enabled(void)
{
	DECLARE_STATIC_KEY_FALSE(lru_gen_caps[NR_LRU_GEN_CAPS]);

	return static_branch_unlikely(&lru_gen_caps[LRU_GEN_CORE]);
}
#endif
static inline bool lru_gen_in_fault(void)
{
	return current->in_lru_fault;
}

static inline int lru_gen_from_seq(unsigned long seq)
{
	return seq % MAX_NR_GENS;
}

static inline int lru_hist_from_seq(unsigned long seq)
{
	return seq % NR_HIST_GENS;
}

static inline int lru_tier_from_refs(int refs)
{
	VM_WARN_ON_ONCE(refs > BIT(LRU_REFS_WIDTH));

	/* see the comment in folio_lru_refs() */
	return order_base_2(refs + 1);
}
static inline int folio_lru_refs(struct folio *folio)
{
	unsigned long flags = READ_ONCE(folio->flags);
	bool workingset = flags & BIT(PG_workingset);

	/*
	 * Return the number of accesses beyond PG_referenced, i.e., N-1 if the
	 * total number of accesses is N>1, since N=0,1 both map to the first
	 * tier. lru_tier_from_refs() will account for this off-by-one. Also see
	 * the comment on MAX_NR_TIERS.
	 */
	return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + workingset;
}
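/*
 * Worked example for the off-by-one above (ignoring the PG_workingset
 * contribution): with N total accesses, folio_lru_refs() returns N - 1, so
 * lru_tier_from_refs(N - 1) == order_base_2(N). N=1 maps to tier 0, N=2 to
 * tier 1, N=3 or 4 to tier 2, and so on.
 */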
static inline void folio_clear_lru_refs(struct folio *folio)
{
	set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, 0);
}

static inline int folio_lru_gen(struct folio *folio)
{
	unsigned long flags = READ_ONCE(folio->flags);

	return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
}
static inline bool lru_gen_is_active(struct lruvec *lruvec, int gen)
{
	unsigned long max_seq = lruvec->lrugen.max_seq;

	VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);

	/* see the comment on MIN_NR_GENS */
	return gen == lru_gen_from_seq(max_seq) || gen == lru_gen_from_seq(max_seq - 1);
}
static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *folio,
				       int old_gen, int new_gen)
{
	int type = folio_is_file_lru(folio);
	int zone = folio_zonenum(folio);
	int delta = folio_nr_pages(folio);
	enum lru_list lru = type * LRU_INACTIVE_FILE;
	struct lru_gen_folio *lrugen = &lruvec->lrugen;

	VM_WARN_ON_ONCE(old_gen != -1 && old_gen >= MAX_NR_GENS);
	VM_WARN_ON_ONCE(new_gen != -1 && new_gen >= MAX_NR_GENS);
	VM_WARN_ON_ONCE(old_gen == -1 && new_gen == -1);

	if (old_gen >= 0)
		WRITE_ONCE(lrugen->nr_pages[old_gen][type][zone],
			   lrugen->nr_pages[old_gen][type][zone] - delta);
	if (new_gen >= 0)
		WRITE_ONCE(lrugen->nr_pages[new_gen][type][zone],
			   lrugen->nr_pages[new_gen][type][zone] + delta);

	/* addition */
	if (old_gen < 0) {
		if (lru_gen_is_active(lruvec, new_gen))
			lru += LRU_ACTIVE;
		__update_lru_size(lruvec, lru, zone, delta);
		return;
	}

	/* deletion */
	if (new_gen < 0) {
		if (lru_gen_is_active(lruvec, old_gen))
			lru += LRU_ACTIVE;
		__update_lru_size(lruvec, lru, zone, -delta);
		return;
	}

	/* promotion */
	if (!lru_gen_is_active(lruvec, old_gen) && lru_gen_is_active(lruvec, new_gen)) {
		__update_lru_size(lruvec, lru, zone, -delta);
		__update_lru_size(lruvec, lru + LRU_ACTIVE, zone, delta);
	}

	/* demotion requires isolation, e.g., lru_deactivate_fn() */
	VM_WARN_ON_ONCE(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen));
}
static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	unsigned long seq;
	unsigned long flags;
	unsigned long mask = LRU_GEN_MASK;
	int gen = folio_lru_gen(folio);
	int type = folio_is_file_lru(folio);
	int zone = folio_zonenum(folio);
	struct lru_gen_folio *lrugen = &lruvec->lrugen;

	VM_WARN_ON_ONCE_FOLIO(gen != -1, folio);

	if (folio_test_unevictable(folio) || !lrugen->enabled)
		return false;
	/*
	 * There are four common cases for this page:
	 * 1. If it's hot, i.e., freshly faulted in, add it to the youngest
	 *    generation, and it's protected over the rest below.
	 * 2. If it can't be evicted immediately, i.e., a dirty page pending
	 *    writeback, add it to the second youngest generation.
	 * 3. If it should be evicted first, e.g., cold and clean from
	 *    folio_rotate_reclaimable(), add it to the oldest generation.
	 * 4. Everything else falls between 2 & 3 above and is added to the
	 *    second oldest generation if it's considered inactive, or the
	 *    oldest generation otherwise. See lru_gen_is_active().
	 */
	if (folio_test_active(folio))
		seq = lrugen->max_seq;
	else if ((type == LRU_GEN_ANON && !folio_test_swapcache(folio)) ||
		 (folio_test_reclaim(folio) &&
		  (folio_test_dirty(folio) || folio_test_writeback(folio))))
		seq = lrugen->max_seq - 1;
	else if (reclaiming || lrugen->min_seq[type] + MIN_NR_GENS >= lrugen->max_seq)
		seq = lrugen->min_seq[type];
	else
		seq = lrugen->min_seq[type] + 1;

	gen = lru_gen_from_seq(seq);
	flags = (gen + 1UL) << LRU_GEN_PGOFF;
	/* see the comment on MIN_NR_GENS about PG_active */
	/*
	 * Don't clear PG_workingset here because it can affect PSI accounting
	 * if the activation is due to workingset refault.
	 */
	if (folio_test_active(folio))
		mask |= LRU_REFS_MASK | BIT(PG_referenced) | BIT(PG_active);
	set_mask_bits(&folio->flags, mask, flags);

	lru_gen_update_size(lruvec, folio, -1, gen);
	/* for folio_rotate_reclaimable() */
	if (reclaiming)
		list_add_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
	else
		list_add(&folio->lru, &lrugen->folios[gen][type][zone]);

	return true;
}
static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	unsigned long flags;
	int gen = folio_lru_gen(folio);

	if (gen < 0)
		return false;

	VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
	VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);

	/* for folio_migrate_flags() */
	flags = !reclaiming && lru_gen_is_active(lruvec, gen) ? BIT(PG_active) : 0;
	flags = set_mask_bits(&folio->flags, LRU_GEN_MASK, flags);
	gen = ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;

	lru_gen_update_size(lruvec, folio, gen, -1);
	list_del(&folio->lru);

	return true;
}
static inline void folio_migrate_refs(struct folio *new, struct folio *old)
{
	unsigned long refs = READ_ONCE(old->flags) & LRU_REFS_MASK;

	set_mask_bits(&new->flags, LRU_REFS_MASK, refs);
}
#else /* !CONFIG_LRU_GEN */

static inline bool lru_gen_enabled(void)
{
	return false;
}

static inline bool lru_gen_in_fault(void)
{
	return false;
}

static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	return false;
}

static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	return false;
}

static inline void folio_migrate_refs(struct folio *new, struct folio *old)
{
}

#endif /* CONFIG_LRU_GEN */
static __always_inline
void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	if (lru_gen_add_folio(lruvec, folio, false))
		return;

	update_lru_size(lruvec, lru, folio_zonenum(folio),
			folio_nr_pages(folio));
	if (lru != LRU_UNEVICTABLE)
		list_add(&folio->lru, &lruvec->lists[lru]);
}
static __always_inline
void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	if (lru_gen_add_folio(lruvec, folio, true))
		return;

	update_lru_size(lruvec, lru, folio_zonenum(folio),
			folio_nr_pages(folio));
	/* This is not expected to be used on LRU_UNEVICTABLE */
	list_add_tail(&folio->lru, &lruvec->lists[lru]);
}
static __always_inline
void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	if (lru_gen_del_folio(lruvec, folio, false))
		return;

	if (lru != LRU_UNEVICTABLE)
		list_del(&folio->lru);
	update_lru_size(lruvec, lru, folio_zonenum(folio),
			-folio_nr_pages(folio));
}
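/*
 * Callers of the lruvec helpers above must hold the lruvec lock, as asserted
 * by __update_lru_size(). An illustrative, simplified sequence:
 *
 *	spin_lock_irq(&lruvec->lru_lock);
 *	lruvec_add_folio(lruvec, folio);
 *	spin_unlock_irq(&lruvec->lru_lock);
 */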
#ifdef CONFIG_ANON_VMA_NAME
/* mmap_lock should be read-locked */
static inline void anon_vma_name_get(struct anon_vma_name *anon_name)
{
	if (anon_name)
		kref_get(&anon_name->kref);
}

static inline void anon_vma_name_put(struct anon_vma_name *anon_name)
{
	if (anon_name)
		kref_put(&anon_name->kref, anon_vma_name_free);
}

static inline
struct anon_vma_name *anon_vma_name_reuse(struct anon_vma_name *anon_name)
{
	/* Prevent anon_name refcount saturation early on */
	if (kref_read(&anon_name->kref) < REFCOUNT_MAX) {
		anon_vma_name_get(anon_name);
		return anon_name;
	}
	return anon_vma_name_alloc(anon_name->name);
}

static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma)
{
	struct anon_vma_name *anon_name = anon_vma_name(orig_vma);

	if (anon_name)
		new_vma->anon_name = anon_vma_name_reuse(anon_name);
}

static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
	/*
	 * Not using anon_vma_name because it generates a warning if mmap_lock
	 * is not held, which might be the case here.
	 */
	anon_vma_name_put(vma->anon_name);
}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	if (anon_name1 == anon_name2)
		return true;

	return anon_name1 && anon_name2 &&
		!strcmp(anon_name1->name, anon_name2->name);
}

#else /* CONFIG_ANON_VMA_NAME */
static inline void anon_vma_name_get(struct anon_vma_name *anon_name) {}
static inline void anon_vma_name_put(struct anon_vma_name *anon_name) {}
static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma) {}
static inline void free_anon_vma_name(struct vm_area_struct *vma) {}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	return true;
}

#endif /* CONFIG_ANON_VMA_NAME */
static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_set(&mm->tlb_flush_pending, 0);
}
static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_inc(&mm->tlb_flush_pending);
	/*
	 * The only time this value is relevant is when there are indeed pages
	 * to flush. And we'll only flush pages after changing them, which
	 * requires the PTL.
	 *
	 * So the ordering here is:
	 *
	 *	atomic_inc(&mm->tlb_flush_pending);
	 *	spin_lock(&ptl);
	 *	...
	 *	set_pte_at();
	 *	spin_unlock(&ptl);
	 *
	 *				spin_lock(&ptl)
	 *				mm_tlb_flush_pending();
	 *				....
	 *				spin_unlock(&ptl);
	 *
	 *	flush_tlb_range();
	 *	atomic_dec(&mm->tlb_flush_pending);
	 *
	 * Where the increment is constrained by the PTL unlock, it thus
	 * ensures that the increment is visible if the PTE modification is
	 * visible. After all, if there is no PTE modification, nobody cares
	 * about TLB flushes either.
	 *
	 * This very much relies on users (mm_tlb_flush_pending() and
	 * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
	 * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
	 * locks (PPC) the unlock of one doesn't order against the lock of
	 * another PTL.
	 *
	 * The decrement is ordered by the flush_tlb_range(), such that
	 * mm_tlb_flush_pending() will not return false unless all flushes have
	 * completed.
	 */
}
static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * See inc_tlb_flush_pending().
	 *
	 * This cannot be smp_mb__before_atomic() because smp_mb() simply does
	 * not order against TLB invalidate completion, which is what we need.
	 *
	 * Therefore we must rely on tlb_flush_*() to guarantee order.
	 */
	atomic_dec(&mm->tlb_flush_pending);
}
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * Must be called after having acquired the PTL; orders against that
	 * PTL's release and therefore ensures that if we observe the modified
	 * PTE we must also observe the increment from inc_tlb_flush_pending().
	 *
	 * That is, it only guarantees to return true if there is a flush
	 * pending for _this_ PTL.
	 */
	return atomic_read(&mm->tlb_flush_pending);
}
static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
	/*
	 * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
	 * for which there is a TLB flush pending in order to guarantee
	 * we've seen both that PTE modification and the increment.
	 *
	 * (no requirement on actually still holding the PTL, that is irrelevant)
	 */
	return atomic_read(&mm->tlb_flush_pending) > 1;
}
/*
 * Computes the pte marker to copy from the given source entry into dst_vma.
 * If no marker should be copied, returns 0.
 * The caller should insert a new pte created with make_pte_marker().
 */
static inline pte_marker copy_pte_marker(
		swp_entry_t entry, struct vm_area_struct *dst_vma)
{
	pte_marker srcm = pte_marker_get(entry);
	/* Always copy error entries. */
	pte_marker dstm = srcm & (PTE_MARKER_POISONED | PTE_MARKER_GUARD);

	/* Only copy PTE markers if UFFD register matches. */
	if ((srcm & PTE_MARKER_UFFD_WP) && userfaultfd_wp(dst_vma))
		dstm |= PTE_MARKER_UFFD_WP;

	return dstm;
}
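/*
 * Illustrative (simplified) caller sketch: a copy path turns a non-zero
 * marker back into a pte with make_pte_marker(), roughly (dst_mm, addr and
 * dst_pte are placeholder names)
 *
 *	pte_marker marker = copy_pte_marker(entry, dst_vma);
 *	if (marker)
 *		set_pte_at(dst_mm, addr, dst_pte, make_pte_marker(marker));
 */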
/*
 * If this pte is wr-protected by uffd-wp in any form, arm the special pte to
 * replace a none pte. NOTE! This should only be called when *pte is already
 * cleared so we will never accidentally replace something valuable. Meanwhile
 * a none pte also means we are not demoting the pte, so a tlb flush is not
 * needed. E.g., when the pte is cleared the caller should have taken care of
 * the tlb flush.
 *
 * Must be called with pgtable lock held so that no thread will see the none
 * pte, and if they see it, they'll fault and serialize at the pgtable lock.
 *
 * This function is a no-op if PTE_MARKER_UFFD_WP is not enabled.
 */
static inline void
pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
			      pte_t *pte, pte_t pteval)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	bool arm_uffd_pte = false;

	/* The current status of the pte should be "cleared" before calling */
	WARN_ON_ONCE(!pte_none(ptep_get(pte)));

	/*
	 * NOTE: userfaultfd_wp_unpopulated() doesn't need this whole
	 * thing, because when zapping either it means it's dropping the
	 * page, or in TTU where the present pte will be quickly replaced
	 * with a swap pte. There's no way of leaking the bit.
	 */
	if (vma_is_anonymous(vma) || !userfaultfd_wp(vma))
		return;

	/* A uffd-wp wr-protected normal pte */
	if (unlikely(pte_present(pteval) && pte_uffd_wp(pteval)))
		arm_uffd_pte = true;

	/*
	 * A uffd-wp wr-protected swap pte. Note: this should even cover an
	 * existing pte marker with uffd-wp bit set.
	 */
	if (unlikely(pte_swp_uffd_wp_any(pteval)))
		arm_uffd_pte = true;

	if (unlikely(arm_uffd_pte))
		set_pte_at(vma->vm_mm, addr, pte,
			   make_pte_marker(PTE_MARKER_UFFD_WP));
#endif
}
static inline bool vma_has_recency(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))
		return false;

	if (vma->vm_file && (vma->vm_file->f_mode & FMODE_NOREUSE))