/* SPDX-License-Identifier: GPL-2.0-or-later */
/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/khugepaged.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/pagemap.h>
#include <linux/pagewalk.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/swap_cgroup.h>
#include <linux/tracepoint-defs.h>

/* Internal core VMA manipulation functions. */
#include "vma.h"

/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
			__GFP_NOLOCKDEP)

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

/*
 * Different from WARN_ON_ONCE(), no warning will be issued
 * when we specify __GFP_NOWARN.
 */
#define WARN_ON_ONCE_GFP(cond, gfp)	({				\
	static bool __section(".data..once") __warned;			\
	int __ret_warn_once = !!(cond);					\
									\
	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
		__warned = true;					\
		WARN_ON(1);						\
	}								\
	unlikely(__ret_warn_once);					\
})
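
/*
 * Example (illustrative sketch, not used anywhere in this header): a
 * hypothetical allocation helper that warns at most once on oversized
 * requests, but stays silent when the caller passed __GFP_NOWARN:
 *
 *	static inline struct page *alloc_checked(gfp_t gfp, unsigned int order)
 *	{
 *		if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp))
 *			return NULL;
 *		return alloc_pages(gfp, order);
 *	}
 *
 * With gfp = GFP_KERNEL | __GFP_NOWARN the condition is still evaluated and
 * reported via the return value; only the WARN_ON() splat is suppressed.
 */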

void page_writeback_init(void);

/*
 * If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages,
 * its nr_pages_mapped would be 0x400000: choose the ENTIRELY_MAPPED bit
 * above that range, instead of 2*(PMD_SIZE/PAGE_SIZE). Hugetlb currently
 * leaves nr_pages_mapped at 0, but avoid surprise if it participates later.
 */
#define ENTIRELY_MAPPED		0x800000
#define FOLIO_PAGES_MAPPED	(ENTIRELY_MAPPED - 1)

/*
 * Flags passed to __show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */

/*
 * How many individual pages have an elevated _mapcount.  Excludes
 * the folio's entire_mapcount.
 *
 * Don't use this function outside of debugging code.
 */
static inline int folio_nr_pages_mapped(const struct folio *folio)
{
	return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
}

/*
 * Retrieve the first entry of a folio based on a provided entry within the
 * folio. We cannot rely on folio->swap as there is no guarantee that it has
 * been initialized. Used for calling arch_swap_restore()
 */
static inline swp_entry_t folio_swap(swp_entry_t entry,
		const struct folio *folio)
{
	swp_entry_t swap = {
		.val = ALIGN_DOWN(entry.val, folio_nr_pages(folio)),
	};

	return swap;
}
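
/*
 * Example (illustrative, hypothetical values): for an order-2 folio
 * (folio_nr_pages() == 4) whose first swap entry has value 8, an entry
 * with .val == 10 inside that folio yields ALIGN_DOWN(10, 4) == 8, i.e.
 * the folio's first swap entry.
 */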

static inline void *folio_raw_mapping(const struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
}

/*
 * This is a file-backed mapping, and is about to be memory mapped - invoke its
 * mmap hook and safely handle error conditions. On error, VMA hooks will be
 * mutated.
 *
 * @file: File which backs the mapping.
 * @vma:  VMA which we are mapping.
 *
 * Returns: 0 if success, error otherwise.
 */
static inline int mmap_file(struct file *file, struct vm_area_struct *vma)
{
	int err = call_mmap(file, vma);

	if (likely(!err))
		return 0;

	/*
	 * OK, we tried to call the file hook for mmap(), but an error
	 * arose. The mapping is in an inconsistent state and we must not invoke
	 * any further hooks on it.
	 */
	vma->vm_ops = &vma_dummy_vm_ops;

	return err;
}

/*
 * If the VMA has a close hook then close it, and since closing it might leave
 * it in an inconsistent state which makes the use of any hooks suspect, clear
 * them down by installing dummy empty hooks.
 */
static inline void vma_close(struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->close) {
		vma->vm_ops->close(vma);

		/*
		 * The mapping is in an inconsistent state, and no further hooks
		 * may be invoked upon it.
		 */
		vma->vm_ops = &vma_dummy_vm_ops;
	}
}

#ifdef CONFIG_MMU

/* Flags for folio_pte_batch(). */
typedef int __bitwise fpb_t;

/* Compare PTEs after pte_mkclean(), ignoring the dirty bit. */
#define FPB_IGNORE_DIRTY		((__force fpb_t)BIT(0))

/* Compare PTEs after pte_clear_soft_dirty(), ignoring the soft-dirty bit. */
#define FPB_IGNORE_SOFT_DIRTY		((__force fpb_t)BIT(1))

static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
{
	if (flags & FPB_IGNORE_DIRTY)
		pte = pte_mkclean(pte);
	if (likely(flags & FPB_IGNORE_SOFT_DIRTY))
		pte = pte_clear_soft_dirty(pte);
	return pte_wrprotect(pte_mkold(pte));
}
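
/*
 * Example (illustrative sketch, hypothetical PTE values): two PTEs mapping
 * the same PFN that differ only in ignored bits compare equal after
 * normalization:
 *
 *	pte_t a = pte_mkold(pte_mkclean(orig));
 *	pte_t b = pte_mkyoung(pte_mkdirty(orig));
 *	fpb_t flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
 *
 *	pte_same(__pte_batch_clear_ignored(a, flags),
 *		 __pte_batch_clear_ignored(b, flags))	== true
 *
 * This is the property folio_pte_batch() below relies on when it advances
 * expected_pte across a batch.
 */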

/**
 * folio_pte_batch - detect a PTE batch for a large folio
 * @folio: The large folio to detect a PTE batch for.
 * @addr: The user virtual address the first page is mapped at.
 * @start_ptep: Page table pointer for the first entry.
 * @pte: Page table entry for the first page.
 * @max_nr: The maximum number of table entries to consider.
 * @flags: Flags to modify the PTE batch semantics.
 * @any_writable: Optional pointer to indicate whether any entry except the
 *		  first one is writable.
 * @any_young: Optional pointer to indicate whether any entry except the
 *		  first one is young.
 * @any_dirty: Optional pointer to indicate whether any entry except the
 *		  first one is dirty.
 *
 * Detect a PTE batch: consecutive (present) PTEs that map consecutive
 * pages of the same large folio.
 *
 * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN,
 * the accessed bit, writable bit, dirty bit (with FPB_IGNORE_DIRTY) and
 * soft-dirty bit (with FPB_IGNORE_SOFT_DIRTY).
 *
 * start_ptep must map any page of the folio. max_nr must be at least one and
 * must be limited by the caller so scanning cannot exceed a single page table.
 *
 * Return: the number of table entries in the batch.
 */
static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
		pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags,
		bool *any_writable, bool *any_young, bool *any_dirty)
{
	unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
	const pte_t *end_ptep = start_ptep + max_nr;
	pte_t expected_pte, *ptep;
	bool writable, young, dirty;
	int nr;

	if (any_writable)
		*any_writable = false;
	if (any_young)
		*any_young = false;
	if (any_dirty)
		*any_dirty = false;

	VM_WARN_ON_FOLIO(!pte_present(pte), folio);
	VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio);
	VM_WARN_ON_FOLIO(page_folio(pfn_to_page(pte_pfn(pte))) != folio, folio);

	nr = pte_batch_hint(start_ptep, pte);
	expected_pte = __pte_batch_clear_ignored(pte_advance_pfn(pte, nr), flags);
	ptep = start_ptep + nr;

	while (ptep < end_ptep) {
		pte = ptep_get(ptep);
		if (any_writable)
			writable = !!pte_write(pte);
		if (any_young)
			young = !!pte_young(pte);
		if (any_dirty)
			dirty = !!pte_dirty(pte);
		pte = __pte_batch_clear_ignored(pte, flags);

		if (!pte_same(pte, expected_pte))
			break;

		/*
		 * Stop immediately once we reached the end of the folio. In
		 * corner cases the next PFN might fall into a different
		 * folio.
		 */
		if (pte_pfn(pte) >= folio_end_pfn)
			break;

		if (any_writable)
			*any_writable |= writable;
		if (any_young)
			*any_young |= young;
		if (any_dirty)
			*any_dirty |= dirty;

		nr = pte_batch_hint(ptep, pte);
		expected_pte = pte_advance_pfn(expected_pte, nr);
		ptep += nr;
	}

	return min(ptep - start_ptep, max_nr);
}
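
/*
 * Example (illustrative sketch, hypothetical caller; the max_nr clamp is
 * simplified): with the PT lock held, batch-process all PTEs of one large
 * folio instead of one page at a time:
 *
 *	const fpb_t flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
 *	int max_nr = PTRS_PER_PTE - pte_index(addr);
 *	int nr = folio_pte_batch(folio, addr, ptep, ptep_get(ptep),
 *				 max_nr, flags, NULL, NULL, NULL);
 *
 *	addr += nr * PAGE_SIZE;
 *	ptep += nr;
 */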

/**
 * pte_move_swp_offset - Move the swap entry offset field of a swap pte
 *	 forward or backward by delta
 * @pte: The initial pte state; is_swap_pte(pte) must be true and
 *	 non_swap_entry() must be false.
 * @delta: The direction and the offset we are moving; forward if delta
 *	 is positive; backward if delta is negative
 *
 * Moves the swap offset, while maintaining all other fields, including
 * swap type, and any swp pte bits. The resulting pte is returned.
 */
static inline pte_t pte_move_swp_offset(pte_t pte, long delta)
{
	swp_entry_t entry = pte_to_swp_entry(pte);
	pte_t new = __swp_entry_to_pte(__swp_entry(swp_type(entry),
						   (swp_offset(entry) + delta)));

	if (pte_swp_soft_dirty(pte))
		new = pte_swp_mksoft_dirty(new);
	if (pte_swp_exclusive(pte))
		new = pte_swp_mkexclusive(new);
	if (pte_swp_uffd_wp(pte))
		new = pte_swp_mkuffd_wp(new);

	return new;
}
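
/*
 * Example (illustrative, hypothetical values): for a swap pte encoding
 * (type = 1, offset = 100), pte_move_swp_offset(pte, -3) yields a pte
 * encoding (type = 1, offset = 97), with the soft-dirty, exclusive and
 * uffd-wp swp pte bits carried over unchanged.
 */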

/**
 * pte_next_swp_offset - Increment the swap entry offset field of a swap pte.
 * @pte: The initial pte state; is_swap_pte(pte) must be true and
 *	 non_swap_entry() must be false.
 *
 * Increments the swap offset, while maintaining all other fields, including
 * swap type, and any swp pte bits. The resulting pte is returned.
 */
static inline pte_t pte_next_swp_offset(pte_t pte)
{
	return pte_move_swp_offset(pte, 1);
}

/**
 * swap_pte_batch - detect a PTE batch for a set of contiguous swap entries
 * @start_ptep: Page table pointer for the first entry.
 * @max_nr: The maximum number of table entries to consider.
 * @pte: Page table entry for the first entry.
 *
 * Detect a batch of contiguous swap entries: consecutive (non-present) PTEs
 * containing swap entries all with consecutive offsets and targeting the same
 * swap type, all with matching swp pte bits.
 *
 * max_nr must be at least one and must be limited by the caller so scanning
 * cannot exceed a single page table.
 *
 * Return: the number of table entries in the batch.
 */
static inline int swap_pte_batch(pte_t *start_ptep, int max_nr, pte_t pte)
{
	pte_t expected_pte = pte_next_swp_offset(pte);
	const pte_t *end_ptep = start_ptep + max_nr;
	swp_entry_t entry = pte_to_swp_entry(pte);
	pte_t *ptep = start_ptep + 1;
	unsigned short cgroup_id;

	VM_WARN_ON(max_nr < 1);
	VM_WARN_ON(!is_swap_pte(pte));
	VM_WARN_ON(non_swap_entry(entry));

	cgroup_id = lookup_swap_cgroup_id(entry);
	while (ptep < end_ptep) {
		pte = ptep_get(ptep);

		if (!pte_same(pte, expected_pte))
			break;
		if (lookup_swap_cgroup_id(pte_to_swp_entry(pte)) != cgroup_id)
			break;
		expected_pte = pte_next_swp_offset(expected_pte);
		ptep++;
	}

	return ptep - start_ptep;
}
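
/*
 * Example (illustrative sketch, hypothetical caller): free a run of
 * contiguous swap entries with one batched call instead of one call per
 * PTE; max_nr must already be clamped to the current page table:
 *
 *	if (is_swap_pte(pte) && !non_swap_entry(pte_to_swp_entry(pte))) {
 *		int nr = swap_pte_batch(ptep, max_nr, pte);
 *
 *		free_swap_and_cache_nr(pte_to_swp_entry(pte), nr);
 *	}
 */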

#endif /* CONFIG_MMU */

void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
						int nr_throttled);
static inline void acct_reclaim_writeback(struct folio *folio)
{
	pg_data_t *pgdat = folio_pgdat(folio);
	int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);

	if (nr_throttled)
		__acct_reclaim_writeback(pgdat, folio, nr_throttled);
}

static inline void wake_throttle_isolated(pg_data_t *pgdat)
{
	wait_queue_head_t *wqh;

	wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
	if (waitqueue_active(wqh))
		wake_up(wqh);
}

vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf);
static inline vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
{
	vm_fault_t ret = __vmf_anon_prepare(vmf);

	if (unlikely(ret & VM_FAULT_RETRY))
		vma_end_read(vmf->vma);
	return ret;
}

vm_fault_t do_swap_page(struct vm_fault *vmf);
void folio_rotate_reclaimable(struct folio *folio);
bool __folio_end_writeback(struct folio *folio);
void deactivate_file_folio(struct folio *folio);
void folio_activate(struct folio *folio);

void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
		   struct vm_area_struct *start_vma, unsigned long floor,
		   unsigned long ceiling, bool mm_wr_locked);
void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);

void unmap_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end,
			     struct zap_details *details);

void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
		unsigned int order);
void force_page_cache_ra(struct readahead_control *, unsigned long nr);
static inline void force_page_cache_readahead(struct address_space *mapping,
		struct file *file, pgoff_t index, unsigned long nr_to_read)
{
	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
	force_page_cache_ra(&ractl, nr_to_read);
}

unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
void filemap_free_folio(struct address_space *mapping, struct folio *folio);
int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
		loff_t end);
long mapping_evict_folio(struct address_space *mapping, struct folio *folio);
unsigned long mapping_try_invalidate(struct address_space *mapping,
		pgoff_t start, pgoff_t end, unsigned long *nr_failed);

/**
 * folio_evictable - Test whether a folio is evictable.
 * @folio: The folio to test.
 *
 * Test whether @folio is evictable -- i.e., should be placed on
 * active/inactive lists vs unevictable list.
 *
 * Reasons folio might not be evictable:
 * 1. folio's mapping marked unevictable
 * 2. One of the pages in the folio is part of an mlocked VMA
 */
static inline bool folio_evictable(struct folio *folio)
{
	bool ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(folio_mapping(folio)) &&
			!folio_test_mlocked(folio);
	rcu_read_unlock();
	return ret;
}

/*
 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}

/*
 * Return true if a folio needs ->release_folio() calling upon it.
 */
static inline bool folio_needs_release(struct folio *folio)
{
	struct address_space *mapping = folio_mapping(folio);

	return folio_has_private(folio) ||
		(mapping && mapping_release_always(mapping));
}

extern unsigned long highest_memmap_pfn;

/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way forward.
 */
#define MAX_RECLAIM_RETRIES 16

/*
 * in mm/vmscan.c:
 */
bool folio_isolate_lru(struct folio *folio);
void folio_putback_lru(struct folio *folio);
extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);

/*
 * in mm/rmap.c:
 */
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/page_alloc.c
 */
#define K(x) ((x) << (PAGE_SHIFT-10))

extern char * const zone_names[MAX_NR_ZONES];

/* perform sanity checks on struct pages being allocated or freed */
DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);

extern int min_free_kbytes;

void setup_per_zone_wmarks(void);
void calculate_min_free_kbytes(void);
int __meminit init_per_zone_wmark_min(void);
void page_alloc_sysctl_init(void);

/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype and highest_zoneidx are initialized only once in
 * __alloc_pages() and then never change.
 *
 * zonelist, preferred_zone and highest_zoneidx are set first in
 * __alloc_pages() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int migratetype;

	/*
	 * highest_zoneidx represents highest usable zone index of
	 * the allocation request. Due to the nature of the zone,
	 * memory on lower zone than the highest_zoneidx will be
	 * protected by lowmem_reserve[highest_zoneidx].
	 *
	 * highest_zoneidx is also used by reclaim/compaction to limit
	 * the target zone since higher zone than this index cannot be
	 * usable for this allocation request.
	 */
	enum zone_type highest_zoneidx;
	bool spread_dirty_pages;
};

/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use buddy_order_unsafe() below.
 */
static inline unsigned int buddy_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

/*
 * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define buddy_order_unsafe(page)	READ_ONCE(page_private(page))
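
/*
 * Example (illustrative sketch, hypothetical compaction-style caller):
 * read the racy order once, range-check the local copy, and only then
 * act on it:
 *
 *	if (PageBuddy(page)) {
 *		unsigned long freepage_order = buddy_order_unsafe(page);
 *
 *		if (freepage_order <= MAX_PAGE_ORDER)
 *			pfn += (1UL << freepage_order) - 1;
 *	}
 *
 * Because the value may be stale, anything stronger than skipping ahead
 * must revalidate under zone->lock.
 */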

/*
 * This function checks whether a page is free && is the buddy
 * we can coalesce a page and its buddy if
 * (a) the buddy is not in a hole (check before calling!) &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set PageBuddy.
 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline bool page_is_buddy(struct page *page, struct page *buddy,
				 unsigned int order)
{
	if (!page_is_guard(buddy) && !PageBuddy(buddy))
		return false;

	if (buddy_order(buddy) != order)
		return false;

	/*
	 * zone check is done late to avoid uselessly calculating
	 * zone/node ids for pages that could never merge.
	 */
	if (page_zone_id(page) != page_zone_id(buddy))
		return false;

	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

	return true;
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_PAGE_ORDER
 */
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
	return page_pfn ^ (1 << order);
}
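
/*
 * Worked example (illustrative): at order 2, pfn 12 (0b1100) has buddy
 * 12 ^ (1 << 2) = 8 (0b1000), and the pair merges into the order-3 page
 * starting at 12 & ~(1 << 2) = 8. The XOR is its own inverse, so
 * __find_buddy_pfn(8, 2) == 12 as well.
 */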

/*
 * Find the buddy of @page and validate it.
 * @page: The input page
 * @pfn: The pfn of the page, it saves a call to page_to_pfn() when the
 *       function is used in the performance-critical __free_one_page().
 * @order: The order of the page
 * @buddy_pfn: The output pointer to the buddy pfn, it also saves a call to
 *             page_to_pfn().
 *
 * The found buddy can be a non PageBuddy, out of @page's zone, or its order is
 * not the same as @page. The validation is necessary before use it.
 *
 * Return: the found buddy page or NULL if not found.
 */
static inline struct page *find_buddy_page_pfn(struct page *page,
			unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)
{
	unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order);
	struct page *buddy;

	buddy = page + (__buddy_pfn - pfn);
	if (buddy_pfn)
		*buddy_pfn = __buddy_pfn;

	if (page_is_buddy(page, buddy, order))
		return buddy;
	return NULL;
}
*__pageblock_pfn_to_page(unsigned long start_pfn
,
647 unsigned long end_pfn
, struct zone
*zone
);
649 static inline struct page
*pageblock_pfn_to_page(unsigned long start_pfn
,
650 unsigned long end_pfn
, struct zone
*zone
)
652 if (zone
->contiguous
)
653 return pfn_to_page(start_pfn
);
655 return __pageblock_pfn_to_page(start_pfn
, end_pfn
, zone
);

void set_zone_contiguous(struct zone *zone);

static inline void clear_zone_contiguous(struct zone *zone)
{
	zone->contiguous = false;
}

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __putback_isolated_page(struct page *page, unsigned int order,
				    int mt);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
					unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order,
		enum meminit_context context);

/*
 * This will have no effect, other than possibly generating a warning, if the
 * caller passes in a non-large folio.
 */
static inline void folio_set_order(struct folio *folio, unsigned int order)
{
	if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
		return;

	folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order;
#ifdef CONFIG_64BIT
	folio->_folio_nr_pages = 1U << order;
#endif
}

bool __folio_unqueue_deferred_split(struct folio *folio);
static inline bool folio_unqueue_deferred_split(struct folio *folio)
{
	if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio))
		return false;

	/*
	 * At this point, there is no one trying to add the folio to
	 * deferred_list. If folio is not in deferred_list, it's safe
	 * to check without acquiring the split_queue_lock.
	 */
	if (data_race(list_empty(&folio->_deferred_list)))
		return false;

	return __folio_unqueue_deferred_split(folio);
}

static inline struct folio *page_rmappable_folio(struct page *page)
{
	struct folio *folio = (struct folio *)page;

	if (folio && folio_test_large(folio))
		folio_set_large_rmappable(folio);
	return folio;
}

static inline void prep_compound_head(struct page *page, unsigned int order)
{
	struct folio *folio = (struct folio *)page;

	folio_set_order(folio, order);
	atomic_set(&folio->_large_mapcount, -1);
	atomic_set(&folio->_entire_mapcount, -1);
	atomic_set(&folio->_nr_pages_mapped, 0);
	atomic_set(&folio->_pincount, 0);
	if (order > 1)
		INIT_LIST_HEAD(&folio->_deferred_list);
}

static inline void prep_compound_tail(struct page *head, int tail_idx)
{
	struct page *p = head + tail_idx;

	p->mapping = TAIL_MAPPING;
	set_compound_head(p, head);
	set_page_private(p, 0);
}

extern void prep_compound_page(struct page *page, unsigned int order);

extern void post_alloc_hook(struct page *page, unsigned int order,
					gfp_t gfp_flags);
extern bool free_pages_prepare(struct page *page, unsigned int order);

extern int user_min_free_kbytes;

void free_unref_page(struct page *page, unsigned int order);
void free_unref_folios(struct folio_batch *fbatch);

extern void zone_pcp_reset(struct zone *zone);
extern void zone_pcp_disable(struct zone *zone);
extern void zone_pcp_enable(struct zone *zone);
extern void zone_pcp_init(struct zone *zone);

extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
			  phys_addr_t min_addr,
			  int nid, bool exact_nid);

void memmap_init_range(unsigned long, int, unsigned long, unsigned long,
		unsigned long, enum meminit_context, struct vmem_altmap *, int);

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
	struct list_head freepages[NR_PAGE_ORDERS];	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned int nr_freepages;	/* Number of isolated free pages */
	unsigned int nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	/*
	 * Acts as an in/out parameter to page isolation for migration.
	 * isolate_migratepages uses it as a search base.
	 * isolate_migratepages_block will update the value to the next pfn
	 * after the last isolated one.
	 */
	unsigned long migrate_pfn;
	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
	struct zone *zone;
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned short fast_search_fail;/* failures to use free list searches */
	short search_order;		/* order to start a fast search at */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	int order;			/* order a direct compactor needs */
	int migratetype;		/* migratetype of direct compactor */
	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
	const int highest_zoneidx;	/* zone index of a direct compactor */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
	bool direct_compaction;		/* False from kcompactd or /proc/... */
	bool proactive_compaction;	/* kcompactd proactive compaction */
	bool whole_zone;		/* Whole zone should/has been scanned */
	bool contended;			/* Signal lock contention */
	bool finish_pageblock;		/* Scan the remainder of a pageblock. Used
					 * when there are potentially transient
					 * isolation or migration failures to
					 * ensure forward progress.
					 */
	bool alloc_contig;		/* alloc_contig_range allocation */
};

/*
 * Used in direct compaction when a page should be taken from the freelists
 * immediately when one is created during the free path.
 */
struct capture_control {
	struct compact_control *cc;
	struct page *page;
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
unsigned long
isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);

int __alloc_contig_migrate_range(struct compact_control *cc,
					unsigned long start, unsigned long end,
					int migratetype);

/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void init_cma_reserved_pageblock(struct page *page);

#endif /* CONFIG_COMPACTION || CONFIG_CMA */

int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal);

static inline bool free_area_empty(struct free_area *area, int migratetype)
{
	return list_empty(&area->free_list[migratetype]);
}

/* mm/util.c */
struct anon_vma *folio_anon_vma(const struct folio *folio);

#ifdef CONFIG_MMU
void unmap_mapping_folio(struct folio *folio);
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *locked);
extern long faultin_page_range(struct mm_struct *mm, unsigned long start,
		unsigned long end, bool write, int *locked);
extern bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
			       unsigned long bytes);

/*
 * NOTE: This function can't tell whether the folio is "fully mapped" in the
 * range.
 * "fully mapped" means all the pages of the folio are associated with the
 * page table of the range, while this function just checks whether the folio
 * range is within the range [start, end). The caller needs to do a page
 * table check if it cares about the page table association.
 *
 * Typical usage (like mlock or madvise) is:
 * The caller knows at least 1 page of the folio is associated with the page
 * table of the VMA, and the range [start, end) intersects the VMA range.
 * The caller wants to know whether the folio is fully associated with the
 * range. It calls this function to check whether the folio is in the range
 * first. Then it checks the page table to know whether the folio is fully
 * mapped to the range.
 */
static inline bool
folio_within_range(struct folio *folio, struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	pgoff_t pgoff, addr;
	unsigned long vma_pglen = vma_pages(vma);

	VM_WARN_ON_FOLIO(folio_test_ksm(folio), folio);
	if (start > end)
		return false;

	if (start < vma->vm_start)
		start = vma->vm_start;

	if (end > vma->vm_end)
		end = vma->vm_end;

	pgoff = folio_pgoff(folio);

	/* if folio start address is not in vma range */
	if (!in_range(pgoff, vma->vm_pgoff, vma_pglen))
		return false;

	addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);

	return !(addr < start || end - addr < folio_size(folio));
}
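
/*
 * Example (illustrative sketch): an mlock-style caller first does this
 * cheap range check, and only if it passes walks the page table to confirm
 * every page of the folio is mapped before treating it as fully mapped:
 *
 *	if (folio_within_range(folio, vma, start, end) &&
 *	    folio_ptes_all_mapped(vma, addr, folio))	<- hypothetical helper
 *		mlock_folio(folio);
 */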

static inline bool
folio_within_vma(struct folio *folio, struct vm_area_struct *vma)
{
	return folio_within_range(folio, vma, vma->vm_start, vma->vm_end);
}

/*
 * mlock_vma_folio() and munlock_vma_folio():
 * should be called with vma's mmap_lock held for read or write,
 * under page table lock for the pte/pmd being added or removed.
 *
 * mlock is usually called at the end of folio_add_*_rmap_*(), munlock at
 * the end of folio_remove_rmap_*(); but new anon folios are managed by
 * folio_add_lru_vma() calling mlock_new_folio().
 */
void mlock_folio(struct folio *folio);
static inline void mlock_vma_folio(struct folio *folio,
				struct vm_area_struct *vma)
{
	/*
	 * The VM_SPECIAL check here serves two purposes.
	 * 1) VM_IO check prevents migration from double-counting during mlock.
	 * 2) Although mmap_region() and mlock_fixup() take care that VM_LOCKED
	 *    is never left set on a VM_SPECIAL vma, there is an interval while
	 *    file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may
	 *    still be set while VM_SPECIAL bits are added: so ignore it then.
	 */
	if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED))
		mlock_folio(folio);
}

void munlock_folio(struct folio *folio);
static inline void munlock_vma_folio(struct folio *folio,
					struct vm_area_struct *vma)
{
	/*
	 * munlock if the function is called. Ideally, we should only
	 * do munlock if some page of the folio is being unmapped from the
	 * VMA, leaving the folio no longer fully mapped to the VMA.
	 *
	 * But it's not easy to confirm that's the situation. So we
	 * always munlock the folio and let page reclaim correct it
	 * if that was wrong.
	 */
	if (unlikely(vma->vm_flags & VM_LOCKED))
		munlock_folio(folio);
}

void mlock_new_folio(struct folio *folio);
bool need_mlock_drain(int cpu);
void mlock_drain_local(void);
void mlock_drain_remote(int cpu);

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/*
 * vma_address - Find the virtual address a page range is mapped at
 * @vma: The vma which maps this object.
 * @pgoff: The page offset within its object.
 * @nr_pages: The number of pages to consider.
 *
 * If any page in this range is mapped by this VMA, return the first address
 * where any of these pages appear. Otherwise, return -EFAULT.
 */
static inline unsigned long vma_address(const struct vm_area_struct *vma,
		pgoff_t pgoff, unsigned long nr_pages)
{
	unsigned long address;

	if (pgoff >= vma->vm_pgoff) {
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		/* Check for address beyond vma (or wrapped through 0?) */
		if (address < vma->vm_start || address >= vma->vm_end)
			address = -EFAULT;
	} else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
		/* Test above avoids possibility of wrap to 0 on 32-bit */
		address = vma->vm_start;
	} else {
		address = -EFAULT;
	}
	return address;
}
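
/*
 * Worked example (illustrative, hypothetical numbers, 4kB pages): for a VMA
 * with vm_start = 0x10000, vm_end = 0x14000 and vm_pgoff = 2,
 * vma_address(vma, pgoff = 3, nr_pages = 1) returns
 * 0x10000 + ((3 - 2) << PAGE_SHIFT) = 0x11000, while pgoff = 10 computes an
 * address beyond vm_end and yields -EFAULT.
 */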

/*
 * Then at what user virtual address will none of the range be found in vma?
 * Assumes that vma_address() already returned a good starting address.
 */
static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
{
	struct vm_area_struct *vma = pvmw->vma;
	pgoff_t pgoff;
	unsigned long address;

	/* Common case, plus ->pgoff is invalid for KSM */
	if (pvmw->nr_pages == 1)
		return pvmw->address + PAGE_SIZE;

	pgoff = pvmw->pgoff + pvmw->nr_pages;
	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	/* Check for address beyond vma (or wrapped through 0?) */
	if (address < vma->vm_start || address > vma->vm_end)
		address = vma->vm_end;
	return address;
}

static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
						    struct file *fpin)
{
	int flags = vmf->flags;

	if (fpin)
		return fpin;

	/*
	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
	 * anything, so we only pin the file and drop the mmap_lock if only
	 * FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
	 */
	if (fault_flag_allow_retry_first(flags) &&
	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
		fpin = get_file(vmf->vma->vm_file);
		release_fault_lock(vmf);
	}
	return fpin;
}
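
/*
 * Example (illustrative sketch, hypothetical fault handler): the usual
 * pattern pins the file once before a blocking operation and returns
 * VM_FAULT_RETRY if the lock was dropped:
 *
 *	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
 *	... start readahead or wait for the folio lock ...
 *	if (fpin) {
 *		fput(fpin);
 *		return VM_FAULT_RETRY;
 *	}
 */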

#else /* !CONFIG_MMU */
static inline void unmap_mapping_folio(struct folio *folio) { }
static inline void mlock_new_folio(struct folio *folio) { }
static inline bool need_mlock_drain(int cpu) { return false; }
static inline void mlock_drain_local(void) { }
static inline void mlock_drain_remote(int cpu) { }
static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
{
}
#endif /* !CONFIG_MMU */

/* Memory initialisation debug and verification */
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
DECLARE_STATIC_KEY_TRUE(deferred_pages);

bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			pr_warn("mminit::" prefix " " fmt, ##arg); \
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

#define NODE_RECLAIM_NOSCAN	-2
#define NODE_RECLAIM_FULL	-1
#define NODE_RECLAIM_SOME	0
#define NODE_RECLAIM_SUCCESS	1

#ifdef CONFIG_NUMA
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
extern int find_next_best_node(int node, nodemask_t *used_node_mask);
#else
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return NODE_RECLAIM_NOSCAN;
}
static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
{
	return NUMA_NO_NODE;
}
#endif

/*
 * mm/memory-failure.c
 */
#ifdef CONFIG_MEMORY_FAILURE
void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu);
void shake_folio(struct folio *folio);
extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;
#define MAGIC_HWPOISON	0x48575053U	/* HWPS */
void SetPageHWPoisonTakenOff(struct page *page);
void ClearPageHWPoisonTakenOff(struct page *page);
bool take_page_off_buddy(struct page *page);
bool put_page_back_buddy(struct page *page);
struct task_struct *task_early_kill(struct task_struct *tsk, int force_early);
void add_to_kill_ksm(struct task_struct *tsk, const struct page *p,
		     struct vm_area_struct *vma, struct list_head *to_kill,
		     unsigned long ksm_addr);
unsigned long page_mapped_in_vma(const struct page *page,
		struct vm_area_struct *vma);

#else
static inline void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu)
{
}
#endif

extern unsigned long  __must_check vm_mmap_pgoff(struct file *, unsigned long,
        unsigned long, unsigned long,
        unsigned long, unsigned long);

extern void set_pageblock_order(void);
struct folio *alloc_migrate_folio(struct folio *src, unsigned long private);
unsigned long reclaim_pages(struct list_head *folio_list);
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *folio_list);

/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)

/*
 * Only MMU archs have async oom victim reclaim - aka oom_reaper so we
 * cannot assume a reduced access to memory reserves is sufficient for
 * !MMU
 */
#ifdef CONFIG_MMU
#define ALLOC_OOM		0x08
#else
#define ALLOC_OOM		ALLOC_NO_WATERMARKS
#endif

#define ALLOC_NON_BLOCK		 0x10 /* Caller cannot block. Allow access
				       * to 25% of the min watermark or
				       * 62.5% if __GFP_HIGH is set.
				       */
#define ALLOC_MIN_RESERVE	 0x20 /* __GFP_HIGH set. Allow access to 50%
				       * of the min watermark.
				       */
#define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
#define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
#ifdef CONFIG_ZONE_DMA32
#define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
#else
#define ALLOC_NOFRAGMENT	  0x0
#endif
#define ALLOC_HIGHATOMIC	0x200 /* Allows access to MIGRATE_HIGHATOMIC */
#define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */

/* Flags that allow allocations below the min watermark. */
#define ALLOC_RESERVES (ALLOC_NON_BLOCK|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)

enum ttu_flags;
struct tlbflush_unmap_batch;

/*
 * only for MM internal work items which do not depend on
 * any allocations or locks which might depend on allocations
 */
extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
	return migratetype == MIGRATE_HIGHATOMIC;
}

void setup_zone_pageset(struct zone *zone);

struct migration_target_control {
	int nid;		/* preferred node id */
	nodemask_t *nmask;
	gfp_t gfp_mask;
	enum migrate_reason reason;
};

/*
 * mm/filemap.c
 */
size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
			      struct folio *folio, loff_t fpos, size_t size);

/*
 * mm/vmalloc.c
 */
#ifdef CONFIG_MMU
void __init vmalloc_init(void);
int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift);
unsigned int get_vm_area_page_order(struct vm_struct *vm);
#else
static inline void vmalloc_init(void)
{
}

static inline
int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	return -EINVAL;
}
#endif

int __must_check __vmap_pages_range_noflush(unsigned long addr,
			       unsigned long end, pgprot_t prot,
			       struct page **pages, unsigned int page_shift);

void vunmap_range_noflush(unsigned long start, unsigned long end);

void __vunmap_range_noflush(unsigned long start, unsigned long end);

int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
		      unsigned long addr, int *flags, bool writable,
		      int *last_cpupid);

void free_zone_device_folio(struct folio *folio);
int migrate_device_coherent_folio(struct folio *folio);

struct vm_struct *__get_vm_area_node(unsigned long size,
				     unsigned long align, unsigned long shift,
				     unsigned long flags, unsigned long start,
				     unsigned long end, int node, gfp_t gfp_mask,
				     const void *caller);

/*
 * mm/gup.c
 */
int __must_check try_grab_folio(struct folio *folio, int refs,
				unsigned int flags);

/*
 * mm/huge_memory.c
 */
void touch_pud(struct vm_area_struct *vma, unsigned long addr,
	       pud_t *pud, bool write);
void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
	       pmd_t *pmd, bool write);

static inline bool alloc_zeroed(void)
{
	return static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
				   &init_on_alloc);
}

/*
 * Parses a string with mem suffixes into its order. Useful to parse kernel
 * parameters.
 */
static inline int get_order_from_str(const char *size_str,
				     unsigned long valid_orders)
{
	unsigned long size;
	char *endptr;
	int order;

	size = memparse(size_str, &endptr);

	if (!is_power_of_2(size))
		return -EINVAL;
	order = get_order(size);
	if (BIT(order) & ~valid_orders)
		return -EINVAL;

	return order;
}
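
/*
 * Worked example (illustrative, assuming 4kB base pages): "2M" parses to
 * size = 0x200000, a power of two, so order = get_order(0x200000) = 9.
 * With valid_orders = BIT(0) | BIT(9) the call returns 9; with
 * valid_orders = BIT(0) only, BIT(9) & ~valid_orders is non-zero and the
 * call returns -EINVAL.
 */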

enum {
	/* mark page accessed */
	FOLL_TOUCH = 1 << 16,
	/* a retry, previous pass started an IO */
	FOLL_TRIED = 1 << 17,
	/* we are working on non-current tsk/mm */
	FOLL_REMOTE = 1 << 18,
	/* pages must be released via unpin_user_page */
	FOLL_PIN = 1 << 19,
	/* gup_fast: prevent fall-back to slow gup */
	FOLL_FAST_ONLY = 1 << 20,
	/* allow unlocking the mmap lock */
	FOLL_UNLOCKABLE = 1 << 21,
	/* VMA lookup+checks compatible with MADV_POPULATE_(READ|WRITE) */
	FOLL_MADV_POPULATE = 1 << 22,
};

#define INTERNAL_GUP_FLAGS (FOLL_TOUCH | FOLL_TRIED | FOLL_REMOTE | FOLL_PIN | \
			    FOLL_FAST_ONLY | FOLL_UNLOCKABLE | \
			    FOLL_MADV_POPULATE)

/*
 * Indicates for which pages that are write-protected in the page table,
 * whether GUP has to trigger unsharing via FAULT_FLAG_UNSHARE such that the
 * GUP pin will remain consistent with the pages mapped into the page tables
 * of the MM.
 *
 * Temporary unmapping of PageAnonExclusive() pages or clearing of
 * PageAnonExclusive() has to protect against concurrent GUP:
 * * Ordinary GUP: Using the PT lock
 * * GUP-fast and fork(): mm->write_protect_seq
 * * GUP-fast and KSM or temporary unmapping (swap, migration): see
 *   folio_try_share_anon_rmap_*()
 *
 * Must be called with the (sub)page that's actually referenced via the
 * page table entry, which might not necessarily be the head page for a
 * large folio.
 *
 * If the vma is NULL, we're coming from the GUP-fast path and might have
 * to fallback to the slow path just to lookup the vma.
 */
static inline bool gup_must_unshare(struct vm_area_struct *vma,
				    unsigned int flags, struct page *page)
{
	/*
	 * FOLL_WRITE is implicitly handled correctly as the page table entry
	 * has to be writable -- and if it references (part of) an anonymous
	 * folio, that part is required to be marked exclusive.
	 */
	if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
		return false;
	/*
	 * Note: PageAnon(page) is stable until the page is actually getting
	 * freed.
	 */
	if (!PageAnon(page)) {
		/*
		 * We only care about R/O long-term pinning: R/O short-term
		 * pinning does not have the semantics to observe successive
		 * changes through the process page tables.
		 */
		if (!(flags & FOLL_LONGTERM))
			return false;

		/* We really need the vma ... */
		if (!vma)
			return true;

		/*
		 * ... because we only care about writable private ("COW")
		 * mappings where we have to break COW early.
		 */
		return is_cow_mapping(vma->vm_flags);
	}

	/* Paired with a memory barrier in folio_try_share_anon_rmap_*(). */
	if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
		smp_rmb();

	/*
	 * Note that KSM pages cannot be exclusive, and consequently,
	 * cannot get pinned.
	 */
	return !PageAnonExclusive(page);
}

extern bool mirrored_kernelcore;
extern bool memblock_has_mirror(void);

static __always_inline void vma_set_range(struct vm_area_struct *vma,
					  unsigned long start, unsigned long end,
					  pgoff_t pgoff)
{
	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_pgoff = pgoff;
}
*vma
)
1417 * NOTE: we must check this before VM_SOFTDIRTY on soft-dirty
1418 * enablements, because when without soft-dirty being compiled in,
1419 * VM_SOFTDIRTY is defined as 0x0, then !(vm_flags & VM_SOFTDIRTY)
1420 * will be constantly true.
1422 if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY
))
1426 * Soft-dirty is kind of special: its tracking is enabled when the
1427 * vma flags not set.
1429 return !(vma
->vm_flags
& VM_SOFTDIRTY
);
1432 static inline bool pmd_needs_soft_dirty_wp(struct vm_area_struct
*vma
, pmd_t pmd
)
1434 return vma_soft_dirty_enabled(vma
) && !pmd_soft_dirty(pmd
);
1437 static inline bool pte_needs_soft_dirty_wp(struct vm_area_struct
*vma
, pte_t pte
)
1439 return vma_soft_dirty_enabled(vma
) && !pte_soft_dirty(pte
);

void __meminit __init_single_page(struct page *page, unsigned long pfn,
				unsigned long zone, int nid);

/* shrinker related functions */
unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
			  int priority);

#ifdef CONFIG_64BIT
static inline int can_do_mseal(unsigned long flags)
{
	if (flags)
		return -EINVAL;

	return 0;
}

#else
static inline int can_do_mseal(unsigned long flags)
{
	return -EPERM;
}
#endif

#ifdef CONFIG_SHRINKER_DEBUG
static inline __printf(2, 0) int shrinker_debugfs_name_alloc(
			struct shrinker *shrinker, const char *fmt, va_list ap)
{
	shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap);

	return shrinker->name ? 0 : -ENOMEM;
}

static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
{
	kfree_const(shrinker->name);
	shrinker->name = NULL;
}

extern int shrinker_debugfs_add(struct shrinker *shrinker);
extern struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
					      int *debugfs_id);
extern void shrinker_debugfs_remove(struct dentry *debugfs_entry,
				    int debugfs_id);
#else /* CONFIG_SHRINKER_DEBUG */
static inline int shrinker_debugfs_add(struct shrinker *shrinker)
{
	return 0;
}
static inline int shrinker_debugfs_name_alloc(struct shrinker *shrinker,
					      const char *fmt, va_list ap)
{
	return 0;
}
static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
{
}
static inline struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
						     int *debugfs_id)
{
	*debugfs_id = -1;
	return NULL;
}
static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry,
					   int debugfs_id)
{
}
#endif /* CONFIG_SHRINKER_DEBUG */

/* Only track the nodes of mappings with shadow entries */
void workingset_update_node(struct xa_node *node);
extern struct list_lru shadow_nodes;

/* mremap.c */
unsigned long move_page_tables(struct vm_area_struct *vma,
	unsigned long old_addr, struct vm_area_struct *new_vma,
	unsigned long new_addr, unsigned long len,
	bool need_rmap_locks, bool for_stack);

#ifdef CONFIG_UNACCEPTED_MEMORY
void accept_page(struct page *page);
#else /* CONFIG_UNACCEPTED_MEMORY */
static inline void accept_page(struct page *page)
{
}
#endif /* CONFIG_UNACCEPTED_MEMORY */

/* pagewalk.c */
int walk_page_range_mm(struct mm_struct *mm, unsigned long start,
		unsigned long end, const struct mm_walk_ops *ops,
		void *private);

#endif	/* __MM_INTERNAL_H */