/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/page_ref.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>

void free_huge_folio(struct folio *folio);

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/pagemap.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

/*
 * For a HugeTLB page, there is more metadata to save in the struct page. But
 * the head struct page cannot meet our needs, so we have to abuse other tail
 * struct pages to store the metadata.
 */
#define __NR_USED_SUBPAGE 3

struct hugepage_subpool {
        spinlock_t lock;
        long count;
        long max_hpages;        /* Maximum huge pages or -1 if no maximum. */
        long used_hpages;       /* Used count against maximum, includes */
                                /* both allocated and reserved pages. */
        struct hstate *hstate;
        long min_hpages;        /* Minimum huge pages or -1 if no minimum. */
        long rsv_hpages;        /* Pages reserved against global pool to */
                                /* satisfy minimum size. */
};

struct resv_map {
        struct kref refs;
        spinlock_t lock;
        struct list_head regions;
        long adds_in_progress;
        struct list_head region_cache;
        long region_cache_count;
        struct rw_semaphore rw_sema;
#ifdef CONFIG_CGROUP_HUGETLB
        /*
         * On private mappings, the counter to uncharge reservations is stored
         * here. If these fields are 0, then either the mapping is shared, or
         * cgroup accounting is disabled for this resv_map.
         */
        struct page_counter *reservation_counter;
        unsigned long pages_per_hpage;
        struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 *                    across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock.  The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map.  The from and to elements are huge page
 * indices into the associated mapping.  from indicates the starting index
 * of the region.  to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping.  It is important to note that the to element
 * represents the first element past the end of the region. This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
        struct list_head link;
        long from;
        long to;
#ifdef CONFIG_CGROUP_HUGETLB
        /*
         * On shared mappings, each reserved region appears as a struct
         * file_region in resv_map. These fields hold the info needed to
         * uncharge each reservation.
         */
        struct page_counter *reservation_counter;
        struct cgroup_subsys_state *css;
#endif
};

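/*
 * Worked example (illustrative only, restating the comment above): a
 * resv_map whose region list holds [0, 2) and [5, 6) tracks
 * (2 - 0) + (6 - 5) = 3 huge pages, covering huge page indices 0, 1
 * and 5 of the mapping.
 */
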
struct hugetlb_vma_lock {
        struct kref refs;
        struct rw_semaphore rw_sema;
        struct vm_area_struct *vma;
};

extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
        for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)

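/*
 * Usage sketch (not from this file): iterate over every registered huge
 * page size, e.g. to dump the pool counters:
 *
 *      struct hstate *h;
 *
 *      for_each_hstate(h)
 *              pr_info("%s: %lu free\n", h->name, h->free_huge_pages);
 */
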
struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
                                                long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void hugetlb_dup_vma_private(struct vm_area_struct *vma);
void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
int move_hugetlb_page_tables(struct vm_area_struct *vma,
                             struct vm_area_struct *new_vma,
                             unsigned long old_addr, unsigned long new_addr,
                             unsigned long len);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
                            struct vm_area_struct *, struct vm_area_struct *);
void unmap_hugepage_range(struct vm_area_struct *,
                          unsigned long, unsigned long, struct page *,
                          zap_flags_t);
void __unmap_hugepage_range(struct mmu_gather *tlb,
                            struct vm_area_struct *vma,
                            unsigned long start, unsigned long end,
                            struct page *ref_page, zap_flags_t zap_flags);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo_node(int nid);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
                             struct vm_area_struct *dst_vma,
                             unsigned long dst_addr,
                             unsigned long src_addr,
                             uffd_flags_t flags,
                             struct folio **foliop);
#endif /* CONFIG_USERFAULTFD */
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
                                                struct vm_area_struct *vma,
                                                vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
                                                long freed);
bool isolate_hugetlb(struct folio *folio, struct list_head *list);
int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison);
int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
                                bool *migratable_cleared);
void folio_putback_active_hugetlb(struct folio *folio);
void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);

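/*
 * Sketch of the usual serialization pattern around faults on a single
 * page index (mm/hugetlb.c and fs/hugetlbfs are the real users): faults
 * on the same (mapping, idx) hash to the same mutex.
 *
 *      u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 *
 *      mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *      ... fault or reservation work for (mapping, idx) ...
 *      mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */
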
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
                      unsigned long addr, pud_t *pud);
bool hugetlbfs_pagecache_present(struct hstate *h,
                                 struct vm_area_struct *vma,
                                 unsigned long address);

struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages[MAX_NUMNODES];

#ifndef CONFIG_HIGHPTE
/*
 * pte_offset_huge() and pte_alloc_huge() are helpers for those architectures
 * which may go down to the lowest PTE level in their huge_pte_offset() and
 * huge_pte_alloc(): to avoid reliance on pte_offset_map() without pte_unmap().
 */
static inline pte_t *pte_offset_huge(pmd_t *pmd, unsigned long address)
{
        return pte_offset_kernel(pmd, address);
}
static inline pte_t *pte_alloc_huge(struct mm_struct *mm, pmd_t *pmd,
                                    unsigned long address)
{
        return pte_alloc(mm, pmd) ? NULL : pte_offset_huge(pmd, address);
}
#endif

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long addr, unsigned long sz);

/*
 * huge_pte_offset(): Walk the hugetlb pgtable until the last level PTE.
 * Returns the pte_t* if found, or NULL if the address is not mapped.
 *
 * IMPORTANT: we should normally not directly call this function; instead
 * this is only a common interface to implement arch-specific
 * walkers. Please use hugetlb_walk() instead, because that will attempt to
 * verify the locking for you.
 *
 * Since this function will walk all the pgtable pages (including not only
 * the high-level pgtable pages, but also the PUD entries that can be unshared
 * concurrently for VM_SHARED), the caller of this function is
 * responsible for its thread safety.  One can follow this rule:
 *
 * (1) For private mappings: pmd unsharing is not possible, so holding the
 *     mmap_lock for either read or write is sufficient. Most callers
 *     already hold the mmap_lock, so normally, no special action is
 *     required.
 *
 * (2) For shared mappings: pmd unsharing is possible (so the PUD-ranged
 *     pgtable page can go away from under us!  It can be done by a pmd
 *     unshare with a follow up munmap() on the other process), then we
 *     need either:
 *
 *     (2.1) hugetlb vma lock read or write held, to make sure pmd unshare
 *           won't happen upon the range (it also makes sure the pte_t we
 *           read is the right and stable one), or,
 *
 *     (2.2) hugetlb mapping i_mmap_rwsem lock held read or write, to make
 *           sure even if unshare happened the racy unmap() will wait until
 *           i_mmap_rwsem is released.
 *
 * Option (2.1) is the safest, which guarantees pte stability from the pmd
 * sharing pov, until the vma lock is released.  Option (2.2) doesn't protect
 * a concurrent pmd unshare, but it makes sure the pgtable page is safe to
 * access.
 */
pte_t *huge_pte_offset(struct mm_struct *mm,
                       unsigned long addr, unsigned long sz);
unsigned long hugetlb_mask_last_page(struct hstate *h);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
                                unsigned long addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
                                unsigned long *start, unsigned long *end);

extern void __hugetlb_zap_begin(struct vm_area_struct *vma,
                                unsigned long *begin, unsigned long *end);
extern void __hugetlb_zap_end(struct vm_area_struct *vma,
                              struct zap_details *details);

static inline void hugetlb_zap_begin(struct vm_area_struct *vma,
                                     unsigned long *start, unsigned long *end)
{
        if (is_vm_hugetlb_page(vma))
                __hugetlb_zap_begin(vma, start, end);
}

static inline void hugetlb_zap_end(struct vm_area_struct *vma,
                                   struct zap_details *details)
{
        if (is_vm_hugetlb_page(vma))
                __hugetlb_zap_end(vma, details);
}

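/*
 * Sketch of the intended pairing in the unmap path (an assumption about
 * the caller; see mm/memory.c for the real one): zap_begin may widen
 * [start, end) when pmd sharing is possible and takes the locks that
 * zap_end later drops.
 *
 *      hugetlb_zap_begin(vma, &start, &end);
 *      __unmap_hugepage_range(tlb, vma, start, end, NULL, zap_flags);
 *      hugetlb_zap_end(vma, details);
 */
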
void hugetlb_vma_lock_read(struct vm_area_struct *vma);
void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
void hugetlb_vma_lock_write(struct vm_area_struct *vma);
void hugetlb_vma_unlock_write(struct vm_area_struct *vma);
int hugetlb_vma_trylock_write(struct vm_area_struct *vma);
void hugetlb_vma_assert_locked(struct vm_area_struct *vma);
void hugetlb_vma_lock_release(struct kref *kref);
long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot,
                unsigned long cp_flags);
bool is_hugetlb_entry_migration(pte_t pte);
bool is_hugetlb_entry_hwpoisoned(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void hugetlb_dup_vma_private(struct vm_area_struct *vma)
{
}

static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
        return 0;
}

static inline struct address_space *hugetlb_folio_mapping_lock_write(
                                                        struct folio *folio)
{
        return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
                                        struct vm_area_struct *vma,
                                        unsigned long addr, pte_t *ptep)
{
        return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
                                struct vm_area_struct *vma,
                                unsigned long *start, unsigned long *end)
{
}

static inline void hugetlb_zap_begin(
                                struct vm_area_struct *vma,
                                unsigned long *start, unsigned long *end)
{
}

static inline void hugetlb_zap_end(
                                struct vm_area_struct *vma,
                                struct zap_details *details)
{
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
                                          struct mm_struct *src,
                                          struct vm_area_struct *dst_vma,
                                          struct vm_area_struct *src_vma)
{
        BUG();
        return 0;
}

static inline int move_hugetlb_page_tables(struct vm_area_struct *vma,
                                           struct vm_area_struct *new_vma,
                                           unsigned long old_addr,
                                           unsigned long new_addr,
                                           unsigned long len)
{
        BUG();
        return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
        return 0;
}

static inline void hugetlb_show_meminfo_node(int nid)
{
}

static inline int prepare_hugepage_range(struct file *file,
                                unsigned long addr, unsigned long len)
{
        return -EINVAL;
}

static inline void hugetlb_vma_lock_read(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_lock_write(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
{
}

static inline int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
{
        return 1;
}

static inline void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
{
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
                                        unsigned long addr, unsigned long len)
{
        return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                                unsigned long addr, unsigned long end,
                                unsigned long floor, unsigned long ceiling)
{
        BUG();
}

#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
                                           struct vm_area_struct *dst_vma,
                                           unsigned long dst_addr,
                                           unsigned long src_addr,
                                           uffd_flags_t flags,
                                           struct folio **foliop)
{
        BUG();
        return 0;
}
#endif /* CONFIG_USERFAULTFD */

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
                                        unsigned long sz)
{
        return NULL;
}

static inline bool isolate_hugetlb(struct folio *folio, struct list_head *list)
{
        return false;
}

static inline int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
{
        return 0;
}

static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
                                        bool *migratable_cleared)
{
        return 0;
}

static inline void folio_putback_active_hugetlb(struct folio *folio)
{
}

static inline void move_hugetlb_state(struct folio *old_folio,
                                        struct folio *new_folio, int reason)
{
}

static inline long hugetlb_change_protection(
                        struct vm_area_struct *vma, unsigned long address,
                        unsigned long end, pgprot_t newprot,
                        unsigned long cp_flags)
{
        return 0;
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, struct page *ref_page,
                        zap_flags_t zap_flags)
{
        BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
                        struct vm_area_struct *vma, unsigned long address,
                        unsigned int flags)
{
        BUG();
        return 0;
}

static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }

#endif /* !CONFIG_HUGETLB_PAGE */

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
        BUG();
        return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
        /*
         * The file will be used as a shm file so shmfs accounting rules
         * apply
         */
        HUGETLB_SHMFS_INODE     = 1,
        /*
         * The file is being created on the internal vfs mount and shmfs
         * accounting rules do not apply
         */
        HUGETLB_ANONHUGE_INODE  = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
        long    max_inodes;   /* inodes allowed */
        long    free_inodes;  /* inodes free */
        spinlock_t      stat_lock;
        struct hstate *hstate;
        struct hugepage_subpool *spool;
        kuid_t  uid;
        kgid_t  gid;
        umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
        struct inode vfs_inode;
        unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
        return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
                                int creat_flags, int page_size_log);

static inline bool is_file_hugepages(const struct file *file)
{
        return file->f_op->fop_flags & FOP_HUGE_PAGES;
}

static inline struct hstate *hstate_inode(struct inode *i)
{
        return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)                 false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
                int creat_flags, int page_size_log)
{
        return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
        return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                          unsigned long len, unsigned long pgoff,
                          unsigned long flags);

/*
 * hugetlb page specific state flags.  These flags are located in page.private
 * of the hugetlb head page.  Functions created via the below macros should be
 * used to manipulate these flags.
 *
 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
 *      allocation time.  Cleared when page is fully instantiated.  Free
 *      routine checks flag to restore a reservation on error paths.
 *      Synchronization:  Examined or modified by code that knows it has
 *      the only reference to page.  i.e. After allocation but before use
 *      or when the page is being freed.
 * HPG_migratable  - Set after a newly allocated page is added to the page
 *      cache and/or page tables.  Indicates the page is a candidate for
 *      migration.
 *      Synchronization:  Initially set after new page allocation with no
 *      locking.  When examined and modified during migration processing
 *      (isolate, migrate, putback) the hugetlb_lock is held.
 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
 *      allocator.  Typically used for migration target pages when no pages
 *      are available in the pool.  The hugetlb free page path will
 *      immediately free pages with this flag set to the buddy allocator.
 *      Synchronization: Can be set after huge page allocation from buddy when
 *      code knows it has the only reference.  All other examinations and
 *      modifications require hugetlb_lock.
 * HPG_freed - Set when page is on the free lists.
 *      Synchronization: hugetlb_lock held for examination and modification.
 * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
 * HPG_raw_hwp_unreliable - Set when the hugetlb page has a hwpoison sub-page
 *      that is not tracked by raw_hwp_page list.
 */
enum hugetlb_page_flags {
        HPG_restore_reserve = 0,
        HPG_migratable,
        HPG_temporary,
        HPG_freed,
        HPG_vmemmap_optimized,
        HPG_raw_hwp_unreliable,
        __NR_HPAGEFLAGS,
};

/*
 * Macros to create test, set and clear function definitions for
 * hugetlb specific page flags.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define TESTHPAGEFLAG(uname, flname)                            \
static __always_inline                                          \
bool folio_test_hugetlb_##flname(struct folio *folio)           \
        {       void *private = &folio->private;                \
                return test_bit(HPG_##flname, private);         \
        }

#define SETHPAGEFLAG(uname, flname)                             \
static __always_inline                                          \
void folio_set_hugetlb_##flname(struct folio *folio)            \
        {       void *private = &folio->private;                \
                set_bit(HPG_##flname, private);                 \
        }

#define CLEARHPAGEFLAG(uname, flname)                           \
static __always_inline                                          \
void folio_clear_hugetlb_##flname(struct folio *folio)          \
        {       void *private = &folio->private;                \
                clear_bit(HPG_##flname, private);               \
        }
#else
#define TESTHPAGEFLAG(uname, flname)                            \
static inline bool                                              \
folio_test_hugetlb_##flname(struct folio *folio)                \
        { return 0; }

#define SETHPAGEFLAG(uname, flname)                             \
static inline void                                              \
folio_set_hugetlb_##flname(struct folio *folio)                 \
        { }

#define CLEARHPAGEFLAG(uname, flname)                           \
static inline void                                              \
folio_clear_hugetlb_##flname(struct folio *folio)               \
        { }
#endif

#define HPAGEFLAG(uname, flname)                                \
        TESTHPAGEFLAG(uname, flname)                            \
        SETHPAGEFLAG(uname, flname)                             \
        CLEARHPAGEFLAG(uname, flname)                           \

/*
 * Create functions associated with hugetlb page flags
 */
HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)

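/*
 * For example, HPAGEFLAG(Migratable, migratable) above generates
 * folio_test_hugetlb_migratable(), folio_set_hugetlb_migratable() and
 * folio_clear_hugetlb_migratable(), all operating on bit HPG_migratable
 * in the head page's page.private.
 */
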
#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
        struct mutex resize_lock;
        struct lock_class_key resize_key;
        int next_nid_to_alloc;
        int next_nid_to_free;
        unsigned int order;
        unsigned int demote_order;
        unsigned long mask;
        unsigned long max_huge_pages;
        unsigned long nr_huge_pages;
        unsigned long free_huge_pages;
        unsigned long resv_huge_pages;
        unsigned long surplus_huge_pages;
        unsigned long nr_overcommit_huge_pages;
        struct list_head hugepage_activelist;
        struct list_head hugepage_freelists[MAX_NUMNODES];
        unsigned int max_huge_pages_node[MAX_NUMNODES];
        unsigned int nr_huge_pages_node[MAX_NUMNODES];
        unsigned int free_huge_pages_node[MAX_NUMNODES];
        unsigned int surplus_huge_pages_node[MAX_NUMNODES];
        char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
        struct list_head list;
        struct hstate *hstate;
};

int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
                                unsigned long addr, int avoid_reserve);
struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
                                nodemask_t *nmask, gfp_t gfp_mask,
                                bool allow_alloc_fallback);
struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
                                          nodemask_t *nmask, gfp_t gfp_mask);

int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
                        pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
                                unsigned long address, struct folio *folio);

int __init __alloc_bootmem_huge_page(struct hstate *h, int nid);
int __init alloc_bootmem_huge_page(struct hstate *h, int nid);
bool __init hugetlb_node_alloc_supported(void);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
{
        return folio->_hugetlb_subpool;
}

static inline void hugetlb_set_folio_subpool(struct folio *folio,
                                        struct hugepage_subpool *subpool)
{
        folio->_hugetlb_subpool = subpool;
}

static inline struct hstate *hstate_file(struct file *f)
{
        return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
        if (!page_size_log)
                return &default_hstate;

        if (page_size_log < BITS_PER_LONG)
                return size_to_hstate(1UL << page_size_log);

        return NULL;
}

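/*
 * E.g. page_size_log == 21, as encoded by mmap()'s MAP_HUGE_2MB, maps to
 * size_to_hstate(1UL << 21), i.e. the 2 MB hstate, while 0 selects the
 * default huge page size.
 */
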
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
        return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(const struct hstate *h)
{
        return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
        return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
        return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
        return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
        return huge_page_order(h) > MAX_PAGE_ORDER;
}

static inline unsigned int pages_per_huge_page(const struct hstate *h)
{
        return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
        return huge_page_size(h) / 512;
}

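/*
 * Worked example, assuming PAGE_SHIFT == 12 and a 2 MB hstate (order 9):
 * huge_page_shift() == 9 + 12 == 21, huge_page_size() == 4096 << 9 == 2 MB,
 * pages_per_huge_page() == 512 and blocks_per_huge_page() == 2 MB / 512
 * == 4096 sectors.
 */
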
static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h,
                                struct address_space *mapping, pgoff_t idx)
{
        return filemap_lock_folio(mapping, idx << huge_page_order(h));
}

#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
                                        unsigned long addr, unsigned long len)
{
        return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugetlb_flags
static inline void arch_clear_hugetlb_flags(struct folio *folio) { }
#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
                                       vm_flags_t flags)
{
        return pte_mkhuge(entry);
}
#endif

static inline struct hstate *folio_hstate(struct folio *folio)
{
        VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
        return size_to_hstate(folio_size(folio));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
        return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
        return h - hstates;
}

int dissolve_free_hugetlb_folio(struct folio *folio);
int dissolve_free_hugetlb_folios(unsigned long start_pfn,
                                 unsigned long end_pfn);

#ifdef CONFIG_MEMORY_FAILURE
extern void folio_clear_hugetlb_hwpoison(struct folio *folio);
#else
static inline void folio_clear_hugetlb_hwpoison(struct folio *folio)
{
}
#endif

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
        if ((huge_page_shift(h) == PMD_SHIFT) ||
                (huge_page_shift(h) == PUD_SHIFT) ||
                        (huge_page_shift(h) == PGDIR_SHIFT))
                return true;
        else
                return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
        return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
        return arch_hugetlb_migration_supported(h);
}

/*
 * Movability check is different as compared to migration check.
 * It determines whether or not a huge page should be placed on
 * movable zone or not. Movability of any huge page should be
 * required only if huge page size is supported for migration.
 * There won't be any reason for the huge page to be movable if
 * it is not migratable to start with. Also the size of the huge
 * page should be large enough to be placed under a movable zone
 * and still feasible enough to be migratable. Just the presence
 * in movable zone does not make the migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones
 * are migratable they should not be movable because it's not
 * feasible to migrate them from movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
        if (!hugepage_migration_supported(h))
                return false;

        if (hstate_is_gigantic(h))
                return false;

        return true;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
        gfp_t gfp = __GFP_COMP | __GFP_NOWARN;

        gfp |= hugepage_movable_supported(h) ? GFP_HIGHUSER_MOVABLE : GFP_HIGHUSER;

        return gfp;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
        gfp_t modified_mask = htlb_alloc_mask(h);

        /* Some callers might want to enforce node */
        modified_mask |= (gfp_mask & __GFP_THISNODE);

        modified_mask |= (gfp_mask & __GFP_NOWARN);

        return modified_mask;
}

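/*
 * Note that only __GFP_THISNODE and __GFP_NOWARN are taken over from the
 * caller's mask; zone selection stays with htlb_alloc_mask(). E.g. a
 * caller passing GFP_KERNEL | __GFP_THISNODE gets back
 * htlb_alloc_mask(h) | __GFP_THISNODE.
 */
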
static inline bool htlb_allow_alloc_fallback(int reason)
{
        bool allowed_fallback = false;

        /*
         * Note: the memory offline, memory failure and migration syscalls will
         * be allowed to fall back to other nodes due to lack of a better choice,
         * which might break the per-node hugetlb pool. While other cases will
         * set the __GFP_THISNODE to avoid breaking the per-node hugetlb pool.
         */
        switch (reason) {
        case MR_MEMORY_HOTPLUG:
        case MR_MEMORY_FAILURE:
        case MR_SYSCALL:
        case MR_MEMPOLICY_MBIND:
                allowed_fallback = true;
                break;
        default:
                break;
        }

        return allowed_fallback;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                           struct mm_struct *mm, pte_t *pte)
{
        const unsigned long size = huge_page_size(h);

        VM_WARN_ON(size == PAGE_SIZE);

        /*
         * hugetlb must use the exact same PT locks as core-mm page table
         * walkers would. When modifying a PTE table, hugetlb must take the
         * PTE PT lock, when modifying a PMD table, hugetlb must take the PMD
         * PT lock etc.
         *
         * The expectation is that any hugetlb folio smaller than a PMD is
         * always mapped into a single PTE table and that any hugetlb folio
         * smaller than a PUD (but at least as big as a PMD) is always mapped
         * into a single PMD table.
         *
         * If that does not hold for an architecture, then that architecture
         * must disable split PT locks such that all *_lockptr() functions
         * will give us the same result: the per-MM PT lock.
         *
         * Note that with e.g., CONFIG_PGTABLE_LEVELS=2 where
         * PGDIR_SIZE==P4D_SIZE==PUD_SIZE==PMD_SIZE, we'd use pud_lockptr()
         * and core-mm would use pmd_lockptr(). However, in such configurations
         * split PMD locks are disabled -- they don't make sense on a single
         * PGDIR page table -- and the end result is the same.
         */
        if (size >= PUD_SIZE)
                return pud_lockptr(mm, (pud_t *) pte);
        else if (size >= PMD_SIZE || IS_ENABLED(CONFIG_HIGHPTE))
                return pmd_lockptr(mm, (pmd_t *) pte);
        /* pte_alloc_huge() only applies with !CONFIG_HIGHPTE */
        return ptep_lockptr(mm, pte);
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_init(struct mm_struct *mm)
{
        atomic_long_set(&mm->hugetlb_usage, 0);
}

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
        atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
        atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
                                                unsigned long addr, pte_t *ptep)
{
        return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
                                                unsigned long addr, pte_t *ptep,
                                                pte_t old_pte, pte_t pte)
{
        unsigned long psize = huge_page_size(hstate_vma(vma));

        set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
}
#endif

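/*
 * Sketch of the change-protection pattern these two hooks implement
 * (hugetlb_change_protection() in mm/hugetlb.c is the real caller):
 *
 *      old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *      pte = huge_pte_modify(old_pte, newprot);
 *      huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte);
 */
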
#ifdef CONFIG_NUMA
void hugetlb_register_node(struct node *node);
void hugetlb_unregister_node(struct node *node);
#endif

/*
 * Check if a given raw @page in a hugepage is HWPOISON.
 */
bool is_raw_hwpoison_page_in_hugepage(struct page *page);

static inline unsigned long huge_page_mask_align(struct file *file)
{
        return PAGE_MASK & ~huge_page_mask(hstate_file(file));
}

#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline unsigned long huge_page_mask_align(struct file *file)
{
        return 0;
}

static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
{
        return NULL;
}

static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h,
                                struct address_space *mapping, pgoff_t idx)
{
        return NULL;
}

static inline int isolate_or_dissolve_huge_page(struct page *page,
                                                struct list_head *list)
{
        return -ENOMEM;
}

static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
                                                unsigned long addr,
                                                int avoid_reserve)
{
        return NULL;
}

static inline struct folio *
alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
                            nodemask_t *nmask, gfp_t gfp_mask)
{
        return NULL;
}

static inline struct folio *
alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
                        nodemask_t *nmask, gfp_t gfp_mask,
                        bool allow_alloc_fallback)
{
        return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
        return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
        return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
        return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
        return NULL;
}

static inline struct hstate *folio_hstate(struct folio *folio)
{
        return NULL;
}

static inline struct hstate *size_to_hstate(unsigned long size)
{
        return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
        return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
        return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
        return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
        return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
        return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
        return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
        return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
        return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
        return 0;
}

static inline int hstate_index(struct hstate *h)
{
        return 0;
}

static inline int dissolve_free_hugetlb_folio(struct folio *folio)
{
        return 0;
}

static inline int dissolve_free_hugetlb_folios(unsigned long start_pfn,
                                               unsigned long end_pfn)
{
        return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
        return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
        return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
        return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
        return 0;
}

static inline bool htlb_allow_alloc_fallback(int reason)
{
        return false;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                           struct mm_struct *mm, pte_t *pte)
{
        return &mm->page_table_lock;
}

static inline void hugetlb_count_init(struct mm_struct *mm)
{
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
                                          unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_MMU
        return ptep_get(ptep);
#else
        return *ptep;
#endif
}

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t pte, unsigned long sz)
{
}

static inline void hugetlb_register_node(struct node *node)
{
}

static inline void hugetlb_unregister_node(struct node *node)
{
}

static inline bool hugetlbfs_pagecache_present(
        struct hstate *h, struct vm_area_struct *vma, unsigned long address)
{
        return false;
}

#endif /* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
                                        struct mm_struct *mm, pte_t *pte)
{
        spinlock_t *ptl;

        ptl = huge_pte_lockptr(h, mm, pte);
        spin_lock(ptl);
        return ptl;
}

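/*
 * Usage sketch, assuming ptep was obtained under the locking rules
 * described above huge_pte_offset():
 *
 *      ptl = huge_pte_lock(h, mm, ptep);
 *      ... examine or update the huge PTE ...
 *      spin_unlock(ptl);
 */
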
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
#endif

#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
static inline bool hugetlb_pmd_shared(pte_t *pte)
{
        return page_count(virt_to_page(pte)) > 1;
}
#else
static inline bool hugetlb_pmd_shared(pte_t *pte)
{
        return false;
}
#endif

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);

#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
 * implement this.
 */
#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
#endif

static inline bool __vma_shareable_lock(struct vm_area_struct *vma)
{
        return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data;
}

bool __vma_private_lock(struct vm_area_struct *vma);

/*
 * Safe version of huge_pte_offset() to check the locks.  See comments
 * above huge_pte_offset().
 */
static inline pte_t *
hugetlb_walk(struct vm_area_struct *vma, unsigned long addr, unsigned long sz)
{
#if defined(CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING) && defined(CONFIG_LOCKDEP)
        struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

        /*
         * If pmd sharing is possible, locking is needed to safely walk the
         * hugetlb pgtables.  More information can be found at the comment
         * above huge_pte_offset() in the same file.
         *
         * NOTE: lockdep_is_held() is only defined with CONFIG_LOCKDEP.
         */
        if (__vma_shareable_lock(vma))
                WARN_ON_ONCE(!lockdep_is_held(&vma_lock->rw_sema) &&
                             !lockdep_is_held(
                                 &vma->vm_file->f_mapping->i_mmap_rwsem));
#endif
        return huge_pte_offset(vma->vm_mm, addr, sz);
}

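/*
 * Minimal usage sketch for a shared mapping; holding the hugetlb vma
 * lock satisfies the lockdep assertion above:
 *
 *      hugetlb_vma_lock_read(vma);
 *      ptep = hugetlb_walk(vma, addr, huge_page_size(h));
 *      if (ptep)
 *              ... ptep is stable until the unlock ...
 *      hugetlb_vma_unlock_read(vma);
 */
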
#endif /* _LINUX_HUGETLB_H */