// SPDX-License-Identifier: GPL-2.0
/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/compat.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/elf.h>
#include <asm/mpx.h>

#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	unsigned long start = address;
	unsigned long vpfn = address / PAGE_SIZE;	/* page index of the hint address */
	struct vm_area_struct *vma;
	struct page *page;
	pte_t *pte;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

	WARN_ON(!PageHead(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

#else

/*
 * pmd_huge() returns 1 if @pmd is hugetlb related entry, that is normal
 * hugetlb entry or non-present (migration or hwpoisoned) hugetlb entry.
 * Otherwise, returns 0.
 */
int pmd_huge(pmd_t pmd)
{
	return !pmd_none(pmd) &&
		(pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
}

int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_PSE);
}
#endif
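
/*
 * Note: on x86 a huge PMD maps 2 MB (4 MB with 32-bit non-PAE paging) and a
 * huge PUD maps 1 GB. Unlike pmd_huge(), pud_huge() only tests _PAGE_PSE and
 * therefore does not report non-present (migration/hwpoison) entries.
 */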

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = get_mmap_base(1);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 */
	info.high_limit = in_compat_syscall() ?
		task_size_32bit() : task_size_64bit(addr > DEFAULT_MAP_WINDOW);

	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
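
/*
 * In this helper and the topdown one below, align_mask = PAGE_MASK &
 * ~huge_page_mask(h) together with align_offset = 0 asks vm_unmapped_area()
 * for an address aligned to the huge page size of @h, not just to PAGE_SIZE.
 */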

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = get_mmap_base(0);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 */
	if (addr > DEFAULT_MAP_WINDOW && !in_compat_syscall())
		info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;

	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE_LOW;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
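
/*
 * Both helpers above only hand out addresses beyond DEFAULT_MAP_WINDOW (the
 * legacy 47-bit boundary) when the caller passed a hint address above it;
 * otherwise the search is confined to the default map window.
 */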

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;

	addr = mpx_unmapped_area_check(addr, len, flags);
	if (IS_ERR_VALUE(addr))
		return addr;

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
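
/*
 * Userspace reaches hugetlb_get_unmapped_area() via mmap() of a hugetlbfs
 * file or of anonymous memory with MAP_HUGETLB. A minimal illustrative
 * sketch (not part of this file; assumes a 2 MB default huge page size):
 *
 *	void *p = mmap(NULL, 2UL << 20, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
 *
 * On success p is huge-page aligned; on failure mmap() returns MAP_FAILED
 * (e.g. when no huge pages have been reserved).
 */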
#endif /* CONFIG_HUGETLB_PAGE */

static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);
	if (ps == PMD_SIZE) {
		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	} else if (ps == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES)) {
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	} else {
		hugetlb_bad_size();
		printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
			ps >> 20);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
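
/*
 * Example (kernel command line): "hugepagesz=1G hugepages=4" makes this
 * handler register the 1 GB hstate (requires X86_FEATURE_GBPAGES); the
 * generic hugepages= parameter that follows then reserves four such pages.
 */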

#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
static __init int gigantic_pages_init(void)
{
	/* With compaction or CMA we can allocate gigantic pages at runtime */
	if (boot_cpu_has(X86_FEATURE_GBPAGES) && !size_to_hstate(1UL << PUD_SHIFT))
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	return 0;
}
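
/*
 * With the 1 GB hstate registered, gigantic pages can also be allocated at
 * runtime, e.g. by writing to
 * /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages.
 */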
arch_initcall(gigantic_pages_init);
#endif