// SPDX-License-Identifier: GPL-2.0
/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/compat.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/elf.h>

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info = {};

	info.length = len;
	info.low_limit = get_mmap_base(1);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 */
	info.high_limit = in_32bit_syscall() ?
		task_size_32bit() : task_size_64bit(addr > DEFAULT_MAP_WINDOW);
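
	/*
	 * Mask of the bits between PAGE_SHIFT and the huge page shift; this
	 * makes vm_unmapped_area() return an address aligned to the huge
	 * page size of this hstate.
	 */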
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	return vm_unmapped_area(&info);
}

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info = {};
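
	/* Search downward from high_limit rather than upward from low_limit. */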
	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = get_mmap_base(0);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 */
	if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
		info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;

	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
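		/* Retry bottom-up over the default low address range. */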
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE_LOW;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
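
/*
 * Arch hook for hugetlbfs mmap(): validate the length and any hint address,
 * then run a bottom-up or top-down search depending on the mm's layout.
 */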
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;

	if (len > TASK_SIZE)
		return -ENOMEM;

	/* No address checking. See comment at mmap_address_hint_valid() */
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr &= huge_page_mask(h);
		if (!mmap_address_hint_valid(addr, len))
			goto get_unmapped_area;

		vma = find_vma(mm, addr);
		if (!vma || addr + len <= vm_start_gap(vma))
			return addr;
	}

get_unmapped_area:
	if (!test_bit(MMF_TOPDOWN, &mm->flags))
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

#endif /* CONFIG_HUGETLB_PAGE */
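
/*
 * Valid hugetlb page sizes on x86: PMD-sized pages are always supported;
 * PUD-sized pages (1G on 64-bit) only when the CPU has X86_FEATURE_GBPAGES.
 */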
bool __init arch_hugetlb_valid_size(unsigned long size)
{
	if (size == PMD_SIZE)
		return true;
	else if (size == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES))
		return true;
	else
		return false;
}

#ifdef CONFIG_CONTIG_ALLOC
static __init int gigantic_pages_init(void)
{
	/* With compaction or CMA we can allocate gigantic pages at runtime */
	if (boot_cpu_has(X86_FEATURE_GBPAGES))
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	return 0;
}
arch_initcall(gigantic_pages_init);
#endif