/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
        if (pud)
                pte = (pte_t *) pmd_alloc(mm, pud, addr);
        BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));

        return pte;
}
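/*
 * Usage sketch (illustrative, not part of this file): the generic
 * hugetlb fault path in mm/hugetlb.c allocates the huge PTE slot
 * before installing a page, roughly:
 *
 *	pte_t *pte = huge_pte_alloc(mm, address & HPAGE_MASK);
 *	if (!pte)
 *		return VM_FAULT_OOM;
 *
 * The surrounding caller code above is a sketch; only the
 * huge_pte_alloc() call itself is defined here.
 */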
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd = NULL;

        pgd = pgd_offset(mm, addr);
        if (pgd_present(*pgd)) {
                pud = pud_offset(pgd, addr);
                if (pud_present(*pud))
                        pmd = pmd_offset(pud, addr);
        }
        return (pte_t *) pmd;
}
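/*
 * On IA-32 a huge page is mapped by a single PMD entry (with the PSE
 * bit set), so the walk above deliberately stops one level short of
 * the 4K PTE tables and returns the PMD slot cast to pte_t *.  A
 * typical lookup by a caller would be (sketch, assumed caller):
 *
 *	pte_t *pte = huge_pte_offset(mm, address);
 *	if (pte && !pte_none(*pte))
 *		page = pte_page(*pte);
 */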
#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        unsigned long start = address;
        unsigned long vpfn = address / PAGE_SIZE;	/* virtual pfn */
        struct page *page;
        pte_t *pte;
        struct vm_area_struct *vma;

        vma = find_vma(mm, start);
        if (!vma || !is_vm_hugetlb_page(vma))
                return ERR_PTR(-EINVAL);

        pte = huge_pte_offset(mm, address);

        /* hugetlb should be locked, and hence, prefaulted */
        WARN_ON(!pte || pte_none(*pte));

        page = &pte_page(*pte)[vpfn % (HPAGE_SIZE / PAGE_SIZE)];

        WARN_ON(!PageCompound(page));

        return page;
}
int pmd_huge(pmd_t pmd)
{
        return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
{
        return NULL;
}

#else
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        return ERR_PTR(-EINVAL);
}
int pmd_huge(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_PSE);
}
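/*
 * _PAGE_PSE is the x86 "page size extension" bit in a PMD entry: when
 * set, the entry maps one large page directly instead of pointing to a
 * page table of 4K PTEs.  Testing it is therefore exactly the question
 * "is this PMD a huge mapping?", which is all pmd_huge() answers.
 */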
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
{
        struct page *page;

        page = pte_page(*(pte_t *)pmd);
        if (page)
                page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
        return page;
}
#endif
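/*
 * Worked example of the offset arithmetic above, assuming 4M huge
 * pages (~HPAGE_MASK == 0x3fffff, PAGE_SHIFT == 12): for address
 * 0x40201234, address & ~HPAGE_MASK == 0x201234, and 0x201234 >> 12
 * == 0x201, so the result is the 0x201st 4K sub-page of the huge
 * page's compound page array.
 */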
/* x86_64 also uses this file */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
                unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr;

        if (len > mm->cached_hole_size) {
                start_addr = mm->free_area_cache;
        } else {
                start_addr = TASK_UNMAPPED_BASE;
                mm->cached_hole_size = 0;
        }

full_search:
        addr = ALIGN(start_addr, HPAGE_SIZE);

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point: (!vma || addr < vma->vm_end). */
                if (TASK_SIZE - len < addr) {
                        /*
                         * Start a new search - just in case we missed
                         * some holes.
                         */
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = TASK_UNMAPPED_BASE;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (!vma || addr + len <= vma->vm_start) {
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;
                addr = ALIGN(vma->vm_end, HPAGE_SIZE);
        }
}
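/*
 * The search above relies on two per-mm hints: free_area_cache, where
 * the last successful search ended, and cached_hole_size, the largest
 * hole known to lie below free_area_cache.  Together they let repeated
 * huge-page mmaps avoid re-walking every VMA from TASK_UNMAPPED_BASE
 * unless the cached position cannot possibly fit the request.
 */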
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
                unsigned long addr0, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *prev_vma;
        unsigned long base = mm->mmap_base, addr = addr0;
        unsigned long largest_hole = mm->cached_hole_size;
        int first_time = 1;

        /* don't allow allocations above current base */
        if (mm->free_area_cache > base)
                mm->free_area_cache = base;

        if (len <= largest_hole) {
                largest_hole = 0;
                mm->free_area_cache = base;
        }
try_again:
        /* make sure it can fit in the remaining address space */
        if (mm->free_area_cache < len)
                goto fail;

        /* either no address requested or can't fit in requested address hole */
        addr = (mm->free_area_cache - len) & HPAGE_MASK;
        do {
                /*
                 * Lookup failure means no vma is above this address,
                 * i.e. return with success:
                 */
                if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
                        return addr;

                /*
                 * new region fits between prev_vma->vm_end and
                 * vma->vm_start, use it:
                 */
                if (addr + len <= vma->vm_start &&
                    (!prev_vma || (addr >= prev_vma->vm_end))) {
                        /* remember the address as a hint for next time */
                        mm->cached_hole_size = largest_hole;
                        return (mm->free_area_cache = addr);
                } else {
                        /* pull free_area_cache down to the first hole */
                        if (mm->free_area_cache == vma->vm_end) {
                                mm->free_area_cache = vma->vm_start;
                                mm->cached_hole_size = largest_hole;
                        }
                }

                /* remember the largest hole we saw so far */
                if (addr + largest_hole < vma->vm_start)
                        largest_hole = vma->vm_start - addr;

                /* try just below the current vma->vm_start */
                addr = (vma->vm_start - len) & HPAGE_MASK;
        } while (len <= vma->vm_start);

fail:
        /*
         * if hint left us with no space for the requested
         * mapping then try again:
         */
        if (first_time) {
                mm->free_area_cache = base;
                largest_hole = 0;
                first_time = 0;
                goto try_again;
        }
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        mm->cached_hole_size = ~0UL;
        addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
                        len, pgoff, flags);

        /*
         * Restore the topdown base:
         */
        mm->free_area_cache = base;
        mm->cached_hole_size = ~0UL;

        return addr;
}
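/*
 * Note on the retry structure above: the first failure merely resets
 * the cache hints to the top ("base") and re-runs the top-down walk;
 * only a second failure falls back to the bottom-up allocator, after
 * which the top-down base is restored so later requests still prefer
 * top-down placement.
 */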
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (addr) {
                addr = ALIGN(addr, HPAGE_SIZE);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}

#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
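/*
 * Userspace sketch (assumed setup, not part of this file): these hooks
 * run when a process mmaps a file backed by a mounted hugetlbfs, e.g.:
 *
 *	int fd = open("/mnt/huge/buf", O_CREAT | O_RDWR, 0755);
 *	size_t len = 4 * 1024 * 1024;	// multiple of the huge page size
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 * The path "/mnt/huge" is an assumed mount point.  hugetlbfs routes
 * its get_unmapped_area to hugetlb_get_unmapped_area() above, which
 * rejects lengths that are not HPAGE_SIZE-aligned and otherwise hands
 * back an HPAGE_SIZE-aligned range.
 */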