/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * TILE Huge TLB Page Support for Kernel.
 * Taken from i386 hugetlb implementation:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
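
/*
 * Allocate (if necessary) and return a PTE slot into which a huge page
 * mapping for "addr" can be installed, walking from the PGD down and
 * allocating intermediate levels as needed.  Since a huge page occupies
 * an entire PMD, the PMD entry itself is returned, cast to a pte_t
 * pointer.
 */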
pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pte_t *pte = NULL;

	/* We do not yet support multiple huge page sizes. */
	BUG_ON(sz != PMD_SIZE);

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud)
		pte = (pte_t *) pmd_alloc(mm, pud, addr);
	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));

	return pte;
}
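
/*
 * Look up the PTE slot for an existing huge page mapping at "addr"
 * without allocating any page table levels; returns NULL if the PGD
 * or PUD entry is not present.
 */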
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (pud_present(*pud))
			pmd = pmd_offset(pud, addr);
	}
	return (pte_t *) pmd;
}
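
/*
 * The HUGETLB_TEST variants below are debugging-only implementations
 * inherited from the i386 hugetlb code; they are not built in normal
 * configurations.
 */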
#ifdef HUGETLB_TEST
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write)
{
	unsigned long start = address;
	/* Virtual page frame number; left undeclared in the original test code. */
	unsigned long vpfn = start >> PAGE_SHIFT;
	struct page *page;
	struct vm_area_struct *vma;
	pte_t *pte;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address);

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

	WARN_ON(!PageHead(page));

	return page;
}
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	return NULL;
}

#else
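
/*
 * Returning -EINVAL here tells the generic follow_page() path to walk
 * the page tables itself, using pmd_huge()/pud_huge() and the
 * follow_huge_pmd()/follow_huge_pud() helpers below.
 */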
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write)
{
	return ERR_PTR(-EINVAL);
}
int pmd_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE_PAGE);
}

int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
}
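
/*
 * A huge PMD (or PUD) entry uses the same format as a PTE on TILE, so
 * it can be cast to pte_t to recover the head page, then offset by the
 * number of base pages into the huge page.  For example, with 64 KB
 * base pages and 16 MB huge pages (one possible configuration),
 * (address & ~PMD_MASK) >> PAGE_SHIFT selects one of 256 base-page
 * slices of the huge page.
 */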
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	struct page *page;

	page = pte_page(*(pte_t *)pmd);
	if (page)
		page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
	return page;
}
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
			     pud_t *pud, int write)
{
	struct page *page;

	page = pte_page(*(pte_t *)pud);
	if (page)
		page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
	return page;
}
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	/* PMD sharing is not supported, so there is never anything to do. */
	return 0;
}

#endif /* HUGETLB_TEST */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
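
/*
 * Bottom-up search: starting from the cached free-area hint (or
 * TASK_UNMAPPED_BASE), walk the VMA list upward until a gap large
 * enough for "len" bytes, aligned to the huge page size, is found.
 * mm->free_area_cache and mm->cached_hole_size cache the search
 * position and the largest hole seen, to avoid rescanning from the
 * bottom on every mmap().
 */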
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	if (len > mm->cached_hole_size) {
		start_addr = mm->free_area_cache;
	} else {
		start_addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

full_search:
	addr = ALIGN(start_addr, huge_page_size(h));

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = ALIGN(vma->vm_end, huge_page_size(h));
	}
}
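
/*
 * Top-down search: start just below mm->mmap_base and walk downward,
 * trying each gap between VMAs.  If the descent fails, retry once from
 * the base, and as a last resort fall back to the bottom-up allocator
 * above.
 */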
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev_vma;
	unsigned long base = mm->mmap_base, addr = addr0;
	unsigned long largest_hole = mm->cached_hole_size;
	int first_time = 1;

	/* don't allow allocations above current base */
	if (mm->free_area_cache > base)
		mm->free_area_cache = base;

	if (len <= largest_hole) {
		largest_hole = 0;
		mm->free_area_cache = base;
	}
try_again:
	/* make sure it can fit in the remaining address space */
	if (mm->free_area_cache < len)
		goto fail;

	/* either no address requested or can't fit in requested address hole */
	addr = (mm->free_area_cache - len) & huge_page_mask(h);
	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * i.e. return with success:
		 */
		vma = find_vma_prev(mm, addr, &prev_vma);
		if (!vma)
			return addr;

		/*
		 * new region fits between prev_vma->vm_end and
		 * vma->vm_start, use it:
		 */
		if (addr + len <= vma->vm_start &&
		    (!prev_vma || (addr >= prev_vma->vm_end))) {
			/* remember the address as a hint for next time */
			mm->cached_hole_size = largest_hole;
			mm->free_area_cache = addr;
			return addr;
		} else {
			/* pull free_area_cache down to the first hole */
			if (mm->free_area_cache == vma->vm_end) {
				mm->free_area_cache = vma->vm_start;
				mm->cached_hole_size = largest_hole;
			}
		}

		/* remember the largest hole we saw so far */
		if (addr + largest_hole < vma->vm_start)
			largest_hole = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = (vma->vm_start - len) & huge_page_mask(h);
	} while (len <= vma->vm_start);

fail:
	/*
	 * if hint left us with no space for the requested
	 * mapping then try again:
	 */
	if (first_time) {
		mm->free_area_cache = base;
		largest_hole = 0;
		first_time = 0;
		goto try_again;
	}
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;
	addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
			len, pgoff, flags);

	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = base;
	mm->cached_hole_size = ~0UL;

	return addr;
}
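
/*
 * Main entry point for hugetlb mmap placement: validate the length and
 * any requested address, honor MAP_FIXED, then dispatch to the
 * bottom-up or top-down search to match the process's normal mmap
 * layout.
 */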
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (current->mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
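
/*
 * Parse the "hugepagesz=" kernel boot parameter and register the
 * corresponding hstate.  Only the PMD- and PUD-level sizes are
 * accepted; for example, a configuration with 16 MB PMD-level huge
 * pages would be selected on the boot command line with something like
 * "hugepagesz=16M hugepages=8" (the exact sizes available depend on
 * PAGE_SHIFT and the page table layout).
 */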
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);
	if (ps == PMD_SIZE) {
		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	} else if (ps == PUD_SIZE) {
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	} else {
		pr_err("hugepagesz: Unsupported page size %lu M\n",
			ps >> 20);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/