/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff
 */
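/*
 * (Huge pages are aligned to HPAGE_SIZE, which is much larger than the
 * D-cache alias distance, so a correctly aligned huge mapping already
 * satisfies the coloring constraints the small-page allocator has to
 * enforce explicitly.)
 */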
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
							unsigned long addr,
							unsigned long len,
							unsigned long pgoff,
							unsigned long flags)
{
	unsigned long task_size = TASK_SIZE;
	struct vm_unmapped_area_info info;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = min(task_size, VA_EXCLUDE_START);
	info.align_mask = PAGE_MASK & ~HPAGE_MASK;
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
		VM_BUG_ON(addr != -ENOMEM);
		info.low_limit = VA_EXCLUDE_END;
		info.high_limit = task_size;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
				  const unsigned long len,
				  const unsigned long pgoff,
				  const unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* This should only ever run for 32-bit processes.  */
	BUG_ON(!test_thread_flag(TIF_32BIT));
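	/* (This walk is only reached from hugetlb_get_unmapped_area() when
	 * the mm uses the top-down mmap layout, and on sparc64 that layout
	 * is normally picked only for compat tasks - hence the BUG_ON.)
	 */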
	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = PAGE_MASK & ~HPAGE_MASK;
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = STACK_TOP32;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, HPAGE_SIZE);
		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

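/*
 * Illustrative only: a userspace mapping such as
 *
 *	mmap(NULL, 8UL << 20, PROT_READ | PROT_WRITE,
 *	     MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
 *
 * ends up in this hook via hugetlbfs' ->get_unmapped_area; by the time we
 * get here len is expected to be HPAGE_SIZE aligned, anything else is
 * rejected with -EINVAL above.  (The 8MB length is just an example.)
 */
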
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud)
		pte = (pte_t *)pmd_alloc(mm, pud, addr);

	return pte;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud))
			pte = (pte_t *)pmd_offset(pud, addr);
	}
	return pte;
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	pte_t orig;

	if (!pte_present(*ptep) && pte_present(entry))
		mm->context.hugetlb_pte_count++;

	addr &= HPAGE_MASK;
	orig = *ptep;
	*ptep = entry;

	/* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
	maybe_tlb_batch_add(mm, addr, ptep, orig, 0);
	maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0);
}

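/*
 * An HPAGE_SIZE huge page is backed by two REAL_HPAGE_SIZE hardware
 * mappings (HPAGE_SIZE == 2 * REAL_HPAGE_SIZE), which is why the set and
 * clear paths each queue a TLB flush for both halves of the page.
 */
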
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	pte_t entry;

	entry = *ptep;
	if (pte_present(entry))
		mm->context.hugetlb_pte_count--;

	addr &= HPAGE_MASK;
	*ptep = __pte(0UL);

	/* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
	maybe_tlb_batch_add(mm, addr, ptep, entry, 0);
	maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0);

	return entry;
}

int pmd_huge(pmd_t pmd)
{
	return !pmd_none(pmd) &&
		(pmd_val(pmd) & (_PAGE_VALID|_PAGE_PMD_HUGE)) != _PAGE_VALID;
}

int pud_huge(pud_t pud)
{
	return 0;
}

static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
				   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);

	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	atomic_long_dec(&tlb->mm->nr_ptes);
}

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		if (is_hugetlb_pmd(*pmd))
			pmd_clear(pmd);
		else
			hugetlb_free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
				       ceiling);
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
}

void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(tlb->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}