/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff.
 */
#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))
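
/* These constants bracket the sparc64 virtual address hole; the extra
 * 1UL << 32 (4 GB) on each side is a guard band so the hugepage search
 * never hands out a range that touches the hole.
 */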

static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
							unsigned long addr,
							unsigned long len,
							unsigned long pgoff,
							unsigned long flags)
{
	unsigned long task_size = TASK_SIZE;
	struct vm_unmapped_area_info info;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = min(task_size, VA_EXCLUDE_START);
	info.align_mask = PAGE_MASK & ~HPAGE_MASK;
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);
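
	/* The first pass searched below the VA hole; if it failed and the
	 * task's address space extends beyond the hole, retry above it.
	 */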
	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
		VM_BUG_ON(addr != -ENOMEM);
		info.low_limit = VA_EXCLUDE_END;
		info.high_limit = task_size;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
				  const unsigned long len,
				  const unsigned long pgoff,
				  const unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* This should only ever run for 32-bit processes.  */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = PAGE_MASK & ~HPAGE_MASK;
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = STACK_TOP32;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}
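
	/* Try to honor the caller's address hint once it has been rounded
	 * up to a hugepage boundary and shown to be free.
	 */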
	if (addr) {
		addr = ALIGN(addr, HPAGE_SIZE);
		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
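
	/* Otherwise defer to the mm's layout: bottom-up when the legacy
	 * arch_get_unmapped_area is in use, top-down otherwise.
	 */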
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	/* We must align the address, because our caller will run
	 * set_huge_pte_at() on whatever we return, which writes out
	 * all of the sub-ptes for the hugepage range.  So we have
	 * to give it the first such sub-pte.
	 */
	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			pte = pte_alloc_map(mm, NULL, pmd, addr);
	}
	return pte;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				pte = pte_offset_map(pmd, addr);
		}
	}
	return pte;
}
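
/* sparc64 builds huge mappings out of runs of ordinary PTEs rather than
 * shared PMDs, so there is never anything to unshare here.
 */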
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}
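
/* A huge mapping is written out as (1 << HUGETLB_PAGE_ORDER) consecutive
 * ordinary PTEs; mm->context.huge_pte_count tracks how many huge mappings
 * are currently live in this address space.
 */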
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	int i;

	if (!pte_present(*ptep) && pte_present(entry))
		mm->context.huge_pte_count++;

	addr &= HPAGE_MASK;
	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		set_pte_at(mm, addr, ptep, entry);
		ptep++;
		addr += PAGE_SIZE;
		pte_val(entry) += PAGE_SIZE;
	}
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	pte_t entry;
	int i;

	entry = *ptep;
	if (pte_present(entry))
		mm->context.huge_pte_count--;

	addr &= HPAGE_MASK;

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		pte_clear(mm, addr, ptep);
		addr += PAGE_SIZE;
		ptep++;
	}

	return entry;
}
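
/* Because huge mappings are made of ordinary PTEs, there are no huge PMDs
 * or PUDs to report: the helpers below all answer "no".
 */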
struct page *follow_huge_addr(struct mm_struct *mm,
			      unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	return NULL;
}