arch/sparc/mm/hugetlbpage.c (linux/fpc-iii.git)

/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff
 */

#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))
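
/* Bottom-up search for a free, HPAGE_SIZE-aligned range.  The first pass
 * stays below the start of the sparc64 VA hole; if that fails and the task
 * can address memory above the hole, a second pass searches the upper half.
 */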
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
                                                        unsigned long addr,
                                                        unsigned long len,
                                                        unsigned long pgoff,
                                                        unsigned long flags)
{
        unsigned long task_size = TASK_SIZE;
        struct vm_unmapped_area_info info;

        if (test_thread_flag(TIF_32BIT))
                task_size = STACK_TOP32;

        info.flags = 0;
        info.length = len;
        info.low_limit = TASK_UNMAPPED_BASE;
        info.high_limit = min(task_size, VA_EXCLUDE_START);
        info.align_mask = PAGE_MASK & ~HPAGE_MASK;
        info.align_offset = 0;
        addr = vm_unmapped_area(&info);

        if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
                VM_BUG_ON(addr != -ENOMEM);
                info.low_limit = VA_EXCLUDE_END;
                info.high_limit = task_size;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}
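
/* Top-down search, used for 32-bit tasks with the non-legacy mmap layout:
 * work down from mmap_base and, if that fails, fall back to a bottom-up
 * search of the whole 32-bit range.
 */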
static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                                  const unsigned long len,
                                  const unsigned long pgoff,
                                  const unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        struct vm_unmapped_area_info info;

        /* This should only ever run for 32-bit processes.  */
        BUG_ON(!test_thread_flag(TIF_32BIT));

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = mm->mmap_base;
        info.align_mask = PAGE_MASK & ~HPAGE_MASK;
        info.align_offset = 0;
        addr = vm_unmapped_area(&info);

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        if (addr & ~PAGE_MASK) {
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = TASK_UNMAPPED_BASE;
                info.high_limit = STACK_TOP32;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}
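
/* Arch hook for hugetlb mmap(): validate the length, honour MAP_FIXED and
 * any address hint, then pick the bottom-up or top-down helper to match the
 * mm's regular get_unmapped_area layout.
 */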
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long task_size = TASK_SIZE;

        if (test_thread_flag(TIF_32BIT))
                task_size = STACK_TOP32;

        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (len > task_size)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr = ALIGN(addr, HPAGE_SIZE);
                vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}
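
/* Allocate the page-table path for a huge mapping.  A huge page here is
 * represented by a run of ordinary PTEs, so this just allocates down to the
 * PTE level and hands back the first sub-pte of the aligned range.
 */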
pte_t *huge_pte_alloc(struct mm_struct *mm,
                        unsigned long addr, unsigned long sz)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;

        /* We must align the address, because our caller will run
         * set_huge_pte_at() on whatever we return, which writes out
         * all of the sub-ptes for the hugepage range.  So we have
         * to give it the first such sub-pte.
         */
        addr &= HPAGE_MASK;

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
        if (pud) {
                pmd = pmd_alloc(mm, pud, addr);
                if (pmd)
                        pte = pte_alloc_map(mm, NULL, pmd, addr);
        }
        return pte;
}
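
/* Walk the page tables and return the first sub-pte of the huge page
 * covering @addr, or NULL if any level of the walk is unpopulated.
 */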
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;

        addr &= HPAGE_MASK;

        pgd = pgd_offset(mm, addr);
        if (!pgd_none(*pgd)) {
                pud = pud_offset(pgd, addr);
                if (!pud_none(*pud)) {
                        pmd = pmd_offset(pud, addr);
                        if (!pmd_none(*pmd))
                                pte = pte_offset_map(pmd, addr);
                }
        }
        return pte;
}
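
/* Hugetlb page-table pages are not shared between processes on sparc64, so
 * there is never anything to unshare here.
 */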
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
        return 0;
}
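
/* Install a huge mapping: bump the per-mm huge_pte_count when a not-present
 * pte becomes present, then write every sub-pte in the HPAGE_SIZE range,
 * each one pointing PAGE_SIZE further into the huge page.
 */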
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t entry)
{
        int i;

        if (!pte_present(*ptep) && pte_present(entry))
                mm->context.huge_pte_count++;

        addr &= HPAGE_MASK;
        for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                set_pte_at(mm, addr, ptep, entry);
                ptep++;
                addr += PAGE_SIZE;
                pte_val(entry) += PAGE_SIZE;
        }
}
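
/* Tear down a huge mapping: drop huge_pte_count for a present entry, clear
 * every sub-pte in the range, and return the original first sub-pte.
 */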
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        pte_t entry;
        int i;

        entry = *ptep;
        if (pte_present(entry))
                mm->context.huge_pte_count--;

        addr &= HPAGE_MASK;

        for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                pte_clear(mm, addr, ptep);
                addr += PAGE_SIZE;
                ptep++;
        }

        return entry;
}
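
/* Since huge pages are visible at the PTE level here, the generic
 * follow_page() path can find them on its own; returning -EINVAL lets that
 * normal page-table walk proceed.
 */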
struct page *follow_huge_addr(struct mm_struct *mm,
                              unsigned long address, int write)
{
        return ERR_PTR(-EINVAL);
}
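
/* Huge pages are built from runs of ordinary PTEs, never from leaf PMD or
 * PUD entries, so both predicates are always false.
 */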
int pmd_huge(pmd_t pmd)
{
        return 0;
}

int pud_huge(pud_t pud)
{
        return 0;
}
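
/* With pmd_huge() always false this should never be reached; it exists only
 * to satisfy the generic hugetlb interface.
 */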
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int write)
{
        return NULL;
}