Linux 3.12.39
[linux/fpc-iii.git] / arch/sparc/mm/hugetlbpage.c
blob d941cd024f22e3b7cc1ec7b2578a71b39a1475ab
/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff.
 */
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
                                                        unsigned long addr,
                                                        unsigned long len,
                                                        unsigned long pgoff,
                                                        unsigned long flags)
{
        unsigned long task_size = TASK_SIZE;
        struct vm_unmapped_area_info info;

        if (test_thread_flag(TIF_32BIT))
                task_size = STACK_TOP32;

        info.flags = 0;
        info.length = len;
        info.low_limit = TASK_UNMAPPED_BASE;
        info.high_limit = min(task_size, VA_EXCLUDE_START);
        info.align_mask = PAGE_MASK & ~HPAGE_MASK;
        info.align_offset = 0;
        addr = vm_unmapped_area(&info);

        if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
                VM_BUG_ON(addr != -ENOMEM);
                info.low_limit = VA_EXCLUDE_END;
                info.high_limit = task_size;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}
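/*
 * A note on the search parameters above, sketched with illustrative
 * numbers (the 8KB/4MB sizes are an assumption about the usual sparc64
 * configuration, not something stated in this file): vm_unmapped_area()
 * hands back an address with (addr & align_mask) == align_offset, so a
 * mask of PAGE_MASK & ~HPAGE_MASK with offset 0 forces the bits between
 * PAGE_SHIFT and HPAGE_SHIFT to zero.  With 8KB base pages (PAGE_SHIFT
 * 13) and 4MB huge pages (HPAGE_SHIFT 22) that is bits 13..21, i.e. the
 * result is HPAGE_SIZE aligned.  VA_EXCLUDE_START and VA_EXCLUDE_END
 * bracket the sparc64 virtual address hole, so the retry above the hole
 * only happens for tasks whose address space extends beyond it.
 */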
static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                                  const unsigned long len,
                                  const unsigned long pgoff,
                                  const unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        struct vm_unmapped_area_info info;

        /* This should only ever run for 32-bit processes.  */
        BUG_ON(!test_thread_flag(TIF_32BIT));

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = mm->mmap_base;
        info.align_mask = PAGE_MASK & ~HPAGE_MASK;
        info.align_offset = 0;
        addr = vm_unmapped_area(&info);

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here.  This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        if (addr & ~PAGE_MASK) {
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = TASK_UNMAPPED_BASE;
                info.high_limit = STACK_TOP32;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}
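/*
 * Both helpers test "addr & ~PAGE_MASK" to detect failure: on success
 * vm_unmapped_area() returns a page-aligned address, while error values
 * such as -ENOMEM have low bits set, and the VM_BUG_ON() asserts that
 * -ENOMEM is the only error expected here.  The bottom-up retry above
 * re-runs the search across the whole 32-bit range, from
 * TASK_UNMAPPED_BASE up to STACK_TOP32.
 */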
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long task_size = TASK_SIZE;

        if (test_thread_flag(TIF_32BIT))
                task_size = STACK_TOP32;

        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (len > task_size)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr = ALIGN(addr, HPAGE_SIZE);
                vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}
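/*
 * The dispatch above keys off mm->get_unmapped_area: that hook is set
 * when the process's mmap layout is chosen, so comparing it against
 * arch_get_unmapped_area distinguishes the legacy bottom-up layout from
 * the top-down one, and the hugepage search simply follows the same
 * direction as ordinary mmap() for this process.
 */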
pte_t *huge_pte_alloc(struct mm_struct *mm,
                        unsigned long addr, unsigned long sz)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;

        /* We must align the address, because our caller will run
         * set_huge_pte_at() on whatever we return, which writes out
         * all of the sub-ptes for the hugepage range.  So we have
         * to give it the first such sub-pte.
         */
        addr &= HPAGE_MASK;

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
        if (pud) {
                pmd = pmd_alloc(mm, pud, addr);
                if (pmd)
                        pte = pte_alloc_map(mm, NULL, pmd, addr);
        }
        return pte;
}
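/*
 * In this version of the port a huge page is not represented by a single
 * huge PMD or PUD entry.  huge_pte_alloc() walks (allocating as needed)
 * the ordinary pgd/pud/pmd levels and returns the first PTE of the
 * range; set_huge_pte_at() below then fills in every sub-PTE.  This is
 * also why pmd_huge() and pud_huge() at the bottom of the file return 0.
 */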
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;

        addr &= HPAGE_MASK;

        pgd = pgd_offset(mm, addr);
        if (!pgd_none(*pgd)) {
                pud = pud_offset(pgd, addr);
                if (!pud_none(*pud)) {
                        pmd = pmd_offset(pud, addr);
                        if (!pmd_none(*pmd))
                                pte = pte_offset_map(pmd, addr);
                }
        }
        return pte;
}
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
        return 0;
}
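/*
 * Returning 0 here tells the generic hugetlb code that no hugepage PMD
 * sharing is implemented on sparc64, so there is never anything to
 * unshare.
 */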
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t entry)
{
        int i;

        if (!pte_present(*ptep) && pte_present(entry))
                mm->context.huge_pte_count++;

        addr &= HPAGE_MASK;
        for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                set_pte_at(mm, addr, ptep, entry);
                ptep++;
                addr += PAGE_SIZE;
                pte_val(entry) += PAGE_SIZE;
        }
}
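/*
 * Each loop iteration above installs one base-page PTE and advances the
 * physical address encoded in 'entry' by PAGE_SIZE, so the huge page is
 * mapped by 1 << HUGETLB_PAGE_ORDER consecutive normal PTEs (for
 * example, 512 of them assuming 8KB base pages and 4MB huge pages).
 * The huge_pte_count bookkeeping tracks whether this address space has
 * any huge mappings at all, which other parts of the sparc64 MM code
 * consult (e.g. for huge-page TSB handling).
 */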
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        pte_t entry;
        int i;

        entry = *ptep;
        if (pte_present(entry))
                mm->context.huge_pte_count--;

        addr &= HPAGE_MASK;

        for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                pte_clear(mm, addr, ptep);
                addr += PAGE_SIZE;
                ptep++;
        }

        return entry;
}
int pmd_huge(pmd_t pmd)
{
        return 0;
}

int pud_huge(pud_t pud)
{
        return 0;
}
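/*
 * pmd_huge() and pud_huge() are queried by generic page-table walkers
 * such as follow_page(); answering 0 is consistent with the scheme
 * described above huge_pte_alloc(), where huge mappings never appear as
 * single PMD- or PUD-level entries on this port.
 */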