arch/sparc/mm/hugetlbpage.c
/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff.
 */
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
							unsigned long addr,
							unsigned long len,
							unsigned long pgoff,
							unsigned long flags)
{
	unsigned long task_size = TASK_SIZE;
	struct vm_unmapped_area_info info;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = min(task_size, VA_EXCLUDE_START);
	info.align_mask = PAGE_MASK & ~HPAGE_MASK;
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
		VM_BUG_ON(addr != -ENOMEM);
		info.low_limit = VA_EXCLUDE_END;
		info.high_limit = task_size;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
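/*
 * Explanatory note (not in the original source): vm_unmapped_area() is
 * asked for an HPAGE_SIZE-aligned address by setting align_mask to the
 * page-offset bits below the huge-page boundary.  Assuming the usual
 * sparc64 configuration of 8KB base pages and 4MB huge pages
 * (PAGE_SHIFT = 13, HPAGE_SHIFT = 22), that works out to:
 *
 *	PAGE_MASK & ~HPAGE_MASK == ~0x1fffUL & 0x3fffffUL == 0x3fe000
 *
 * i.e. bits 13..21 of the returned address must be clear, which makes
 * the result 4MB aligned.  The retry in the function above widens the
 * search to the region beyond the sparc64 VA hole
 * (VA_EXCLUDE_END..task_size) once the area below the hole is exhausted.
 */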
static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
				  const unsigned long len,
				  const unsigned long pgoff,
				  const unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* This should only ever run for 32-bit processes.  */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = PAGE_MASK & ~HPAGE_MASK;
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = STACK_TOP32;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
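/*
 * Explanatory note (not in the original source): vm_unmapped_area()
 * returns either a page-aligned address or a negative errno, so the
 * "addr & ~PAGE_MASK" tests in both helpers above are a cheap way of
 * asking "did the search fail?".  The VM_BUG_ON() calls document the
 * assumption that -ENOMEM is the only failure expected here; on
 * -ENOMEM the top-down variant retries bottom-up over the 32-bit
 * range, as described in the comment above.
 */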
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, HPAGE_SIZE);
		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
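/*
 * Illustrative sketch (not part of this file): hugetlb_get_unmapped_area()
 * is reached when user space creates a huge-page mapping, for example via
 * an anonymous MAP_HUGETLB mmap() or a file on hugetlbfs.  Roughly:
 *
 *	void *p = mmap(NULL, 8UL << 20, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
 *
 * With no address hint, the request falls through to one of the two
 * helpers above, depending on whether the mm uses the bottom-up or
 * top-down mmap layout.
 */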
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	/* We must align the address, because our caller will run
	 * set_huge_pte_at() on whatever we return, which writes out
	 * all of the sub-ptes for the hugepage range.  So we have
	 * to give it the first such sub-pte.
	 */
	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, addr);
	}
	return pte;
}
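/*
 * Explanatory note (not in the original source): huge_pte_alloc() walks
 * all the way down to the ordinary PTE level because this implementation
 * backs a huge page with (1 << HUGETLB_PAGE_ORDER) consecutive normal
 * PTEs rather than a single huge PMD entry.  Assuming 8KB base pages and
 * 4MB huge pages, HUGETLB_PAGE_ORDER is 22 - 13 = 9, i.e. 512 sub-ptes
 * per huge page, which is the loop count used by set_huge_pte_at() and
 * huge_ptep_get_and_clear() below.
 */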
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				pte = pte_offset_map(pmd, addr);
		}
	}
	return pte;
}
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	int i;

	if (!pte_present(*ptep) && pte_present(entry))
		mm->context.huge_pte_count++;

	addr &= HPAGE_MASK;
	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		set_pte_at(mm, addr, ptep, entry);
		ptep++;
		addr += PAGE_SIZE;
		pte_val(entry) += PAGE_SIZE;
	}
}
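/*
 * Explanatory note (not in the original source): each iteration above
 * bumps pte_val(entry) by PAGE_SIZE, so the i-th sub-pte maps the i-th
 * base page of the huge page's physical range.  The huge_pte_count
 * bookkeeping tracks how many huge pages this address space maps; it is
 * consulted elsewhere in the sparc64 mm code (e.g. the huge-page TSB
 * handling), not by this file.
 */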
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	pte_t entry;
	int i;

	entry = *ptep;
	if (pte_present(entry))
		mm->context.huge_pte_count--;

	addr &= HPAGE_MASK;

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		pte_clear(mm, addr, ptep);
		addr += PAGE_SIZE;
		ptep++;
	}

	return entry;
}
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}
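/*
 * Explanatory note (not in the original source): pmd_huge() and
 * pud_huge() return 0 because, on this configuration, huge pages are
 * represented by runs of PTE-level entries (see set_huge_pte_at()
 * above) rather than by huge PMD or PUD entries, so the generic code
 * never has to treat a PMD or PUD as a huge mapping here.
 */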