/*
 * arch/metag/mm/hugetlbpage.c
 *
 * METAG HugeTLB page support.
 *
 * Cloned from SuperH
 *
 * Cloned from sparc64 by Paul Mundt.
 *
 * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
int prepare_hugepage_range(struct file *file, unsigned long addr,
                           unsigned long len)
{
        struct mm_struct *mm = current->mm;
        struct hstate *h = hstate_file(file);
        struct vm_area_struct *vma;

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (addr & ~huge_page_mask(h))
                return -EINVAL;
        if (TASK_SIZE - len < addr)
                return -EINVAL;

        vma = find_vma(mm, ALIGN_HUGEPT(addr));
        if (vma && !(vma->vm_flags & MAP_HUGETLB))
                return -EINVAL;

        vma = find_vma(mm, addr);
        if (vma) {
                if (addr + len > vma->vm_start)
                        return -EINVAL;
                if (!(vma->vm_flags & MAP_HUGETLB) &&
                    (ALIGN_HUGEPT(addr + len) > vma->vm_start))
                        return -EINVAL;
        }
        return 0;
}
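
/*
 * Illustrative sketch (not part of the original file): the checks above
 * guard MAP_FIXED mappings of a hugetlbfs file.  A minimal userspace
 * caller, assuming huge_fd is a descriptor for a file on a mounted
 * hugetlbfs, would look like:
 *
 *	void *map_huge_fixed(void *addr, size_t len, int huge_fd)
 *	{
 *		return mmap(addr, len, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED | MAP_FIXED, huge_fd, 0);
 *	}
 *
 * Both addr and len must be multiples of the huge page size, or
 * prepare_hugepage_range() rejects the request with -EINVAL.
 */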
pte_t *huge_pte_alloc(struct mm_struct *mm,
                      unsigned long addr, unsigned long sz)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pgd = pgd_offset(mm, addr);
        pud = pud_offset(pgd, addr);
        pmd = pmd_offset(pud, addr);
        pte = pte_alloc_map(mm, pmd, addr);
        /* the page size is encoded in the 1st-level entry, so mark it huge */
        pgd->pgd &= ~_PAGE_SZ_MASK;
        pgd->pgd |= _PAGE_SZHUGE;

        return pte;
}
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, addr);
        pud = pud_offset(pgd, addr);
        pmd = pmd_offset(pud, addr);
        pte = pte_offset_kernel(pmd, addr);

        return pte;
}
int pmd_huge(pmd_t pmd)
{
        return pmd_page_shift(pmd) > PAGE_SHIFT;
}

int pud_huge(pud_t pud)
{
        return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int write)
{
        return NULL;
}
#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA

/*
 * Look for an unmapped area starting after another hugetlb vma.
 * There are guaranteed to be no huge ptes spare if all the huge pages are
 * full size (4MB), so in that case compile out this search.
 */
#if HPAGE_SHIFT == HUGEPT_SHIFT
static inline unsigned long
hugetlb_get_unmapped_area_existing(unsigned long len)
{
        return 0;
}
#else
static unsigned long
hugetlb_get_unmapped_area_existing(unsigned long len)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr, addr;
        int after_huge;

        if (mm->context.part_huge) {
                start_addr = mm->context.part_huge;
                after_huge = 1;
        } else {
                start_addr = TASK_UNMAPPED_BASE;
                after_huge = 0;
        }
new_search:
        addr = start_addr;

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                if ((!vma && !after_huge) || TASK_SIZE - len < addr) {
                        /*
                         * Start a new search - just in case we missed
                         * some holes.
                         */
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = TASK_UNMAPPED_BASE;
                                goto new_search;
                        }
                        return 0;
                }
                /* skip ahead if we've aligned right over some vmas */
                if (vma && vma->vm_end <= addr)
                        continue;
                /* space before the next vma? */
                if (after_huge && (!vma || ALIGN_HUGEPT(addr + len)
                                   <= vma->vm_start)) {
                        unsigned long end = addr + len;
                        if (end & HUGEPT_MASK)
                                mm->context.part_huge = end;
                        else if (addr == mm->context.part_huge)
                                mm->context.part_huge = 0;
                        return addr;
                }
                if (vma->vm_flags & MAP_HUGETLB) {
                        /* space after a huge vma in 2nd level page table? */
                        if (vma->vm_end & HUGEPT_MASK) {
                                after_huge = 1;
                                /* no need to align to the next PT block */
                                addr = vma->vm_end;
                                continue;
                        }
                }
                after_huge = 0;
                addr = ALIGN_HUGEPT(vma->vm_end);
        }
}
#endif
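
/*
 * Worked example (illustrative, not part of the original file), assuming
 * 4MB HUGEPT blocks and a smaller configured huge page size such as 1MB:
 * the first 1MB mapping placed in a fresh block leaves that block partly
 * used, so its end address is remembered in mm->context.part_huge.  The
 * next request starts its search there with after_huge set, finds the gap
 * before the following vma, and is placed directly after the existing huge
 * mapping, reusing the same second-level page table rather than claiming
 * another 4MB-aligned block.  Once a request ends exactly on a block
 * boundary, part_huge is cleared again.  If no suitable gap is found, the
 * function returns 0 and the caller falls back to
 * hugetlb_get_unmapped_area_new_pmd().
 */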
/* Do a full search to find an area without any nearby normal pages. */
static unsigned long
hugetlb_get_unmapped_area_new_pmd(unsigned long len)
{
        struct vm_unmapped_area_info info;

        info.flags = 0;
        info.length = len;
        info.low_limit = TASK_UNMAPPED_BASE;
        info.high_limit = TASK_SIZE;
        info.align_mask = PAGE_MASK & HUGEPT_MASK;
        info.align_offset = 0;
        return vm_unmapped_area(&info);
}
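
/*
 * Note on the alignment above (not part of the original file): assuming
 * HUGEPT_MASK is HUGEPT_SIZE - 1, as its use in
 * hugetlb_get_unmapped_area_existing() suggests, PAGE_MASK & HUGEPT_MASK
 * keeps exactly the address bits between PAGE_SHIFT and HUGEPT_SHIFT, so
 * vm_unmapped_area() is asked for a start address with those bits clear,
 * i.e. one aligned to a whole second-level page table block.
 */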
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                if (!prepare_hugepage_range(file, addr, len))
                        return addr;
        }

        /*
         * Look for an existing hugetlb vma with space after it (this is to
         * minimise fragmentation caused by huge pages).
         */
        addr = hugetlb_get_unmapped_area_existing(len);
        if (addr)
                return addr;

        /*
         * Find an unmapped naturally aligned set of 4MB blocks that we can
         * use for huge pages.
         */
        return hugetlb_get_unmapped_area_new_pmd(len);
}

#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
/* necessary for boot time 4MB huge page allocation */
static __init int setup_hugepagesz(char *opt)
{
        unsigned long ps = memparse(opt, &opt);
        if (ps == (1 << HPAGE_SHIFT)) {
                hugetlb_add_hstate(HPAGE_SHIFT - PAGE_SHIFT);
        } else {
                hugetlb_bad_size();
                pr_err("hugepagesz: Unsupported page size %lu M\n",
                       ps >> 20);
                return 0;
        }
        return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
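
/*
 * Usage sketch (illustrative, not part of the original file): with a 4MB
 * HPAGE_SIZE configuration, huge pages are reserved at boot via the kernel
 * command line, e.g.
 *
 *	hugepagesz=4M hugepages=16
 *
 * and then used from userspace through a hugetlbfs mount:
 *
 *	mount -t hugetlbfs none /mnt/huge
 *
 * A hugepagesz= value that does not match the configured HPAGE_SHIFT is
 * rejected by setup_hugepagesz() above with the "Unsupported page size"
 * error.
 */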