arch/x86/mm/hugetlbpage.c
/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        unsigned long vpfn = address >> PAGE_SHIFT;
        pte_t *pte;
        struct page *page;
        struct vm_area_struct *vma;

        vma = find_vma(mm, address);
        if (!vma || !is_vm_hugetlb_page(vma))
                return ERR_PTR(-EINVAL);

        pte = huge_pte_offset(mm, address);

        /* hugetlb should be locked, and hence, prefaulted */
        WARN_ON(!pte || pte_none(*pte));

        page = &pte_page(*pte)[vpfn % (HPAGE_SIZE / PAGE_SIZE)];

        WARN_ON(!PageHead(page));

        return page;
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

int pud_huge(pud_t pud)
{
        return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
{
        return NULL;
}

#else

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        return ERR_PTR(-EINVAL);
}

/* A leaf (huge) PMD or PUD entry has the PSE (page size) bit set. */
int pmd_huge(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_PSE);
}

int pud_huge(pud_t pud)
{
        return !!(pud_val(pud) & _PAGE_PSE);
}
#endif

/* x86_64 also uses this file */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
                unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;

        info.flags = 0;
        info.length = len;
        info.low_limit = TASK_UNMAPPED_BASE;
        info.high_limit = TASK_SIZE;
        /* The returned address must be aligned to the huge page size. */
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        return vm_unmapped_area(&info);
}

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
                unsigned long addr0, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;
        unsigned long addr;

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = current->mm->mmap_base;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        addr = vm_unmapped_area(&info);

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        if (addr & ~PAGE_MASK) {
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = TASK_UNMAPPED_BASE;
                info.high_limit = TASK_SIZE;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        /* The length must be a multiple of the huge page size. */
        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        /* Honour the caller's hint if the aligned range is still free. */
        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}

#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
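
/*
 * A minimal userspace sketch of one request that ends up in
 * hugetlb_get_unmapped_area(): an anonymous MAP_HUGETLB mapping.  It
 * assumes huge pages have been reserved (e.g. booting with
 * "hugepages=16") and the default 2 MB huge page size.
 *
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 4UL << 20;	// two default-sized huge pages
 *		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
 *			       -1, 0);
 *
 *		if (p == MAP_FAILED) {
 *			perror("mmap(MAP_HUGETLB)");
 *			return 1;
 *		}
 *		munmap(p, len);
 *		return 0;
 *	}
 */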

#ifdef CONFIG_X86_64
static __init int setup_hugepagesz(char *opt)
{
        unsigned long ps = memparse(opt, &opt);

        if (ps == PMD_SIZE) {
                hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
        } else if (ps == PUD_SIZE && cpu_has_gbpages) {
                hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
        } else {
                printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
                        ps >> 20);
                return 0;
        }
        return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
#endif
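
/*
 * Example command line for the handler above, assuming a CPU that
 * reports the "pdpe1gb" (gbpages) feature for the 1 GB case:
 *
 *	hugepagesz=2M hugepages=64 hugepagesz=1G hugepages=4
 *
 * memparse() accepts the K/M/G suffixes, so "2M" and "1G" select
 * PMD_SIZE and PUD_SIZE respectively; any other size hits the
 * printk() above and no extra hstate is registered.
 */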