[linux-2.6/verdex.git] / arch / sh / mm / hugetlbpage.c
blob 1f897bab2318a00efa6a1d9f845f2c107cb33eff
/*
 * arch/sh/mm/hugetlbpage.c
 *
 * SuperH HugeTLB page support.
 *
 * Cloned from sparc64 by Paul Mundt.
 *
 * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
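/*
 * On SH a huge page is represented as 2^HUGETLB_PAGE_ORDER consecutive
 * base-size PTEs (see set_huge_pte() below), so this walk descends to
 * the PTE level like an ordinary page-table walk, allocating the
 * intermediate levels as needed.
 */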
static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, addr);
        if (pgd) {
                pmd = pmd_alloc(mm, pgd, addr);
                if (pmd)
                        pte = pte_alloc_map(mm, pmd, addr);
        }

        return pte;
}
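/*
 * Lookup-only counterpart of huge_pte_alloc(): walk the page tables
 * without allocating, leaving pte NULL if a level cannot be found.
 */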
static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, addr);
        if (pgd) {
                pmd = pmd_offset(pgd, addr);
                if (pmd)
                        pte = pte_offset_map(pmd, addr);
        }

        return pte;
}
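/* Mark a PTE as huge by setting the SH _PAGE_SZHUGE page-size bit. */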
#define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0)
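/*
 * Install the mapping for one huge page: one PTE per base page, with
 * the physical address advancing by PAGE_SIZE on each step. The mm's
 * rss counter is bumped by the number of base pages covered.
 */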
static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
                         struct page *page, pte_t *page_table, int write_access)
{
        unsigned long i;
        pte_t entry;

        add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE);

        if (write_access)
                entry = pte_mkwrite(pte_mkdirty(mk_pte(page,
                                                       vma->vm_page_prot)));
        else
                entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
        entry = pte_mkyoung(entry);
        mk_pte_huge(entry);

        for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                set_pte(page_table, entry);
                page_table++;

                pte_val(entry) += PAGE_SIZE;
        }
}
/*
 * This function checks for proper alignment of input addr and len parameters.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
                return -EINVAL;
        return 0;
}
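/*
 * Duplicate the huge-page mappings of a VMA from src (parent) into
 * dst (child) at fork time. The underlying huge page is shared: a
 * single page reference is taken per huge page even though every base
 * PTE backing it is copied.
 */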
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                            struct vm_area_struct *vma)
{
        pte_t *src_pte, *dst_pte, entry;
        struct page *ptepage;
        unsigned long addr = vma->vm_start;
        unsigned long end = vma->vm_end;
        int i;

        while (addr < end) {
                dst_pte = huge_pte_alloc(dst, addr);
                if (!dst_pte)
                        goto nomem;
                src_pte = huge_pte_offset(src, addr);
                BUG_ON(!src_pte || pte_none(*src_pte));
                entry = *src_pte;
                ptepage = pte_page(entry);
                get_page(ptepage);
                for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                        set_pte(dst_pte, entry);
                        pte_val(entry) += PAGE_SIZE;
                        dst_pte++;
                }
                add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
                addr += HPAGE_SIZE;
        }
        return 0;

nomem:
        return -ENOMEM;
}
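/*
 * get_user_pages() back end for hugetlb VMAs: fill in pages[] and
 * vmas[] one base page at a time, taking a reference per base page.
 * The PTEs must already be present, since hugetlb mappings are
 * prefaulted (see hugetlb_prefault() below).
 */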
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        struct page **pages, struct vm_area_struct **vmas,
                        unsigned long *position, int *length, int i)
{
        unsigned long vaddr = *position;
        int remainder = *length;

        WARN_ON(!is_vm_hugetlb_page(vma));

        while (vaddr < vma->vm_end && remainder) {
                if (pages) {
                        pte_t *pte;
                        struct page *page;

                        pte = huge_pte_offset(mm, vaddr);

                        /* hugetlb should be locked, and hence, prefaulted */
                        BUG_ON(!pte || pte_none(*pte));

                        page = pte_page(*pte);

                        WARN_ON(!PageCompound(page));

                        get_page(page);
                        pages[i] = page;
                }

                if (vmas)
                        vmas[i] = vma;

                vaddr += PAGE_SIZE;
                --remainder;
                ++i;
        }

        *length = remainder;
        *position = vaddr;

        return i;
}
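/*
 * Since SH huge pages live at the PTE level, there is no huge PMD to
 * follow; these stubs steer the generic follow_page() code down the
 * normal page-table path.
 */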
struct page *follow_huge_addr(struct mm_struct *mm,
                              unsigned long address, int write)
{
        return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int write)
{
        return NULL;
}
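/*
 * Tear down every huge-page mapping in [start, end): drop the page
 * reference, clear each base PTE backing the huge page, fix up the
 * rss accounting, and flush the TLB for the whole range.
 */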
void unmap_hugepage_range(struct vm_area_struct *vma,
                          unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t *pte;
        struct page *page;
        int i;

        BUG_ON(start & (HPAGE_SIZE - 1));
        BUG_ON(end & (HPAGE_SIZE - 1));

        for (address = start; address < end; address += HPAGE_SIZE) {
                pte = huge_pte_offset(mm, address);
                BUG_ON(!pte);
                if (pte_none(*pte))
                        continue;
                page = pte_page(*pte);
                put_page(page);
                for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                        pte_clear(mm, address + (i * PAGE_SIZE), pte);
                        pte++;
                }
        }
        add_mm_counter(mm, rss, -((end - start) >> PAGE_SHIFT));
        flush_tlb_range(vma, start, end);
}
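/*
 * Prefault the whole mapping at mmap() time, under page_table_lock:
 * for each huge page, find (or allocate, charging the hugetlbfs
 * quota) the page-cache page at its file index and install its PTEs,
 * so that no faults should occur on the range afterwards.
 */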
int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr;
        int ret = 0;

        BUG_ON(vma->vm_start & ~HPAGE_MASK);
        BUG_ON(vma->vm_end & ~HPAGE_MASK);

        spin_lock(&mm->page_table_lock);
        for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
                unsigned long idx;
                pte_t *pte = huge_pte_alloc(mm, addr);
                struct page *page;

                if (!pte) {
                        ret = -ENOMEM;
                        goto out;
                }
                if (!pte_none(*pte))
                        continue;

                idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
                        + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
                page = find_get_page(mapping, idx);
                if (!page) {
                        /* charge the fs quota first */
                        if (hugetlb_get_quota(mapping)) {
                                ret = -ENOMEM;
                                goto out;
                        }
                        page = alloc_huge_page();
                        if (!page) {
                                hugetlb_put_quota(mapping);
                                ret = -ENOMEM;
                                goto out;
                        }
                        ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
                        if (!ret) {
                                unlock_page(page);
                        } else {
                                hugetlb_put_quota(mapping);
                                free_huge_page(page);
                                goto out;
                        }
                }
                set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
        }
out:
        spin_unlock(&mm->page_table_lock);
        return ret;
}