/*
 * arch/sh64/mm/hugetlbpage.c
 *
 * SuperH HugeTLB page support.
 *
 * Cloned from sparc64 by Paul Mundt.
 *
 * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
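/*
 * On sh64 a huge page is represented as a run of (1 << HUGETLB_PAGE_ORDER)
 * contiguous base-page PTEs, each tagged with _PAGE_SZHUGE.  For example,
 * assuming 64kB huge pages over 4kB base pages (one possible configuration,
 * not the only one), HUGETLB_PAGE_ORDER is 4 and each huge mapping spans
 * 16 consecutive PTEs.
 *
 * huge_pte_alloc() walks the page tables for 'addr', allocating any missing
 * intermediate levels, and returns the first PTE slot of such a run.
 */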
static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd) {
		pmd = pmd_alloc(mm, pgd, addr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, addr);
	}
	return pte;
}
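/*
 * Lookup-only variant of the above: returns the PTE for 'addr' if the
 * intermediate page table levels are already present, without allocating
 * anything.
 */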
static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd) {
		pmd = pmd_offset(pgd, addr);
		if (pmd)
			pte = pte_offset_map(pmd, addr);
	}
	return pte;
}
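/*
 * Tag a PTE with the hardware huge-size bits so the TLB maps it at
 * HPAGE_SIZE rather than at the base page size.
 */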
#define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0)
static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			 struct page *page, pte_t *page_table, int write_access)
{
	unsigned long i;
	pte_t entry;

	add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE);

	if (write_access)
		entry = pte_mkwrite(pte_mkdirty(mk_pte(page,
						       vma->vm_page_prot)));
	else
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	entry = pte_mkyoung(entry);
	mk_pte_huge(entry);

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		set_pte(page_table, entry);
		page_table++;

		pte_val(entry) += PAGE_SIZE;
	}
}
/*
 * This function checks for proper alignment of input addr and len parameters.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	return 0;
}
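/*
 * fork() support: the child shares the parent's huge pages rather than
 * copying them.  Each source PTE run is duplicated into the child's page
 * tables and the page's refcount is raised; hugetlb mappings are
 * prefaulted, so a missing source PTE is a bug.
 */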
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr = vma->vm_start;
	unsigned long end = vma->vm_end;
	int i;

	while (addr < end) {
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;
		src_pte = huge_pte_offset(src, addr);
		BUG_ON(!src_pte || pte_none(*src_pte));
		entry = *src_pte;
		ptepage = pte_page(entry);
		get_page(ptepage);
		for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
			set_pte(dst_pte, entry);
			pte_val(entry) += PAGE_SIZE;
			dst_pte++;
		}
		add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
		addr += HPAGE_SIZE;
	}
	return 0;

nomem:
	return -ENOMEM;
}
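/*
 * get_user_pages() back-end for huge pages: hand back the constituent
 * base pages of the huge mapping, one per base-page-sized step of the
 * requested range.
 */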
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i)
{
	unsigned long vaddr = *position;
	int remainder = *length;

	WARN_ON(!is_vm_hugetlb_page(vma));

	while (vaddr < vma->vm_end && remainder) {
		if (pages) {
			pte_t *pte;
			struct page *page;

			pte = huge_pte_offset(mm, vaddr);

			/* hugetlb should be locked, and hence, prefaulted */
			BUG_ON(!pte || pte_none(*pte));

			page = pte_page(*pte);

			WARN_ON(!PageCompound(page));

			get_page(page);
			pages[i] = page;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		--remainder;
		++i;
	}

	*length = remainder;
	*position = vaddr;

	return i;
}
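/*
 * Because sh64 keeps huge mappings at the PTE level, the generic
 * follow_page() hooks below are stubs: follow_huge_addr() rejects the
 * lookup, pmd_huge() never matches and follow_huge_pmd() has nothing
 * to return.
 */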
struct page *follow_huge_addr(struct mm_struct *mm,
			      unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}
int pmd_huge(pmd_t pmd)
{
	return 0;
}
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	return NULL;
}
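/*
 * Tear down all huge mappings in [start, end): drop the reference on
 * each mapped page, clear every base PTE of each run, fix up the rss
 * accounting and flush the now-stale TLB entries.
 */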
void unmap_hugepage_range(struct vm_area_struct *vma,
			  unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	struct page *page;
	int i;

	BUG_ON(start & (HPAGE_SIZE - 1));
	BUG_ON(end & (HPAGE_SIZE - 1));

	for (address = start; address < end; address += HPAGE_SIZE) {
		pte = huge_pte_offset(mm, address);
		BUG_ON(!pte);
		if (pte_none(*pte))
			continue;
		page = pte_page(*pte);
		put_page(page);
		for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
			pte_clear(mm, address+(i*PAGE_SIZE), pte);
			pte++;
		}
	}
	add_mm_counter(mm, rss, -((end - start) >> PAGE_SHIFT));
	flush_tlb_range(vma, start, end);
}
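/*
 * Prefault the entire VMA at mmap() time: for each huge-page-sized slot,
 * look the backing page up in the page cache and, if it is absent, charge
 * the fs quota and allocate a fresh huge page, then wire up the PTE run
 * via set_huge_pte().  The page cache index counts in huge-page units;
 * e.g. assuming 64kB huge pages (HPAGE_SHIFT of 16, one possible
 * configuration), the slot 128kB into the VMA contributes 2 to idx.
 */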
int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret = 0;

	BUG_ON(vma->vm_start & ~HPAGE_MASK);
	BUG_ON(vma->vm_end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		unsigned long idx;
		pte_t *pte = huge_pte_alloc(mm, addr);
		struct page *page;

		if (!pte) {
			ret = -ENOMEM;
			goto out;
		}
		if (!pte_none(*pte))
			continue;

		idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
			+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
		page = find_get_page(mapping, idx);
		if (!page) {
			/* charge the fs quota first */
			if (hugetlb_get_quota(mapping)) {
				ret = -ENOMEM;
				goto out;
			}
			page = alloc_huge_page();
			if (!page) {
				hugetlb_put_quota(mapping);
				ret = -ENOMEM;
				goto out;
			}
			ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
			if (!ret) {
				unlock_page(page);
			} else {
				hugetlb_put_quota(mapping);
				free_huge_page(page);
				goto out;
			}
		}
		set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
	}
out:
	spin_unlock(&mm->page_table_lock);
	return ret;
}