/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

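/* Huge pages on sparc64 are built out of (1 << HUGETLB_PAGE_ORDER)
 * contiguous base-size PTEs.  The helpers below locate the first of
 * those PTEs for a given address: huge_pte_alloc() allocates the
 * intermediate page-table levels as needed, while huge_pte_offset()
 * only walks tables that already exist.
 */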
static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, addr);
        if (pgd) {
                pud = pud_offset(pgd, addr);
                if (pud) {
                        pmd = pmd_alloc(mm, pud, addr);
                        if (pmd)
                                pte = pte_alloc_map(mm, pmd, addr);
                }
        }
        return pte;
}

static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, addr);
        if (pgd) {
                pud = pud_offset(pgd, addr);
                if (pud) {
                        pmd = pmd_offset(pud, addr);
                        if (pmd)
                                pte = pte_offset_map(pmd, addr);
                }
        }
        return pte;
}

#define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0)

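/* Install the mapping for one huge page: the PTE is replicated into
 * all (1 << HUGETLB_PAGE_ORDER) base-size slots, each copy carrying
 * _PAGE_SZHUGE and advanced by PAGE_SIZE, so the hardware can insert
 * a single huge-page TLB entry for the whole range.
 */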
static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
                         unsigned long addr,
                         struct page *page, pte_t *page_table, int write_access)
{
        unsigned long i;
        pte_t entry;

        add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE);

        if (write_access)
                entry = pte_mkwrite(pte_mkdirty(mk_pte(page,
                                                       vma->vm_page_prot)));
        else
                entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
        entry = pte_mkyoung(entry);
        mk_pte_huge(entry);

        for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                set_pte_at(mm, addr, page_table, entry);
                page_table++;
                addr += PAGE_SIZE;

                pte_val(entry) += PAGE_SIZE;
        }
}

/*
 * This function checks for proper alignment of input addr and len parameters.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
                return -EINVAL;
        return 0;
}

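/* Duplicate a parent's huge-page mappings into the child at fork time.
 * Each huge page's block of base PTEs is copied, and the shared page's
 * reference count is raised once per huge page.
 */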
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                            struct vm_area_struct *vma)
{
        pte_t *src_pte, *dst_pte, entry;
        struct page *ptepage;
        unsigned long addr = vma->vm_start;
        unsigned long end = vma->vm_end;
        int i;

        while (addr < end) {
                dst_pte = huge_pte_alloc(dst, addr);
                if (!dst_pte)
                        goto nomem;
                src_pte = huge_pte_offset(src, addr);
                BUG_ON(!src_pte || pte_none(*src_pte));
                entry = *src_pte;
                ptepage = pte_page(entry);
                get_page(ptepage);
                for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                        set_pte_at(dst, addr, dst_pte, entry);
                        pte_val(entry) += PAGE_SIZE;
                        dst_pte++;
                        addr += PAGE_SIZE;
                }
                add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
        }
        return 0;

nomem:
        return -ENOMEM;
}

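/* Back-end for get_user_pages() on hugetlb VMAs: the pages were
 * prefaulted when the mapping was created, so the PTEs can simply be
 * looked up and the underlying compound pages referenced.
 */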
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        struct page **pages, struct vm_area_struct **vmas,
                        unsigned long *position, int *length, int i)
{
        unsigned long vaddr = *position;
        int remainder = *length;

        WARN_ON(!is_vm_hugetlb_page(vma));

        while (vaddr < vma->vm_end && remainder) {
                if (pages) {
                        pte_t *pte;
                        struct page *page;

                        pte = huge_pte_offset(mm, vaddr);

                        /* hugetlb should be locked, and hence, prefaulted */
                        BUG_ON(!pte || pte_none(*pte));

                        page = pte_page(*pte);

                        WARN_ON(!PageCompound(page));

                        get_page(page);
                        pages[i] = page;
                }

                if (vmas)
                        vmas[i] = vma;

                vaddr += PAGE_SIZE;
                --remainder;
                ++i;
        }

        *length = remainder;
        *position = vaddr;

        return i;
}

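/* sparc64 does not represent huge pages with huge PMD entries, so the
 * generic follow_page()/pmd_huge() hooks below are stubs; everything
 * is resolved through the normal PTE lookups above.
 */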
struct page *follow_huge_addr(struct mm_struct *mm,
                              unsigned long address, int write)
{
        return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int write)
{
        return NULL;
}

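/* Tear down the huge-page PTEs covering [start, end), drop the page
 * references taken at prefault time, and flush the affected TLB range.
 */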
void unmap_hugepage_range(struct vm_area_struct *vma,
                          unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t *pte;
        struct page *page;
        int i;

        BUG_ON(start & (HPAGE_SIZE - 1));
        BUG_ON(end & (HPAGE_SIZE - 1));

        for (address = start; address < end; address += HPAGE_SIZE) {
                pte = huge_pte_offset(mm, address);
                BUG_ON(!pte);
                if (pte_none(*pte))
                        continue;
                page = pte_page(*pte);
                put_page(page);
                for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                        pte_clear(mm, address + (i * PAGE_SIZE), pte);
                        pte++;
                }
        }
        add_mm_counter(mm, rss, -((end - start) >> PAGE_SHIFT));
        flush_tlb_range(vma, start, end);
}

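/* Cross-call helper: if the CPU receiving the call is currently running
 * this address space, reload its secondary context register so the new
 * page-size encoding in the context value takes effect immediately.
 */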
static void context_reload(void *__data)
{
        struct mm_struct *mm = __data;

        if (mm == current->mm)
                load_secondary_context(mm);
}

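/* Prefault every huge page in the VMA: find (or allocate and insert)
 * the backing page in the hugetlbfs page cache, then map it with
 * set_huge_pte().  On UltraSPARC-III+ the context register is first
 * reprogrammed so that its second page-size field selects huge pages.
 */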
int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr;
        int ret = 0;

        /* On UltraSPARC-III+ and later, configure the second half of
         * the Data-TLB for huge pages.
         */
        if (tlb_type == cheetah_plus) {
                unsigned long ctx;

                spin_lock(&ctx_alloc_lock);
                ctx = mm->context.sparc64_ctx_val;
                ctx &= ~CTX_PGSZ_MASK;
                ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
                ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;

                if (ctx != mm->context.sparc64_ctx_val) {
                        /* When changing the page size fields, we
                         * must perform a context flush so that no
                         * stale entries match.  This flush must
                         * occur with the original context register
                         * value.
                         */
                        do_flush_tlb_mm(mm);

                        /* Reload the context register of all processors
                         * also executing in this address space.
                         */
                        mm->context.sparc64_ctx_val = ctx;
                        on_each_cpu(context_reload, mm, 0, 0);
                }
                spin_unlock(&ctx_alloc_lock);
        }

        BUG_ON(vma->vm_start & ~HPAGE_MASK);
        BUG_ON(vma->vm_end & ~HPAGE_MASK);

        spin_lock(&mm->page_table_lock);
        for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
                unsigned long idx;
                pte_t *pte = huge_pte_alloc(mm, addr);
                struct page *page;

                if (!pte) {
                        ret = -ENOMEM;
                        goto out;
                }
                if (!pte_none(*pte))
                        continue;

                idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
                        + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
                page = find_get_page(mapping, idx);
                if (!page) {
                        /* charge the fs quota first */
                        if (hugetlb_get_quota(mapping)) {
                                ret = -ENOMEM;
                                goto out;
                        }
                        page = alloc_huge_page();
                        if (!page) {
                                hugetlb_put_quota(mapping);
                                ret = -ENOMEM;
                                goto out;
                        }
                        ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
                        if (!ret) {
                                unlock_page(page);
                        } else {
                                hugetlb_put_quota(mapping);
                                free_huge_page(page);
                                goto out;
                        }
                }
                set_huge_pte(mm, vma, addr, page, pte, vma->vm_flags & VM_WRITE);
        }
out:
        spin_unlock(&mm->page_table_lock);
        return ret;
}