// SPDX-License-Identifier: GPL-2.0
/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
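
/* sun4u chips support only the default huge page size, and
 * pte_mkhuge() has already set the 4U size bits by the time we get
 * here, so there is nothing left to encode; this stub just lets
 * hugepage_shift_to_tte() dispatch uniformly.
 */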
static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	return entry;
}
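
/* Encode a huge page size into a sun4v TTE: clear the old size field,
 * select the size bits matching @shift, and flag the entry as a PMD-
 * or PUD-level huge mapping where one applies.  The default is
 * _PAGE_SZ4MB_4V because the default HPAGE_SHIFT mapping is backed by
 * a pair of 4MB real pages (REAL_HPAGE_SIZE).
 */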
static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	unsigned long hugepage_size = _PAGE_SZ4MB_4V;

	pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V;

	switch (shift) {
	case HPAGE_16GB_SHIFT:
		hugepage_size = _PAGE_SZ16GB_4V;
		pte_val(entry) |= _PAGE_PUD_HUGE;
		break;
	case HPAGE_2GB_SHIFT:
		hugepage_size = _PAGE_SZ2GB_4V;
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_256MB_SHIFT:
		hugepage_size = _PAGE_SZ256MB_4V;
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_SHIFT:
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_64K_SHIFT:
		hugepage_size = _PAGE_SZ64K_4V;
		break;
	default:
		WARN_ONCE(1, "unsupported hugepage shift=%u\n", shift);
	}

	pte_val(entry) = pte_val(entry) | hugepage_size;
	return entry;
}
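
/* tlb_type is hypervisor on sun4v machines; everything else uses the
 * sun4u TTE format.
 */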
static pte_t hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	if (tlb_type == hypervisor)
		return sun4v_hugepage_shift_to_tte(entry, shift);

	return sun4u_hugepage_shift_to_tte(entry, shift);
}
pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
	pte_t pte;

	entry = pte_mkhuge(entry);
	pte = hugepage_shift_to_tte(entry, shift);

	/* If this vma has ADI enabled on it, turn on TTE.mcd so that
	 * loads and stores check the memory corruption detection tags.
	 */
	if (flags & VM_SPARC_ADI)
		return pte_mkmcd(pte);

	return pte_mknotmcd(pte);
}
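
/* The inverse mapping: recover the page-size shift from a TTE's size
 * bits.  _PAGE_SZ4MB_4V yields REAL_HPAGE_SHIFT rather than
 * HPAGE_SHIFT because a single TTE only ever covers one 4MB real page
 * of an 8MB HPAGE pair.
 */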
static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
{
	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4V;
	unsigned int shift;

	switch (tte_szbits) {
	case _PAGE_SZ16GB_4V:
		shift = HPAGE_16GB_SHIFT;
		break;
	case _PAGE_SZ2GB_4V:
		shift = HPAGE_2GB_SHIFT;
		break;
	case _PAGE_SZ256MB_4V:
		shift = HPAGE_256MB_SHIFT;
		break;
	case _PAGE_SZ4MB_4V:
		shift = REAL_HPAGE_SHIFT;
		break;
	case _PAGE_SZ64K_4V:
		shift = HPAGE_64K_SHIFT;
		break;
	default:
		shift = PAGE_SHIFT;
		break;
	}
	return shift;
}
static unsigned int sun4u_huge_tte_to_shift(pte_t entry)
{
	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4U;
	unsigned int shift;

	switch (tte_szbits) {
	case _PAGE_SZ256MB_4U:
		shift = HPAGE_256MB_SHIFT;
		break;
	case _PAGE_SZ4MB_4U:
		shift = REAL_HPAGE_SHIFT;
		break;
	case _PAGE_SZ64K_4U:
		shift = HPAGE_64K_SHIFT;
		break;
	default:
		shift = PAGE_SHIFT;
		break;
	}
	return shift;
}
static unsigned long tte_to_shift(pte_t entry)
{
	if (tlb_type == hypervisor)
		return sun4v_huge_tte_to_shift(entry);

	return sun4u_huge_tte_to_shift(entry);
}
static unsigned int huge_tte_to_shift(pte_t entry)
{
	unsigned long shift = tte_to_shift(entry);

	if (shift == PAGE_SHIFT)
		WARN_ONCE(1, "tte_to_shift: invalid hugepage tte=0x%lx\n",
			  pte_val(entry));

	return shift;
}
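
/* Round REAL_HPAGE_SIZE back up to HPAGE_SIZE: callers deal in the
 * software huge page size even though each TTE maps only half of it.
 */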
static unsigned long huge_tte_to_size(pte_t pte)
{
	unsigned long size = 1UL << huge_tte_to_shift(pte);

	if (size == REAL_HPAGE_SIZE)
		size = HPAGE_SIZE;
	return size;
}
unsigned long pud_leaf_size(pud_t pud) { return 1UL << tte_to_shift(*(pte_t *)&pud); }
unsigned long pmd_leaf_size(pmd_t pmd) { return 1UL << tte_to_shift(*(pte_t *)&pmd); }
unsigned long pte_leaf_size(pte_t pte) { return 1UL << tte_to_shift(pte); }
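
/* Walk (and allocate) the page table down to the level that matches
 * @sz, returning that level's entry as a pte_t *: 16GB pages live in
 * PUD entries, 8MB/256MB/2GB pages in PMD entries, and 64K pages in
 * ordinary PTEs, matching the PUD/PMD flags chosen in
 * sun4v_hugepage_shift_to_tte().
 */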
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_offset(pgd, addr);
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return NULL;
	if (sz >= PUD_SIZE)
		return (pte_t *)pud;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;
	if (sz >= PMD_SIZE)
		return (pte_t *)pmd;
	return pte_alloc_huge(mm, pmd, addr);
}
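
/* Lookup-side counterpart of huge_pte_alloc(): stop at the first
 * level that is itself a huge leaf, and return NULL if any level is
 * still empty.
 */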
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	if (is_hugetlb_pud(*pud))
		return (pte_t *)pud;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	if (is_hugetlb_pmd(*pmd))
		return (pte_t *)pmd;
	return pte_offset_huge(pmd, addr);
}
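
/* A huge mapping is installed by replicating its TTE across every
 * entry it spans at its page-table level, stepping the physical
 * address by 1UL << shift each time.  With the 8K kernel page size
 * sparc64 uses: a 64K page fills nptes = 8 PTEs, a 2GB page fills 256
 * PMDs, and an 8MB HPAGE is a single PMD entry whose two 4MB real
 * halves are flushed by the second maybe_tlb_batch_add() call below.
 */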
void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		       pte_t *ptep, pte_t entry)
{
	unsigned int nptes, orig_shift, shift;
	unsigned long i, size;
	pte_t orig;

	size = huge_tte_to_size(entry);

	if (size >= PUD_SIZE)
		shift = PUD_SHIFT;
	else if (size >= PMD_SIZE)
		shift = PMD_SHIFT;
	else
		shift = PAGE_SHIFT;

	nptes = size >> shift;

	if (!pte_present(*ptep) && pte_present(entry))
		mm->context.hugetlb_pte_count += nptes;

	addr &= ~(size - 1);
	orig = *ptep;
	orig_shift = pte_none(orig) ? PAGE_SHIFT : huge_tte_to_shift(orig);

	for (i = 0; i < nptes; i++)
		ptep[i] = __pte(pte_val(entry) + (i << shift));

	maybe_tlb_batch_add(mm, addr, ptep, orig, 0, orig_shift);
	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
	if (size == HPAGE_SIZE)
		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0,
				    orig_shift);
}
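
/* @sz is not needed here: the huge page size is already encoded in
 * the TTE itself.
 */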
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry, unsigned long sz)
{
	__set_huge_pte_at(mm, addr, ptep, entry);
}
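
/* Tear-down mirror of __set_huge_pte_at(): snapshot the old TTE, zero
 * every entry the mapping spans, and queue the same one or two TLB
 * flush batches.
 */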
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned int i, nptes, orig_shift, shift;
	unsigned long size;
	pte_t entry;

	entry = *ptep;
	size = huge_tte_to_size(entry);

	if (size >= PUD_SIZE)
		shift = PUD_SHIFT;
	else if (size >= PMD_SIZE)
		shift = PMD_SHIFT;
	else
		shift = PAGE_SHIFT;

	nptes = size >> shift;
	orig_shift = pte_none(entry) ? PAGE_SHIFT : huge_tte_to_shift(entry);

	if (pte_present(entry))
		mm->context.hugetlb_pte_count -= nptes;

	addr &= ~(size - 1);
	for (i = 0; i < nptes; i++)
		ptep[i] = __pte(0UL);

	maybe_tlb_batch_add(mm, addr, ptep, entry, 0, orig_shift);
	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
	if (size == HPAGE_SIZE)
		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0,
				    orig_shift);

	return entry;
}
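
/* The hugetlb_free_*_range() helpers below mirror free_pgd_range()
 * and friends in mm/memory.c, with one twist: a huge leaf has no
 * lower-level table hanging off it, so is_hugetlb_pmd()/
 * is_hugetlb_pud() entries are simply cleared instead of descended
 * into.
 */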
static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
				   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);

	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	mm_dec_nr_ptes(tlb->mm);
}
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		if (is_hugetlb_pmd(*pmd))
			pmd_clear(pmd);
		else
			hugetlb_free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}
static void hugetlb_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (is_hugetlb_pud(*pud))
			pud_clear(pud);
		else
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(p4d, start);
	p4d_clear(p4d);
	pud_free_tlb(tlb, pud, start);
	mm_dec_nr_puds(tlb->mm);
}
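
/* floor and ceiling bound the range whose page tables may actually be
 * freed; the PMD_MASK trimming below keeps tables that a neighbouring
 * VMA may still need, just as free_pgd_range() does for normal
 * mappings.
 */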
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	p4d_t *p4d;
	unsigned long next;

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;

	pgd = pgd_offset(tlb->mm, addr);
	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		hugetlb_free_pud_range(tlb, p4d, addr, next, floor, ceiling);
	} while (p4d++, addr = next, addr != end);
}