// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/mm/hugetlbpage.c
 *
 * Copyright (C) 2013 Linaro Ltd.
 *
 * Based on arch/x86/mm/hugetlbpage.c.
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

/*
 * HugeTLB Support Matrix
 *
 * ---------------------------------------------------
 * | Page Size | CONT PTE |  PMD  | CONT PMD |  PUD  |
 * ---------------------------------------------------
 * |     4K    |   64K    |   2M  |    32M   |   1G  |
 * |    16K    |    2M    |  32M  |     1G   |       |
 * |    64K    |    2M    | 512M  |    16G   |       |
 * ---------------------------------------------------
 */
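
/*
 * With a 4K base page, for example, the 64K CONT PTE size is backed by
 * 64K / 4K = 16 contiguous PTEs (CONT_PTES) and the 32M CONT PMD size
 * by 32M / 2M = 16 contiguous PMDs (CONT_PMDS); the 16K and 64K
 * granules follow the same arithmetic with their own entry counts.
 */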

/*
 * Reserve CMA areas for the largest supported gigantic
 * huge page when requested. Any other smaller gigantic
 * huge pages could still be served from those areas.
 */
#ifdef CONFIG_CMA
void __init arm64_hugetlb_cma_reserve(void)
{
	int order;

#ifdef CONFIG_ARM64_4K_PAGES
	order = PUD_SHIFT - PAGE_SHIFT;
#else
	order = CONT_PMD_SHIFT + PMD_SHIFT - PAGE_SHIFT;
#endif
	/*
	 * HugeTLB CMA reservation is required for gigantic
	 * huge pages which could not be allocated via the
	 * page allocator. Just warn if there is any change
	 * breaking this assumption.
	 */
	WARN_ON(order <= MAX_ORDER);
	hugetlb_cma_reserve(order);
}
#endif /* CONFIG_CMA */

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
bool arch_hugetlb_migration_supported(struct hstate *h)
{
	size_t pagesize = huge_page_size(h);

	switch (pagesize) {
#ifdef CONFIG_ARM64_4K_PAGES
	case PUD_SIZE:
#endif
	case PMD_SIZE:
	case CONT_PMD_SIZE:
	case CONT_PTE_SIZE:
		return true;
	}
	pr_warn("%s: unrecognized huge page size 0x%lx\n",
			__func__, pagesize);
	return false;
}
#endif

int pmd_huge(pmd_t pmd)
{
	return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}

int pud_huge(pud_t pud)
{
#ifndef __PAGETABLE_PMD_FOLDED
	return pud_val(pud) && !(pud_val(pud) & PUD_TABLE_BIT);
#else
	return 0;
#endif
}

/*
 * Select all bits except the pfn
 */
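/*
 * (pfn_pte(pfn, __pgprot(0)) builds an entry whose only non-zero bits
 * are the pfn field, so XOR-ing it with the original entry clears the
 * pfn and leaves just the attribute bits, i.e. the pgprot.)
 */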
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
}

static int find_num_contig(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, size_t *pgsize)
{
	pgd_t *pgdp = pgd_offset(mm, addr);
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;

	*pgsize = PAGE_SIZE;
	p4dp = p4d_offset(pgdp, addr);
	pudp = pud_offset(p4dp, addr);
	pmdp = pmd_offset(pudp, addr);
	if ((pte_t *)pmdp == ptep) {
		*pgsize = PMD_SIZE;
		return CONT_PMDS;
	}
	return CONT_PTES;
}

static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
{
	int contig_ptes = 0;

	*pgsize = size;

	switch (size) {
#ifdef CONFIG_ARM64_4K_PAGES
	case PUD_SIZE:
#endif
	case PMD_SIZE:
		contig_ptes = 1;
		break;
	case CONT_PMD_SIZE:
		*pgsize = PMD_SIZE;
		contig_ptes = CONT_PMDS;
		break;
	case CONT_PTE_SIZE:
		*pgsize = PAGE_SIZE;
		contig_ptes = CONT_PTES;
		break;
	}

	return contig_ptes;
}
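
/*
 * Note: find_num_contig() above derives the entry count and step size
 * from the page-table level that @ptep sits at, while num_contig_ptes()
 * derives the same pair directly from the huge page size.
 */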

/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step.
 */
static pte_t get_clear_flush(struct mm_struct *mm,
			     unsigned long addr,
			     pte_t *ptep,
			     unsigned long pgsize,
			     unsigned long ncontig)
{
	pte_t orig_pte = huge_ptep_get(ptep);
	bool valid = pte_valid(orig_pte);
	unsigned long i, saddr = addr;

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
		pte_t pte = ptep_get_and_clear(mm, addr, ptep);

		/*
		 * If HW_AFDBM is enabled, then the HW could turn on
		 * the dirty or accessed bit for any page in the set,
		 * so check them all.
		 */
		if (pte_dirty(pte))
			orig_pte = pte_mkdirty(orig_pte);

		if (pte_young(pte))
			orig_pte = pte_mkyoung(orig_pte);
	}

	if (valid) {
		struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
		flush_tlb_range(&vma, saddr, addr);
	}
	return orig_pte;
}

/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step for use cases where the
 * original pte is not needed.
 */
static void clear_flush(struct mm_struct *mm,
			     unsigned long addr,
			     pte_t *ptep,
			     unsigned long pgsize,
			     unsigned long ncontig)
{
	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
	unsigned long i, saddr = addr;

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
		pte_clear(mm, addr, ptep);

	flush_tlb_range(&vma, saddr, addr);
}
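
/*
 * set_huge_pte_at() is a typical Break-Before-Make user: for a
 * contiguous range it first breaks the whole set and only then writes
 * the new entries, advancing the pfn by pgsize >> PAGE_SHIFT per entry.
 * Sketch of the sequence:
 *
 *	clear_flush(mm, addr, ptep, pgsize, ncontig);	 // break: clear + TLBI
 *	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
 *		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));  // make
 */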
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, pte_t pte)
{
	size_t pgsize;
	int i;
	int ncontig;
	unsigned long pfn, dpfn;
	pgprot_t hugeprot;

	/*
	 * Code needs to be expanded to handle huge swap and migration
	 * entries. Needed for HUGETLB and MEMORY_FAILURE.
	 */
	WARN_ON(!pte_present(pte));

	if (!pte_cont(pte)) {
		set_pte_at(mm, addr, ptep, pte);
		return;
	}

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	pfn = pte_pfn(pte);
	dpfn = pgsize >> PAGE_SHIFT;
	hugeprot = pte_pgprot(pte);

	clear_flush(mm, addr, ptep, pgsize, ncontig);

	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
}

void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
			  pte_t *ptep, pte_t pte, unsigned long sz)
{
	int i, ncontig;
	size_t pgsize;

	ncontig = num_contig_ptes(sz, &pgsize);

	for (i = 0; i < ncontig; i++, ptep++)
		set_pte(ptep, pte);
}

pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep = NULL;

	pgdp = pgd_offset(mm, addr);
	p4dp = p4d_offset(pgdp, addr);
	pudp = pud_alloc(mm, p4dp, addr);
	if (!pudp)
		return NULL;

	if (sz == PUD_SIZE) {
		ptep = (pte_t *)pudp;
	} else if (sz == (CONT_PTE_SIZE)) {
		pmdp = pmd_alloc(mm, pudp, addr);
		if (!pmdp)
			return NULL;

		WARN_ON(addr & (sz - 1));
		/*
		 * Note that if this code were ever ported to the
		 * 32-bit arm platform then it will cause trouble in
		 * the case where CONFIG_HIGHPTE is set, since there
		 * will be no pte_unmap() to correspond with this
		 * pte_alloc_map().
		 */
		ptep = pte_alloc_map(mm, pmdp, addr);
	} else if (sz == PMD_SIZE) {
		if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) &&
		    pud_none(READ_ONCE(*pudp)))
			ptep = huge_pmd_share(mm, addr, pudp);
		else
			ptep = (pte_t *)pmd_alloc(mm, pudp, addr);
	} else if (sz == (CONT_PMD_SIZE)) {
		pmdp = pmd_alloc(mm, pudp, addr);
		WARN_ON(addr & (sz - 1));
		return (pte_t *)pmdp;
	}

	return ptep;
}
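
/*
 * In short: PUD_SIZE is mapped at the pud, PMD_SIZE at the pmd
 * (optionally sharing the pmd table via huge_pmd_share()), CONT_PMD_SIZE
 * at the pmd as well, and CONT_PTE_SIZE at the pte level.
 * huge_pte_offset() walks the table with the same size-to-level mapping.
 */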
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;

	pgdp = pgd_offset(mm, addr);
	if (!pgd_present(READ_ONCE(*pgdp)))
		return NULL;

	p4dp = p4d_offset(pgdp, addr);
	if (!p4d_present(READ_ONCE(*p4dp)))
		return NULL;

	pudp = pud_offset(p4dp, addr);
	pud = READ_ONCE(*pudp);
	if (sz != PUD_SIZE && pud_none(pud))
		return NULL;
	/* hugepage or swap? */
	if (pud_huge(pud) || !pud_present(pud))
		return (pte_t *)pudp;
	/* table; check the next level */

	if (sz == CONT_PMD_SIZE)
		addr &= CONT_PMD_MASK;

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) &&
	    pmd_none(pmd))
		return NULL;
	if (pmd_huge(pmd) || !pmd_present(pmd))
		return (pte_t *)pmdp;

	if (sz == CONT_PTE_SIZE)
		return pte_offset_kernel(pmdp, (addr & CONT_PTE_MASK));

	return NULL;
}

pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
			 struct page *page, int writable)
{
	size_t pagesize = huge_page_size(hstate_vma(vma));

	if (pagesize == CONT_PTE_SIZE) {
		entry = pte_mkcont(entry);
	} else if (pagesize == CONT_PMD_SIZE) {
		entry = pmd_pte(pmd_mkcont(pte_pmd(entry)));
	} else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) {
		pr_warn("%s: unrecognized huge page size 0x%lx\n",
			__func__, pagesize);
	}
	return entry;
}

void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, unsigned long sz)
{
	int i, ncontig;
	size_t pgsize;

	ncontig = num_contig_ptes(sz, &pgsize);

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
		pte_clear(mm, addr, ptep);
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pte_t *ptep)
{
	int ncontig;
	size_t pgsize;
	pte_t orig_pte = huge_ptep_get(ptep);

	if (!pte_cont(orig_pte))
		return ptep_get_and_clear(mm, addr, ptep);

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);

	return get_clear_flush(mm, addr, ptep, pgsize, ncontig);
}

/*
 * huge_ptep_set_access_flags will update access flags (dirty, accessed)
 * and write permission.
 *
 * For a contiguous huge pte range we need to check whether or not write
 * permission has to change only on the first pte in the set. Then for
 * all the contiguous ptes we need to check whether or not there is a
 * discrepancy in the dirty or young state.
 */
static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
{
	int i;

	if (pte_write(pte) != pte_write(huge_ptep_get(ptep)))
		return 1;

	for (i = 0; i < ncontig; i++) {
		pte_t orig_pte = huge_ptep_get(ptep + i);

		if (pte_dirty(pte) != pte_dirty(orig_pte))
			return 1;

		if (pte_young(pte) != pte_young(orig_pte))
			return 1;
	}

	return 0;
}
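
/*
 * If nothing changed we can return without doing any Break-Before-Make;
 * otherwise the whole range is broken with get_clear_flush(), the dirty
 * and young bits of the old entry are folded back in, and the range is
 * rewritten with the new flags.
 */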
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty)
{
	int ncontig, i;
	size_t pgsize = 0;
	unsigned long pfn = pte_pfn(pte), dpfn;
	pgprot_t hugeprot;
	pte_t orig_pte;

	if (!pte_cont(pte))
		return ptep_set_access_flags(vma, addr, ptep, pte, dirty);

	ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
	dpfn = pgsize >> PAGE_SHIFT;

	if (!__cont_access_flags_changed(ptep, pte, ncontig))
		return 0;

	orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);

	/* Make sure we don't lose the dirty or young state */
	if (pte_dirty(orig_pte))
		pte = pte_mkdirty(pte);

	if (pte_young(orig_pte))
		pte = pte_mkyoung(pte);

	hugeprot = pte_pgprot(pte);
	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot));

	return 1;
}

void huge_ptep_set_wrprotect(struct mm_struct *mm,
			     unsigned long addr, pte_t *ptep)
{
	unsigned long pfn, dpfn;
	pgprot_t hugeprot;
	int ncontig, i;
	size_t pgsize;
	pte_t pte;

	if (!pte_cont(READ_ONCE(*ptep))) {
		ptep_set_wrprotect(mm, addr, ptep);
		return;
	}

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	dpfn = pgsize >> PAGE_SHIFT;

	pte = get_clear_flush(mm, addr, ptep, pgsize, ncontig);
	pte = pte_wrprotect(pte);

	hugeprot = pte_pgprot(pte);
	pfn = pte_pfn(pte);

	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
}

void huge_ptep_clear_flush(struct vm_area_struct *vma,
			   unsigned long addr, pte_t *ptep)
{
	size_t pgsize;
	int ncontig;

	if (!pte_cont(READ_ONCE(*ptep))) {
		ptep_clear_flush(vma, addr, ptep);
		return;
	}

	ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
	clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
}

static int __init hugetlbpage_init(void)
{
#ifdef CONFIG_ARM64_4K_PAGES
	hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
#endif
	hugetlb_add_hstate(CONT_PMD_SHIFT - PAGE_SHIFT);
	hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	hugetlb_add_hstate(CONT_PTE_SHIFT - PAGE_SHIFT);

	return 0;
}
arch_initcall(hugetlbpage_init);

bool __init arch_hugetlb_valid_size(unsigned long size)
{
	switch (size) {
#ifdef CONFIG_ARM64_4K_PAGES