// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/mm/hugetlbpage.c
 *
 * Copyright (C) 2013 Linaro Ltd.
 *
 * Based on arch/x86/mm/hugetlbpage.c.
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

/*
 * HugeTLB Support Matrix
 *
 * ---------------------------------------------------
 * | Page Size | CONT PTE |  PMD  | CONT PMD |  PUD  |
 * ---------------------------------------------------
 * |     4K    |   64K    |   2M  |    32M   |   1G  |
 * |    16K    |    2M    |  32M  |     1G   |       |
 * |    64K    |    2M    |  512M |    16G   |       |
 * ---------------------------------------------------
 */
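
/*
 * The CONT PTE and CONT PMD sizes above come from the architecture's
 * Contiguous bit, which lets a run of adjacent entries share a single
 * TLB entry: with 4K pages that is 16 PTEs (16 * 4K = 64K) or
 * 16 PMDs (16 * 2M = 32M).
 */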

/*
 * Reserve CMA areas for the largest supported gigantic
 * huge page when requested. Any other smaller gigantic
 * huge pages could still be served from those areas.
 */
#ifdef CONFIG_CMA
void __init arm64_hugetlb_cma_reserve(void)
{
	int order;

	if (pud_sect_supported())
		order = PUD_SHIFT - PAGE_SHIFT;
	else
		order = CONT_PMD_SHIFT - PAGE_SHIFT;

	hugetlb_cma_reserve(order);
}
#endif /* CONFIG_CMA */
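
/*
 * Check whether @size is one of the huge page sizes supported for the
 * current page-table geometry (see the support matrix above).
 */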
static bool __hugetlb_valid_size(unsigned long size)
{
	switch (size) {
#ifndef __PAGETABLE_PMD_FOLDED
	case PUD_SIZE:
		return pud_sect_supported();
#endif
	case CONT_PMD_SIZE:
	case PMD_SIZE:
	case CONT_PTE_SIZE:
		return true;
	}

	return false;
}

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
bool arch_hugetlb_migration_supported(struct hstate *h)
{
	size_t pagesize = huge_page_size(h);

	if (!__hugetlb_valid_size(pagesize)) {
		pr_warn("%s: unrecognized huge page size 0x%lx\n",
			__func__, pagesize);
		return false;
	}
	return true;
}
#endif
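
/*
 * Walk the page tables to work out how many contiguous entries back the
 * huge mapping at @addr: CONT_PMDS if @ptep points at a PMD, CONT_PTES
 * otherwise. The size covered by each entry is returned via @pgsize.
 */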
static int find_num_contig(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, size_t *pgsize)
{
	pgd_t *pgdp = pgd_offset(mm, addr);
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;

	*pgsize = PAGE_SIZE;
	p4dp = p4d_offset(pgdp, addr);
	pudp = pud_offset(p4dp, addr);
	pmdp = pmd_offset(pudp, addr);
	if ((pte_t *)pmdp == ptep) {
		*pgsize = PMD_SIZE;
		return CONT_PMDS;
	}
	return CONT_PTES;
}
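
/*
 * Map a huge page size onto the number of page-table entries that back
 * it and the size covered by each entry, e.g. CONT_PMD_SIZE maps to
 * CONT_PMDS entries of PMD_SIZE each. Unsupported sizes yield 0.
 */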
static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
{
	int contig_ptes = 0;

	*pgsize = size;

	switch (size) {
#ifndef __PAGETABLE_PMD_FOLDED
	case PUD_SIZE:
		if (pud_sect_supported())
			contig_ptes = 1;
		break;
#endif
	case PMD_SIZE:
		contig_ptes = 1;
		break;
	case CONT_PMD_SIZE:
		*pgsize = PMD_SIZE;
		contig_ptes = CONT_PMDS;
		break;
	case CONT_PTE_SIZE:
		*pgsize = PAGE_SIZE;
		contig_ptes = CONT_PTES;
		break;
	}

	return contig_ptes;
}
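
/*
 * For a contiguous huge mapping the hardware may have set the dirty or
 * accessed bit on any entry in the set, so fold those bits from every
 * constituent PTE into the value returned for the first one.
 */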
pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	int ncontig, i;
	size_t pgsize;
	pte_t orig_pte = __ptep_get(ptep);

	if (!pte_present(orig_pte) || !pte_cont(orig_pte))
		return orig_pte;

	ncontig = num_contig_ptes(page_size(pte_page(orig_pte)), &pgsize);
	for (i = 0; i < ncontig; i++, ptep++) {
		pte_t pte = __ptep_get(ptep);

		if (pte_dirty(pte))
			orig_pte = pte_mkdirty(orig_pte);

		if (pte_young(pte))
			orig_pte = pte_mkyoung(orig_pte);
	}
	return orig_pte;
}

/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step.
 */
static pte_t get_clear_contig(struct mm_struct *mm,
			      unsigned long addr,
			      pte_t *ptep,
			      unsigned long pgsize,
			      unsigned long ncontig)
{
	pte_t orig_pte = __ptep_get(ptep);
	unsigned long i;

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
		pte_t pte = __ptep_get_and_clear(mm, addr, ptep);

		/*
		 * If HW_AFDBM is enabled, then the HW could turn on
		 * the dirty or accessed bit for any page in the set,
		 * so check them all.
		 */
		if (pte_dirty(pte))
			orig_pte = pte_mkdirty(orig_pte);

		if (pte_young(pte))
			orig_pte = pte_mkyoung(orig_pte);
	}
	return orig_pte;
}
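
/*
 * Like get_clear_contig(), but also invalidates the TLB for the whole
 * range so that the break step of break-before-make is fully complete.
 */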
static pte_t get_clear_contig_flush(struct mm_struct *mm,
				    unsigned long addr,
				    pte_t *ptep,
				    unsigned long pgsize,
				    unsigned long ncontig)
{
	pte_t orig_pte = get_clear_contig(mm, addr, ptep, pgsize, ncontig);
	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);

	flush_tlb_range(&vma, addr, addr + (pgsize * ncontig));
	return orig_pte;
}

/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step for use cases where the
 * original pte is not needed.
 */
static void clear_flush(struct mm_struct *mm,
			unsigned long addr,
			pte_t *ptep,
			unsigned long pgsize,
			unsigned long ncontig)
{
	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
	unsigned long i, saddr = addr;

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
		__ptep_get_and_clear(mm, addr, ptep);

	flush_tlb_range(&vma, saddr, addr);
}
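
/*
 * Install a huge mapping at @addr. Non-present and non-contiguous
 * entries are written directly; contiguous ranges are first broken with
 * clear_flush() and then repopulated one entry at a time.
 */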
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte, unsigned long sz)
{
	size_t pgsize;
	int i;
	int ncontig;
	unsigned long pfn, dpfn;
	pgprot_t hugeprot;

	ncontig = num_contig_ptes(sz, &pgsize);

	if (!pte_present(pte)) {
		for (i = 0; i < ncontig; i++, ptep++, addr += pgsize)
			__set_ptes(mm, addr, ptep, pte, 1);
		return;
	}

	if (!pte_cont(pte)) {
		__set_ptes(mm, addr, ptep, pte, 1);
		return;
	}

	pfn = pte_pfn(pte);
	dpfn = pgsize >> PAGE_SHIFT;
	hugeprot = pte_pgprot(pte);

	clear_flush(mm, addr, ptep, pgsize, ncontig);

	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		__set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1);
}
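
/*
 * Allocate the page-table levels needed for a huge mapping of size @sz
 * at @addr and return a pointer to the entry that will hold it: the PUD
 * for PUD_SIZE, a PMD for PMD/CONT_PMD sizes, or a PTE for CONT_PTE.
 */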
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep = NULL;

	pgdp = pgd_offset(mm, addr);
	p4dp = p4d_alloc(mm, pgdp, addr);
	if (!p4dp)
		return NULL;

	pudp = pud_alloc(mm, p4dp, addr);
	if (!pudp)
		return NULL;

	if (sz == PUD_SIZE) {
		ptep = (pte_t *)pudp;
	} else if (sz == (CONT_PTE_SIZE)) {
		pmdp = pmd_alloc(mm, pudp, addr);
		if (!pmdp)
			return NULL;

		WARN_ON(addr & (sz - 1));
		ptep = pte_alloc_huge(mm, pmdp, addr);
	} else if (sz == PMD_SIZE) {
		if (want_pmd_share(vma, addr) && pud_none(READ_ONCE(*pudp)))
			ptep = huge_pmd_share(mm, vma, addr, pudp);
		else
			ptep = (pte_t *)pmd_alloc(mm, pudp, addr);
	} else if (sz == (CONT_PMD_SIZE)) {
		pmdp = pmd_alloc(mm, pudp, addr);
		WARN_ON(addr & (sz - 1));
		return (pte_t *)pmdp;
	}

	return ptep;
}
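
/*
 * Return a pointer to the existing page-table entry that maps @addr for
 * huge page size @sz, or NULL if no suitable entry is present. Leaf and
 * swap entries are returned at whichever level they are found.
 */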
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;

	pgdp = pgd_offset(mm, addr);
	if (!pgd_present(READ_ONCE(*pgdp)))
		return NULL;

	p4dp = p4d_offset(pgdp, addr);
	if (!p4d_present(READ_ONCE(*p4dp)))
		return NULL;

	pudp = pud_offset(p4dp, addr);
	pud = READ_ONCE(*pudp);
	if (sz != PUD_SIZE && pud_none(pud))
		return NULL;
	/* hugepage or swap? */
	if (pud_leaf(pud) || !pud_present(pud))
		return (pte_t *)pudp;
	/* table; check the next level */

	if (sz == CONT_PMD_SIZE)
		addr &= CONT_PMD_MASK;

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) &&
	    pmd_none(pmd))
		return NULL;
	if (pmd_leaf(pmd) || !pmd_present(pmd))
		return (pte_t *)pmdp;

	if (sz == CONT_PTE_SIZE)
		return pte_offset_huge(pmdp, (addr & CONT_PTE_MASK));

	return NULL;
}
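
/*
 * Report how much address space separates this huge page size from the
 * next page-table level up (e.g. PGDIR_SIZE - PUD_SIZE); generic
 * hugetlb walkers use this as a mask to skip ahead when an upper-level
 * entry turns out to be empty.
 */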
unsigned long hugetlb_mask_last_page(struct hstate *h)
{
	unsigned long hp_size = huge_page_size(h);

	switch (hp_size) {
#ifndef __PAGETABLE_PMD_FOLDED
	case PUD_SIZE:
		return PGDIR_SIZE - PUD_SIZE;
#endif
	case CONT_PMD_SIZE:
		return PUD_SIZE - CONT_PMD_SIZE;
	case PMD_SIZE:
		return PUD_SIZE - PMD_SIZE;
	case CONT_PTE_SIZE:
		return PMD_SIZE - CONT_PTE_SIZE;
	default:
		break;
	}

	return 0UL;
}
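
/*
 * Mark @entry as a huge mapping and, for the contiguous sizes, set the
 * Contiguous bit as well; unrecognized sizes are reported but the entry
 * is otherwise left alone.
 */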
pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
	size_t pagesize = 1UL << shift;

	entry = pte_mkhuge(entry);
	if (pagesize == CONT_PTE_SIZE) {
		entry = pte_mkcont(entry);
	} else if (pagesize == CONT_PMD_SIZE) {
		entry = pmd_pte(pmd_mkcont(pte_pmd(entry)));
	} else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) {
		pr_warn("%s: unrecognized huge page size 0x%lx\n",
			__func__, pagesize);
	}
	return entry;
}
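
/* Clear every page-table entry backing the huge mapping at @addr. */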
void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, unsigned long sz)
{
	int i, ncontig;
	size_t pgsize;

	ncontig = num_contig_ptes(sz, &pgsize);

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
		__pte_clear(mm, addr, ptep);
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pte_t *ptep)
{
	int ncontig;
	size_t pgsize;
	pte_t orig_pte = __ptep_get(ptep);

	if (!pte_cont(orig_pte))
		return __ptep_get_and_clear(mm, addr, ptep);

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);

	return get_clear_contig(mm, addr, ptep, pgsize, ncontig);
}

/*
 * huge_ptep_set_access_flags will update access flags (dirty, accessed)
 * and write permission.
 *
 * For a contiguous huge pte range, write permission only needs to be
 * checked against the first pte in the set, while the dirty and young
 * bits must be compared against every constituent pte to spot any
 * discrepancy.
 */
static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
{
	int i;

	if (pte_write(pte) != pte_write(__ptep_get(ptep)))
		return 1;

	for (i = 0; i < ncontig; i++) {
		pte_t orig_pte = __ptep_get(ptep + i);

		if (pte_dirty(pte) != pte_dirty(orig_pte))
			return 1;

		if (pte_young(pte) != pte_young(orig_pte))
			return 1;
	}

	return 0;
}

int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty)
{
	int ncontig, i;
	size_t pgsize = 0;
	unsigned long pfn = pte_pfn(pte), dpfn;
	struct mm_struct *mm = vma->vm_mm;
	pgprot_t hugeprot;
	pte_t orig_pte;

	if (!pte_cont(pte))
		return __ptep_set_access_flags(vma, addr, ptep, pte, dirty);

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	dpfn = pgsize >> PAGE_SHIFT;

	if (!__cont_access_flags_changed(ptep, pte, ncontig))
		return 0;

	orig_pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);

	/* Make sure we don't lose the dirty or young state */
	if (pte_dirty(orig_pte))
		pte = pte_mkdirty(pte);

	if (pte_young(orig_pte))
		pte = pte_mkyoung(pte);

	hugeprot = pte_pgprot(pte);
	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		__set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1);

	return 1;
}
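
/*
 * Write-protect the huge mapping at @addr. Contiguous sets go through
 * the usual break-before-make sequence before being rewritten without
 * write permission.
 */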
void huge_ptep_set_wrprotect(struct mm_struct *mm,
			     unsigned long addr, pte_t *ptep)
{
	unsigned long pfn, dpfn;
	pgprot_t hugeprot;
	int ncontig, i;
	size_t pgsize;
	pte_t pte;

	if (!pte_cont(__ptep_get(ptep))) {
		__ptep_set_wrprotect(mm, addr, ptep);
		return;
	}

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	dpfn = pgsize >> PAGE_SHIFT;

	pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
	pte = pte_wrprotect(pte);

	hugeprot = pte_pgprot(pte);
	pfn = pte_pfn(pte);

	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		__set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1);
}

pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
			    unsigned long addr, pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	size_t pgsize;
	int ncontig;

	if (!pte_cont(__ptep_get(ptep)))
		return ptep_clear_flush(vma, addr, ptep);

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	return get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
}
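
/*
 * Register the supported huge page sizes with the core hugetlb code;
 * PUD-sized pages are only added when section mappings at the PUD level
 * are available.
 */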
static int __init hugetlbpage_init(void)
{
	if (pud_sect_supported())
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);

	hugetlb_add_hstate(CONT_PMD_SHIFT - PAGE_SHIFT);
	hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	hugetlb_add_hstate(CONT_PTE_SHIFT - PAGE_SHIFT);

	return 0;
}
arch_initcall(hugetlbpage_init);

bool __init arch_hugetlb_valid_size(unsigned long size)
{
	return __hugetlb_valid_size(size);
}
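
/*
 * huge_ptep_modify_prot_start()/commit() bracket a permission change:
 * start tears down the old mapping (using a full clear-and-flush when
 * erratum 2645198 requires break-before-make), and commit installs the
 * new entry via set_huge_pte_at().
 */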
pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) {
		/*
		 * Break-before-make (BBM) is required for all user space
		 * mappings when the permission changes from executable to
		 * non-executable on CPUs affected by erratum #2645198.
		 */
		if (pte_user_exec(__ptep_get(ptep)))
			return huge_ptep_clear_flush(vma, addr, ptep);
	}
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}

void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
				  pte_t old_pte, pte_t pte)
{
	unsigned long psize = huge_page_size(hstate_vma(vma));

	set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
}