// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/mm/hugetlbpage.c
 *
 * Copyright (C) 2013 Linaro Ltd.
 *
 * Based on arch/x86/mm/hugetlbpage.c.
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

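/*
 * Only the page sizes this file can map with a single block or
 * contiguous-bit entry are migratable; any other size is rejected so
 * such pages are never migrated.
 */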
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
bool arch_hugetlb_migration_supported(struct hstate *h)
{
	size_t pagesize = huge_page_size(h);

	switch (pagesize) {
#ifdef CONFIG_ARM64_4K_PAGES
	case PUD_SIZE:
#endif
	case PMD_SIZE:
	case CONT_PMD_SIZE:
	case CONT_PTE_SIZE:
		return true;
	}
	pr_warn("%s: unrecognized huge page size 0x%lx\n",
			__func__, pagesize);
	return false;
}
#endif

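/*
 * In the arm64 descriptor format a non-empty pmd/pud entry with the
 * table bit clear is a block mapping, i.e. a huge page.
 */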
int pmd_huge(pmd_t pmd)
{
	return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}

int pud_huge(pud_t pud)
{
#ifndef __PAGETABLE_PMD_FOLDED
	return pud_val(pud) && !(pud_val(pud) & PUD_TABLE_BIT);
#else
	return 0;
#endif
}

/*
 * Select all bits except the pfn
 */
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
}

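/*
 * Work out how many contiguous entries make up the huge pte at @ptep,
 * and the size each entry maps: a ptep at the pmd level belongs to a
 * contiguous-pmd set, anything else to a contiguous-pte set.
 */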
static int find_num_contig(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, size_t *pgsize)
{
	pgd_t *pgdp = pgd_offset(mm, addr);
	pud_t *pudp;
	pmd_t *pmdp;

	*pgsize = PAGE_SIZE;
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	if ((pte_t *)pmdp == ptep) {
		*pgsize = PMD_SIZE;
		return CONT_PMDS;
	}
	return CONT_PTES;
}

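/*
 * Map a huge page size onto the number of page table entries it
 * occupies and the size each entry covers, without walking the page
 * tables.
 */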
static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
{
	int contig_ptes = 0;

	*pgsize = size;

	switch (size) {
#ifdef CONFIG_ARM64_4K_PAGES
	case PUD_SIZE:
#endif
	case PMD_SIZE:
		contig_ptes = 1;
		break;
	case CONT_PMD_SIZE:
		*pgsize = PMD_SIZE;
		contig_ptes = CONT_PMDS;
		break;
	case CONT_PTE_SIZE:
		*pgsize = PAGE_SIZE;
		contig_ptes = CONT_PTES;
		break;
	}

	return contig_ptes;
}

/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step.
 */
static pte_t get_clear_flush(struct mm_struct *mm,
			     unsigned long addr,
			     pte_t *ptep,
			     unsigned long pgsize,
			     unsigned long ncontig)
{
	pte_t orig_pte = huge_ptep_get(ptep);
	bool valid = pte_valid(orig_pte);
	unsigned long i, saddr = addr;

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
		pte_t pte = ptep_get_and_clear(mm, addr, ptep);

		/*
		 * If HW_AFDBM is enabled, then the HW could turn on
		 * the dirty or accessed bit for any page in the set,
		 * so check them all.
		 */
		if (pte_dirty(pte))
			orig_pte = pte_mkdirty(orig_pte);

		if (pte_young(pte))
			orig_pte = pte_mkyoung(orig_pte);
	}

	if (valid) {
		struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
		flush_tlb_range(&vma, saddr, addr);
	}
	return orig_pte;
}

/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step for use cases where the
 * original pte is not needed.
 */
static void clear_flush(struct mm_struct *mm,
			unsigned long addr,
			pte_t *ptep,
			unsigned long pgsize,
			unsigned long ncontig)
{
	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
	unsigned long i, saddr = addr;

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
		pte_clear(mm, addr, ptep);

	flush_tlb_range(&vma, saddr, addr);
}

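/*
 * Install a huge pte. A non-contiguous entry can be written directly;
 * a contiguous set must first be broken (cleared and flushed) and then
 * repopulated one entry at a time, stepping the pfn by one entry's
 * worth of pages on each iteration.
 */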
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte)
{
	size_t pgsize;
	int i;
	int ncontig;
	unsigned long pfn, dpfn;
	pgprot_t hugeprot;

	/*
	 * Code needs to be expanded to handle huge swap and migration
	 * entries. Needed for HUGETLB and MEMORY_FAILURE.
	 */
	WARN_ON(!pte_present(pte));

	if (!pte_cont(pte)) {
		set_pte_at(mm, addr, ptep, pte);
		return;
	}

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	pfn = pte_pfn(pte);
	dpfn = pgsize >> PAGE_SHIFT;
	hugeprot = pte_pgprot(pte);

	clear_flush(mm, addr, ptep, pgsize, ncontig);

	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
}

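/*
 * Swap and migration entries are never valid in hardware, so writing a
 * huge swap pte needs no Break-Before-Make sequence or TLB flush: every
 * slot in the set is simply set to the same swap entry.
 */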
void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
			  pte_t *ptep, pte_t pte, unsigned long sz)
{
	int i, ncontig;
	size_t pgsize;

	ncontig = num_contig_ptes(sz, &pgsize);

	for (i = 0; i < ncontig; i++, ptep++)
		set_pte(ptep, pte);
}

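/*
 * Allocate the page table levels needed to map a huge page of size @sz
 * at @addr and return a pointer to the entry that will hold it:
 * PUD_SIZE uses the pud itself, PMD_SIZE and CONT_PMD_SIZE use a pmd,
 * and CONT_PTE_SIZE needs a last-level pte.
 */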
pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep = NULL;

	pgdp = pgd_offset(mm, addr);
	pudp = pud_alloc(mm, pgdp, addr);
	if (!pudp)
		return NULL;

	if (sz == PUD_SIZE) {
		ptep = (pte_t *)pudp;
	} else if (sz == (CONT_PTE_SIZE)) {
		pmdp = pmd_alloc(mm, pudp, addr);

		WARN_ON(addr & (sz - 1));
		/*
		 * Note that if this code were ever ported to the
		 * 32-bit arm platform then it will cause trouble in
		 * the case where CONFIG_HIGHPTE is set, since there
		 * will be no pte_unmap() to correspond with this
		 * pte_alloc_map().
		 */
		ptep = pte_alloc_map(mm, pmdp, addr);
	} else if (sz == PMD_SIZE) {
		if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) &&
		    pud_none(READ_ONCE(*pudp)))
			ptep = huge_pmd_share(mm, addr, pudp);
		else
			ptep = (pte_t *)pmd_alloc(mm, pudp, addr);
	} else if (sz == (CONT_PMD_SIZE)) {
		pmdp = pmd_alloc(mm, pudp, addr);
		WARN_ON(addr & (sz - 1));
		return (pte_t *)pmdp;
	}

	return ptep;
}

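/*
 * Look up the entry mapping a huge page at @addr, returning NULL if no
 * huge mapping (or swap/migration entry) exists at the level implied
 * by @sz. Contiguous sizes are aligned down to the start of their set.
 */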
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;

	pgdp = pgd_offset(mm, addr);
	if (!pgd_present(READ_ONCE(*pgdp)))
		return NULL;

	pudp = pud_offset(pgdp, addr);
	pud = READ_ONCE(*pudp);
	if (sz != PUD_SIZE && pud_none(pud))
		return NULL;
	/* hugepage or swap? */
	if (pud_huge(pud) || !pud_present(pud))
		return (pte_t *)pudp;
	/* table; check the next level */

	if (sz == CONT_PMD_SIZE)
		addr &= CONT_PMD_MASK;

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) &&
	    pmd_none(pmd))
		return NULL;
	if (pmd_huge(pmd) || !pmd_present(pmd))
		return (pte_t *)pmdp;

	if (sz == CONT_PTE_SIZE)
		return pte_offset_kernel(pmdp, (addr & CONT_PTE_MASK));

	return NULL;
}

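/*
 * Decorate a huge pte for its final size: the contiguous sizes get the
 * contiguous bit set (via the pmd helpers for CONT_PMD_SIZE), while
 * PUD_SIZE and PMD_SIZE block mappings need no extra bits.
 */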
pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
			 struct page *page, int writable)
{
	size_t pagesize = huge_page_size(hstate_vma(vma));

	if (pagesize == CONT_PTE_SIZE) {
		entry = pte_mkcont(entry);
	} else if (pagesize == CONT_PMD_SIZE) {
		entry = pmd_pte(pmd_mkcont(pte_pmd(entry)));
	} else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) {
		pr_warn("%s: unrecognized huge page size 0x%lx\n",
			__func__, pagesize);
	}
	return entry;
}

void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, unsigned long sz)
{
	int i, ncontig;
	size_t pgsize;

	ncontig = num_contig_ptes(sz, &pgsize);

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
		pte_clear(mm, addr, ptep);
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pte_t *ptep)
{
	int ncontig;
	size_t pgsize;
	pte_t orig_pte = huge_ptep_get(ptep);

	if (!pte_cont(orig_pte))
		return ptep_get_and_clear(mm, addr, ptep);

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);

	return get_clear_flush(mm, addr, ptep, pgsize, ncontig);
}

/*
 * huge_ptep_set_access_flags will update access flags (dirty, accessed)
 * and write permission.
 *
 * For a contiguous huge pte range, write permission only needs to be
 * checked on the first pte in the set. Dirty and young, however, may
 * have been set by hardware on any pte in the set, so every entry has
 * to be checked for a discrepancy.
 */
static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
{
	int i;

	if (pte_write(pte) != pte_write(huge_ptep_get(ptep)))
		return 1;

	for (i = 0; i < ncontig; i++) {
		pte_t orig_pte = huge_ptep_get(ptep + i);

		if (pte_dirty(pte) != pte_dirty(orig_pte))
			return 1;

		if (pte_young(pte) != pte_young(orig_pte))
			return 1;
	}

	return 0;
}

int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty)
{
	int ncontig, i;
	size_t pgsize = 0;
	unsigned long pfn = pte_pfn(pte), dpfn;
	pgprot_t hugeprot;
	pte_t orig_pte;

	if (!pte_cont(pte))
		return ptep_set_access_flags(vma, addr, ptep, pte, dirty);

	ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
	dpfn = pgsize >> PAGE_SHIFT;

	if (!__cont_access_flags_changed(ptep, pte, ncontig))
		return 0;

	orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);

	/* Make sure we don't lose the dirty or young state */
	if (pte_dirty(orig_pte))
		pte = pte_mkdirty(pte);

	if (pte_young(orig_pte))
		pte = pte_mkyoung(pte);

	hugeprot = pte_pgprot(pte);
	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot));

	return 1;
}

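/*
 * Write-protect a huge pte. For a contiguous set this is a full
 * Break-Before-Make cycle: clear and flush the whole set, drop write
 * permission from the accumulated pte, then rewrite every entry.
 */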
void huge_ptep_set_wrprotect(struct mm_struct *mm,
			     unsigned long addr, pte_t *ptep)
{
	unsigned long pfn, dpfn;
	pgprot_t hugeprot;
	int ncontig, i;
	size_t pgsize;
	pte_t pte;

	if (!pte_cont(READ_ONCE(*ptep))) {
		ptep_set_wrprotect(mm, addr, ptep);
		return;
	}

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	dpfn = pgsize >> PAGE_SHIFT;

	pte = get_clear_flush(mm, addr, ptep, pgsize, ncontig);
	pte = pte_wrprotect(pte);

	hugeprot = pte_pgprot(pte);
	pfn = pte_pfn(pte);

	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
}

void huge_ptep_clear_flush(struct vm_area_struct *vma,
			   unsigned long addr, pte_t *ptep)
{
	size_t pgsize;
	int ncontig;

	if (!pte_cont(READ_ONCE(*ptep))) {
		ptep_clear_flush(vma, addr, ptep);
		return;
	}

	ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
	clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
}

static void __init add_huge_page_size(unsigned long size)
{
	if (size_to_hstate(size))
		return;

	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
}

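/*
 * Register the hstates this configuration supports. For a 4K granule
 * that is 1G (pud), 32M (contiguous pmd), 2M (pmd) and 64K (contiguous
 * pte); other granules register their own CONT_PMD/PMD/CONT_PTE sizes.
 */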
static int __init hugetlbpage_init(void)
{
#ifdef CONFIG_ARM64_4K_PAGES
	add_huge_page_size(PUD_SIZE);
#endif
	add_huge_page_size(CONT_PMD_SIZE);
	add_huge_page_size(PMD_SIZE);
	add_huge_page_size(CONT_PTE_SIZE);

	return 0;
}
arch_initcall(hugetlbpage_init);

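/*
 * Parse the "hugepagesz=" boot parameter, e.g. "hugepagesz=32M" on a
 * 4K granule kernel; sizes other than the ones registered above are
 * rejected.
 */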
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);

	switch (ps) {
#ifdef CONFIG_ARM64_4K_PAGES
	case PUD_SIZE:
#endif
	case CONT_PMD_SIZE:
	case PMD_SIZE:
	case CONT_PTE_SIZE:
		add_huge_page_size(ps);
		return 1;
	}

	hugetlb_bad_size();
	pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
	return 0;
}
__setup("hugepagesz=", setup_hugepagesz);