Revert "tty: hvc: Fix data abort due to race in hvc_open"
[linux/fpc-iii.git] / arch / arm64 / mm / hugetlbpage.c
blob0be3355e34997544aa43cd652c733a996b9e3ff6
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/mm/hugetlbpage.c
 *
 * Copyright (C) 2013 Linaro Ltd.
 *
 * Based on arch/x86/mm/hugetlbpage.c.
 */
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
bool arch_hugetlb_migration_supported(struct hstate *h)
{
	size_t pagesize = huge_page_size(h);

	switch (pagesize) {
#ifdef CONFIG_ARM64_4K_PAGES
	case PUD_SIZE:
#endif
	case PMD_SIZE:
	case CONT_PMD_SIZE:
	case CONT_PTE_SIZE:
		return true;
	}
	pr_warn("%s: unrecognized huge page size 0x%lx\n",
			__func__, pagesize);
	return false;
}
#endif
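/*
 * Huge entries are block mappings rather than pointers to a next-level
 * table: a non-empty descriptor with the table bit clear is huge.
 */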
int pmd_huge(pmd_t pmd)
{
	return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}
int pud_huge(pud_t pud)
{
#ifndef __PAGETABLE_PMD_FOLDED
	return pud_val(pud) && !(pud_val(pud) & PUD_TABLE_BIT);
#else
	return 0;
#endif
}
/*
 * Select all bits except the pfn
 */
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
}
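/*
 * Work out how many page table entries back a contiguous hugetlb
 * mapping, and the size each entry maps: a ptep pointing at the pmd
 * level means CONT_PMDS entries of PMD_SIZE, otherwise CONT_PTES
 * entries of PAGE_SIZE.
 */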
static int find_num_contig(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, size_t *pgsize)
{
	pgd_t *pgdp = pgd_offset(mm, addr);
	pud_t *pudp;
	pmd_t *pmdp;

	*pgsize = PAGE_SIZE;
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	if ((pte_t *)pmdp == ptep) {
		*pgsize = PMD_SIZE;
		return CONT_PMDS;
	}
	return CONT_PTES;
}
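/*
 * Translate a huge page size into the number of entries that encode it
 * and the granule each entry maps; unsupported sizes yield zero
 * entries.
 */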
static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
{
	int contig_ptes = 0;

	*pgsize = size;

	switch (size) {
#ifdef CONFIG_ARM64_4K_PAGES
	case PUD_SIZE:
#endif
	case PMD_SIZE:
		contig_ptes = 1;
		break;
	case CONT_PMD_SIZE:
		*pgsize = PMD_SIZE;
		contig_ptes = CONT_PMDS;
		break;
	case CONT_PTE_SIZE:
		*pgsize = PAGE_SIZE;
		contig_ptes = CONT_PTES;
		break;
	}

	return contig_ptes;
}
/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step.
 */
static pte_t get_clear_flush(struct mm_struct *mm,
			     unsigned long addr,
			     pte_t *ptep,
			     unsigned long pgsize,
			     unsigned long ncontig)
{
	pte_t orig_pte = huge_ptep_get(ptep);
	bool valid = pte_valid(orig_pte);
	unsigned long i, saddr = addr;

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
		pte_t pte = ptep_get_and_clear(mm, addr, ptep);

		/*
		 * If HW_AFDBM is enabled, then the HW could turn on
		 * the dirty or accessed bit for any page in the set,
		 * so check them all.
		 */
		if (pte_dirty(pte))
			orig_pte = pte_mkdirty(orig_pte);

		if (pte_young(pte))
			orig_pte = pte_mkyoung(orig_pte);
	}

	if (valid) {
		struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
		flush_tlb_range(&vma, saddr, addr);
	}
	return orig_pte;
}
/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step for use cases where the
 * original pte is not needed.
 */
static void clear_flush(struct mm_struct *mm,
			unsigned long addr,
			pte_t *ptep,
			unsigned long pgsize,
			unsigned long ncontig)
{
	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
	unsigned long i, saddr = addr;

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
		pte_clear(mm, addr, ptep);

	flush_tlb_range(&vma, saddr, addr);
}
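/*
 * Install a hugetlb mapping. A non-contiguous entry is written
 * directly; a contiguous set is first broken (cleared and flushed) and
 * then each entry is written with its own pfn but shared attributes.
 */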
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, pte_t pte)
{
	size_t pgsize;
	int i;
	int ncontig;
	unsigned long pfn, dpfn;
	pgprot_t hugeprot;

	/*
	 * Code needs to be expanded to handle huge swap and migration
	 * entries. Needed for HUGETLB and MEMORY_FAILURE.
	 */
	WARN_ON(!pte_present(pte));

	if (!pte_cont(pte)) {
		set_pte_at(mm, addr, ptep, pte);
		return;
	}

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	pfn = pte_pfn(pte);
	dpfn = pgsize >> PAGE_SHIFT;
	hugeprot = pte_pgprot(pte);

	clear_flush(mm, addr, ptep, pgsize, ncontig);

	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
}
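/*
 * Install a swap/migration entry across a huge mapping. Swap entries
 * are never valid in hardware, so no break step or TLB maintenance is
 * performed here; each entry in the set is written with the same
 * swap pte.
 */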
void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
			  pte_t *ptep, pte_t pte, unsigned long sz)
{
	int i, ncontig;
	size_t pgsize;

	ncontig = num_contig_ptes(sz, &pgsize);

	for (i = 0; i < ncontig; i++, ptep++)
		set_pte(ptep, pte);
}
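/*
 * Allocate the page table entries needed to map a huge page of size sz
 * at addr, populating the levels above it: PUD_SIZE maps at the pud
 * level, PMD_SIZE and CONT_PMD_SIZE at the pmd level (for PMD_SIZE,
 * sharing an existing pmd page where possible) and CONT_PTE_SIZE at
 * the pte level.
 */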
pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep = NULL;

	pgdp = pgd_offset(mm, addr);
	pudp = pud_alloc(mm, pgdp, addr);
	if (!pudp)
		return NULL;

	if (sz == PUD_SIZE) {
		ptep = (pte_t *)pudp;
	} else if (sz == (CONT_PTE_SIZE)) {
		pmdp = pmd_alloc(mm, pudp, addr);
		if (!pmdp)
			return NULL;

		WARN_ON(addr & (sz - 1));
		/*
		 * Note that if this code were ever ported to the
		 * 32-bit arm platform then it will cause trouble in
		 * the case where CONFIG_HIGHPTE is set, since there
		 * will be no pte_unmap() to correspond with this
		 * pte_alloc_map().
		 */
		ptep = pte_alloc_map(mm, pmdp, addr);
	} else if (sz == PMD_SIZE) {
		if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) &&
		    pud_none(READ_ONCE(*pudp)))
			ptep = huge_pmd_share(mm, addr, pudp);
		else
			ptep = (pte_t *)pmd_alloc(mm, pudp, addr);
	} else if (sz == (CONT_PMD_SIZE)) {
		pmdp = pmd_alloc(mm, pudp, addr);
		WARN_ON(addr & (sz - 1));
		return (pte_t *)pmdp;
	}

	return ptep;
}
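/*
 * Find the entry mapping addr for a huge page of size sz. The pud, pmd
 * or pte slot is returned as appropriate, including for non-present
 * (swap or migration) entries; NULL means nothing is mapped there.
 */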
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;

	pgdp = pgd_offset(mm, addr);
	if (!pgd_present(READ_ONCE(*pgdp)))
		return NULL;

	pudp = pud_offset(pgdp, addr);
	pud = READ_ONCE(*pudp);
	if (sz != PUD_SIZE && pud_none(pud))
		return NULL;
	/* hugepage or swap? */
	if (pud_huge(pud) || !pud_present(pud))
		return (pte_t *)pudp;
	/* table; check the next level */

	if (sz == CONT_PMD_SIZE)
		addr &= CONT_PMD_MASK;

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) &&
	    pmd_none(pmd))
		return NULL;
	if (pmd_huge(pmd) || !pmd_present(pmd))
		return (pte_t *)pmdp;

	if (sz == CONT_PTE_SIZE)
		return pte_offset_kernel(pmdp, (addr & CONT_PTE_MASK));

	return NULL;
}
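/*
 * Mark entries backed by a contiguous set of ptes or pmds with the
 * contiguous bit; plain PUD and PMD mappings pass through unchanged.
 */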
pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
			 struct page *page, int writable)
{
	size_t pagesize = huge_page_size(hstate_vma(vma));

	if (pagesize == CONT_PTE_SIZE) {
		entry = pte_mkcont(entry);
	} else if (pagesize == CONT_PMD_SIZE) {
		entry = pmd_pte(pmd_mkcont(pte_pmd(entry)));
	} else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) {
		pr_warn("%s: unrecognized huge page size 0x%lx\n",
			__func__, pagesize);
	}
	return entry;
}
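/*
 * Clear every page table entry backing a huge mapping of size sz.
 */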
void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, unsigned long sz)
{
	int i, ncontig;
	size_t pgsize;

	ncontig = num_contig_ptes(sz, &pgsize);

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
		pte_clear(mm, addr, ptep);
}
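/*
 * Clear a huge mapping and return its previous contents. A contiguous
 * set goes through the Break-Before-Make helper so that dirty and
 * young bits set by hardware on any entry in the set are preserved.
 */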
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pte_t *ptep)
{
	int ncontig;
	size_t pgsize;
	pte_t orig_pte = huge_ptep_get(ptep);

	if (!pte_cont(orig_pte))
		return ptep_get_and_clear(mm, addr, ptep);

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);

	return get_clear_flush(mm, addr, ptep, pgsize, ncontig);
}
/*
 * huge_ptep_set_access_flags will update access flags (dirty, accessed)
 * and write permission.
 *
 * For a contiguous huge pte range we need to check whether or not write
 * permission has to change only on the first pte in the set. Then for
 * all the contiguous ptes we need to check whether or not there is a
 * discrepancy between dirty or young.
 */
static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
{
	int i;

	if (pte_write(pte) != pte_write(huge_ptep_get(ptep)))
		return 1;

	for (i = 0; i < ncontig; i++) {
		pte_t orig_pte = huge_ptep_get(ptep + i);

		if (pte_dirty(pte) != pte_dirty(orig_pte))
			return 1;

		if (pte_young(pte) != pte_young(orig_pte))
			return 1;
	}

	return 0;
}
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty)
{
	int ncontig, i;
	size_t pgsize = 0;
	unsigned long pfn = pte_pfn(pte), dpfn;
	pgprot_t hugeprot;
	pte_t orig_pte;

	if (!pte_cont(pte))
		return ptep_set_access_flags(vma, addr, ptep, pte, dirty);

	ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
	dpfn = pgsize >> PAGE_SHIFT;

	if (!__cont_access_flags_changed(ptep, pte, ncontig))
		return 0;

	orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);

	/* Make sure we don't lose the dirty or young state */
	if (pte_dirty(orig_pte))
		pte = pte_mkdirty(pte);

	if (pte_young(orig_pte))
		pte = pte_mkyoung(pte);

	hugeprot = pte_pgprot(pte);
	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot));

	return 1;
}
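/*
 * Write-protect a huge mapping. A contiguous set must be broken and
 * flushed first (Break-Before-Make) before the entries are rewritten
 * with write permission removed.
 */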
void huge_ptep_set_wrprotect(struct mm_struct *mm,
			     unsigned long addr, pte_t *ptep)
{
	unsigned long pfn, dpfn;
	pgprot_t hugeprot;
	int ncontig, i;
	size_t pgsize;
	pte_t pte;

	if (!pte_cont(READ_ONCE(*ptep))) {
		ptep_set_wrprotect(mm, addr, ptep);
		return;
	}

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	dpfn = pgsize >> PAGE_SHIFT;

	pte = get_clear_flush(mm, addr, ptep, pgsize, ncontig);
	pte = pte_wrprotect(pte);

	hugeprot = pte_pgprot(pte);
	pfn = pte_pfn(pte);

	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
}
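/*
 * Clear a huge mapping and flush the old entries from the TLBs.
 */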
void huge_ptep_clear_flush(struct vm_area_struct *vma,
			   unsigned long addr, pte_t *ptep)
{
	size_t pgsize;
	int ncontig;

	if (!pte_cont(READ_ONCE(*ptep))) {
		ptep_clear_flush(vma, addr, ptep);
		return;
	}

	ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
	clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
}
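/*
 * Register an hstate for the given huge page size, unless one already
 * exists.
 */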
static void __init add_huge_page_size(unsigned long size)
{
	if (size_to_hstate(size))
		return;

	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
}
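/*
 * Register at boot every huge page size this configuration supports;
 * PUD_SIZE is only offered with 4K base pages.
 */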
static int __init hugetlbpage_init(void)
{
#ifdef CONFIG_ARM64_4K_PAGES
	add_huge_page_size(PUD_SIZE);
#endif
	add_huge_page_size(CONT_PMD_SIZE);
	add_huge_page_size(PMD_SIZE);
	add_huge_page_size(CONT_PTE_SIZE);

	return 0;
}
arch_initcall(hugetlbpage_init);
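/*
 * Handle the "hugepagesz=" kernel command line option, accepting only
 * sizes that this configuration can map as huge pages.
 */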
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);

	switch (ps) {
#ifdef CONFIG_ARM64_4K_PAGES
	case PUD_SIZE:
#endif
	case CONT_PMD_SIZE:
	case PMD_SIZE:
	case CONT_PTE_SIZE:
		add_huge_page_size(ps);
		return 1;
	}

	hugetlb_bad_size();
	pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
	return 0;
}
__setup("hugepagesz=", setup_hugepagesz);