// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2023 ARM Ltd.
 */

#include <linux/mm.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <asm/tlbflush.h>

static inline bool mm_is_user(struct mm_struct *mm)
{
	/*
	 * Don't attempt to apply the contig bit to kernel mappings, because
	 * dynamically adding/removing the contig bit can cause page faults.
	 * These racing faults are ok for user space, since they get serialized
	 * on the PTL. But kernel mappings can't tolerate faults.
	 */
	if (unlikely(mm_is_efi(mm)))
		return false;
	return mm != &init_mm;
}

static inline pte_t *contpte_align_down(pte_t *ptep)
{
	return PTR_ALIGN_DOWN(ptep, sizeof(*ptep) * CONT_PTES);
}

static void contpte_try_unfold_partial(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, unsigned int nr)
{
	/*
	 * Unfold any partially covered contpte block at the beginning and end
	 * of the range.
	 */

	if (ptep != contpte_align_down(ptep) || nr < CONT_PTES)
		contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));

	if (ptep + nr != contpte_align_down(ptep + nr)) {
		unsigned long last_addr = addr + PAGE_SIZE * (nr - 1);
		pte_t *last_ptep = ptep + nr - 1;

		contpte_try_unfold(mm, last_addr, last_ptep,
				   __ptep_get(last_ptep));
	}
}

static void contpte_convert(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, pte_t pte)
{
	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
	unsigned long start_addr;
	pte_t *start_ptep;
	int i;

	start_ptep = ptep = contpte_align_down(ptep);
	start_addr = addr = ALIGN_DOWN(addr, CONT_PTE_SIZE);
	pte = pfn_pte(ALIGN_DOWN(pte_pfn(pte), CONT_PTES), pte_pgprot(pte));

	for (i = 0; i < CONT_PTES; i++, ptep++, addr += PAGE_SIZE) {
		pte_t ptent = __ptep_get_and_clear(mm, addr, ptep);

		if (pte_dirty(ptent))
			pte = pte_mkdirty(pte);

		if (pte_young(ptent))
			pte = pte_mkyoung(pte);
	}

	__flush_tlb_range(&vma, start_addr, addr, PAGE_SIZE, true, 3);

	__set_ptes(mm, start_addr, start_ptep, pte, CONT_PTES);
}

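/*
 * Example of the realignment above, assuming a 4K granule (CONT_PTES == 16,
 * CONT_PTE_SIZE == 64K) and purely illustrative values:
 *
 *	addr = 0x0000ffff8a347000, pte_pfn(pte) = 0x12347
 *	=> start_addr = 0x0000ffff8a340000, block base pfn = 0x12340
 *
 * All 16 entries are cleared and the block is invalidated from the TLB
 * before the new entries are written, so the walker never observes a mix of
 * contiguous and non-contiguous translations for the same block
 * (break-before-make), while any access/dirty bits set by hardware on the
 * old entries are folded into the new ones.
 */
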
void __contpte_try_fold(struct mm_struct *mm, unsigned long addr,
			pte_t *ptep, pte_t pte)
{
	/*
	 * We have already checked that the virtual and physical addresses are
	 * correctly aligned for a contpte mapping in contpte_try_fold() so the
	 * remaining checks are to ensure that the contpte range is fully
	 * covered by a single folio, and ensure that all the ptes are valid
	 * with contiguous PFNs and matching prots. We ignore the state of the
	 * access and dirty bits for the purpose of deciding if it's a contiguous
	 * range; the folding process will generate a single contpte entry which
	 * has a single access and dirty bit. Those 2 bits are the logical OR of
	 * their respective bits in the constituent pte entries. In order to
	 * ensure the contpte range is covered by a single folio, we must
	 * recover the folio from the pfn, but special mappings don't have a
	 * folio backing them. Fortunately contpte_try_fold() already checked
	 * that the pte is not special - we never try to fold special mappings.
	 * Note we can't use vm_normal_page() for this since we don't have the
	 * vma.
	 */

	unsigned long folio_start, folio_end;
	unsigned long cont_start, cont_end;
	pte_t expected_pte, subpte;
	struct folio *folio;
	struct page *page;
	unsigned long pfn;
	pte_t *orig_ptep;
	pgprot_t prot;
	int i;

	if (!mm_is_user(mm))
		return;

	page = pte_page(pte);
	folio = page_folio(page);
	folio_start = addr - (page - &folio->page) * PAGE_SIZE;
	folio_end = folio_start + folio_nr_pages(folio) * PAGE_SIZE;
	cont_start = ALIGN_DOWN(addr, CONT_PTE_SIZE);
	cont_end = cont_start + CONT_PTE_SIZE;

	if (folio_start > cont_start || folio_end < cont_end)
		return;

	pfn = ALIGN_DOWN(pte_pfn(pte), CONT_PTES);
	prot = pte_pgprot(pte_mkold(pte_mkclean(pte)));
	expected_pte = pfn_pte(pfn, prot);
	orig_ptep = ptep;
	ptep = contpte_align_down(ptep);

	for (i = 0; i < CONT_PTES; i++) {
		subpte = pte_mkold(pte_mkclean(__ptep_get(ptep)));
		if (!pte_same(subpte, expected_pte))
			return;
		expected_pte = pte_advance_pfn(expected_pte, 1);
		ptep++;
	}

	pte = pte_mkcont(pte);
	contpte_convert(mm, addr, orig_ptep, pte);
}
EXPORT_SYMBOL_GPL(__contpte_try_fold);

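/*
 * Worked example for the folio-coverage check in __contpte_try_fold(),
 * assuming a 4K granule (CONT_PTES == 16, CONT_PTE_SIZE == 64K); the
 * numbers are illustrative only:
 *
 *	addr = 0x...5c000, and the pte maps page index 12 of a 32-page folio
 *	=> folio_start = addr - 12 * PAGE_SIZE       = 0x...50000
 *	   folio_end   = folio_start + 32 * PAGE_SIZE = 0x...70000
 *	   cont_start  = ALIGN_DOWN(addr, 64K)        = 0x...50000
 *	   cont_end    = cont_start + 64K             = 0x...60000
 *
 * folio_start <= cont_start and folio_end >= cont_end, so the block is fully
 * covered by the folio and folding proceeds to the per-pte consistency scan.
 */
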
void __contpte_try_unfold(struct mm_struct *mm, unsigned long addr,
			pte_t *ptep, pte_t pte)
{
	/*
	 * We have already checked that the ptes are contiguous in
	 * contpte_try_unfold(), so just check that the mm is user space.
	 */
	if (!mm_is_user(mm))
		return;

	pte = pte_mknoncont(pte);
	contpte_convert(mm, addr, ptep, pte);
}
EXPORT_SYMBOL_GPL(__contpte_try_unfold);

pte_t contpte_ptep_get(pte_t *ptep, pte_t orig_pte)
{
	/*
	 * Gather access/dirty bits, which may be populated in any of the ptes
	 * of the contig range. We are guaranteed to be holding the PTL, so any
	 * contiguous range cannot be unfolded or otherwise modified under our
	 * feet.
	 */

	pte_t pte;
	int i;

	ptep = contpte_align_down(ptep);

	for (i = 0; i < CONT_PTES; i++, ptep++) {
		pte = __ptep_get(ptep);

		if (pte_dirty(pte))
			orig_pte = pte_mkdirty(orig_pte);

		if (pte_young(pte))
			orig_pte = pte_mkyoung(orig_pte);
	}

	return orig_pte;
}
EXPORT_SYMBOL_GPL(contpte_ptep_get);

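/*
 * Illustrative outcome (hypothetical state): if every entry of a folded
 * block is old and clean except entry 9, which has been marked young and
 * dirty, then contpte_ptep_get() called on any entry of that block returns
 * that entry's own pfn/prot but with the young and dirty bits ORed in,
 * matching the per-folio view that core-mm expects.
 */
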
pte_t contpte_ptep_get_lockless(pte_t *orig_ptep)
{
	/*
	 * The ptep_get_lockless() API requires us to read and return *orig_ptep
	 * so that it is self-consistent, without the PTL held, so we may be
	 * racing with other threads modifying the pte. Usually a READ_ONCE()
	 * would suffice, but for the contpte case, we also need to gather the
	 * access and dirty bits from across all ptes in the contiguous block,
	 * and we can't read all of those neighbouring ptes atomically, so any
	 * contiguous range may be unfolded/modified/refolded under our feet.
	 * Therefore we ensure we read a _consistent_ contpte range by checking
	 * that all ptes in the range are valid and have CONT_PTE set, that all
	 * pfns are contiguous and that all pgprots are the same (ignoring
	 * access/dirty). If we find a pte that is not consistent, then we must
	 * be racing with an update so start again. If the target pte does not
	 * have CONT_PTE set then that is considered consistent on its own
	 * because it is not part of a contpte range.
	 */

	pgprot_t orig_prot;
	unsigned long pfn;
	pte_t orig_pte;
	pgprot_t prot;
	pte_t *ptep;
	pte_t pte;
	int i;

retry:
	orig_pte = __ptep_get(orig_ptep);

	if (!pte_valid_cont(orig_pte))
		return orig_pte;

	orig_prot = pte_pgprot(pte_mkold(pte_mkclean(orig_pte)));
	ptep = contpte_align_down(orig_ptep);
	pfn = pte_pfn(orig_pte) - (orig_ptep - ptep);

	for (i = 0; i < CONT_PTES; i++, ptep++, pfn++) {
		pte = __ptep_get(ptep);
		prot = pte_pgprot(pte_mkold(pte_mkclean(pte)));

		if (!pte_valid_cont(pte) ||
		    pte_pfn(pte) != pfn ||
		    pgprot_val(prot) != pgprot_val(orig_prot))
			goto retry;

		if (pte_dirty(pte))
			orig_pte = pte_mkdirty(orig_pte);

		if (pte_young(pte))
			orig_pte = pte_mkyoung(orig_pte);
	}

	return orig_pte;
}
EXPORT_SYMBOL_GPL(contpte_ptep_get_lockless);

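/*
 * Race sketch for contpte_ptep_get_lockless() (hypothetical interleaving):
 *
 *	CPU0: reads orig_pte, sees CONT_PTE set, starts scanning the block
 *	CPU1: takes the PTL and unfolds the block via contpte_convert()
 *	CPU0: hits an entry with CONT_PTE clear (or a non-matching pfn/prot)
 *	      and jumps back to retry; the second pass now reads a plain,
 *	      non-contiguous pte and returns it directly.
 *
 * Access/dirty bits gathered on an abandoned pass are discarded, because
 * orig_pte is re-read from scratch after the retry label.
 */
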
void contpte_set_ptes(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned int nr)
{
	unsigned long next;
	unsigned long end;
	unsigned long pfn;
	pgprot_t prot;

	/*
	 * The set_ptes() spec guarantees that when nr > 1, the initial state of
	 * all ptes is not-present. Therefore we never need to unfold or
	 * otherwise invalidate a range before we set the new ptes.
	 * contpte_set_ptes() should never be called for nr < 2.
	 */
	VM_WARN_ON(nr == 1);

	if (!mm_is_user(mm))
		return __set_ptes(mm, addr, ptep, pte, nr);

	end = addr + (nr << PAGE_SHIFT);
	pfn = pte_pfn(pte);
	prot = pte_pgprot(pte);

	do {
		next = pte_cont_addr_end(addr, end);
		nr = (next - addr) >> PAGE_SHIFT;
		pte = pfn_pte(pfn, prot);

		if (((addr | next | (pfn << PAGE_SHIFT)) & ~CONT_PTE_MASK) == 0)
			pte = pte_mkcont(pte);
		else
			pte = pte_mknoncont(pte);

		__set_ptes(mm, addr, ptep, pte, nr);

		addr = next;
		ptep += nr;
		pfn += nr;

	} while (addr != end);
}
EXPORT_SYMBOL_GPL(contpte_set_ptes);

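/*
 * Worked example for the loop above, assuming a 4K granule (CONT_PTES == 16,
 * 64K blocks) and illustrative values: setting nr = 40 ptes starting at
 * addr = 0x...2c000 with a pfn congruent to the virtual address.
 *
 *	chunk 1: 0x...2c000 - 0x...30000 ( 4 ptes)  partial block -> non-cont
 *	chunk 2: 0x...30000 - 0x...40000 (16 ptes)  fully aligned -> cont
 *	chunk 3: 0x...40000 - 0x...50000 (16 ptes)  fully aligned -> cont
 *	chunk 4: 0x...50000 - 0x...54000 ( 4 ptes)  partial block -> non-cont
 *
 * pte_cont_addr_end() produces the per-64K-block boundaries, and the
 * (addr | next | (pfn << PAGE_SHIFT)) alignment test decides whether each
 * chunk gets the CONT bit.
 */
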
void contpte_clear_full_ptes(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, unsigned int nr, int full)
{
	contpte_try_unfold_partial(mm, addr, ptep, nr);
	__clear_full_ptes(mm, addr, ptep, nr, full);
}
EXPORT_SYMBOL_GPL(contpte_clear_full_ptes);

pte_t contpte_get_and_clear_full_ptes(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep,
				unsigned int nr, int full)
{
	contpte_try_unfold_partial(mm, addr, ptep, nr);
	return __get_and_clear_full_ptes(mm, addr, ptep, nr, full);
}
EXPORT_SYMBOL_GPL(contpte_get_and_clear_full_ptes);

int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep)
{
	/*
	 * ptep_clear_flush_young() technically requires us to clear the access
	 * flag for a _single_ pte. However, the core-mm code actually tracks
	 * access/dirty per folio, not per page. And since we only create a
	 * contig range when the range is covered by a single folio, we can get
	 * away with clearing young for the whole contig range here, so we avoid
	 * having to unfold.
	 */

	int young = 0;
	int i;

	ptep = contpte_align_down(ptep);
	addr = ALIGN_DOWN(addr, CONT_PTE_SIZE);

	for (i = 0; i < CONT_PTES; i++, ptep++, addr += PAGE_SIZE)
		young |= __ptep_test_and_clear_young(vma, addr, ptep);

	return young;
}
EXPORT_SYMBOL_GPL(contpte_ptep_test_and_clear_young);

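/*
 * Example (hypothetical, 4K granule): a call for a single pte of a folded
 * block clears the access flag across all 16 entries and returns 1 if any
 * of them was young. Because a block is only ever created when it sits
 * inside one folio, core-mm's per-folio ageing decisions see the same
 * result a per-page clear would have produced.
 */
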
int contpte_ptep_clear_flush_young(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep)
{
	int young;

	young = contpte_ptep_test_and_clear_young(vma, addr, ptep);

	if (young) {
		/*
		 * See comment in __ptep_clear_flush_young(); same rationale for
		 * eliding the trailing DSB applies here.
		 */
		addr = ALIGN_DOWN(addr, CONT_PTE_SIZE);
		__flush_tlb_range_nosync(vma, addr, addr + CONT_PTE_SIZE,
					 PAGE_SIZE, true, 3);
	}

	return young;
}
EXPORT_SYMBOL_GPL(contpte_ptep_clear_flush_young);

void contpte_wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, unsigned int nr)
{
	/*
	 * If wrprotecting an entire contig range, we can avoid unfolding. Just
	 * set wrprotect and wait for the later mmu_gather flush to invalidate
	 * the tlb. Until the flush, the page may or may not be wrprotected.
	 * After the flush, it is guaranteed wrprotected. If it's a partial
	 * range though, we must unfold, because we can't have a case where
	 * CONT_PTE is set but wrprotect applies to a subset of the PTEs; this
	 * would cause it to continue to be unpredictable after the flush.
	 */

	contpte_try_unfold_partial(mm, addr, ptep, nr);
	__wrprotect_ptes(mm, addr, ptep, nr);
}
EXPORT_SYMBOL_GPL(contpte_wrprotect_ptes);

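/*
 * Example (illustrative, 4K granule): wrprotecting 16 ptes that exactly
 * cover one folded block leaves the CONT bit in place; the later mmu_gather
 * flush makes the write protection visible for the whole block at once.
 * Wrprotecting only 8 of those 16 ptes first unfolds the block via
 * contpte_try_unfold_partial(), since a folded block cannot describe two
 * different permission states.
 */
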
void contpte_clear_young_dirty_ptes(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep,
				    unsigned int nr, cydp_t flags)
{
	/*
	 * We can safely clear access/dirty without needing to unfold from
	 * the architecture's perspective, even when contpte is set. If the
	 * range starts or ends midway through a contpte block, we can just
	 * expand to include the full contpte block. While this is not
	 * exactly what the core-mm asked for, it tracks access/dirty per
	 * folio, not per page. And since we only create a contpte block
	 * when it is covered by a single folio, we can get away with
	 * clearing access/dirty for the whole block.
	 */
	unsigned long start = addr;
	unsigned long end = start + nr * PAGE_SIZE;

	if (pte_cont(__ptep_get(ptep + nr - 1)))
		end = ALIGN(end, CONT_PTE_SIZE);

	if (pte_cont(__ptep_get(ptep))) {
		start = ALIGN_DOWN(start, CONT_PTE_SIZE);
		ptep = contpte_align_down(ptep);
	}

	__clear_young_dirty_ptes(vma, start, ptep, (end - start) / PAGE_SIZE, flags);
}
EXPORT_SYMBOL_GPL(contpte_clear_young_dirty_ptes);

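/*
 * Worked example for the expansion above (illustrative, 4K granule with
 * 64K blocks): a request for nr = 4 ptes starting 14 entries into a folded
 * block touches the last 2 entries of that block and the first 2 of the
 * next. If both ends are folded, start is pulled back to the first block's
 * base and end is pushed out to the second block's limit, so 32 entries are
 * cleared in total. Core-mm tracks access/dirty per folio, so the wider
 * clear is indistinguishable from the narrower one it asked for.
 */
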
int contpte_ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t entry, int dirty)
{
	unsigned long start_addr;
	pte_t orig_pte;
	int i;

	/*
	 * Gather the access/dirty bits for the contiguous range. If nothing has
	 * changed, it's a noop.
	 */
	orig_pte = pte_mknoncont(ptep_get(ptep));
	if (pte_val(orig_pte) == pte_val(entry))
		return 0;

	/*
	 * We can fix up access/dirty bits without having to unfold the contig
	 * range. But if the write bit is changing, we must unfold.
	 */
	if (pte_write(orig_pte) == pte_write(entry)) {
		/*
		 * For HW access management, we technically only need to update
		 * the flag on a single pte in the range. But for SW access
		 * management, we need to update all the ptes to prevent extra
		 * faults. Avoid per-page tlb flush in __ptep_set_access_flags()
		 * and instead flush the whole range at the end.
		 */
		ptep = contpte_align_down(ptep);
		start_addr = addr = ALIGN_DOWN(addr, CONT_PTE_SIZE);

		/*
		 * We are not advancing entry because __ptep_set_access_flags()
		 * only consumes access flags from entry. And since we have
		 * checked for the whole contpte block and returned early,
		 * pte_same() within __ptep_set_access_flags() is likely false.
		 */
		for (i = 0; i < CONT_PTES; i++, ptep++, addr += PAGE_SIZE)
			__ptep_set_access_flags(vma, addr, ptep, entry, 0);

		if (dirty)
			__flush_tlb_range(vma, start_addr, addr,
					  PAGE_SIZE, true, 3);
	} else {
		__contpte_try_unfold(vma->vm_mm, addr, ptep, orig_pte);
		__ptep_set_access_flags(vma, addr, ptep, entry, dirty);
	}

	return 1;
}
EXPORT_SYMBOL_GPL(contpte_ptep_set_access_flags);

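/*
 * Example of the two paths above (hypothetical fault scenarios):
 *
 *	- An access-flag fault on one page of a folded block, with the write
 *	  permission unchanged: all CONT_PTES entries get the access/dirty
 *	  fixup, followed by one ranged TLB flush only if the update marks
 *	  them dirty.
 *	- A fault that flips the write bit (e.g. making a read-only pte
 *	  writable): the block is unfolded first and only the faulting entry
 *	  is updated, as in the non-contpte case.
 */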