/* SPDX-License-Identifier: GPL-2.0-or-later */
/* include/asm-generic/tlb.h
 *
 * Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c  Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra
 */

#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/hugetlb_inline.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/*
 * Blindly accessing user memory from NMI context can be dangerous
 * if we're in the middle of switching the current user task or switching
 * the loaded mm.
 */
#ifndef nmi_uaccess_okay
# define nmi_uaccess_okay() true
#endif

#ifdef CONFIG_MMU

/*
 * Generic MMU-gather implementation.
 *
 * The mmu_gather data structure is used by the mm code to implement the
 * correct and efficient ordering of freeing pages and TLB invalidations.
 *
 * This correct ordering is:
 *
 *  1) unhook page
 *  2) TLB invalidate page
 *  3) free page
 *
 * That is, we must never free a page before we have ensured there are no live
 * translations left to it. Otherwise it might be possible to observe (or
 * worse, change) the page content after it has been reused.
 *
 * The mmu_gather API consists of:
 *
 *  - tlb_gather_mmu() / tlb_gather_mmu_fullmm() / tlb_finish_mmu()
 *
 *    start and finish a mmu_gather
 *
 *    Finish in particular will issue a (final) TLB invalidate and free
 *    all (remaining) queued pages.
 *
 *  - tlb_start_vma() / tlb_end_vma(); marks the start / end of a VMA
 *
 *    Defaults to flushing at tlb_end_vma() to reset the range; helps when
 *    there are large holes between the VMAs.
 *
 *  - tlb_remove_table()
 *
 *    tlb_remove_table() is the basic primitive to free page-table directories
 *    (__p*_free_tlb()).  In its most primitive form it is an alias for
 *    tlb_remove_page() below, for when page directories are pages and have no
 *    additional constraints.
 *
 *    See also MMU_GATHER_TABLE_FREE and MMU_GATHER_RCU_TABLE_FREE.
 *
 *  - tlb_remove_page() / __tlb_remove_page()
 *  - tlb_remove_page_size() / __tlb_remove_page_size()
 *  - __tlb_remove_folio_pages()
 *
 *    __tlb_remove_page_size() is the basic primitive that queues a page for
 *    freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a
 *    boolean indicating if the queue is (now) full and a call to
 *    tlb_flush_mmu() is required.
 *
 *    tlb_remove_page() and tlb_remove_page_size() imply the call to
 *    tlb_flush_mmu() when required and have no return value.
 *
 *    __tlb_remove_folio_pages() is similar to __tlb_remove_page(), however,
 *    instead of removing a single page, it removes the given number of
 *    consecutive pages that are all part of the same (large) folio: just like
 *    calling __tlb_remove_page() on each page individually.
 *
 *  - tlb_change_page_size()
 *
 *    call before __tlb_remove_page*() to set the current page-size; implies a
 *    possible tlb_flush_mmu() call.
 *
 *  - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
 *
 *    tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
 *                              related state, like the range)
 *
 *    tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
 *                      whatever pages are still batched.
 *
 *  - mmu_gather::fullmm
 *
 *    A flag set by tlb_gather_mmu_fullmm() to indicate we're going to free
 *    the entire mm; this allows a number of optimizations.
 *
 *    - We can ignore tlb_{start,end}_vma(); because we don't
 *      care about ranges. Everything will be shot down.
 *
 *    - (RISC) architectures that use ASIDs can cycle to a new ASID
 *      and delay the invalidation until ASID space runs out.
 *
 *  - mmu_gather::need_flush_all
 *
 *    A flag that can be set by the arch code if it wants to force
 *    flush the entire TLB irrespective of the range. For instance
 *    x86-PAE needs this when changing top-level entries.
 *
 * And allows the architecture to provide and implement tlb_flush():
 *
 * tlb_flush() may, in addition to the above mentioned mmu_gather fields, make
 * use of:
 *
 *  - mmu_gather::start / mmu_gather::end
 *
 *    which provides the range that needs to be flushed to cover the pages to
 *    be freed.
 *
 *  - mmu_gather::freed_tables
 *
 *    set when we freed page table pages
 *
 *  - tlb_get_unmap_shift() / tlb_get_unmap_size()
 *
 *    returns the smallest TLB entry size unmapped in this range.
 *
 * If an architecture does not provide tlb_flush() a default implementation
 * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is
 * specified, in which case we'll default to flush_tlb_mm().
 *
 * Additionally there are a few opt-in features:
 *
 *  MMU_GATHER_PAGE_SIZE
 *
 *  This ensures we call tlb_flush() every time tlb_change_page_size() actually
 *  changes the size and provides mmu_gather::page_size to tlb_flush().
 *
 *  This might be useful if your architecture has size-specific TLB
 *  invalidation instructions.
 *
 *  MMU_GATHER_TABLE_FREE
 *
 *  This provides tlb_remove_table(), to be used instead of tlb_remove_page()
 *  for page directories (__p*_free_tlb()).
 *
 *  Useful if your architecture has non-page page directories.
 *
 *  When used, an architecture is expected to provide __tlb_remove_table()
 *  which does the actual freeing of these pages.
 *
 *  MMU_GATHER_RCU_TABLE_FREE
 *
 *  Like MMU_GATHER_TABLE_FREE, and adds semi-RCU semantics to the free (see
 *  the comment below).
 *
 *  Useful if your architecture doesn't use IPIs for remote TLB invalidates
 *  and therefore doesn't naturally serialize with software page-table walkers.
 *
 *  MMU_GATHER_NO_FLUSH_CACHE
 *
 *  Indicates the architecture has flush_cache_range() but it need *NOT* be
 *  called before unmapping a VMA.
 *
 *  NOTE: strictly speaking we shouldn't have this knob and instead rely on
 *	  flush_cache_range() being a NOP, except Sparc64 seems to be
 *	  different here.
 *
 *  MMU_GATHER_MERGE_VMAS
 *
 *  Indicates the architecture wants to merge ranges over VMAs; typical when
 *  multiple range invalidates are more expensive than a full invalidate.
 *
 *  MMU_GATHER_NO_RANGE
 *
 *  Use this if your architecture lacks an efficient flush_tlb_range(). This
 *  option implies MMU_GATHER_MERGE_VMAS above.
 *
 *  MMU_GATHER_NO_GATHER
 *
 *  If the option is set the mmu_gather will not track individual pages for
 *  delayed page free anymore. A platform that enables the option needs to
 *  provide its own implementation of the __tlb_remove_page_size() function to
 *  free pages.
 *
 *  This is useful if your architecture already flushes TLB entries in the
 *  various ptep_get_and_clear() functions.
 */
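
/*
 * Illustrative sketch (not part of this API): a typical unmap path drives the
 * above roughly as follows; the pte loop is hypothetical shorthand:
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm);
 *	tlb_start_vma(&tlb, vma);
 *	for each pte in [start, end):
 *		tlb_remove_tlb_entry(&tlb, ptep, addr);
 *		tlb_remove_page(&tlb, page);
 *	tlb_end_vma(&tlb, vma);
 *	tlb_finish_mmu(&tlb);
 *
 * tlb_finish_mmu() issues the final TLB invalidate and frees all the pages
 * that are still queued.
 */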

#ifdef CONFIG_MMU_GATHER_TABLE_FREE

struct mmu_table_batch {
#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
	struct rcu_head		rcu;
#endif
	unsigned int		nr;
	void			*tables[];
};

#define MAX_TABLE_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#else /* !CONFIG_MMU_GATHER_TABLE_FREE */

/*
 * Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page based
 * page directories and we can use the normal page batching to free them.
 */
#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))

#endif /* CONFIG_MMU_GATHER_TABLE_FREE */

#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
/*
 * This allows an architecture that does not use the linux page-tables for
 * hardware to skip the TLBI when freeing page tables.
 */
#ifndef tlb_needs_table_invalidate
#define tlb_needs_table_invalidate() (true)
#endif

void tlb_remove_table_sync_one(void);

#else

#ifdef tlb_needs_table_invalidate
#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
#endif

static inline void tlb_remove_table_sync_one(void) { }

#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */

#ifndef CONFIG_MMU_GATHER_NO_GATHER
/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE	8

struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct encoded_page	*encoded_pages[];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Limit the maximum number of mmu_gather batches to reduce a risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
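
/*
 * Worked example (illustrative; the exact values depend on the config): with
 * 4K pages and 8-byte pointers the batch header is 16 bytes, so
 * MAX_GATHER_BATCH = (4096 - 16) / 8 = 510 pages per batch, and
 * MAX_GATHER_BATCH_COUNT = 10000 / 510 = 19 batches, i.e. at most roughly
 * 9700 pages are freed per intermediate flush.
 */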

extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
				   bool delay_rmap, int page_size);
bool __tlb_remove_folio_pages(struct mmu_gather *tlb, struct page *page,
			      unsigned int nr_pages, bool delay_rmap);

#ifdef CONFIG_SMP
/*
 * This both sets 'delayed_rmap', and returns true. It would be an inline
 * function, except we define it before the 'struct mmu_gather'.
 */
#define tlb_delay_rmap(tlb)	(((tlb)->delayed_rmap = 1), true)
extern void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma);
#endif

#endif /* CONFIG_MMU_GATHER_NO_GATHER */

/*
 * We have a no-op version of the rmap removal that doesn't
 * delay anything. That is used on S390, which flushes remote
 * TLBs synchronously, and on UP, which doesn't have any
 * remote TLBs to flush and is not preemptible due to this
 * all happening under the page table lock.
 */
#ifndef tlb_delay_rmap
#define tlb_delay_rmap(tlb)	(false)
static inline void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
#endif
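
/*
 * Illustrative flow (hypothetical caller, heavily simplified): a zap loop
 * that wants the rmap removal delayed until after the TLB flush feeds
 * tlb_delay_rmap()'s result into the page-queueing primitive, and performs
 * the pending removals once the flush has happened:
 *
 *	bool delay = tlb_delay_rmap(tlb);
 *
 *	if (__tlb_remove_page_size(tlb, page, delay, PAGE_SIZE))
 *		tlb_flush_mmu(tlb);
 *	...
 *	if (tlb->delayed_rmap)
 *		tlb_flush_rmaps(tlb, vma);
 */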

/*
 * struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;

#ifdef CONFIG_MMU_GATHER_TABLE_FREE
	struct mmu_table_batch	*batch;
#endif

	unsigned long		start;
	unsigned long		end;
	/*
	 * we are in the middle of an operation to clear
	 * a full mm and can make some optimizations
	 */
	unsigned int		fullmm : 1;

	/*
	 * we have performed an operation which
	 * requires a complete flush of the tlb
	 */
	unsigned int		need_flush_all : 1;

	/*
	 * we have removed page directories
	 */
	unsigned int		freed_tables : 1;

	/*
	 * Do we have pending delayed rmap removals?
	 */
	unsigned int		delayed_rmap : 1;

	/*
	 * at which levels have we cleared entries?
	 */
	unsigned int		cleared_ptes : 1;
	unsigned int		cleared_pmds : 1;
	unsigned int		cleared_puds : 1;
	unsigned int		cleared_p4ds : 1;

	/*
	 * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
	 */
	unsigned int		vma_exec : 1;
	unsigned int		vma_huge : 1;
	unsigned int		vma_pfn  : 1;

	unsigned int		batch_count;

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	struct mmu_gather_batch	*active;
	struct mmu_gather_batch	local;
	struct page		*__pages[MMU_GATHER_BUNDLE];

#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	unsigned int		page_size;
#endif
#endif
};

void tlb_flush_mmu(struct mmu_gather *tlb);

static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address,
				      unsigned int range_size)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + range_size);
}
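
/*
 * For illustration: starting from a reset non-fullmm range (start == TASK_SIZE,
 * end == 0), __tlb_adjust_range(tlb, 0x1000, PAGE_SIZE) followed by
 * __tlb_adjust_range(tlb, 0x5000, PAGE_SIZE) yields start == 0x1000 and
 * end == 0x6000 (4K pages assumed): the tracked range also covers the hole
 * between the two pages, which a later flush_tlb_range() simply sweeps over.
 */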

static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0;
	} else {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
	tlb->freed_tables = 0;
	tlb->cleared_ptes = 0;
	tlb->cleared_pmds = 0;
	tlb->cleared_puds = 0;
	tlb->cleared_p4ds = 0;
	/*
	 * Do not reset mmu_gather::vma_* fields here, we do not
	 * call into tlb_start_vma() again to set them if there is an
	 * intermediate flush.
	 */
}

#ifdef CONFIG_MMU_GATHER_NO_RANGE

#if defined(tlb_flush)
#error MMU_GATHER_NO_RANGE relies on default tlb_flush()
#endif

/*
 * When an architecture does not have efficient means of range flushing TLBs
 * there is no point in doing intermediate flushes on tlb_end_vma() to keep the
 * range small. We equally don't have to worry about page granularity or other
 * things.
 *
 * All we need to do is issue a full flush for any !0 range.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->end)
		flush_tlb_mm(tlb->mm);
}

#else /* CONFIG_MMU_GATHER_NO_RANGE */

#ifndef tlb_flush
/*
 * When an architecture does not provide its own tlb_flush() implementation
 * but does have a reasonably efficient flush_tlb_range() implementation
 * this allows implementing tlb_flush() in terms of it.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all) {
		flush_tlb_mm(tlb->mm);
	} else if (tlb->end) {
		struct vm_area_struct vma = {
			.vm_mm = tlb->mm,
			.vm_flags = (tlb->vma_exec ? VM_EXEC    : 0) |
				    (tlb->vma_huge ? VM_HUGETLB : 0),
		};

		flush_tlb_range(&vma, tlb->start, tlb->end);
	}
}
#endif

#endif /* CONFIG_MMU_GATHER_NO_RANGE */
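
/*
 * A minimal sketch (hypothetical, not taken from any real architecture) of an
 * arch-provided override; defining the tlb_flush macro before including this
 * header disables the default above and can exploit, e.g.,
 * mmu_gather::freed_tables:
 *
 *	static inline void tlb_flush(struct mmu_gather *tlb)
 *	{
 *		if (tlb->fullmm || tlb->freed_tables)
 *			flush_tlb_mm(tlb->mm);
 *		else
 *			my_arch_flush_range(tlb->mm, tlb->start, tlb->end);
 *	}
 *	#define tlb_flush tlb_flush
 *
 * where my_arch_flush_range() stands in for whatever range primitive the
 * architecture actually has.
 */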

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	/*
	 * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
	 * mips-4k) flush only large pages.
	 *
	 * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
	 * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
	 * range.
	 *
	 * We rely on tlb_end_vma() to issue a flush, such that when we reset
	 * these values the batch is empty.
	 */
	tlb->vma_huge = is_vm_hugetlb_page(vma);
	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
	tlb->vma_pfn  = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP));
}

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	/*
	 * Anything calling __tlb_adjust_range() also sets at least one of
	 * these bits.
	 */
	if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
	      tlb->cleared_puds || tlb->cleared_p4ds))
		return;

	tlb_flush(tlb);
	__tlb_reset_range(tlb);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	if (__tlb_remove_page_size(tlb, page, false, page_size))
		tlb_flush_mmu(tlb);
}

static __always_inline bool __tlb_remove_page(struct mmu_gather *tlb,
					      struct page *page, bool delay_rmap)
{
	return __tlb_remove_page_size(tlb, page, delay_rmap, PAGE_SIZE);
}
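
/*
 * Illustrative use of the __ variant (hypothetical caller): the caller is
 * responsible for flushing when the batch queue fills up:
 *
 *	if (__tlb_remove_page(tlb, page, false))
 *		tlb_flush_mmu(tlb);
 */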

/* tlb_remove_page
 *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *	required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

static inline void tlb_remove_ptdesc(struct mmu_gather *tlb, void *pt)
{
	tlb_remove_table(tlb, pt);
}

/* Like tlb_remove_ptdesc, but for page-like page directories. */
static inline void tlb_remove_page_ptdesc(struct mmu_gather *tlb, struct ptdesc *pt)
{
	tlb_remove_page(tlb, ptdesc_page(pt));
}

static inline void tlb_change_page_size(struct mmu_gather *tlb,
					unsigned int page_size)
{
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	if (tlb->page_size && tlb->page_size != page_size) {
		if (!tlb->fullmm && !tlb->need_flush_all)
			tlb_flush_mmu(tlb);
	}

	tlb->page_size = page_size;
#endif
}
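
/*
 * For example (hypothetical caller): a hugetlb unmap path would issue
 * tlb_change_page_size(tlb, huge_page_size(h)) before queueing any pages, so
 * that with MMU_GATHER_PAGE_SIZE a pending batch of differently-sized pages
 * gets flushed first.
 */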

static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
{
	if (tlb->cleared_ptes)
		return PAGE_SHIFT;
	if (tlb->cleared_pmds)
		return PMD_SHIFT;
	if (tlb->cleared_puds)
		return PUD_SHIFT;
	if (tlb->cleared_p4ds)
		return P4D_SHIFT;

	return PAGE_SHIFT;
}

static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
	return 1UL << tlb_get_unmap_shift(tlb);
}
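
/*
 * For illustration: if only PMD-level entries were cleared (cleared_pmds set,
 * cleared_ptes clear), tlb_get_unmap_shift() returns PMD_SHIFT, so
 * tlb_get_unmap_size() is 2M on x86-64 with 4K base pages.
 */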

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush. When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	tlb_update_vma_flags(tlb, vma);
#ifndef CONFIG_MMU_GATHER_NO_FLUSH_CACHE
	flush_cache_range(vma, vma->vm_start, vma->vm_end);
#endif
}

static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	/*
	 * VM_PFNMAP is more fragile because the core mm will not track the
	 * page mapcount -- there might not be page-frames for these PFNs after
	 * all. Force flush TLBs for such ranges to avoid munmap() vs
	 * unmap_mapping_range() races.
	 */
	if (tlb->vma_pfn || !IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS)) {
		/*
		 * Do a TLB flush and reset the range at VMA boundaries; this avoids
		 * the ranges growing with the unused space between consecutive VMAs.
		 */
		tlb_flush_mmu_tlbonly(tlb);
	}
}

/*
 * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,
 * and set corresponding cleared_*.
 */
static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_ptes = 1;
}

static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_pmds = 1;
}

static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_puds = 1;
}

static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_p4ds = 1;
}

#ifndef __tlb_remove_tlb_entry
static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
}
#endif

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that pte's were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate.  This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb_flush_pte_range(tlb, address, PAGE_SIZE);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

/**
 * tlb_remove_tlb_entries - remember unmapping of multiple consecutive ptes for
 *			    later tlb invalidation.
 *
 * Similar to tlb_remove_tlb_entry(), but remember unmapping of multiple
 * consecutive ptes instead of only a single one.
 */
static inline void tlb_remove_tlb_entries(struct mmu_gather *tlb,
		pte_t *ptep, unsigned int nr, unsigned long address)
{
	tlb_flush_pte_range(tlb, address, PAGE_SIZE * nr);
	for (;;) {
		__tlb_remove_tlb_entry(tlb, ptep, address);
		if (--nr == 0)
			break;
		ptep++;
		address += PAGE_SIZE;
	}
}
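
/*
 * For example (hypothetical caller): when zapping nr consecutive present ptes
 * that all map the same large folio, a single call covers the whole span:
 *
 *	tlb_remove_tlb_entries(tlb, start_ptep, nr, start_addr);
 */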

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	do {							\
		unsigned long _sz = huge_page_size(h);		\
		if (_sz >= P4D_SIZE)				\
			tlb_flush_p4d_range(tlb, address, _sz);	\
		else if (_sz >= PUD_SIZE)			\
			tlb_flush_pud_range(tlb, address, _sz);	\
		else if (_sz >= PMD_SIZE)			\
			tlb_flush_pmd_range(tlb, address, _sz);	\
		else						\
			tlb_flush_pte_range(tlb, address, _sz);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 * This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
	do {								\
		tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE);	\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
	} while (0)

/**
 * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
 *			      invalidation. This is a nop so far, because only
 *			      x86 needs it.
 */
#ifndef __tlb_remove_pud_tlb_entry
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
#endif

#define tlb_remove_pud_tlb_entry(tlb, pudp, address)			\
	do {								\
		tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE);	\
		__tlb_remove_pud_tlb_entry(tlb, pudp, address);		\
	} while (0)

/*
 * For things like page tables caches (ie caching addresses "inside" the
 * page tables, like x86 does), for legacy reasons, flushing an
 * individual page had better flush the page table caches behind it. This
 * is definitely how x86 works, for example. And if you have an
 * architected non-legacy page table cache (which I'm not aware of
 * anybody actually doing), you're going to have some architecturally
 * explicit flushing for that, likely *separate* from a regular TLB entry
 * flush, and thus you'd need more than just some range expansion.
 *
 * So if we ever find an architecture
 * that would want something that odd, I think it is up to that
 * architecture to do its own odd thing, not cause pain for others
 * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
 *
 * For now w.r.t page table cache, mark the range_size as PAGE_SIZE
 */

#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		tlb_flush_pmd_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)
#endif

#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		tlb_flush_pud_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)
#endif

#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		tlb_flush_p4d_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__p4d_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif
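
/*
 * A minimal sketch (hypothetical, assuming page-backed page tables with no
 * additional constraints) of the arch hook these macros expect:
 *
 *	#define __pte_free_tlb(tlb, ptep, address)	\
 *		tlb_remove_table((tlb), (ptep))
 */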

#ifndef pte_needs_flush
static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
{
	return true;
}
#endif

#ifndef huge_pmd_needs_flush
static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
{
	return true;
}
#endif

#endif /* CONFIG_MMU */

#endif /* _ASM_GENERIC__TLB_H */