/* SPDX-License-Identifier: GPL-2.0-or-later */
/* include/asm-generic/tlb.h
 *
 * Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/*
 * Blindly accessing user memory from NMI context can be dangerous
 * if we're in the middle of switching the current user task or switching
 * the loaded mm.
 */
#ifndef nmi_uaccess_okay
# define nmi_uaccess_okay() true
#endif

#ifdef CONFIG_MMU

/*
 * Generic MMU-gather implementation.
 *
 * The mmu_gather data structure is used by the mm code to implement the
 * correct and efficient ordering of freeing pages and TLB invalidations.
 *
 * This correct ordering is:
 *
 *  1) unhook page
 *  2) TLB invalidate page
 *  3) free page
 *
 * That is, we must never free a page before we have ensured there are no live
 * translations left to it. Otherwise it might be possible to observe (or
 * worse, change) the page content after it has been reused.
 *
 * The mmu_gather API consists of:
 *
 *  - tlb_gather_mmu() / tlb_finish_mmu(); start and finish a mmu_gather
 *
 *    Finish in particular will issue a (final) TLB invalidate and free
 *    all (remaining) queued pages.
 *
 *  - tlb_start_vma() / tlb_end_vma(); marks the start / end of a VMA
 *
 *    Defaults to flushing at tlb_end_vma() to reset the range; helps when
 *    there are large holes between the VMAs.
 *
 *  - tlb_remove_table()
 *
 *    tlb_remove_table() is the basic primitive to free page-table directories
 *    (__p*_free_tlb()). In its most primitive form it is an alias for
 *    tlb_remove_page() below, for when page directories are pages and have no
 *    additional constraints.
 *
 *    See also MMU_GATHER_TABLE_FREE and MMU_GATHER_RCU_TABLE_FREE.
 *
 *  - tlb_remove_page() / __tlb_remove_page()
 *  - tlb_remove_page_size() / __tlb_remove_page_size()
 *
 *    __tlb_remove_page_size() is the basic primitive that queues a page for
 *    freeing. __tlb_remove_page() assumes PAGE_SIZE. Both return a
 *    boolean indicating whether the queue is (now) full and a call to
 *    tlb_flush_mmu() is required.
 *
 *    tlb_remove_page() and tlb_remove_page_size() imply the call to
 *    tlb_flush_mmu() when required and have no return value.
 *
 *  - tlb_change_page_size()
 *
 *    call before __tlb_remove_page*() to set the current page-size; implies a
 *    possible tlb_flush_mmu() call.
 *
 *  - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
 *
 *    tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
 *                              related state, like the range)
 *
 *    tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
 *                      whatever pages are still batched.
 *
 *  - mmu_gather::fullmm
 *
 *    A flag set by tlb_gather_mmu() to indicate we're going to free
 *    the entire mm; this allows a number of optimizations.
 *
 *    - We can ignore tlb_{start,end}_vma(); because we don't
 *      care about ranges. Everything will be shot down.
 *
 *    - (RISC) architectures that use ASIDs can cycle to a new ASID
 *      and delay the invalidation until ASID space runs out.
 *
 *  - mmu_gather::need_flush_all
 *
 *    A flag that can be set by the arch code if it wants to force
 *    flush the entire TLB irrespective of the range. For instance
 *    x86-PAE needs this when changing top-level entries.
 *
 * And allows the architecture to provide and implement tlb_flush():
 *
 * tlb_flush() may, in addition to the above mentioned mmu_gather fields, make
 * use of:
 *
 *  - mmu_gather::start / mmu_gather::end
 *
 *    which provides the range that needs to be flushed to cover the pages to
 *    be freed.
 *
 *  - mmu_gather::freed_tables
 *
 *    set when we freed page table pages
 *
 *  - tlb_get_unmap_shift() / tlb_get_unmap_size()
 *
 *    returns the smallest TLB entry size unmapped in this range.
 *
 * If an architecture does not provide tlb_flush() a default implementation
 * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is
 * specified, in which case we'll default to flush_tlb_mm().
 *
 * Additionally there are a few opt-in features:
 *
 *  MMU_GATHER_PAGE_SIZE
 *
 *  This ensures we call tlb_flush() every time tlb_change_page_size() actually
 *  changes the size and provides mmu_gather::page_size to tlb_flush().
 *
 *  This might be useful if your architecture has size-specific TLB
 *  invalidation instructions.
 *
 *  MMU_GATHER_TABLE_FREE
 *
 *  This provides tlb_remove_table(), to be used instead of tlb_remove_page()
 *  for page directories (__p*_free_tlb()).
 *
 *  Useful if your architecture has non-page page directories.
 *
 *  When used, an architecture is expected to provide __tlb_remove_table()
 *  which does the actual freeing of these pages.
 *
 *  MMU_GATHER_RCU_TABLE_FREE
 *
 *  Like MMU_GATHER_TABLE_FREE, and adds semi-RCU semantics to the free (see
 *  the comment below).
 *
 *  Useful if your architecture doesn't use IPIs for remote TLB invalidates
 *  and therefore doesn't naturally serialize with software page-table walkers.
 *
 *  MMU_GATHER_NO_RANGE
 *
 *  Use this if your architecture lacks an efficient flush_tlb_range().
 *
 *  MMU_GATHER_NO_GATHER
 *
 *  If the option is set the mmu_gather will not track individual pages for
 *  delayed page free anymore. A platform that enables the option needs to
 *  provide its own implementation of the __tlb_remove_page_size() function to
 *  free pages.
 *
 *  This is useful if your architecture already flushes TLB entries in the
 *  various ptep_get_and_clear() functions.
 */
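
/*
 * Illustrative usage sketch (not a verbatim copy of any caller): roughly how
 * the core mm code drives this API when tearing down mappings, simplified
 * from the zap/unmap paths in mm/memory.c; locking and error handling are
 * omitted and the loop is pseudo-code.
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	tlb_start_vma(&tlb, vma);
 *	for each (pte, addr, page) in [start, end) {
 *		ptep_get_and_clear(mm, addr, pte);	// 1) unhook page
 *		tlb_remove_tlb_entry(&tlb, pte, addr);	// remember range + level
 *		tlb_remove_page(&tlb, page);		// queue page; may flush
 *	}
 *	tlb_end_vma(&tlb, vma);
 *	tlb_finish_mmu(&tlb, start, end);		// 2) invalidate, 3) free
 */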

#ifdef CONFIG_MMU_GATHER_TABLE_FREE

struct mmu_table_batch {
#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
	struct rcu_head		rcu;
#endif
	unsigned int		nr;
	void			*tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
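
/*
 * Sketch of how an architecture with non-page page directories might wire
 * its directory free into the batching above (illustrative only; the
 * arch_pte_table_to_cookie() helper is hypothetical, real architectures
 * encode whatever their __tlb_remove_table() needs into the cookie):
 *
 *	#define __pte_free_tlb(tlb, ptep, address)			\
 *		tlb_remove_table((tlb), arch_pte_table_to_cookie(ptep))
 *
 *	void __tlb_remove_table(void *cookie)
 *	{
 *		// actually free the page-table memory behind the cookie
 *	}
 */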

#else /* !CONFIG_MMU_GATHER_TABLE_FREE */

/*
 * Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page based
 * page directories and we can use the normal page batching to free them.
 */
#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))

#endif /* CONFIG_MMU_GATHER_TABLE_FREE */

#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
/*
 * This allows an architecture that does not use the linux page-tables for
 * hardware to skip the TLBI when freeing page tables.
 */
#ifndef tlb_needs_table_invalidate
#define tlb_needs_table_invalidate() (true)
#endif

#else

#ifdef tlb_needs_table_invalidate
#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
#endif

#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */
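
/*
 * Example of the override point above (hypothetical architecture): an arch
 * whose hardware never walks the Linux page tables can opt out of the extra
 * TLB invalidate before a table page is freed, e.g. in its asm/tlb.h:
 *
 *	#define tlb_needs_table_invalidate()	arch_hw_walks_linux_pgtables()
 *
 * where arch_hw_walks_linux_pgtables() is an illustrative predicate, not an
 * existing kernel interface.
 */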

#ifndef CONFIG_MMU_GATHER_NO_GATHER
/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE	8

struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct page		*pages[0];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Limit the maximum number of mmu_gather batches to reduce a risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
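
/*
 * Back-of-the-envelope numbers (assuming a typical 64-bit configuration with
 * 4 KiB pages and 8-byte pointers, so sizeof(struct mmu_gather_batch) == 16):
 *
 *	MAX_GATHER_BATCH	= (4096 - 16) / 8	= 510 pages per batch
 *	MAX_GATHER_BATCH_COUNT	= 10000 / 510		= 19 batches
 *
 * i.e. roughly 9700 pages are queued per tlb_flush_mmu() cycle before the
 * batch-count limit forces a flush, keeping soft-lockup latency bounded.
 */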

extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
				   int page_size);
#endif

/*
 * struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;

#ifdef CONFIG_MMU_GATHER_TABLE_FREE
	struct mmu_table_batch	*batch;
#endif

	unsigned long		start;
	unsigned long		end;
	/*
	 * we are in the middle of an operation to clear
	 * a full mm and can make some optimizations
	 */
	unsigned int		fullmm : 1;

	/*
	 * we have performed an operation which
	 * requires a complete flush of the tlb
	 */
	unsigned int		need_flush_all : 1;

	/*
	 * we have removed page directories
	 */
	unsigned int		freed_tables : 1;

	/*
	 * at which levels have we cleared entries?
	 */
	unsigned int		cleared_ptes : 1;
	unsigned int		cleared_pmds : 1;
	unsigned int		cleared_puds : 1;
	unsigned int		cleared_p4ds : 1;

	/*
	 * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
	 */
	unsigned int		vma_exec : 1;
	unsigned int		vma_huge : 1;

	unsigned int		batch_count;

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	struct mmu_gather_batch	*active;
	struct mmu_gather_batch	local;
	struct page		*__pages[MMU_GATHER_BUNDLE];

#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	unsigned int		page_size;
#endif
#endif
};

void tlb_flush_mmu(struct mmu_gather *tlb);

static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address,
				      unsigned int range_size)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + range_size);
}
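
/*
 * Worked example (assuming 4 KiB pages): after __tlb_reset_range() leaves
 * start = TASK_SIZE and end = 0, two PAGE_SIZE-sized adjustments at
 * addresses 0x1000 and 0x5000 leave start = 0x1000 and end = 0x6000, i.e.
 * the smallest range covering everything queued so far, which is all that
 * tlb_flush() then needs to invalidate.
 */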

static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0;
	} else {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
	tlb->freed_tables = 0;
	tlb->cleared_ptes = 0;
	tlb->cleared_pmds = 0;
	tlb->cleared_puds = 0;
	tlb->cleared_p4ds = 0;
	/*
	 * Do not reset mmu_gather::vma_* fields here, we do not
	 * call into tlb_start_vma() again to set them if there is an
	 * intermediate flush.
	 */
}

#ifdef CONFIG_MMU_GATHER_NO_RANGE

#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma)
#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma()
#endif

/*
 * When an architecture does not have efficient means of range flushing TLBs
 * there is no point in doing intermediate flushes on tlb_end_vma() to keep the
 * range small. We equally don't have to worry about page granularity or other
 * things.
 *
 * All we need to do is issue a full flush for any !0 range.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->end)
		flush_tlb_mm(tlb->mm);
}

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#define tlb_end_vma tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#else /* CONFIG_MMU_GATHER_NO_RANGE */

#ifndef tlb_flush

#if defined(tlb_start_vma) || defined(tlb_end_vma)
#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma()
#endif

/*
 * When an architecture does not provide its own tlb_flush() implementation
 * but does have a reasonably efficient flush_tlb_range() implementation
 * this allows implementing a reasonably efficient range flush.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all) {
		flush_tlb_mm(tlb->mm);
	} else if (tlb->end) {
		struct vm_area_struct vma = {
			.vm_mm = tlb->mm,
			.vm_flags = (tlb->vma_exec ? VM_EXEC    : 0) |
				    (tlb->vma_huge ? VM_HUGETLB : 0),
		};

		flush_tlb_range(&vma, tlb->start, tlb->end);
	}
}

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	/*
	 * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
	 * mips-4k) flush only large pages.
	 *
	 * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
	 * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
	 * range.
	 *
	 * We rely on tlb_end_vma() to issue a flush, such that when we reset
	 * these values the batch is empty.
	 */
	tlb->vma_huge = !!(vma->vm_flags & VM_HUGETLB);
	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
}

#else

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#endif /* tlb_flush */

#endif /* CONFIG_MMU_GATHER_NO_RANGE */
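
/*
 * Sketch of what an architecture-provided tlb_flush() can look like when the
 * hardware has size-aware invalidation (illustrative only; the
 * __arch_flush_tlb_stride() primitive is hypothetical, see e.g.
 * arch/arm64/include/asm/tlb.h for a real implementation of this idea):
 *
 *	static inline void tlb_flush(struct mmu_gather *tlb)
 *	{
 *		unsigned long stride = tlb_get_unmap_size(tlb);
 *
 *		if (tlb->fullmm || tlb->need_flush_all)
 *			flush_tlb_mm(tlb->mm);
 *		else
 *			__arch_flush_tlb_stride(tlb->mm, tlb->start,
 *						tlb->end, stride);
 *	}
 */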

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	/*
	 * Anything calling __tlb_adjust_range() also sets at least one of
	 * these bits.
	 */
	if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
	      tlb->cleared_puds || tlb->cleared_p4ds))
		return;

	tlb_flush(tlb);
	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
	__tlb_reset_range(tlb);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	if (__tlb_remove_page_size(tlb, page, page_size))
		tlb_flush_mmu(tlb);
}

static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
}
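
/*
 * Caller contract for the __ variants (illustrative): when the low-level
 * helper reports a full batch, the caller is responsible for flushing, e.g.
 *
 *	if (__tlb_remove_page(tlb, page))
 *		tlb_flush_mmu(tlb);
 *
 * which is exactly what tlb_remove_page() / tlb_remove_page_size() do on the
 * caller's behalf.
 */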

/*
 * Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 * required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

static inline void tlb_change_page_size(struct mmu_gather *tlb,
					unsigned int page_size)
{
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	if (tlb->page_size && tlb->page_size != page_size) {
		if (!tlb->fullmm && !tlb->need_flush_all)
			tlb_flush_mmu(tlb);
	}

	tlb->page_size = page_size;
#endif
}

static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
{
	if (tlb->cleared_ptes)
		return PAGE_SHIFT;
	if (tlb->cleared_pmds)
		return PMD_SHIFT;
	if (tlb->cleared_puds)
		return PUD_SHIFT;
	if (tlb->cleared_p4ds)
		return P4D_SHIFT;

	return PAGE_SHIFT;
}

static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
	return 1UL << tlb_get_unmap_shift(tlb);
}
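
/*
 * Worked example (assuming x86-64 style shifts with 4 KiB base pages): if a
 * zap only cleared PMD-level entries, tlb_get_unmap_shift() returns
 * PMD_SHIFT (21) and tlb_get_unmap_size() returns 2 MiB. An architecture's
 * tlb_flush() can use that as the invalidation stride instead of walking the
 * range in 4 KiB steps.
 */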

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush. When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
#ifndef tlb_start_vma
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	tlb_update_vma_flags(tlb, vma);
	flush_cache_range(vma, vma->vm_start, vma->vm_end);
}
#endif

#ifndef tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	/*
	 * Do a TLB flush and reset the range at VMA boundaries; this avoids
	 * the ranges growing with the unused space between consecutive VMAs,
	 * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
	 * this.
	 */
	tlb_flush_mmu_tlbonly(tlb);
}
#endif

#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that PTEs were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate. This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->cleared_ptes = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
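
/*
 * Growing the range alone is not enough to trigger a flush:
 * tlb_flush_mmu_tlbonly() bails out unless at least one of the cleared_* /
 * freed_tables bits is set, which is why every tlb_remove_*_tlb_entry() and
 * p*_free_tlb() helper sets one alongside __tlb_adjust_range().
 */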

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	do {							\
		unsigned long _sz = huge_page_size(h);		\
		__tlb_adjust_range(tlb, address, _sz);		\
		if (_sz == PMD_SIZE)				\
			tlb->cleared_pmds = 1;			\
		else if (_sz == PUD_SIZE)			\
			tlb->cleared_puds = 1;			\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 * This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
	do {								\
		__tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE);	\
		tlb->cleared_pmds = 1;					\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
	} while (0)

/**
 * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
 * invalidation. This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pud_tlb_entry
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
#endif

#define tlb_remove_pud_tlb_entry(tlb, pudp, address)			\
	do {								\
		__tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE);	\
		tlb->cleared_puds = 1;					\
		__tlb_remove_pud_tlb_entry(tlb, pudp, address);		\
	} while (0)

/*
 * For things like page table caches (ie caching addresses "inside" the
 * page tables, like x86 does), for legacy reasons, flushing an
 * individual page had better flush the page table caches behind it. This
 * is definitely how x86 works, for example. And if you have an
 * architected non-legacy page table cache (which I'm not aware of
 * anybody actually doing), you're going to have some architecturally
 * explicit flushing for that, likely *separate* from a regular TLB entry
 * flush, and thus you'd need more than just some range expansion..
 *
 * So if we ever find an architecture
 * that would want something that odd, I think it is up to that
 * architecture to do its own odd thing, not cause pain for others
 * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
 *
 * For now w.r.t page table cache, mark the range_size as PAGE_SIZE
 */
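
/*
 * Note on the cleared_* levels set below: freeing a page-table page means
 * the entry one level up that pointed at it was cleared, so pte_free_tlb()
 * marks cleared_pmds, pmd_free_tlb() marks cleared_puds, and pud_free_tlb()
 * marks cleared_p4ds; p4d_free_tlb() has no matching bit and only sets
 * freed_tables. All of them set freed_tables so tlb_flush() can be told that
 * page-table pages (not just leaf pages) went away.
 */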

#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		tlb->cleared_pmds = 1;				\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)

#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		tlb->cleared_puds = 1;				\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)

#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		tlb->cleared_p4ds = 1;				\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)

#define p4d_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__p4d_free_tlb(tlb, pudp, address);		\
	} while (0)

#endif /* CONFIG_MMU */

#endif /* _ASM_GENERIC__TLB_H */