arch/um/include/asm/tlb.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __UM_TLB_H
#define __UM_TLB_H

#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/percpu.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)

/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		need_flush;	/* Really unmapped some ptes? */
	unsigned long		start;
	unsigned long		end;
	unsigned int		fullmm;		/* non-zero means full mm flush */
};

static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
					  unsigned long address)
{
	if (tlb->start > address)
		tlb->start = address;
	if (tlb->end < address + PAGE_SIZE)
		tlb->end = address + PAGE_SIZE;
}
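
/*
 * Range-tracking example (illustrative, not kernel code): assuming 4 KiB
 * pages, recording two scattered addresses grows a single flush window
 * instead of forcing one flush per page:
 *
 *	tlb->start = TASK_SIZE; tlb->end = 0;		(from init_tlb_gather)
 *	__tlb_remove_tlb_entry(tlb, ptep, 0x2000);	// start 0x2000, end 0x3000
 *	__tlb_remove_tlb_entry(tlb, ptep, 0x5000);	// start 0x2000, end 0x6000
 *
 * The eventual flush then covers [0x2000, 0x6000) in one call.
 */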

static inline void init_tlb_gather(struct mmu_gather *tlb)
{
	tlb->need_flush = 0;

	tlb->start = TASK_SIZE;
	tlb->end = 0;

	if (tlb->fullmm) {
		tlb->start = 0;
		tlb->end = TASK_SIZE;
	}
}

static inline void
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->start = start;
	tlb->end = end;
	tlb->fullmm = !(start | (end+1));

	init_tlb_gather(tlb);
}
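
/*
 * The fullmm test above encodes the convention that a full address-space
 * teardown is requested with start == 0 and end == ~0UL: (end + 1) then
 * wraps to 0 (unsigned overflow is well defined), so (start | (end + 1))
 * is 0 and fullmm becomes 1. Any narrower range yields fullmm == 0:
 *
 *	start = 0,      end = ~0UL    ->  fullmm = 1  (exit/execve teardown)
 *	start = 0x2000, end = 0x6000  ->  fullmm = 0  (partial unmap)
 */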

extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			       unsigned long end);

static inline void
tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
}

static inline void
tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	init_tlb_gather(tlb);
}

static inline void
tlb_flush_mmu(struct mmu_gather *tlb)
{
	if (!tlb->need_flush)
		return;

	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}
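
/*
 * Worked example (illustrative): because of the need_flush check above,
 * tearing down a range whose ptes were never populated costs almost
 * nothing; tlb_remove_tlb_entry() never runs, so the flush is skipped:
 *
 *	arch_tlb_gather_mmu(&tlb, mm, start, end);
 *	// walk finds no present ptes, need_flush stays 0
 *	tlb_flush_mmu(&tlb);	// returns at once, no flush_tlb_mm_range()
 */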

/* arch_tlb_finish_mmu
 *	Called at the end of the shootdown operation to free up any resources
 *	that were required.
 */
static inline void
arch_tlb_finish_mmu(struct mmu_gather *tlb,
		unsigned long start, unsigned long end, bool force)
{
	if (force) {
		tlb->start = start;
		tlb->end = end;
		tlb->need_flush = 1;
	}
	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();
}
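
/*
 * Putting the pieces together, a typical shootdown as driven by the core
 * mm looks roughly like this (a sketch; the real callers reach these
 * entry points through the generic tlb_gather_mmu()/tlb_finish_mmu()
 * wrappers):
 *
 *	struct mmu_gather tlb;
 *
 *	arch_tlb_gather_mmu(&tlb, mm, start, end);
 *	// for each present pte in [start, end):
 *	//	tlb_remove_tlb_entry(&tlb, ptep, addr);  record flush range
 *	//	tlb_remove_page(&tlb, page);             free page + swap cache
 *	arch_tlb_finish_mmu(&tlb, start, end, false);	// one flush at the end
 */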

/* tlb_remove_page
 *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
 *	while handling the additional races in SMP caused by other CPUs
 *	caching valid mappings in their TLBs.
 */
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;
	free_page_and_swap_cache(page);
	return false; /* avoid calling tlb_flush_mmu */
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	__tlb_remove_page(tlb, page);
}

static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	return __tlb_remove_page(tlb, page);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	return tlb_remove_page(tlb, page);
}

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that ptes were really unmapped in ->need_flush, so we can
 * later optimise away the tlb invalidate. This helps when userspace is
 * unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb->need_flush = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
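
/*
 * Usage sketch (simplified from the kind of loop in zap_pte_range(); the
 * helper names below are real, the control flow is abridged):
 *
 *	pte_t pte = ptep_get_and_clear(mm, addr, ptep);
 *	tlb_remove_tlb_entry(tlb, ptep, addr);
 *	tlb_remove_page(tlb, pte_page(pte));
 *
 * The entry is recorded before the page is freed, so the deferred flush in
 * tlb_flush_mmu() is guaranteed to cover it.
 */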

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	tlb_remove_tlb_entry(tlb, ptep, address)

#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
}

#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)

#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)

#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)

#define tlb_migrate_finish(mm) do {} while (0)

#endif