mm: fix exec activate_mm vs TLB shootdown and lazy tlb switching race
[linux/fpc-iii.git] / arch / m32r / include / asm / tlbflush.h
blobf6c7237316d0ef07d526df9c817116b396e331d4
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_M32R_TLBFLUSH_H
3 #define _ASM_M32R_TLBFLUSH_H
5 #include <asm/m32r.h>
/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 */
/* Local (this-CPU-only) flush primitives, implemented in mm/fault.c. */
extern void local_flush_tlb_all(void);
extern void local_flush_tlb_mm(struct mm_struct *);
extern void local_flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void local_flush_tlb_range(struct vm_area_struct *, unsigned long,
	unsigned long);

#ifndef CONFIG_SMP
#ifdef CONFIG_MMU
/* UP + MMU: a local flush is sufficient, map straight to the local ops. */
#define flush_tlb_all()			local_flush_tlb_all()
#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, page)	local_flush_tlb_page(vma, page)
#define flush_tlb_range(vma, start, end)	\
	local_flush_tlb_range(vma, start, end)
/*
 * No selective kernel-range flush on m32r; fall back to flushing
 * the whole TLB.
 */
#define flush_tlb_kernel_range(start, end)	local_flush_tlb_all()
#else /* CONFIG_MMU */
/* No MMU: there is no TLB, so all flushes are no-ops. */
#define flush_tlb_all()			do { } while (0)
#define flush_tlb_mm(mm)		do { } while (0)
#define flush_tlb_page(vma, vmaddr)	do { } while (0)
#define flush_tlb_range(vma, start, end)	do { } while (0)
#endif	/* CONFIG_MMU */
#else	/* CONFIG_SMP */
/* SMP variants broadcast the flush to all CPUs (see arch SMP code). */
extern void smp_flush_tlb_all(void);
extern void smp_flush_tlb_mm(struct mm_struct *);
extern void smp_flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void smp_flush_tlb_range(struct vm_area_struct *, unsigned long,
	unsigned long);

#define flush_tlb_all()			smp_flush_tlb_all()
#define flush_tlb_mm(mm)		smp_flush_tlb_mm(mm)
#define flush_tlb_page(vma, page)	smp_flush_tlb_page(vma, page)
#define flush_tlb_range(vma, start, end)	\
	smp_flush_tlb_range(vma, start, end)
/* As in the UP case, kernel-range flushes degrade to a full flush. */
#define flush_tlb_kernel_range(start, end)	smp_flush_tlb_all()
#endif	/* CONFIG_SMP */
53 static __inline__ void __flush_tlb_page(unsigned long page)
55 unsigned int tmpreg0, tmpreg1, tmpreg2;
57 __asm__ __volatile__ (
58 "seth %0, #high(%4) \n\t"
59 "st %3, @(%5, %0) \n\t"
60 "ldi %1, #1 \n\t"
61 "st %1, @(%6, %0) \n\t"
62 "add3 %1, %0, %7 \n\t"
63 ".fillinsn \n"
64 "1: \n\t"
65 "ld %2, @(%6, %0) \n\t"
66 "bnez %2, 1b \n\t"
67 "ld %0, @%1+ \n\t"
68 "ld %1, @%1 \n\t"
69 "st %2, @+%0 \n\t"
70 "st %2, @+%1 \n\t"
71 : "=&r" (tmpreg0), "=&r" (tmpreg1), "=&r" (tmpreg2)
72 : "r" (page), "i" (MMU_REG_BASE), "i" (MSVA_offset),
73 "i" (MTOP_offset), "i" (MIDXI_offset)
74 : "memory"
78 static __inline__ void __flush_tlb_all(void)
80 unsigned int tmpreg0, tmpreg1;
82 __asm__ __volatile__ (
83 "seth %0, #high(%2) \n\t"
84 "or3 %0, %0, #low(%2) \n\t"
85 "ldi %1, #0xc \n\t"
86 "st %1, @%0 \n\t"
87 ".fillinsn \n"
88 "1: \n\t"
89 "ld %1, @%0 \n\t"
90 "bnez %1, 1b \n\t"
91 : "=&r" (tmpreg0), "=&r" (tmpreg1)
92 : "i" (MTOP) : "memory"
96 extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
98 #endif /* _ASM_M32R_TLBFLUSH_H */