Linux 4.1.18
[linux/fpc-iii.git] / arch / alpha / include / asm / tlbflush.h
blob e89e0c2e15b17f7eda39cd753b5e4f918a0fa902
1 #ifndef _ALPHA_TLBFLUSH_H
2 #define _ALPHA_TLBFLUSH_H
4 #include <linux/mm.h>
5 #include <linux/sched.h>
6 #include <asm/compiler.h>
7 #include <asm/pgalloc.h>
9 #ifndef __EXTERN_INLINE
10 #define __EXTERN_INLINE extern inline
11 #define __MMU_EXTERN_INLINE
12 #endif
14 extern void __load_new_mm_context(struct mm_struct *);
17 /* Use a few helper functions to hide the ugly broken ASN
18 numbers on early Alphas (ev4 and ev45). */
20 __EXTERN_INLINE void
21 ev4_flush_tlb_current(struct mm_struct *mm)
23 __load_new_mm_context(mm);
24 tbiap();
27 __EXTERN_INLINE void
28 ev5_flush_tlb_current(struct mm_struct *mm)
30 __load_new_mm_context(mm);
33 /* Flush just one page in the current TLB set. We need to be very
34 careful about the icache here, there is no way to invalidate a
35 specific icache page. */
37 __EXTERN_INLINE void
38 ev4_flush_tlb_current_page(struct mm_struct * mm,
39 struct vm_area_struct *vma,
40 unsigned long addr)
42 int tbi_flag = 2;
43 if (vma->vm_flags & VM_EXEC) {
44 __load_new_mm_context(mm);
45 tbi_flag = 3;
47 tbi(tbi_flag, addr);
50 __EXTERN_INLINE void
51 ev5_flush_tlb_current_page(struct mm_struct * mm,
52 struct vm_area_struct *vma,
53 unsigned long addr)
55 if (vma->vm_flags & VM_EXEC)
56 __load_new_mm_context(mm);
57 else
58 tbi(2, addr);
/* Select the per-CPU-family implementation.  Generic kernels
   dispatch through the machine vector at run time; CPU-specific
   kernels bind directly to the ev4 or ev5 variant.  */
#ifdef CONFIG_ALPHA_GENERIC
# define flush_tlb_current		alpha_mv.mv_flush_tlb_current
# define flush_tlb_current_page		alpha_mv.mv_flush_tlb_current_page
#else
# ifdef CONFIG_ALPHA_EV4
#  define flush_tlb_current		ev4_flush_tlb_current
#  define flush_tlb_current_page	ev4_flush_tlb_current_page
# else
#  define flush_tlb_current		ev5_flush_tlb_current
#  define flush_tlb_current_page	ev5_flush_tlb_current_page
# endif
#endif

/* If we defined __EXTERN_INLINE above, undo it now that the
   helpers are done.  */
#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
#undef __MMU_EXTERN_INLINE
#endif
80 /* Flush current user mapping. */
81 static inline void
82 flush_tlb(void)
84 flush_tlb_current(current->active_mm);
87 /* Flush someone else's user mapping. */
88 static inline void
89 flush_tlb_other(struct mm_struct *mm)
91 unsigned long *mmc = &mm->context[smp_processor_id()];
92 /* Check it's not zero first to avoid cacheline ping pong
93 when possible. */
94 if (*mmc) *mmc = 0;
97 #ifndef CONFIG_SMP
98 /* Flush everything (kernel mapping may also have changed
99 due to vmalloc/vfree). */
100 static inline void flush_tlb_all(void)
102 tbia();
105 /* Flush a specified user mapping. */
106 static inline void
107 flush_tlb_mm(struct mm_struct *mm)
109 if (mm == current->active_mm)
110 flush_tlb_current(mm);
111 else
112 flush_tlb_other(mm);
115 /* Page-granular tlb flush. */
116 static inline void
117 flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
119 struct mm_struct *mm = vma->vm_mm;
121 if (mm == current->active_mm)
122 flush_tlb_current_page(mm, vma, addr);
123 else
124 flush_tlb_other(mm);
127 /* Flush a specified range of user mapping. On the Alpha we flush
128 the whole user tlb. */
129 static inline void
130 flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
131 unsigned long end)
133 flush_tlb_mm(vma->vm_mm);
136 #else /* CONFIG_SMP */
138 extern void flush_tlb_all(void);
139 extern void flush_tlb_mm(struct mm_struct *);
140 extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
141 extern void flush_tlb_range(struct vm_area_struct *, unsigned long,
142 unsigned long);
144 #endif /* CONFIG_SMP */
/* Kernel-range flush: no finer-grained primitive here, so the
   start/end arguments are ignored and everything is flushed.  */
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_all();
}
152 #endif /* _ALPHA_TLBFLUSH_H */