1 #ifndef _ALPHA_TLBFLUSH_H
2 #define _ALPHA_TLBFLUSH_H
5 #include <linux/sched.h>
6 #include <asm/compiler.h>
7 #include <asm/pgalloc.h>
9 #ifndef __EXTERN_INLINE
10 #define __EXTERN_INLINE extern inline
11 #define __MMU_EXTERN_INLINE
/* Defined in arch mm code: installs a fresh ASN/context for @mm.  */
extern void __load_new_mm_context(struct mm_struct *);
17 /* Use a few helper functions to hide the ugly broken ASN
18 numbers on early Alphas (ev4 and ev45). */
21 ev4_flush_tlb_current(struct mm_struct
*mm
)
23 __load_new_mm_context(mm
);
28 ev5_flush_tlb_current(struct mm_struct
*mm
)
30 __load_new_mm_context(mm
);
33 /* Flush just one page in the current TLB set. We need to be very
34 careful about the icache here, there is no way to invalidate a
35 specific icache page. */
38 ev4_flush_tlb_current_page(struct mm_struct
* mm
,
39 struct vm_area_struct
*vma
,
43 if (vma
->vm_flags
& VM_EXEC
) {
44 __load_new_mm_context(mm
);
51 ev5_flush_tlb_current_page(struct mm_struct
* mm
,
52 struct vm_area_struct
*vma
,
55 if (vma
->vm_flags
& VM_EXEC
)
56 __load_new_mm_context(mm
);
/* Select the CPU-specific helpers: indirect through the machine
   vector on generic kernels, bind directly otherwise.  */
#ifdef CONFIG_ALPHA_GENERIC
# define flush_tlb_current		alpha_mv.mv_flush_tlb_current
# define flush_tlb_current_page		alpha_mv.mv_flush_tlb_current_page
#else
# ifdef CONFIG_ALPHA_EV4
#  define flush_tlb_current		ev4_flush_tlb_current
#  define flush_tlb_current_page	ev4_flush_tlb_current_page
# else
#  define flush_tlb_current		ev5_flush_tlb_current
#  define flush_tlb_current_page	ev5_flush_tlb_current_page
# endif
#endif
/* Undo the defaults set at the top of this header, but only if we
   were the ones who set them.  */
#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
#undef __MMU_EXTERN_INLINE
#endif
80 /* Flush current user mapping. */
84 flush_tlb_current(current
->active_mm
);
87 /* Flush someone else's user mapping. */
89 flush_tlb_other(struct mm_struct
*mm
)
91 unsigned long *mmc
= &mm
->context
[smp_processor_id()];
92 /* Check it's not zero first to avoid cacheline ping pong
/* Flush everything (kernel mapping may also have changed
   due to vmalloc/vfree).  */
static inline void flush_tlb_all(void)
{
	/* NOTE(review): body restored from the known upstream version —
	   tbia() invalidates the entire TLB.  Confirm against asm headers.  */
	tbia();
}
105 /* Flush a specified user mapping. */
107 flush_tlb_mm(struct mm_struct
*mm
)
109 if (mm
== current
->active_mm
)
110 flush_tlb_current(mm
);
115 /* Page-granular tlb flush. */
117 flush_tlb_page(struct vm_area_struct
*vma
, unsigned long addr
)
119 struct mm_struct
*mm
= vma
->vm_mm
;
121 if (mm
== current
->active_mm
)
122 flush_tlb_current_page(mm
, vma
, addr
);
127 /* Flush a specified range of user mapping. On the Alpha we flush
128 the whole user tlb. */
130 flush_tlb_range(struct vm_area_struct
*vma
, unsigned long start
,
133 flush_tlb_mm(vma
->vm_mm
);
136 #else /* CONFIG_SMP */
138 extern void flush_tlb_all(void);
139 extern void flush_tlb_mm(struct mm_struct
*);
140 extern void flush_tlb_page(struct vm_area_struct
*, unsigned long);
141 extern void flush_tlb_range(struct vm_area_struct
*, unsigned long,
144 #endif /* CONFIG_SMP */
146 static inline void flush_tlb_kernel_range(unsigned long start
,
152 #endif /* _ALPHA_TLBFLUSH_H */