1 #ifndef _SPARC64_TLBFLUSH_H
2 #define _SPARC64_TLBFLUSH_H
4 #include <asm/mmu_context.h>
/* TSB flush operations. */

/* Maximum number of virtual addresses queued before a deferred flush
 * is forced. */
#define TLB_BATCH_NR	192

/*
 * A batch of pending user TLB/TSB flushes.
 *
 * NOTE(review): only the trailing vaddrs[] member survived the garbled
 * extraction of this header; the remaining members were restored from
 * the upstream sparc64 header (arch/sparc/include/asm/tlbflush_64.h) —
 * verify against the exact kernel tree in use before relying on layout.
 */
struct tlb_batch {
	bool huge;				/* batch is for huge pages */
	struct mm_struct *mm;			/* address space being flushed */
	unsigned long tlb_nr;			/* number of queued vaddrs */
	unsigned long active;
	unsigned long vaddrs[TLB_BATCH_NR];	/* queued virtual addresses */
};
/* Flush kernel TSB entries for the virtual range [start, end). */
void flush_tsb_kernel_range(unsigned long start, unsigned long end);

/* Flush the user TSB entries for the addresses queued in @tb. */
void flush_tsb_user(struct tlb_batch *tb);

/* Flush a single user TSB entry for @vaddr in @mm.  @huge presumably
 * selects the huge-page TSB — confirm against the implementation. */
void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
			 bool huge);
/* TLB flush operations. */
/*
 * mm-wide TLB flush hook — a no-op on sparc64, which defers user TLB
 * flushes instead of flushing eagerly.
 * NOTE(review): the body was lost in extraction; restored as the
 * upstream empty stub — confirm against the original header.
 */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
}
/*
 * Per-page TLB flush hook — a no-op on sparc64.
 * NOTE(review): the second parameter and the empty body were lost in
 * extraction; restored from the upstream header — verify.
 */
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vaddr)
{
}
/*
 * Ranged TLB flush hook — a no-op on sparc64.
 * NOTE(review): the empty body was lost in extraction; restored as the
 * upstream empty stub — confirm against the original header.
 */
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
}
/* Flush kernel TLB entries for the virtual range [start, end). */
void flush_tlb_kernel_range(unsigned long start, unsigned long end);
/* This arch provides its own lazy-MMU mode hooks (flushes are batched
 * via struct tlb_batch rather than issued eagerly). */
#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

void flush_tlb_pending(void);
void arch_enter_lazy_mmu_mode(void);
void arch_leave_lazy_mmu_mode(void);
/* Nothing to do when flushing while remaining in lazy mode. */
#define arch_flush_lazy_mmu_mode()	do {} while (0)
/* Low-level TLB flush primitives. */
void __flush_tlb_all(void);
void __flush_tlb_page(unsigned long context, unsigned long vaddr);
void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
54 static inline void global_flush_tlb_page(struct mm_struct
*mm
, unsigned long vaddr
)
56 __flush_tlb_page(CTX_HWBITS(mm
->context
), vaddr
);
59 #else /* CONFIG_SMP */
61 void smp_flush_tlb_kernel_range(unsigned long start
, unsigned long end
);
62 void smp_flush_tlb_page(struct mm_struct
*mm
, unsigned long vaddr
);
64 #define global_flush_tlb_page(mm, vaddr) \
65 smp_flush_tlb_page(mm, vaddr)
67 #endif /* ! CONFIG_SMP */
69 #endif /* _SPARC64_TLBFLUSH_H */