/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_32_TLBFLUSH_H
#define _ASM_POWERPC_BOOK3S_32_TLBFLUSH_H

#define MMU_NO_CONTEXT      (0)

/*
 * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
 */
void hash__flush_tlb_mm(struct mm_struct *mm);
void hash__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
void hash__flush_range(struct mm_struct *mm, unsigned long start, unsigned long end);
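
/*
 * The hash__* helpers above are implemented out of line in the book3s32
 * mm code. The primitives below invalidate a single translation (_tlbie)
 * or the whole TLB (_tlbia); on SMP the broadcast tlbie needs to be
 * serialized with other CPUs, which is why _tlbie() is out of line there.
 */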
#ifdef CONFIG_SMP
void _tlbie(unsigned long address);
#else
static inline void _tlbie(unsigned long address)
{
	/* Invalidate the TLB entry for this address; the sync makes sure
	 * the invalidation has completed before we continue. */
	asm volatile ("tlbie %0; sync" : : "r" (address) : "memory");
}
#endif
void _tlbia(void);

/*
 * Called at the end of a mmu_gather operation to make sure the
 * TLB flush is completely done.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	/* 603 needs to flush the whole TLB here since it doesn't use a hash table. */
	if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
		_tlbia();
}
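
/*
 * flush_range() below is the common backend for the range interfaces
 * further down: with a hash table the range is flushed from the hash
 * table; otherwise a single-page range takes one tlbie and anything
 * larger flushes the whole TLB.
 */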
static inline void flush_range(struct mm_struct *mm, unsigned long start,
			       unsigned long end)
{
	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
		hash__flush_range(mm, start, end);
	else if (end - start <= PAGE_SIZE)
		_tlbie(start);
	else
		_tlbia();
}
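
/*
 * Without a hash table there is no selective per-mm invalidation here,
 * so dropping a whole address space means flushing the whole TLB.
 */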
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
		hash__flush_tlb_mm(mm);
	else
		_tlbia();
}
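
/* One page: flush its entry from the hash table, or one direct tlbie. */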
static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
		hash__flush_tlb_page(vma, vmaddr);
	else
		_tlbie(vmaddr);
}
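
/* Both range interfaces funnel into flush_range() above. */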
static inline void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		unsigned long end)
{
	flush_range(vma->vm_mm, start, end);
}
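
/* Kernel mappings live in init_mm. */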
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_range(&init_mm, start, end);
}
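
/*
 * There is no cheaper CPU-local invalidation on this MMU, so the
 * local_* variants simply call the global ones.
 */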
static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
	flush_tlb_page(vma, vmaddr);
}

static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	flush_tlb_mm(mm);
}
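
/*
 * Typical call sites in generic mm code (a usage sketch, not part of
 * this header):
 *
 *	flush_tlb_page(vma, addr)		after changing a single PTE
 *	flush_tlb_range(vma, start, end)	after unmapping a range
 *	flush_tlb_kernel_range(start, end)	after vmalloc/vmap changes
 */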

#endif /* _ASM_POWERPC_BOOK3S_32_TLBFLUSH_H */