/*
 * include/asm-m32r/tlbflush.h
 */
1 #ifndef _ASM_M32R_TLBFLUSH_H
2 #define _ASM_M32R_TLBFLUSH_H
4 #include <linux/config.h>
5 #include <asm/m32r.h>
7 /*
8 * TLB flushing:
10 * - flush_tlb() flushes the current mm struct TLBs
11 * - flush_tlb_all() flushes all processes TLBs
12 * - flush_tlb_mm(mm) flushes the specified mm context TLB's
13 * - flush_tlb_page(vma, vmaddr) flushes one page
14 * - flush_tlb_range(vma, start, end) flushes a range of pages
15 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
16 * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
/*
 * This-CPU-only flush primitives, implemented in arch mm code.
 * The flush_tlb_* interface below maps onto these (directly on UP,
 * via the smp_* cross-call wrappers on SMP).
 */
extern void local_flush_tlb_all(void);
extern void local_flush_tlb_mm(struct mm_struct *);
extern void local_flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void local_flush_tlb_range(struct vm_area_struct *, unsigned long,
unsigned long);
#ifndef CONFIG_SMP
#ifdef CONFIG_MMU
/* UP with MMU: the generic interface is just the local primitives. */
#define flush_tlb_all() local_flush_tlb_all()
#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
#define flush_tlb_range(vma, start, end) \
local_flush_tlb_range(vma, start, end)
/* No dedicated kernel-range flush; fall back to a full TLB flush. */
#define flush_tlb_kernel_range(start, end) local_flush_tlb_all()
#else /* CONFIG_MMU */
/* No MMU, hence no TLB: all flushes are no-ops. */
#define flush_tlb_all() do { } while (0)
#define flush_tlb_mm(mm) do { } while (0)
#define flush_tlb_page(vma, vmaddr) do { } while (0)
#define flush_tlb_range(vma, start, end) do { } while (0)
#endif /* CONFIG_MMU */
#else /* CONFIG_SMP */
/* SMP: cross-call wrappers that flush on all CPUs, defined in arch code. */
extern void smp_flush_tlb_all(void);
extern void smp_flush_tlb_mm(struct mm_struct *);
extern void smp_flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void smp_flush_tlb_range(struct vm_area_struct *, unsigned long,
unsigned long);
#define flush_tlb_all() smp_flush_tlb_all()
#define flush_tlb_mm(mm) smp_flush_tlb_mm(mm)
#define flush_tlb_page(vma, page) smp_flush_tlb_page(vma, page)
#define flush_tlb_range(vma, start, end) \
smp_flush_tlb_range(vma, start, end)
/* As on UP: kernel-range flush degrades to a full flush (on every CPU). */
#define flush_tlb_kernel_range(start, end) smp_flush_tlb_all()
#endif /* CONFIG_SMP */
54 static __inline__ void __flush_tlb_page(unsigned long page)
56 unsigned int tmpreg0, tmpreg1, tmpreg2;
58 __asm__ __volatile__ (
59 "seth %0, #high(%4) \n\t"
60 "st %3, @(%5, %0) \n\t"
61 "ldi %1, #1 \n\t"
62 "st %1, @(%6, %0) \n\t"
63 "add3 %1, %0, %7 \n\t"
64 ".fillinsn \n"
65 "1: \n\t"
66 "ld %2, @(%6, %0) \n\t"
67 "bnez %2, 1b \n\t"
68 "ld %0, @%1+ \n\t"
69 "ld %1, @%1 \n\t"
70 "st %2, @+%0 \n\t"
71 "st %2, @+%1 \n\t"
72 : "=&r" (tmpreg0), "=&r" (tmpreg1), "=&r" (tmpreg2)
73 : "r" (page), "i" (MMU_REG_BASE), "i" (MSVA_offset),
74 "i" (MTOP_offset), "i" (MIDXI_offset)
75 : "memory"
79 static __inline__ void __flush_tlb_all(void)
81 unsigned int tmpreg0, tmpreg1;
83 __asm__ __volatile__ (
84 "seth %0, #high(%2) \n\t"
85 "or3 %0, %0, #low(%2) \n\t"
86 "ldi %1, #0xc \n\t"
87 "st %1, @%0 \n\t"
88 ".fillinsn \n"
89 "1: \n\t"
90 "ld %1, @%0 \n\t"
91 "bnez %1, 1b \n\t"
92 : "=&r" (tmpreg0), "=&r" (tmpreg1)
93 : "i" (MTOP) : "memory"
/* No TLB state is kept for page tables on this port: no-op. */
#define flush_tlb_pgtables(mm, start, end) do { } while (0)

/* Hook called after a PTE is set; implemented in arch mm code. */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
101 #endif /* _ASM_M32R_TLBFLUSH_H */