/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2013 Tensilica Inc.
 */

#ifndef _XTENSA_TLBFLUSH_H
#define _XTENSA_TLBFLUSH_H

#include <linux/stringify.h>
#include <asm/processor.h>

#define DTLB_WAY_PGD	7

#define ITLB_ARF_WAYS	4
#define DTLB_ARF_WAYS	4

#define ITLB_HIT_BIT	3
#define DTLB_HIT_BIT	4

#ifndef __ASSEMBLY__

/* TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLB entries
 *  - flush_tlb_mm(mm) flushes the TLB entries of the specified mm context
 *  - flush_tlb_page(vma, vmaddr) flushes a single page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 */

void local_flush_tlb_all(void);
void local_flush_tlb_mm(struct mm_struct *mm);
void local_flush_tlb_page(struct vm_area_struct *vma,
		unsigned long page);
void local_flush_tlb_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);

#ifdef CONFIG_SMP

void flush_tlb_all(void);
void flush_tlb_mm(struct mm_struct *);
void flush_tlb_page(struct vm_area_struct *, unsigned long);
void flush_tlb_range(struct vm_area_struct *, unsigned long,
		unsigned long);
void flush_tlb_kernel_range(unsigned long start, unsigned long end);

#else /* !CONFIG_SMP */

#define flush_tlb_all()			   local_flush_tlb_all()
#define flush_tlb_mm(mm)		   local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, page)	   local_flush_tlb_page(vma, page)
#define flush_tlb_range(vma, vmaddr, end)  local_flush_tlb_range(vma, vmaddr, \
								 end)
#define flush_tlb_kernel_range(start, end) local_flush_tlb_kernel_range(start, \
									end)

#endif /* CONFIG_SMP */
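
/*
 * Minimal usage sketch, not part of the original interface; the helper
 * name is hypothetical.  After the mappings backing [start, end) in a
 * VMA have changed, a range flush drops any stale translations.  With
 * CONFIG_SMP the out-of-line flush_tlb_range() declared above is used;
 * otherwise the macro falls back to local_flush_tlb_range().
 */
static inline void example_flush_user_range(struct vm_area_struct *vma,
					    unsigned long start,
					    unsigned long end)
{
	/* Drop cached translations for the updated range in this mm. */
	flush_tlb_range(vma, start, end);
}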

static inline unsigned long itlb_probe(unsigned long addr)
{
	unsigned long tmp;
	__asm__ __volatile__("pitlb  %0, %1\n\t" : "=a" (tmp) : "a" (addr));
	return tmp;
}

static inline unsigned long dtlb_probe(unsigned long addr)
{
	unsigned long tmp;
	__asm__ __volatile__("pdtlb  %0, %1\n\t" : "=a" (tmp) : "a" (addr));
	return tmp;
}

static inline void invalidate_itlb_entry (unsigned long probe)
{
	__asm__ __volatile__("iitlb  %0; isync\n\t" : : "a" (probe));
}

static inline void invalidate_dtlb_entry (unsigned long probe)
{
	__asm__ __volatile__("idtlb  %0; dsync\n\t" : : "a" (probe));
}

/* Use the .._no_isync functions with caution.  Generally, these are
 * handy for bulk invalidates followed by a single 'isync'.  The
 * caller must follow up with an 'isync', which can be relatively
 * expensive on some Xtensa implementations.
 */
static inline void invalidate_itlb_entry_no_isync (unsigned entry)
{
	/* Caller must follow up with 'isync'. */
	__asm__ __volatile__ ("iitlb  %0\n" : : "a" (entry) );
}

static inline void invalidate_dtlb_entry_no_isync (unsigned entry)
{
	/* Caller must follow up with 'isync'. */
	__asm__ __volatile__ ("idtlb  %0\n" : : "a" (entry) );
}
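
/*
 * Sketch of the bulk-invalidate pattern described above (illustrative
 * only, hypothetical helper name): invalidate several ITLB entries
 * without the per-entry isync, then pay for a single isync at the end.
 */
static inline void example_invalidate_itlb_bulk(const unsigned *entries,
						int count)
{
	int i;

	for (i = 0; i < count; i++)
		invalidate_itlb_entry_no_isync(entries[i]);

	/* One isync covers all of the invalidations above. */
	__asm__ __volatile__("isync\n\t");
}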

static inline void set_itlbcfg_register (unsigned long val)
{
	__asm__ __volatile__("wsr  %0, itlbcfg\n\t" "isync\n\t"
			     : : "a" (val));
}

static inline void set_dtlbcfg_register (unsigned long val)
{
	__asm__ __volatile__("wsr  %0, dtlbcfg; dsync\n\t"
			     : : "a" (val));
}

static inline void set_ptevaddr_register (unsigned long val)
{
	__asm__ __volatile__("wsr  %0, ptevaddr; isync\n"
			     : : "a" (val));
}

static inline unsigned long read_ptevaddr_register (void)
{
	unsigned long tmp;
	__asm__ __volatile__("rsr  %0, ptevaddr\n\t" : "=a" (tmp));
	return tmp;
}

static inline void write_dtlb_entry (pte_t entry, int way)
{
	__asm__ __volatile__("wdtlb  %1, %0; dsync\n\t"
			     : : "r" (way), "r" (entry) );
}

static inline void write_itlb_entry (pte_t entry, int way)
{
	__asm__ __volatile__("witlb  %1, %0; isync\n\t"
			     : : "r" (way), "r" (entry) );
}

static inline void invalidate_page_directory (void)
{
	invalidate_dtlb_entry (DTLB_WAY_PGD);
	invalidate_dtlb_entry (DTLB_WAY_PGD + 1);
	invalidate_dtlb_entry (DTLB_WAY_PGD + 2);
}

static inline void invalidate_itlb_mapping (unsigned address)
{
	unsigned long tlb_entry;
	if (((tlb_entry = itlb_probe(address)) & (1 << ITLB_HIT_BIT)) != 0)
		invalidate_itlb_entry(tlb_entry);
}

static inline void invalidate_dtlb_mapping (unsigned address)
{
	unsigned long tlb_entry;
	if (((tlb_entry = dtlb_probe(address)) & (1 << DTLB_HIT_BIT)) != 0)
		invalidate_dtlb_entry(tlb_entry);
}
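
/*
 * Sketch (hypothetical helper): after a single user PTE at 'addr' has
 * been changed, drop any cached translation for it from both TLBs using
 * the probe-and-invalidate helpers above.
 */
static inline void example_invalidate_both_mappings(unsigned addr)
{
	invalidate_dtlb_mapping(addr);
	invalidate_itlb_mapping(addr);
}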

/*
 * DO NOT USE THESE FUNCTIONS.  These instructions aren't part of the Xtensa
 * ISA and exist only for test purposes.
 * You may find them helpful for MMU debugging, however.
 *
 * 'at' is the unmodified input register
 * 'as' is the output register, as follows (specific to the Linux config):
 *
 *	as[31..12] contain the virtual address
 *	as[11..08] are meaningless
 *	as[07..00] contain the asid
 */

static inline unsigned long read_dtlb_virtual (int way)
{
	unsigned long tmp;
	__asm__ __volatile__("rdtlb0  %0, %1\n\t" : "=a" (tmp), "+a" (way));
	return tmp;
}

static inline unsigned long read_dtlb_translation (int way)
{
	unsigned long tmp;
	__asm__ __volatile__("rdtlb1  %0, %1\n\t" : "=a" (tmp), "+a" (way));
	return tmp;
}

static inline unsigned long read_itlb_virtual (int way)
{
	unsigned long tmp;
	__asm__ __volatile__("ritlb0  %0, %1\n\t" : "=a" (tmp), "+a" (way));
	return tmp;
}

static inline unsigned long read_itlb_translation (int way)
{
	unsigned long tmp;
	__asm__ __volatile__("ritlb1  %0, %1\n\t" : "=a" (tmp), "+a" (way));
	return tmp;
}
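
/*
 * Illustrative decode of the 'as' layout documented above (hypothetical
 * helpers, for MMU debugging only, specific to the Linux config): bits
 * [31..12] hold the virtual address and bits [7..0] the ASID.
 */
static inline unsigned long example_dtlb_way_vaddr(int way)
{
	return read_dtlb_virtual(way) & 0xfffff000UL;
}

static inline unsigned long example_dtlb_way_asid(int way)
{
	return read_dtlb_virtual(way) & 0xffUL;
}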

#endif	/* __ASSEMBLY__ */
#endif	/* _XTENSA_TLBFLUSH_H */