#ifndef __ASM_SH64_MMU_CONTEXT_H
#define __ASM_SH64_MMU_CONTEXT_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/mmu_context.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Paul Mundt
 *
 * ASID handling idea taken from MIPS implementation.
 */

#ifndef __ASSEMBLY__
/*
 * Cache of MMU context last used.
 *
 * The MMU "context" consists of two things:
 *    (a) TLB cache version (or cycle, top 24 bits of mmu_context_cache)
 *    (b) ASID (Address Space IDentifier, bottom 8 bits of mmu_context_cache)
 */
extern unsigned long mmu_context_cache;
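
/*
 * Worked example (illustrative only, not from the original header):
 * with mmu_context_cache at 0x00000305, the version is 0x00000300
 * (mmu_context_cache & MMU_CONTEXT_VERSION_MASK) and the ASID is 0x05
 * (mmu_context_cache & MMU_CONTEXT_ASID_MASK).  Incrementing the cache
 * past ASID 0xff carries into the version field, which
 * get_new_mmu_context() below treats as an ASID-space rollover.
 */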
#include <asm/page.h>
#include <asm-generic/mm_hooks.h>

/* Current mm's pgd */
extern pgd_t *mmu_pdtp_cache;
#define SR_ASID_MASK		0xffffffffff00ffffULL
#define SR_ASID_SHIFT		16
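
/*
 * Note (inferred from the two defines above): the ASID occupies bits
 * [23:16] of SR.  SR_ASID_MASK clears exactly that byte, and
 * SR_ASID_SHIFT moves an 8-bit ASID into place, as set_asid() below
 * does:
 *
 *	sr = (sr & SR_ASID_MASK) | (asid << SR_ASID_SHIFT);
 */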
#define MMU_CONTEXT_ASID_MASK		0x000000ff
#define MMU_CONTEXT_VERSION_MASK	0xffffff00
#define MMU_CONTEXT_FIRST_VERSION	0x00000100
#define NO_CONTEXT			0

/* The ASID is an 8-bit value, so it can never be 0x100 */
#define MMU_NO_ASID			0x100
/*
 * Virtual Page Number mask
 */
#define MMU_VPN_MASK	0xfffff000
static inline void
get_new_mmu_context(struct mm_struct *mm)
{
	extern void flush_tlb_all(void);
	extern void flush_cache_all(void);

	unsigned long mc = ++mmu_context_cache;

	if (!(mc & MMU_CONTEXT_ASID_MASK)) {
		/* We have exhausted the ASIDs of this version.
		   Flush the whole TLB and start a new cycle. */
		flush_tlb_all();
		/* We also have to flush all caches, as ASIDs are
		   used in the caches. */
		flush_cache_all();
		/* Fix the version if needed.
		   Note that we avoid version #0/ASID #0 to distinguish NO_CONTEXT. */
		if (!mc)
			mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION;
	}
	mm->context = mc;
}
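
/*
 * Rollover trace (illustrative only): if mmu_context_cache is
 * 0x000001ff, the increment above yields 0x00000200.  The ASID bits
 * are then zero, so the TLB and caches are flushed and every mm is
 * lazily re-assigned a context with version 0x00000200.  Only a
 * complete wrap of the counter back to 0 resets it to
 * MMU_CONTEXT_FIRST_VERSION.
 */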
/*
 * Get an MMU context if needed.
 */
static __inline__ void
get_mmu_context(struct mm_struct *mm)
{
	if (mm) {
		unsigned long mc = mmu_context_cache;
		/* Check whether we have an old version of the context.
		   If it is old, we need to get a new context with a new version. */
		if ((mm->context ^ mc) & MMU_CONTEXT_VERSION_MASK)
			get_new_mmu_context(mm);
	}
}
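
/*
 * Version-check example (illustrative only): if mm->context is
 * 0x00000105 and mmu_context_cache is 0x00000287, then
 * (0x00000105 ^ 0x00000287) & MMU_CONTEXT_VERSION_MASK == 0x00000300,
 * which is non-zero: the mm's ASID predates the last global flush,
 * so a fresh context is allocated.
 */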
/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mm->context = NO_CONTEXT;

	return 0;
}
/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static inline void destroy_context(struct mm_struct *mm)
{
	extern void flush_tlb_mm(struct mm_struct *mm);

	/* Well, at least free the TLB entries */
	flush_tlb_mm(mm);
}
#endif /* __ASSEMBLY__ */
/* Common defines */
#define TLB_STEP	0x00000010
#define TLB_PTEH	0x00000000
#define TLB_PTEL	0x00000008
/* PTEH defines */
#define PTEH_ASID_SHIFT	2
#define PTEH_VALID	0x0000000000000001
#define PTEH_SHARED	0x0000000000000002
#define PTEH_MATCH_ASID	0x00000000000003ff
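
/*
 * Layout sketch (an assumption drawn from the defines above, not
 * stated in the original header): PTEH appears to carry the valid bit
 * at bit 0, the shared bit at bit 1 and the ASID compare field from
 * bit 2 upward, so a match value could be composed as, e.g.,
 *
 *	pteh = (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
 *
 * with PTEH_MATCH_ASID masking the low bits relevant to the compare.
 */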
#ifndef __ASSEMBLY__
/* This has to be a common function because the next location to fill
 * information is shared. */
extern void __do_tlb_refill(unsigned long address, unsigned long long is_text_not_data, pte_t *pte);
/* Profiling counter. */
#ifdef CONFIG_SH64_PROC_TLB
extern unsigned long long calls_to_do_fast_page_fault;
#endif
static inline unsigned long get_asid(void)
{
	unsigned long long sr;

	asm volatile ("getcon	" __SR ", %0\n\t"
		      : "=r" (sr));

	sr = (sr >> SR_ASID_SHIFT) & MMU_CONTEXT_ASID_MASK;
	return (unsigned long) sr;
}
/* Set ASID into SR */
static inline void set_asid(unsigned long asid)
{
	unsigned long long sr, pc;

	asm volatile ("getcon	" __SR ", %0" : "=r" (sr));

	sr = (sr & SR_ASID_MASK) | (asid << SR_ASID_SHIFT);
	/*
	 * This function may be inlined at several call sites, so to stop
	 * the assembler from reporting duplicate symbols we use the gas
	 * trick of numeric local labels and forward references.
	 */
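	/*
	 * What the sequence below does (a sketch; the meaning of the
	 * individual SR bits is inferred, not taken from this header):
	 * it installs a transitional SR with bit 28 set, stashes the new
	 * ASID-bearing SR value in SSR, points SPC at the local label 1
	 * (the ori sets the low bit of the target address), and executes
	 * rte so that execution resumes at label 1 with the new SR, and
	 * hence the new ASID, in effect.
	 */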
162 asm volatile ("movi 1, %1\n\t"
163 "shlli %1, 28, %1\n\t"
164 "or %0, %1, %1\n\t"
165 "putcon %1, " __SR "\n\t"
166 "putcon %0, " __SSR "\n\t"
167 "movi 1f, %1\n\t"
168 "ori %1, 1 , %1\n\t"
169 "putcon %1, " __SPC "\n\t"
170 "rte\n"
171 "1:\n\t"
172 : "=r" (sr), "=r" (pc) : "0" (sr));
/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static __inline__ void activate_context(struct mm_struct *mm)
{
	get_mmu_context(mm);
	set_asid(mm->context & MMU_CONTEXT_ASID_MASK);
}
static __inline__ void switch_mm(struct mm_struct *prev,
				 struct mm_struct *next,
				 struct task_struct *tsk)
{
	if (prev != next) {
		mmu_pdtp_cache = next->pgd;
		activate_context(next);
	}
}
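
/*
 * Context-switch flow (a summary of the code above): switch_mm() only
 * acts when the mm actually changes; it caches the new mm's pgd in
 * mmu_pdtp_cache and calls activate_context(), which in turn calls
 * get_mmu_context() (possibly allocating a new version/ASID) and then
 * set_asid() so the hardware starts matching on the new mm's ASID.
 */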
#define deactivate_mm(tsk,mm)	do { } while (0)

#define activate_mm(prev, next) \
	switch_mm((prev),(next),NULL)
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/* Nothing to do. */
}

#endif /* __ASSEMBLY__ */

#endif /* __ASM_SH64_MMU_CONTEXT_H */