/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ALPHA_MMU_CONTEXT_H
#define __ALPHA_MMU_CONTEXT_H

/*
 * get a new mmu context..
 *
 * Copyright (C) 1996, Linus Torvalds
 */
#include <linux/mm_types.h>
#include <linux/sched.h>

#include <asm/machvec.h>
#include <asm/compiler.h>
#include <asm-generic/mm_hooks.h>
/*
 * Force a context reload. This is needed when we change the page
 * table pointer or when we update the ASN of the current process.
 */

/* Don't get into trouble with dueling __EXTERN_INLINEs.  */
#ifndef __EXTERN_INLINE
#include <asm/io.h>
#endif
static inline unsigned long
__reload_thread(struct pcb_struct *pcb)
{
	register unsigned long a0 __asm__("$16");
	register unsigned long v0 __asm__("$0");

	a0 = virt_to_phys(pcb);
	__asm__ __volatile__(
		"call_pal %2 #__reload_thread"
		: "=r"(v0), "=r"(a0)
		: "i"(PAL_swpctx), "r"(a0)
		: "$1", "$22", "$23", "$24", "$25");

	return v0;
}
/*
 * The maximum ASN's the processor supports.  On the EV4 this is 63
 * but the PAL-code doesn't actually use this information.  On the
 * EV5 this is 127, and EV6 has 255.
 *
 * On the EV4, the ASNs are more-or-less useless anyway, as they are
 * only used as an icache tag, not for TB entries.  On the EV5 and EV6,
 * ASN's also validate the TB entries, and thus make a lot more sense.
 *
 * The EV4 ASN's don't even match the architecture manual, ugh.  And
 * I quote: "If a processor implements address space numbers (ASNs),
 * and the old PTE has the Address Space Match (ASM) bit clear (ASNs
 * in use) and the Valid bit set, then entries can also effectively be
 * made coherent by assigning a new, unused ASN to the currently
 * running process and not reusing the previous ASN before calling the
 * appropriate PALcode routine to invalidate the translation buffer (TB)".
 *
 * In short, the EV4 has a "kind of" ASN capability, but it doesn't actually
 * work correctly and can thus not be used (explaining the lack of PAL-code
 * support).
 */
#define EV4_MAX_ASN 63
#define EV5_MAX_ASN 127
#define EV6_MAX_ASN 255
#ifdef CONFIG_ALPHA_GENERIC
# define MAX_ASN	(alpha_mv.max_asn)
#else
# ifdef CONFIG_ALPHA_EV4
#  define MAX_ASN	EV4_MAX_ASN
# elif defined(CONFIG_ALPHA_EV5)
#  define MAX_ASN	EV5_MAX_ASN
# else
#  define MAX_ASN	EV6_MAX_ASN
# endif
#endif
/*
 * cpu_last_asn(processor):
 * 63                                            0
 * +-------------+----------------+--------------+
 * | asn version | this processor | hardware asn |
 * +-------------+----------------+--------------+
 */

#include <asm/smp.h>
#ifdef CONFIG_SMP
#define cpu_last_asn(cpuid)	(cpu_data[cpuid].last_asn)
#else
extern unsigned long last_asn;
#define cpu_last_asn(cpuid)	last_asn
#endif /* CONFIG_SMP */
#define WIDTH_HARDWARE_ASN	8
#define ASN_FIRST_VERSION (1UL << WIDTH_HARDWARE_ASN)
#define HARDWARE_ASN_MASK ((1UL << WIDTH_HARDWARE_ASN) - 1)
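/*
 * Illustrative example (not part of the original header): with
 * WIDTH_HARDWARE_ASN == 8, ASN_FIRST_VERSION is 0x100 and
 * HARDWARE_ASN_MASK is 0xff, so a context value of 0x305 decodes as
 * ASN version 3 with hardware ASN 5.
 */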
/*
 * NOTE! The way this is set up, the high bits of the "asn_cache" (and
 * the "mm->context") are the ASN _version_ code. A version of 0 is
 * always considered invalid, so to invalidate another process you only
 * need to do "p->mm->context = 0".
 *
 * If we need more ASN's than the processor has, we invalidate the old
 * user TLB's (tbiap()) and start a new ASN version. That will automatically
 * force a new asn for any other processes the next time they want to
 * run.
 */
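/*
 * Worked wrap-around example (illustrative, assuming EV6): if
 * cpu_last_asn is 0x3ff (version 3, hardware ASN 255 == MAX_ASN),
 * the next allocation below flushes the user TLB and starts version
 * 4: next becomes (0x3ff & ~0xff) + 0x100 == 0x400, i.e. hardware
 * ASN 0 again, but with a version no live context can match.
 */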
#ifndef __EXTERN_INLINE
#define __EXTERN_INLINE extern inline
#define __MMU_EXTERN_INLINE
#endif
extern inline unsigned long
__get_new_mm_context(struct mm_struct *mm, long cpu)
{
	unsigned long asn = cpu_last_asn(cpu);
	unsigned long next = asn + 1;

	if ((asn & HARDWARE_ASN_MASK) >= MAX_ASN) {
		/* Out of hardware ASNs: flush the user TLB and icache,
		   then begin a new ASN version.  */
		tbiap();
		imb();
		next = (asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION;
	}
	cpu_last_asn(cpu) = next;
	return next;
}
__EXTERN_INLINE void
ev5_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
	      struct task_struct *next)
{
	/* Check if our ASN is of an older version, and thus invalid. */
	unsigned long asn;
	unsigned long mmc;
	long cpu = smp_processor_id();

#ifdef CONFIG_SMP
	cpu_data[cpu].asn_lock = 1;
	barrier();
#endif
	asn = cpu_last_asn(cpu);
	mmc = next_mm->context[cpu];
	if ((mmc ^ asn) & ~HARDWARE_ASN_MASK) {
		/* Stale version: allocate a fresh ASN for this mm.  */
		mmc = __get_new_mm_context(next_mm, cpu);
		next_mm->context[cpu] = mmc;
	}
#ifdef CONFIG_SMP
	else
		cpu_data[cpu].need_new_asn = 1;
#endif

	/* Always update the PCB ASN.  Another thread may have allocated
	   a new mm->context (via flush_tlb_mm) without the ASN serial
	   number wrapping.  We have no way to detect when this is needed. */
	task_thread_info(next)->pcb.asn = mmc & HARDWARE_ASN_MASK;
}
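/*
 * Illustrative trace (added here, using made-up values): suppose
 * cpu_last_asn(cpu) is 0x207 and next_mm->context[cpu] is 0x105.
 * The versions differ (0x200 vs 0x100), so __get_new_mm_context()
 * hands out 0x208, next_mm->context[cpu] becomes 0x208, and the PCB
 * is given hardware ASN 8.
 */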
__EXTERN_INLINE void
ev4_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
	      struct task_struct *next)
{
	/* As described, ASN's are broken for TLB usage.  But we can
	   optimize for switching between threads -- if the mm is
	   unchanged from current we needn't flush.  */
	/* ??? May not be needed because EV4 PALcode recognizes that
	   ASN's are broken and does a tbiap itself on swpctx, under
	   the "Must set ASN or flush" rule.  At least this is true
	   for a 1992 SRM, reports Joseph Martin (jmartin@hlo.dec.com).
	   I'm going to leave this here anyway, just to Be Sure.  -- r~  */
	if (prev_mm != next_mm)
		tbiap();

	/* Do continue to allocate ASNs, because we can still use them
	   to avoid flushing the icache.  */
	ev5_switch_mm(prev_mm, next_mm, next);
}
extern void __load_new_mm_context(struct mm_struct *);
#ifdef CONFIG_SMP
#define check_mmu_context()					\
do {								\
	int cpu = smp_processor_id();				\
	cpu_data[cpu].asn_lock = 0;				\
	barrier();						\
	if (cpu_data[cpu].need_new_asn) {			\
		struct mm_struct * mm = current->active_mm;	\
		cpu_data[cpu].need_new_asn = 0;			\
		if (!mm->context[cpu])				\
			__load_new_mm_context(mm);		\
	}							\
} while(0)
#else
#define check_mmu_context()  do { } while(0)
#endif
__EXTERN_INLINE void
ev5_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
{
	__load_new_mm_context(next_mm);
}
__EXTERN_INLINE void
ev4_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
{
	__load_new_mm_context(next_mm);
	tbiap();
}
#define deactivate_mm(tsk,mm)	do { } while (0)
#ifdef CONFIG_ALPHA_GENERIC
# define switch_mm(a,b,c)	alpha_mv.mv_switch_mm((a),(b),(c))
# define activate_mm(x,y)	alpha_mv.mv_activate_mm((x),(y))
#else
# ifdef CONFIG_ALPHA_EV4
#  define switch_mm(a,b,c)	ev4_switch_mm((a),(b),(c))
#  define activate_mm(x,y)	ev4_activate_mm((x),(y))
# else
#  define switch_mm(a,b,c)	ev5_switch_mm((a),(b),(c))
#  define activate_mm(x,y)	ev5_activate_mm((x),(y))
# endif
#endif
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int i;

	for_each_online_cpu(i)
		mm->context[i] = 0;
	if (tsk != current)
		task_thread_info(tsk)->pcb.ptbr
		  = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
	return 0;
}
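/*
 * Sketch of the ptbr computation above (illustrative values):
 * mm->pgd is a kernel virtual address in the identity map, so
 * subtracting IDENT_ADDR (0xfffffc0000000000) yields its physical
 * address, and shifting by PAGE_SHIFT (13, for Alpha's 8KB pages)
 * turns that into the page frame number the PCB expects.  E.g. a
 * pgd at 0xfffffc0000310000 gives ptbr == 0x188.
 */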
static inline void
destroy_context(struct mm_struct *mm)
{
	/* Nothing to do.  */
}
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	task_thread_info(tsk)->pcb.ptbr
	  = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
}
#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
#undef __MMU_EXTERN_INLINE
#endif

#endif /* __ALPHA_MMU_CONTEXT_H */