/*
 * Switch an MMU context.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2013 Tensilica Inc.
 */
11 #ifndef _XTENSA_MMU_CONTEXT_H
12 #define _XTENSA_MMU_CONTEXT_H
#ifndef CONFIG_MMU
#include <asm/nommu_context.h>
#else

#include <linux/stringify.h>
#include <linux/sched.h>

#include <asm/vectors.h>

#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>
#include <asm-generic/percpu.h>
29 #if (XCHAL_HAVE_TLBS != 1)
30 # error "Linux must have an MMU!"
/*
 * Per-CPU ASID allocation counter; the low ASID_MASK bits are the most
 * recently handed-out ASID, the upper bits count generations (see
 * get_new_mmu_context() below).
 */
DECLARE_PER_CPU(unsigned long, asid_cache);
#define cpu_asid_cache(cpu) per_cpu(asid_cache, cpu)
/*
 * NO_CONTEXT is the invalid ASID value that we don't ever assign to
 * any user or kernel context. We use the reserved values in the
 * ASID_INSERT macro below.
 *
 *   0 invalid
 *   1 kernel
 *   2 reserved
 *   3 reserved
 *   4... user
 */
#define NO_CONTEXT	0
#define ASID_USER_FIRST	4
#define ASID_MASK	((1 << XCHAL_MMU_ASID_BITS) - 1)
/* Rings 3..1 get fixed ASIDs 3,2,1; ring 0 (user) gets the dynamic ASID. */
#define ASID_INSERT(x)	(0x03020001 | (((x) & ASID_MASK) << 8))
/* No MMU setup work is needed in this configuration. */
static inline void init_mmu(void)
{
}
/*
 * Write @val into the RASID special register (the ring -> ASID mapping).
 * The isync ensures the new mapping is in effect before any subsequent
 * memory access uses it.
 */
static inline void set_rasid_register (unsigned long val)
{
	__asm__ __volatile__ (" wsr %0, rasid\n\t"
			      " isync\n" : : "a" (val));
}
/* Read back the current value of the RASID special register. */
static inline unsigned long get_rasid_register (void)
{
	unsigned long tmp;
	__asm__ __volatile__ (" rsr %0, rasid\n\t" : "=a" (tmp));
	return tmp;
}
72 static inline void get_new_mmu_context(struct mm_struct
*mm
, unsigned int cpu
)
74 unsigned long asid
= cpu_asid_cache(cpu
);
75 if ((++asid
& ASID_MASK
) == 0) {
77 * Start new asid cycle; continue counting with next
78 * incarnation bits; skipping over 0, 1, 2, 3.
80 local_flush_tlb_all();
81 asid
+= ASID_USER_FIRST
;
83 cpu_asid_cache(cpu
) = asid
;
84 mm
->context
.asid
[cpu
] = asid
;
85 mm
->context
.cpu
= cpu
;
88 static inline void get_mmu_context(struct mm_struct
*mm
, unsigned int cpu
)
91 * Check if our ASID is of an older version and thus invalid.
95 unsigned long asid
= mm
->context
.asid
[cpu
];
97 if (asid
== NO_CONTEXT
||
98 ((asid
^ cpu_asid_cache(cpu
)) & ~ASID_MASK
))
99 get_new_mmu_context(mm
, cpu
);
103 static inline void activate_context(struct mm_struct
*mm
, unsigned int cpu
)
105 get_mmu_context(mm
, cpu
);
106 set_rasid_register(ASID_INSERT(mm
->context
.asid
[cpu
]));
107 invalidate_page_directory();
111 * Initialize the context related info for a new mm_struct
112 * instance. Valid cpu values are 0..(NR_CPUS-1), so initializing
113 * to -1 says the process has never run on any core.
116 static inline int init_new_context(struct task_struct
*tsk
,
117 struct mm_struct
*mm
)
120 for_each_possible_cpu(cpu
) {
121 mm
->context
.asid
[cpu
] = NO_CONTEXT
;
123 mm
->context
.cpu
= -1;
127 static inline void switch_mm(struct mm_struct
*prev
, struct mm_struct
*next
,
128 struct task_struct
*tsk
)
130 unsigned int cpu
= smp_processor_id();
131 int migrated
= next
->context
.cpu
!= cpu
;
132 /* Flush the icache if we migrated to a new core. */
134 __invalidate_icache_all();
135 next
->context
.cpu
= cpu
;
137 if (migrated
|| prev
!= next
)
138 activate_context(next
, cpu
);
/* Activating an mm is a full switch with no outgoing task. */
#define activate_mm(prev, next)	switch_mm((prev), (next), NULL)
/* Nothing to do when a task drops an mm on this architecture. */
#define deactivate_mm(tsk, mm)	do { } while (0)
/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static inline void destroy_context(struct mm_struct *mm)
{
	invalidate_page_directory();
}
/* Nothing to do for lazy TLB mode on this architecture. */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/* Nothing to do. */
}
160 #endif /* CONFIG_MMU */
161 #endif /* _XTENSA_MMU_CONTEXT_H */