Staging: hv: mousevsc: Change the allocation flags to reflect interrupt context
[zen-stable.git] / arch / xtensa / include / asm / mmu_context.h
blobdbd8731a876aec90c2765df2dd1a1c6d5e2310d0
/*
 * include/asm-xtensa/mmu_context.h
 *
 * Switch an MMU context.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */
13 #ifndef _XTENSA_MMU_CONTEXT_H
14 #define _XTENSA_MMU_CONTEXT_H
16 #ifndef CONFIG_MMU
17 #include <asm/nommu_context.h>
18 #else
20 #include <linux/stringify.h>
21 #include <linux/sched.h>
23 #include <variant/core.h>
25 #include <asm/pgtable.h>
26 #include <asm/cacheflush.h>
27 #include <asm/tlbflush.h>
28 #include <asm-generic/mm_hooks.h>
30 #if (XCHAL_HAVE_TLBS != 1)
31 # error "Linux must have an MMU!"
32 #endif
/*
 * Global ASID allocator state: the low ASID_MASK bits are the most
 * recently handed-out ASID, the bits above are the allocation
 * "generation" used by switch_mm() to detect stale contexts.
 */
extern unsigned long asid_cache;

/*
 * NO_CONTEXT is the invalid ASID value that we don't ever assign to
 * any user or kernel context.
 *
 * 0 invalid
 * 1 kernel
 * 2 reserved
 * 3 reserved
 * 4...255 available
 */

#define NO_CONTEXT	0
#define ASID_USER_FIRST	4
/* Mask selecting the hardware ASID bits of an mm->context value. */
#define ASID_MASK	((1 << XCHAL_MMU_ASID_BITS) - 1)
/*
 * Build a RASID register image: byte 0 holds the fixed kernel ASID (1),
 * byte 1 receives the mm's ASID, bytes 2 and 3 hold the reserved
 * ASIDs 2 and 3 (matching the table above).
 */
#define ASID_INSERT(x)	(0x03020001 | (((x) & ASID_MASK) << 8))
/*
 * Write 'val' (an ASID_INSERT()-built image) into the RASID special
 * register.  The trailing isync serializes the pipeline so the new
 * ASID assignment takes effect before any following memory access.
 */
static inline void set_rasid_register (unsigned long val)
{
	__asm__ __volatile__ (" wsr %0, "__stringify(RASID)"\n\t"
			      " isync\n" : : "a" (val));
}
58 static inline unsigned long get_rasid_register (void)
60 unsigned long tmp;
61 __asm__ __volatile__ (" rsr %0,"__stringify(RASID)"\n\t" : "=a" (tmp));
62 return tmp;
65 static inline void
66 __get_new_mmu_context(struct mm_struct *mm)
68 extern void flush_tlb_all(void);
69 if (! (++asid_cache & ASID_MASK) ) {
70 flush_tlb_all(); /* start new asid cycle */
71 asid_cache += ASID_USER_FIRST;
73 mm->context = asid_cache;
/*
 * Make 'mm' the active context on this CPU: program its ASID into the
 * RASID register, then invalidate the cached page directory so that
 * subsequent translations use the new mm's page tables.  The order
 * matters: the ASID must be live before stale PD state is dropped.
 */
static inline void
__load_mmu_context(struct mm_struct *mm)
{
	set_rasid_register(ASID_INSERT(mm->context));
	invalidate_page_directory();
}
84 * Initialize the context related info for a new mm_struct
85 * instance.
88 static inline int
89 init_new_context(struct task_struct *tsk, struct mm_struct *mm)
91 mm->context = NO_CONTEXT;
92 return 0;
/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void
activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	/* Unconditionally get a new ASID. */
	__get_new_mmu_context(next);
	__load_mmu_context(next);
}
109 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
110 struct task_struct *tsk)
112 unsigned long asid = asid_cache;
114 /* Check if our ASID is of an older version and thus invalid */
116 if (next->context == NO_CONTEXT || ((next->context^asid) & ~ASID_MASK))
117 __get_new_mmu_context(next);
119 __load_mmu_context(next);
/* Nothing to do when an mm is deactivated on this architecture. */
#define deactivate_mm(tsk, mm)	do { } while(0)
/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 *
 * NOTE(review): the mm's ASID is not explicitly released here —
 * presumably it simply lapses at the next generation rollover in
 * __get_new_mmu_context(); confirm against the allocator's design.
 */
static inline void destroy_context(struct mm_struct *mm)
{
	invalidate_page_directory();
}
/* Lazy-TLB hook invoked by the core scheduler; a no-op on xtensa. */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/* Nothing to do. */
}
140 #endif /* CONFIG_MMU */
141 #endif /* _XTENSA_MMU_CONTEXT_H */