x86: introduce native_set_pte_atomic() on 64-bit too
[wrt350n-kernel.git] / include / asm-s390 / mmu_context.h
bloba77d4ba3c8ebc5c63ef4fbaa60104ffa6d296fc3
1 /*
2 * include/asm-s390/mmu_context.h
4 * S390 version
6 * Derived from "include/asm-i386/mmu_context.h"
7 */
9 #ifndef __S390_MMU_CONTEXT_H
10 #define __S390_MMU_CONTEXT_H
12 #include <asm/pgalloc.h>
13 #include <asm-generic/mm_hooks.h>
15 static inline int init_new_context(struct task_struct *tsk,
16 struct mm_struct *mm)
18 mm->context = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
19 #ifdef CONFIG_64BIT
20 mm->context |= _ASCE_TYPE_REGION3;
21 #endif
22 return 0;
/* No per-mm teardown is needed on s390; context is just bits in mm->context. */
#define destroy_context(mm) do { } while (0)

/*
 * Pick the load-control opcode for the build: 31-bit uses "lctl",
 * 64-bit (s390x) uses the 64-bit form "lctlg".
 */
#ifndef __s390x__
#define LCTL_OPCODE "lctl"
#else
#define LCTL_OPCODE "lctlg"
#endif
33 static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
35 S390_lowcore.user_asce = mm->context | __pa(mm->pgd);
36 if (switch_amode) {
37 /* Load primary space page table origin. */
38 pgd_t *shadow_pgd = get_shadow_table(mm->pgd) ? : mm->pgd;
39 S390_lowcore.user_exec_asce = mm->context | __pa(shadow_pgd);
40 asm volatile(LCTL_OPCODE" 1,1,%0\n"
41 : : "m" (S390_lowcore.user_exec_asce) );
42 } else
43 /* Load home space page table origin. */
44 asm volatile(LCTL_OPCODE" 13,13,%0"
45 : : "m" (S390_lowcore.user_asce) );
48 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
49 struct task_struct *tsk)
51 if (unlikely(prev == next))
52 return;
53 cpu_set(smp_processor_id(), next->cpu_vm_mask);
54 update_mm(next, tsk);
/* s390 needs no lazy-TLB bookkeeping or deactivate step; both are no-ops. */
#define enter_lazy_tlb(mm,tsk) do { } while (0)
#define deactivate_mm(tsk,mm) do { } while (0)
60 static inline void activate_mm(struct mm_struct *prev,
61 struct mm_struct *next)
63 switch_mm(prev, next, current);
64 set_fs(current->thread.mm_segment);
67 #endif /* __S390_MMU_CONTEXT_H */