/*
 *  include/asm-s390/mmu_context.h
 *
 *  S390 version
 *
 *  Derived from "include/asm-i386/mmu_context.h"
 */

#ifndef __S390_MMU_CONTEXT_H
#define __S390_MMU_CONTEXT_H

#include <asm/pgalloc.h>
#include <asm-generic/mm_hooks.h>
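/*
 * asm-generic/mm_hooks.h supplies the empty arch_dup_mmap() and
 * arch_exit_mmap() hooks; s390 needs no architecture-specific work
 * at those points.
 */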
/*
 * Get a new mmu context: S390 doesn't know about contexts.
 */
#define init_new_context(tsk,mm)	0

#define destroy_context(mm)		do { } while (0)
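/*
 * Both can be no-ops because an s390 address space is identified
 * solely by the origin of its page table, which switch_mm() loads
 * into the control registers as an address space control element
 * (ASCE); there is no ASID-style context number to set up or tear
 * down.
 */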
#ifndef __s390x__
#define LCTL_OPCODE "lctl"
#define PGTABLE_BITS (_SEGMENT_TABLE|USER_STD_MASK)
#else
#define LCTL_OPCODE "lctlg"
#define PGTABLE_BITS (_REGION_TABLE|USER_STD_MASK)
#endif
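/*
 * 31-bit kernels load the 32-bit control registers with "lctl" and
 * run user space off a segment table; 64-bit kernels need "lctlg"
 * for the full 64-bit control registers and use a region table,
 * hence the different table-type bits in PGTABLE_BITS.
 */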
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* Nothing to do on s390. */
}
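/*
 * switch_mm() makes next's page tables current by loading their
 * origin (the ASCE) into the address space control registers:
 * CR1 holds the primary space ASCE, CR7 the secondary space ASCE
 * and CR13 the home space ASCE. Which registers get loaded depends
 * on whether a shadow page table (execute protection) exists and on
 * the address space mode, as the branches below show.
 */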
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	pgd_t *shadow_pgd = get_shadow_pgd(next->pgd);

	if (prev != next) {
		S390_lowcore.user_asce = (__pa(next->pgd) & PAGE_MASK) |
					 PGTABLE_BITS;
		if (shadow_pgd) {
			/* Load primary/secondary space page table origin. */
			S390_lowcore.user_exec_asce =
				(__pa(shadow_pgd) & PAGE_MASK) | PGTABLE_BITS;
			asm volatile(LCTL_OPCODE" 1,1,%0\n"
				     LCTL_OPCODE" 7,7,%1"
				     : : "m" (S390_lowcore.user_exec_asce),
					 "m" (S390_lowcore.user_asce) );
		} else if (switch_amode) {
			/* Load primary space page table origin. */
			asm volatile(LCTL_OPCODE" 1,1,%0"
				     : : "m" (S390_lowcore.user_asce) );
		} else
			/* Load home space page table origin. */
			asm volatile(LCTL_OPCODE" 13,13,%0"
				     : : "m" (S390_lowcore.user_asce) );
	}
	/* Note this CPU as a user of next's address space. */
	cpu_set(smp_processor_id(), next->cpu_vm_mask);
}
#define deactivate_mm(tsk,mm)	do { } while (0)
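/*
 * activate_mm() runs when a new mm becomes current at exec time; on
 * top of the usual switch_mm() it re-applies the task's saved
 * mm_segment via set_fs() so that user accesses run in the address
 * space mode recorded in thread.mm_segment.
 */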
static inline void activate_mm(struct mm_struct *prev,
			       struct mm_struct *next)
{
	switch_mm(prev, next, current);
	set_fs(current->thread.mm_segment);
}

#endif /* __S390_MMU_CONTEXT_H */