/*
 * include/asm-s390/mmu_context.h
 *
 * Derived from "include/asm-i386/mmu_context.h"
 */
9 #ifndef __S390_MMU_CONTEXT_H
10 #define __S390_MMU_CONTEXT_H
12 #include <asm/pgalloc.h>
13 #include <asm/uaccess.h>
14 #include <asm/tlbflush.h>
15 #include <asm-generic/mm_hooks.h>
17 static inline int init_new_context(struct task_struct
*tsk
,
20 atomic_set(&mm
->context
.attach_count
, 0);
21 mm
->context
.flush_mm
= 0;
22 mm
->context
.asce_bits
= _ASCE_TABLE_LENGTH
| _ASCE_USER_BITS
;
24 mm
->context
.asce_bits
|= _ASCE_TYPE_REGION3
;
26 if (current
->mm
&& current
->mm
->context
.alloc_pgste
) {
28 * alloc_pgste indicates, that any NEW context will be created
29 * with extended page tables. The old context is unchanged. The
30 * page table allocation and the page table operations will
31 * look at has_pgste to distinguish normal and extended page
32 * tables. The only way to create extended page tables is to
33 * set alloc_pgste and then create a new context (e.g. dup_mm).
34 * The page table allocation is called after init_new_context
35 * and if has_pgste is set, it will create extended page
38 mm
->context
.has_pgste
= 1;
39 mm
->context
.alloc_pgste
= 1;
41 mm
->context
.has_pgste
= 0;
42 mm
->context
.alloc_pgste
= 0;
44 mm
->context
.asce_limit
= STACK_TOP_MAX
;
45 crst_table_init((unsigned long *) mm
->pgd
, pgd_entry_type(mm
));
/* No per-mm teardown is required on s390. */
#define destroy_context(mm)		do { } while (0)
/*
 * Mnemonic used to load control registers from inline assembly:
 * 32-bit "lctl" on 31-bit kernels, "lctlg" on 64-bit kernels.
 * (The #ifndef/#else/#endif was lost in extraction — source numbering
 * jumps 51 -> 52 -> 54; without it the second #define redefines the
 * first unconditionally.)
 */
#ifndef __s390x__
#define LCTL_OPCODE "lctl"
#else
#define LCTL_OPCODE "lctlg"
#endif
57 static inline void update_mm(struct mm_struct
*mm
, struct task_struct
*tsk
)
61 S390_lowcore
.user_asce
= mm
->context
.asce_bits
| __pa(pgd
);
62 if (user_mode
!= HOME_SPACE_MODE
) {
63 /* Load primary space page table origin. */
64 asm volatile(LCTL_OPCODE
" 1,1,%0\n"
65 : : "m" (S390_lowcore
.user_asce
) );
67 /* Load home space page table origin. */
68 asm volatile(LCTL_OPCODE
" 13,13,%0"
69 : : "m" (S390_lowcore
.user_asce
) );
70 set_fs(current
->thread
.mm_segment
);
73 static inline void switch_mm(struct mm_struct
*prev
, struct mm_struct
*next
,
74 struct task_struct
*tsk
)
76 cpumask_set_cpu(smp_processor_id(), mm_cpumask(next
));
78 atomic_dec(&prev
->context
.attach_count
);
79 WARN_ON(atomic_read(&prev
->context
.attach_count
) < 0);
80 atomic_inc(&next
->context
.attach_count
);
81 /* Check for TLBs not flushed yet */
82 if (next
->context
.flush_mm
)
/* Lazy TLB mode and mm deactivation are no-ops on s390. */
#define enter_lazy_tlb(mm,tsk)	do { } while (0)
#define deactivate_mm(tsk,mm)	do { } while (0)
89 static inline void activate_mm(struct mm_struct
*prev
,
90 struct mm_struct
*next
)
92 switch_mm(prev
, next
, current
);
95 #endif /* __S390_MMU_CONTEXT_H */