#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>

/*
 * Flush all TLB entries on the local CPU.
 */
static inline void __tlb_flush_local(void)
{
	asm volatile("ptlb" : : : "memory");
}

/*
 * Flush TLB entries for a specific ASCE on all CPUs
 */
static inline void __tlb_flush_idte(unsigned long asce)
{
	/* Global TLB flush for the mm */
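	/*
	 * The .insn below encodes IDTE (invalidate DAT table entry,
	 * opcode 0xb98e); 2048 sets the clearing-by-ASCE control, and
	 * the last operand field is the local-clearing control:
	 * 0 clears matching TLB entries on all CPUs.
	 */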
24 " .insn rrf,0xb98e0000,0,%0,%1,0"
25 : : "a" (2048), "a" (asce
) : "cc");

/*
 * Flush TLB entries for a specific ASCE on the local CPU
 */
static inline void __tlb_flush_idte_local(unsigned long asce)
{
	/* Local TLB flush for the mm */
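	/* same IDTE, but local-clearing control 1 limits it to this CPU */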
35 " .insn rrf,0xb98e0000,0,%0,%1,1"
36 : : "a" (2048), "a" (asce
) : "cc");

#ifdef CONFIG_SMP
void smp_ptlb_all(void);

/*
 * Flush all TLB entries on all CPUs.
 */
static inline void __tlb_flush_global(void)
{
	register unsigned long reg2 asm("2");
	register unsigned long reg3 asm("3");
	register unsigned long reg4 asm("4");
	long dummy;

	dummy = 0;
	reg2 = reg3 = 0;
	reg4 = ((unsigned long) &dummy) + 1;
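	/*
	 * csp (compare and swap and purge): reg2/reg3 and the dummy
	 * word are all zero, so the compare always succeeds, the swap
	 * is a no-op, and the TLBs of all CPUs in the configuration
	 * are purged.
	 */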
57 : : "d" (reg2
), "d" (reg3
), "d" (reg4
), "m" (dummy
) : "cc" );

/*
 * Flush TLB entries for a specific mm on all CPUs (in case gmap is used
 * this implicates multiple ASCEs!).
 */
static inline void __tlb_flush_full(struct mm_struct *mm)
{
	preempt_disable();
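	/*
	 * Adding 0x10000 bumps the upper 16 bits of attach_count and
	 * marks a TLB flush in progress; a CPU switching to this mm
	 * waits in finish_arch_post_lock_switch() until the count
	 * drops back to zero.
	 */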
	atomic_add(0x10000, &mm->context.attach_count);
	if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
		/* Local TLB flush */
		__tlb_flush_local();
	} else {
		/* Global TLB flush */
		__tlb_flush_global();
		/* Reset TLB flush mask */
		if (MACHINE_HAS_TLB_LC)
			cpumask_copy(mm_cpumask(mm),
				     &mm->context.cpu_attach_mask);
	}
	atomic_sub(0x10000, &mm->context.attach_count);
	preempt_enable();
}

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
{
	int active, count;

	preempt_disable();
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
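	/*
	 * The lower 16 bits of attach_count hold the number of CPUs
	 * the mm is attached to. If that is at most this task's own
	 * attachment (active) and the cpumask names only this CPU,
	 * a local IDTE is sufficient.
	 */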
	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
		__tlb_flush_idte_local(asce);
	} else {
		if (MACHINE_HAS_IDTE)
			__tlb_flush_idte(asce);
		else
			__tlb_flush_global();
		/* Reset TLB flush mask */
		if (MACHINE_HAS_TLB_LC)
			cpumask_copy(mm_cpumask(mm),
				     &mm->context.cpu_attach_mask);
	}
	atomic_sub(0x10000, &mm->context.attach_count);
	preempt_enable();
}

static inline void __tlb_flush_kernel(void)
{
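	/*
	 * The kernel ASCE is the pgd origin combined with the ASCE
	 * type and table-length bits from init_mm.context.asce_bits.
	 */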
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) init_mm.pgd |
				 init_mm.context.asce_bits);
	else
		__tlb_flush_global();
}
#else
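/* With only one CPU, a local flush is as good as a global one. */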
#define __tlb_flush_global()	__tlb_flush_local()
#define __tlb_flush_full(mm)	__tlb_flush_local()

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
{
	if (MACHINE_HAS_TLB_LC)
		__tlb_flush_idte_local(asce);
	else
		__tlb_flush_local();
}

static inline void __tlb_flush_kernel(void)
{
	if (MACHINE_HAS_TLB_LC)
		__tlb_flush_idte_local((unsigned long) init_mm.pgd |
				       init_mm.context.asce_bits);
	else
		__tlb_flush_local();
}
#endif

static inline void __tlb_flush_mm(struct mm_struct * mm)
{
	/*
	 * If the machine has IDTE we prefer to do a per mm flush
	 * on all cpus instead of doing a local flush if the mm
	 * only ran on the local cpu.
	 */
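	/*
	 * A non-empty gmap_list means KVM guest mappings reference
	 * this mm through additional ASCEs, so a single-ASCE flush
	 * is not enough and __tlb_flush_full() is used instead.
	 */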
	if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
		__tlb_flush_asce(mm, (unsigned long) mm->pgd |
				 mm->context.asce_bits);
	else
		__tlb_flush_full(mm);
}

static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
{
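	/* Flush only if a deferred pte update set the flush_mm flag. */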
	if (mm->context.flush_mm) {
		__tlb_flush_mm(mm);
		mm->context.flush_mm = 0;
	}
}

/*
 * TLB flushing:
 *  flush_tlb() - flushes the current mm struct TLBs
 *  flush_tlb_all() - flushes all processes TLBs
 *  flush_tlb_mm(mm) - flushes the specified mm context TLB's
 *  flush_tlb_page(vma, vmaddr) - flushes one page
 *  flush_tlb_range(vma, start, end) - flushes a range of pages
 *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
 */

/*
 * flush_tlb_mm goes together with ptep_set_wrprotect for the
 * copy_page_range operation and flush_tlb_range is related to
 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
 * ptep_get_and_clear do not flush the TLBs directly if the mm has
 * only one user. At the end of the update the flush_tlb_mm and
 * flush_tlb_range functions need to do the flush.
 */
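/*
 * flush_tlb, flush_tlb_all and flush_tlb_page can be no-ops on s390:
 * the pte primitives use ipte/idte, which invalidate the entry and
 * flush the TLBs in one operation.
 */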
#define flush_tlb()				do { } while (0)
#define flush_tlb_all()				do { } while (0)
#define flush_tlb_page(vma, addr)		do { } while (0)

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	__tlb_flush_mm_lazy(mm);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__tlb_flush_mm_lazy(vma->vm_mm);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	__tlb_flush_kernel();
}

#endif /* _S390_TLBFLUSH_H */