/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Flush all TLB entries on the local CPU.
 */
static inline void __tlb_flush_local(void)
{
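	/*
	 * PTLB purges all TLB entries formed by this CPU; the "memory"
	 * clobber doubles as a compiler barrier so memory accesses are
	 * not reordered around the flush.
	 */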
	asm volatile("ptlb" : : : "memory");
}

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_idte(unsigned long asce)
{
	unsigned long opt;

	opt = IDTE_PTOA;
	if (MACHINE_HAS_TLB_GUEST)
		opt |= IDTE_GUEST_ASCE;
	/* Global TLB flush for the mm */
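	/*
	 * The .insn below hand-encodes IDTE (INVALIDATE DAT TABLE ENTRY,
	 * opcode 0xb98e), presumably so the file also builds with
	 * assemblers that do not know the mnemonic.
	 */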
	asm volatile(
		"	.insn	rrf,0xb98e0000,0,%0,%1,0"
		: : "a" (opt), "a" (asce) : "cc");
}

void smp_ptlb_all(void);

/*
 * Flush all TLB entries on all CPUs.
 */
static inline void __tlb_flush_global(void)
{
	unsigned int dummy = 0;

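	/*
	 * CSP (COMPARE AND SWAP AND PURGE) updates the dummy word and, as
	 * a side effect, purges the TLBs of all CPUs in the configuration,
	 * so no explicit loop over CPUs is needed here.
	 */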
	csp(&dummy, 0, 0);
}

/*
 * Flush TLB entries for a specific mm on all CPUs (in case gmap is used
 * this implies multiple ASCEs!).
 */
static inline void __tlb_flush_mm(struct mm_struct *mm)
{
	unsigned long gmap_asce;

	/*
	 * If the machine has IDTE we prefer to do a per mm flush
	 * on all cpus instead of doing a local flush if the mm
	 * only ran on the local cpu.
	 */
	preempt_disable();
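	/*
	 * Raising flush_count advertises that a flush is in progress, and
	 * resetting the cpumask shrinks the set of CPUs later flushes must
	 * consider; barrier() keeps the compiler from reordering the mask
	 * update past the ASCE reads below. (A reading of the protocol,
	 * not authoritative documentation.)
	 */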
	atomic_inc(&mm->context.flush_count);
	/* Reset TLB flush mask */
	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
	barrier();
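	/*
	 * gmap_asce encodes the guest-mapping state: 0 when no gmap exists,
	 * the gmap's ASCE when there is exactly one, and -1UL when several
	 * gmaps share the mm, in which case only a global flush is safe
	 * (an assumption based on how the gmap code maintains this field).
	 */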
	gmap_asce = READ_ONCE(mm->context.gmap_asce);
	if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
		if (gmap_asce)
			__tlb_flush_idte(gmap_asce);
		__tlb_flush_idte(mm->context.asce);
	} else {
		/* Global TLB flush */
		__tlb_flush_global();
	}
	atomic_dec(&mm->context.flush_count);
	preempt_enable();
}
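
/*
 * Flush the kernel address space: use IDTE on the kernel ASCE when the
 * facility is available, otherwise fall back to a full global flush.
 */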
static inline void __tlb_flush_kernel(void)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(init_mm.context.asce);
	else
		__tlb_flush_global();
}
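
/*
 * Perform a deferred flush: as described in the comment further down,
 * ptep_set_wrprotect and ptep_get_and_clear do not flush the TLBs
 * directly when the mm has only one user; they set context.flush_mm
 * instead, and this helper performs the pending flush once, under the
 * context lock.
 */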
static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
{
	spin_lock(&mm->context.lock);
	if (mm->context.flush_mm) {
		mm->context.flush_mm = 0;
		__tlb_flush_mm(mm);
	}
	spin_unlock(&mm->context.lock);
}

/*
 * TLB flushing:
 *  flush_tlb() - flushes the current mm struct TLBs
 *  flush_tlb_all() - flushes all processes' TLBs
 *  flush_tlb_mm(mm) - flushes the specified mm context TLBs
 *  flush_tlb_page(vma, vmaddr) - flushes one page
 *  flush_tlb_range(vma, start, end) - flushes a range of pages
 *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
 */

/*
 * flush_tlb_mm goes together with ptep_set_wrprotect for the
 * copy_page_range operation and flush_tlb_range is related to
 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
 * ptep_get_and_clear do not flush the TLBs directly if the mm has
 * only one user. At the end of the update the flush_tlb_mm and
 * flush_tlb_range functions need to do the flush.
 */
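/*
 * The three hooks below can therefore stay empty: the page table
 * primitives named above take care of flushing where it is required,
 * so only the mm/range/kernel variants further down have work to do.
 */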
#define flush_tlb()				do { } while (0)
#define flush_tlb_all()				do { } while (0)
#define flush_tlb_page(vma, addr)		do { } while (0)

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	__tlb_flush_mm_lazy(mm);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__tlb_flush_mm_lazy(vma->vm_mm);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	__tlb_flush_kernel();
}

#endif /* _S390_TLBFLUSH_H */