arch/powerpc/mm/tlb-radix.c
/*
 * TLB flush routines for radix kernels.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>

#include <asm/ppc-opcode.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

static DEFINE_RAW_SPINLOCK(native_tlbie_lock);

#define RIC_FLUSH_TLB 0
#define RIC_FLUSH_PWC 1
#define RIC_FLUSH_ALL 2
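
/*
 * RIC is the invalidation-control field of tlbie/tlbiel: it selects
 * whether to flush just the TLB entries, just the page-walk cache,
 * or both.
 */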

static inline void __tlbiel_pid(unsigned long pid, int set,
                                unsigned long ric)
{
        unsigned long rb, rs, prs, r;

        rb = PPC_BIT(53); /* IS = 1 */
        rb |= set << PPC_BITLSHIFT(51);
        rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
        prs = 1; /* process scoped */
        r = 1;   /* radix format */

        asm volatile("ptesync" : : : "memory");
        asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        asm volatile("ptesync" : : : "memory");
}
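
/*
 * tlbiel only invalidates translations cached by the executing CPU,
 * one congruence class ("set") at a time, so a full local flush has
 * to iterate over every set, as _tlbiel_pid() below does.
 */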
/*
 * We use 128 sets in radix mode and 256 sets in hpt mode.
 */
static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
        int set;

        for (set = 0; set < POWER9_TLB_SETS_RADIX; set++)
                __tlbiel_pid(pid, set, ric);
}

static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
{
        unsigned long rb, rs, prs, r;

        rb = PPC_BIT(53); /* IS = 1 */
        rs = pid << PPC_BITLSHIFT(31);
        prs = 1; /* process scoped */
        r = 1;   /* radix format */

        asm volatile("ptesync" : : : "memory");
        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}
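
/*
 * The *_va variants below invalidate a single effective address: rb
 * carries the page's EA bits plus the actual-page-size (AP) field,
 * while rs still selects the target PID. _tlbiel_va() is CPU-local;
 * _tlbie_va() broadcasts and so needs the eieio/tlbsync/ptesync
 * completion sequence.
 */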
static inline void _tlbiel_va(unsigned long va, unsigned long pid,
                              unsigned long ap, unsigned long ric)
{
        unsigned long rb, rs, prs, r;

        rb = va & ~(PPC_BITMASK(52, 63));
        rb |= ap << PPC_BITLSHIFT(58);
        rs = pid << PPC_BITLSHIFT(31);
        prs = 1; /* process scoped */
        r = 1;   /* radix format */

        asm volatile("ptesync" : : : "memory");
        asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        asm volatile("ptesync" : : : "memory");
}

static inline void _tlbie_va(unsigned long va, unsigned long pid,
                             unsigned long ap, unsigned long ric)
{
        unsigned long rb, rs, prs, r;

        rb = va & ~(PPC_BITMASK(52, 63));
        rb |= ap << PPC_BITLSHIFT(58);
        rs = pid << PPC_BITLSHIFT(31);
        prs = 1; /* process scoped */
        r = 1;   /* radix format */

        asm volatile("ptesync" : : : "memory");
        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the TLB entries of the specified mm context
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
void radix__local_flush_tlb_mm(struct mm_struct *mm)
{
        unsigned long pid;

        preempt_disable();
        pid = mm->context.id;
        if (pid != MMU_NO_CONTEXT)
                _tlbiel_pid(pid, RIC_FLUSH_ALL);
        preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_mm);

void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
        unsigned long pid;
        struct mm_struct *mm = tlb->mm;

        preempt_disable();

        pid = mm->context.id;
        if (pid != MMU_NO_CONTEXT)
                _tlbiel_pid(pid, RIC_FLUSH_PWC);

        preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_pwc);

void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
                                       int psize)
{
        unsigned long pid;
        unsigned long ap = mmu_get_ap(psize);

        preempt_disable();
        pid = mm ? mm->context.id : 0;
        if (pid != MMU_NO_CONTEXT)
                _tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
        preempt_enable();
}

void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
        /* need the return fix for nohash.c */
        if (vma && is_vm_hugetlb_page(vma))
                return __local_flush_hugetlb_page(vma, vmaddr);
#endif
        radix__local_flush_tlb_page_psize(vma ? vma->vm_mm : NULL, vmaddr,
                                          mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__local_flush_tlb_page);

#ifdef CONFIG_SMP
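/*
 * On SMP, flushes for an mm that has run on other CPUs must use the
 * broadcast tlbie; mm_is_thread_local() lets the code fall back to
 * the cheaper CPU-local tlbiel. CPUs without MMU_FTR_LOCKLESS_TLBIE
 * additionally serialise tlbie through native_tlbie_lock.
 */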
void radix__flush_tlb_mm(struct mm_struct *mm)
{
        unsigned long pid;

        preempt_disable();
        pid = mm->context.id;
        if (unlikely(pid == MMU_NO_CONTEXT))
                goto no_context;

        if (!mm_is_thread_local(mm)) {
                int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

                if (lock_tlbie)
                        raw_spin_lock(&native_tlbie_lock);
                _tlbie_pid(pid, RIC_FLUSH_ALL);
                if (lock_tlbie)
                        raw_spin_unlock(&native_tlbie_lock);
        } else
                _tlbiel_pid(pid, RIC_FLUSH_ALL);
no_context:
        preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_mm);
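
/*
 * The PWC flush variants invalidate only the page-walk cache, for
 * the case where intermediate page-table pages are being freed while
 * the leaf translations themselves have already been flushed.
 */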
void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
        unsigned long pid;
        struct mm_struct *mm = tlb->mm;

        preempt_disable();

        pid = mm->context.id;
        if (unlikely(pid == MMU_NO_CONTEXT))
                goto no_context;

        if (!mm_is_thread_local(mm)) {
                int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

                if (lock_tlbie)
                        raw_spin_lock(&native_tlbie_lock);
                _tlbie_pid(pid, RIC_FLUSH_PWC);
                if (lock_tlbie)
                        raw_spin_unlock(&native_tlbie_lock);
        } else
                _tlbiel_pid(pid, RIC_FLUSH_PWC);
no_context:
        preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_pwc);

void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
                                 int psize)
{
        unsigned long pid;
        unsigned long ap = mmu_get_ap(psize);

        preempt_disable();
        pid = mm ? mm->context.id : 0;
        if (unlikely(pid == MMU_NO_CONTEXT))
                goto bail;
        if (!mm_is_thread_local(mm)) {
                int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

                if (lock_tlbie)
                        raw_spin_lock(&native_tlbie_lock);
                _tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
                if (lock_tlbie)
                        raw_spin_unlock(&native_tlbie_lock);
        } else
                _tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
bail:
        preempt_enable();
}

void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
        if (vma && is_vm_hugetlb_page(vma))
                return flush_hugetlb_page(vma, vmaddr);
#endif
        radix__flush_tlb_page_psize(vma ? vma->vm_mm : NULL, vmaddr,
                                    mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__flush_tlb_page);

#endif /* CONFIG_SMP */

void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

        if (lock_tlbie)
                raw_spin_lock(&native_tlbie_lock);
        _tlbie_pid(0, RIC_FLUSH_ALL);
        if (lock_tlbie)
                raw_spin_unlock(&native_tlbie_lock);
}
EXPORT_SYMBOL(radix__flush_tlb_kernel_range);
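
/*
 * Note that the kernel-range flush above does not use start/end:
 * kernel translations live under PID 0, so a full PID-0 flush covers
 * any requested range.
 */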

/*
 * Currently, for range flushing, we just do a full mm flush, because
 * we use this in code paths where we don't track the page size.
 */
void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                            unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        radix__flush_tlb_mm(mm);
}
EXPORT_SYMBOL(radix__flush_tlb_range);

static int radix_get_mmu_psize(int page_size)
{
        int psize;

        if (page_size == (1UL << mmu_psize_defs[mmu_virtual_psize].shift))
                psize = mmu_virtual_psize;
        else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_2M].shift))
                psize = MMU_PAGE_2M;
        else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_1G].shift))
                psize = MMU_PAGE_1G;
        else
                return -1;
        return psize;
}
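
/*
 * radix_get_mmu_psize() maps a raw byte size back to an mmu_psize
 * index; -1 means a size we do not handle, which radix__tlb_flush()
 * below turns into a full mm flush.
 */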
void radix__tlb_flush(struct mmu_gather *tlb)
{
        int psize = 0;
        struct mm_struct *mm = tlb->mm;
        int page_size = tlb->page_size;

        psize = radix_get_mmu_psize(page_size);
        /*
         * if page size is not something we understand, do a full mm flush
         */
        if (psize != -1 && !tlb->fullmm && !tlb->need_flush_all)
                radix__flush_tlb_range_psize(mm, tlb->start, tlb->end, psize);
        else
                radix__flush_tlb_mm(mm);
}

#define TLB_FLUSH_ALL -1UL

/*
 * Number of pages above which we will do a bcast tlbie. Just a
 * number at this point copied from x86.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
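
/*
 * Ranges spanning more than tlb_single_page_flush_ceiling pages are
 * flushed with a single whole-PID invalidation rather than one tlbie
 * per page, trading precision for fewer broadcast operations.
 */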
void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
                                  unsigned long end, int psize)
{
        unsigned long pid;
        unsigned long addr;
        int local = mm_is_thread_local(mm);
        unsigned long ap = mmu_get_ap(psize);
        int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
        unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;

        preempt_disable();
        pid = mm ? mm->context.id : 0;
        if (unlikely(pid == MMU_NO_CONTEXT))
                goto err_out;

        if (end == TLB_FLUSH_ALL ||
            (end - start) > tlb_single_page_flush_ceiling * page_size) {
                if (local)
                        _tlbiel_pid(pid, RIC_FLUSH_TLB);
                else
                        _tlbie_pid(pid, RIC_FLUSH_TLB);
                goto err_out;
        }
        for (addr = start; addr < end; addr += page_size) {
                if (local)
                        _tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
                else {
                        if (lock_tlbie)
                                raw_spin_lock(&native_tlbie_lock);
                        _tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
                        if (lock_tlbie)
                                raw_spin_unlock(&native_tlbie_lock);
                }
        }
err_out:
        preempt_enable();
}
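
/*
 * The lpid variants below are partition scoped (prs = 0): they
 * invalidate guest translations by LPID and guest physical address,
 * for use by hypervisor code such as KVM.
 */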
void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
                              unsigned long page_size)
{
        unsigned long rb, rs, prs, r;
        unsigned long ap;
        unsigned long ric = RIC_FLUSH_TLB;

        ap = mmu_get_ap(radix_get_mmu_psize(page_size));
        rb = gpa & ~(PPC_BITMASK(52, 63));
        rb |= ap << PPC_BITLSHIFT(58);
        rs = lpid & ((1UL << 32) - 1);
        prs = 0; /* partition scoped */
        r = 1;   /* radix format */

        asm volatile("ptesync" : : : "memory");
        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}
EXPORT_SYMBOL(radix__flush_tlb_lpid_va);

void radix__flush_tlb_lpid(unsigned long lpid)
{
        unsigned long rb, rs, prs, r;
        unsigned long ric = RIC_FLUSH_ALL;

        rb = 0x2 << PPC_BITLSHIFT(53); /* IS = 2 */
        rs = lpid & ((1UL << 32) - 1);
        prs = 0; /* partition scoped */
        r = 1;   /* radix format */

        asm volatile("ptesync" : : : "memory");
        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}
EXPORT_SYMBOL(radix__flush_tlb_lpid);
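
/*
 * PMD-level range flush, used for 2M (transparent huge page) ranges;
 * it simply reuses the generic range helper with the 2M page size.
 */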
void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
                                unsigned long start, unsigned long end)
{
        radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
}
EXPORT_SYMBOL(radix__flush_pmd_tlb_range);
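
/*
 * Flush everything: IS = 3 selects "all entries", and two tlbie
 * operations cover guest (PRS = 1, LPID != 0) and host (PRS = 0,
 * LPID = 0) translations.
 */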
void radix__flush_tlb_all(void)
{
        unsigned long rb, prs, r, rs;
        unsigned long ric = RIC_FLUSH_ALL;

        rb = 0x3 << PPC_BITLSHIFT(53); /* IS = 3 */
        prs = 0; /* partition scoped */
        r = 1;   /* radix format */
        rs = 1 & ((1UL << 32) - 1); /* any LPID value to flush guest mappings */

        asm volatile("ptesync" : : : "memory");
        /*
         * now flush guest entries by passing PRS = 1 and LPID != 0
         */
        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory");
        /*
         * now flush host entries by passing PRS = 0 and LPID == 0
         */
        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
        asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}