/*
 * TLB flush routines for radix kernels.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>

#include <asm/ppc-opcode.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/trace.h>
#include <asm/cputhreads.h>

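/*
 * RIC (Radix Invalidation Control) values used below select what the
 * tlbie/tlbiel instruction invalidates: 0 = TLB entries only, 1 = Page
 * Walk Cache only, 2 = everything (TLB, PWC and cached process/partition
 * table entries).
 */
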
#define RIC_FLUSH_TLB 0
#define RIC_FLUSH_PWC 1
#define RIC_FLUSH_ALL 2

/*
 * tlbiel instruction for radix, set invalidation
 * i.e., r=1 and is=01 or is=10 or is=11
 */
static inline void tlbiel_radix_set_isa300(unsigned int set, unsigned int is,
                                           unsigned int pid,
                                           unsigned int ric, unsigned int prs)
{
        unsigned long rb;
        unsigned long rs;
        unsigned int r = 1; /* radix format */

        rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
        rs = ((unsigned long)pid << PPC_BITLSHIFT(31));

        asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4)
                     : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "r"(r)
                     : "memory");
}

static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
{
        unsigned int set;

        asm volatile("ptesync": : :"memory");

        /*
         * Flush the first set of the TLB, and the entire Page Walk Cache
         * and partition table entries. Then flush the remaining sets of the
         * TLB.
         */
        tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 0);
        for (set = 1; set < num_sets; set++)
                tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 0);

        /* Do the same for process scoped entries. */
        tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 1);
        for (set = 1; set < num_sets; set++)
                tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 1);

        asm volatile("ptesync": : :"memory");
}

void radix__tlbiel_all(unsigned int action)
{
        unsigned int is;

        switch (action) {
        case TLB_INVAL_SCOPE_GLOBAL:
                is = 3;
                break;
        case TLB_INVAL_SCOPE_LPID:
                is = 2;
                break;
        default:
                BUG();
        }

        if (early_cpu_has_feature(CPU_FTR_ARCH_300))
                tlbiel_all_isa300(POWER9_TLB_SETS_RADIX, is);
        else
                WARN(1, "%s called on pre-POWER9 CPU\n", __func__);

        asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}

static inline void __tlbiel_pid(unsigned long pid, int set,
                                unsigned long ric)
{
        unsigned long rb,rs,prs,r;

        rb = PPC_BIT(53); /* IS = 1 */
        rb |= set << PPC_BITLSHIFT(51);
        rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
        prs = 1; /* process scoped */
        r = 1;   /* radix format */

        asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        trace_tlbie(0, 1, rb, rs, ric, prs, r);
}

static inline void __tlbie_pid(unsigned long pid, unsigned long ric)
{
        unsigned long rb,rs,prs,r;

        rb = PPC_BIT(53); /* IS = 1 */
        rs = pid << PPC_BITLSHIFT(31);
        prs = 1; /* process scoped */
        r = 1;   /* radix format */

        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        trace_tlbie(0, 0, rb, rs, ric, prs, r);
}

/*
 * We use 128 sets in radix mode and 256 sets in hpt mode.
 */
static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
        int set;

        asm volatile("ptesync": : :"memory");

        /*
         * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL,
         * also flush the entire Page Walk Cache.
         */
        __tlbiel_pid(pid, 0, ric);

        /* For PWC, only one flush is needed */
        if (ric == RIC_FLUSH_PWC) {
                asm volatile("ptesync": : :"memory");
                return;
        }

        /* For the remaining sets, just flush the TLB */
        for (set = 1; set < POWER9_TLB_SETS_RADIX; set++)
                __tlbiel_pid(pid, set, RIC_FLUSH_TLB);

        asm volatile("ptesync": : :"memory");
        asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}

static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
{
        asm volatile("ptesync": : :"memory");
        __tlbie_pid(pid, ric);
        asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

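/*
 * The *_va helpers below invalidate a single effective address: rb holds
 * the page address (bits 0-51) plus the AP (actual page size) encoding
 * from mmu_get_ap(), while rs carries the PID being targeted.
 */
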
static inline void __tlbiel_va(unsigned long va, unsigned long pid,
                               unsigned long ap, unsigned long ric)
{
        unsigned long rb,rs,prs,r;

        rb = va & ~(PPC_BITMASK(52, 63));
        rb |= ap << PPC_BITLSHIFT(58);
        rs = pid << PPC_BITLSHIFT(31);
        prs = 1; /* process scoped */
        r = 1;   /* radix format */

        asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        trace_tlbie(0, 1, rb, rs, ric, prs, r);
}

static inline void __tlbiel_va_range(unsigned long start, unsigned long end,
                                     unsigned long pid, unsigned long page_size,
                                     unsigned long psize)
{
        unsigned long addr;
        unsigned long ap = mmu_get_ap(psize);

        for (addr = start; addr < end; addr += page_size)
                __tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
}

static inline void _tlbiel_va(unsigned long va, unsigned long pid,
                              unsigned long psize, unsigned long ric)
{
        unsigned long ap = mmu_get_ap(psize);

        asm volatile("ptesync": : :"memory");
        __tlbiel_va(va, pid, ap, ric);
        asm volatile("ptesync": : :"memory");
}

static inline void _tlbiel_va_range(unsigned long start, unsigned long end,
                                    unsigned long pid, unsigned long page_size,
                                    unsigned long psize, bool also_pwc)
{
        asm volatile("ptesync": : :"memory");
        if (also_pwc)
                __tlbiel_pid(pid, 0, RIC_FLUSH_PWC);
        __tlbiel_va_range(start, end, pid, page_size, psize);
        asm volatile("ptesync": : :"memory");
}

static inline void __tlbie_va(unsigned long va, unsigned long pid,
                              unsigned long ap, unsigned long ric)
{
        unsigned long rb,rs,prs,r;

        rb = va & ~(PPC_BITMASK(52, 63));
        rb |= ap << PPC_BITLSHIFT(58);
        rs = pid << PPC_BITLSHIFT(31);
        prs = 1; /* process scoped */
        r = 1;   /* radix format */

        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        trace_tlbie(0, 0, rb, rs, ric, prs, r);
}

static inline void __tlbie_va_range(unsigned long start, unsigned long end,
                                    unsigned long pid, unsigned long page_size,
                                    unsigned long psize)
{
        unsigned long addr;
        unsigned long ap = mmu_get_ap(psize);

        for (addr = start; addr < end; addr += page_size)
                __tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
}

static inline void _tlbie_va(unsigned long va, unsigned long pid,
                             unsigned long psize, unsigned long ric)
{
        unsigned long ap = mmu_get_ap(psize);

        asm volatile("ptesync": : :"memory");
        __tlbie_va(va, pid, ap, ric);
        asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

static inline void _tlbie_va_range(unsigned long start, unsigned long end,
                                   unsigned long pid, unsigned long page_size,
                                   unsigned long psize, bool also_pwc)
{
        asm volatile("ptesync": : :"memory");
        if (also_pwc)
                __tlbie_pid(pid, RIC_FLUSH_PWC);
        __tlbie_va_range(start, end, pid, page_size, psize);
        asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
void radix__local_flush_tlb_mm(struct mm_struct *mm)
{
        unsigned long pid;

        preempt_disable();
        pid = mm->context.id;
        if (pid != MMU_NO_CONTEXT)
                _tlbiel_pid(pid, RIC_FLUSH_TLB);
        preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_mm);

#ifndef CONFIG_SMP
void radix__local_flush_all_mm(struct mm_struct *mm)
{
        unsigned long pid;

        preempt_disable();
        pid = mm->context.id;
        if (pid != MMU_NO_CONTEXT)
                _tlbiel_pid(pid, RIC_FLUSH_ALL);
        preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_all_mm);
#endif /* CONFIG_SMP */

void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
                                       int psize)
{
        unsigned long pid;

        preempt_disable();
        pid = mm->context.id;
        if (pid != MMU_NO_CONTEXT)
                _tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
        preempt_enable();
}

void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
        /* need the return fix for nohash.c */
        if (is_vm_hugetlb_page(vma))
                return radix__local_flush_hugetlb_page(vma, vmaddr);
#endif
        radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__local_flush_tlb_page);

#ifdef CONFIG_SMP
void radix__flush_tlb_mm(struct mm_struct *mm)
{
        unsigned long pid;

        pid = mm->context.id;
        if (unlikely(pid == MMU_NO_CONTEXT))
                return;

        preempt_disable();
        if (!mm_is_thread_local(mm))
                _tlbie_pid(pid, RIC_FLUSH_TLB);
        else
                _tlbiel_pid(pid, RIC_FLUSH_TLB);
        preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_mm);

void radix__flush_all_mm(struct mm_struct *mm)
{
        unsigned long pid;

        pid = mm->context.id;
        if (unlikely(pid == MMU_NO_CONTEXT))
                return;

        preempt_disable();
        if (!mm_is_thread_local(mm))
                _tlbie_pid(pid, RIC_FLUSH_ALL);
        else
                _tlbiel_pid(pid, RIC_FLUSH_ALL);
        preempt_enable();
}
EXPORT_SYMBOL(radix__flush_all_mm);

void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
        tlb->need_flush_all = 1;
}
EXPORT_SYMBOL(radix__flush_tlb_pwc);

void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
                                 int psize)
{
        unsigned long pid;

        pid = mm->context.id;
        if (unlikely(pid == MMU_NO_CONTEXT))
                return;

        preempt_disable();
        if (!mm_is_thread_local(mm))
                _tlbie_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
        else
                _tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
        preempt_enable();
}

void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
        if (is_vm_hugetlb_page(vma))
                return radix__flush_hugetlb_page(vma, vmaddr);
#endif
        radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__flush_tlb_page);

#else /* CONFIG_SMP */
#define radix__flush_all_mm radix__local_flush_all_mm
#endif /* CONFIG_SMP */

void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        _tlbie_pid(0, RIC_FLUSH_ALL);
}
EXPORT_SYMBOL(radix__flush_tlb_kernel_range);

#define TLB_FLUSH_ALL -1UL

/*
 * Number of pages above which we invalidate the entire PID rather than
 * flush individual pages, for local and global flushes respectively.
 *
 * tlbie goes out to the interconnect and individual ops are more costly.
 * It also does not iterate over sets like the local tlbiel variant when
 * invalidating a full PID, so it has a far lower threshold to change from
 * individual page flushes to full-pid flushes.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
static unsigned long tlb_local_single_page_flush_ceiling __read_mostly = POWER9_TLB_SETS_RADIX * 2;

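/*
 * In the range-flush paths below, nr_pages is compared against these
 * ceilings to choose between flushing page by page and flushing the whole
 * PID.  The local ceiling is higher because a full-PID tlbiel already has
 * to step through every TLB set, so per-page flushes stay worthwhile for
 * longer on the local path.
 */
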
void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                            unsigned long end)

{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long pid;
        unsigned int page_shift = mmu_psize_defs[mmu_virtual_psize].shift;
        unsigned long page_size = 1UL << page_shift;
        unsigned long nr_pages = (end - start) >> page_shift;
        bool local, full;

#ifdef CONFIG_HUGETLB_PAGE
        if (is_vm_hugetlb_page(vma))
                return radix__flush_hugetlb_tlb_range(vma, start, end);
#endif

        pid = mm->context.id;
        if (unlikely(pid == MMU_NO_CONTEXT))
                return;

        preempt_disable();
        if (mm_is_thread_local(mm)) {
                local = true;
                full = (end == TLB_FLUSH_ALL ||
                        nr_pages > tlb_local_single_page_flush_ceiling);
        } else {
                local = false;
                full = (end == TLB_FLUSH_ALL ||
                        nr_pages > tlb_single_page_flush_ceiling);
        }

        if (full) {
                if (local)
                        _tlbiel_pid(pid, RIC_FLUSH_TLB);
                else
                        _tlbie_pid(pid, RIC_FLUSH_TLB);
        } else {
                bool hflush = false;
                unsigned long hstart, hend;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
                hstart = (start + HPAGE_PMD_SIZE - 1) >> HPAGE_PMD_SHIFT;
                hend = end >> HPAGE_PMD_SHIFT;
                if (hstart < hend) {
                        hstart <<= HPAGE_PMD_SHIFT;
                        hend <<= HPAGE_PMD_SHIFT;
                        hflush = true;
                }
#endif
                if (local) {
                        asm volatile("ptesync": : :"memory");
                        __tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize);
                        if (hflush)
                                __tlbiel_va_range(hstart, hend, pid,
                                                  HPAGE_PMD_SIZE, MMU_PAGE_2M);
                        asm volatile("ptesync": : :"memory");
                } else {
                        asm volatile("ptesync": : :"memory");
                        __tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize);
                        if (hflush)
                                __tlbie_va_range(hstart, hend, pid,
                                                 HPAGE_PMD_SIZE, MMU_PAGE_2M);
                        asm volatile("eieio; tlbsync; ptesync": : :"memory");
                }
        }
        preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_range);

static int radix_get_mmu_psize(int page_size)
{
        int psize;

        if (page_size == (1UL << mmu_psize_defs[mmu_virtual_psize].shift))
                psize = mmu_virtual_psize;
        else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_2M].shift))
                psize = MMU_PAGE_2M;
        else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_1G].shift))
                psize = MMU_PAGE_1G;
        else
                return -1;
        return psize;
}

static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
                                             unsigned long end, int psize);

void radix__tlb_flush(struct mmu_gather *tlb)
{
        int psize = 0;
        struct mm_struct *mm = tlb->mm;
        int page_size = tlb->page_size;

        /*
         * if page size is not something we understand, do a full mm flush
         *
         * A "fullmm" flush must always do a flush_all_mm (RIC=2) flush
         * that flushes the process table entry cache upon process teardown.
         * See the comment for radix in arch_exit_mmap().
         */
        if (tlb->fullmm) {
                radix__flush_all_mm(mm);
        } else if ((psize = radix_get_mmu_psize(page_size)) == -1) {
                if (!tlb->need_flush_all)
                        radix__flush_tlb_mm(mm);
                else
                        radix__flush_all_mm(mm);
        } else {
                unsigned long start = tlb->start;
                unsigned long end = tlb->end;

                if (!tlb->need_flush_all)
                        radix__flush_tlb_range_psize(mm, start, end, psize);
                else
                        radix__flush_tlb_pwc_range_psize(mm, start, end, psize);
        }
        tlb->need_flush_all = 0;
}

static inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
                                unsigned long start, unsigned long end,
                                int psize, bool also_pwc)
{
        unsigned long pid;
        unsigned int page_shift = mmu_psize_defs[psize].shift;
        unsigned long page_size = 1UL << page_shift;
        unsigned long nr_pages = (end - start) >> page_shift;
        bool local, full;

        pid = mm->context.id;
        if (unlikely(pid == MMU_NO_CONTEXT))
                return;

        preempt_disable();
        if (mm_is_thread_local(mm)) {
                local = true;
                full = (end == TLB_FLUSH_ALL ||
                        nr_pages > tlb_local_single_page_flush_ceiling);
        } else {
                local = false;
                full = (end == TLB_FLUSH_ALL ||
                        nr_pages > tlb_single_page_flush_ceiling);
        }

        if (full) {
                if (local)
                        _tlbiel_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
                else
                        _tlbie_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
        } else {
                if (local)
                        _tlbiel_va_range(start, end, pid, page_size, psize, also_pwc);
                else
                        _tlbie_va_range(start, end, pid, page_size, psize, also_pwc);
        }
        preempt_enable();
}

void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
                                  unsigned long end, int psize)
{
        return __radix__flush_tlb_range_psize(mm, start, end, psize, false);
}

static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
                                             unsigned long end, int psize)
{
        __radix__flush_tlb_range_psize(mm, start, end, psize, true);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
{
        unsigned long pid, end;

        pid = mm->context.id;
        if (unlikely(pid == MMU_NO_CONTEXT))
                return;

        /* 4k page size, just blow the world */
        if (PAGE_SIZE == 0x1000) {
                radix__flush_all_mm(mm);
                return;
        }

        end = addr + HPAGE_PMD_SIZE;

        /* Otherwise first do the PWC, then iterate the pages. */
        preempt_disable();

        if (mm_is_thread_local(mm)) {
                _tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
        } else {
                _tlbie_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
        }

        preempt_enable();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

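/*
 * The routines below issue partition-scoped (prs = 0) invalidations keyed
 * by LPID rather than PID; they are typically used by the hypervisor when
 * translations cached under a logical partition ID need to be torn down.
 */
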
void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
                              unsigned long page_size)
{
        unsigned long rb,rs,prs,r;
        unsigned long ap;
        unsigned long ric = RIC_FLUSH_TLB;

        ap = mmu_get_ap(radix_get_mmu_psize(page_size));
        rb = gpa & ~(PPC_BITMASK(52, 63));
        rb |= ap << PPC_BITLSHIFT(58);
        rs = lpid & ((1UL << 32) - 1);
        prs = 0; /* partition scoped */
        r = 1;   /* radix format */

        asm volatile("ptesync": : :"memory");
        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        asm volatile("eieio; tlbsync; ptesync": : :"memory");
        trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}
EXPORT_SYMBOL(radix__flush_tlb_lpid_va);

void radix__flush_tlb_lpid(unsigned long lpid)
{
        unsigned long rb,rs,prs,r;
        unsigned long ric = RIC_FLUSH_ALL;

        rb = 0x2 << PPC_BITLSHIFT(53); /* IS = 2 */
        rs = lpid & ((1UL << 32) - 1);
        prs = 0; /* partition scoped */
        r = 1;   /* radix format */

        asm volatile("ptesync": : :"memory");
        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        asm volatile("eieio; tlbsync; ptesync": : :"memory");
        trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}
EXPORT_SYMBOL(radix__flush_tlb_lpid);

void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
                                unsigned long start, unsigned long end)
{
        radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
}
EXPORT_SYMBOL(radix__flush_pmd_tlb_range);

void radix__flush_tlb_all(void)
{
        unsigned long rb,prs,r,rs;
        unsigned long ric = RIC_FLUSH_ALL;

        rb = 0x3 << PPC_BITLSHIFT(53); /* IS = 3 */
        prs = 0; /* partition scoped */
        r = 1;   /* radix format */
        rs = 1 & ((1UL << 32) - 1); /* any LPID value to flush guest mappings */

        asm volatile("ptesync": : :"memory");
        /*
         * now flush guest entries by passing PRS = 1 and LPID != 0
         */
        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory");
        /*
         * now flush host entries by passing PRS = 0 and LPID == 0
         */
        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
        asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
                                 unsigned long address)
{
        /*
         * We track page size in pte only for DD1, so we can
         * call this only on DD1.
         */
        if (!cpu_has_feature(CPU_FTR_POWER9_DD1)) {
                VM_WARN_ON(1);
                return;
        }

        if (old_pte & R_PAGE_LARGE)
                radix__flush_tlb_page_psize(mm, address, MMU_PAGE_2M);
        else
                radix__flush_tlb_page_psize(mm, address, mmu_virtual_psize);
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
{
        unsigned int pid = mm->context.id;

        if (unlikely(pid == MMU_NO_CONTEXT))
                return;

        /*
         * If this context hasn't run on that CPU before and KVM is
         * around, there's a slim chance that the guest on another
         * CPU just brought in obsolete translation into the TLB of
         * this CPU due to a bad prefetch using the guest PID on
         * the way into the hypervisor.
         *
         * We work around this here. If KVM is possible, we check if
         * any sibling thread is in KVM. If it is, the window may exist
         * and thus we flush that PID from the core.
         *
         * A potential future improvement would be to mark which PIDs
         * have never been used on the system and avoid it if the PID
         * is new and the process has no other cpumask bit set.
         */
        if (cpu_has_feature(CPU_FTR_HVMODE) && radix_enabled()) {
                int cpu = smp_processor_id();
                int sib = cpu_first_thread_sibling(cpu);
                bool flush = false;

                for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
                        if (sib == cpu)
                                continue;
                        if (paca[sib].kvm_hstate.kvm_vcpu)
                                flush = true;
                }
                if (flush)
                        _tlbiel_pid(pid, RIC_FLUSH_ALL);
        }
}
EXPORT_SYMBOL_GPL(radix_kvm_prefetch_workaround);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */