// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/asm-prototypes.h>
#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/archrandom.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/dbell.h>
#include <asm/cputhreads.h>
#include <asm/io.h>
#include <asm/opal.h>
#include <asm/smp.h>
#define KVM_CMA_CHUNK_ORDER	18

#include "book3s_xics.h"
#include "book3s_xive.h"

/*
 * The XIVE module will populate these when it loads.
 */
unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
		       unsigned long mfrr);
int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
EXPORT_SYMBOL_GPL(__xive_vm_h_xirr);
EXPORT_SYMBOL_GPL(__xive_vm_h_ipoll);
EXPORT_SYMBOL_GPL(__xive_vm_h_ipi);
EXPORT_SYMBOL_GPL(__xive_vm_h_cppr);
EXPORT_SYMBOL_GPL(__xive_vm_h_eoi);
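/*
 * Illustrative sketch (not part of this file): the XIVE module is expected
 * to fill these hooks in from its init path and clear them on exit, roughly
 * as below.  The function and handler names here are assumptions used for
 * the example only.
 */
#if 0	/* example only */
void kvmppc_xive_init_module(void)
{
	__xive_vm_h_xirr = xive_vm_h_xirr;
	__xive_vm_h_ipoll = xive_vm_h_ipoll;
	__xive_vm_h_ipi = xive_vm_h_ipi;
	__xive_vm_h_cppr = xive_vm_h_cppr;
	__xive_vm_h_eoi = xive_vm_h_eoi;
}

void kvmppc_xive_exit_module(void)
{
	__xive_vm_h_xirr = NULL;
	__xive_vm_h_ipoll = NULL;
	__xive_vm_h_ipi = NULL;
	__xive_vm_h_cppr = NULL;
	__xive_vm_h_eoi = NULL;
}
#endif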
/*
 * Hash page table alignment on newer CPUs (CPU_FTR_ARCH_206)
 * should be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash pagetable allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;
static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);
struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
{
	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES),
			 false);
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma);

void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages)
{
	cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_free_hpt_cma);
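/*
 * Example only (not from this file): the hash page table allocator is
 * expected to carve its HPT out of this CMA pool roughly as below.  The
 * helper name and the "order" convention are assumptions for illustration.
 */
#if 0	/* usage sketch */
static struct page *example_alloc_hpt_of_order(u32 order)
{
	struct page *page;

	/* order is log2 of the HPT size in bytes, e.g. 24 for a 16 MiB HPT */
	page = kvm_alloc_hpt_cma(1ul << (order - PAGE_SHIFT));
	if (!page)
		return NULL;	/* caller would fall back to another allocator */
	memset(page_address(page), 0, 1ul << order);
	return page;
}
#endif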
/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the memblock allocator
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	struct memblock_region *reg;
	phys_addr_t selected_size = 0;

	/*
	 * We need CMA reservation only when we are in HV mode
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return;
	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		selected_size += memblock_region_memory_end_pfn(reg) -
				 memblock_region_memory_base_pfn(reg);

	selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
	if (selected_size) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
		cma_declare_contiguous(0, selected_size, 0, align_size,
			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, "kvm_cma",
			&kvm_cma);
	}
}
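/*
 * Worked example (illustrative numbers): with 64 GiB of memblock memory and
 * the default kvm_cma_resv_ratio of 5, selected_size comes to roughly
 * 64 GiB * 5 / 100 = ~3.2 GiB, reserved with 256 KiB (HPT_ALIGN_PAGES)
 * alignment and a CMA bitmap granularity of 2^18 bytes.
 */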
/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded. If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	int threads_running;
	int threads_ceded;
	int threads_conferring;
	u64 stop = get_tb() + 10 * tb_ticks_per_usec;
	int rv = H_SUCCESS; /* => don't yield */

	set_bit(ptid, &vc->conferring_threads);
	while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
		threads_running = VCORE_ENTRY_MAP(vc);
		threads_ceded = vc->napping_threads;
		threads_conferring = vc->conferring_threads;
		if ((threads_ceded | threads_conferring) == threads_running) {
			rv = H_TOO_HARD; /* => do yield */
			break;
		}
	}
	clear_bit(ptid, &vc->conferring_threads);
	return rv;
}
/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
	get_online_cpus();
	atomic_inc(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	get_online_cpus();
	atomic_dec(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}
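/*
 * Example only (hypothetical caller): code that must not run while HV guests
 * exist can gate on kvm_hv_mode_active(); a secondary-thread online check
 * might look roughly like this.  The function name and policy shown here are
 * assumptions for illustration, not taken from this file.
 */
#if 0	/* usage sketch */
static int example_cpu_online_check(unsigned int cpu)
{
	/* refuse to online secondary threads while HV guests are running */
	if (kvm_hv_mode_active() && cpu_thread_in_core(cpu) != 0)
		return -EBUSY;
	return 0;
}
#endif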
extern int hcall_real_table[], hcall_real_table_end[];

int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
	/* hcall numbers are multiples of 4; the table has one entry per hcall */
	cmd /= 4;
	if (cmd < hcall_real_table_end - hcall_real_table &&
	    hcall_real_table[cmd])
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);

int kvmppc_hwrng_present(void)
{
	return powernv_hwrng_present();
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);
long kvmppc_h_random(struct kvm_vcpu *vcpu)
{
	int r;

	/* Only need to do the expensive mfmsr() on radix */
	if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR))
		r = powernv_get_random_long(&vcpu->arch.regs.gpr[4]);
	else
		r = powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]);
	if (r)
		return H_SUCCESS;

	return H_HARDWARE;
}
/*
 * Send an interrupt or message to another CPU.
 * The caller needs to include any barrier needed to order writes
 * to memory vs. the IPI/message.
 */
void kvmhv_rm_send_ipi(int cpu)
{
	void __iomem *xics_phys;
	unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

	/* For a nested hypervisor, use the XICS via hcall */
	if (kvmhv_on_pseries()) {
		unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

		plpar_hcall_raw(H_IPI, retbuf, get_hard_smp_processor_id(cpu),
				IPI_PRIORITY);
		return;
	}

	/* On POWER9 we can use msgsnd for any destination cpu. */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		msg |= get_hard_smp_processor_id(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* On POWER8 for IPIs to threads in the same core, use msgsnd. */
	if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
	    cpu_first_thread_sibling(cpu) ==
	    cpu_first_thread_sibling(raw_smp_processor_id())) {
		msg |= cpu_thread_in_core(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* We should never reach this */
	if (WARN_ON_ONCE(xics_on_xive()))
		return;

	/* Else poke the target with an IPI */
	xics_phys = paca_ptrs[cpu]->kvm_hstate.xics_phys;
	if (xics_phys)
		__raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR);
	else
		opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
}
/*
 * The following functions are called from the assembly code
 * in book3s_hv_rmhandlers.S.
 */
static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
{
	int cpu = vc->pcpu;

	/* Order setting of exit map vs. msgsnd/IPI */
	smp_mb();
	for (; active; active >>= 1, ++cpu)
		if (active & 1)
			kvmhv_rm_send_ipi(cpu);
}
void kvmhv_commence_exit(int trap)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
	int me, ee, i, t;
	int cpu0;

	/* Set our bit in the threads-exiting-guest map in the 0xff00
	   bits of vcore->entry_exit_map */
	me = 0x100 << ptid;
	do {
		ee = vc->entry_exit_map;
	} while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);

	/* Are we the first here? */
	if ((ee >> 8) != 0)
		return;

	/*
	 * Trigger the other threads in this vcore to exit the guest.
	 * If this is a hypervisor decrementer interrupt then they
	 * will be already on their way out of the guest.
	 */
	if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
		kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));

	/*
	 * If we are doing dynamic micro-threading, interrupt the other
	 * subcores to pull them out of their guests too.
	 */
	if (!sip)
		return;

	for (i = 0; i < MAX_SUBCORES; ++i) {
		vc = sip->vc[i];
		if (!vc)
			break;
		do {
			ee = vc->entry_exit_map;
			/* Already asked to exit? */
			if ((ee >> 8) != 0)
				break;
		} while (cmpxchg(&vc->entry_exit_map, ee,
				 ee | VCORE_EXIT_REQ) != ee);
		if ((ee >> 8) == 0)
			kvmhv_interrupt_vcore(vc, ee);
	}

	/*
	 * On POWER9 when running a HPT guest on a radix host (sip != NULL),
	 * we have to interrupt inactive CPU threads to get them to
	 * restore the host LPCR value.
	 */
	if (sip->lpcr_req) {
		if (cmpxchg(&sip->do_restore, 0, 1) == 0) {
			vc = local_paca->kvm_hstate.kvm_vcore;
			cpu0 = vc->pcpu + ptid - local_paca->kvm_hstate.tid;
			for (t = 1; t < threads_per_core; ++t) {
				if (sip->napped[t])
					kvmhv_rm_send_ipi(cpu0 + t);
			}
		}
	}
}
struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv);
#ifdef CONFIG_KVM_XICS
static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap,
					 u32 xisr)
{
	int i;

	/*
	 * We access the mapped array here without a lock.  That
	 * is safe because we never reduce the number of entries
	 * in the array and we never change the v_hwirq field of
	 * an entry once it is set.
	 *
	 * We have also carefully ordered the stores in the writer
	 * and the loads here in the reader, so that if we find a matching
	 * hwirq here, the associated GSI and irq_desc fields are valid.
	 */
	for (i = 0; i < pimap->n_mapped; i++) {
		if (xisr == pimap->mapped[i].r_hwirq) {
			/*
			 * Order subsequent reads in the caller to serialize
			 * with the writer.
			 */
			smp_rmb();
			return &pimap->mapped[i];
		}
	}
	return NULL;
}
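/*
 * Illustrative counterpart (not from this file): the writer that fills in
 * pimap->mapped[] would publish a new entry with the mirror-image ordering,
 * roughly as below, so that a reader which observes a matching r_hwirq also
 * sees the other fields.  Field and function names beyond those used above
 * are assumptions.
 */
#if 0	/* writer-side sketch */
static void example_add_irq_mapping(struct kvmppc_passthru_irqmap *pimap,
				    unsigned long r_hwirq,
				    unsigned long v_hwirq,
				    struct irq_desc *desc)
{
	struct kvmppc_irq_map *map = &pimap->mapped[pimap->n_mapped];

	map->v_hwirq = v_hwirq;
	map->desc = desc;
	/* Make the entry contents visible before r_hwirq says it is valid */
	smp_wmb();
	map->r_hwirq = r_hwirq;
	pimap->n_mapped++;
}
#endif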
/*
 * If we have an interrupt that's not an IPI, check if we have a
 * passthrough adapter and if so, check if this external interrupt
 * is for the adapter.
 * We will attempt to deliver the IRQ directly to the target VCPU's
 * ICP, the virtual ICP (based on affinity - the xive value in ICS).
 *
 * If the delivery fails or if this is not for a passthrough adapter,
 * return to the host to handle this interrupt. We earlier
 * saved a copy of the XIRR in the PACA, it will be picked up by
 * the host ICP driver.
 */
static int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	struct kvmppc_passthru_irqmap *pimap;
	struct kvmppc_irq_map *irq_map;
	struct kvm_vcpu *vcpu;

	vcpu = local_paca->kvm_hstate.kvm_vcpu;
	if (!vcpu)
		return 1;
	pimap = kvmppc_get_passthru_irqmap(vcpu->kvm);
	if (!pimap)
		return 1;
	irq_map = get_irqmap(pimap, xisr);
	if (!irq_map)
		return 1;

	/* We're handling this interrupt, generic code doesn't need to */
	local_paca->kvm_hstate.saved_xirr = 0;

	return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap, again);
}

#else
static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	return 1;
}
#endif
/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	2 Passthrough that needs completion in the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 *	-2 if there is PCI passthrough external interrupt that was handled
 */
static long kvmppc_read_one_intr(bool *again);

long kvmppc_read_intr(void)
{
	long ret = 0;
	long rc;
	bool again;

	if (xive_enabled())
		return 1;

	do {
		again = false;
		rc = kvmppc_read_one_intr(&again);
		if (rc && (ret == 0 || rc > ret))
			ret = rc;
	} while (again);
	return ret;
}
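/*
 * Example only (hypothetical caller): the return codes documented above
 * would typically be acted on along these lines.  The real caller is the
 * real-mode assembly in book3s_hv_rmhandlers.S, not C code like this.
 */
#if 0	/* usage sketch */
static void example_handle_pending_intr(void)
{
	switch (kvmppc_read_intr()) {
	case 0:		/* nothing pending, keep going */
	case -1:	/* guest wakeup IPI, already cleared */
		break;
	case 1:		/* interrupt that the host must handle */
	case 2:		/* passthrough interrupt needing host completion */
	case -2:	/* PCI passthrough external interrupt, handled */
		/* exit to virtual mode and let the host finish it */
		break;
	}
}
#endif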
static long kvmppc_read_one_intr(bool *again)
{
	void __iomem *xics_phys;
	u32 h_xirr;
	__be32 xirr;
	u32 xisr;
	u8 host_ipi;
	int64_t rc;

	if (xive_enabled())
		return 1;

	/* see if a host IPI is pending */
	host_ipi = local_paca->kvm_hstate.host_ipi;
	if (host_ipi)
		return 1;

	/* Now read the interrupt from the ICP */
	if (kvmhv_on_pseries()) {
		unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

		rc = plpar_hcall_raw(H_XIRR, retbuf, 0xFF);
		xirr = cpu_to_be32(retbuf[0]);
	} else {
		xics_phys = local_paca->kvm_hstate.xics_phys;
		rc = 0;
		if (!xics_phys)
			rc = opal_int_get_xirr(&xirr, false);
		else
			xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
	}
	if (rc < 0)
		return 1;

	/*
	 * Save XIRR for later. Since we get control in reverse endian
	 * on LE systems, save it byte reversed and fetch it back in
	 * host endian. Note that xirr is the value read from the
	 * XIRR register, while h_xirr is the host endian version.
	 */
	h_xirr = be32_to_cpu(xirr);
	local_paca->kvm_hstate.saved_xirr = h_xirr;
	xisr = h_xirr & 0xffffff;
	/*
	 * Ensure that the store/load complete to guarantee all side
	 * effects of loading from XIRR have completed
	 */
	smp_mb();

	/* if nothing pending in the ICP */
	if (!xisr)
		return 0;

	/* We found something in the ICP...
	 *
	 * If it is an IPI, clear the MFRR and EOI it.
	 */
	if (xisr == XICS_IPI) {
		rc = 0;
		if (kvmhv_on_pseries()) {
			unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

			plpar_hcall_raw(H_IPI, retbuf,
					hard_smp_processor_id(), 0xff);
			plpar_hcall_raw(H_EOI, retbuf, h_xirr);
		} else if (xics_phys) {
			__raw_rm_writeb(0xff, xics_phys + XICS_MFRR);
			__raw_rm_writel(xirr, xics_phys + XICS_XIRR);
		} else {
			opal_int_set_mfrr(hard_smp_processor_id(), 0xff);
			rc = opal_int_eoi(h_xirr);
		}
		/* If rc > 0, there is another interrupt pending */
		*again = rc > 0;

		/*
		 * Need to ensure side effects of above stores
		 * complete before proceeding.
		 */
		smp_mb();

		/*
		 * We need to re-check host IPI now in case it got set in the
		 * meantime. If it's clear, we bounce the interrupt to the
		 * guest.
		 */
		host_ipi = local_paca->kvm_hstate.host_ipi;
		if (unlikely(host_ipi != 0)) {
			/* We raced with the host,
			 * we need to resend that IPI, bummer
			 */
			if (kvmhv_on_pseries()) {
				unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

				plpar_hcall_raw(H_IPI, retbuf,
						hard_smp_processor_id(),
						IPI_PRIORITY);
			} else if (xics_phys)
				__raw_rm_writeb(IPI_PRIORITY,
						xics_phys + XICS_MFRR);
			else
				opal_int_set_mfrr(hard_smp_processor_id(),
						  IPI_PRIORITY);
			/* Let side effects complete */
			smp_mb();
			return 1;
		}

		/* OK, it's an IPI for us */
		local_paca->kvm_hstate.saved_xirr = 0;
		return -1;
	}

	return kvmppc_check_passthru(xisr, xirr, again);
}
#ifdef CONFIG_KVM_XICS
static inline bool is_rm(void)
{
	return !(mfmsr() & MSR_DR);
}

unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_xirr(vcpu);
		if (unlikely(!__xive_vm_h_xirr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_xirr(vcpu);
	} else
		return xics_rm_h_xirr(vcpu);
}

unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	vcpu->arch.regs.gpr[5] = get_tb();
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_xirr(vcpu);
		if (unlikely(!__xive_vm_h_xirr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_xirr(vcpu);
	} else
		return xics_rm_h_xirr(vcpu);
}
unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_ipoll(vcpu, server);
		if (unlikely(!__xive_vm_h_ipoll))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_ipoll(vcpu, server);
	} else
		return H_TOO_HARD;
}

int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_ipi(vcpu, server, mfrr);
		if (unlikely(!__xive_vm_h_ipi))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_ipi(vcpu, server, mfrr);
	} else
		return xics_rm_h_ipi(vcpu, server, mfrr);
}

int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_cppr(vcpu, cppr);
		if (unlikely(!__xive_vm_h_cppr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_cppr(vcpu, cppr);
	} else
		return xics_rm_h_cppr(vcpu, cppr);
}

int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_eoi(vcpu, xirr);
		if (unlikely(!__xive_vm_h_eoi))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_eoi(vcpu, xirr);
	} else
		return xics_rm_h_eoi(vcpu, xirr);
}
#endif /* CONFIG_KVM_XICS */
void kvmppc_bad_interrupt(struct pt_regs *regs)
{
	/*
	 * 100 could happen at any time, 200 can happen due to invalid real
	 * address access for example (or any time due to a hardware problem).
	 */
	if (TRAP(regs) == 0x100) {
		get_paca()->in_nmi++;
		system_reset_exception(regs);
		get_paca()->in_nmi--;
	} else if (TRAP(regs) == 0x200) {
		machine_check_exception(regs);
	} else {
		die("Bad interrupt in KVM entry/exit code", regs, SIGABRT);
	}
	panic("Bad KVM trap");
}
/*
 * Functions used to switch LPCR HR and UPRT bits on all threads
 * when entering and exiting HPT guests on a radix host.
 */

#define PHASE_REALMODE		1	/* in real mode */
#define PHASE_SET_LPCR		2	/* have set LPCR */
#define PHASE_OUT_OF_GUEST	4	/* have finished executing in guest */
#define PHASE_RESET_LPCR	8	/* have reset LPCR to host value */

#define ALL(p)		(((p) << 24) | ((p) << 16) | ((p) << 8) | (p))
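/*
 * For example, ALL(PHASE_SET_LPCR) expands to 0x02020202: one copy of the
 * phase bit in each 8-bit lane, so a single compare against
 * lpcr_sync.allphases can check that all four threads of the core have
 * reached a given phase.
 */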
static void wait_for_sync(struct kvm_split_mode *sip, int phase)
{
	int thr = local_paca->kvm_hstate.tid;

	sip->lpcr_sync.phase[thr] |= phase;
	phase = ALL(phase);
	while ((sip->lpcr_sync.allphases & phase) != phase) {
		HMT_low();
		barrier();
	}
	HMT_medium();
}
void kvmhv_p9_set_lpcr(struct kvm_split_mode *sip)
{
	unsigned long rb, set;

	/* wait for every other thread to get to real mode */
	wait_for_sync(sip, PHASE_REALMODE);

	/* Set LPCR and LPIDR */
	mtspr(SPRN_LPCR, sip->lpcr_req);
	mtspr(SPRN_LPID, sip->lpidr_req);
	isync();

	/* Invalidate the TLB on thread 0 */
	if (local_paca->kvm_hstate.tid == 0) {
		sip->do_set = 0;
		asm volatile("ptesync" : : : "memory");
		for (set = 0; set < POWER9_TLB_SETS_RADIX; ++set) {
			rb = TLBIEL_INVAL_SET_LPID +
				(set << TLBIEL_INVAL_SET_SHIFT);
			asm volatile(PPC_TLBIEL(%0, %1, 0, 0, 0) : :
				     "r" (rb), "r" (0));
		}
		asm volatile("ptesync" : : : "memory");
	}

	/* indicate that we have done so and wait for others */
	wait_for_sync(sip, PHASE_SET_LPCR);
	/* order read of sip->lpcr_sync.allphases vs. sip->do_set */
	smp_rmb();
}
/*
 * Called when a thread that has been in the guest needs
 * to reload the host LPCR value - but only on POWER9 when
 * running a HPT guest on a radix host.
 */
void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip)
{
	/* we're out of the guest... */
	wait_for_sync(sip, PHASE_OUT_OF_GUEST);

	mtspr(SPRN_LPID, 0);
	mtspr(SPRN_LPCR, sip->host_lpcr);
	isync();

	if (local_paca->kvm_hstate.tid == 0) {
		sip->do_restore = 0;
		smp_wmb();	/* order store of do_restore vs. phase */
	}

	wait_for_sync(sip, PHASE_RESET_LPCR);
	smp_mb();
	local_paca->kvm_hstate.kvm_split_mode = NULL;
}
static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
{
	vcpu->arch.ceded = 0;
	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
}
void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
{
	/*
	 * Check for illegal transactional state bit combination
	 * and if we find it, force the TS field to a safe state.
	 */
	if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
		msr &= ~MSR_TS_MASK;
	vcpu->arch.shregs.msr = msr;
	kvmppc_end_cede(vcpu);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr_hv);
static void inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
{
	unsigned long msr, pc, new_msr, new_pc;

	msr = kvmppc_get_msr(vcpu);
	pc = kvmppc_get_pc(vcpu);
	new_msr = vcpu->arch.intr_msr;
	new_pc = vec;

	/* If transactional, change to suspend mode on IRQ delivery */
	if (MSR_TM_TRANSACTIONAL(msr))
		new_msr |= MSR_TS_S;
	else
		new_msr |= msr & MSR_TS_MASK;

	/*
	 * Perform MSR and PC adjustment for LPCR[AIL]=3 if it is set and
	 * applicable. AIL=2 is not supported.
	 *
	 * AIL does not apply to SRESET, MCE, or HMI (which is never
	 * delivered to the guest), and does not apply if IR=0 or DR=0.
	 */
	if (vec != BOOK3S_INTERRUPT_SYSTEM_RESET &&
	    vec != BOOK3S_INTERRUPT_MACHINE_CHECK &&
	    (vcpu->arch.vcore->lpcr & LPCR_AIL) == LPCR_AIL_3 &&
	    (msr & (MSR_IR|MSR_DR)) == (MSR_IR|MSR_DR)) {
		new_msr |= MSR_IR | MSR_DR;
		new_pc += 0xC000000000004000ULL;
	}

	kvmppc_set_srr0(vcpu, pc);
	kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
	kvmppc_set_pc(vcpu, new_pc);
	vcpu->arch.shregs.msr = new_msr;
}
void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
{
	inject_interrupt(vcpu, vec, srr1_flags);
	kvmppc_end_cede(vcpu);
}
EXPORT_SYMBOL_GPL(kvmppc_inject_interrupt_hv);
/*
 * Is there a PRIV_DOORBELL pending for the guest (on POWER9)?
 * Can we inject a Decrementer or an External interrupt?
 */
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu)
{
	int ext;
	unsigned long lpcr;

	/* Insert EXTERNAL bit into LPCR at the MER bit position */
	ext = (vcpu->arch.pending_exceptions >> BOOK3S_IRQPRIO_EXTERNAL) & 1;
	lpcr = mfspr(SPRN_LPCR);
	lpcr |= ext << LPCR_MER_SH;
	mtspr(SPRN_LPCR, lpcr);
	isync();

	if (vcpu->arch.shregs.msr & MSR_EE) {
		if (ext) {
			inject_interrupt(vcpu, BOOK3S_INTERRUPT_EXTERNAL, 0);
		} else {
			long int dec = mfspr(SPRN_DEC);
			if (!(lpcr & LPCR_LD))
				dec = (int) dec;
			if (dec < 0)
				inject_interrupt(vcpu,
					BOOK3S_INTERRUPT_DECREMENTER, 0);
		}
	}

	if (vcpu->arch.doorbell_request) {
		mtspr(SPRN_DPDES, 1);
		vcpu->arch.vcore->dpdes = 1;
		smp_wmb();
		vcpu->arch.doorbell_request = 0;
	}
}
static void flush_guest_tlb(struct kvm *kvm)
{
	unsigned long rb, set;

	rb = PPC_BIT(52);	/* IS = 2 */
	if (kvm_is_radix(kvm)) {
		/* R=1 PRS=1 RIC=2 */
		asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
			     : : "r" (rb), "i" (1), "i" (1), "i" (2),
			       "r" (0) : "memory");
		for (set = 1; set < kvm->arch.tlb_sets; ++set) {
			rb += PPC_BIT(51);	/* increment set number */
			/* R=1 PRS=1 RIC=0 */
			asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
				     : : "r" (rb), "i" (1), "i" (1), "i" (0),
				       "r" (0) : "memory");
		}
		asm volatile("ptesync": : :"memory");
		asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory");
	} else {
		for (set = 0; set < kvm->arch.tlb_sets; ++set) {
			/* R=0 PRS=0 RIC=0 */
			asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
				     : : "r" (rb), "i" (0), "i" (0), "i" (0),
				       "r" (0) : "memory");
			rb += PPC_BIT(51);	/* increment set number */
		}
		asm volatile("ptesync": : :"memory");
		asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
	}
}
void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
				 struct kvm_nested_guest *nested)
{
	cpumask_t *need_tlb_flush;

	/*
	 * On POWER9, individual threads can come in here, but the
	 * TLB is shared between the 4 threads in a core, hence
	 * invalidating on one thread invalidates for all.
	 * Thus we make all 4 threads use the same bit.
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		pcpu = cpu_first_thread_sibling(pcpu);

	if (nested)
		need_tlb_flush = &nested->need_tlb_flush;
	else
		need_tlb_flush = &kvm->arch.need_tlb_flush;

	if (cpumask_test_cpu(pcpu, need_tlb_flush)) {
		flush_guest_tlb(kvm);

		/* Clear the bit after the TLB flush */
		cpumask_clear_cpu(pcpu, need_tlb_flush);
	}
}
EXPORT_SYMBOL_GPL(kvmppc_check_need_tlb_flush);