/*
 * Copyright 2012 Michael Ellerman, IBM Corporation.
 * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>

#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/debug.h>
#include <asm/synch.h>
#include <asm/cputhreads.h>
#include <asm/ppc-opcode.h>

#include "book3s_xics.h"
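
/*
 * h_ipi_redirect selects whether, when a target vCPU's core is not loaded,
 * the real-mode wakeup is redirected to another host core via an IPI instead
 * of being left for virtual mode to handle (see its use in
 * icp_rm_set_vcpu_irq() below).
 */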
int h_ipi_redirect = 1;
EXPORT_SYMBOL(h_ipi_redirect);

static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq);

/* -- ICS routines -- */

static void ics_rm_check_resend(struct kvmppc_xics *xics,
				struct kvmppc_ics *ics, struct kvmppc_icp *icp)
{
	int i;

	arch_spin_lock(&ics->lock);

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct ics_irq_state *state = &ics->irq_state[i];

		if (!state->resend)
			continue;

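		/*
		 * icp_rm_deliver_irq() takes the ICS lock itself (see below),
		 * so drop it around the delivery and re-take it before
		 * continuing the scan.
		 */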
		arch_spin_unlock(&ics->lock);
		icp_rm_deliver_irq(xics, icp, state->number);
		arch_spin_lock(&ics->lock);
	}

	arch_spin_unlock(&ics->lock);
}

/* -- ICP routines -- */
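
/*
 * Priority convention used throughout this file (CPPR, MFRR, pending_pri):
 * numerically lower values are more favored, and 0xff is used as the
 * "nothing pending" / least favored value.
 */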

#ifdef CONFIG_SMP
static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu)
{
	int hcpu;

	hcpu = hcore << threads_shift;
	kvmppc_host_rm_ops_hv->rm_core[hcore].rm_data = vcpu;
	smp_muxed_ipi_set_message(hcpu, PPC_MSG_RM_HOST_ACTION);
	icp_native_cause_ipi_rm(hcpu);
}
#else
static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu) { }
#endif

/*
 * We start the search from our current CPU Id in the core map
 * and go in a circle until we get back to our ID looking for a
 * core that is running in host context and that hasn't already
 * been targeted for another rm_host_ops.
 *
 * In the future, could consider using a fairer algorithm (one
 * that distributes the IPIs better)
 *
 * Returns -1, if no CPU could be found in the host
 * Else, returns a CPU Id which has been reserved for use
 */
static inline int grab_next_hostcore(int start,
		struct kvmppc_host_rm_core *rm_core, int max, int action)
{
	bool success;
	int core;
	union kvmppc_rm_state old, new;

	for (core = start + 1; core < max; core++) {
		old = new = READ_ONCE(rm_core[core].rm_state);

		if (!old.in_host || old.rm_action)
			continue;

		/* Try to grab this host core if not taken already. */
		new.rm_action = action;

		success = cmpxchg64(&rm_core[core].rm_state.raw,
					old.raw, new.raw) == old.raw;
		if (success) {
			/*
			 * Make sure that the store to the rm_action is made
			 * visible before we return to caller (and the
			 * subsequent store to rm_data) to synchronize with
			 * the load of rm_data in kvmppc_xics_ipi_action().
			 */
			smp_wmb();
			return core;
		}
	}

	return -1;
}

static inline int find_available_hostcore(int action)
{
	int core;
	int my_core = smp_processor_id() >> threads_shift;
	struct kvmppc_host_rm_core *rm_core = kvmppc_host_rm_ops_hv->rm_core;

	core = grab_next_hostcore(my_core, rm_core, cpu_nr_cores(), action);
	if (core == -1)
		core = grab_next_hostcore(core, rm_core, my_core, action);

	return core;
}
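
/*
 * Example of the wraparound search above: with cpu_nr_cores() == 4 and
 * my_core == 2, the first grab_next_hostcore() call scans core 3, and if
 * that fails the second call (starting from -1) scans cores 0 and 1.
 */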

static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
				struct kvm_vcpu *this_vcpu)
{
	struct kvmppc_icp *this_icp = this_vcpu->arch.icp;
	int cpu;
	int hcore;

	/* Mark the target VCPU as having an interrupt pending */
	vcpu->stat.queue_intr++;
	set_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);

	/* Kick self ? Just set MER and return */
	if (vcpu == this_vcpu) {
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_MER);
		return;
	}

	/*
	 * Check if the core is loaded,
	 * if not, find an available host core to post to wake the VCPU,
	 * if we can't find one, set up state to eventually return too hard.
	 */
	cpu = vcpu->arch.thread_cpu;
	if (cpu < 0 || cpu >= nr_cpu_ids) {
		hcore = -1;
		if (kvmppc_host_rm_ops_hv && h_ipi_redirect)
			hcore = find_available_hostcore(XICS_RM_KICK_VCPU);
		if (hcore != -1) {
			icp_send_hcore_msg(hcore, vcpu);
		} else {
			this_icp->rm_action |= XICS_RM_KICK_VCPU;
			this_icp->rm_kick_target = vcpu;
		}
		return;
	}

	kvmhv_rm_send_ipi(cpu);
}

static void icp_rm_clr_vcpu_irq(struct kvm_vcpu *vcpu)
{
	/* Note: Only called on self ! */
	clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL,
		  &vcpu->arch.pending_exceptions);
	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_MER);
}
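
/*
 * Both helpers above use LPCR[MER] (mediated external request) to raise or
 * clear a pending external interrupt for the guest; the guest takes the
 * interrupt once it enables MSR[EE].
 */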

static inline bool icp_rm_try_update(struct kvmppc_icp *icp,
				     union kvmppc_icp_state old,
				     union kvmppc_icp_state new)
{
	bool success;
	struct kvm_vcpu *this_vcpu = local_paca->kvm_hstate.kvm_vcpu;

	/* Calculate new output value */
	new.out_ee = (new.xisr && (new.pending_pri < new.cppr));

	/* Attempt atomic update */
	success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
	if (!success)
		goto bail;

	/*
	 * Check for output state update
	 *
	 * Note that this is racy since another processor could be updating
	 * the state already. This is why we never clear the interrupt output
	 * here, we only ever set it. The clear only happens prior to doing
	 * an update and only by the processor itself. Currently we do it
	 * in Accept (H_XIRR) and Up_Cppr (H_CPPR).
	 *
	 * We also do not try to figure out whether the EE state has changed,
	 * we unconditionally set it if the new state calls for it. The reason
	 * for that is that we opportunistically remove the pending interrupt
	 * flag when raising CPPR, so we need to set it back here if an
	 * interrupt is still pending.
	 */
	if (new.out_ee)
		icp_rm_set_vcpu_irq(icp->vcpu, this_vcpu);

	/* Expose the state change for debug purposes */
	this_vcpu->arch.icp->rm_dbgstate = new;
	this_vcpu->arch.icp->rm_dbgtgt = icp->vcpu;

 bail:
	return success;
}
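
/*
 * A minimal sketch of how the hcall handlers below drive icp_rm_try_update():
 * snapshot the ICP state, compute the desired new state, and redo the whole
 * computation if the cmpxchg lost a race with another update:
 *
 *	do {
 *		old_state = new_state = READ_ONCE(icp->state);
 *		... modify new_state (cppr/mfrr/xisr/pending_pri/need_resend) ...
 *	} while (!icp_rm_try_update(icp, old_state, new_state));
 */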

static inline int check_too_hard(struct kvmppc_xics *xics,
				 struct kvmppc_icp *icp)
{
	return (xics->real_mode_dbg || icp->rm_action) ? H_TOO_HARD : H_SUCCESS;
}

static void icp_rm_check_resend(struct kvmppc_xics *xics,
				struct kvmppc_icp *icp)
{
	u32 icsid;

	/* Order this load with the test for need_resend in the caller */
	smp_rmb();
	for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!test_and_clear_bit(icsid, icp->resend_map))
			continue;
		if (!ics)
			continue;
		ics_rm_check_resend(xics, ics, icp);
	}
}
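
/*
 * Delivery test used below: an interrupt of a given priority can only be
 * delivered when the CPPR, the MFRR and the currently pending priority are
 * all numerically greater (i.e. less favored) than that priority. For
 * example, with cppr, mfrr and pending_pri all at 0xff, any priority from
 * 0x00 to 0xfe can be delivered.
 */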
static bool icp_rm_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
				  u32 *reject)
{
	union kvmppc_icp_state old_state, new_state;
	bool success;

	do {
		old_state = new_state = READ_ONCE(icp->state);

		*reject = 0;

		/* See if we can deliver */
		success = new_state.cppr > priority &&
			new_state.mfrr > priority &&
			new_state.pending_pri > priority;

		/*
		 * If we can, check for a rejection and perform the
		 * delivery
		 */
		if (success) {
			*reject = new_state.xisr;
			new_state.xisr = irq;
			new_state.pending_pri = priority;
		} else {
			/*
			 * If we failed to deliver we set need_resend
			 * so a subsequent CPPR state change causes us
			 * to try a new delivery.
			 */
			new_state.need_resend = true;
		}

	} while (!icp_rm_try_update(icp, old_state, new_state));

	return success;
}

static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u32 reject;
	u16 src;

	/*
	 * This is used both for initial delivery of an interrupt and
	 * for subsequent rejection.
	 *
	 * Rejection can be racy vs. resends. We have evaluated the
	 * rejection in an atomic ICP transaction which is now complete,
	 * so potentially the ICP can already accept the interrupt again.
	 *
	 * So we need to retry the delivery. Essentially the reject path
	 * boils down to a failed delivery. Always.
	 *
	 * Now the interrupt could also have moved to a different target,
	 * thus we may need to re-do the ICP lookup as well
	 */
 again:
	/* Get the ICS state and lock it */
	ics = kvmppc_xics_find_ics(xics, new_irq, &src);
	if (!ics) {
		/* Unsafe increment, but this does not need to be accurate */
		xics->err_noics++;
		return;
	}
	state = &ics->irq_state[src];

	/* Get a lock on the ICS */
	arch_spin_lock(&ics->lock);

	if (!icp || state->server != icp->server_num) {
		icp = kvmppc_xics_find_server(xics->kvm, state->server);
		if (!icp) {
			/* Unsafe increment again*/
			xics->err_noicp++;
			goto out;
		}
	}

	/* Clear the resend bit of that interrupt */
	state->resend = 0;

	/*
	 * If masked, bail out
	 *
	 * Note: PAPR doesn't mention anything about masked pending
	 * when doing a resend, only when doing a delivery.
	 *
	 * However that would have the effect of losing a masked
	 * interrupt that was rejected and isn't consistent with
	 * the whole masked_pending business which is about not
	 * losing interrupts that occur while masked.
	 *
	 * I don't differentiate normal deliveries and resends, this
	 * implementation will differ from PAPR and not lose such
	 * interrupts.
	 */
	if (state->priority == MASKED) {
		state->masked_pending = 1;
		goto out;
	}

	/*
	 * Try the delivery, this will set the need_resend flag
	 * in the ICP as part of the atomic transaction if the
	 * delivery is not possible.
	 *
	 * Note that if successful, the new delivery might have itself
	 * rejected an interrupt that was "delivered" before we took the
	 * ICS spin lock.
	 *
	 * In this case we do the whole sequence all over again for the
	 * new guy. We cannot assume that the rejected interrupt is less
	 * favored than the new one, and thus doesn't need to be delivered,
	 * because by the time we exit icp_rm_try_to_deliver() the target
	 * processor may well have already consumed & completed it, and thus
	 * the rejected interrupt might actually be already acceptable.
	 */
	if (icp_rm_try_to_deliver(icp, new_irq, state->priority, &reject)) {
		/*
		 * Delivery was successful, did we reject somebody else ?
		 */
		if (reject && reject != XICS_IPI) {
			arch_spin_unlock(&ics->lock);
			new_irq = reject;
			goto again;
		}
	} else {
		/*
		 * We failed to deliver the interrupt we need to set the
		 * resend map bit and mark the ICS state as needing a resend
		 */
		set_bit(ics->icsid, icp->resend_map);
		state->resend = 1;

		/*
		 * If the need_resend flag got cleared in the ICP some time
		 * between icp_rm_try_to_deliver() atomic update and now, then
		 * we know it might have missed the resend_map bit. So we
		 * retry
		 */
		if (!icp->state.need_resend) {
			arch_spin_unlock(&ics->lock);
			goto again;
		}
	}
 out:
	arch_spin_unlock(&ics->lock);
}

static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			     u8 new_cppr)
{
	union kvmppc_icp_state old_state, new_state;
	bool resend;

	/*
	 * This handles several related states in one operation:
	 *
	 * ICP State: Down_CPPR
	 *
	 * Load CPPR with new value and if the XISR is 0
	 * then check for resends:
	 *
	 * ICP State: Resend
	 *
	 * If MFRR is more favored than CPPR, check for IPIs
	 * and notify ICS of a potential resend. This is done
	 * asynchronously (when used in real mode, we will have
	 * to exit here).
	 *
	 * We do not handle the complete Check_IPI as documented
	 * here. In the PAPR, this state will be used for both
	 * Set_MFRR and Down_CPPR. However, we know that we aren't
	 * changing the MFRR state here so we don't need to handle
	 * the case of an MFRR causing a reject of a pending irq,
	 * this will have been handled when the MFRR was set in the
	 * first place.
	 *
	 * Thus we don't have to handle rejects, only resends.
	 *
	 * When implementing real mode for HV KVM, resend will lead to
	 * a H_TOO_HARD return and the whole transaction will be handled
	 * in virtual mode.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		new_state.cppr = new_cppr;

		/*
		 * Cut down Resend / Check_IPI / IPI
		 *
		 * The logic is that we cannot have a pending interrupt
		 * trumped by an IPI at this point (see above), so we
		 * know that either the pending interrupt is already an
		 * IPI (in which case we don't care to override it) or
		 * it's either more favored than us or non existent
		 */
		if (new_state.mfrr < new_cppr &&
		    new_state.mfrr <= new_state.pending_pri) {
			new_state.pending_pri = new_state.mfrr;
			new_state.xisr = XICS_IPI;
		}

		/* Latch/clear resend bit */
		resend = new_state.need_resend;
		new_state.need_resend = 0;

	} while (!icp_rm_try_update(icp, old_state, new_state));

	/*
	 * Now handle resend checks. Those are asynchronous to the ICP
	 * state update in HW (ie bus transactions) so we can handle them
	 * separately here as well.
	 */
	if (resend) {
		icp->n_check_resend++;
		icp_rm_check_resend(xics, icp);
	}
}
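
/*
 * H_XIRR returns the pending source and the previous CPPR packed into one
 * word: CPPR in the top byte, XISR in the low 24 bits, e.g. cppr 0x05 with
 * xisr 0x1000 yields 0x05001000 (see the xirr computation below).
 */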
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 xirr;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	/* First clear the interrupt */
	icp_rm_clr_vcpu_irq(icp->vcpu);

	/*
	 * ICP State: Accept_Interrupt
	 *
	 * Return the pending interrupt (if any) along with the
	 * current CPPR, then clear the XISR & set CPPR to the
	 * pending priority
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
		if (!old_state.xisr)
			break;
		new_state.cppr = new_state.pending_pri;
		new_state.pending_pri = 0xff;
		new_state.xisr = 0;

	} while (!icp_rm_try_update(icp, old_state, new_state));

	/* Return the result in GPR4 */
	vcpu->arch.gpr[4] = xirr;

	return check_too_hard(xics, icp);
}

int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp, *this_icp = vcpu->arch.icp;
	u32 reject;
	bool resend;
	bool local;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	local = this_icp->server_num == server;
	if (local)
		icp = this_icp;
	else
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
	if (!icp)
		return H_PARAMETER;

	/*
	 * ICP state: Set_MFRR
	 *
	 * If the CPPR is more favored than the new MFRR, then
	 * nothing needs to be done as there can be no XISR to
	 * reject.
	 *
	 * ICP state: Check_IPI
	 *
	 * If the CPPR is less favored, then we might be replacing
	 * an interrupt, and thus need to possibly reject it.
	 *
	 * ICP state: IPI
	 *
	 * Besides rejecting any pending interrupts, we also
	 * update XISR and pending_pri to mark IPI as pending.
	 *
	 * PAPR does not describe this state, but if the MFRR is being
	 * made less favored than its earlier value, there might be
	 * a previously-rejected interrupt needing to be resent.
	 * Ideally, we would want to resend only if
	 *	prio(pending_interrupt) < mfrr &&
	 *	prio(pending_interrupt) < cppr
	 * where pending interrupt is the one that was rejected. But
	 * we don't have that state, so we simply trigger a resend
	 * whenever the MFRR is made less favored.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		new_state.mfrr = mfrr;

		reject = 0;
		resend = false;
		if (mfrr < new_state.cppr) {
			/* Reject a pending interrupt if not an IPI */
			if (mfrr <= new_state.pending_pri) {
				reject = new_state.xisr;
				new_state.pending_pri = mfrr;
				new_state.xisr = XICS_IPI;
			}
		}

		if (mfrr > old_state.mfrr) {
			resend = new_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_rm_try_update(icp, old_state, new_state));
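
	/*
	 * Worked example of the loop above: if the CPPR is 0x08 and an
	 * interrupt of priority 0x05 is pending, an H_IPI with mfrr 0x03
	 * rejects that interrupt (it is redelivered below) and records the
	 * IPI as the new pending source.
	 */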

	/* Handle reject in real mode */
	if (reject && reject != XICS_IPI) {
		this_icp->n_reject++;
		icp_rm_deliver_irq(xics, icp, reject);
	}

	/* Handle resends in real mode */
	if (resend) {
		this_icp->n_check_resend++;
		icp_rm_check_resend(xics, icp);
	}

	return check_too_hard(xics, this_icp);
}

int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 reject;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	/*
	 * ICP State: Set_CPPR
	 *
	 * We can safely compare the new value with the current
	 * value outside of the transaction as the CPPR is only
	 * ever changed by the processor on itself
	 */
	if (cppr > icp->state.cppr) {
		icp_rm_down_cppr(xics, icp, cppr);
		goto bail;
	} else if (cppr == icp->state.cppr)
		return H_SUCCESS;

	/*
	 * The processor is raising its priority, this can result
	 * in a rejection of a pending interrupt:
	 *
	 * ICP State: Reject_Current
	 *
	 * We can remove EE from the current processor, the update
	 * transaction will set it again if needed
	 */
	icp_rm_clr_vcpu_irq(icp->vcpu);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		reject = 0;
		new_state.cppr = cppr;

		if (cppr <= new_state.pending_pri) {
			reject = new_state.xisr;
			new_state.xisr = 0;
			new_state.pending_pri = 0xff;
		}

	} while (!icp_rm_try_update(icp, old_state, new_state));

	/*
	 * Check for rejects. They are handled by doing a new delivery
	 * attempt (see comments in icp_rm_deliver_irq).
	 */
	if (reject && reject != XICS_IPI) {
		icp->n_reject++;
		icp_rm_deliver_irq(xics, icp, reject);
	}
 bail:
	return check_too_hard(xics, icp);
}

int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u32 irq = xirr & 0x00ffffff;
	u16 src;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	/*
	 * ICP State: EOI
	 *
	 * Note: If EOI is incorrectly used by SW to lower the CPPR
	 * value (ie more favored), we do not check for rejection of
	 * a pending interrupt, this is a SW error and PAPR specifies
	 * that we don't have to deal with it.
	 *
	 * The sending of an EOI to the ICS is handled after the
	 * CPPR update
	 *
	 * ICP State: Down_CPPR which we handle
	 * in a separate function as it's shared with H_CPPR.
	 */
	icp_rm_down_cppr(xics, icp, xirr >> 24);

	/* IPIs have no EOI */
	if (irq == XICS_IPI)
		goto bail;
	/*
	 * EOI handling: If the interrupt is still asserted, we need to
	 * resend it. We can take a lockless "peek" at the ICS state here.
	 *
	 * "Message" interrupts will never have "asserted" set
	 */
	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		goto bail;
	state = &ics->irq_state[src];

	/* Still asserted, resend it */
	if (state->asserted) {
		icp->n_reject++;
		icp_rm_deliver_irq(xics, icp, irq);
	}

	if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) {
		icp->rm_action |= XICS_RM_NOTIFY_EOI;
		icp->rm_eoied_irq = irq;
	}
 bail:
	return check_too_hard(xics, icp);
}

/* --- Non-real mode XICS-related built-in routines --- */

/*
 * Host Operations poked by RM KVM
 */
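
/*
 * Hand-off from real mode: icp_send_hcore_msg() above stores the target vcpu
 * in rm_core[hcore].rm_data (after grab_next_hostcore() has claimed the core
 * by setting rm_action) and sends PPC_MSG_RM_HOST_ACTION. The chosen host
 * core then runs kvmppc_xics_ipi_action() below, performs the requested
 * action and clears rm_data and rm_action.
 */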

static void rm_host_ipi_action(int action, void *data)
{
	switch (action) {
	case XICS_RM_KICK_VCPU:
		kvmppc_host_rm_ops_hv->vcpu_kick(data);
		break;
	default:
		WARN(1, "Unexpected rm_action=%d data=%p\n", action, data);
		break;
	}
}

void kvmppc_xics_ipi_action(void)
{
	int core;
	unsigned int cpu = smp_processor_id();
	struct kvmppc_host_rm_core *rm_corep;

	core = cpu >> threads_shift;
	rm_corep = &kvmppc_host_rm_ops_hv->rm_core[core];

	if (rm_corep->rm_data) {
		rm_host_ipi_action(rm_corep->rm_state.rm_action,
							rm_corep->rm_data);
		/* Order these stores against the real mode KVM */
		rm_corep->rm_data = NULL;
		smp_wmb();
		rm_corep->rm_state.rm_action = 0;
	}
}