/*
 * Copyright 2012 Michael Ellerman, IBM Corporation.
 * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/anon_inodes.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/debugfs.h>

#include <linux/seq_file.h>

#include "book3s_xics.h"

#if 1
#define XICS_DBG(fmt...) do { } while (0)
#else
#define XICS_DBG(fmt...) trace_printk(fmt)
#endif

#define ENABLE_REALMODE true
#define DEBUG_REALMODE  false

/*
 * LOCKING
 * =======
 *
 * Each ICS has a spin lock protecting the information about the IRQ
 * sources and avoiding simultaneous deliveries of the same interrupt.
 *
 * ICP operations are done via a single compare & swap transaction
 * (most ICP state fits in the union kvmppc_icp_state)
 */

/*
 * TODO
 * ====
 *
 * - To speed up resends, keep a bitmap of "resend" set bits in the
 *   ICS
 *
 * - Speed up server# -> ICP lookup (array ? hash table ?)
 *
 * - Make ICS lockless as well, or at least a per-interrupt lock or hashed
 *   locks array to improve scalability
 */

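/*
 * A minimal sketch of the ICP update pattern used throughout this file
 * (illustrative only, in terms of the helpers defined below): snapshot
 * the state, build a modified copy, and let the compare & swap either
 * publish the copy atomically or force a retry:
 *
 *      do {
 *              old_state = new_state = READ_ONCE(icp->state);
 *              ... modify new_state ...
 *      } while (!icp_try_update(icp, old_state, new_state, change_self));
 *
 * icp_try_update() recomputes the EE output line and issues a cmpxchg64()
 * on the raw 64-bit union, so a racing writer simply causes another pass
 * around the loop.
 */
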
/* -- ICS routines -- */

static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
                            u32 new_irq, bool check_resend);

/*
 * Return value ideally indicates how the interrupt was handled, but no
 * callers look at it (given that we don't implement KVM_IRQ_LINE_STATUS),
 * so just return 0.
 */
static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level)
{
        struct ics_irq_state *state;
        struct kvmppc_ics *ics;
        u16 src;
        u32 pq_old, pq_new;

        XICS_DBG("ics deliver %#x (level: %d)\n", irq, level);

        ics = kvmppc_xics_find_ics(xics, irq, &src);
        if (!ics) {
                XICS_DBG("ics_deliver_irq: IRQ 0x%06x not found !\n", irq);
                return -EINVAL;
        }
        state = &ics->irq_state[src];

        if (level == KVM_INTERRUPT_SET_LEVEL || level == KVM_INTERRUPT_SET)
                level = 1;
        else if (level == KVM_INTERRUPT_UNSET)
                level = 0;
        /*
         * Take other values the same as 1, consistent with original code.
         * maybe WARN here?
         */

        if (!state->lsi && level == 0) /* noop for MSI */
                return 0;

        do {
                pq_old = state->pq_state;
                if (state->lsi) {
                        if (level) {
                                if (pq_old & PQ_PRESENTED)
                                        /* Setting already set LSI ... */
                                        return 0;

                                pq_new = PQ_PRESENTED;
                        } else
                                pq_new = pq_old & ~PQ_PRESENTED;
                } else
                        pq_new = ((pq_old << 1) & 3) | PQ_PRESENTED;
        } while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);

        /* Test P=1, Q=0, this is the only case where we present */
        if (pq_new == PQ_PRESENTED)
                icp_deliver_irq(xics, NULL, irq, false);

        /* Record which CPU this arrived on for passed-through interrupts */
        if (state->host_irq)
                state->intr_cpu = raw_smp_processor_id();

        return 0;
}

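/*
 * Worked example for the MSI update above (illustrative): the shifts
 * imply P is the low pq_state bit with Q one bit above it, so
 * pq_new = ((pq_old << 1) & 3) | PQ_PRESENTED moves the old P into Q
 * and sets P:
 *
 *      pq_old = 0 (idle)       -> pq_new = PQ_PRESENTED: present it
 *      pq_old = PQ_PRESENTED   -> pq_new = PQ_PRESENTED|PQ_QUEUED:
 *                                 queued behind the presented one
 *
 * Only the P=1,Q=0 result calls icp_deliver_irq(); a queued interrupt
 * is re-presented later when ics_eoi() shifts Q back into P.
 */
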
static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
                             struct kvmppc_icp *icp)
{
        int i;

        for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
                struct ics_irq_state *state = &ics->irq_state[i];
                if (state->resend) {
                        XICS_DBG("resend %#x prio %#x\n", state->number,
                                 state->priority);
                        icp_deliver_irq(xics, icp, state->number, true);
                }
        }
}

static bool write_xive(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
                       struct ics_irq_state *state,
                       u32 server, u32 priority, u32 saved_priority)
{
        bool deliver;
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&ics->lock);

        state->server = server;
        state->priority = priority;
        state->saved_priority = saved_priority;
        deliver = false;
        if ((state->masked_pending || state->resend) && priority != MASKED) {
                state->masked_pending = 0;
                state->resend = 0;
                deliver = true;
        }

        arch_spin_unlock(&ics->lock);
        local_irq_restore(flags);

        return deliver;
}

int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority)
{
        struct kvmppc_xics *xics = kvm->arch.xics;
        struct kvmppc_icp *icp;
        struct kvmppc_ics *ics;
        struct ics_irq_state *state;
        u16 src;

        if (!xics)
                return -ENODEV;

        ics = kvmppc_xics_find_ics(xics, irq, &src);
        if (!ics)
                return -EINVAL;
        state = &ics->irq_state[src];

        icp = kvmppc_xics_find_server(kvm, server);
        if (!icp)
                return -EINVAL;

        XICS_DBG("set_xive %#x server %#x prio %#x MP:%d RS:%d\n",
                 irq, server, priority,
                 state->masked_pending, state->resend);

        if (write_xive(xics, ics, state, server, priority, priority))
                icp_deliver_irq(xics, icp, irq, false);

        return 0;
}

int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server, u32 *priority)
{
        struct kvmppc_xics *xics = kvm->arch.xics;
        struct kvmppc_ics *ics;
        struct ics_irq_state *state;
        u16 src;
        unsigned long flags;

        if (!xics)
                return -ENODEV;

        ics = kvmppc_xics_find_ics(xics, irq, &src);
        if (!ics)
                return -EINVAL;
        state = &ics->irq_state[src];

        local_irq_save(flags);
        arch_spin_lock(&ics->lock);
        *server = state->server;
        *priority = state->priority;
        arch_spin_unlock(&ics->lock);
        local_irq_restore(flags);

        return 0;
}

int kvmppc_xics_int_on(struct kvm *kvm, u32 irq)
{
        struct kvmppc_xics *xics = kvm->arch.xics;
        struct kvmppc_icp *icp;
        struct kvmppc_ics *ics;
        struct ics_irq_state *state;
        u16 src;

        if (!xics)
                return -ENODEV;

        ics = kvmppc_xics_find_ics(xics, irq, &src);
        if (!ics)
                return -EINVAL;
        state = &ics->irq_state[src];

        icp = kvmppc_xics_find_server(kvm, state->server);
        if (!icp)
                return -EINVAL;

        if (write_xive(xics, ics, state, state->server, state->saved_priority,
                       state->saved_priority))
                icp_deliver_irq(xics, icp, irq, false);

        return 0;
}

int kvmppc_xics_int_off(struct kvm *kvm, u32 irq)
{
        struct kvmppc_xics *xics = kvm->arch.xics;
        struct kvmppc_ics *ics;
        struct ics_irq_state *state;
        u16 src;

        if (!xics)
                return -ENODEV;

        ics = kvmppc_xics_find_ics(xics, irq, &src);
        if (!ics)
                return -EINVAL;
        state = &ics->irq_state[src];

        write_xive(xics, ics, state, state->server, MASKED, state->priority);

        return 0;
}

/* -- ICP routines, including hcalls -- */

static inline bool icp_try_update(struct kvmppc_icp *icp,
                                  union kvmppc_icp_state old,
                                  union kvmppc_icp_state new,
                                  bool change_self)
{
        bool success;

        /* Calculate new output value */
        new.out_ee = (new.xisr && (new.pending_pri < new.cppr));

        /* Attempt atomic update */
        success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
        if (!success)
                goto bail;

        XICS_DBG("UPD [%04lx] - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
                 icp->server_num,
                 old.cppr, old.mfrr, old.pending_pri, old.xisr,
                 old.need_resend, old.out_ee);
        XICS_DBG("UPD        - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
                 new.cppr, new.mfrr, new.pending_pri, new.xisr,
                 new.need_resend, new.out_ee);

        /*
         * Check for output state update
         *
         * Note that this is racy since another processor could be updating
         * the state already. This is why we never clear the interrupt output
         * here, we only ever set it. The clear only happens prior to doing
         * an update and only by the processor itself. Currently we do it
         * in Accept (H_XIRR) and Up_Cppr (H_CPPR).
         *
         * We also do not try to figure out whether the EE state has changed,
         * we unconditionally set it if the new state calls for it. The reason
         * for that is that we opportunistically remove the pending interrupt
         * flag when raising CPPR, so we need to set it back here if an
         * interrupt is still pending.
         */
        if (new.out_ee) {
                kvmppc_book3s_queue_irqprio(icp->vcpu,
                                            BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
                if (!change_self)
                        kvmppc_fast_vcpu_kick(icp->vcpu);
        }
 bail:
        return success;
}

static void icp_check_resend(struct kvmppc_xics *xics,
                             struct kvmppc_icp *icp)
{
        u32 icsid;

        /* Order this load with the test for need_resend in the caller */
        smp_rmb();
        for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
                struct kvmppc_ics *ics = xics->ics[icsid];

                if (!test_and_clear_bit(icsid, icp->resend_map))
                        continue;
                if (!ics)
                        continue;
                ics_check_resend(xics, ics, icp);
        }
}

static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
                               u32 *reject)
{
        union kvmppc_icp_state old_state, new_state;
        bool success;

        XICS_DBG("try deliver %#x(P:%#x) to server %#lx\n", irq, priority,
                 icp->server_num);

        do {
                old_state = new_state = READ_ONCE(icp->state);

                *reject = 0;

                /* See if we can deliver */
                success = new_state.cppr > priority &&
                        new_state.mfrr > priority &&
                        new_state.pending_pri > priority;

                /*
                 * If we can, check for a rejection and perform the
                 * delivery
                 */
                if (success) {
                        *reject = new_state.xisr;
                        new_state.xisr = irq;
                        new_state.pending_pri = priority;
                } else {
                        /*
                         * If we failed to deliver we set need_resend
                         * so a subsequent CPPR state change causes us
                         * to try a new delivery.
                         */
                        new_state.need_resend = true;
                }

        } while (!icp_try_update(icp, old_state, new_state, false));

        return success;
}

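/*
 * Example of the delivery test above (illustrative): priorities are
 * "more favored" when numerically smaller, so an interrupt of priority
 * 0x05 is deliverable only when cppr, mfrr and pending_pri are all
 * greater than 0x05. With cppr = 0xff (open), mfrr = 0xff (no IPI) and
 * pending_pri = 0xff (nothing pending) it is accepted; the previous
 * xisr, if any, comes back through *reject for a fresh delivery attempt.
 */
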
static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
                            u32 new_irq, bool check_resend)
{
        struct ics_irq_state *state;
        struct kvmppc_ics *ics;
        u32 reject;
        u16 src;
        unsigned long flags;

        /*
         * This is used both for initial delivery of an interrupt and
         * for subsequent rejection.
         *
         * Rejection can be racy vs. resends. We have evaluated the
         * rejection in an atomic ICP transaction which is now complete,
         * so potentially the ICP can already accept the interrupt again.
         *
         * So we need to retry the delivery. Essentially the reject path
         * boils down to a failed delivery. Always.
         *
         * Now the interrupt could also have moved to a different target,
         * thus we may need to re-do the ICP lookup as well
         */
 again:
        /* Get the ICS state and lock it */
        ics = kvmppc_xics_find_ics(xics, new_irq, &src);
        if (!ics) {
                XICS_DBG("icp_deliver_irq: IRQ 0x%06x not found !\n", new_irq);
                return;
        }
        state = &ics->irq_state[src];

        /* Get a lock on the ICS */
        local_irq_save(flags);
        arch_spin_lock(&ics->lock);

        /* Get our server */
        if (!icp || state->server != icp->server_num) {
                icp = kvmppc_xics_find_server(xics->kvm, state->server);
                if (!icp) {
                        pr_warn("icp_deliver_irq: IRQ 0x%06x server 0x%x not found !\n",
                                new_irq, state->server);
                        goto out;
                }
        }

        if (check_resend)
                if (!state->resend)
                        goto out;

        /* Clear the resend bit of that interrupt */
        state->resend = 0;

        /*
         * If masked, bail out
         *
         * Note: PAPR doesn't mention anything about masked pending
         * when doing a resend, only when doing a delivery.
         *
         * However that would have the effect of losing a masked
         * interrupt that was rejected and isn't consistent with
         * the whole masked_pending business which is about not
         * losing interrupts that occur while masked.
         *
         * I don't differentiate normal deliveries and resends, this
         * implementation will differ from PAPR and not lose such
         * interrupts.
         */
        if (state->priority == MASKED) {
                XICS_DBG("irq %#x masked pending\n", new_irq);
                state->masked_pending = 1;
                goto out;
        }

        /*
         * Try the delivery, this will set the need_resend flag
         * in the ICP as part of the atomic transaction if the
         * delivery is not possible.
         *
         * Note that if successful, the new delivery might have itself
         * rejected an interrupt that was "delivered" before we took the
         * ics spin lock.
         *
         * In this case we do the whole sequence all over again for the
         * new guy. We cannot assume that the rejected interrupt is less
         * favored than the new one, and thus doesn't need to be delivered,
         * because by the time we exit icp_try_to_deliver() the target
         * processor may well have already consumed & completed it, and thus
         * the rejected interrupt might actually be already acceptable.
         */
        if (icp_try_to_deliver(icp, new_irq, state->priority, &reject)) {
                /*
                 * Delivery was successful, did we reject somebody else ?
                 */
                if (reject && reject != XICS_IPI) {
                        arch_spin_unlock(&ics->lock);
                        local_irq_restore(flags);
                        new_irq = reject;
                        check_resend = false;
                        goto again;
                }
        } else {
                /*
                 * We failed to deliver the interrupt we need to set the
                 * resend map bit and mark the ICS state as needing a resend
                 */
                state->resend = 1;

                /*
                 * Make sure when checking resend, we don't miss the resend
                 * if resend_map bit is seen and cleared.
                 */
                smp_wmb();
                set_bit(ics->icsid, icp->resend_map);

                /*
                 * If the need_resend flag got cleared in the ICP some time
                 * between icp_try_to_deliver() atomic update and now, then
                 * we know it might have missed the resend_map bit. So we
                 * retry
                 */
                smp_mb();
                if (!icp->state.need_resend) {
                        state->resend = 0;
                        arch_spin_unlock(&ics->lock);
                        local_irq_restore(flags);
                        check_resend = false;
                        goto again;
                }
        }
 out:
        arch_spin_unlock(&ics->lock);
        local_irq_restore(flags);
}

static void icp_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
                          u8 new_cppr)
{
        union kvmppc_icp_state old_state, new_state;
        bool resend;

        /*
         * This handles several related states in one operation:
         *
         * ICP State: Down_CPPR
         *
         * Load CPPR with new value and if the XISR is 0
         * then check for resends:
         *
         * ICP State: Resend
         *
         * If MFRR is more favored than CPPR, check for IPIs
         * and notify ICS of a potential resend. This is done
         * asynchronously (when used in real mode, we will have
         * to exit here).
         *
         * We do not handle the complete Check_IPI as documented
         * here. In the PAPR, this state will be used for both
         * Set_MFRR and Down_CPPR. However, we know that we aren't
         * changing the MFRR state here so we don't need to handle
         * the case of an MFRR causing a reject of a pending irq,
         * this will have been handled when the MFRR was set in the
         * first place.
         *
         * Thus we don't have to handle rejects, only resends.
         *
         * When implementing real mode for HV KVM, resend will lead to
         * a H_TOO_HARD return and the whole transaction will be handled
         * in virtual mode.
         */
        do {
                old_state = new_state = READ_ONCE(icp->state);

                /* Down_CPPR */
                new_state.cppr = new_cppr;

                /*
                 * Cut down Resend / Check_IPI / IPI
                 *
                 * The logic is that we cannot have a pending interrupt
                 * trumped by an IPI at this point (see above), so we
                 * know that either the pending interrupt is already an
                 * IPI (in which case we don't care to override it) or
                 * it's either more favored than us or non-existent
                 */
                if (new_state.mfrr < new_cppr &&
                    new_state.mfrr <= new_state.pending_pri) {
                        WARN_ON(new_state.xisr != XICS_IPI &&
                                new_state.xisr != 0);
                        new_state.pending_pri = new_state.mfrr;
                        new_state.xisr = XICS_IPI;
                }

                /* Latch/clear resend bit */
                resend = new_state.need_resend;
                new_state.need_resend = 0;

        } while (!icp_try_update(icp, old_state, new_state, true));

        /*
         * Now handle resend checks. Those are asynchronous to the ICP
         * state update in HW (ie bus transactions) so we can handle them
         * separately here too
         */
        if (resend)
                icp_check_resend(xics, icp);
}

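/*
 * Worked example (illustrative): a vcpu lowers its priority from cppr
 * 0x00 to new_cppr 0xff while mfrr is 0x45 and nothing is pending
 * (pending_pri 0xff, xisr 0). Since 0x45 < 0xff and 0x45 <= 0xff, the
 * transaction above latches the IPI - pending_pri becomes 0x45 and
 * xisr becomes XICS_IPI - and any latched need_resend then triggers
 * icp_check_resend().
 */
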
static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
{
        union kvmppc_icp_state old_state, new_state;
        struct kvmppc_icp *icp = vcpu->arch.icp;
        u32 xirr;

        /* First, remove EE from the processor */
        kvmppc_book3s_dequeue_irqprio(icp->vcpu,
                                      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

        /*
         * ICP State: Accept_Interrupt
         *
         * Return the pending interrupt (if any) along with the
         * current CPPR, then clear the XISR & set CPPR to the
         * pending priority
         */
        do {
                old_state = new_state = READ_ONCE(icp->state);

                xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
                if (!old_state.xisr)
                        break;
                new_state.cppr = new_state.pending_pri;
                new_state.pending_pri = 0xff;
                new_state.xisr = 0;

        } while (!icp_try_update(icp, old_state, new_state, true));

        XICS_DBG("h_xirr vcpu %d xirr %#x\n", vcpu->vcpu_id, xirr);

        return xirr;
}

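/*
 * XIRR layout example (illustrative): with cppr 0x05 and xisr 0x001001,
 * the hcall returns xirr = 0x001001 | (0x05 << 24) = 0x05001001 - the
 * 8-bit CPPR in the top byte and the 24-bit source number below it.
 * The accept then raises cppr to the accepted pending priority and
 * clears xisr/pending_pri, so a more favored interrupt can still
 * preempt the handler.
 */
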
static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
                                 unsigned long mfrr)
{
        union kvmppc_icp_state old_state, new_state;
        struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
        struct kvmppc_icp *icp;
        u32 reject;
        bool resend;
        bool local;

        XICS_DBG("h_ipi vcpu %d to server %lu mfrr %#lx\n",
                 vcpu->vcpu_id, server, mfrr);

        icp = vcpu->arch.icp;
        local = icp->server_num == server;
        if (!local) {
                icp = kvmppc_xics_find_server(vcpu->kvm, server);
                if (!icp)
                        return H_PARAMETER;
        }

        /*
         * ICP state: Set_MFRR
         *
         * If the CPPR is more favored than the new MFRR, then
         * nothing needs to be rejected as there can be no XISR to
         * reject. If the MFRR is being made less favored then
         * there might be a previously-rejected interrupt needing
         * to be resent.
         *
         * ICP state: Check_IPI
         *
         * If the CPPR is less favored, then we might be replacing
         * an interrupt, and thus need to possibly reject it.
         *
         * ICP state: IPI
         *
         * Besides rejecting any pending interrupts, we also
         * update XISR and pending_pri to mark IPI as pending.
         *
         * PAPR does not describe this state, but if the MFRR is being
         * made less favored than its earlier value, there might be
         * a previously-rejected interrupt needing to be resent.
         * Ideally, we would want to resend only if
         *      prio(pending_interrupt) < mfrr &&
         *      prio(pending_interrupt) < cppr
         * where pending interrupt is the one that was rejected. But
         * we don't have that state, so we simply trigger a resend
         * whenever the MFRR is made less favored.
         */
        do {
                old_state = new_state = READ_ONCE(icp->state);

                /* Set_MFRR */
                new_state.mfrr = mfrr;

                /* Check_IPI */
                reject = 0;
                resend = false;
                if (mfrr < new_state.cppr) {
                        /* Reject a pending interrupt if not an IPI */
                        if (mfrr <= new_state.pending_pri) {
                                reject = new_state.xisr;
                                new_state.pending_pri = mfrr;
                                new_state.xisr = XICS_IPI;
                        }
                }

                if (mfrr > old_state.mfrr) {
                        resend = new_state.need_resend;
                        new_state.need_resend = 0;
                }
        } while (!icp_try_update(icp, old_state, new_state, local));

        /* Handle reject */
        if (reject && reject != XICS_IPI)
                icp_deliver_irq(xics, icp, reject, false);

        /* Handle resend */
        if (resend)
                icp_check_resend(xics, icp);

        return H_SUCCESS;
}

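/*
 * Worked example (illustrative): H_IPI with mfrr 0x45 against a target
 * whose cppr and pending_pri are both 0xff takes the Check_IPI path -
 * 0x45 < 0xff and 0x45 <= 0xff - so xisr becomes XICS_IPI with
 * pending_pri 0x45, and any displaced xisr is re-delivered above.
 * Writing mfrr back to 0xff (clearing the IPI) makes the MFRR less
 * favored, so the latched need_resend flag runs icp_check_resend()
 * instead.
 */
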
static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
        union kvmppc_icp_state state;
        struct kvmppc_icp *icp;

        icp = vcpu->arch.icp;
        if (icp->server_num != server) {
                icp = kvmppc_xics_find_server(vcpu->kvm, server);
                if (!icp)
                        return H_PARAMETER;
        }
        state = READ_ONCE(icp->state);
        kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
        kvmppc_set_gpr(vcpu, 5, state.mfrr);
        return H_SUCCESS;
}

static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
        union kvmppc_icp_state old_state, new_state;
        struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
        struct kvmppc_icp *icp = vcpu->arch.icp;
        u32 reject;

        XICS_DBG("h_cppr vcpu %d cppr %#lx\n", vcpu->vcpu_id, cppr);

        /*
         * ICP State: Set_CPPR
         *
         * We can safely compare the new value with the current
         * value outside of the transaction as the CPPR is only
         * ever changed by the processor on itself
         */
        if (cppr > icp->state.cppr)
                icp_down_cppr(xics, icp, cppr);
        else if (cppr == icp->state.cppr)
                return;

        /*
         * ICP State: Up_CPPR
         *
         * The processor is raising its priority, this can result
         * in a rejection of a pending interrupt:
         *
         * ICP State: Reject_Current
         *
         * We can remove EE from the current processor, the update
         * transaction will set it again if needed
         */
        kvmppc_book3s_dequeue_irqprio(icp->vcpu,
                                      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

        do {
                old_state = new_state = READ_ONCE(icp->state);

                reject = 0;
                new_state.cppr = cppr;

                if (cppr <= new_state.pending_pri) {
                        reject = new_state.xisr;
                        new_state.xisr = 0;
                        new_state.pending_pri = 0xff;
                }

        } while (!icp_try_update(icp, old_state, new_state, true));

        /*
         * Check for rejects. They are handled by doing a new delivery
         * attempt (see comments in icp_deliver_irq).
         */
        if (reject && reject != XICS_IPI)
                icp_deliver_irq(xics, icp, reject, false);
}

static int ics_eoi(struct kvm_vcpu *vcpu, u32 irq)
{
        struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
        struct kvmppc_icp *icp = vcpu->arch.icp;
        struct kvmppc_ics *ics;
        struct ics_irq_state *state;
        u16 src;
        u32 pq_old, pq_new;

        /*
         * ICS EOI handling: For LSI, if P bit is still set, we need to
         * resend it.
         *
         * For MSI, we move Q bit into P (and clear Q). If it is set,
         * resend it.
         */

        ics = kvmppc_xics_find_ics(xics, irq, &src);
        if (!ics) {
                XICS_DBG("ics_eoi: IRQ 0x%06x not found !\n", irq);
                return H_PARAMETER;
        }
        state = &ics->irq_state[src];

        if (state->lsi)
                pq_new = state->pq_state;
        else
                do {
                        pq_old = state->pq_state;
                        pq_new = pq_old >> 1;
                } while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);

        if (pq_new & PQ_PRESENTED)
                icp_deliver_irq(xics, icp, irq, false);

        kvm_notify_acked_irq(vcpu->kvm, 0, irq);

        return H_SUCCESS;
}

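/*
 * Worked example for the MSI EOI above (illustrative): pq_old >> 1
 * shifts Q into P, so PQ_PRESENTED|PQ_QUEUED collapses to PQ_PRESENTED
 * and the queued interrupt is re-delivered, while a bare PQ_PRESENTED
 * collapses to 0 and the source goes idle.
 */
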
static noinline int kvmppc_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
        struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
        struct kvmppc_icp *icp = vcpu->arch.icp;
        u32 irq = xirr & 0x00ffffff;

        XICS_DBG("h_eoi vcpu %d eoi %#lx\n", vcpu->vcpu_id, xirr);

        /*
         * ICP State: EOI
         *
         * Note: If EOI is incorrectly used by SW to lower the CPPR
         * value (ie more favored), we do not check for rejection of
         * a pending interrupt, this is a SW error and PAPR specifies
         * that we don't have to deal with it.
         *
         * The sending of an EOI to the ICS is handled after the
         * CPPR update
         *
         * ICP State: Down_CPPR which we handle
         * in a separate function as it's shared with H_CPPR.
         */
        icp_down_cppr(xics, icp, xirr >> 24);

        /* IPIs have no EOI */
        if (irq == XICS_IPI)
                return H_SUCCESS;

        return ics_eoi(vcpu, irq);
}

int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
{
        struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
        struct kvmppc_icp *icp = vcpu->arch.icp;

        XICS_DBG("XICS_RM: H_%x completing, act: %x state: %lx tgt: %p\n",
                 hcall, icp->rm_action, icp->rm_dbgstate.raw, icp->rm_dbgtgt);

        if (icp->rm_action & XICS_RM_KICK_VCPU) {
                icp->n_rm_kick_vcpu++;
                kvmppc_fast_vcpu_kick(icp->rm_kick_target);
        }
        if (icp->rm_action & XICS_RM_CHECK_RESEND) {
                icp->n_rm_check_resend++;
                icp_check_resend(xics, icp->rm_resend_icp);
        }
        if (icp->rm_action & XICS_RM_NOTIFY_EOI) {
                icp->n_rm_notify_eoi++;
                kvm_notify_acked_irq(vcpu->kvm, 0, icp->rm_eoied_irq);
        }

        icp->rm_action = 0;

        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_rm_complete);

int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
{
        struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
        unsigned long res;
        int rc = H_SUCCESS;

        /* Check if we have an ICP */
        if (!xics || !vcpu->arch.icp)
                return H_HARDWARE;

        /* These requests don't have real-mode implementations at present */
        switch (req) {
        case H_XIRR_X:
                res = kvmppc_h_xirr(vcpu);
                kvmppc_set_gpr(vcpu, 4, res);
                kvmppc_set_gpr(vcpu, 5, get_tb());
                return rc;
        case H_IPOLL:
                rc = kvmppc_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
                return rc;
        }

        /* Check for real mode returning too hard */
        if (xics->real_mode && is_kvmppc_hv_enabled(vcpu->kvm))
                return kvmppc_xics_rm_complete(vcpu, req);

        switch (req) {
        case H_XIRR:
                res = kvmppc_h_xirr(vcpu);
                kvmppc_set_gpr(vcpu, 4, res);
                break;
        case H_CPPR:
                kvmppc_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
                break;
        case H_EOI:
                rc = kvmppc_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
                break;
        case H_IPI:
                rc = kvmppc_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
                                  kvmppc_get_gpr(vcpu, 5));
                break;
        }

        return rc;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_hcall);

/* -- Initialisation code etc. -- */

static void xics_debugfs_irqmap(struct seq_file *m,
                                struct kvmppc_passthru_irqmap *pimap)
{
        int i;

        if (!pimap)
                return;
        seq_printf(m, "========\nPIRQ mappings: %d maps\n===========\n",
                   pimap->n_mapped);
        for (i = 0; i < pimap->n_mapped; i++) {
                seq_printf(m, "r_hwirq=%x, v_hwirq=%x\n",
                           pimap->mapped[i].r_hwirq, pimap->mapped[i].v_hwirq);
        }
}

static int xics_debug_show(struct seq_file *m, void *private)
{
        struct kvmppc_xics *xics = m->private;
        struct kvm *kvm = xics->kvm;
        struct kvm_vcpu *vcpu;
        int icsid, i;
        unsigned long flags;
        unsigned long t_rm_kick_vcpu, t_rm_check_resend;
        unsigned long t_rm_notify_eoi;
        unsigned long t_reject, t_check_resend;

        if (!kvm)
                return 0;

        t_rm_kick_vcpu = 0;
        t_rm_notify_eoi = 0;
        t_rm_check_resend = 0;
        t_check_resend = 0;
        t_reject = 0;

        xics_debugfs_irqmap(m, kvm->arch.pimap);

        seq_printf(m, "=========\nICP state\n=========\n");

        kvm_for_each_vcpu(i, vcpu, kvm) {
                struct kvmppc_icp *icp = vcpu->arch.icp;
                union kvmppc_icp_state state;

                if (!icp)
                        continue;

                state.raw = READ_ONCE(icp->state.raw);
                seq_printf(m, "cpu server %#lx XIRR:%#x PPRI:%#x CPPR:%#x MFRR:%#x OUT:%d NR:%d\n",
                           icp->server_num, state.xisr,
                           state.pending_pri, state.cppr, state.mfrr,
                           state.out_ee, state.need_resend);
                t_rm_kick_vcpu += icp->n_rm_kick_vcpu;
                t_rm_notify_eoi += icp->n_rm_notify_eoi;
                t_rm_check_resend += icp->n_rm_check_resend;
                t_check_resend += icp->n_check_resend;
                t_reject += icp->n_reject;
        }

        seq_printf(m, "ICP Guest->Host totals: kick_vcpu=%lu check_resend=%lu notify_eoi=%lu\n",
                   t_rm_kick_vcpu, t_rm_check_resend,
                   t_rm_notify_eoi);
        seq_printf(m, "ICP Real Mode totals: check_resend=%lu resend=%lu\n",
                   t_check_resend, t_reject);

        for (icsid = 0; icsid <= KVMPPC_XICS_MAX_ICS_ID; icsid++) {
                struct kvmppc_ics *ics = xics->ics[icsid];

                if (!ics)
                        continue;

                seq_printf(m, "=========\nICS state for ICS 0x%x\n=========\n",
                           icsid);

                local_irq_save(flags);
                arch_spin_lock(&ics->lock);

                for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
                        struct ics_irq_state *irq = &ics->irq_state[i];

                        seq_printf(m, "irq 0x%06x: server %#x prio %#x save prio %#x pq_state %d resend %d masked pending %d\n",
                                   irq->number, irq->server, irq->priority,
                                   irq->saved_priority, irq->pq_state,
                                   irq->resend, irq->masked_pending);
                }
                arch_spin_unlock(&ics->lock);
                local_irq_restore(flags);
        }
        return 0;
}

static int xics_debug_open(struct inode *inode, struct file *file)
{
        return single_open(file, xics_debug_show, inode->i_private);
}

static const struct file_operations xics_debug_fops = {
        .open = xics_debug_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static void xics_debugfs_init(struct kvmppc_xics *xics)
{
        char *name;

        name = kasprintf(GFP_KERNEL, "kvm-xics-%p", xics);
        if (!name) {
                pr_err("%s: no memory for name\n", __func__);
                return;
        }

        xics->dentry = debugfs_create_file(name, 0444, powerpc_debugfs_root,
                                           xics, &xics_debug_fops);

        pr_debug("%s: created %s\n", __func__, name);
        kfree(name);
}

static struct kvmppc_ics *kvmppc_xics_create_ics(struct kvm *kvm,
                                                 struct kvmppc_xics *xics,
                                                 int irq)
{
        struct kvmppc_ics *ics;
        int i, icsid;

        icsid = irq >> KVMPPC_XICS_ICS_SHIFT;

        mutex_lock(&kvm->lock);

        /* ICS already exists - somebody else got here first */
        if (xics->ics[icsid])
                goto out;

        /* Create the ICS */
        ics = kzalloc(sizeof(struct kvmppc_ics), GFP_KERNEL);
        if (!ics)
                goto out;

        ics->icsid = icsid;

        for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
                ics->irq_state[i].number = (icsid << KVMPPC_XICS_ICS_SHIFT) | i;
                ics->irq_state[i].priority = MASKED;
                ics->irq_state[i].saved_priority = MASKED;
        }
        smp_wmb();
        xics->ics[icsid] = ics;

        if (icsid > xics->max_icsid)
                xics->max_icsid = icsid;

 out:
        mutex_unlock(&kvm->lock);
        return xics->ics[icsid];
}

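/*
 * Note on the numbering (illustrative): kvmppc_xics_find_ics() and this
 * function agree on a single mapping - the high bits of a global
 * interrupt number select the ICS (icsid = irq >> KVMPPC_XICS_ICS_SHIFT)
 * and the low bits select the source within it, which is why each
 * source's global number is rebuilt above as
 * (icsid << KVMPPC_XICS_ICS_SHIFT) | i.
 */
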
static int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server_num)
{
        struct kvmppc_icp *icp;

        if (!vcpu->kvm->arch.xics)
                return -ENODEV;

        if (kvmppc_xics_find_server(vcpu->kvm, server_num))
                return -EEXIST;

        icp = kzalloc(sizeof(struct kvmppc_icp), GFP_KERNEL);
        if (!icp)
                return -ENOMEM;

        icp->vcpu = vcpu;
        icp->server_num = server_num;
        icp->state.mfrr = MASKED;
        icp->state.pending_pri = MASKED;
        vcpu->arch.icp = icp;

        XICS_DBG("created server for vcpu %d\n", vcpu->vcpu_id);

        return 0;
}

u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu)
{
        struct kvmppc_icp *icp = vcpu->arch.icp;
        union kvmppc_icp_state state;

        if (!icp)
                return 0;
        state = icp->state;
        return ((u64)state.cppr << KVM_REG_PPC_ICP_CPPR_SHIFT) |
                ((u64)state.xisr << KVM_REG_PPC_ICP_XISR_SHIFT) |
                ((u64)state.mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT) |
                ((u64)state.pending_pri << KVM_REG_PPC_ICP_PPRI_SHIFT);
}

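/*
 * Illustrative layout (assuming the usual KVM_REG_PPC_ICP_*_SHIFT values
 * from the uapi header: CPPR at bit 56, XISR at 32, MFRR at 24, PPRI at
 * 16): an idle ICP with cppr 0xff, xisr 0, mfrr 0xff and pending_pri
 * 0xff packs to 0xff000000ffff0000, which is also a value userspace
 * could feed back through kvmppc_xics_set_icp() below.
 */
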
int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
{
        struct kvmppc_icp *icp = vcpu->arch.icp;
        struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
        union kvmppc_icp_state old_state, new_state;
        struct kvmppc_ics *ics;
        u8 cppr, mfrr, pending_pri;
        u32 xisr;
        u16 src;
        bool resend;

        if (!icp || !xics)
                return -ENOENT;

        cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
        xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
                KVM_REG_PPC_ICP_XISR_MASK;
        mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
        pending_pri = icpval >> KVM_REG_PPC_ICP_PPRI_SHIFT;

        /* Require the new state to be internally consistent */
        if (xisr == 0) {
                if (pending_pri != 0xff)
                        return -EINVAL;
        } else if (xisr == XICS_IPI) {
                if (pending_pri != mfrr || pending_pri >= cppr)
                        return -EINVAL;
        } else {
                if (pending_pri >= mfrr || pending_pri >= cppr)
                        return -EINVAL;
                ics = kvmppc_xics_find_ics(xics, xisr, &src);
                if (!ics)
                        return -EINVAL;
        }

        new_state.raw = 0;
        new_state.cppr = cppr;
        new_state.xisr = xisr;
        new_state.mfrr = mfrr;
        new_state.pending_pri = pending_pri;

        /*
         * Deassert the CPU interrupt request.
         * icp_try_update will reassert it if necessary.
         */
        kvmppc_book3s_dequeue_irqprio(icp->vcpu,
                                      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

        /*
         * Note that if we displace an interrupt from old_state.xisr,
         * we don't mark it as rejected. We expect userspace to set
         * the state of the interrupt sources to be consistent with
         * the ICP states (either before or afterwards, which doesn't
         * matter). We do handle resends due to CPPR becoming less
         * favoured because that is necessary to end up with a
         * consistent state in the situation where userspace restores
         * the ICS states before the ICP states.
         */
        do {
                old_state = READ_ONCE(icp->state);

                if (new_state.mfrr <= old_state.mfrr) {
                        resend = false;
                        new_state.need_resend = old_state.need_resend;
                } else {
                        resend = old_state.need_resend;
                        new_state.need_resend = 0;
                }
        } while (!icp_try_update(icp, old_state, new_state, false));

        if (resend)
                icp_check_resend(xics, icp);

        return 0;
}

static int xics_get_source(struct kvmppc_xics *xics, long irq, u64 addr)
{
        int ret;
        struct kvmppc_ics *ics;
        struct ics_irq_state *irqp;
        u64 __user *ubufp = (u64 __user *) addr;
        u16 idx;
        u64 val, prio;
        unsigned long flags;

        ics = kvmppc_xics_find_ics(xics, irq, &idx);
        if (!ics)
                return -ENOENT;

        irqp = &ics->irq_state[idx];
        local_irq_save(flags);
        arch_spin_lock(&ics->lock);
        ret = -ENOENT;
        if (irqp->exists) {
                val = irqp->server;
                prio = irqp->priority;
                if (prio == MASKED) {
                        val |= KVM_XICS_MASKED;
                        prio = irqp->saved_priority;
                }
                val |= prio << KVM_XICS_PRIORITY_SHIFT;
                if (irqp->lsi) {
                        val |= KVM_XICS_LEVEL_SENSITIVE;
                        if (irqp->pq_state & PQ_PRESENTED)
                                val |= KVM_XICS_PENDING;
                } else if (irqp->masked_pending || irqp->resend)
                        val |= KVM_XICS_PENDING;

                if (irqp->pq_state & PQ_PRESENTED)
                        val |= KVM_XICS_PRESENTED;

                if (irqp->pq_state & PQ_QUEUED)
                        val |= KVM_XICS_QUEUED;
                ret = 0;
        }
        arch_spin_unlock(&ics->lock);
        local_irq_restore(flags);

        if (!ret && put_user(val, ubufp))
                ret = -EFAULT;

        return ret;
}

static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr)
{
        struct kvmppc_ics *ics;
        struct ics_irq_state *irqp;
        u64 __user *ubufp = (u64 __user *) addr;
        u16 idx;
        u64 val;
        u8 prio;
        u32 server;
        unsigned long flags;

        if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
                return -ENOENT;

        ics = kvmppc_xics_find_ics(xics, irq, &idx);
        if (!ics) {
                ics = kvmppc_xics_create_ics(xics->kvm, xics, irq);
                if (!ics)
                        return -ENOMEM;
        }
        irqp = &ics->irq_state[idx];
        if (get_user(val, ubufp))
                return -EFAULT;

        server = val & KVM_XICS_DESTINATION_MASK;
        prio = val >> KVM_XICS_PRIORITY_SHIFT;
        if (prio != MASKED &&
            kvmppc_xics_find_server(xics->kvm, server) == NULL)
                return -EINVAL;

        local_irq_save(flags);
        arch_spin_lock(&ics->lock);
        irqp->server = server;
        irqp->saved_priority = prio;
        if (val & KVM_XICS_MASKED)
                prio = MASKED;
        irqp->priority = prio;
        irqp->resend = 0;
        irqp->masked_pending = 0;
        irqp->lsi = 0;
        irqp->pq_state = 0;
        if (val & KVM_XICS_LEVEL_SENSITIVE)
                irqp->lsi = 1;
        /* If PENDING, set P in case P is not saved because of old code */
        if (val & KVM_XICS_PRESENTED || val & KVM_XICS_PENDING)
                irqp->pq_state |= PQ_PRESENTED;
        if (val & KVM_XICS_QUEUED)
                irqp->pq_state |= PQ_QUEUED;
        irqp->exists = 1;
        arch_spin_unlock(&ics->lock);
        local_irq_restore(flags);

        if (val & KVM_XICS_PENDING)
                icp_deliver_irq(xics, NULL, irqp->number, false);

        return 0;
}

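/*
 * Illustrative round trip: the u64 exchanged with userspace carries the
 * server number in the KVM_XICS_DESTINATION_MASK bits, the priority at
 * KVM_XICS_PRIORITY_SHIFT, and flag bits (KVM_XICS_MASKED,
 * KVM_XICS_LEVEL_SENSITIVE, KVM_XICS_PENDING, KVM_XICS_PRESENTED,
 * KVM_XICS_QUEUED) above them - so what xics_get_source() saved can be
 * fed straight back in here to restore a source, including its PQ bits.
 */
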
int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
                        bool line_status)
{
        struct kvmppc_xics *xics = kvm->arch.xics;

        if (!xics)
                return -ENODEV;
        return ics_deliver_irq(xics, irq, level);
}

static int xics_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
        struct kvmppc_xics *xics = dev->private;

        switch (attr->group) {
        case KVM_DEV_XICS_GRP_SOURCES:
                return xics_set_source(xics, attr->attr, attr->addr);
        }
        return -ENXIO;
}

static int xics_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
        struct kvmppc_xics *xics = dev->private;

        switch (attr->group) {
        case KVM_DEV_XICS_GRP_SOURCES:
                return xics_get_source(xics, attr->attr, attr->addr);
        }
        return -ENXIO;
}

static int xics_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_XICS_GRP_SOURCES:
                if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
                    attr->attr < KVMPPC_XICS_NR_IRQS)
                        return 0;
                break;
        }
        return -ENXIO;
}

static void kvmppc_xics_free(struct kvm_device *dev)
{
        struct kvmppc_xics *xics = dev->private;
        int i;
        struct kvm *kvm = xics->kvm;

        debugfs_remove(xics->dentry);

        if (kvm)
                kvm->arch.xics = NULL;

        for (i = 0; i <= xics->max_icsid; i++)
                kfree(xics->ics[i]);
        kfree(xics);
        kfree(dev);
}

static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
{
        struct kvmppc_xics *xics;
        struct kvm *kvm = dev->kvm;
        int ret = 0;

        xics = kzalloc(sizeof(*xics), GFP_KERNEL);
        if (!xics)
                return -ENOMEM;

        dev->private = xics;
        xics->dev = dev;
        xics->kvm = kvm;

        /* Already there ? */
        if (kvm->arch.xics)
                ret = -EEXIST;
        else
                kvm->arch.xics = xics;

        if (ret) {
                kfree(xics);
                return ret;
        }

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        if (cpu_has_feature(CPU_FTR_ARCH_206)) {
                /* Enable real mode support */
                xics->real_mode = ENABLE_REALMODE;
                xics->real_mode_dbg = DEBUG_REALMODE;
        }
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

        return 0;
}

static void kvmppc_xics_init(struct kvm_device *dev)
{
        struct kvmppc_xics *xics = (struct kvmppc_xics *)dev->private;

        xics_debugfs_init(xics);
}

struct kvm_device_ops kvm_xics_ops = {
        .name = "kvm-xics",
        .create = kvmppc_xics_create,
        .init = kvmppc_xics_init,
        .destroy = kvmppc_xics_free,
        .set_attr = xics_set_attr,
        .get_attr = xics_get_attr,
        .has_attr = xics_has_attr,
};

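/*
 * Illustrative userspace usage (a sketch, not part of this file): the
 * device is instantiated with KVM_CREATE_DEVICE and configured one
 * source at a time through the attribute group handled above, e.g.:
 *
 *      struct kvm_create_device cd = { .type = KVM_DEV_TYPE_XICS };
 *      ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
 *
 *      __u64 val = server | ((__u64)prio << KVM_XICS_PRIORITY_SHIFT);
 *      struct kvm_device_attr attr = {
 *              .group = KVM_DEV_XICS_GRP_SOURCES,
 *              .attr  = irq,
 *              .addr  = (__u64)(unsigned long)&val,
 *      };
 *      ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * kvmppc_xics_connect_vcpu() below then binds each vcpu to its ICP
 * (server) once the device exists.
 */
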
int kvmppc_xics_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
                             u32 xcpu)
{
        struct kvmppc_xics *xics = dev->private;
        int r = -EBUSY;

        if (dev->ops != &kvm_xics_ops)
                return -EPERM;
        if (xics->kvm != vcpu->kvm)
                return -EPERM;
        if (vcpu->arch.irq_type)
                return -EBUSY;

        r = kvmppc_xics_create_icp(vcpu, xcpu);
        if (!r)
                vcpu->arch.irq_type = KVMPPC_IRQ_XICS;

        return r;
}

void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu)
{
        if (!vcpu->arch.icp)
                return;
        kfree(vcpu->arch.icp);
        vcpu->arch.icp = NULL;
        vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
}

void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long irq,
                            unsigned long host_irq)
{
        struct kvmppc_xics *xics = kvm->arch.xics;
        struct kvmppc_ics *ics;
        u16 idx;

        ics = kvmppc_xics_find_ics(xics, irq, &idx);
        if (!ics)
                return;

        ics->irq_state[idx].host_irq = host_irq;
        ics->irq_state[idx].intr_cpu = -1;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_set_mapped);

void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long irq,
                            unsigned long host_irq)
{
        struct kvmppc_xics *xics = kvm->arch.xics;
        struct kvmppc_ics *ics;
        u16 idx;

        ics = kvmppc_xics_find_ics(xics, irq, &idx);
        if (!ics)
                return;

        ics->irq_state[idx].host_irq = 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_clr_mapped);