/*
 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) "xive-kvm: " fmt

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/debug.h>
#include <asm/debugfs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "book3s_xive.h"
/*
 * Virtual mode variants of the hcalls for use on radix/radix
 * with AIL. They require the VCPU's VP to be "pushed".
 *
 * We still instantiate them here because we use some of the
 * generated utility functions as well in this file.
 */
#define XIVE_RUNTIME_CHECKS
#define X_PFX xive_vm_
#define X_STATIC static
#define X_STAT_PFX stat_vm_
#define __x_tima		xive_tima
#define __x_eoi_page(xd)	((void __iomem *)((xd)->eoi_mmio))
#define __x_trig_page(xd)	((void __iomem *)((xd)->trig_mmio))
#define __x_writeb	__raw_writeb
#define __x_readw	__raw_readw
#define __x_readq	__raw_readq
#define __x_writeq	__raw_writeq

#include "book3s_xive_template.c"
/*
 * We leave a gap of a couple of interrupts in the queue to
 * account for the IPI and additional safety guard.
 */
#define XIVE_Q_GAP	2
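/*
 * Illustration of the accounting implied above (a sketch derived from
 * xive_try_pick_queue() below, not an additional constraint): a queue
 * whose index mask is q->msk holds q->msk + 1 entries, of which only
 * (q->msk + 1) - XIVE_Q_GAP are handed out to targeted sources; the
 * spare slots cover the VP IPI plus a safety margin.
 */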
/*
 * This is a simple trigger for a generic XIVE IRQ. This must
 * only be called for interrupts that support a trigger page
 */
static bool xive_irq_trigger(struct xive_irq_data *xd)
{
	/* This should be only for MSIs */
	if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
		return false;

	/* Those interrupts should always have a trigger page */
	if (WARN_ON(!xd->trig_mmio))
		return false;

	out_be64(xd->trig_mmio, 0);

	return true;
}
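/*
 * Note on usage: the single MMIO store above is what actually fires
 * the interrupt. Later code in this file relies on it to re-send a
 * source, e.g. xive_irq_trigger(&state->ipi_data) when restoring a
 * pending interrupt or when kvmppc_xive_set_irq() delivers a trigger.
 */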
static irqreturn_t xive_esc_irq(int irq, void *data)
{
	struct kvm_vcpu *vcpu = data;

	vcpu->arch.irq_pending = 1;
	smp_mb();
	if (vcpu->arch.ceded)
		kvmppc_fast_vcpu_kick(vcpu);

	/* Since we have the no-EOI flag, the interrupt is effectively
	 * disabled now. Clearing xive_esc_on means we won't bother
	 * doing so on the next entry.
	 *
	 * This also allows the entry code to know that if a PQ combination
	 * of 10 is observed while xive_esc_on is true, it means the queue
	 * contains an unprocessed escalation interrupt. We don't make use of
	 * that knowledge today but might (see comment in book3s_hv_rmhandler.S)
	 */
	vcpu->arch.xive_esc_on = false;

	return IRQ_HANDLED;
}
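/*
 * Note: xive_esc_on is presumably set again by the entry code referenced
 * above (book3s_hv_rmhandler.S) when the escalation is re-enabled on the
 * next guest entry; this handler only records the pending event and
 * kicks the vCPU.
 */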
static int xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_q *q = &xc->queues[prio];
	char *name = NULL;
	int rc;

	/* Already there ? */
	if (xc->esc_virq[prio])
		return 0;

	/* Hook up the escalation interrupt */
	xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq);
	if (!xc->esc_virq[prio]) {
		pr_err("Failed to map escalation interrupt for queue %d of VCPU %d\n",
		       prio, xc->server_num);
		return -EIO;
	}

	if (xc->xive->single_escalation)
		name = kasprintf(GFP_KERNEL, "kvm-%d-%d",
				 vcpu->kvm->arch.lpid, xc->server_num);
	else
		name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
				 vcpu->kvm->arch.lpid, xc->server_num, prio);
	if (!name) {
		pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n",
		       prio, xc->server_num);
		rc = -ENOMEM;
		goto error;
	}

	pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio);

	rc = request_irq(xc->esc_virq[prio], xive_esc_irq,
			 IRQF_NO_THREAD, name, vcpu);
	if (rc) {
		pr_err("Failed to request escalation interrupt for queue %d of VCPU %d\n",
		       prio, xc->server_num);
		goto error;
	}
	xc->esc_virq_names[prio] = name;

	/* In single escalation mode, we grab the ESB MMIO of the
	 * interrupt and mask it. Also populate the VCPU v/raddr
	 * of the ESB page for use by asm entry/exit code. Finally
	 * set the XIVE_IRQ_NO_EOI flag which will prevent the
	 * core code from performing an EOI on the escalation
	 * interrupt, thus leaving it effectively masked after
	 * it fires once.
	 */
	if (xc->xive->single_escalation) {
		struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]);
		struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

		xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
		vcpu->arch.xive_esc_raddr = xd->eoi_page;
		vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio;
		xd->flags |= XIVE_IRQ_NO_EOI;
	}

	return 0;

error:
	irq_dispose_mapping(xc->esc_virq[prio]);
	xc->esc_virq[prio] = 0;
	kfree(name);
	return rc;
}
static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = xc->xive;
	struct xive_q *q = &xc->queues[prio];
	void *qpage;
	int rc;

	if (WARN_ON(q->qpage))
		return 0;

	/* Allocate the queue and retrieve infos on current node for now */
	qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order);
	if (!qpage) {
		pr_err("Failed to allocate queue %d for VCPU %d\n",
		       prio, xc->server_num);
		return -ENOMEM;
	}
	memset(qpage, 0, 1 << xive->q_order);

	/*
	 * Reconfigure the queue. This will set q->qpage only once the
	 * queue is fully configured. This is a requirement for prio 0
	 * as we will stop doing EOIs for every IPI as soon as we observe
	 * qpage being non-NULL, and instead will only EOI when we receive
	 * corresponding queue 0 entries
	 */
	rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage,
					 xive->q_order, true);
	if (rc)
		pr_err("Failed to configure queue %d for VCPU %d\n",
		       prio, xc->server_num);
	return rc;
}
/* Called with kvm_lock held */
static int xive_check_provisioning(struct kvm *kvm, u8 prio)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvm_vcpu *vcpu;
	int i, rc;

	lockdep_assert_held(&kvm->lock);

	/* Already provisioned ? */
	if (xive->qmap & (1 << prio))
		return 0;

	pr_devel("Provisioning prio... %d\n", prio);

	/* Provision each VCPU and enable escalations if needed */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!vcpu->arch.xive_vcpu)
			continue;
		rc = xive_provision_queue(vcpu, prio);
		if (rc == 0 && !xive->single_escalation)
			xive_attach_escalation(vcpu, prio);
		if (rc)
			return rc;
	}

	/* Order previous stores and mark it as provisioned */
	mb();
	xive->qmap |= (1 << prio);
	return 0;
}
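/*
 * xive->qmap is simply a bitmask with one bit per provisioned guest
 * priority: once the bit for "prio" is set above, later vCPU hotplug
 * (see kvmppc_xive_connect_vcpu()) knows it must provision that queue
 * for the new vCPU as well.
 */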
static void xive_inc_q_pending(struct kvm *kvm, u32 server, u8 prio)
{
	struct kvm_vcpu *vcpu;
	struct kvmppc_xive_vcpu *xc;
	struct xive_q *q;

	/* Locate target server */
	vcpu = kvmppc_xive_find_server(kvm, server);
	if (!vcpu) {
		pr_warn("%s: Can't find server %d\n", __func__, server);
		return;
	}
	xc = vcpu->arch.xive_vcpu;
	if (WARN_ON(!xc))
		return;

	q = &xc->queues[prio];
	atomic_inc(&q->pending_count);
}
static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_q *q;
	u32 max;

	if (WARN_ON(!xc))
		return -ENXIO;
	if (!xc->valid)
		return -ENXIO;

	q = &xc->queues[prio];
	if (WARN_ON(!q->qpage))
		return -ENXIO;

	/* Calculate max number of interrupts in that queue. */
	max = (q->msk + 1) - XIVE_Q_GAP;
	return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY;
}
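/*
 * A rough worked example of the accounting above: with a queue of N
 * entries (q->msk == N - 1), at most N - XIVE_Q_GAP sources can be
 * targeted at this queue; once atomic_add_unless() refuses the
 * increment, xive_select_target() below falls back to another vCPU's
 * queue for the same priority.
 */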
static int xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
{
	struct kvm_vcpu *vcpu;
	int i, rc;

	/* Locate target server */
	vcpu = kvmppc_xive_find_server(kvm, *server);
	if (!vcpu) {
		pr_devel("Can't find server %d\n", *server);
		return -EINVAL;
	}

	pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio);

	rc = xive_try_pick_queue(vcpu, prio);
	if (rc == 0)
		return rc;

	pr_devel(" .. failed, looking up candidate...\n");

	/* Failed, pick another VCPU */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!vcpu->arch.xive_vcpu)
			continue;
		rc = xive_try_pick_queue(vcpu, prio);
		if (rc == 0) {
			*server = vcpu->arch.xive_vcpu->server_num;
			pr_devel(" found on 0x%x/%d\n", *server, prio);
			return rc;
		}
	}
	pr_devel(" no available target !\n");

	/* No available target ! */
	return -EBUSY;
}
static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
			     struct kvmppc_xive_src_block *sb,
			     struct kvmppc_xive_irq_state *state)
{
	struct xive_irq_data *xd;
	u32 hw_num;
	u8 old_prio;
	u64 val;

	/*
	 * Take the lock, set masked, try again if racing
	 * with H_EOI
	 */
	for (;;) {
		arch_spin_lock(&sb->lock);
		old_prio = state->guest_priority;
		state->guest_priority = MASKED;
		mb();
		if (!state->in_eoi)
			break;
		state->guest_priority = old_prio;
		arch_spin_unlock(&sb->lock);
	}

	/* No change ? Bail */
	if (old_prio == MASKED)
		return old_prio;

	/* Get the right irq */
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	/*
	 * If the interrupt is marked as needing masking via
	 * firmware, we do it here. Firmware masking however
	 * is "lossy", it won't return the old p and q bits
	 * and won't set the interrupt to a state where it will
	 * record queued ones. If this is an issue we should do
	 * lazy masking instead.
	 *
	 * For now, we work around this in unmask by forcing
	 * an interrupt whenever we unmask a non-LSI via FW.
	 */
	if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
		xive_native_configure_irq(hw_num,
					  xive->vp_base + state->act_server,
					  MASKED, state->number);
		/* set old_p so we can track if an H_EOI was done */
		state->old_p = true;
		state->old_q = false;
	} else {
		/* Set PQ to 10, return old P and old Q and remember them */
		val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10);
		state->old_p = !!(val & 2);
		state->old_q = !!(val & 1);

		/*
		 * Synchronize hardware to ensure the queues are updated
		 * when masking
		 */
		xive_native_sync_source(hw_num);
	}

	return old_prio;
}
static void xive_lock_for_unmask(struct kvmppc_xive_src_block *sb,
				 struct kvmppc_xive_irq_state *state)
{
	/*
	 * Take the lock, try again if racing with H_EOI
	 */
	for (;;) {
		arch_spin_lock(&sb->lock);
		if (!state->in_eoi)
			break;
		arch_spin_unlock(&sb->lock);
	}
}
static void xive_finish_unmask(struct kvmppc_xive *xive,
			       struct kvmppc_xive_src_block *sb,
			       struct kvmppc_xive_irq_state *state,
			       u8 prio)
{
	struct xive_irq_data *xd;
	u32 hw_num;

	/* If we aren't changing a thing, move on */
	if (state->guest_priority != MASKED)
		goto bail;

	/* Get the right irq */
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	/*
	 * See comment in xive_lock_and_mask() concerning masking
	 * via firmware.
	 */
	if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
		xive_native_configure_irq(hw_num,
					  xive->vp_base + state->act_server,
					  state->act_priority, state->number);
		/* If an EOI is needed, do it here */
		if (!state->old_p)
			xive_vm_source_eoi(hw_num, xd);
		/* If this is not an LSI, force a trigger */
		if (!(xd->flags & OPAL_XIVE_IRQ_LSI))
			xive_irq_trigger(xd);
		goto bail;
	}

	/* Old Q set, set PQ to 11 */
	if (state->old_q)
		xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);

	/*
	 * If not old P, then perform an "effective" EOI,
	 * on the source. This will handle the cases where
	 * FW EOI is needed.
	 */
	if (!state->old_p)
		xive_vm_source_eoi(hw_num, xd);

	/* Synchronize ordering and mark unmasked */
	mb();
bail:
	state->guest_priority = prio;
}
/*
 * Target an interrupt to a given server/prio, this will fallback
 * to another server if necessary and perform the HW targeting
 * updates as needed.
 *
 * NOTE: Must be called with the state lock held
 */
static int xive_target_interrupt(struct kvm *kvm,
				 struct kvmppc_xive_irq_state *state,
				 u32 server, u8 prio)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	u32 hw_num;
	int rc;

	/*
	 * This will return a tentative server and actual
	 * priority. The count for that new target will have
	 * already been incremented.
	 */
	rc = xive_select_target(kvm, &server, prio);

	/*
	 * We failed to find a target ? Not much we can do
	 * at least until we support the GIQ.
	 */
	if (rc)
		return rc;

	/*
	 * Increment the old queue pending count if there
	 * was one so that the old queue count gets adjusted later
	 * when observed to be empty.
	 */
	if (state->act_priority != MASKED)
		xive_inc_q_pending(kvm,
				   state->act_server,
				   state->act_priority);
	/*
	 * Update state and HW
	 */
	state->act_priority = prio;
	state->act_server = server;

	/* Get the right irq */
	kvmppc_xive_select_irq(state, &hw_num, NULL);

	return xive_native_configure_irq(hw_num,
					 xive->vp_base + server,
					 prio, state->number);
}
/*
 * Targeting rules: In order to avoid losing track of
 * pending interrupts across mask and unmask, which would
 * allow queue overflows, we implement the following rules:
 *
 * - Unless it was never enabled (or we run out of capacity)
 *   an interrupt is always targeted at a valid server/queue
 *   pair even when "masked" by the guest. This pair tends to
 *   be the last one used but it can be changed under some
 *   circumstances. That allows us to separate targeting
 *   from masking, we only handle accounting during (re)targeting,
 *   this also allows us to let an interrupt drain into its target
 *   queue after masking, avoiding complex schemes to remove
 *   interrupts out of remote processor queues.
 *
 * - When masking, we set PQ to 10 and save the previous value
 *   of P and Q.
 *
 * - When unmasking, if saved Q was set, we set PQ to 11
 *   otherwise we leave PQ to the HW state which will be either
 *   10 if nothing happened or 11 if the interrupt fired while
 *   masked. Effectively we are OR'ing the previous Q into the
 *   HW Q.
 *
 *   Then if saved P is clear, we do an effective EOI (Q->P->Trigger)
 *   which will unmask the interrupt and shoot a new one if Q was
 *   set.
 *
 *   Otherwise (saved P is set) we leave PQ unchanged (so 10 or 11,
 *   effectively meaning an H_EOI from the guest is still expected
 *   for that interrupt).
 *
 * - If H_EOI occurs while masked, we clear the saved P.
 *
 * - When changing target, we account on the new target and
 *   increment a separate "pending" counter on the old one.
 *   This pending counter will be used to decrement the old
 *   target's count when its queue has been observed empty.
 */
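/*
 * A compact restatement of the mask/unmask scheme described above
 * (derived from that comment, no additional semantics intended):
 *
 *   operation        saved P/Q            ESB action
 *   mask             P,Q <- old PQ        set PQ = 10
 *   unmask           saved Q was set      set PQ = 11, then EOI if !P
 *   unmask           saved Q was clear    leave PQ, EOI if !P
 *   H_EOI (masked)   clear saved P        none
 */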
int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
			 u32 priority)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u8 new_act_prio;
	int rc = 0;
	u16 idx;

	if (!xive)
		return -ENODEV;

	pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n",
		 irq, server, priority);

	/* First, check provisioning of queues */
	if (priority != MASKED)
		rc = xive_check_provisioning(xive->kvm,
					     xive_prio_from_guest(priority));
	if (rc) {
		pr_devel(" provisioning failure %d !\n", rc);
		return rc;
	}

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	/*
	 * We first handle masking/unmasking since the locking
	 * might need to be retried due to EOIs, we'll handle
	 * targeting changes later. These functions will return
	 * with the SB lock held.
	 *
	 * xive_lock_and_mask() will also set state->guest_priority
	 * but won't otherwise change other fields of the state.
	 *
	 * xive_lock_for_unmask will not actually unmask, this will
	 * be done later by xive_finish_unmask() once the targeting
	 * has been done, so we don't try to unmask an interrupt
	 * that hasn't yet been targeted.
	 */
	if (priority == MASKED)
		xive_lock_and_mask(xive, sb, state);
	else
		xive_lock_for_unmask(sb, state);

	/*
	 * Then we handle targeting.
	 *
	 * First calculate a new "actual priority"
	 */
	new_act_prio = state->act_priority;
	if (priority != MASKED)
		new_act_prio = xive_prio_from_guest(priority);

	pr_devel(" new_act_prio=%x act_server=%x act_prio=%x\n",
		 new_act_prio, state->act_server, state->act_priority);

	/*
	 * Then check if we actually need to change anything.
	 *
	 * The condition for re-targeting the interrupt is that
	 * we have a valid new priority (new_act_prio is not 0xff)
	 * and either the server or the priority changed.
	 *
	 * Note: If act_priority was ff and the new priority is
	 *       also ff, we don't do anything and leave the interrupt
	 *       untargeted. An attempt of doing an int_on on an
	 *       untargeted interrupt will fail. If that is a problem
	 *       we could initialize interrupts with valid default
	 *       priorities.
	 */
	if (new_act_prio != MASKED &&
	    (state->act_server != server ||
	     state->act_priority != new_act_prio))
		rc = xive_target_interrupt(kvm, state, server, new_act_prio);

	/*
	 * Perform the final unmasking of the interrupt source
	 * if necessary
	 */
	if (priority != MASKED)
		xive_finish_unmask(xive, sb, state, priority);

	/*
	 * Finally Update saved_priority to match. Only int_on/off
	 * set this field to a different value.
	 */
	state->saved_priority = priority;

	arch_spin_unlock(&sb->lock);
	return rc;
}
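/*
 * Presumably reached from the XICS "ibm,set-xive" RTAS emulation (the
 * exact call site lives outside this file); the guest-visible priority
 * is converted with xive_prio_from_guest() before it touches the HW.
 */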
int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
			 u32 *priority)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];
	arch_spin_lock(&sb->lock);
	*server = state->act_server;
	*priority = state->guest_priority;
	arch_spin_unlock(&sb->lock);

	return 0;
}
int kvmppc_xive_int_on(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	pr_devel("int_on(irq=0x%x)\n", irq);

	/*
	 * Check if interrupt was not targeted
	 */
	if (state->act_priority == MASKED) {
		pr_devel("int_on on untargeted interrupt\n");
		return -EINVAL;
	}

	/* If saved_priority is 0xff, do nothing */
	if (state->saved_priority == MASKED)
		return 0;

	/*
	 * Lock and unmask it.
	 */
	xive_lock_for_unmask(sb, state);
	xive_finish_unmask(xive, sb, state, state->saved_priority);
	arch_spin_unlock(&sb->lock);

	return 0;
}
int kvmppc_xive_int_off(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	pr_devel("int_off(irq=0x%x)\n", irq);

	/*
	 * Lock and mask
	 */
	state->saved_priority = xive_lock_and_mask(xive, sb, state);
	arch_spin_unlock(&sb->lock);

	return 0;
}
static bool xive_restore_pending_irq(struct kvmppc_xive *xive, u32 irq)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return false;
	state = &sb->irq_state[idx];
	if (!state->valid)
		return false;

	/*
	 * Trigger the IPI. This assumes we never restore a pass-through
	 * interrupt which should be safe enough
	 */
	xive_irq_trigger(&state->ipi_data);

	return true;
}
u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

	if (!xc)
		return 0;

	/* Return the per-cpu state for state saving/migration */
	return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
	       (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT |
	       (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT;
}
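/*
 * The value above packs the saved CPPR and MFRR into the same u64
 * layout the XICS ICP ONE_REG interface uses (the shift macros come
 * from the KVM uapi headers); the pending priority field is forced to
 * 0xff, presumably because the XIVE state tracked here has no direct
 * equivalent for it.
 */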
int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
	u8 cppr, mfrr;
	u32 xisr;

	if (!xc || !xive)
		return -ENOENT;

	/* Grab individual state fields. We don't use pending_pri */
	cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
	xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
		KVM_REG_PPC_ICP_XISR_MASK;
	mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;

	pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n",
		 xc->server_num, cppr, mfrr, xisr);

	/*
	 * We can't update the state of a "pushed" VCPU, but that
	 * shouldn't happen here.
	 */
	if (WARN_ON(vcpu->arch.xive_pushed))
		return -EIO;

	/* Update VCPU HW saved state */
	vcpu->arch.xive_saved_state.cppr = cppr;
	xc->hw_cppr = xc->cppr = cppr;

	/*
	 * Update MFRR state. If it's not 0xff, we mark the VCPU as
	 * having a pending MFRR change, which will re-evaluate the
	 * target. The VCPU will thus potentially get a spurious
	 * interrupt but that's not a big deal.
	 */
	xc->mfrr = mfrr;
	if (mfrr < cppr)
		xive_irq_trigger(&xc->vp_ipi_data);

	/*
	 * Now saved XIRR is "interesting". It means there's something in
	 * the legacy "1 element" queue... for an IPI we simply ignore it,
	 * as the MFRR restore will handle that. For anything else we need
	 * to force a resend of the source.
	 * However the source may not have been setup yet. If that's the
	 * case, we keep that info and increment a counter in the xive to
	 * tell subsequent xive_set_source() to go look.
	 */
	if (xisr > XICS_IPI && !xive_restore_pending_irq(xive, xisr)) {
		xc->delayed_irq = xisr;
		xive->delayed_irqs++;
		pr_devel(" xisr restore delayed\n");
	}

	return 0;
}
int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
			   struct irq_desc *host_desc)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	struct irq_data *host_data = irq_desc_get_irq_data(host_desc);
	unsigned int host_irq = irq_desc_get_irq(host_desc);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data);
	u16 idx;
	u8 prio;
	int rc;

	if (!xive)
		return -ENODEV;

	pr_devel("set_mapped girq 0x%lx host HW irq 0x%x...\n", guest_irq, hw_irq);

	sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	/*
	 * Mark the passed-through interrupt as going to a VCPU,
	 * this will prevent further EOIs and similar operations
	 * from the XIVE code. It will also mask the interrupt
	 * to either PQ=10 or 11 state, the latter if the interrupt
	 * is pending. This will allow us to unmask or retrigger it
	 * after routing it to the guest with a simple EOI.
	 *
	 * The "state" argument is a "token", all it needs is to be
	 * non-NULL to switch to passed-through or NULL for the
	 * other way around. We may not yet have an actual VCPU
	 * target here and we don't really care.
	 */
	rc = irq_set_vcpu_affinity(host_irq, state);
	if (rc) {
		pr_err("Failed to set VCPU affinity for irq %d\n", host_irq);
		return rc;
	}

	/*
	 * Mask and read state of IPI. We need to know if its P bit
	 * is set as that means it's potentially already using a
	 * queue entry in the target
	 */
	prio = xive_lock_and_mask(xive, sb, state);
	pr_devel(" old IPI prio %02x P:%d Q:%d\n", prio,
		 state->old_p, state->old_q);

	/* Turn the IPI hard off */
	xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);

	/* Grab info about irq */
	state->pt_number = hw_irq;
	state->pt_data = irq_data_get_irq_handler_data(host_data);

	/*
	 * Configure the IRQ to match the existing configuration of
	 * the IPI if it was already targeted. Otherwise this will
	 * mask the interrupt in a lossy way (act_priority is 0xff)
	 * which is fine for a never started interrupt.
	 */
	xive_native_configure_irq(hw_irq,
				  xive->vp_base + state->act_server,
				  state->act_priority, state->number);

	/*
	 * We do an EOI to enable the interrupt (and retrigger if needed)
	 * if the guest has the interrupt unmasked and the P bit was *not*
	 * set in the IPI. If it was set, we know a slot may still be in
	 * use in the target queue thus we have to wait for a guest
	 * originated EOI
	 */
	if (prio != MASKED && !state->old_p)
		xive_vm_source_eoi(hw_irq, state->pt_data);

	/* Clear old_p/old_q as they are no longer relevant */
	state->old_p = state->old_q = false;

	/* Restore guest prio (unlocks EOI) */
	mb();
	state->guest_priority = prio;
	arch_spin_unlock(&sb->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xive_set_mapped);
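/*
 * Likely called from the host passthrough (irq bypass) setup path when
 * a device interrupt is mapped through to the guest; the exact caller
 * lives outside this file.
 */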
int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
			   struct irq_desc *host_desc)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	unsigned int host_irq = irq_desc_get_irq(host_desc);
	u16 idx;
	u8 prio;
	int rc;

	if (!xive)
		return -ENODEV;

	pr_devel("clr_mapped girq 0x%lx...\n", guest_irq);

	sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	/*
	 * Mask and read state of IRQ. We need to know if its P bit
	 * is set as that means it's potentially already using a
	 * queue entry in the target
	 */
	prio = xive_lock_and_mask(xive, sb, state);
	pr_devel(" old IRQ prio %02x P:%d Q:%d\n", prio,
		 state->old_p, state->old_q);

	/*
	 * If old_p is set, the interrupt is pending, we switch it to
	 * PQ=11. This will force a resend in the host so the interrupt
	 * isn't lost to whatever host driver may pick it up
	 */
	if (state->old_p)
		xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11);

	/* Release the passed-through interrupt to the host */
	rc = irq_set_vcpu_affinity(host_irq, NULL);
	if (rc) {
		pr_err("Failed to clr VCPU affinity for irq %d\n", host_irq);
		return rc;
	}

	/* Forget about the IRQ */
	state->pt_number = 0;
	state->pt_data = NULL;

	/* Reconfigure the IPI */
	xive_native_configure_irq(state->ipi_number,
				  xive->vp_base + state->act_server,
				  state->act_priority, state->number);

	/*
	 * If old_p is set (we have a queue entry potentially
	 * occupied) or the interrupt is masked, we set the IPI
	 * to PQ=10 state. Otherwise we just re-enable it (PQ=00).
	 */
	if (prio == MASKED || state->old_p)
		xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_10);
	else
		xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_00);

	/* Restore guest prio (unlocks EOI) */
	mb();
	state->guest_priority = prio;
	arch_spin_unlock(&sb->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped);
static void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_xive *xive = kvm->arch.xive;
	int i, j;

	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

		if (!sb)
			continue;
		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
			struct kvmppc_xive_irq_state *state = &sb->irq_state[j];

			if (!state->valid)
				continue;
			if (state->act_priority == MASKED)
				continue;
			if (state->act_server != xc->server_num)
				continue;

			/* Clean it up */
			arch_spin_lock(&sb->lock);
			state->act_priority = MASKED;
			xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
			xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
			if (state->pt_number) {
				xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
				xive_native_configure_irq(state->pt_number, 0, MASKED, 0);
			}
			arch_spin_unlock(&sb->lock);
		}
	}
}
void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = xc->xive;
	int i;

	if (!xc)
		return;

	pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num);

	/* Ensure no interrupt is still routed to that VP */
	xc->valid = false;
	kvmppc_xive_disable_vcpu_interrupts(vcpu);

	/* Mask the VP IPI */
	xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);

	/* Disable the VP */
	xive_native_disable_vp(xc->vp_id);

	/* Free the queues & associated interrupts */
	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		struct xive_q *q = &xc->queues[i];

		/* Free the escalation irq */
		if (xc->esc_virq[i]) {
			free_irq(xc->esc_virq[i], vcpu);
			irq_dispose_mapping(xc->esc_virq[i]);
			kfree(xc->esc_virq_names[i]);
		}
		/* Free the queue */
		xive_native_disable_queue(xc->vp_id, q, i);
		if (q->qpage) {
			free_pages((unsigned long)q->qpage,
				   xive->q_page_order);
			q->qpage = NULL;
		}
	}

	/* Free the VP IPI */
	if (xc->vp_ipi) {
		xive_cleanup_irq_data(&xc->vp_ipi_data);
		xive_native_free_irq(xc->vp_ipi);
	}

	/* Free the VP structure */
	kfree(xc);
}
int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
			     struct kvm_vcpu *vcpu, u32 cpu)
{
	struct kvmppc_xive *xive = dev->private;
	struct kvmppc_xive_vcpu *xc;
	int i, r;

	pr_devel("connect_vcpu(cpu=%d)\n", cpu);

	if (dev->ops != &kvm_xive_ops) {
		pr_devel("Wrong ops !\n");
		return -EPERM;
	}
	if (xive->kvm != vcpu->kvm)
		return -EPERM;
	if (vcpu->arch.irq_type)
		return -EBUSY;
	if (kvmppc_xive_find_server(vcpu->kvm, cpu)) {
		pr_devel("Duplicate !\n");
		return -EEXIST;
	}
	if (cpu >= KVM_MAX_VCPUS) {
		pr_devel("Out of bounds !\n");
		return -EINVAL;
	}
	xc = kzalloc(sizeof(*xc), GFP_KERNEL);
	if (!xc)
		return -ENOMEM;

	/* We need to synchronize with queue provisioning */
	mutex_lock(&vcpu->kvm->lock);
	vcpu->arch.xive_vcpu = xc;
	xc->xive = xive;
	xc->vcpu = vcpu;
	xc->server_num = cpu;
	xc->vp_id = xive->vp_base + cpu;
	xc->mfrr = 0xff;
	xc->valid = true;

	r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
	if (r)
		goto bail;

	/* Configure VCPU fields for use by assembly push/pull */
	vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
	vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);

	/* Allocate IPI */
	xc->vp_ipi = xive_native_alloc_irq();
	if (!xc->vp_ipi) {
		pr_err("Failed to allocate xive irq for VCPU IPI\n");
		r = -EIO;
		goto bail;
	}
	pr_devel(" IPI=0x%x\n", xc->vp_ipi);

	r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data);
	if (r)
		goto bail;

	/*
	 * Enable the VP first as the single escalation mode will
	 * affect escalation interrupts numbering
	 */
	r = xive_native_enable_vp(xc->vp_id, xive->single_escalation);
	if (r) {
		pr_err("Failed to enable VP in OPAL, err %d\n", r);
		goto bail;
	}

	/*
	 * Initialize queues. Initially we set them all for no queueing
	 * and we enable escalation for queue 0 only which we'll use for
	 * our mfrr change notifications. If the VCPU is hot-plugged, we
	 * do handle provisioning however based on the existing "map"
	 * of enabled queues.
	 */
	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		struct xive_q *q = &xc->queues[i];

		/* Single escalation, no queue 7 */
		if (i == 7 && xive->single_escalation)
			break;

		/* Is queue already enabled ? Provision it */
		if (xive->qmap & (1 << i)) {
			r = xive_provision_queue(vcpu, i);
			if (r == 0 && !xive->single_escalation)
				xive_attach_escalation(vcpu, i);
			if (r)
				goto bail;
		} else {
			r = xive_native_configure_queue(xc->vp_id,
							q, i, NULL, 0, true);
			if (r) {
				pr_err("Failed to configure queue %d for VCPU %d\n",
				       i, cpu);
				goto bail;
			}
		}
	}

	/* If not done above, attach priority 0 escalation */
	r = xive_attach_escalation(vcpu, 0);
	if (r)
		goto bail;

	/* Route the IPI */
	r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI);
	if (!r)
		xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00);

bail:
	mutex_unlock(&vcpu->kvm->lock);
	if (r) {
		kvmppc_xive_cleanup_vcpu(vcpu);
		return r;
	}

	vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
	return 0;
}
/*
 * Scanning of queues before/after migration save
 */
static void xive_pre_save_set_queued(struct kvmppc_xive *xive, u32 irq)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return;

	state = &sb->irq_state[idx];

	/* Some sanity checking */
	if (!state->valid) {
		pr_err("invalid irq 0x%x in cpu queue!\n", irq);
		return;
	}

	/*
	 * If the interrupt is in a queue it should have P set.
	 * We warn so that it gets reported. A backtrace isn't useful
	 * so no need to use a WARN_ON.
	 */
	if (!state->saved_p)
		pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq);

	/* Set flag */
	state->in_queue = true;
}
static void xive_pre_save_mask_irq(struct kvmppc_xive *xive,
				   struct kvmppc_xive_src_block *sb,
				   u32 irq)
{
	struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];

	if (!state->valid)
		return;

	/* Mask and save state, this will also sync HW queues */
	state->saved_scan_prio = xive_lock_and_mask(xive, sb, state);

	/* Transfer P and Q */
	state->saved_p = state->old_p;
	state->saved_q = state->old_q;

	/* Unlock */
	arch_spin_unlock(&sb->lock);
}
static void xive_pre_save_unmask_irq(struct kvmppc_xive *xive,
				     struct kvmppc_xive_src_block *sb,
				     u32 irq)
{
	struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];

	if (!state->valid)
		return;

	/*
	 * Lock / exclude EOI (not technically necessary if the
	 * guest isn't running concurrently). If this becomes a
	 * performance issue we can probably remove the lock.
	 */
	xive_lock_for_unmask(sb, state);

	/* Restore mask/prio if it wasn't masked */
	if (state->saved_scan_prio != MASKED)
		xive_finish_unmask(xive, sb, state, state->saved_scan_prio);

	/* Unlock */
	arch_spin_unlock(&sb->lock);
}
static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q)
{
	u32 idx = q->idx;
	u32 toggle = q->toggle;
	u32 irq;

	do {
		irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle);
		if (irq > XICS_IPI)
			xive_pre_save_set_queued(xive, irq);
	} while (irq);
}
static void xive_pre_save_scan(struct kvmppc_xive *xive)
{
	struct kvm_vcpu *vcpu = NULL;
	int i, j;

	/*
	 * See comment in xive_get_source() about how this
	 * works. Collect a stable state for all interrupts
	 */
	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

		if (!sb)
			continue;
		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
			xive_pre_save_mask_irq(xive, sb, j);
	}

	/* Then scan the queues and update the "in_queue" flag */
	kvm_for_each_vcpu(i, vcpu, xive->kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

		if (!xc)
			continue;
		for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) {
			if (xc->queues[j].qpage)
				xive_pre_save_queue(xive, &xc->queues[j]);
		}
	}

	/* Finally restore interrupt states */
	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

		if (!sb)
			continue;
		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
			xive_pre_save_unmask_irq(xive, sb, j);
	}
}
static void xive_post_save_scan(struct kvmppc_xive *xive)
{
	u32 i, j;

	/* Clear all the in_queue flags */
	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

		if (!sb)
			continue;
		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
			sb->irq_state[j].in_queue = false;
	}

	/* Next get_source() will do a new scan */
	xive->saved_src_count = 0;
}
/*
 * This returns the source configuration and state to user space.
 */
static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u64 __user *ubufp = (u64 __user *) addr;
	u64 val, prio;
	u16 idx;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -ENOENT;

	state = &sb->irq_state[idx];
	if (!state->valid)
		return -ENOENT;

	pr_devel("get_source(%ld)...\n", irq);

	/*
	 * So to properly save the state into something that looks like a
	 * XICS migration stream we cannot treat interrupts individually.
	 *
	 * We need, instead, mask them all (& save their previous PQ state)
	 * to get a stable state in the HW, then sync them to ensure that
	 * any interrupt that had already fired hits its queue, and finally
	 * scan all the queues to collect which interrupts are still present
	 * in the queues, so we can set the "pending" flag on them and
	 * they can be resent on restore.
	 *
	 * So we do it all when the "first" interrupt gets saved, all the
	 * state is collected at that point, the rest of xive_get_source()
	 * will merely collect and convert that state to the expected
	 * userspace bit mask.
	 */
	if (xive->saved_src_count == 0)
		xive_pre_save_scan(xive);
	xive->saved_src_count++;

	/* Convert saved state into something compatible with xics */
	val = state->act_server;
	prio = state->saved_scan_prio;

	if (prio == MASKED) {
		val |= KVM_XICS_MASKED;
		prio = state->saved_priority;
	}
	val |= prio << KVM_XICS_PRIORITY_SHIFT;
	if (state->lsi) {
		val |= KVM_XICS_LEVEL_SENSITIVE;
		if (state->saved_p)
			val |= KVM_XICS_PENDING;
	} else {
		if (state->saved_p)
			val |= KVM_XICS_PRESENTED;
		if (state->saved_q)
			val |= KVM_XICS_QUEUED;

		/*
		 * We mark it pending (which will attempt a re-delivery)
		 * if we are in a queue *or* we were masked and had
		 * Q set which is equivalent to the XICS "masked pending"
		 * state
		 */
		if (state->in_queue || (prio == MASKED && state->saved_q))
			val |= KVM_XICS_PENDING;
	}

	/*
	 * If that was the last interrupt saved, reset the
	 * in_queue flags
	 */
	if (xive->saved_src_count == xive->src_count)
		xive_post_save_scan(xive);

	/* Copy the result to userspace */
	if (put_user(val, ubufp))
		return -EFAULT;

	return 0;
}
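/*
 * To summarize the encoding built above: the returned u64 carries the
 * target server number in its low bits, the guest priority at
 * KVM_XICS_PRIORITY_SHIFT, and the MASKED, LEVEL_SENSITIVE, PENDING,
 * PRESENTED and QUEUED flag bits reflecting the captured P/Q state.
 */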
static struct kvmppc_xive_src_block *xive_create_src_block(struct kvmppc_xive *xive,
							   int irq)
{
	struct kvm *kvm = xive->kvm;
	struct kvmppc_xive_src_block *sb;
	int i, bid;

	bid = irq >> KVMPPC_XICS_ICS_SHIFT;

	mutex_lock(&kvm->lock);

	/* block already exists - somebody else got here first */
	if (xive->src_blocks[bid])
		goto out;

	/* Create the ICS */
	sb = kzalloc(sizeof(*sb), GFP_KERNEL);
	if (!sb)
		goto out;

	sb->id = bid;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i;
		sb->irq_state[i].guest_priority = MASKED;
		sb->irq_state[i].saved_priority = MASKED;
		sb->irq_state[i].act_priority = MASKED;
	}
	smp_wmb();
	xive->src_blocks[bid] = sb;

	if (bid > xive->max_sbid)
		xive->max_sbid = bid;

out:
	mutex_unlock(&kvm->lock);
	return xive->src_blocks[bid];
}
static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq)
{
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu = NULL;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

		if (!xc)
			continue;

		if (xc->delayed_irq == irq) {
			xc->delayed_irq = 0;
			xive->delayed_irqs--;
			return true;
		}
	}
	return false;
}
static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val;
	u8 act_prio, guest_prio;
	u32 server;
	int rc = 0;

	if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
		return -ENOENT;

	pr_devel("set_source(irq=0x%lx)\n", irq);

	/* Find the source */
	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb) {
		pr_devel("No source, creating source block...\n");
		sb = xive_create_src_block(xive, irq);
		if (!sb) {
			pr_devel("Failed to create block...\n");
			return -ENOMEM;
		}
	}
	state = &sb->irq_state[idx];

	/* Read user passed data */
	if (get_user(val, ubufp)) {
		pr_devel("fault getting user info !\n");
		return -EFAULT;
	}

	server = val & KVM_XICS_DESTINATION_MASK;
	guest_prio = val >> KVM_XICS_PRIORITY_SHIFT;

	pr_devel(" val=0x%016llx (server=0x%x, guest_prio=%d)\n",
		 val, server, guest_prio);

	/*
	 * If the source doesn't already have an IPI, allocate
	 * one and get the corresponding data
	 */
	if (!state->ipi_number) {
		state->ipi_number = xive_native_alloc_irq();
		if (state->ipi_number == 0) {
			pr_devel("Failed to allocate IPI !\n");
			return -ENOMEM;
		}
		xive_native_populate_irq_data(state->ipi_number, &state->ipi_data);
		pr_devel(" src_ipi=0x%x\n", state->ipi_number);
	}

	/*
	 * We use lock_and_mask() to set us in the right masked
	 * state. We will override that state from the saved state
	 * further down, but this will handle the cases of interrupts
	 * that need FW masking. We set the initial guest_priority to
	 * 0 before calling it to ensure it actually performs the masking.
	 */
	state->guest_priority = 0;
	xive_lock_and_mask(xive, sb, state);

	/*
	 * Now, we select a target if we have one. If we don't we
	 * leave the interrupt untargeted. It means that an interrupt
	 * can become "untargeted" across migration if it was masked
	 * by set_xive() but there is little we can do about it.
	 */

	/* First convert prio and mark interrupt as untargeted */
	act_prio = xive_prio_from_guest(guest_prio);
	state->act_priority = MASKED;

	/*
	 * We need to drop the lock due to the mutex below. Hopefully
	 * nothing is touching that interrupt yet since it hasn't been
	 * advertised to a running guest yet
	 */
	arch_spin_unlock(&sb->lock);

	/* If we have a priority target the interrupt */
	if (act_prio != MASKED) {
		/* First, check provisioning of queues */
		mutex_lock(&xive->kvm->lock);
		rc = xive_check_provisioning(xive->kvm, act_prio);
		mutex_unlock(&xive->kvm->lock);

		/* Target interrupt */
		if (rc == 0)
			rc = xive_target_interrupt(xive->kvm, state,
						   server, act_prio);
		/*
		 * If provisioning or targeting failed, leave it
		 * alone and masked. It will remain disabled until
		 * the guest re-targets it.
		 */
	}

	/*
	 * Find out if this was a delayed irq stashed in an ICP,
	 * in which case, treat it as pending
	 */
	if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) {
		val |= KVM_XICS_PENDING;
		pr_devel(" Found delayed ! forcing PENDING !\n");
	}

	/* Cleanup the SW state */
	state->old_p = false;
	state->old_q = false;
	state->lsi = false;
	state->asserted = false;

	/* Restore LSI state */
	if (val & KVM_XICS_LEVEL_SENSITIVE) {
		state->lsi = true;
		if (val & KVM_XICS_PENDING)
			state->asserted = true;
		pr_devel(" LSI ! Asserted=%d\n", state->asserted);
	}

	/*
	 * Restore P and Q. If the interrupt was pending, we
	 * force Q and !P, which will trigger a resend.
	 *
	 * That means that a guest that had both an interrupt
	 * pending (queued) and Q set will restore with only
	 * one instance of that interrupt instead of 2, but that
	 * is perfectly fine as coalescing interrupts that haven't
	 * been presented yet is always allowed.
	 */
	if (val & KVM_XICS_PRESENTED && !(val & KVM_XICS_PENDING))
		state->old_p = true;
	if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING)
		state->old_q = true;

	pr_devel("  P=%d, Q=%d\n", state->old_p, state->old_q);

	/*
	 * If the interrupt was unmasked, update guest priority and
	 * perform the appropriate state transition and do a
	 * re-trigger if necessary.
	 */
	if (val & KVM_XICS_MASKED) {
		pr_devel(" masked, saving prio\n");
		state->guest_priority = MASKED;
		state->saved_priority = guest_prio;
	} else {
		pr_devel(" unmasked, restoring to prio %d\n", guest_prio);
		xive_finish_unmask(xive, sb, state, guest_prio);
		state->saved_priority = guest_prio;
	}

	/* Increment the number of valid sources and mark this one valid */
	if (!state->valid)
		xive->src_count++;
	state->valid = true;

	return 0;
}
int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
			bool line_status)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;

	/* Perform locklessly .... (we need to do some RCUisms here...) */
	state = &sb->irq_state[idx];
	if (!state->valid)
		return -EINVAL;

	/* We don't allow a trigger on a passed-through interrupt */
	if (state->pt_number)
		return -EINVAL;

	if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL)
		state->asserted = 1;
	else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
		state->asserted = 0;
		return 0;
	}

	/* Trigger the IPI */
	xive_irq_trigger(&state->ipi_data);

	return 0;
}
static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xive *xive = dev->private;

	/* We honor the existing XICS ioctl */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xive_set_source(xive, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xive_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xive *xive = dev->private;

	/* We honor the existing XICS ioctl */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xive_get_source(xive, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xive_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	/* We honor the same limits as XICS, at least for now */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
		    attr->attr < KVMPPC_XICS_NR_IRQS)
			return 0;
		break;
	}
	return -ENXIO;
}
static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd)
{
	xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
	xive_native_configure_irq(hw_num, 0, MASKED, 0);
	xive_cleanup_irq_data(xd);
}

static void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
{
	int i;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct kvmppc_xive_irq_state *state = &sb->irq_state[i];

		if (!state->valid)
			continue;

		kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data);
		xive_native_free_irq(state->ipi_number);

		/* Pass-through, cleanup too */
		if (state->pt_number)
			kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data);

		state->valid = false;
	}
}
static void kvmppc_xive_free(struct kvm_device *dev)
{
	struct kvmppc_xive *xive = dev->private;
	struct kvm *kvm = xive->kvm;
	int i;

	debugfs_remove(xive->dentry);

	if (kvm)
		kvm->arch.xive = NULL;

	/* Mask and free interrupts */
	for (i = 0; i <= xive->max_sbid; i++) {
		if (xive->src_blocks[i])
			kvmppc_xive_free_sources(xive->src_blocks[i]);
		kfree(xive->src_blocks[i]);
		xive->src_blocks[i] = NULL;
	}

	if (xive->vp_base != XIVE_INVALID_VP)
		xive_native_free_vp_block(xive->vp_base);

	kfree(xive);
	kfree(dev);
}
static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
{
	struct kvmppc_xive *xive;
	struct kvm *kvm = dev->kvm;
	int ret = 0;

	pr_devel("Creating xive for partition\n");

	xive = kzalloc(sizeof(*xive), GFP_KERNEL);
	if (!xive)
		return -ENOMEM;

	dev->private = xive;
	xive->dev = dev;
	xive->kvm = kvm;

	/* Already there ? */
	if (kvm->arch.xive)
		ret = -EEXIST;
	else
		kvm->arch.xive = xive;

	/* We use the default queue size set by the host */
	xive->q_order = xive_native_default_eq_shift();
	if (xive->q_order < PAGE_SHIFT)
		xive->q_page_order = 0;
	else
		xive->q_page_order = xive->q_order - PAGE_SHIFT;

	/* Allocate a bunch of VPs */
	xive->vp_base = xive_native_alloc_vp_block(KVM_MAX_VCPUS);
	pr_devel("VP_Base=%x\n", xive->vp_base);

	if (xive->vp_base == XIVE_INVALID_VP)
		ret = -ENOMEM;

	xive->single_escalation = xive_native_has_single_escalation();

	if (ret) {
		kfree(xive);
		return ret;
	}

	return 0;
}
static int xive_debug_show(struct seq_file *m, void *private)
{
	struct kvmppc_xive *xive = m->private;
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	u64 t_rm_h_xirr = 0;
	u64 t_rm_h_ipoll = 0;
	u64 t_rm_h_cppr = 0;
	u64 t_rm_h_eoi = 0;
	u64 t_rm_h_ipi = 0;
	u64 t_vm_h_xirr = 0;
	u64 t_vm_h_ipoll = 0;
	u64 t_vm_h_cppr = 0;
	u64 t_vm_h_eoi = 0;
	u64 t_vm_h_ipi = 0;
	unsigned int i;

	if (!kvm)
		return 0;

	seq_printf(m, "=========\nVCPU state\n=========\n");

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
		unsigned int j;	/* separate index so we don't clobber the vcpu iterator */

		if (!xc)
			continue;

		seq_printf(m, "cpu server %#x CPPR:%#x HWCPPR:%#x"
			   " MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n",
			   xc->server_num, xc->cppr, xc->hw_cppr,
			   xc->mfrr, xc->pending,
			   xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);
		for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) {
			struct xive_q *q = &xc->queues[j];
			u32 i0, i1, idx;

			if (!q->qpage && !xc->esc_virq[j])
				continue;

			seq_printf(m, " [q%d]: ", j);

			if (q->qpage) {
				idx = q->idx;
				i0 = be32_to_cpup(q->qpage + idx);
				idx = (idx + 1) & q->msk;
				i1 = be32_to_cpup(q->qpage + idx);
				seq_printf(m, "T=%d %08x %08x... \n", q->toggle, i0, i1);
			}
			if (xc->esc_virq[j]) {
				struct irq_data *d = irq_get_irq_data(xc->esc_virq[j]);
				struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
				u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);

				seq_printf(m, "E:%c%c I(%d:%llx:%llx)",
					   (pq & XIVE_ESB_VAL_P) ? 'P' : 'p',
					   (pq & XIVE_ESB_VAL_Q) ? 'Q' : 'q',
					   xc->esc_virq[j], pq, xd->eoi_page);
				seq_printf(m, "\n");
			}
		}

		t_rm_h_xirr += xc->stat_rm_h_xirr;
		t_rm_h_ipoll += xc->stat_rm_h_ipoll;
		t_rm_h_cppr += xc->stat_rm_h_cppr;
		t_rm_h_eoi += xc->stat_rm_h_eoi;
		t_rm_h_ipi += xc->stat_rm_h_ipi;
		t_vm_h_xirr += xc->stat_vm_h_xirr;
		t_vm_h_ipoll += xc->stat_vm_h_ipoll;
		t_vm_h_cppr += xc->stat_vm_h_cppr;
		t_vm_h_eoi += xc->stat_vm_h_eoi;
		t_vm_h_ipi += xc->stat_vm_h_ipi;
	}

	seq_printf(m, "Hcalls totals\n");
	seq_printf(m, " H_XIRR  R=%10lld V=%10lld\n", t_rm_h_xirr, t_vm_h_xirr);
	seq_printf(m, " H_IPOLL R=%10lld V=%10lld\n", t_rm_h_ipoll, t_vm_h_ipoll);
	seq_printf(m, " H_CPPR  R=%10lld V=%10lld\n", t_rm_h_cppr, t_vm_h_cppr);
	seq_printf(m, " H_EOI   R=%10lld V=%10lld\n", t_rm_h_eoi, t_vm_h_eoi);
	seq_printf(m, " H_IPI   R=%10lld V=%10lld\n", t_rm_h_ipi, t_vm_h_ipi);

	return 0;
}
static int xive_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, xive_debug_show, inode->i_private);
}

static const struct file_operations xive_debug_fops = {
	.open = xive_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static void xive_debugfs_init(struct kvmppc_xive *xive)
{
	char *name;

	name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive);
	if (!name) {
		pr_err("%s: no memory for name\n", __func__);
		return;
	}

	xive->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
					   xive, &xive_debug_fops);

	pr_debug("%s: created %s\n", __func__, name);
	kfree(name);
}
static void kvmppc_xive_init(struct kvm_device *dev)
{
	struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private;

	/* Register some debug interfaces */
	xive_debugfs_init(xive);
}

struct kvm_device_ops kvm_xive_ops = {
	.name = "kvm-xive",
	.create = kvmppc_xive_create,
	.init = kvmppc_xive_init,
	.destroy = kvmppc_xive_free,
	.set_attr = xive_set_attr,
	.get_attr = xive_get_attr,
	.has_attr = xive_has_attr,
};
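/*
 * For orientation only (the surrounding KVM glue lives outside this
 * file, so treat this as an assumption): userspace typically creates
 * this device through the KVM_CREATE_DEVICE ioctl using the XICS
 * device type, then drives the handlers above via KVM_SET_DEVICE_ATTR
 * / KVM_GET_DEVICE_ATTR with group KVM_DEV_XICS_GRP_SOURCES, the
 * interrupt number as the attribute, and addr pointing at the u64
 * source word described in xive_get_source()/xive_set_source().
 */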
void kvmppc_xive_init_module(void)
{
	__xive_vm_h_xirr = xive_vm_h_xirr;
	__xive_vm_h_ipoll = xive_vm_h_ipoll;
	__xive_vm_h_ipi = xive_vm_h_ipi;
	__xive_vm_h_cppr = xive_vm_h_cppr;
	__xive_vm_h_eoi = xive_vm_h_eoi;
}

void kvmppc_xive_exit_module(void)
{
	__xive_vm_h_xirr = NULL;
	__xive_vm_h_ipoll = NULL;
	__xive_vm_h_ipi = NULL;
	__xive_vm_h_cppr = NULL;
	__xive_vm_h_eoi = NULL;
}
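/*
 * These module hooks install the virtual mode hcall variants generated
 * from book3s_xive_template.c (see the X_PFX definitions at the top of
 * this file) into the __xive_vm_* function pointers, and clear them
 * again on exit, presumably so the pointers never outlive the module.
 */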