// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation
 */

/* File to be included by other .c files */

#define XGLUE(a,b) a##b
#define GLUE(a,b) XGLUE(a,b)

/* Dummy interrupt used when taking interrupts out of a queue in H_CPPR */
#define XICS_DUMMY	1
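
/*
 * The GLUE()/X_PFX machinery lets the handlers below be compiled several
 * times into different variants. Each including .c file is expected to
 * define X_PFX, X_STATIC, X_STAT_PFX and the __x_* accessors before
 * including this template, roughly along these lines (illustrative
 * sketch only; the real definitions live in the including files):
 *
 *	#define X_PFX		xive_vm_
 *	#define X_STATIC	static
 *	#define X_STAT_PFX	stat_vm_
 *	#define __x_tima	xive_tima
 *	#define __x_readq	__raw_readq
 *	...and then #include this template.
 *
 * GLUE(X_PFX,h_xirr) then expands to, e.g., xive_vm_h_xirr.
 */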
static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
{
	u8 cppr;
	u16 ack;

	/*
	 * Ensure any previous store to CPPR is ordered vs.
	 * the subsequent loads from PIPR or ACK.
	 */
	eieio();

	/* Perform the acknowledge OS to register cycle. */
	ack = be16_to_cpu(__x_readw(__x_tima + TM_SPC_ACK_OS_REG));

	/* Synchronize subsequent queue accesses */
	mb();

	/* XXX Check grouping level */

	/* Anything ? */
	if (!((ack >> 8) & TM_QW1_NSR_EO))
		return;

	/* Grab CPPR of the most favored pending interrupt */
	cppr = ack & 0xff;
	if (cppr < 8)
		xc->pending |= 1 << cppr;

#ifdef XIVE_RUNTIME_CHECKS
	/* Check consistency */
	if (cppr >= xc->hw_cppr)
		pr_warn("KVM-XIVE: CPU %d odd ack CPPR, got %d at %d\n",
			smp_processor_id(), cppr, xc->hw_cppr);
#endif

	/*
	 * Update our image of the HW CPPR. We don't yet modify
	 * xc->cppr, this will be done as we scan for interrupts
	 * in the queues.
	 */
	xc->hw_cppr = cppr;
}
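
/*
 * Load a byte from one of the source's ESB management pages. The offset
 * selects the special operation (e.g. XIVE_ESB_SET_PQ_00 below); on a
 * little-endian host the interesting byte comes back in the top byte of
 * the 64-bit MMIO load, hence the shift.
 */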
static u8 GLUE(X_PFX,esb_load)(struct xive_irq_data *xd, u32 offset)
{
	u64 val;

	if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
		offset |= offset << 4;

	val = __x_readq(__x_eoi_page(xd) + offset);
#ifdef __LITTLE_ENDIAN__
	val >>= 64-8;
#endif
	return (u8)val;
}
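
/*
 * Perform an EOI on a source, picking the cheapest mechanism the source
 * supports: a "store EOI" write when available, a firmware EOI for the
 * few sources that need it, the HW EOI load cycle for LSIs, and
 * otherwise a PQ reset to 00 followed by a manual re-trigger if Q was
 * found set.
 */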
static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
{
	/* If the XIVE supports the new "store EOI" facility, use it */
	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
		__x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI);
	else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW)
		opal_int_eoi(hw_irq);
	else if (xd->flags & XIVE_IRQ_FLAG_LSI) {
		/*
		 * For LSIs the HW EOI cycle is used rather than PQ bits,
		 * as they are automatically re-triggered in HW when still
		 * pending.
		 */
		__x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
	} else {
		uint64_t eoi_val;

		/*
		 * Otherwise for EOI, we use the special MMIO that does
		 * a clear of both P and Q and returns the old Q,
		 * except for LSIs where we use the "EOI cycle" special
		 * load.
		 *
		 * This allows us to then do a re-trigger if Q was set
		 * rather than synthesizing an interrupt in software.
		 */
		eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);

		/* Re-trigger if needed */
		if ((eoi_val & 1) && __x_trig_page(xd))
			__x_writeq(0, __x_trig_page(xd));
	}
}
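
/*
 * scan_interrupts() is used in three modes, selected by scan_type:
 *
 *  - scan_fetch: actually consume the most favored interrupt, updating
 *    the queue pointers and xc->cppr, as H_XIRR does.
 *  - scan_poll:  peek only, leaving the queues and xc untouched (H_IPOLL).
 *  - scan_eoi:   just refresh the pending bitmap after an EOI.
 *
 * It returns the interrupt number found (0 if none, XICS_IPI for a
 * pending MFRR-based IPI).
 */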
enum {
	scan_fetch,
	scan_poll,
	scan_eoi,
};

static u32 GLUE(X_PFX,scan_interrupts)(struct kvmppc_xive_vcpu *xc,
				       u8 pending, int scan_type)
{
	u32 hirq = 0;
	u8 prio = 0xff;

	/* Find highest pending priority */
	while ((xc->mfrr != 0xff || pending != 0) && hirq == 0) {
		struct xive_q *q;
		u32 idx, toggle;
		__be32 *qpage;

		/*
		 * If pending is 0 this will return 0xff which is what
		 * we want.
		 */
		prio = ffs(pending) - 1;

		/* Don't scan past the guest cppr */
		if (prio >= xc->cppr || prio > 7) {
			if (xc->mfrr < xc->cppr) {
				prio = xc->mfrr;
				hirq = XICS_IPI;
			}
			break;
		}

		/* Grab queue and pointers */
		q = &xc->queues[prio];
		idx = q->idx;
		toggle = q->toggle;

		/*
		 * Snapshot the queue page. The test further down for EOI
		 * must use the same "copy" that was used by __xive_read_eq
		 * since qpage can be set concurrently and we don't want
		 * to miss an EOI.
		 */
		qpage = READ_ONCE(q->qpage);

skip_ipi:
		/*
		 * Try to fetch from the queue. Will return 0 for a
		 * non-queueing priority (i.e., qpage = 0).
		 */
		hirq = __xive_read_eq(qpage, q->msk, &idx, &toggle);

		/*
		 * If this was a signal for an MFRR change done by
		 * H_IPI we skip it. Additionally, if we were fetching
		 * we EOI it now, thus re-enabling reception of a new
		 * such signal.
		 *
		 * We also need to do that if prio is 0 and we had no
		 * page for the queue. In this case, we have a non-queued
		 * IPI that needs to be EOId.
		 *
		 * This is safe because if we have another pending MFRR
		 * change that wasn't observed above, the Q bit will have
		 * been set and another occurrence of the IPI will trigger.
		 */
		if (hirq == XICS_IPI || (prio == 0 && !qpage)) {
			if (scan_type == scan_fetch) {
				GLUE(X_PFX,source_eoi)(xc->vp_ipi,
						       &xc->vp_ipi_data);
				q->idx = idx;
				q->toggle = toggle;
			}
			/* Loop back on same queue with updated idx/toggle */
#ifdef XIVE_RUNTIME_CHECKS
			WARN_ON(hirq && hirq != XICS_IPI);
#endif
			if (hirq)
				goto skip_ipi;
		}

		/* If it's the dummy interrupt, continue searching */
		if (hirq == XICS_DUMMY)
			goto skip_ipi;

		/* Clear the pending bit if the queue is now empty */
		if (!hirq) {
			pending &= ~(1 << prio);

			/*
			 * Check if the queue count needs adjusting due to
			 * interrupts being moved away.
			 */
			if (atomic_read(&q->pending_count)) {
				int p = atomic_xchg(&q->pending_count, 0);

				if (p) {
#ifdef XIVE_RUNTIME_CHECKS
					WARN_ON(p > atomic_read(&q->count));
#endif
					atomic_sub(p, &q->count);
				}
			}
		}

		/*
		 * If the most favored prio we found pending is less
		 * favored than (or equal to) a pending IPI, we return
		 * the IPI instead.
		 */
		if (prio >= xc->mfrr && xc->mfrr < xc->cppr) {
			prio = xc->mfrr;
			hirq = XICS_IPI;
			break;
		}

		/* If fetching, update queue pointers */
		if (scan_type == scan_fetch) {
			q->idx = idx;
			q->toggle = toggle;
		}
	}

	/* If we are just taking a "peek", do nothing else */
	if (scan_type == scan_poll)
		return hirq;

	/* Update the pending bits */
	xc->pending = pending;

	/*
	 * If this is an EOI that's it, no CPPR adjustment done here,
	 * all we needed was to clean up the stale pending bits and check
	 * if there's anything left.
	 */
	if (scan_type == scan_eoi)
		return hirq;

	/*
	 * If we found an interrupt, adjust what the guest CPPR should
	 * be as if we had just fetched that interrupt from HW.
	 *
	 * Note: This can only make xc->cppr smaller as the previous
	 * loop will only exit with hirq != 0 if prio is lower than
	 * the current xc->cppr. Thus we don't need to re-check xc->mfrr
	 * for pending IPIs.
	 */
	if (hirq)
		xc->cppr = prio;

	/*
	 * If it was an IPI the HW CPPR might have been lowered too much
	 * as the HW interrupt we use for IPIs is routed to priority 0.
	 *
	 * We re-sync it here.
	 */
	if (xc->cppr != xc->hw_cppr) {
		xc->hw_cppr = xc->cppr;
		__x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR);
	}

	return hirq;
}
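
/*
 * H_XIRR: fetch the most favored pending interrupt. The result placed
 * in GPR4 follows the XICS XIRR layout, i.e. the previous CPPR in the
 * top byte and the interrupt number in the low 24 bits:
 *
 *	xirr = (old_cppr << 24) | hirq;
 */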
X_STATIC unsigned long GLUE(X_PFX,h_xirr)(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	u8 old_cppr;
	u32 hirq;

	pr_devel("H_XIRR\n");

	xc->GLUE(X_STAT_PFX,h_xirr)++;

	/* First collect pending bits from HW */
	GLUE(X_PFX,ack_pending)(xc);

	pr_devel(" new pending=0x%02x hw_cppr=%d cppr=%d\n",
		 xc->pending, xc->hw_cppr, xc->cppr);

	/* Grab previous CPPR and reverse map it */
	old_cppr = xive_prio_to_guest(xc->cppr);

	/* Scan for actual interrupts */
	hirq = GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_fetch);

	pr_devel(" got hirq=0x%x hw_cppr=%d cppr=%d\n",
		 hirq, xc->hw_cppr, xc->cppr);

#ifdef XIVE_RUNTIME_CHECKS
	/* That should never hit */
	if (hirq & 0xff000000)
		pr_warn("XIVE: Weird guest interrupt number 0x%08x\n", hirq);
#endif

	/*
	 * XXX We could check if the interrupt is masked here and
	 * filter it. If we chose to do so, we would need to do:
	 *
	 *    if (masked) {
	 *        lock();
	 *        if (masked) {
	 *            old_Q = true;
	 *            hirq = 0;
	 *        }
	 *        unlock();
	 *    }
	 */

	/* Return interrupt and old CPPR in GPR4 */
	vcpu->arch.regs.gpr[4] = hirq | (old_cppr << 24);

	return H_SUCCESS;
}
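
/*
 * H_IPOLL: peek at the interrupt that H_XIRR would return, on this vcpu
 * or on another server, without acknowledging or consuming anything.
 */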
X_STATIC unsigned long GLUE(X_PFX,h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	u8 pending = xc->pending;
	u32 hirq;

	pr_devel("H_IPOLL(server=%ld)\n", server);

	xc->GLUE(X_STAT_PFX,h_ipoll)++;

	/* Grab the target VCPU if not the current one */
	if (xc->server_num != server) {
		vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
		if (!vcpu)
			return H_PARAMETER;
		xc = vcpu->arch.xive_vcpu;

		/* Scan all priorities */
		pending = 0xff;
	} else {
		/* Grab pending interrupt if any */
		__be64 qw1 = __x_readq(__x_tima + TM_QW1_OS);
		u8 pipr = be64_to_cpu(qw1) & 0xff;

		if (pipr < 8)
			pending |= 1 << pipr;
	}

	hirq = GLUE(X_PFX,scan_interrupts)(xc, pending, scan_poll);

	/* Return interrupt and old CPPR in GPR4 */
	vcpu->arch.regs.gpr[4] = hirq | (xc->cppr << 24);

	return H_SUCCESS;
}
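
/*
 * Poke the VP pending state in the TIMA: set the OS pending bit for the
 * most favored priority we know is pending (from the queues or MFRR) so
 * that the hardware re-presents an interrupt to the vcpu.
 */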
static void GLUE(X_PFX,push_pending_to_hw)(struct kvmppc_xive_vcpu *xc)
{
	u8 pending, prio;

	pending = xc->pending;
	if (xc->mfrr != 0xff) {
		if (xc->mfrr < 8)
			pending |= 1 << xc->mfrr;
		else
			pending |= 0x80;
	}
	if (!pending)
		return;
	prio = ffs(pending) - 1;

	__x_writeb(prio, __x_tima + TM_SPC_SET_OS_PENDING);
}
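
/*
 * Walk the now-masked queues and turn any entry that has been re-routed
 * to another server into a XICS_DUMMY, re-triggering the real source so
 * the new target picks it up. Called from H_CPPR when the guest masks
 * more interrupts.
 */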
static void GLUE(X_PFX,scan_for_rerouted_irqs)(struct kvmppc_xive *xive,
					       struct kvmppc_xive_vcpu *xc)
{
	unsigned int prio;

	/* For each priority that is now masked */
	for (prio = xc->cppr; prio < KVMPPC_XIVE_Q_COUNT; prio++) {
		struct xive_q *q = &xc->queues[prio];
		struct kvmppc_xive_irq_state *state;
		struct kvmppc_xive_src_block *sb;
		u32 idx, toggle, entry, irq, hw_num;
		struct xive_irq_data *xd;
		__be32 *qpage;
		u16 src;

		idx = q->idx;
		toggle = q->toggle;
		qpage = READ_ONCE(q->qpage);
		if (!qpage)
			continue;

		/* For each interrupt in the queue */
		for (;;) {
			entry = be32_to_cpup(qpage + idx);

			/* No more ? */
			if ((entry >> 31) == toggle)
				break;
			irq = entry & 0x7fffffff;

			/* Skip dummies and IPIs */
			if (irq == XICS_DUMMY || irq == XICS_IPI)
				goto next;
			sb = kvmppc_xive_find_source(xive, irq, &src);
			if (!sb)
				goto next;
			state = &sb->irq_state[src];

			/* Has it been rerouted ? */
			if (xc->server_num == state->act_server)
				goto next;

			/*
			 * Alright, it *has* been re-routed, kill it from
			 * the queue.
			 */
			qpage[idx] = cpu_to_be32((entry & 0x80000000) | XICS_DUMMY);

			/* Find the HW interrupt */
			kvmppc_xive_select_irq(state, &hw_num, &xd);

			/* If it's not an LSI, set PQ to 11; the EOI will force a resend */
			if (!(xd->flags & XIVE_IRQ_FLAG_LSI))
				GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_11);

			/* EOI the source */
			GLUE(X_PFX,source_eoi)(hw_num, xd);

		next:
			idx = (idx + 1) & q->msk;
		}
	}
}
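
/*
 * H_CPPR: update the guest's current processor priority. Depending on
 * whether this unmasks or masks interrupts, we either push pending state
 * back to the hardware or scan for interrupts that were re-routed away.
 */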
X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
	u8 old_cppr;

	pr_devel("H_CPPR(cppr=%ld)\n", cppr);

	xc->GLUE(X_STAT_PFX,h_cppr)++;

	/* Map CPPR */
	cppr = xive_prio_from_guest(cppr);

	/* Remember old and update SW state */
	old_cppr = xc->cppr;
	xc->cppr = cppr;

	/*
	 * Order the above update of xc->cppr with the subsequent
	 * read of xc->mfrr inside push_pending_to_hw()
	 */
	smp_mb();

	if (cppr > old_cppr) {
		/*
		 * We are masking less, we need to look for pending things
		 * to deliver and set VP pending bits accordingly to trigger
		 * a new interrupt otherwise we might miss MFRR changes for
		 * which we have optimized out sending an IPI signal.
		 */
		GLUE(X_PFX,push_pending_to_hw)(xc);
	} else {
		/*
		 * We are masking more, we need to check the queue for any
		 * interrupt that has been routed to another CPU, take
		 * it out (replace it with the dummy) and retrigger it.
		 *
		 * This is necessary since those interrupts may otherwise
		 * never be processed, at least not until this CPU restores
		 * its CPPR.
		 *
		 * This is in theory racy vs. HW adding new interrupts to
		 * the queue. In practice this works because the interesting
		 * cases are when the guest has done a set_xive() to move the
		 * interrupt away, which flushes the xive, followed by the
		 * target CPU doing a H_CPPR. So any new interrupt coming into
		 * the queue must still be routed to us and isn't a source
		 * of concern.
		 */
		GLUE(X_PFX,scan_for_rerouted_irqs)(xive, xc);
	}

	/* Apply new CPPR */
	xc->hw_cppr = cppr;
	__x_writeb(cppr, __x_tima + TM_QW1_OS + TM_CPPR);

	return H_SUCCESS;
}
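
/*
 * H_EOI: the xirr argument carries the new CPPR in its top byte and the
 * interrupt being EOId in the low 24 bits. IPIs need no source EOI;
 * masked sources only get their saved P bit cleared so that the later
 * unmask performs the EOI.
 */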
X_STATIC int GLUE(X_PFX,h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_irq_data *xd;
	u8 new_cppr = xirr >> 24;
	u32 irq = xirr & 0x00ffffff, hw_num;
	u16 src;
	int rc = 0;

	pr_devel("H_EOI(xirr=%08lx)\n", xirr);

	xc->GLUE(X_STAT_PFX,h_eoi)++;

	xc->cppr = xive_prio_from_guest(new_cppr);

	/*
	 * IPIs are synthesized from MFRR and thus don't need
	 * any special EOI handling. The underlying interrupt
	 * used to signal MFRR changes is EOId when fetched from
	 * the queue.
	 */
	if (irq == XICS_IPI || irq == 0) {
		/*
		 * This barrier orders the setting of xc->cppr vs.
		 * the subsequent test of xc->mfrr done inside
		 * scan_interrupts and push_pending_to_hw
		 */
		smp_mb();
		goto bail;
	}

	/* Find interrupt source */
	sb = kvmppc_xive_find_source(xive, irq, &src);
	if (!sb) {
		pr_devel(" source not found !\n");
		rc = H_PARAMETER;
		/* Same as above */
		smp_mb();
		goto bail;
	}
	state = &sb->irq_state[src];
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	state->in_eoi = true;

	/*
	 * This barrier orders both the setting of in_eoi above vs.
	 * the subsequent test of guest_priority, and the setting
	 * of xc->cppr vs. the subsequent test of xc->mfrr done inside
	 * scan_interrupts and push_pending_to_hw
	 */
	smp_mb();

again:
	if (state->guest_priority == MASKED) {
		arch_spin_lock(&sb->lock);
		if (state->guest_priority != MASKED) {
			arch_spin_unlock(&sb->lock);
			goto again;
		}
		pr_devel(" EOI on saved P...\n");

		/* Clear old_p, that will cause unmask to perform an EOI */
		state->old_p = false;

		arch_spin_unlock(&sb->lock);
	} else {
		pr_devel(" EOI on source...\n");

		/* Perform EOI on the source */
		GLUE(X_PFX,source_eoi)(hw_num, xd);

		/* If it's an emulated LSI, check level and resend */
		if (state->lsi && state->asserted)
			__x_writeq(0, __x_trig_page(xd));
	}

	/*
	 * This barrier orders the above guest_priority check
	 * and spin_lock/unlock with clearing in_eoi below.
	 *
	 * It also has to be a full mb() as it must ensure
	 * the MMIOs done in source_eoi() are completed before
	 * state->in_eoi is visible.
	 */
	mb();
	state->in_eoi = false;
bail:

	/* Re-evaluate pending IRQs and update HW */
	GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_eoi);
	GLUE(X_PFX,push_pending_to_hw)(xc);
	pr_devel(" after scan pending=%02x\n", xc->pending);

	/* Apply new CPPR */
	xc->hw_cppr = xc->cppr;
	__x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR);

	return rc;
}
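
/*
 * H_IPI: update the target vcpu's MFRR and, if it is more favored than
 * that vcpu's current CPPR, fire the IPI by writing its trigger page.
 */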
X_STATIC int GLUE(X_PFX,h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
			       unsigned long mfrr)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

	pr_devel("H_IPI(server=%08lx,mfrr=%ld)\n", server, mfrr);

	xc->GLUE(X_STAT_PFX,h_ipi)++;

	/* Find target */
	vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
	if (!vcpu)
		return H_PARAMETER;
	xc = vcpu->arch.xive_vcpu;

	/* Locklessly write over MFRR */
	xc->mfrr = mfrr;

	/*
	 * The load of xc->cppr below and the subsequent MMIO store
	 * to the IPI must happen after the above mfrr update is
	 * globally visible so that:
	 *
	 * - We synchronize with another CPU doing an H_EOI or a H_CPPR
	 *   updating xc->cppr then reading xc->mfrr.
	 *
	 * - The target of the IPI sees the xc->mfrr update.
	 */
	mb();

	/* Shoot the IPI if more favored than the target cppr */
	if (mfrr < xc->cppr)
		__x_writeq(0, __x_trig_page(&xc->vp_ipi_data));

	return H_SUCCESS;
}