/*
 * Copyright 2016,2017 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "xive: " fmt
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/msi.h>

#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/xmon.h>

#include "xive-internal.h"
#undef DEBUG_FLUSH
#undef DEBUG_ALL

#ifdef DEBUG_ALL
#define DBG_VERBOSE(fmt, ...)   pr_devel("cpu %d - " fmt, \
                                         smp_processor_id(), ## __VA_ARGS__)
#else
#define DBG_VERBOSE(fmt...)     do { } while (0)
#endif
bool __xive_enabled;
EXPORT_SYMBOL_GPL(__xive_enabled);
bool xive_cmdline_disabled;

/* We use only one priority for now */
static u8 xive_irq_priority;

/* TIMA exported to KVM */
void __iomem *xive_tima;
EXPORT_SYMBOL_GPL(xive_tima);
u32 xive_tima_offset;

/* Backend ops */
static const struct xive_ops *xive_ops;

/* Our global interrupt domain */
static struct irq_domain *xive_irq_domain;

#ifdef CONFIG_SMP
/* The IPIs all use the same logical irq number */
static u32 xive_ipi_irq;
#endif

/* Xive state for each CPU */
static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu);

/*
 * A "disabled" interrupt should never fire; to catch problems
 * we set its logical number to this
 */
#define XIVE_BAD_IRQ            0x7fffffff
#define XIVE_MAX_IRQ            (XIVE_BAD_IRQ - 1)

/* An invalid CPU target */
#define XIVE_INVALID_TARGET     (-1)
/*
 * Read the next entry in a queue, return its content if it's valid
 * or 0 if there is no new entry.
 *
 * The queue pointer is moved forward unless "just_peek" is set
 */
static u32 xive_read_eq(struct xive_q *q, bool just_peek)
{
        u32 cur;

        if (!q->qpage)
                return 0;
        cur = be32_to_cpup(q->qpage + q->idx);

        /* Check valid bit (31) vs current toggle polarity */
        if ((cur >> 31) == q->toggle)
                return 0;

        /* If consuming from the queue ... */
        if (!just_peek) {
                /* Next entry */
                q->idx = (q->idx + 1) & q->msk;

                /* Wrap around: flip valid toggle */
                if (q->idx == 0)
                        q->toggle ^= 1;
        }
        /* Mask out the valid bit (31) */
        return cur & 0x7fffffff;
}
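
/*
 * Note: the toggle bit implements a classic generation count. An
 * entry is valid when its bit 31 differs from q->toggle. For example,
 * with a 4-entry queue (q->msk == 3) starting at toggle == 0, the HW
 * writes first-generation entries with bit 31 set; once the consumer
 * wraps past index 3 it flips toggle to 1, and the next generation of
 * entries (written with bit 31 clear) becomes the valid one. This
 * avoids having to share a producer index with the HW.
 */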
/*
 * Scans all the queues that may have interrupts in them
 * (based on "pending_prio") in priority order until an
 * interrupt is found or all the queues are empty.
 *
 * Then updates the CPPR (Current Processor Priority
 * Register) based on the most favored interrupt found
 * (0xff if none) and returns what was found (0 if none).
 *
 * If just_peek is set, return the most favored pending
 * interrupt if any but don't update the queue pointers.
 *
 * Note: This function can operate generically on any number
 * of queues (up to 8). The current implementation of the XIVE
 * driver only uses a single queue however.
 *
 * Note2: This will also "flush" the "pending_count" of a queue
 * into its "count" when that queue is observed to be empty.
 * This is used to keep track of the number of interrupts
 * targeting a queue. When an interrupt is moved away from
 * a queue, we only decrement that queue's count once the queue
 * has been observed empty, to avoid races.
 */
static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek)
{
        u32 irq = 0;
        u8 prio;

        /* Find highest pending priority */
        while (xc->pending_prio != 0) {
                struct xive_q *q;

                prio = ffs(xc->pending_prio) - 1;
                DBG_VERBOSE("scan_irq: trying prio %d\n", prio);

                /* Try to fetch */
                irq = xive_read_eq(&xc->queue[prio], just_peek);

                /* Found something ? That's it */
                if (irq)
                        break;

                /* Clear pending bits */
                xc->pending_prio &= ~(1 << prio);

                /*
                 * Check if the queue count needs adjusting due to
                 * interrupts being moved away. See description of
                 * xive_dec_target_count()
                 */
                q = &xc->queue[prio];
                if (atomic_read(&q->pending_count)) {
                        int p = atomic_xchg(&q->pending_count, 0);
                        if (p) {
                                WARN_ON(p > atomic_read(&q->count));
                                atomic_sub(p, &q->count);
                        }
                }
        }

        /* If nothing was found, set CPPR to 0xff */
        if (irq == 0)
                prio = 0xff;

        /* Update HW CPPR to match if necessary */
        if (prio != xc->cppr) {
                DBG_VERBOSE("scan_irq: adjusting CPPR to %d\n", prio);
                xc->cppr = prio;
                out_8(xive_tima + xive_tima_offset + TM_CPPR, prio);
        }

        return irq;
}
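
/*
 * Note: "most favored" means the numerically lowest priority; the
 * ffs() above returns the least significant (i.e. lowest numbered)
 * pending bit first. A CPPR of 0xff therefore accepts interrupts of
 * any priority, while a CPPR of 0 shuts the flow of interrupts off
 * entirely (see xive_setup_cpu() and xive_teardown_cpu() below).
 */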
/*
 * This is used to perform the magic loads from an ESB
 * described in xive.h
 */
static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset)
{
        u64 val;

        /* Handle HW errata */
        if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
                offset |= offset << 4;

        if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
                val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0);
        else
                val = in_be64(xd->eoi_mmio + offset);

        return (u8)val;
}

static void xive_esb_write(struct xive_irq_data *xd, u32 offset, u64 data)
{
        /* Handle HW errata */
        if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
                offset |= offset << 4;

        if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
                xive_ops->esb_rw(xd->hw_irq, offset, data, 1);
        else
                out_be64(xd->eoi_mmio + offset, data);
}
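
/*
 * Rough summary of how this driver uses the ESB "PQ" state bits (the
 * XIVE_ESB_* offsets and the XIVE_ESB_VAL_P/Q masks come from
 * asm/xive-regs.h):
 *
 *   PQ = 00  source enabled, a trigger sends a notification
 *   PQ = 01  source masked (see xive_do_source_set_mask())
 *   PQ = 10  an event is pending in a queue, further sends are blocked
 *   PQ = 11  pending, with another trigger latched in Q
 *
 * The special loads both return the previous PQ value and establish a
 * new one, which is what makes a load at XIVE_ESB_SET_PQ_00 usable as
 * an EOI with retrigger detection in xive_do_source_eoi() below.
 */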
#ifdef CONFIG_XMON
static notrace void xive_dump_eq(const char *name, struct xive_q *q)
{
        u32 i0, i1, idx;

        if (!q->qpage)
                return;
        idx = q->idx;
        i0 = be32_to_cpup(q->qpage + idx);
        idx = (idx + 1) & q->msk;
        i1 = be32_to_cpup(q->qpage + idx);
        xmon_printf("  %s Q T=%d %08x %08x ...\n", name,
                    q->toggle, i0, i1);
}

notrace void xmon_xive_do_dump(int cpu)
{
        struct xive_cpu *xc = per_cpu(xive_cpu, cpu);

        xmon_printf("XIVE state for CPU %d:\n", cpu);
        xmon_printf("  pp=%02x cppr=%02x\n", xc->pending_prio, xc->cppr);
        xive_dump_eq("IRQ", &xc->queue[xive_irq_priority]);
#ifdef CONFIG_SMP
        {
                u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET);
                xmon_printf("  IPI state: %x:%c%c\n", xc->hw_ipi,
                            val & XIVE_ESB_VAL_P ? 'P' : 'p',
                            val & XIVE_ESB_VAL_Q ? 'Q' : 'q');
        }
#endif
}
#endif /* CONFIG_XMON */
static unsigned int xive_get_irq(void)
{
        struct xive_cpu *xc = __this_cpu_read(xive_cpu);
        u32 irq;

        /*
         * This can be called either as a result of a HW interrupt or
         * as a "replay" because EOI decided there was still something
         * in one of the queues.
         *
         * First we perform an ACK cycle in order to update our mask
         * of pending priorities. This will also have the effect of
         * updating the CPPR to the most favored pending interrupts.
         *
         * In the future, if we have a way to differentiate a first
         * entry (on HW interrupt) from a replay triggered by EOI,
         * we could skip this on replays unless the soft-mask state
         * tells us that a new HW interrupt occurred.
         */
        xive_ops->update_pending(xc);

        DBG_VERBOSE("get_irq: pending=%02x\n", xc->pending_prio);

        /* Scan our queue(s) for interrupts */
        irq = xive_scan_interrupts(xc, false);

        DBG_VERBOSE("get_irq: got irq 0x%x, new pending=0x%02x\n",
                    irq, xc->pending_prio);

        /* Return pending interrupt if any */
        if (irq == XIVE_BAD_IRQ)
                return 0;
        return irq;
}
/*
 * After EOI'ing an interrupt, we need to re-check the queue
 * to see if another interrupt is pending since multiple
 * interrupts can coalesce into a single notification to the
 * CPU.
 *
 * If we find that there is indeed more in there, we call
 * force_external_irq_replay() to make Linux synthesize an
 * external interrupt on the next call to local_irq_restore().
 */
static void xive_do_queue_eoi(struct xive_cpu *xc)
{
        if (xive_scan_interrupts(xc, true) != 0) {
                DBG_VERBOSE("eoi: pending=0x%02x\n", xc->pending_prio);
                force_external_irq_replay();
        }
}
/*
 * EOI an interrupt at the source. There are several methods
 * to do this depending on the HW version and source type
 */
void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
{
        /* If the XIVE supports the new "store EOI" facility, use it */
        if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
                xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0);
        else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
                /*
                 * The FW told us to call it. This happens for some
                 * interrupt sources that need additional HW whacking
                 * beyond the ESB manipulation. For example LPC interrupts
                 * on P9 DD1.0 need a latch to be cleared in the LPC bridge
                 * itself. The Firmware will take care of it.
                 */
                if (WARN_ON_ONCE(!xive_ops->eoi))
                        return;
                xive_ops->eoi(hw_irq);
        } else {
                u8 eoi_val;

                /*
                 * Otherwise for EOI, we use the special MMIO that does
                 * a clear of both P and Q and returns the old Q,
                 * except for LSIs where we use the "EOI cycle" special
                 * load.
                 *
                 * This allows us to then do a re-trigger if Q was set
                 * rather than synthesizing an interrupt in software
                 *
                 * For LSIs, using the HW EOI cycle works around a problem
                 * on P9 DD1 PHBs where the other ESB accesses don't work
                 * properly.
                 */
                if (xd->flags & XIVE_IRQ_FLAG_LSI)
                        xive_esb_read(xd, XIVE_ESB_LOAD_EOI);
                else {
                        eoi_val = xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
                        DBG_VERBOSE("eoi_val=%x\n", eoi_val);

                        /* Re-trigger if needed */
                        if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio)
                                out_be64(xd->trig_mmio, 0);
                }
        }
}
/* irq_chip eoi callback */
static void xive_irq_eoi(struct irq_data *d)
{
        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
        struct xive_cpu *xc = __this_cpu_read(xive_cpu);

        DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n",
                    d->irq, irqd_to_hwirq(d), xc->pending_prio);

        /*
         * EOI the source if it hasn't been disabled and hasn't
         * been passed-through to a KVM guest
         */
        if (!irqd_irq_disabled(d) && !irqd_is_forwarded_to_vcpu(d) &&
            !(xd->flags & XIVE_IRQ_NO_EOI))
                xive_do_source_eoi(irqd_to_hwirq(d), xd);

        /*
         * Clear saved_p to indicate that it's no longer occupying
         * a queue slot on the target queue
         */
        xd->saved_p = false;

        /* Check for more work in the queue */
        xive_do_queue_eoi(xc);
}
/*
 * Helper used to mask and unmask an interrupt source. This
 * is only called for normal interrupts that do not require
 * masking/unmasking via firmware.
 */
static void xive_do_source_set_mask(struct xive_irq_data *xd,
                                    bool mask)
{
        u64 val;

        /*
         * If the interrupt had P set, it may be in a queue.
         *
         * We need to make sure we don't re-enable it until it
         * has been fetched from that queue and EOId. We keep
         * a copy of that P state and use it to restore the
         * ESB accordingly on unmask.
         */
        if (mask) {
                val = xive_esb_read(xd, XIVE_ESB_SET_PQ_01);
                xd->saved_p = !!(val & XIVE_ESB_VAL_P);
        } else if (xd->saved_p)
                xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
        else
                xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
}
/*
 * Try to choose "cpu" as a new interrupt target. Increments
 * the queue accounting for that target if it's not already
 * full.
 */
static bool xive_try_pick_target(int cpu)
{
        struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
        struct xive_q *q = &xc->queue[xive_irq_priority];
        int max;

        /*
         * Calculate max number of interrupts in that queue.
         *
         * We leave a gap of 1 just in case...
         */
        max = (q->msk + 1) - 1;
        return !!atomic_add_unless(&q->count, 1, max);
}
/*
 * Un-account an interrupt for a target CPU. We don't directly
 * decrement q->count since the interrupt might still be present
 * in the queue.
 *
 * Instead increment a separate counter "pending_count" which
 * will be subtracted from "count" later when that CPU observes
 * the queue to be empty.
 */
static void xive_dec_target_count(int cpu)
{
        struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
        struct xive_q *q = &xc->queue[xive_irq_priority];

        if (unlikely(WARN_ON(cpu < 0 || !xc))) {
                pr_err("%s: cpu=%d xc=%p\n", __func__, cpu, xc);
                return;
        }

        /*
         * We increment the "pending count" which will be used
         * to decrement the target queue count whenever it's next
         * processed and found empty. This ensures that we don't
         * decrement while we still have the interrupt there
         * occupying a slot.
         */
        atomic_inc(&q->pending_count);
}
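
/*
 * Note: the net effect of the two helpers above is that q->count is
 * an upper bound on the number of interrupts currently targeting a
 * queue. Increments happen eagerly in xive_try_pick_target();
 * decrements are deferred into q->pending_count and only folded back
 * into q->count by xive_scan_interrupts() once the queue has been
 * observed empty, so we never free a slot that a stale queue entry
 * may still occupy.
 */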
/* Find a tentative CPU target in a CPU mask */
static int xive_find_target_in_mask(const struct cpumask *mask,
                                    unsigned int fuzz)
{
        int cpu, first, num, i;

        /* Pick up a starting point CPU in the mask based on fuzz */
        num = min_t(int, cpumask_weight(mask), nr_cpu_ids);
        first = fuzz % num;

        /* Locate it */
        cpu = cpumask_first(mask);
        for (i = 0; i < first && cpu < nr_cpu_ids; i++)
                cpu = cpumask_next(cpu, mask);

        /* Sanity check */
        if (WARN_ON(cpu >= nr_cpu_ids))
                cpu = cpumask_first(cpu_online_mask);

        /* Remember first one to handle wrap-around */
        first = cpu;

        /*
         * Now go through the entire mask until we find a valid
         * target.
         */
        for (;;) {
                /*
                 * We re-check online as the fallback case passes us
                 * an untested affinity mask
                 */
                if (cpu_online(cpu) && xive_try_pick_target(cpu))
                        return cpu;
                cpu = cpumask_next(cpu, mask);
                if (cpu == first)
                        break;
                /* Wrap around */
                if (cpu >= nr_cpu_ids)
                        cpu = cpumask_first(mask);
        }
        return -1;
}
/*
 * Pick a target CPU for an interrupt. This is done at
 * startup or if the affinity is changed in a way that
 * invalidates the current target.
 */
static int xive_pick_irq_target(struct irq_data *d,
                                const struct cpumask *affinity)
{
        static unsigned int fuzz;
        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
        cpumask_var_t mask;
        int cpu = -1;

        /*
         * If we have chip IDs, first we try to build a mask of
         * CPUs on the interrupt's source chip and find a target
         * in there
         */
        if (xd->src_chip != XIVE_INVALID_CHIP_ID &&
            zalloc_cpumask_var(&mask, GFP_ATOMIC)) {
                /* Build a mask of matching chip IDs */
                for_each_cpu_and(cpu, affinity, cpu_online_mask) {
                        struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
                        if (xc->chip_id == xd->src_chip)
                                cpumask_set_cpu(cpu, mask);
                }
                /* Try to find a target */
                if (cpumask_empty(mask))
                        cpu = -1;
                else
                        cpu = xive_find_target_in_mask(mask, fuzz++);
                free_cpumask_var(mask);
                if (cpu >= 0)
                        return cpu;
                fuzz--;
        }

        /* No chip IDs, fallback to using the affinity mask */
        return xive_find_target_in_mask(affinity, fuzz++);
}
static unsigned int xive_irq_startup(struct irq_data *d)
{
        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
        unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
        int target, rc;

        pr_devel("xive_irq_startup: irq %d [0x%x] data @%p\n",
                 d->irq, hw_irq, d);

#ifdef CONFIG_PCI_MSI
        /*
         * The generic MSI code returns with the interrupt disabled on the
         * card, using the MSI mask bits. Firmware doesn't appear to unmask
         * at that level, so we do it here by hand.
         */
        if (irq_data_get_msi_desc(d))
                pci_msi_unmask_irq(d);
#endif

        /* Pick a target */
        target = xive_pick_irq_target(d, irq_data_get_affinity_mask(d));
        if (target == XIVE_INVALID_TARGET) {
                /* Try again breaking affinity */
                target = xive_pick_irq_target(d, cpu_online_mask);
                if (target == XIVE_INVALID_TARGET)
                        return -ENXIO;
                pr_warn("irq %d started with broken affinity\n", d->irq);
        }

        /* Sanity check */
        if (WARN_ON(target == XIVE_INVALID_TARGET ||
                    target >= nr_cpu_ids))
                target = smp_processor_id();

        xd->target = target;

        /*
         * Configure the logical number to be the Linux IRQ number
         * and set the target queue
         */
        rc = xive_ops->configure_irq(hw_irq,
                                     get_hard_smp_processor_id(target),
                                     xive_irq_priority, d->irq);
        if (rc)
                return rc;

        /* Unmask the ESB */
        xive_do_source_set_mask(xd, false);

        return 0;
}
static void xive_irq_shutdown(struct irq_data *d)
{
        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
        unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

        pr_devel("xive_irq_shutdown: irq %d [0x%x] data @%p\n",
                 d->irq, hw_irq, d);

        if (WARN_ON(xd->target == XIVE_INVALID_TARGET))
                return;

        /* Mask the interrupt at the source */
        xive_do_source_set_mask(xd, true);

        /*
         * The above may have set saved_p. We clear it otherwise it
         * will prevent re-enabling later on. It is ok to forget the
         * fact that the interrupt might be in a queue because we are
         * accounting that already in xive_dec_target_count() and will
         * be re-routing it to a new queue with proper accounting when
         * it's started up again
         */
        xd->saved_p = false;

        /*
         * Mask the interrupt in HW in the IVT/EAS and set the number
         * to be the "bad" IRQ number
         */
        xive_ops->configure_irq(hw_irq,
                                get_hard_smp_processor_id(xd->target),
                                0xff, XIVE_BAD_IRQ);

        xive_dec_target_count(xd->target);
        xd->target = XIVE_INVALID_TARGET;
}
static void xive_irq_unmask(struct irq_data *d)
{
        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

        pr_devel("xive_irq_unmask: irq %d data @%p\n", d->irq, xd);

        /*
         * This is a workaround for PCI LSI problems on P9; for
         * these, we call FW to set the mask. The problems might
         * be fixed by P9 DD2.0, in which case firmware will no
         * longer set that flag.
         */
        if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) {
                unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
                xive_ops->configure_irq(hw_irq,
                                        get_hard_smp_processor_id(xd->target),
                                        xive_irq_priority, d->irq);
                return;
        }

        xive_do_source_set_mask(xd, false);
}

static void xive_irq_mask(struct irq_data *d)
{
        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

        pr_devel("xive_irq_mask: irq %d data @%p\n", d->irq, xd);

        /*
         * This is a workaround for PCI LSI problems on P9; for
         * these, we call OPAL to set the mask. The problems might
         * be fixed by P9 DD2.0, in which case firmware will no
         * longer set that flag.
         */
        if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) {
                unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
                xive_ops->configure_irq(hw_irq,
                                        get_hard_smp_processor_id(xd->target),
                                        0xff, d->irq);
                return;
        }

        xive_do_source_set_mask(xd, true);
}
static int xive_irq_set_affinity(struct irq_data *d,
                                 const struct cpumask *cpumask,
                                 bool force)
{
        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
        unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
        u32 target, old_target;
        int rc = 0;

        pr_devel("xive_irq_set_affinity: irq %d\n", d->irq);

        /* Is this valid ? */
        if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids)
                return -EINVAL;

        /* Don't do anything if the interrupt isn't started */
        if (!irqd_is_started(d))
                return IRQ_SET_MASK_OK;

        /*
         * If existing target is already in the new mask, and is
         * online then do nothing.
         */
        if (xd->target != XIVE_INVALID_TARGET &&
            cpu_online(xd->target) &&
            cpumask_test_cpu(xd->target, cpumask))
                return IRQ_SET_MASK_OK;

        /* Pick a new target */
        target = xive_pick_irq_target(d, cpumask);

        /* No target found */
        if (target == XIVE_INVALID_TARGET)
                return -ENXIO;

        /* Sanity check */
        if (WARN_ON(target >= nr_cpu_ids))
                target = smp_processor_id();

        old_target = xd->target;

        /*
         * Only configure the irq if it's not currently passed-through to
         * a KVM guest
         */
        if (!irqd_is_forwarded_to_vcpu(d))
                rc = xive_ops->configure_irq(hw_irq,
                                             get_hard_smp_processor_id(target),
                                             xive_irq_priority, d->irq);
        if (rc < 0) {
                pr_err("Error %d reconfiguring irq %d\n", rc, d->irq);
                return rc;
        }

        pr_devel("  target: 0x%x\n", target);
        xd->target = target;

        /* Give up previous target */
        if (old_target != XIVE_INVALID_TARGET)
                xive_dec_target_count(old_target);

        return IRQ_SET_MASK_OK;
}
static int xive_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

        /*
         * We only support these. This has really no effect other than
         * setting the corresponding descriptor bits, but those will in
         * turn affect the resend function when re-enabling an edge
         * interrupt.
         *
         * We set the default to edge as explained in map().
         */
        if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE)
                flow_type = IRQ_TYPE_EDGE_RISING;

        if (flow_type != IRQ_TYPE_EDGE_RISING &&
            flow_type != IRQ_TYPE_LEVEL_LOW)
                return -EINVAL;

        irqd_set_trigger_type(d, flow_type);

        /*
         * Double check it matches what the FW thinks
         *
         * NOTE: We don't know yet if the PAPR interface will provide
         * the LSI vs MSI information apart from the device-tree so
         * this check might have to move into an optional backend call
         * that is specific to the native backend
         */
        if ((flow_type == IRQ_TYPE_LEVEL_LOW) !=
            !!(xd->flags & XIVE_IRQ_FLAG_LSI)) {
                pr_warn("Interrupt %d (HW 0x%x) type mismatch, Linux says %s, FW says %s\n",
                        d->irq, (u32)irqd_to_hwirq(d),
                        (flow_type == IRQ_TYPE_LEVEL_LOW) ? "Level" : "Edge",
                        (xd->flags & XIVE_IRQ_FLAG_LSI) ? "Level" : "Edge");
        }

        return IRQ_SET_MASK_OK_NOCOPY;
}
static int xive_irq_retrigger(struct irq_data *d)
{
        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

        /* This should be only for MSIs */
        if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
                return 0;

        /*
         * To perform a retrigger, we first set the PQ bits to
         * 11, then perform an EOI.
         */
        xive_esb_read(xd, XIVE_ESB_SET_PQ_11);

        /*
         * Note: We pass "0" to the hw_irq argument in order to
         * avoid calling into the backend EOI code which we don't
         * want to do in the case of a re-trigger. Backends typically
         * only do EOI for LSIs anyway.
         */
        xive_do_source_eoi(0, xd);

        return 1;
}
static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
{
        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
        unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
        int rc;
        u8 pq;

        /*
         * We only support this on interrupts that do not require
         * firmware calls for masking and unmasking
         */
        if (xd->flags & XIVE_IRQ_FLAG_MASK_FW)
                return -EIO;

        /*
         * This is called by KVM with state non-NULL for enabling
         * pass-through or NULL for disabling it
         */
        if (state) {
                irqd_set_forwarded_to_vcpu(d);

                /* Set it to PQ=10 state to prevent further sends */
                pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_10);

                /* No target ? nothing to do */
                if (xd->target == XIVE_INVALID_TARGET) {
                        /*
                         * An untargeted interrupt should have been
                         * also masked at the source
                         */
                        WARN_ON(pq & XIVE_ESB_VAL_P);

                        return 0;
                }

                /*
                 * If P was set, adjust state to PQ=11 to indicate
                 * that a resend is needed for the interrupt to reach
                 * the guest. Also remember the value of P.
                 *
                 * This also tells us that it's in flight to a host queue
                 * or has already been fetched but hasn't been EOIed yet
                 * by the host. Thus it's potentially using up a host
                 * queue slot. This is important to know because as long
                 * as this is the case, we must not hard-unmask it when
                 * "returning" that interrupt to the host.
                 *
                 * This saved_p is cleared by the host EOI, when we know
                 * for sure the queue slot is no longer in use.
                 */
                if (pq & XIVE_ESB_VAL_P) {
                        pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_11);
                        xd->saved_p = true;

                        /*
                         * Sync the XIVE source HW to ensure the interrupt
                         * has gone through the EAS before we change its
                         * target to the guest. That should guarantee us
                         * that we *will* eventually get an EOI for it on
                         * the host. Otherwise there would be a small window
                         * for P to be seen here but the interrupt going
                         * to the guest queue.
                         */
                        if (xive_ops->sync_source)
                                xive_ops->sync_source(hw_irq);
                } else
                        xd->saved_p = false;
        } else {
                irqd_clr_forwarded_to_vcpu(d);

                /* No host target ? hard mask and return */
                if (xd->target == XIVE_INVALID_TARGET) {
                        xive_do_source_set_mask(xd, true);
                        return 0;
                }

                /*
                 * Sync the XIVE source HW to ensure the interrupt
                 * has gone through the EAS before we change its
                 * target to the host.
                 */
                if (xive_ops->sync_source)
                        xive_ops->sync_source(hw_irq);

                /*
                 * By convention we are called with the interrupt in
                 * a PQ=10 or PQ=11 state, ie, it won't fire and will
                 * have latched in Q whether there's a pending HW
                 * interrupt or not.
                 *
                 * First reconfigure the target.
                 */
                rc = xive_ops->configure_irq(hw_irq,
                                             get_hard_smp_processor_id(xd->target),
                                             xive_irq_priority, d->irq);
                if (rc)
                        return rc;

                /*
                 * Then if saved_p is not set, effectively re-enable the
                 * interrupt with an EOI. If it is set, we know there is
                 * still a message in a host queue somewhere that will be
                 * EOId eventually.
                 *
                 * Note: We don't check irqd_irq_disabled(). Effectively,
                 * we *will* let the irq get through even if masked if the
                 * HW is still firing it in order to deal with the whole
                 * saved_p business properly. If the interrupt triggers
                 * while masked, the generic code will re-mask it anyway.
                 */
                if (!xd->saved_p)
                        xive_do_source_eoi(hw_irq, xd);
        }
        return 0;
}
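
/*
 * Summary of the pass-through handshake with KVM implemented above:
 *
 *  - enable (state != NULL): latch the source into PQ=10 so it stops
 *    sending to the host; if P was already set, the event may occupy
 *    a host queue slot, so remember that in saved_p and force PQ=11
 *    so the guest gets a resend.
 *
 *  - disable (state == NULL): retarget the source back to the host
 *    queue, then EOI it to re-enable it, unless saved_p says a stale
 *    host queue entry still holds the slot (the eventual host EOI in
 *    xive_irq_eoi() clears saved_p).
 */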
static struct irq_chip xive_irq_chip = {
        .name = "XIVE-IRQ",
        .irq_startup = xive_irq_startup,
        .irq_shutdown = xive_irq_shutdown,
        .irq_eoi = xive_irq_eoi,
        .irq_mask = xive_irq_mask,
        .irq_unmask = xive_irq_unmask,
        .irq_set_affinity = xive_irq_set_affinity,
        .irq_set_type = xive_irq_set_type,
        .irq_retrigger = xive_irq_retrigger,
        .irq_set_vcpu_affinity = xive_irq_set_vcpu_affinity,
};

bool is_xive_irq(struct irq_chip *chip)
{
        return chip == &xive_irq_chip;
}
EXPORT_SYMBOL_GPL(is_xive_irq);
void xive_cleanup_irq_data(struct xive_irq_data *xd)
{
        if (xd->eoi_mmio) {
                iounmap(xd->eoi_mmio);
                if (xd->eoi_mmio == xd->trig_mmio)
                        xd->trig_mmio = NULL;
                xd->eoi_mmio = NULL;
        }
        if (xd->trig_mmio) {
                iounmap(xd->trig_mmio);
                xd->trig_mmio = NULL;
        }
}
EXPORT_SYMBOL_GPL(xive_cleanup_irq_data);

static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw)
{
        struct xive_irq_data *xd;
        int rc;

        xd = kzalloc(sizeof(struct xive_irq_data), GFP_KERNEL);
        if (!xd)
                return -ENOMEM;
        rc = xive_ops->populate_irq_data(hw, xd);
        if (rc) {
                kfree(xd);
                return rc;
        }
        xd->target = XIVE_INVALID_TARGET;
        irq_set_handler_data(virq, xd);

        return 0;
}

static void xive_irq_free_data(unsigned int virq)
{
        struct xive_irq_data *xd = irq_get_handler_data(virq);

        if (!xd)
                return;
        irq_set_handler_data(virq, NULL);
        xive_cleanup_irq_data(xd);
        kfree(xd);
}
#ifdef CONFIG_SMP

static void xive_cause_ipi(int cpu)
{
        struct xive_cpu *xc;
        struct xive_irq_data *xd;

        xc = per_cpu(xive_cpu, cpu);

        DBG_VERBOSE("IPI CPU %d -> %d (HW IRQ 0x%x)\n",
                    smp_processor_id(), cpu, xc->hw_ipi);

        xd = &xc->ipi_data;
        if (WARN_ON(!xd->trig_mmio))
                return;
        out_be64(xd->trig_mmio, 0);
}

static irqreturn_t xive_muxed_ipi_action(int irq, void *dev_id)
{
        return smp_ipi_demux();
}

static void xive_ipi_eoi(struct irq_data *d)
{
        struct xive_cpu *xc = __this_cpu_read(xive_cpu);

        /* Handle possible race with unplug and drop stale IPIs */
        if (!xc)
                return;

        DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n",
                    d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio);

        xive_do_source_eoi(xc->hw_ipi, &xc->ipi_data);
        xive_do_queue_eoi(xc);
}

static void xive_ipi_do_nothing(struct irq_data *d)
{
        /*
         * Nothing to do, we never mask/unmask IPIs, but the callback
         * has to exist for the struct irq_chip.
         */
}

static struct irq_chip xive_ipi_chip = {
        .name = "XIVE-IPI",
        .irq_eoi = xive_ipi_eoi,
        .irq_mask = xive_ipi_do_nothing,
        .irq_unmask = xive_ipi_do_nothing,
};

static void __init xive_request_ipi(void)
{
        unsigned int virq;

        /*
         * Initialization failed, move on, we might manage to
         * reach the point where we display our errors before
         * the system falls apart
         */
        if (!xive_irq_domain)
                return;

        /* Initialize it */
        virq = irq_create_mapping(xive_irq_domain, 0);
        xive_ipi_irq = virq;

        WARN_ON(request_irq(virq, xive_muxed_ipi_action,
                            IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL));
}
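
/*
 * Note: hwirq 0 is reserved for the muxed IPI: there is one Linux
 * virq shared by all CPUs, backed by a separate HW IPI per CPU, and
 * xive_irq_domain_map() below recognizes hwirq 0 and installs the
 * per-cpu IPI chip for it.
 */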
static int xive_setup_cpu_ipi(unsigned int cpu)
{
        struct xive_cpu *xc;
        int rc;

        pr_debug("Setting up IPI for CPU %d\n", cpu);

        xc = per_cpu(xive_cpu, cpu);

        /* Check if we are already setup */
        if (xc->hw_ipi != 0)
                return 0;

        /* Grab an IPI from the backend, this will populate xc->hw_ipi */
        if (xive_ops->get_ipi(cpu, xc))
                return -EIO;

        /*
         * Populate the IRQ data in the xive_cpu structure and
         * configure the HW / enable the IPIs.
         */
        rc = xive_ops->populate_irq_data(xc->hw_ipi, &xc->ipi_data);
        if (rc) {
                pr_err("Failed to populate IPI data on CPU %d\n", cpu);
                return -EIO;
        }
        rc = xive_ops->configure_irq(xc->hw_ipi,
                                     get_hard_smp_processor_id(cpu),
                                     xive_irq_priority, xive_ipi_irq);
        if (rc) {
                pr_err("Failed to map IPI CPU %d\n", cpu);
                return -EIO;
        }
        pr_devel("CPU %d HW IPI %x, virq %d, trig_mmio=%p\n", cpu,
                 xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio);

        /* Unmask it */
        xive_do_source_set_mask(&xc->ipi_data, false);

        return 0;
}

static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
{
        /* Disable the IPI and free the IRQ data */

        /* Already cleaned up ? */
        if (xc->hw_ipi == 0)
                return;

        /* Mask the IPI */
        xive_do_source_set_mask(&xc->ipi_data, true);

        /*
         * Note: We don't call xive_cleanup_irq_data() to free
         * the mappings as this is called from an IPI on kexec
         * which is not a safe environment to call iounmap()
         */

        /* Deconfigure/mask in the backend */
        xive_ops->configure_irq(xc->hw_ipi, hard_smp_processor_id(),
                                0xff, xive_ipi_irq);

        /* Free the IPIs in the backend */
        xive_ops->put_ipi(cpu, xc);
}

void __init xive_smp_probe(void)
{
        smp_ops->cause_ipi = xive_cause_ipi;

        /* Register the IPI */
        xive_request_ipi();

        /* Allocate and setup IPI for the boot CPU */
        xive_setup_cpu_ipi(smp_processor_id());
}

#endif /* CONFIG_SMP */
static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq,
                               irq_hw_number_t hw)
{
        int rc;

        /*
         * Mark interrupts as edge sensitive by default so that resend
         * actually works. Will fix that up below if needed.
         */
        irq_clear_status_flags(virq, IRQ_LEVEL);

#ifdef CONFIG_SMP
        /* IPIs are special and come up with HW number 0 */
        if (hw == 0) {
                /*
                 * IPIs are marked per-cpu. We use separate HW interrupts under
                 * the hood but associated with the same "linux" interrupt
                 */
                irq_set_chip_and_handler(virq, &xive_ipi_chip,
                                         handle_percpu_irq);
                return 0;
        }
#endif

        rc = xive_irq_alloc_data(virq, hw);
        if (rc)
                return rc;

        irq_set_chip_and_handler(virq, &xive_irq_chip, handle_fasteoi_irq);

        return 0;
}

static void xive_irq_domain_unmap(struct irq_domain *d, unsigned int virq)
{
        struct irq_data *data = irq_get_irq_data(virq);
        unsigned int hw_irq;

        /* XXX Assign BAD number */
        if (!data)
                return;
        hw_irq = (unsigned int)irqd_to_hwirq(data);
        if (hw_irq)
                xive_irq_free_data(virq);
}
static int xive_irq_domain_xlate(struct irq_domain *h, struct device_node *ct,
                                 const u32 *intspec, unsigned int intsize,
                                 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
        *out_hwirq = intspec[0];

        /*
         * If intsize is at least 2, we look for the type in the second cell,
         * we assume the LSB indicates a level interrupt.
         */
        if (intsize > 1) {
                if (intspec[1] & 1)
                        *out_flags = IRQ_TYPE_LEVEL_LOW;
                else
                        *out_flags = IRQ_TYPE_EDGE_RISING;
        } else
                *out_flags = IRQ_TYPE_LEVEL_LOW;

        return 0;
}

static int xive_irq_domain_match(struct irq_domain *h, struct device_node *node,
                                 enum irq_domain_bus_token bus_token)
{
        return xive_ops->match(node);
}

static const struct irq_domain_ops xive_irq_domain_ops = {
        .match = xive_irq_domain_match,
        .map = xive_irq_domain_map,
        .unmap = xive_irq_domain_unmap,
        .xlate = xive_irq_domain_xlate,
};

static void __init xive_init_host(void)
{
        xive_irq_domain = irq_domain_add_nomap(NULL, XIVE_MAX_IRQ,
                                               &xive_irq_domain_ops, NULL);
        if (WARN_ON(xive_irq_domain == NULL))
                return;
        irq_set_default_host(xive_irq_domain);
}
static void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
{
        if (xc->queue[xive_irq_priority].qpage)
                xive_ops->cleanup_queue(cpu, xc, xive_irq_priority);
}

static int xive_setup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
{
        int rc = 0;

        /* We set up one queue for now, with a 64k page */
        if (!xc->queue[xive_irq_priority].qpage)
                rc = xive_ops->setup_queue(cpu, xc, xive_irq_priority);

        return rc;
}
static int xive_prepare_cpu(unsigned int cpu)
{
        struct xive_cpu *xc;

        xc = per_cpu(xive_cpu, cpu);
        if (!xc) {
                struct device_node *np;

                xc = kzalloc_node(sizeof(struct xive_cpu),
                                  GFP_KERNEL, cpu_to_node(cpu));
                if (!xc)
                        return -ENOMEM;
                np = of_get_cpu_node(cpu, NULL);
                if (np)
                        xc->chip_id = of_get_ibm_chip_id(np);
                of_node_put(np);

                per_cpu(xive_cpu, cpu) = xc;
        }

        /* Setup EQs if not already */
        return xive_setup_cpu_queues(cpu, xc);
}

static void xive_setup_cpu(void)
{
        struct xive_cpu *xc = __this_cpu_read(xive_cpu);

        /* The backend might have additional things to do */
        if (xive_ops->setup_cpu)
                xive_ops->setup_cpu(smp_processor_id(), xc);

        /* Set CPPR to 0xff to enable flow of interrupts */
        xc->cppr = 0xff;
        out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
}
#ifdef CONFIG_SMP
void xive_smp_setup_cpu(void)
{
        pr_devel("SMP setup CPU %d\n", smp_processor_id());

        /* This will have already been done on the boot CPU */
        if (smp_processor_id() != boot_cpuid)
                xive_setup_cpu();
}

int xive_smp_prepare_cpu(unsigned int cpu)
{
        int rc;

        /* Allocate per-CPU data and queues */
        rc = xive_prepare_cpu(cpu);
        if (rc)
                return rc;

        /* Allocate and setup IPI for the new CPU */
        return xive_setup_cpu_ipi(cpu);
}
#ifdef CONFIG_HOTPLUG_CPU
static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc)
{
        u32 irq;

        /* We assume local irqs are disabled */
        WARN_ON(!irqs_disabled());

        /* Check what's already in the CPU queue */
        while ((irq = xive_scan_interrupts(xc, false)) != 0) {
                /*
                 * We need to re-route that interrupt to its new destination.
                 * First get and lock the descriptor
                 */
                struct irq_desc *desc = irq_to_desc(irq);
                struct irq_data *d = irq_desc_get_irq_data(desc);
                struct xive_irq_data *xd;
                unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

                /*
                 * Ignore anything that isn't a XIVE irq and ignore
                 * IPIs, both can just be dropped.
                 */
                if (d->domain != xive_irq_domain || hw_irq == 0)
                        continue;

                /*
                 * The IRQ should have already been re-routed, it's just a
                 * stale entry in the old queue, so re-trigger it in order
                 * to make it reach its new destination.
                 */
#ifdef DEBUG_FLUSH
                pr_info("CPU %d: Got irq %d while offline, re-sending...\n",
                        cpu, irq);
#endif
                raw_spin_lock(&desc->lock);
                xd = irq_desc_get_handler_data(desc);

                /*
                 * For LSIs, we EOI, this will cause a resend if it's
                 * still asserted. Otherwise do an MSI retrigger.
                 */
                if (xd->flags & XIVE_IRQ_FLAG_LSI)
                        xive_do_source_eoi(irqd_to_hwirq(d), xd);
                else
                        xive_irq_retrigger(d);

                raw_spin_unlock(&desc->lock);
        }
}

void xive_smp_disable_cpu(void)
{
        struct xive_cpu *xc = __this_cpu_read(xive_cpu);
        unsigned int cpu = smp_processor_id();

        /* Migrate interrupts away from the CPU */
        irq_migrate_all_off_this_cpu();

        /* Set CPPR to 0 to disable flow of interrupts */
        xc->cppr = 0;
        out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);

        /* Flush everything still in the queue */
        xive_flush_cpu_queue(cpu, xc);

        /* Re-enable CPPR */
        xc->cppr = 0xff;
        out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
}

void xive_flush_interrupt(void)
{
        struct xive_cpu *xc = __this_cpu_read(xive_cpu);
        unsigned int cpu = smp_processor_id();

        /* Called if an interrupt occurs while the CPU is hot unplugged */
        xive_flush_cpu_queue(cpu, xc);
}

#endif /* CONFIG_HOTPLUG_CPU */

#endif /* CONFIG_SMP */
void xive_teardown_cpu(void)
{
        struct xive_cpu *xc = __this_cpu_read(xive_cpu);
        unsigned int cpu = smp_processor_id();

        /* Set CPPR to 0 to disable flow of interrupts */
        xc->cppr = 0;
        out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);

        if (xive_ops->teardown_cpu)
                xive_ops->teardown_cpu(cpu, xc);

#ifdef CONFIG_SMP
        /* Get rid of IPI */
        xive_cleanup_cpu_ipi(cpu, xc);
#endif

        /* Disable and free the queues */
        xive_cleanup_cpu_queues(cpu, xc);
}

void xive_kexec_teardown_cpu(int secondary)
{
        struct xive_cpu *xc = __this_cpu_read(xive_cpu);
        unsigned int cpu = smp_processor_id();

        /* Set CPPR to 0 to disable flow of interrupts */
        xc->cppr = 0;
        out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);

        /* Backend cleanup if any */
        if (xive_ops->teardown_cpu)
                xive_ops->teardown_cpu(cpu, xc);

#ifdef CONFIG_SMP
        /* Get rid of IPI */
        xive_cleanup_cpu_ipi(cpu, xc);
#endif

        /* Disable and free the queues */
        xive_cleanup_cpu_queues(cpu, xc);
}
void xive_shutdown(void)
{
        xive_ops->shutdown();
}

bool __init xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
                           u8 max_prio)
{
        xive_tima = area;
        xive_tima_offset = offset;
        xive_ops = ops;
        xive_irq_priority = max_prio;

        ppc_md.get_irq = xive_get_irq;
        __xive_enabled = true;

        pr_devel("Initializing host..\n");
        xive_init_host();

        pr_devel("Initializing boot CPU..\n");

        /* Allocate per-CPU data and queues */
        xive_prepare_cpu(smp_processor_id());

        /* Get ready for interrupts */
        xive_setup_cpu();

        pr_info("Interrupt handling initialized with %s backend\n",
                xive_ops->name);
        pr_info("Using priority %d for all interrupts\n", max_prio);

        return true;
}
__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift)
{
        unsigned int alloc_order;
        struct page *pages;
        __be32 *qpage;

        alloc_order = xive_alloc_order(queue_shift);
        pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order);
        if (!pages)
                return ERR_PTR(-ENOMEM);
        qpage = (__be32 *)page_address(pages);
        memset(qpage, 0, 1 << queue_shift);

        return qpage;
}
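
/*
 * Note: queue_shift is log2 of the queue size in bytes; each event is
 * a 4-byte big-endian word, hence the __be32 page type and the
 * be32_to_cpup() reads in xive_read_eq(). xive_alloc_order() (in
 * xive-internal.h) is expected to translate that into a page
 * allocation order, i.e. roughly max(queue_shift - PAGE_SHIFT, 0).
 */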
static int __init xive_off(char *arg)
{
        xive_cmdline_disabled = true;
        return 0;
}
__setup("xive=off", xive_off);