/*
 * Copyright 2016,2017 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/mm.h>
#include <linux/delay.h>

#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/hvcall.h>

#include "xive-internal.h"
static u32 xive_queue_shift;
struct xive_irq_bitmap {
	unsigned long		*bitmap;
	unsigned int		base;
	unsigned int		count;
	spinlock_t		lock;
	struct list_head	list;
};

static LIST_HEAD(xive_irq_bitmaps);
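/*
 * The hypervisor advertises the ranges of hardware interrupt numbers
 * (LISNs) usable by the partition in the device tree. Each range is
 * tracked with a small bitmap from which IPI numbers are allocated
 * and freed at runtime.
 */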
static int xive_irq_bitmap_add(int base, int count)
{
	struct xive_irq_bitmap *xibm;

	xibm = kzalloc(sizeof(*xibm), GFP_ATOMIC);
	if (!xibm)
		return -ENOMEM;

	spin_lock_init(&xibm->lock);
	xibm->base = base;
	xibm->count = count;
	xibm->bitmap = kzalloc(xibm->count, GFP_KERNEL);
	list_add(&xibm->list, &xive_irq_bitmaps);

	pr_info("Using IRQ range [%x-%x]", xibm->base,
		xibm->base + xibm->count - 1);
	return 0;
}
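/*
 * Find a free bit in @xibm and return the corresponding hardware IRQ
 * number, or -ENOMEM if the range is exhausted. The caller is
 * expected to hold xibm->lock.
 */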
static int __xive_irq_bitmap_alloc(struct xive_irq_bitmap *xibm)
{
	int irq;

	irq = find_first_zero_bit(xibm->bitmap, xibm->count);
	if (irq != xibm->count) {
		set_bit(irq, xibm->bitmap);
		irq += xibm->base;
	} else {
		irq = -ENOMEM;
	}

	return irq;
}
static int xive_irq_bitmap_alloc(void)
{
	struct xive_irq_bitmap *xibm;
	unsigned long flags;
	int irq = -ENOENT;

	list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
		spin_lock_irqsave(&xibm->lock, flags);
		irq = __xive_irq_bitmap_alloc(xibm);
		spin_unlock_irqrestore(&xibm->lock, flags);
		if (irq >= 0)
			break;
	}
	return irq;
}
static void xive_irq_bitmap_free(int irq)
{
	unsigned long flags;
	struct xive_irq_bitmap *xibm;

	list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
		if ((irq >= xibm->base) && (irq < xibm->base + xibm->count)) {
			spin_lock_irqsave(&xibm->lock, flags);
			clear_bit(irq - xibm->base, xibm->bitmap);
			spin_unlock_irqrestore(&xibm->lock, flags);
			break;
		}
	}
}
/* Based on the similar routines in RTAS */
static unsigned int plpar_busy_delay_time(long rc)
{
	unsigned int ms = 0;

	if (H_IS_LONG_BUSY(rc)) {
		ms = get_longbusy_msecs(rc);
	} else if (rc == H_BUSY) {
		ms = 10; /* seems appropriate for XIVE hcalls */
	}

	return ms;
}
static unsigned int plpar_busy_delay(int rc)
{
	unsigned int ms;

	ms = plpar_busy_delay_time(rc);
	if (ms)
		mdelay(ms);

	return ms;
}
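/*
 * The hcall wrappers below retry their hypervisor call for as long as
 * it returns H_BUSY or one of the H_LONG_BUSY_* codes, delaying for
 * the interval suggested by plpar_busy_delay_time() between attempts.
 */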
/*
 * Note: this call has a partition wide scope and can take a while to
 * complete. If it returns H_LONG_BUSY_* it should be retried
 * periodically.
 */
static long plpar_int_reset(unsigned long flags)
{
	long rc;

	do {
		rc = plpar_hcall_norets(H_INT_RESET, flags);
	} while (plpar_busy_delay(rc));

	if (rc)
		pr_err("H_INT_RESET failed %ld\n", rc);

	return rc;
}
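/*
 * H_INT_GET_SOURCE_INFO returns the characteristics of an interrupt
 * source: its flags and the addresses and page shift of its EOI and
 * trigger ESB pages.
 */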
static long plpar_int_get_source_info(unsigned long flags,
				      unsigned long lisn,
				      unsigned long *src_flags,
				      unsigned long *eoi_page,
				      unsigned long *trig_page,
				      unsigned long *esb_shift)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	do {
		rc = plpar_hcall(H_INT_GET_SOURCE_INFO, retbuf, flags, lisn);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_GET_SOURCE_INFO lisn=%ld failed %ld\n", lisn, rc);
		return rc;
	}

	*src_flags = retbuf[0];
	*eoi_page  = retbuf[1];
	*trig_page = retbuf[2];
	*esb_shift = retbuf[3];

	pr_devel("H_INT_GET_SOURCE_INFO flags=%lx eoi=%lx trig=%lx shift=%lx\n",
		 retbuf[0], retbuf[1], retbuf[2], retbuf[3]);

	return 0;
}
#define XIVE_SRC_SET_EISN	(1ull << (63 - 62))
#define XIVE_SRC_MASK		(1ull << (63 - 63)) /* unused */
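/*
 * H_INT_SET_SOURCE_CONFIG routes an interrupt source (lisn) to a
 * target CPU and priority. With XIVE_SRC_SET_EISN, sw_irq is also
 * recorded as the number reported in the target's event queue.
 */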
static long plpar_int_set_source_config(unsigned long flags,
					unsigned long lisn,
					unsigned long target,
					unsigned long prio,
					unsigned long sw_irq)
{
	long rc;

	pr_devel("H_INT_SET_SOURCE_CONFIG flags=%lx lisn=%lx target=%lx prio=%lx sw_irq=%lx\n",
		 flags, lisn, target, prio, sw_irq);

	do {
		rc = plpar_hcall_norets(H_INT_SET_SOURCE_CONFIG, flags, lisn,
					target, prio, sw_irq);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_SET_SOURCE_CONFIG lisn=%ld target=%lx prio=%lx failed %ld\n",
		       lisn, target, prio, rc);
		return rc;
	}

	return 0;
}
static long plpar_int_get_queue_info(unsigned long flags,
				     unsigned long target,
				     unsigned long priority,
				     unsigned long *esn_page,
				     unsigned long *esn_size)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	do {
		rc = plpar_hcall(H_INT_GET_QUEUE_INFO, retbuf, flags, target,
				 priority);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_GET_QUEUE_INFO cpu=%ld prio=%ld failed %ld\n",
		       target, priority, rc);
		return rc;
	}

	*esn_page = retbuf[0];
	*esn_size = retbuf[1];

	pr_devel("H_INT_GET_QUEUE_INFO page=%lx size=%lx\n",
		 retbuf[0], retbuf[1]);

	return 0;
}
#define XIVE_EQ_ALWAYS_NOTIFY	(1ull << (63 - 63))
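/*
 * H_INT_SET_QUEUE_CONFIG installs (or, with a zero page and size,
 * disables) the event queue used by a target CPU at a given priority.
 * XIVE_EQ_ALWAYS_NOTIFY requests a notification for each queued event.
 */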
static long plpar_int_set_queue_config(unsigned long flags,
				       unsigned long target,
				       unsigned long priority,
				       unsigned long qpage,
				       unsigned long qsize)
{
	long rc;

	pr_devel("H_INT_SET_QUEUE_CONFIG flags=%lx target=%lx priority=%lx qpage=%lx qsize=%lx\n",
		 flags, target, priority, qpage, qsize);

	do {
		rc = plpar_hcall_norets(H_INT_SET_QUEUE_CONFIG, flags, target,
					priority, qpage, qsize);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_SET_QUEUE_CONFIG cpu=%ld prio=%ld qpage=%lx returned %ld\n",
		       target, priority, qpage, rc);
		return rc;
	}

	return 0;
}
static long plpar_int_sync(unsigned long flags, unsigned long lisn)
{
	long rc;

	do {
		rc = plpar_hcall_norets(H_INT_SYNC, flags, lisn);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_SYNC lisn=%ld returned %ld\n", lisn, rc);
		return rc;
	}

	return 0;
}
#define XIVE_ESB_FLAG_STORE	(1ull << (63 - 63))
static long plpar_int_esb(unsigned long flags,
			  unsigned long lisn,
			  unsigned long offset,
			  unsigned long in_data,
			  unsigned long *out_data)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	pr_devel("H_INT_ESB flags=%lx lisn=%lx offset=%lx in=%lx\n",
		 flags, lisn, offset, in_data);

	do {
		rc = plpar_hcall(H_INT_ESB, retbuf, flags, lisn, offset,
				 in_data);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_ESB lisn=%ld offset=%ld returned %ld\n",
		       lisn, offset, rc);
		return rc;
	}

	*out_data = retbuf[0];

	return 0;
}
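/*
 * When a source carries the XIVE_SRC_H_INT_ESB flag, its ESB pages
 * cannot be accessed directly by the OS and loads/stores are routed
 * through the H_INT_ESB hcall instead.
 */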
static u64 xive_spapr_esb_rw(u32 lisn, u32 offset, u64 data, bool write)
{
	unsigned long read_data;
	long rc;

	rc = plpar_int_esb(write ? XIVE_ESB_FLAG_STORE : 0,
			   lisn, offset, data, &read_data);
	if (rc)
		return -1;

	return write ? 0 : read_data;
}
#define XIVE_SRC_H_INT_ESB	(1ull << (63 - 60))
#define XIVE_SRC_LSI		(1ull << (63 - 61))
#define XIVE_SRC_TRIGGER	(1ull << (63 - 62))
#define XIVE_SRC_STORE_EOI	(1ull << (63 - 63))
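/*
 * Query the hypervisor for the characteristics of a source and fill
 * in the xive_irq_data structure used by the XIVE core, mapping the
 * EOI page and, when the source has no full function page, a
 * separate trigger page.
 */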
static int xive_spapr_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
	long rc;
	unsigned long flags;
	unsigned long eoi_page;
	unsigned long trig_page;
	unsigned long esb_shift;

	memset(data, 0, sizeof(*data));

	rc = plpar_int_get_source_info(0, hw_irq, &flags, &eoi_page, &trig_page,
				       &esb_shift);
	if (rc)
		return -EINVAL;

	if (flags & XIVE_SRC_H_INT_ESB)
		data->flags |= XIVE_IRQ_FLAG_H_INT_ESB;
	if (flags & XIVE_SRC_STORE_EOI)
		data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
	if (flags & XIVE_SRC_LSI)
		data->flags |= XIVE_IRQ_FLAG_LSI;
	data->eoi_page = eoi_page;
	data->esb_shift = esb_shift;
	data->trig_page = trig_page;

	/*
	 * No chip-id for the sPAPR backend. This has an impact on how we
	 * pick a target. See xive_pick_irq_target().
	 */
	data->src_chip = XIVE_INVALID_CHIP_ID;

	data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
	if (!data->eoi_mmio) {
		pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}

	data->hw_irq = hw_irq;

	/* Full function page supports trigger */
	if (flags & XIVE_SRC_TRIGGER) {
		data->trig_mmio = data->eoi_mmio;
		return 0;
	}

	data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
	if (!data->trig_mmio) {
		pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}
	return 0;
}
static int xive_spapr_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
	long rc;

	rc = plpar_int_set_source_config(XIVE_SRC_SET_EISN, hw_irq, target,
					 prio, sw_irq);

	return rc == 0 ? 0 : -ENXIO;
}
/* This can be called multiple times to change a queue configuration */
static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
				      __be32 *qpage, u32 order)
{
	s64 rc = 0;
	unsigned long esn_page;
	unsigned long esn_size;
	u64 flags, qpage_phys;

	/* If there's an actual queue page, clean it */
	if (order) {
		if (WARN_ON(!qpage))
			return -EINVAL;
		qpage_phys = __pa(qpage);
	} else {
		qpage_phys = 0;
	}

	/* Initialize the rest of the fields */
	q->msk = order ? ((1u << (order - 2)) - 1) : 0;
	q->idx = 0;
	q->toggle = 0;

	rc = plpar_int_get_queue_info(0, target, prio, &esn_page, &esn_size);
	if (rc) {
		pr_err("Error %lld getting queue info CPU %d prio %d\n", rc,
		       target, prio);
		rc = -EIO;
		goto fail;
	}

	/* TODO: add support for the notification page */
	q->eoi_phys = esn_page;

	/* Default is to always notify */
	flags = XIVE_EQ_ALWAYS_NOTIFY;

	/* Configure and enable the queue in HW */
	rc = plpar_int_set_queue_config(flags, target, prio, qpage_phys, order);
	if (rc) {
		pr_err("Error %lld setting queue for CPU %d prio %d\n", rc,
		       target, prio);
		rc = -EIO;
	} else {
		q->qpage = qpage;
	}

fail:
	return rc;
}
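/*
 * Allocate a per-CPU event queue page of the globally selected size
 * and register it with the hypervisor for the given priority.
 */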
static int xive_spapr_setup_queue(unsigned int cpu, struct xive_cpu *xc,
				  u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	__be32 *qpage;

	qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
	if (IS_ERR(qpage))
		return PTR_ERR(qpage);

	return xive_spapr_configure_queue(get_hard_smp_processor_id(cpu),
					  q, prio, qpage, xive_queue_shift);
}
static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
				     u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	unsigned int alloc_order;
	long rc;
	int hw_cpu = get_hard_smp_processor_id(cpu);

	rc = plpar_int_set_queue_config(0, hw_cpu, prio, 0, 0);
	if (rc)
		pr_err("Error %ld setting queue for CPU %d prio %d\n", rc,
		       hw_cpu, prio);

	alloc_order = xive_alloc_order(xive_queue_shift);
	free_pages((unsigned long)q->qpage, alloc_order);
	q->qpage = NULL;
}
static bool xive_spapr_match(struct device_node *node)
{
	/* Ignore cascaded controllers for the moment */
	return true;
}
#ifdef CONFIG_SMP
static int xive_spapr_get_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	int irq = xive_irq_bitmap_alloc();

	if (irq < 0) {
		pr_err("Failed to allocate IPI on CPU %d\n", cpu);
		return -ENXIO;
	}

	xc->hw_ipi = irq;
	return 0;
}

static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	if (!xc->hw_ipi)
		return;

	xive_irq_bitmap_free(xc->hw_ipi);
	xc->hw_ipi = 0;
}
#endif /* CONFIG_SMP */
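/*
 * On shutdown, reset the partition's XIVE state with H_INT_RESET
 * (this hcall has a partition wide scope, see plpar_int_reset()).
 */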
static void xive_spapr_shutdown(void)
{
	plpar_int_reset(0);
}
/*
 * Perform an "ack" cycle on the current thread. Grab the pending
 * active priorities and update the CPPR to the most favored one.
 */
static void xive_spapr_update_pending(struct xive_cpu *xc)
{
	u8 nsr, cppr;
	u16 ack;

	/*
	 * Perform the "Acknowledge O/S to Register" cycle.
	 *
	 * Let's speedup the access to the TIMA using the raw I/O
	 * accessor as we don't need the synchronisation routine of
	 * the higher level ones
	 */
	ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_OS_REG));

	/* Synchronize subsequent queue accesses */
	mb();

	/*
	 * Grab the CPPR and the "NSR" field which indicates the source
	 * of the interrupt (if any)
	 */
	cppr = ack & 0xff;
	nsr = ack >> 8;

	if (nsr & TM_QW1_NSR_EO) {
		if (cppr == 0xff)
			return;
		/* Mark the priority pending */
		xc->pending_prio |= 1 << cppr;

		/*
		 * A new interrupt should never have a CPPR less favored
		 * than our current one.
		 */
		if (cppr >= xc->cppr)
			pr_err("CPU %d odd ack CPPR, got %d at %d\n",
			       smp_processor_id(), cppr, xc->cppr);

		/* Update our idea of what the CPPR is */
		xc->cppr = cppr;
	}
}
static void xive_spapr_eoi(u32 hw_irq)
{
	/* Not used */;
}
static void xive_spapr_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	/* Only some debug on the TIMA settings */
	pr_debug("(HW value: %08x %08x %08x)\n",
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD0),
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD1),
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD2));
}
static void xive_spapr_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	/* Nothing to do */;
}
static void xive_spapr_sync_source(u32 hw_irq)
{
	/* Specs are unclear on what this is doing */
	plpar_int_sync(0, hw_irq);
}
static const struct xive_ops xive_spapr_ops = {
	.populate_irq_data	= xive_spapr_populate_irq_data,
	.configure_irq		= xive_spapr_configure_irq,
	.setup_queue		= xive_spapr_setup_queue,
	.cleanup_queue		= xive_spapr_cleanup_queue,
	.match			= xive_spapr_match,
	.shutdown		= xive_spapr_shutdown,
	.update_pending		= xive_spapr_update_pending,
	.eoi			= xive_spapr_eoi,
	.setup_cpu		= xive_spapr_setup_cpu,
	.teardown_cpu		= xive_spapr_teardown_cpu,
	.sync_source		= xive_spapr_sync_source,
	.esb_rw			= xive_spapr_esb_rw,
#ifdef CONFIG_SMP
	.get_ipi		= xive_spapr_get_ipi,
	.put_ipi		= xive_spapr_put_ipi,
#endif /* CONFIG_SMP */
	.name			= "spapr",
};
/*
 * get max priority from "/ibm,plat-res-int-priorities"
 */
static bool xive_get_max_prio(u8 *max_prio)
{
	struct device_node *rootdn;
	const __be32 *reg;
	u32 len;
	int prio, found;

	rootdn = of_find_node_by_path("/");
	if (!rootdn) {
		pr_err("not root node found !\n");
		return false;
	}

	reg = of_get_property(rootdn, "ibm,plat-res-int-priorities", &len);
	if (!reg) {
		pr_err("Failed to read 'ibm,plat-res-int-priorities' property\n");
		return false;
	}

	if (len % (2 * sizeof(u32)) != 0) {
		pr_err("invalid 'ibm,plat-res-int-priorities' property\n");
		return false;
	}

	/* HW supports priorities in the range [0-7] and 0xFF is a
	 * wildcard priority used to mask. We scan the ranges reserved
	 * by the hypervisor to find the lowest priority we can use.
	 */
	found = 0xFF;
	for (prio = 0; prio < 8; prio++) {
		int reserved = 0;
		int i;

		for (i = 0; i < len / (2 * sizeof(u32)); i++) {
			int base = be32_to_cpu(reg[2 * i]);
			int range = be32_to_cpu(reg[2 * i + 1]);

			if (prio >= base && prio < base + range)
				reserved++;
		}

		if (!reserved)
			found = prio;
	}

	if (found == 0xFF) {
		pr_err("no valid priority found in 'ibm,plat-res-int-priorities'\n");
		return false;
	}

	*max_prio = found;
	return true;
}
bool __init xive_spapr_init(void)
{
	struct device_node *np;
	struct resource r;
	void __iomem *tima;
	struct property *prop;
	u8 max_prio;
	u32 val;
	u32 len;
	const __be32 *reg;
	int i;

	if (xive_cmdline_disabled)
		return false;

	pr_devel("%s()\n", __func__);
	np = of_find_compatible_node(NULL, NULL, "ibm,power-ivpe");
	if (!np) {
		pr_devel("not found !\n");
		return false;
	}
	pr_devel("Found %s\n", np->full_name);

	/* Resource 1 is the OS ring TIMA */
	if (of_address_to_resource(np, 1, &r)) {
		pr_err("Failed to get thread mgmnt area resource\n");
		return false;
	}
	tima = ioremap(r.start, resource_size(&r));
	if (!tima) {
		pr_err("Failed to map thread mgmnt area\n");
		return false;
	}

	if (!xive_get_max_prio(&max_prio))
		return false;

	/* Feed the IRQ number allocator with the ranges given in the DT */
	reg = of_get_property(np, "ibm,xive-lisn-ranges", &len);
	if (!reg) {
		pr_err("Failed to read 'ibm,xive-lisn-ranges' property\n");
		return false;
	}

	if (len % (2 * sizeof(u32)) != 0) {
		pr_err("invalid 'ibm,xive-lisn-ranges' property\n");
		return false;
	}

	for (i = 0; i < len / (2 * sizeof(u32)); i++, reg += 2)
		xive_irq_bitmap_add(be32_to_cpu(reg[0]),
				    be32_to_cpu(reg[1]));

	/* Iterate the EQ sizes and pick one */
	of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, reg, val) {
		xive_queue_shift = val;
		if (val == PAGE_SHIFT)
			break;
	}

	/* Initialize XIVE core with our backend */
	if (!xive_core_init(&xive_spapr_ops, tima, TM_QW1_OS, max_prio))
		return false;

	pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
	return true;
}