/*
 * Copyright 2016,2017 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/mm.h>

#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/hvcall.h>

#include "xive-internal.h"
static u32 xive_queue_shift;
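/*
 * The hypervisor advertises LISN (logical interrupt source number)
 * ranges in the device tree; each range is tracked with a small
 * bitmap so that numbers can be allocated and freed (used for IPIs).
 */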
struct xive_irq_bitmap {
	unsigned long		*bitmap;
	unsigned int		base;
	unsigned int		count;
	spinlock_t		lock;
	struct list_head	list;
};

static LIST_HEAD(xive_irq_bitmaps);
static int xive_irq_bitmap_add(int base, int count)
{
	struct xive_irq_bitmap *xibm;

	xibm = kzalloc(sizeof(*xibm), GFP_ATOMIC);
	if (!xibm)
		return -ENOMEM;

	spin_lock_init(&xibm->lock);
	xibm->base = base;
	xibm->count = count;
	xibm->bitmap = kzalloc(xibm->count, GFP_KERNEL);
	list_add(&xibm->list, &xive_irq_bitmaps);

	pr_info("Using IRQ range [%x-%x]", xibm->base,
		xibm->base + xibm->count - 1);
	return 0;
}
static int __xive_irq_bitmap_alloc(struct xive_irq_bitmap *xibm)
{
	int irq;

	irq = find_first_zero_bit(xibm->bitmap, xibm->count);
	if (irq != xibm->count) {
		set_bit(irq, xibm->bitmap);
		irq += xibm->base;
	} else {
		irq = -ENOMEM;
	}

	return irq;
}
static int xive_irq_bitmap_alloc(void)
{
	struct xive_irq_bitmap *xibm;
	unsigned long flags;
	int irq = -ENOENT;

	list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
		spin_lock_irqsave(&xibm->lock, flags);
		irq = __xive_irq_bitmap_alloc(xibm);
		spin_unlock_irqrestore(&xibm->lock, flags);
		if (irq >= 0)
			break;
	}
	return irq;
}
static void xive_irq_bitmap_free(int irq)
{
	unsigned long flags;
	struct xive_irq_bitmap *xibm;

	list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
		if ((irq >= xibm->base) && (irq < xibm->base + xibm->count)) {
			spin_lock_irqsave(&xibm->lock, flags);
			clear_bit(irq - xibm->base, xibm->bitmap);
			spin_unlock_irqrestore(&xibm->lock, flags);
			break;
		}
	}
}
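/*
 * The helpers below are thin wrappers around the H_INT_* hcalls of the
 * sPAPR XIVE exploitation interface: they pass their arguments through,
 * log any failure, and copy the hcall return buffer into the caller's
 * output parameters.
 */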
static long plpar_int_get_source_info(unsigned long flags,
				      unsigned long lisn,
				      unsigned long *src_flags,
				      unsigned long *eoi_page,
				      unsigned long *trig_page,
				      unsigned long *esb_shift)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_INT_GET_SOURCE_INFO, retbuf, flags, lisn);
	if (rc) {
		pr_err("H_INT_GET_SOURCE_INFO lisn=%ld failed %ld\n", lisn, rc);
		return rc;
	}

	*src_flags = retbuf[0];
	*eoi_page = retbuf[1];
	*trig_page = retbuf[2];
	*esb_shift = retbuf[3];

	pr_devel("H_INT_GET_SOURCE_INFO flags=%lx eoi=%lx trig=%lx shift=%lx\n",
		 retbuf[0], retbuf[1], retbuf[2], retbuf[3]);

	return 0;
}
#define XIVE_SRC_SET_EISN	(1ull << (63 - 62))
#define XIVE_SRC_MASK		(1ull << (63 - 63)) /* unused */
static long plpar_int_set_source_config(unsigned long flags,
					unsigned long lisn,
					unsigned long target,
					unsigned long prio,
					unsigned long sw_irq)
{
	long rc;

	pr_devel("H_INT_SET_SOURCE_CONFIG flags=%lx lisn=%lx target=%lx prio=%lx sw_irq=%lx\n",
		 flags, lisn, target, prio, sw_irq);

	rc = plpar_hcall_norets(H_INT_SET_SOURCE_CONFIG, flags, lisn,
				target, prio, sw_irq);
	if (rc) {
		pr_err("H_INT_SET_SOURCE_CONFIG lisn=%ld target=%lx prio=%lx failed %ld\n",
		       lisn, target, prio, rc);
		return rc;
	}

	return 0;
}
static long plpar_int_get_queue_info(unsigned long flags,
				     unsigned long target,
				     unsigned long priority,
				     unsigned long *esn_page,
				     unsigned long *esn_size)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_INT_GET_QUEUE_INFO, retbuf, flags, target, priority);
	if (rc) {
		pr_err("H_INT_GET_QUEUE_INFO cpu=%ld prio=%ld failed %ld\n",
		       target, priority, rc);
		return rc;
	}

	*esn_page = retbuf[0];
	*esn_size = retbuf[1];

	pr_devel("H_INT_GET_QUEUE_INFO page=%lx size=%lx\n",
		 retbuf[0], retbuf[1]);

	return 0;
}
#define XIVE_EQ_ALWAYS_NOTIFY	(1ull << (63 - 63))
static long plpar_int_set_queue_config(unsigned long flags,
				       unsigned long target,
				       unsigned long priority,
				       unsigned long qpage,
				       unsigned long qsize)
{
	long rc;

	pr_devel("H_INT_SET_QUEUE_CONFIG flags=%lx target=%lx priority=%lx qpage=%lx qsize=%lx\n",
		 flags, target, priority, qpage, qsize);

	rc = plpar_hcall_norets(H_INT_SET_QUEUE_CONFIG, flags, target,
				priority, qpage, qsize);
	if (rc) {
		pr_err("H_INT_SET_QUEUE_CONFIG cpu=%ld prio=%ld qpage=%lx returned %ld\n",
		       target, priority, qpage, rc);
		return rc;
	}

	return 0;
}
static long plpar_int_sync(unsigned long flags, unsigned long lisn)
{
	long rc;

	rc = plpar_hcall_norets(H_INT_SYNC, flags, lisn);
	if (rc) {
		pr_err("H_INT_SYNC lisn=%ld returned %ld\n", lisn, rc);
		return rc;
	}

	return 0;
}
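/*
 * H_INT_ESB performs an ESB load or store on behalf of the OS, for
 * sources whose ESB pages cannot be accessed directly through MMIO
 * (flagged with XIVE_SRC_H_INT_ESB by H_INT_GET_SOURCE_INFO).
 */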
#define XIVE_ESB_FLAG_STORE	(1ull << (63 - 63))
static long plpar_int_esb(unsigned long flags,
			  unsigned long lisn,
			  unsigned long offset,
			  unsigned long in_data,
			  unsigned long *out_data)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	pr_devel("H_INT_ESB flags=%lx lisn=%lx offset=%lx in=%lx\n",
		 flags, lisn, offset, in_data);

	rc = plpar_hcall(H_INT_ESB, retbuf, flags, lisn, offset, in_data);
	if (rc) {
		pr_err("H_INT_ESB lisn=%ld offset=%ld returned %ld\n",
		       lisn, offset, rc);
		return rc;
	}

	*out_data = retbuf[0];

	return 0;
}
static u64 xive_spapr_esb_rw(u32 lisn, u32 offset, u64 data, bool write)
{
	unsigned long read_data;
	long rc;

	rc = plpar_int_esb(write ? XIVE_ESB_FLAG_STORE : 0,
			   lisn, offset, data, &read_data);
	if (rc)
		return -1;

	return write ? 0 : read_data;
}
#define XIVE_SRC_H_INT_ESB	(1ull << (63 - 60))
#define XIVE_SRC_LSI		(1ull << (63 - 61))
#define XIVE_SRC_TRIGGER	(1ull << (63 - 62))
#define XIVE_SRC_STORE_EOI	(1ull << (63 - 63))
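/*
 * Query the hypervisor for the source characteristics, translate the
 * returned flags into XIVE_IRQ_FLAG_* values, and map the ESB pages
 * used for EOI and trigger.
 */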
static int xive_spapr_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
	long rc;
	unsigned long flags;
	unsigned long eoi_page;
	unsigned long trig_page;
	unsigned long esb_shift;

	memset(data, 0, sizeof(*data));

	rc = plpar_int_get_source_info(0, hw_irq, &flags, &eoi_page, &trig_page,
				       &esb_shift);
	if (rc)
		return -EINVAL;

	if (flags & XIVE_SRC_H_INT_ESB)
		data->flags |= XIVE_IRQ_FLAG_H_INT_ESB;
	if (flags & XIVE_SRC_STORE_EOI)
		data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
	if (flags & XIVE_SRC_LSI)
		data->flags |= XIVE_IRQ_FLAG_LSI;
	data->eoi_page = eoi_page;
	data->esb_shift = esb_shift;
	data->trig_page = trig_page;

	/*
	 * No chip-id for the sPAPR backend. This has an impact on how we
	 * pick a target. See xive_pick_irq_target().
	 */
	data->src_chip = XIVE_INVALID_CHIP_ID;

	data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
	if (!data->eoi_mmio) {
		pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}

	data->hw_irq = hw_irq;

	/* Full function page supports trigger */
	if (flags & XIVE_SRC_TRIGGER) {
		data->trig_mmio = data->eoi_mmio;
		return 0;
	}

	data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
	if (!data->trig_mmio) {
		pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}
	return 0;
}
static int xive_spapr_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
	long rc;

	rc = plpar_int_set_source_config(XIVE_SRC_SET_EISN, hw_irq, target,
					 prio, sw_irq);

	return rc == 0 ? 0 : -ENXIO;
}
/* This can be called multiple times to change a queue configuration */
static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
				      __be32 *qpage, u32 order)
{
	s64 rc = 0;
	unsigned long esn_page;
	unsigned long esn_size;
	u64 flags, qpage_phys;

	/* If there's an actual queue page, clean it */
	if (order) {
		if (WARN_ON(!qpage))
			return -EINVAL;
		qpage_phys = __pa(qpage);
	} else {
		qpage_phys = 0;
	}

	/* Initialize the rest of the fields */
	q->msk = order ? ((1u << (order - 2)) - 1) : 0;
	q->idx = 0;
	q->toggle = 0;

	rc = plpar_int_get_queue_info(0, target, prio, &esn_page, &esn_size);
	if (rc) {
		pr_err("Error %lld getting queue info CPU %d prio %d\n", rc,
		       target, prio);
		rc = -EIO;
		goto fail;
	}

	/* TODO: add support for the notification page */
	q->eoi_phys = esn_page;

	/* Default is to always notify */
	flags = XIVE_EQ_ALWAYS_NOTIFY;

	/* Configure and enable the queue in HW */
	rc = plpar_int_set_queue_config(flags, target, prio, qpage_phys, order);
	if (rc) {
		pr_err("Error %lld setting queue for CPU %d prio %d\n", rc,
		       target, prio);
		rc = -EIO;
	} else {
		q->qpage = qpage;
	}

fail:
	return rc;
}
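/*
 * Allocate one event queue page per CPU and priority and hand it to
 * the hypervisor through H_INT_SET_QUEUE_CONFIG.
 */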
static int xive_spapr_setup_queue(unsigned int cpu, struct xive_cpu *xc,
				  u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	__be32 *qpage;

	qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
	if (IS_ERR(qpage))
		return PTR_ERR(qpage);

	return xive_spapr_configure_queue(get_hard_smp_processor_id(cpu),
					  q, prio, qpage, xive_queue_shift);
}
static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
				     u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	unsigned int alloc_order;
	long rc;
	int hw_cpu = get_hard_smp_processor_id(cpu);

	rc = plpar_int_set_queue_config(0, hw_cpu, prio, 0, 0);
	if (rc)
		pr_err("Error %ld setting queue for CPU %d prio %d\n", rc,
		       hw_cpu, prio);

	alloc_order = xive_alloc_order(xive_queue_shift);
	free_pages((unsigned long)q->qpage, alloc_order);
	q->qpage = NULL;
}
static bool xive_spapr_match(struct device_node *node)
{
	/* Ignore cascaded controllers for the moment */
	return true;
}
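/*
 * IPIs are ordinary interrupt sources on sPAPR, so their hardware
 * numbers are simply carved out of the LISN bitmaps set up above.
 */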
#ifdef CONFIG_SMP
static int xive_spapr_get_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	int irq = xive_irq_bitmap_alloc();

	if (irq < 0) {
		pr_err("Failed to allocate IPI on CPU %d\n", cpu);
		return -ENXIO;
	}

	xc->hw_ipi = irq;
	return 0;
}

static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	if (!xc->hw_ipi)
		return;

	xive_irq_bitmap_free(xc->hw_ipi);
	xc->hw_ipi = 0;
}
#endif /* CONFIG_SMP */
static void xive_spapr_shutdown(void)
{
	long rc;

	rc = plpar_hcall_norets(H_INT_RESET, 0);
	if (rc)
		pr_err("H_INT_RESET failed %ld\n", rc);
}
/*
 * Perform an "ack" cycle on the current thread. Grab the pending
 * active priorities and update the CPPR to the most favored one.
 */
static void xive_spapr_update_pending(struct xive_cpu *xc)
{
	u8 nsr, cppr;
	u16 ack;

	/*
	 * Perform the "Acknowledge O/S to Register" cycle.
	 *
	 * Let's speed up the access to the TIMA using the raw I/O
	 * accessor as we don't need the synchronisation routine of
	 * the higher level ones.
	 */
	ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_OS_REG));

	/* Synchronize subsequent queue accesses */
	mb();

	/*
	 * Grab the CPPR and the "NSR" field which indicates the source
	 * of the interrupt (if any)
	 */
	cppr = ack & 0xff;
	nsr = ack >> 8;

	if (nsr & TM_QW1_NSR_EO) {
		/* Mark the priority pending */
		xc->pending_prio |= 1 << cppr;

		/*
		 * A new interrupt should never have a CPPR less favored
		 * than our current one.
		 */
		if (cppr >= xc->cppr)
			pr_err("CPU %d odd ack CPPR, got %d at %d\n",
			       smp_processor_id(), cppr, xc->cppr);

		/* Update our idea of what the CPPR is */
		xc->cppr = cppr;
	}
}
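/*
 * EOI is expected to be done by the common XIVE code through the ESB
 * pages (or the H_INT_ESB hcall), so there is nothing for this
 * backend hook to do.
 */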
static void xive_spapr_eoi(u32 hw_irq)
{
	/* Not used */;
}
static void xive_spapr_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	/* Only some debug on the TIMA settings */
	pr_debug("(HW value: %08x %08x %08x)\n",
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD0),
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD1),
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD2));
}
static void xive_spapr_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	/* Nothing to do */;
}
static void xive_spapr_sync_source(u32 hw_irq)
{
	/* Specs are unclear on what this is doing */
	plpar_int_sync(0, hw_irq);
}
static const struct xive_ops xive_spapr_ops = {
	.populate_irq_data	= xive_spapr_populate_irq_data,
	.configure_irq		= xive_spapr_configure_irq,
	.setup_queue		= xive_spapr_setup_queue,
	.cleanup_queue		= xive_spapr_cleanup_queue,
	.match			= xive_spapr_match,
	.shutdown		= xive_spapr_shutdown,
	.update_pending		= xive_spapr_update_pending,
	.eoi			= xive_spapr_eoi,
	.setup_cpu		= xive_spapr_setup_cpu,
	.teardown_cpu		= xive_spapr_teardown_cpu,
	.sync_source		= xive_spapr_sync_source,
	.esb_rw			= xive_spapr_esb_rw,
#ifdef CONFIG_SMP
	.get_ipi		= xive_spapr_get_ipi,
	.put_ipi		= xive_spapr_put_ipi,
#endif /* CONFIG_SMP */
	.name			= "spapr",
};
/*
 * get max priority from "/ibm,plat-res-int-priorities"
 */
static bool xive_get_max_prio(u8 *max_prio)
{
	struct device_node *rootdn;
	const __be32 *reg;
	u32 len;
	int prio, found;

	rootdn = of_find_node_by_path("/");
	if (!rootdn) {
		pr_err("not root node found !\n");
		return false;
	}

	reg = of_get_property(rootdn, "ibm,plat-res-int-priorities", &len);
	if (!reg) {
		pr_err("Failed to read 'ibm,plat-res-int-priorities' property\n");
		return false;
	}

	if (len % (2 * sizeof(u32)) != 0) {
		pr_err("invalid 'ibm,plat-res-int-priorities' property\n");
		return false;
	}

	/* HW supports priorities in the range [0-7] and 0xFF is a
	 * wildcard priority used to mask. We scan the ranges reserved
	 * by the hypervisor to find the lowest priority we can use.
	 */
	found = 0xFF;
	for (prio = 0; prio < 8; prio++) {
		int reserved = 0;
		int i;

		for (i = 0; i < len / (2 * sizeof(u32)); i++) {
			int base = be32_to_cpu(reg[2 * i]);
			int range = be32_to_cpu(reg[2 * i + 1]);

			if (prio >= base && prio < base + range)
				reserved++;
		}

		if (!reserved)
			found = prio;
	}

	if (found == 0xFF) {
		pr_err("no valid priority found in 'ibm,plat-res-int-priorities'\n");
		return false;
	}

	*max_prio = found;
	return true;
}
bool __init xive_spapr_init(void)
{
	struct device_node *np;
	struct resource r;
	void __iomem *tima;
	struct property *prop;
	u8 max_prio;
	u32 val;
	u32 len;
	const __be32 *reg;
	int i;

	if (xive_cmdline_disabled)
		return false;

	pr_devel("%s()\n", __func__);
	np = of_find_compatible_node(NULL, NULL, "ibm,power-ivpe");
	if (!np) {
		pr_devel("not found !\n");
		return false;
	}
	pr_devel("Found %s\n", np->full_name);

	/* Resource 1 is the OS ring TIMA */
	if (of_address_to_resource(np, 1, &r)) {
		pr_err("Failed to get thread mgmnt area resource\n");
		return false;
	}
	tima = ioremap(r.start, resource_size(&r));
	if (!tima) {
		pr_err("Failed to map thread mgmnt area\n");
		return false;
	}

	if (!xive_get_max_prio(&max_prio))
		return false;

	/* Feed the IRQ number allocator with the ranges given in the DT */
	reg = of_get_property(np, "ibm,xive-lisn-ranges", &len);
	if (!reg) {
		pr_err("Failed to read 'ibm,xive-lisn-ranges' property\n");
		return false;
	}

	if (len % (2 * sizeof(u32)) != 0) {
		pr_err("invalid 'ibm,xive-lisn-ranges' property\n");
		return false;
	}

	for (i = 0; i < len / (2 * sizeof(u32)); i++, reg += 2)
		xive_irq_bitmap_add(be32_to_cpu(reg[0]),
				    be32_to_cpu(reg[1]));

	/* Iterate the EQ sizes and pick one */
	of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, reg, val) {
		xive_queue_shift = val;
		if (val == PAGE_SHIFT)
			break;
	}

	/* Initialize XIVE core with our backend */
	if (!xive_core_init(&xive_spapr_ops, tima, TM_QW1_OS, max_prio))
		return false;

	pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
	return true;
}