// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2018 Christoph Hellwig
 */
#define pr_fmt(fmt) "riscv-plic: " fmt
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
#include <asm/smp.h>

/*
 * This driver implements a version of the RISC-V PLIC with the actual layout
 * specified in chapter 8 of the SiFive U5 Coreplex Series Manual:
 *
 *     https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf
 *
 * The largest number supported by devices marked as 'sifive,plic-1.0.0', is
 * 1024, of which device 0 is defined as non-existent by the RISC-V Privileged
 * Spec.
 */

#define MAX_DEVICES                     1024
#define MAX_CONTEXTS                    15872

/*
 * Each interrupt source has a priority register associated with it.
 * We always hardwire it to one in Linux.
 */
#define PRIORITY_BASE                   0
#define PRIORITY_PER_ID                 4

/*
 * Each hart context has a vector of interrupt enable bits associated with it.
 * There's one bit for each interrupt source.
 */
#define CONTEXT_ENABLE_BASE             0x2000
#define CONTEXT_ENABLE_SIZE             0x80

/*
 * Each hart context has a set of control registers associated with it. Right
 * now there's only two: a source priority threshold over which the hart will
 * take an interrupt, and a register to claim interrupts.
 */
#define CONTEXT_BASE                    0x200000
#define CONTEXT_SIZE                    0x1000
#define CONTEXT_THRESHOLD               0x00
#define CONTEXT_CLAIM                   0x04

#define PLIC_DISABLE_THRESHOLD          0x7
#define PLIC_ENABLE_THRESHOLD           0

#define PLIC_QUIRK_EDGE_INTERRUPT       0
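
/*
 * A worked example of the layout above (assuming the sifive,plic-1.0.0 map):
 * the priority register for source ID 5 is at regs + PRIORITY_BASE +
 * 5 * PRIORITY_PER_ID = regs + 0x14; context 1's enable bits start at
 * regs + CONTEXT_ENABLE_BASE + 1 * CONTEXT_ENABLE_SIZE = regs + 0x2080; its
 * threshold register is at regs + CONTEXT_BASE + 1 * CONTEXT_SIZE =
 * regs + 0x201000 and its claim/complete register at regs + 0x201004.
 */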

struct plic_priv {
        struct fwnode_handle *fwnode;
        struct cpumask lmask;
        struct irq_domain *irqdomain;
        void __iomem *regs;
        unsigned long plic_quirks;
        unsigned int nr_irqs;
        unsigned long *prio_save;
        u32 gsi_base;
        u32 acpi_plic_id;
};

struct plic_handler {
        bool                    present;
        void __iomem            *hart_base;
        /*
         * Protect mask operations on the registers given that we can't
         * assume atomic memory operations work on them.
         */
        raw_spinlock_t          enable_lock;
        void __iomem            *enable_base;
        u32                     *enable_save;
        struct plic_priv        *priv;
};

static int plic_parent_irq __ro_after_init;
static bool plic_global_setup_done __ro_after_init;
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);

static int plic_irq_set_type(struct irq_data *d, unsigned int type);

static void __plic_toggle(void __iomem *enable_base, int hwirq, int enable)
{
        u32 __iomem *reg = enable_base + (hwirq / 32) * sizeof(u32);
        u32 hwirq_mask = 1 << (hwirq % 32);

        if (enable)
                writel(readl(reg) | hwirq_mask, reg);
        else
                writel(readl(reg) & ~hwirq_mask, reg);
}
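
/*
 * Example of the enable-bit arithmetic above: for hwirq 33,
 * reg = enable_base + (33 / 32) * sizeof(u32) = enable_base + 4 and
 * hwirq_mask = 1 << (33 % 32) = 0x2, i.e. bit 1 of the second enable word.
 */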

static void plic_toggle(struct plic_handler *handler, int hwirq, int enable)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&handler->enable_lock, flags);
        __plic_toggle(handler->enable_base, hwirq, enable);
        raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
}

static inline void plic_irq_toggle(const struct cpumask *mask,
                                   struct irq_data *d, int enable)
{
        int cpu;

        for_each_cpu(cpu, mask) {
                struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

                plic_toggle(handler, d->hwirq, enable);
        }
}
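
/*
 * Note: the PLIC has no dedicated per-source mask bit, so mask/unmask below
 * are implemented through the priority registers: writing 0 parks a source
 * below every usable threshold, writing 1 restores the hardwired priority.
 */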

static void plic_irq_unmask(struct irq_data *d)
{
        struct plic_priv *priv = irq_data_get_irq_chip_data(d);

        writel(1, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
}

static void plic_irq_mask(struct irq_data *d)
{
        struct plic_priv *priv = irq_data_get_irq_chip_data(d);

        writel(0, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
}

static void plic_irq_enable(struct irq_data *d)
{
        plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1);
        plic_irq_unmask(d);
}

static void plic_irq_disable(struct irq_data *d)
{
        plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0);
}
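
/*
 * Completing a source that was masked mid-flight needs care: the PLIC is
 * specified to ignore a completion write for a source whose enable bit is
 * clear, so the EOI below briefly sets the enable bit around the
 * CONTEXT_CLAIM write to make the completion take effect.
 */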

static void plic_irq_eoi(struct irq_data *d)
{
        struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

        if (unlikely(irqd_irq_disabled(d))) {
                plic_toggle(handler, d->hwirq, 1);
                writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
                plic_toggle(handler, d->hwirq, 0);
        } else {
                writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
        }
}
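
/*
 * The PLIC has no hardware notion of affinity; it is emulated by keeping a
 * source's enable bit set in exactly one hart context at a time, so moving
 * an interrupt means clearing the bit on the old CPU and, unless the
 * interrupt is disabled, setting it on the new one.
 */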

#ifdef CONFIG_SMP
static int plic_set_affinity(struct irq_data *d,
                             const struct cpumask *mask_val, bool force)
{
        unsigned int cpu;
        struct plic_priv *priv = irq_data_get_irq_chip_data(d);

        if (force)
                cpu = cpumask_first_and(&priv->lmask, mask_val);
        else
                cpu = cpumask_first_and_and(&priv->lmask, mask_val, cpu_online_mask);

        if (cpu >= nr_cpu_ids)
                return -EINVAL;

        plic_irq_disable(d);

        irq_data_update_effective_affinity(d, cpumask_of(cpu));

        if (!irqd_irq_disabled(d))
                plic_irq_enable(d);

        return IRQ_SET_MASK_OK_DONE;
}
#endif

static struct irq_chip plic_edge_chip = {
        .name           = "SiFive PLIC",
        .irq_enable     = plic_irq_enable,
        .irq_disable    = plic_irq_disable,
        .irq_ack        = plic_irq_eoi,
        .irq_mask       = plic_irq_mask,
        .irq_unmask     = plic_irq_unmask,
#ifdef CONFIG_SMP
        .irq_set_affinity = plic_set_affinity,
#endif
        .irq_set_type   = plic_irq_set_type,
        .flags          = IRQCHIP_SKIP_SET_WAKE |
                          IRQCHIP_AFFINITY_PRE_STARTUP,
};

static struct irq_chip plic_chip = {
        .name           = "SiFive PLIC",
        .irq_enable     = plic_irq_enable,
        .irq_disable    = plic_irq_disable,
        .irq_mask       = plic_irq_mask,
        .irq_unmask     = plic_irq_unmask,
        .irq_eoi        = plic_irq_eoi,
#ifdef CONFIG_SMP
        .irq_set_affinity = plic_set_affinity,
#endif
        .irq_set_type   = plic_irq_set_type,
        .flags          = IRQCHIP_SKIP_SET_WAKE |
                          IRQCHIP_AFFINITY_PRE_STARTUP,
};
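
/*
 * Only PLICs with PLIC_QUIRK_EDGE_INTERRUPT set (andestech,nceplic100 and
 * thead,c900-plic in plic_match below) distinguish trigger types: edge
 * sources are switched to plic_edge_chip, where the completion write happens
 * in the ack phase instead of after the handler has run.
 */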

static int plic_irq_set_type(struct irq_data *d, unsigned int type)
{
        struct plic_priv *priv = irq_data_get_irq_chip_data(d);

        if (!test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks))
                return IRQ_SET_MASK_OK_NOCOPY;

        switch (type) {
        case IRQ_TYPE_EDGE_RISING:
                irq_set_chip_handler_name_locked(d, &plic_edge_chip,
                                                 handle_edge_irq, NULL);
                break;
        case IRQ_TYPE_LEVEL_HIGH:
                irq_set_chip_handler_name_locked(d, &plic_chip,
                                                 handle_fasteoi_irq, NULL);
                break;
        default:
                return -EINVAL;
        }

        return IRQ_SET_MASK_OK;
}
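
/*
 * System suspend/resume support: priorities are saved one bit per source in
 * priv->prio_save (this driver only ever programs them to 0 or 1) and each
 * context's enable bits are saved in handler->enable_save, one u32 per 32
 * sources.
 */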

static int plic_irq_suspend(void)
{
        unsigned int i, cpu;
        unsigned long flags;
        u32 __iomem *reg;
        struct plic_priv *priv;

        priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;

        for (i = 0; i < priv->nr_irqs; i++)
                __assign_bit(i, priv->prio_save,
                             readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID));

        for_each_cpu(cpu, cpu_present_mask) {
                struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

                if (!handler->present)
                        continue;

                raw_spin_lock_irqsave(&handler->enable_lock, flags);
                for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) {
                        reg = handler->enable_base + i * sizeof(u32);
                        handler->enable_save[i] = readl(reg);
                }
                raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
        }

        return 0;
}

static void plic_irq_resume(void)
{
        unsigned int i, index, cpu;
        unsigned long flags;
        u32 __iomem *reg;
        struct plic_priv *priv;

        priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;

        for (i = 0; i < priv->nr_irqs; i++) {
                index = BIT_WORD(i);
                writel((priv->prio_save[index] & BIT_MASK(i)) ? 1 : 0,
                       priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID);
        }

        for_each_cpu(cpu, cpu_present_mask) {
                struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

                if (!handler->present)
                        continue;

                raw_spin_lock_irqsave(&handler->enable_lock, flags);
                for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) {
                        reg = handler->enable_base + i * sizeof(u32);
                        writel(handler->enable_save[i], reg);
                }
                raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
        }
}

static struct syscore_ops plic_irq_syscore_ops = {
        .suspend        = plic_irq_suspend,
        .resume         = plic_irq_resume,
};

static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
                              irq_hw_number_t hwirq)
{
        struct plic_priv *priv = d->host_data;

        irq_domain_set_info(d, irq, hwirq, &plic_chip, d->host_data,
                            handle_fasteoi_irq, NULL, NULL);
        irq_set_noprobe(irq);
        irq_set_affinity(irq, &priv->lmask);

        return 0;
}

static int plic_irq_domain_translate(struct irq_domain *d,
                                     struct irq_fwspec *fwspec,
                                     unsigned long *hwirq,
                                     unsigned int *type)
{
        struct plic_priv *priv = d->host_data;

        /* For DT, gsi_base is always zero. */
        if (fwspec->param[0] >= priv->gsi_base)
                fwspec->param[0] = fwspec->param[0] - priv->gsi_base;

        if (test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks))
                return irq_domain_translate_twocell(d, fwspec, hwirq, type);

        return irq_domain_translate_onecell(d, fwspec, hwirq, type);
}

static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
                                 unsigned int nr_irqs, void *arg)
{
        unsigned int type;
        irq_hw_number_t hwirq;
        struct irq_fwspec *fwspec = arg;
        int i, ret;

        ret = plic_irq_domain_translate(domain, fwspec, &hwirq, &type);
        if (ret)
                return ret;

        for (i = 0; i < nr_irqs; i++) {
                ret = plic_irqdomain_map(domain, virq + i, hwirq + i);
                if (ret)
                        return ret;
        }

        return 0;
}

static const struct irq_domain_ops plic_irqdomain_ops = {
        .translate      = plic_irq_domain_translate,
        .alloc          = plic_irq_domain_alloc,
        .free           = irq_domain_free_irqs_top,
};

/*
 * Handling an interrupt is a two-step process: first you claim the interrupt
 * by reading the claim register, then you complete the interrupt by writing
 * that source ID back to the same claim register. This automatically enables
 * and disables the interrupt, so there's nothing else to do.
 */
static void plic_handle_irq(struct irq_desc *desc)
{
        struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
        struct irq_chip *chip = irq_desc_get_chip(desc);
        void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
        irq_hw_number_t hwirq;

        WARN_ON_ONCE(!handler->present);

        chained_irq_enter(chip, desc);

        while ((hwirq = readl(claim))) {
                int err = generic_handle_domain_irq(handler->priv->irqdomain,
                                                    hwirq);

                if (unlikely(err)) {
                        pr_warn_ratelimited("%pfwP: can't find mapping for hwirq %lu\n",
                                            handler->priv->fwnode, hwirq);
                }
        }

        chained_irq_exit(chip, desc);
}
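
/*
 * With all source priorities hardwired to 1, PLIC_ENABLE_THRESHOLD (0) lets
 * every enabled source interrupt the hart, while PLIC_DISABLE_THRESHOLD (7)
 * gates the whole context, since this driver never programs a priority
 * above 1.
 */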

static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
{
        /* priority must be > threshold to trigger an interrupt */
        writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
}

static int plic_dying_cpu(unsigned int cpu)
{
        if (plic_parent_irq)
                disable_percpu_irq(plic_parent_irq);

        return 0;
}

static int plic_starting_cpu(unsigned int cpu)
{
        struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

        if (plic_parent_irq)
                enable_percpu_irq(plic_parent_irq,
                                  irq_get_trigger_type(plic_parent_irq));
        else
                pr_warn("%pfwP: cpu%d: parent irq not available\n",
                        handler->priv->fwnode, cpu);
        plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD);

        return 0;
}

static const struct of_device_id plic_match[] = {
        { .compatible = "sifive,plic-1.0.0" },
        { .compatible = "riscv,plic0" },
        { .compatible = "andestech,nceplic100",
          .data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) },
        { .compatible = "thead,c900-plic",
          .data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) },
        {}
};

#ifdef CONFIG_ACPI

static const struct acpi_device_id plic_acpi_match[] = {
        { "RSCV0001", 0 },
        {}
};
MODULE_DEVICE_TABLE(acpi, plic_acpi_match);

#endif

static int plic_parse_nr_irqs_and_contexts(struct fwnode_handle *fwnode,
                                           u32 *nr_irqs, u32 *nr_contexts,
                                           u32 *gsi_base, u32 *id)
{
        int rc;

        if (!is_of_node(fwnode)) {
                rc = riscv_acpi_get_gsi_info(fwnode, gsi_base, id, nr_irqs, NULL);
                if (rc) {
                        pr_err("%pfwP: failed to find GSI mapping\n", fwnode);
                        return rc;
                }

                *nr_contexts = acpi_rintc_get_plic_nr_contexts(*id);
                if (WARN_ON(!*nr_contexts)) {
                        pr_err("%pfwP: no PLIC context available\n", fwnode);
                        return -EINVAL;
                }

                return 0;
        }

        rc = of_property_read_u32(to_of_node(fwnode), "riscv,ndev", nr_irqs);
        if (rc) {
                pr_err("%pfwP: riscv,ndev property not available\n", fwnode);
                return rc;
        }

        *nr_contexts = of_irq_count(to_of_node(fwnode));
        if (WARN_ON(!(*nr_contexts))) {
                pr_err("%pfwP: no PLIC context available\n", fwnode);
                return -EINVAL;
        }

        *gsi_base = 0;
        *id = 0;

        return 0;
}

static int plic_parse_context_parent(struct fwnode_handle *fwnode, u32 context,
                                     u32 *parent_hwirq, int *parent_cpu, u32 id)
{
        struct of_phandle_args parent;
        unsigned long hartid;
        int rc;

        if (!is_of_node(fwnode)) {
                hartid = acpi_rintc_ext_parent_to_hartid(id, context);
                if (hartid == INVALID_HARTID)
                        return -EINVAL;

                *parent_cpu = riscv_hartid_to_cpuid(hartid);
                *parent_hwirq = RV_IRQ_EXT;
                return 0;
        }

        rc = of_irq_parse_one(to_of_node(fwnode), context, &parent);
        if (rc)
                return rc;

        rc = riscv_of_parent_hartid(parent.np, &hartid);
        if (rc)
                return rc;

        *parent_hwirq = parent.args[0];
        *parent_cpu = riscv_hartid_to_cpuid(hartid);

        return 0;
}

static int plic_probe(struct fwnode_handle *fwnode)
{
        int error = 0, nr_contexts, nr_handlers = 0, cpu, i;
        unsigned long plic_quirks = 0;
        struct plic_handler *handler;
        u32 nr_irqs, parent_hwirq;
        struct plic_priv *priv;
        irq_hw_number_t hwirq;
        void __iomem *regs;
        u32 gsi_base, id, context_id;

        if (is_of_node(fwnode)) {
                const struct of_device_id *id;

                id = of_match_node(plic_match, to_of_node(fwnode));
                if (id)
                        plic_quirks = (unsigned long)id->data;

                regs = of_iomap(to_of_node(fwnode), 0);
                if (!regs)
                        return -ENOMEM;
        } else {
                regs = devm_platform_ioremap_resource(to_platform_device(fwnode->dev), 0);
                if (IS_ERR(regs))
                        return PTR_ERR(regs);
        }

        error = plic_parse_nr_irqs_and_contexts(fwnode, &nr_irqs, &nr_contexts,
                                                &gsi_base, &id);
        if (error)
                goto fail_free_regs;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv) {
                error = -ENOMEM;
                goto fail_free_regs;
        }

        priv->fwnode = fwnode;
        priv->plic_quirks = plic_quirks;
        priv->nr_irqs = nr_irqs;
        priv->regs = regs;
        priv->gsi_base = gsi_base;
        priv->acpi_plic_id = id;

        priv->prio_save = bitmap_zalloc(nr_irqs, GFP_KERNEL);
        if (!priv->prio_save) {
                error = -ENOMEM;
                goto fail_free_priv;
        }

        for (i = 0; i < nr_contexts; i++) {
                error = plic_parse_context_parent(fwnode, i, &parent_hwirq, &cpu,
                                                  priv->acpi_plic_id);
                if (error) {
                        pr_warn("%pfwP: hwirq for context%d not found\n", fwnode, i);
                        continue;
                }

                if (is_of_node(fwnode)) {
                        context_id = i;
                } else {
                        context_id = acpi_rintc_get_plic_context(priv->acpi_plic_id, i);
                        if (context_id == INVALID_CONTEXT) {
                                pr_warn("%pfwP: invalid context id for context%d\n",
                                        fwnode, i);
                                continue;
                        }
                }

                /*
                 * Skip contexts other than external interrupts for our
                 * privilege level.
                 */
                if (parent_hwirq != RV_IRQ_EXT) {
                        /* Disable S-mode enable bits if running in M-mode. */
                        if (IS_ENABLED(CONFIG_RISCV_M_MODE)) {
                                void __iomem *enable_base = priv->regs +
                                        CONTEXT_ENABLE_BASE +
                                        i * CONTEXT_ENABLE_SIZE;

                                for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
                                        __plic_toggle(enable_base, hwirq, 0);
                        }
                        continue;
                }

                if (cpu < 0) {
                        pr_warn("%pfwP: Invalid cpuid for context %d\n", fwnode, i);
                        continue;
                }

                /*
                 * When running in M-mode we need to ignore the S-mode handler.
                 * Here we assume it always comes later, but that might be a
                 * little fragile.
                 */
                handler = per_cpu_ptr(&plic_handlers, cpu);
                if (handler->present) {
                        pr_warn("%pfwP: handler already present for context %d.\n",
                                fwnode, i);
                        plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
                        goto done;
                }

                cpumask_set_cpu(cpu, &priv->lmask);
                handler->present = true;
                handler->hart_base = priv->regs + CONTEXT_BASE +
                                     context_id * CONTEXT_SIZE;
                raw_spin_lock_init(&handler->enable_lock);
                handler->enable_base = priv->regs + CONTEXT_ENABLE_BASE +
                                       context_id * CONTEXT_ENABLE_SIZE;
                handler->priv = priv;

                handler->enable_save = kcalloc(DIV_ROUND_UP(nr_irqs, 32),
                                               sizeof(*handler->enable_save),
                                               GFP_KERNEL);
                if (!handler->enable_save) {
                        error = -ENOMEM;
                        goto fail_cleanup_contexts;
                }
done:
                for (hwirq = 1; hwirq <= nr_irqs; hwirq++) {
                        plic_toggle(handler, hwirq, 0);
                        writel(1, priv->regs + PRIORITY_BASE +
                                  hwirq * PRIORITY_PER_ID);
                }
                nr_handlers++;
        }

        priv->irqdomain = irq_domain_create_linear(fwnode, nr_irqs + 1,
                                                   &plic_irqdomain_ops, priv);
        if (WARN_ON(!priv->irqdomain)) {
                error = -ENOMEM;
                goto fail_cleanup_contexts;
        }

        /*
         * We can have multiple PLIC instances so setup global state
         * and register syscore operations only once after context
         * handlers of all online CPUs are initialized.
         */
        if (!plic_global_setup_done) {
                struct irq_domain *domain;
                bool global_setup = true;

                for_each_online_cpu(cpu) {
                        handler = per_cpu_ptr(&plic_handlers, cpu);
                        if (!handler->present) {
                                global_setup = false;
                                break;
                        }
                }

                if (global_setup) {
                        /* Find parent domain and register chained handler */
                        domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(),
                                                          DOMAIN_BUS_ANY);
                        if (domain)
                                plic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT);
                        if (plic_parent_irq)
                                irq_set_chained_handler(plic_parent_irq, plic_handle_irq);

                        cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
                                          "irqchip/sifive/plic:starting",
                                          plic_starting_cpu, plic_dying_cpu);
                        register_syscore_ops(&plic_irq_syscore_ops);
                        plic_global_setup_done = true;
                }
        }

        if (!acpi_disabled)
                acpi_dev_clear_dependencies(ACPI_COMPANION(fwnode->dev));

        pr_info("%pfwP: mapped %d interrupts with %d handlers for %d contexts.\n",
                fwnode, nr_irqs, nr_handlers, nr_contexts);

        return 0;

fail_cleanup_contexts:
        for (i = 0; i < nr_contexts; i++) {
                if (plic_parse_context_parent(fwnode, i, &parent_hwirq, &cpu,
                                              priv->acpi_plic_id))
                        continue;
                if (parent_hwirq != RV_IRQ_EXT || cpu < 0)
                        continue;

                handler = per_cpu_ptr(&plic_handlers, cpu);
                handler->present = false;
                handler->hart_base = NULL;
                handler->enable_base = NULL;
                kfree(handler->enable_save);
                handler->enable_save = NULL;
                handler->priv = NULL;
        }
        bitmap_free(priv->prio_save);
fail_free_priv:
        kfree(priv);
fail_free_regs:
        iounmap(regs);
        return error;
}

static int plic_platform_probe(struct platform_device *pdev)
{
        return plic_probe(pdev->dev.fwnode);
}

static struct platform_driver plic_driver = {
        .driver = {
                .name                   = "riscv-plic",
                .of_match_table         = plic_match,
                .suppress_bind_attrs    = true,
                .acpi_match_table       = ACPI_PTR(plic_acpi_match),
        },
        .probe = plic_platform_probe,
};
builtin_platform_driver(plic_driver);

static int __init plic_early_probe(struct device_node *node,
                                   struct device_node *parent)
{
        return plic_probe(&node->fwnode);
}

IRQCHIP_DECLARE(riscv, "allwinner,sun20i-d1-plic", plic_early_probe);