// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2018 Christoph Hellwig
 */
#define pr_fmt(fmt) "plic: " fmt
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <asm/smp.h>

/*
 * This driver implements a version of the RISC-V PLIC with the actual layout
 * specified in chapter 8 of the SiFive U5 Coreplex Series Manual:
 *
 *     https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf
 *
 * The largest number supported by devices marked as 'sifive,plic-1.0.0' is
 * 1024, of which device 0 is defined as non-existent by the RISC-V Privileged
 * Spec.
 */

#define MAX_DEVICES			1024
#define MAX_CONTEXTS			15872

/*
 * Each interrupt source has a priority register associated with it.
 * We always hardwire it to one in Linux.
 */
#define PRIORITY_BASE			0
#define PRIORITY_PER_ID			4

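/*
 * For example, the priority register for interrupt source 7 lives at
 * offset PRIORITY_BASE + 7 * PRIORITY_PER_ID = 0x1c from the PLIC base;
 * plic_irq_toggle() below writes 1 there to enable the source and 0 to
 * disable it.
 */
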
/*
 * Each hart context has a vector of interrupt enable bits associated with it.
 * There's one bit for each interrupt source.
 */
#define ENABLE_BASE			0x2000
#define ENABLE_PER_HART			0x80

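/*
 * For example, the enable bit for source 37 in context 2 is bit
 * 37 % 32 = 5 of the 32-bit word at ENABLE_BASE + 2 * ENABLE_PER_HART +
 * (37 / 32) * 4 = 0x2104; plic_toggle() below performs exactly this
 * read-modify-write.
 */
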
/*
 * Each hart context has a set of control registers associated with it.  Right
 * now there are only two: a source priority threshold over which the hart will
 * take an interrupt, and a register to claim interrupts.
 */
#define CONTEXT_BASE			0x200000
#define CONTEXT_PER_HART		0x1000
#define CONTEXT_THRESHOLD		0x00
#define CONTEXT_CLAIM			0x04

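/*
 * For example, context 1's threshold register sits at CONTEXT_BASE +
 * 1 * CONTEXT_PER_HART + CONTEXT_THRESHOLD = 0x201000 and its claim
 * register at 0x201004; plic_init() below derives hart_base the same way.
 */
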
#define PLIC_DISABLE_THRESHOLD		0x7
#define PLIC_ENABLE_THRESHOLD		0

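/*
 * Since every source priority is hardwired to 1 and a source only
 * interrupts a hart when its priority exceeds the context's threshold,
 * a threshold of 7 masks all sources for that context and a threshold
 * of 0 admits them all; see plic_set_threshold() below.
 */
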
struct plic_priv {
	struct cpumask lmask;
	struct irq_domain *irqdomain;
	void __iomem *regs;
};

struct plic_handler {
	bool			present;
	void __iomem		*hart_base;
	/*
	 * Protect mask operations on the registers given that we can't
	 * assume atomic memory operations work on them.
	 */
	raw_spinlock_t		enable_lock;
	void __iomem		*enable_base;
	struct plic_priv	*priv;
};

static int plic_parent_irq;
static bool plic_cpuhp_setup_done;
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);

static inline void plic_toggle(struct plic_handler *handler,
				int hwirq, int enable)
{
	u32 __iomem *reg = handler->enable_base + (hwirq / 32) * sizeof(u32);
	u32 hwirq_mask = 1 << (hwirq % 32);

	raw_spin_lock(&handler->enable_lock);
	if (enable)
		writel(readl(reg) | hwirq_mask, reg);
	else
		writel(readl(reg) & ~hwirq_mask, reg);
	raw_spin_unlock(&handler->enable_lock);
}

static inline void plic_irq_toggle(const struct cpumask *mask,
				   struct irq_data *d, int enable)
{
	int cpu;
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	writel(enable, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
	for_each_cpu(cpu, mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		if (handler->present &&
		    cpumask_test_cpu(cpu, &handler->priv->lmask))
			plic_toggle(handler, d->hwirq, enable);
	}
}

static void plic_irq_unmask(struct irq_data *d)
{
	struct cpumask amask;
	unsigned int cpu;
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	cpumask_and(&amask, &priv->lmask, cpu_online_mask);
	cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
			      &amask);
	if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
		return;
	plic_irq_toggle(cpumask_of(cpu), d, 1);
}

static void plic_irq_mask(struct irq_data *d)
{
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	plic_irq_toggle(&priv->lmask, d, 0);
}

#ifdef CONFIG_SMP
static int plic_set_affinity(struct irq_data *d,
			     const struct cpumask *mask_val, bool force)
{
	unsigned int cpu;
	struct cpumask amask;
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	cpumask_and(&amask, &priv->lmask, mask_val);

	if (force)
		cpu = cpumask_first(&amask);
	else
		cpu = cpumask_any_and(&amask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	plic_irq_toggle(&priv->lmask, d, 0);
	plic_irq_toggle(cpumask_of(cpu), d, !irqd_irq_masked(d));

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK_DONE;
}
#endif

static void plic_irq_eoi(struct irq_data *d)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
}

static struct irq_chip plic_chip = {
	.name		= "SiFive PLIC",
	.irq_mask	= plic_irq_mask,
	.irq_unmask	= plic_irq_unmask,
	.irq_eoi	= plic_irq_eoi,
#ifdef CONFIG_SMP
	.irq_set_affinity = plic_set_affinity,
#endif
};

static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hwirq)
{
	struct plic_priv *priv = d->host_data;

	irq_domain_set_info(d, irq, hwirq, &plic_chip, d->host_data,
			    handle_fasteoi_irq, NULL, NULL);
	irq_set_noprobe(irq);
	irq_set_affinity(irq, &priv->lmask);
	return 0;
}

static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type;
	struct irq_fwspec *fwspec = arg;

	ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = plic_irqdomain_map(domain, virq + i, hwirq + i);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct irq_domain_ops plic_irqdomain_ops = {
	.translate	= irq_domain_translate_onecell,
	.alloc		= plic_irq_domain_alloc,
	.free		= irq_domain_free_irqs_top,
};

/*
 * Handling an interrupt is a two-step process: first you claim the interrupt
 * by reading the claim register, then you complete the interrupt by writing
 * that source ID back to the same claim register.  This automatically enables
 * and disables the interrupt, so there's nothing else to do.
 */
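/*
 * Per the PLIC specification, a claim read returns the ID of the
 * highest-priority pending source, or zero when nothing is pending;
 * that zero is what terminates the claim loop in plic_handle_irq().
 */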
static void plic_handle_irq(struct irq_desc *desc)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
	irq_hw_number_t hwirq;

	WARN_ON_ONCE(!handler->present);

	chained_irq_enter(chip, desc);

	while ((hwirq = readl(claim))) {
		int irq = irq_find_mapping(handler->priv->irqdomain, hwirq);

		if (unlikely(irq <= 0))
			pr_warn_ratelimited("can't find mapping for hwirq %lu\n",
					hwirq);
		else
			generic_handle_irq(irq);
	}

	chained_irq_exit(chip, desc);
}

static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
{
	/* priority must be > threshold to trigger an interrupt */
	writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
}

static int plic_dying_cpu(unsigned int cpu)
{
	if (plic_parent_irq)
		disable_percpu_irq(plic_parent_irq);

	return 0;
}

static int plic_starting_cpu(unsigned int cpu)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	if (plic_parent_irq)
		enable_percpu_irq(plic_parent_irq,
				  irq_get_trigger_type(plic_parent_irq));
	else
		pr_warn("cpu%d: parent irq not available\n", cpu);
	plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD);

	return 0;
}

static int __init plic_init(struct device_node *node,
		struct device_node *parent)
{
	int error = 0, nr_contexts, nr_handlers = 0, i;
	u32 nr_irqs;
	struct plic_priv *priv;
	struct plic_handler *handler;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->regs = of_iomap(node, 0);
	if (WARN_ON(!priv->regs)) {
		error = -EIO;
		goto out_free_priv;
	}

	error = -EINVAL;
	of_property_read_u32(node, "riscv,ndev", &nr_irqs);
	if (WARN_ON(!nr_irqs))
		goto out_iounmap;

	nr_contexts = of_irq_count(node);
	if (WARN_ON(!nr_contexts))
		goto out_iounmap;

	error = -ENOMEM;
	priv->irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
			&plic_irqdomain_ops, priv);
	if (WARN_ON(!priv->irqdomain))
		goto out_iounmap;

	for (i = 0; i < nr_contexts; i++) {
		struct of_phandle_args parent;
		irq_hw_number_t hwirq;
		int cpu, hartid;

		if (of_irq_parse_one(node, i, &parent)) {
			pr_err("failed to parse parent for context %d.\n", i);
			continue;
		}

		/*
		 * Skip contexts other than external interrupts for our
		 * privilege level.
		 */
		if (parent.args[0] != RV_IRQ_EXT)
			continue;

		hartid = riscv_of_parent_hartid(parent.np);
		if (hartid < 0) {
			pr_warn("failed to parse hart ID for context %d.\n", i);
			continue;
		}

		cpu = riscv_hartid_to_cpuid(hartid);
		if (cpu < 0) {
			pr_warn("Invalid cpuid for context %d\n", i);
			continue;
		}

		/* Find parent domain and register chained handler */
		if (!plic_parent_irq && irq_find_host(parent.np)) {
			plic_parent_irq = irq_of_parse_and_map(node, i);
			if (plic_parent_irq)
				irq_set_chained_handler(plic_parent_irq,
							plic_handle_irq);
		}

		/*
		 * When running in M-mode we need to ignore the S-mode handler.
		 * Here we assume it always comes later, but that might be a
		 * little fragile.
		 */
		handler = per_cpu_ptr(&plic_handlers, cpu);
		if (handler->present) {
			pr_warn("handler already present for context %d.\n", i);
			plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
			goto done;
		}

		cpumask_set_cpu(cpu, &priv->lmask);
		handler->present = true;
		handler->hart_base =
			priv->regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
		raw_spin_lock_init(&handler->enable_lock);
		handler->enable_base =
			priv->regs + ENABLE_BASE + i * ENABLE_PER_HART;
		handler->priv = priv;
done:
		for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
			plic_toggle(handler, hwirq, 0);
		nr_handlers++;
	}

	/*
	 * We can have multiple PLIC instances so setup cpuhp state only
	 * when context handler for current/boot CPU is present.
	 */
	handler = this_cpu_ptr(&plic_handlers);
	if (handler->present && !plic_cpuhp_setup_done) {
		cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
				  "irqchip/sifive/plic:starting",
				  plic_starting_cpu, plic_dying_cpu);
		plic_cpuhp_setup_done = true;
	}

	pr_info("%pOFP: mapped %d interrupts with %d handlers for %d contexts.\n",
		node, nr_irqs, nr_handlers, nr_contexts);
	return 0;

out_iounmap:
	iounmap(priv->regs);
out_free_priv:
	kfree(priv);
	return error;
}

IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init);
IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */