// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2018 Christoph Hellwig
 */
#define pr_fmt(fmt) "plic: " fmt
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <asm/smp.h>
/*
 * This driver implements a version of the RISC-V PLIC with the actual layout
 * specified in chapter 8 of the SiFive U5 Coreplex Series Manual:
 *
 *     https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf
 *
 * The largest number of interrupt sources supported by devices marked as
 * 'sifive,plic-1.0.0' is 1024, of which device 0 is defined as non-existent
 * by the RISC-V Privileged Specification.
 */

#define MAX_DEVICES			1024
#define MAX_CONTEXTS			15872
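
/*
 * Note (a derivation, not a statement from the manual cited above): 15872
 * appears to be the number of per-context control windows that fit between
 * CONTEXT_BASE (0x200000, defined below) and the end of the 64 MiB PLIC
 * region: (0x4000000 - 0x200000) / 0x1000 = 15872.
 */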
/*
 * Each interrupt source has a priority register associated with it.
 * We always hardwire it to one in Linux.
 */
#define PRIORITY_BASE			0
#define PRIORITY_PER_ID			4
/*
 * Each hart context has a vector of interrupt enable bits associated with it.
 * There's one bit for each interrupt source.
 */
#define ENABLE_BASE			0x2000
#define ENABLE_PER_HART			0x80
/*
 * Each hart context has a set of control registers associated with it.  Right
 * now there are only two: a source priority threshold over which the hart will
 * take an interrupt, and a register to claim interrupts.
 */
#define CONTEXT_BASE			0x200000
#define CONTEXT_PER_HART		0x1000
#define CONTEXT_THRESHOLD		0x00
#define CONTEXT_CLAIM			0x04

#define PLIC_DISABLE_THRESHOLD		0xf
#define PLIC_ENABLE_THRESHOLD		0
struct plic_priv {
	struct cpumask lmask;
	struct irq_domain *irqdomain;
	void __iomem *regs;
};

struct plic_handler {
	bool			present;
	void __iomem		*hart_base;
	/*
	 * Protect mask operations on the registers given that we can't
	 * assume atomic memory operations work on them.
	 */
	raw_spinlock_t		enable_lock;
	void __iomem		*enable_base;
	struct plic_priv	*priv;
};
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
static inline void plic_toggle(struct plic_handler *handler,
				int hwirq, int enable)
{
	u32 __iomem *reg = handler->enable_base + (hwirq / 32) * sizeof(u32);
	u32 hwirq_mask = 1 << (hwirq % 32);

	/* read-modify-write under the lock; see enable_lock's comment above */
	raw_spin_lock(&handler->enable_lock);
	if (enable)
		writel(readl(reg) | hwirq_mask, reg);
	else
		writel(readl(reg) & ~hwirq_mask, reg);
	raw_spin_unlock(&handler->enable_lock);
}
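
/*
 * Worked example for the arithmetic above: hwirq 37 lands in enable word
 * 37 / 32 = 1 (byte offset 4) as bit 37 % 32 = 5, i.e. mask 0x20.
 */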
static inline void plic_irq_toggle(const struct cpumask *mask,
				   struct irq_data *d, int enable)
{
	int cpu;
	struct plic_priv *priv = irq_get_chip_data(d->irq);

	/* a priority of one enables the source; zero disables it */
	writel(enable, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
	for_each_cpu(cpu, mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		if (handler->present &&
		    cpumask_test_cpu(cpu, &handler->priv->lmask))
			plic_toggle(handler, d->hwirq, enable);
	}
}
static void plic_irq_unmask(struct irq_data *d)
{
	struct cpumask amask;
	unsigned int cpu;
	struct plic_priv *priv = irq_get_chip_data(d->irq);

	cpumask_and(&amask, &priv->lmask, cpu_online_mask);
	cpu = cpumask_any_and(irq_data_get_affinity_mask(d), &amask);
	if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
		return;
	plic_irq_toggle(cpumask_of(cpu), d, 1);
}
static void plic_irq_mask(struct irq_data *d)
{
	struct plic_priv *priv = irq_get_chip_data(d->irq);

	plic_irq_toggle(&priv->lmask, d, 0);
}
#ifdef CONFIG_SMP
static int plic_set_affinity(struct irq_data *d,
			     const struct cpumask *mask_val, bool force)
{
	unsigned int cpu;
	struct cpumask amask;
	struct plic_priv *priv = irq_get_chip_data(d->irq);

	cpumask_and(&amask, &priv->lmask, mask_val);

	if (force)
		cpu = cpumask_first(&amask);
	else
		cpu = cpumask_any_and(&amask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	/* disable everywhere, then enable on the one chosen context */
	plic_irq_toggle(&priv->lmask, d, 0);
	plic_irq_toggle(cpumask_of(cpu), d, 1);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK_DONE;
}
#endif
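
/*
 * Note: the hardware can route one source to several contexts at once, but
 * plic_set_affinity() above enables each source in exactly one context,
 * presumably so that multiple harts never race to claim the same interrupt.
 */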
static void plic_irq_eoi(struct irq_data *d)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	/* complete the interrupt by writing the source ID back */
	writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
}
static struct irq_chip plic_chip = {
	.name		= "SiFive PLIC",
	.irq_mask	= plic_irq_mask,
	.irq_unmask	= plic_irq_unmask,
	.irq_eoi	= plic_irq_eoi,
#ifdef CONFIG_SMP
	.irq_set_affinity = plic_set_affinity,
#endif
};
static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hwirq)
{
	irq_domain_set_info(d, irq, hwirq, &plic_chip, d->host_data,
			    handle_fasteoi_irq, NULL, NULL);
	irq_set_noprobe(irq);
	return 0;
}
static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type;
	struct irq_fwspec *fwspec = arg;

	ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = plic_irqdomain_map(domain, virq + i, hwirq + i);
		if (ret)
			return ret;
	}

	return 0;
}
static const struct irq_domain_ops plic_irqdomain_ops = {
	.translate	= irq_domain_translate_onecell,
	.alloc		= plic_irq_domain_alloc,
	.free		= irq_domain_free_irqs_top,
};
/*
 * Handling an interrupt is a two-step process: first you claim the interrupt
 * by reading the claim register, then you complete the interrupt by writing
 * that source ID back to the same claim register.  This automatically enables
 * and disables the interrupt, so there's nothing else to do.
 */
static void plic_handle_irq(struct pt_regs *regs)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
	void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
	irq_hw_number_t hwirq;

	WARN_ON_ONCE(!handler->present);

	csr_clear(CSR_IE, IE_EIE);
	while ((hwirq = readl(claim))) {
		int irq = irq_find_mapping(handler->priv->irqdomain, hwirq);

		if (unlikely(irq <= 0))
			pr_warn_ratelimited("can't find mapping for hwirq %lu\n",
					hwirq);
		else
			generic_handle_irq(irq);
	}
	csr_set(CSR_IE, IE_EIE);
}
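
/*
 * Minimal sketch of the claim/complete handshake described above, with the
 * generic-IRQ plumbing stripped out.  Illustrative only: "handle" stands in
 * for whatever services the source, and the real driver completes via
 * plic_irq_eoi() rather than inline as shown here.
 */
static inline void plic_claim_complete_example(void __iomem *claim,
					       void (*handle)(u32 source))
{
	u32 source;

	while ((source = readl(claim))) {	/* claim: read the source ID */
		handle(source);			/* service the device */
		writel(source, claim);		/* complete: write the ID back */
	}
}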
/*
 * Walk up the DT tree until we find an active RISC-V core (HART) node and
 * extract the cpuid from it.
 */
static int plic_find_hart_id(struct device_node *node)
{
	for (; node; node = node->parent) {
		if (of_device_is_compatible(node, "riscv"))
			return riscv_of_processor_hartid(node);
	}

	return -1;
}
static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
{
	/* priority must be > threshold to trigger an interrupt */
	writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
}
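
/*
 * Since every source priority is hardwired to one (see PRIORITY_BASE above),
 * PLIC_ENABLE_THRESHOLD (0) lets all enabled sources through, while
 * PLIC_DISABLE_THRESHOLD (0xf) masks the whole context.  The CPU hotplug
 * callbacks below rely on exactly this.
 */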
static int plic_dying_cpu(unsigned int cpu)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	csr_clear(CSR_IE, IE_EIE);
	plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);

	return 0;
}
static int plic_starting_cpu(unsigned int cpu)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	csr_set(CSR_IE, IE_EIE);
	plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD);

	return 0;
}
static int __init plic_init(struct device_node *node,
		struct device_node *parent)
{
	int error = 0, nr_contexts, nr_handlers = 0, i;
	u32 nr_irqs;
	struct plic_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->regs = of_iomap(node, 0);
	if (WARN_ON(!priv->regs)) {
		error = -EIO;
		goto out_free_priv;
	}

	error = -EINVAL;
	of_property_read_u32(node, "riscv,ndev", &nr_irqs);
	if (WARN_ON(!nr_irqs))
		goto out_iounmap;

	nr_contexts = of_irq_count(node);
	if (WARN_ON(!nr_contexts))
		goto out_iounmap;
	if (WARN_ON(nr_contexts < num_possible_cpus()))
		goto out_iounmap;

	error = -ENOMEM;
	priv->irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
			&plic_irqdomain_ops, priv);
	if (WARN_ON(!priv->irqdomain))
		goto out_iounmap;

	for (i = 0; i < nr_contexts; i++) {
		struct of_phandle_args parent;
		struct plic_handler *handler;
		irq_hw_number_t hwirq;
		int cpu, hartid;

		if (of_irq_parse_one(node, i, &parent)) {
			pr_err("failed to parse parent for context %d.\n", i);
			continue;
		}

		/*
		 * Skip contexts other than external interrupts for our
		 * privilege level.
		 */
		if (parent.args[0] != RV_IRQ_EXT)
			continue;

		hartid = plic_find_hart_id(parent.np);
		if (hartid < 0) {
			pr_warn("failed to parse hart ID for context %d.\n", i);
			continue;
		}

		cpu = riscv_hartid_to_cpuid(hartid);
		if (cpu < 0) {
			pr_warn("Invalid cpuid for context %d\n", i);
			continue;
		}

		/*
		 * When running in M-mode we need to ignore the S-mode handler.
		 * Here we assume it always comes later, but that might be a
		 * little fragile.
		 */
		handler = per_cpu_ptr(&plic_handlers, cpu);
		if (handler->present) {
			pr_warn("handler already present for context %d.\n", i);
			plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
			goto done;
		}

		cpumask_set_cpu(cpu, &priv->lmask);
		handler->present = true;
		handler->hart_base =
			priv->regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
		raw_spin_lock_init(&handler->enable_lock);
		handler->enable_base =
			priv->regs + ENABLE_BASE + i * ENABLE_PER_HART;
		handler->priv = priv;
done:
		/* start with all sources disabled for this context */
		for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
			plic_toggle(handler, hwirq, 0);
		nr_handlers++;
	}

	cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
			  "irqchip/sifive/plic:starting",
			  plic_starting_cpu, plic_dying_cpu);
	pr_info("mapped %d interrupts with %d handlers for %d contexts.\n",
		nr_irqs, nr_handlers, nr_contexts);
	set_handle_irq(plic_handle_irq);
	return 0;

out_iounmap:
	iounmap(priv->regs);
out_free_priv:
	kfree(priv);
	return error;
}
IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init);
IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */
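
/*
 * Illustrative device-tree node for this binding.  The values below are
 * assumptions for a hypothetical two-hart system, not taken from a real
 * board; consult the sifive,plic-1.0.0 binding for the authoritative schema.
 *
 *	plic: interrupt-controller@c000000 {
 *		compatible = "sifive,plic-1.0.0";
 *		reg = <0xc000000 0x4000000>;
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *		riscv,ndev = <53>;
 *		interrupts-extended = <&cpu0_intc 11>, <&cpu0_intc 9>,
 *				      <&cpu1_intc 11>, <&cpu1_intc 9>;
 *	};
 */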