// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2018 Christoph Hellwig
 */
#define pr_fmt(fmt) "plic: " fmt
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <asm/smp.h>

/*
 * This driver implements a version of the RISC-V PLIC with the actual layout
 * specified in chapter 8 of the SiFive U5 Coreplex Series Manual:
 *
 *     https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf
 *
 * The largest number supported by devices marked as 'sifive,plic-1.0.0', is
 * 1024, of which device 0 is defined as non-existent by the RISC-V Privileged
 * Spec.
 */

#define MAX_DEVICES             1024
#define MAX_CONTEXTS            15872

/*
 * Each interrupt source has a priority register associated with it.
 * We always hardwire it to one in Linux.
 */
#define PRIORITY_BASE           0
#define PRIORITY_PER_ID         4

/*
 * Each hart context has a vector of interrupt enable bits associated with it.
 * There's one bit for each interrupt source.
 */
#define ENABLE_BASE             0x2000
#define ENABLE_PER_HART         0x80

/*
 * Each hart context has a set of control registers associated with it.  Right
 * now there's only two: a source priority threshold over which the hart will
 * take an interrupt, and a register to claim interrupts.
 */
#define CONTEXT_BASE            0x200000
#define CONTEXT_PER_HART        0x1000
#define CONTEXT_THRESHOLD       0x00
#define CONTEXT_CLAIM           0x04
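
/*
 * Worked example of the layout above (the values follow directly from the
 * macros): for hwirq 42 in context 1, the enable bit lives at offset
 * ENABLE_BASE + 1 * ENABLE_PER_HART + (42 / 32) * 4 = 0x2084, bit
 * 42 % 32 = 10, and that context's claim/complete register sits at
 * CONTEXT_BASE + 1 * CONTEXT_PER_HART + CONTEXT_CLAIM = 0x201004.
 */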

static void __iomem *plic_regs;

struct plic_handler {
        bool                    present;
        void __iomem            *hart_base;
        /*
         * Protect mask operations on the registers given that we can't
         * assume atomic memory operations work on them.
         */
        raw_spinlock_t          enable_lock;
        void __iomem            *enable_base;
};
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
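
/*
 * Set or clear one enable bit for this handler.  The PLIC has no separate
 * set/clear registers, so this is a read-modify-write of the 32-bit enable
 * word, serialized by enable_lock.
 */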
static inline void plic_toggle(struct plic_handler *handler,
                                int hwirq, int enable)
{
        u32 __iomem *reg = handler->enable_base + (hwirq / 32) * sizeof(u32);
        u32 hwirq_mask = 1 << (hwirq % 32);

        raw_spin_lock(&handler->enable_lock);
        if (enable)
                writel(readl(reg) | hwirq_mask, reg);
        else
                writel(readl(reg) & ~hwirq_mask, reg);
        raw_spin_unlock(&handler->enable_lock);
}
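
/*
 * Enable or disable a source globally: priority 1 (the only priority Linux
 * uses) lets it interrupt, priority 0 blocks it entirely, and the
 * per-context enable bit is toggled on every CPU in @mask.
 */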
static inline void plic_irq_toggle(const struct cpumask *mask,
                                   int hwirq, int enable)
{
        int cpu;

        writel(enable, plic_regs + PRIORITY_BASE + hwirq * PRIORITY_PER_ID);
        for_each_cpu(cpu, mask) {
                struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

                if (handler->present)
                        plic_toggle(handler, hwirq, enable);
        }
}

static void plic_irq_enable(struct irq_data *d)
{
        unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
                                           cpu_online_mask);
        if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
                return;
        plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
}

static void plic_irq_disable(struct irq_data *d)
{
        plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
}
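
/*
 * An interrupt stays enabled on exactly one CPU at a time; moving it means
 * disabling it everywhere and re-enabling it on the new target, which is
 * only done while the interrupt is not disabled at the irq core level.
 */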
#ifdef CONFIG_SMP
static int plic_set_affinity(struct irq_data *d,
                             const struct cpumask *mask_val, bool force)
{
        unsigned int cpu;

        if (force)
                cpu = cpumask_first(mask_val);
        else
                cpu = cpumask_any_and(mask_val, cpu_online_mask);

        if (cpu >= nr_cpu_ids)
                return -EINVAL;

        if (!irqd_irq_disabled(d)) {
                plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
                plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
        }

        irq_data_update_effective_affinity(d, cpumask_of(cpu));

        return IRQ_SET_MASK_OK_DONE;
}
#endif

static struct irq_chip plic_chip = {
        .name           = "SiFive PLIC",
        /*
         * There is no need to mask/unmask PLIC interrupts. They are "masked"
         * by reading claim and "unmasked" when writing it back.
         */
        .irq_enable     = plic_irq_enable,
        .irq_disable    = plic_irq_disable,
#ifdef CONFIG_SMP
        .irq_set_affinity = plic_set_affinity,
#endif
};
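
/*
 * Every source shares plic_chip and the simple flow handler: the
 * claim/complete dance in plic_handle_irq() already acknowledges the
 * interrupt, so no per-IRQ ack or mask callbacks are required.
 */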
static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
                              irq_hw_number_t hwirq)
{
        irq_set_chip_and_handler(irq, &plic_chip, handle_simple_irq);
        irq_set_chip_data(irq, NULL);
        irq_set_noprobe(irq);
        return 0;
}

static const struct irq_domain_ops plic_irqdomain_ops = {
        .map            = plic_irqdomain_map,
        .xlate          = irq_domain_xlate_onecell,
};

static struct irq_domain *plic_irqdomain;

/*
 * Handling an interrupt is a two-step process: first you claim the interrupt
 * by reading the claim register, then you complete the interrupt by writing
 * that source ID back to the same claim register. This automatically enables
 * and disables the interrupt, so there's nothing else to do.
 */
static void plic_handle_irq(struct pt_regs *regs)
{
        struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
        void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
        irq_hw_number_t hwirq;

        WARN_ON_ONCE(!handler->present);

        csr_clear(sie, SIE_SEIE);
        while ((hwirq = readl(claim))) {
                int irq = irq_find_mapping(plic_irqdomain, hwirq);

                if (unlikely(irq <= 0))
                        pr_warn_ratelimited("can't find mapping for hwirq %lu\n",
                                        hwirq);
                else
                        generic_handle_irq(irq);
                writel(hwirq, claim);
        }
        csr_set(sie, SIE_SEIE);
}

/*
 * Walk up the DT tree until we find an active RISC-V core (HART) node and
 * extract the cpuid from it.
 */
static int plic_find_hart_id(struct device_node *node)
{
        for (; node; node = node->parent) {
                if (of_device_is_compatible(node, "riscv"))
                        return riscv_of_processor_hartid(node);
        }

        return -1;
}
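
/*
 * Probe path: map the control registers, size the IRQ domain from the
 * "riscv,ndev" property, then walk the interrupt specifiers and bind one
 * per-CPU handler for each usable context.
 */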
static int __init plic_init(struct device_node *node,
                struct device_node *parent)
{
        int error = 0, nr_contexts, nr_handlers = 0, i;
        u32 nr_irqs;

        if (plic_regs) {
                pr_warn("PLIC already present.\n");
                return -ENXIO;
        }

        plic_regs = of_iomap(node, 0);
        if (WARN_ON(!plic_regs))
                return -EIO;

        error = -EINVAL;
        of_property_read_u32(node, "riscv,ndev", &nr_irqs);
        if (WARN_ON(!nr_irqs))
                goto out_iounmap;

        nr_contexts = of_irq_count(node);
        if (WARN_ON(!nr_contexts))
                goto out_iounmap;
        if (WARN_ON(nr_contexts < num_possible_cpus()))
                goto out_iounmap;
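
        /*
         * hwirq 0 means "no interrupt pending" on the PLIC, so the linear
         * domain is sized to nr_irqs + 1 to keep hwirq numbers 1:1.
         */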
        error = -ENOMEM;
        plic_irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
                        &plic_irqdomain_ops, NULL);
        if (WARN_ON(!plic_irqdomain))
                goto out_iounmap;

        for (i = 0; i < nr_contexts; i++) {
                struct of_phandle_args parent;
                struct plic_handler *handler;
                irq_hw_number_t hwirq;
                int cpu, hartid;

                if (of_irq_parse_one(node, i, &parent)) {
                        pr_err("failed to parse parent for context %d.\n", i);
                        continue;
                }

                /* skip context holes */
                if (parent.args[0] == -1)
                        continue;

                hartid = plic_find_hart_id(parent.np);
                if (hartid < 0) {
                        pr_warn("failed to parse hart ID for context %d.\n", i);
                        continue;
                }

                cpu = riscv_hartid_to_cpuid(hartid);
                if (cpu < 0) {
                        pr_warn("Invalid cpuid for context %d\n", i);
                        continue;
                }

                handler = per_cpu_ptr(&plic_handlers, cpu);
                if (handler->present) {
                        pr_warn("handler already present for context %d.\n", i);
                        continue;
                }

                handler->present = true;
                handler->hart_base =
                        plic_regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
                raw_spin_lock_init(&handler->enable_lock);
                handler->enable_base =
                        plic_regs + ENABLE_BASE + i * ENABLE_PER_HART;

                /* priority must be > threshold to trigger an interrupt */
                writel(0, handler->hart_base + CONTEXT_THRESHOLD);
                for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
                        plic_toggle(handler, hwirq, 0);
                nr_handlers++;
        }

        pr_info("mapped %d interrupts with %d handlers for %d contexts.\n",
                nr_irqs, nr_handlers, nr_contexts);
        set_handle_irq(plic_handle_irq);
        return 0;

out_iounmap:
        iounmap(plic_regs);
        return error;
}

IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init);
IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */
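
/*
 * Illustrative devicetree node this driver could bind to (the address,
 * phandles and source count below are made-up example values, not taken
 * from any particular board):
 *
 *      plic: interrupt-controller@c000000 {
 *              compatible = "sifive,plic-1.0.0";
 *              reg = <0xc000000 0x4000000>;
 *              interrupt-controller;
 *              #interrupt-cells = <1>;
 *              riscv,ndev = <53>;
 *              interrupts-extended = <&cpu0_intc 0xffffffff>,
 *                                    <&cpu1_intc 11>, <&cpu1_intc 9>;
 *      };
 *
 * Each interrupts-extended entry is one context; an all-ones (-1)
 * specifier is the "context hole" that plic_init() skips above.
 */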