// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2018 Christoph Hellwig
 */
#define pr_fmt(fmt) "plic: " fmt
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <asm/smp.h>

/*
 * This driver implements a version of the RISC-V PLIC with the actual layout
 * specified in chapter 8 of the SiFive U5 Coreplex Series Manual:
 *
 *     https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf
 *
 * The largest number supported by devices marked as 'sifive,plic-1.0.0' is
 * 1024, of which device 0 is defined as non-existent by the RISC-V Privileged
 * Spec.
 */

#define MAX_DEVICES		1024
#define MAX_CONTEXTS		15872

/*
 * Each interrupt source has a priority register associated with it.
 * We always hardwire it to one in Linux.
 */
#define PRIORITY_BASE		0
#define PRIORITY_PER_ID		4

/*
 * Each hart context has a vector of interrupt enable bits associated with it.
 * There's one bit for each interrupt source.
 */
#define ENABLE_BASE		0x2000
#define ENABLE_PER_HART		0x80

/*
 * Each hart context has a set of control registers associated with it.  Right
 * now there's only two: a source priority threshold over which the hart will
 * take an interrupt, and a register to claim interrupts.
 */
#define CONTEXT_BASE		0x200000
#define CONTEXT_PER_HART	0x1000
#define CONTEXT_THRESHOLD	0x00
#define CONTEXT_CLAIM		0x04
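
/*
 * Illustrative sketch, not part of the original driver: with the layout
 * above, the per-context control registers of context "ctx" start at
 * CONTEXT_BASE + ctx * CONTEXT_PER_HART, so its claim/complete register can
 * be located as shown below.  plic_init() open-codes the same arithmetic
 * when it fills in each handler's hart_base; the helper name is made up for
 * this example.
 */
static inline void __iomem *plic_context_claim_reg(void __iomem *regs, int ctx)
{
	return regs + CONTEXT_BASE + ctx * CONTEXT_PER_HART + CONTEXT_CLAIM;
}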

static void __iomem *plic_regs;

struct plic_handler {
	bool			present;
	void __iomem		*hart_base;
	/*
	 * Protect mask operations on the registers given that we can't
	 * assume atomic memory operations work on them.
	 */
	raw_spinlock_t		enable_lock;
	void __iomem		*enable_base;
};
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);

static inline void plic_toggle(struct plic_handler *handler,
				int hwirq, int enable)
{
	u32 __iomem *reg = handler->enable_base + (hwirq / 32) * sizeof(u32);
	u32 hwirq_mask = 1 << (hwirq % 32);

	raw_spin_lock(&handler->enable_lock);
	if (enable)
		writel(readl(reg) | hwirq_mask, reg);
	else
		writel(readl(reg) & ~hwirq_mask, reg);
	raw_spin_unlock(&handler->enable_lock);
}

static inline void plic_irq_toggle(const struct cpumask *mask,
				   int hwirq, int enable)
{
	int cpu;

	/* priority 0 never interrupts, so this write doubles as a global gate */
	writel(enable, plic_regs + PRIORITY_BASE + hwirq * PRIORITY_PER_ID);
	for_each_cpu(cpu, mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		if (handler->present)
			plic_toggle(handler, hwirq, enable);
	}
}

static void plic_irq_unmask(struct irq_data *d)
{
	unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
					   cpu_online_mask);
	if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
		return;
	plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
}

static void plic_irq_mask(struct irq_data *d)
{
	plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
}

#ifdef CONFIG_SMP
static int plic_set_affinity(struct irq_data *d,
			     const struct cpumask *mask_val, bool force)
{
	unsigned int cpu;

	if (force)
		cpu = cpumask_first(mask_val);
	else
		cpu = cpumask_any_and(mask_val, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
	plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK_DONE;
}
#endif

static void plic_irq_eoi(struct irq_data *d)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
}

static struct irq_chip plic_chip = {
	.name		= "SiFive PLIC",
	.irq_mask	= plic_irq_mask,
	.irq_unmask	= plic_irq_unmask,
	.irq_eoi	= plic_irq_eoi,
#ifdef CONFIG_SMP
	.irq_set_affinity = plic_set_affinity,
#endif
};

static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hwirq)
{
	irq_domain_set_info(d, irq, hwirq, &plic_chip, d->host_data,
			    handle_fasteoi_irq, NULL, NULL);
	irq_set_noprobe(irq);
	return 0;
}

static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type;
	struct irq_fwspec *fwspec = arg;

	ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = plic_irqdomain_map(domain, virq + i, hwirq + i);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct irq_domain_ops plic_irqdomain_ops = {
	.translate	= irq_domain_translate_onecell,
	.alloc		= plic_irq_domain_alloc,
	.free		= irq_domain_free_irqs_top,
};

static struct irq_domain *plic_irqdomain;

/*
 * Handling an interrupt is a two-step process: first you claim the interrupt
 * by reading the claim register, then you complete the interrupt by writing
 * that source ID back to the same claim register.  This automatically enables
 * and disables the interrupt, so there's nothing else to do.
 */
static void plic_handle_irq(struct pt_regs *regs)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
	void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
	irq_hw_number_t hwirq;

	WARN_ON_ONCE(!handler->present);

	csr_clear(CSR_IE, IE_EIE);
	while ((hwirq = readl(claim))) {
		int irq = irq_find_mapping(plic_irqdomain, hwirq);

		if (unlikely(irq <= 0))
			pr_warn_ratelimited("can't find mapping for hwirq %lu\n",
					hwirq);
		else
			generic_handle_irq(irq);
	}
	csr_set(CSR_IE, IE_EIE);
}

/*
 * Walk up the DT tree until we find an active RISC-V core (HART) node and
 * extract the cpuid from it.
 */
static int plic_find_hart_id(struct device_node *node)
{
	for (; node; node = node->parent) {
		if (of_device_is_compatible(node, "riscv"))
			return riscv_of_processor_hartid(node);
	}

	return -1;
}

static int __init plic_init(struct device_node *node,
		struct device_node *parent)
{
	int error = 0, nr_contexts, nr_handlers = 0, i;
	u32 nr_irqs;

	if (plic_regs) {
		pr_warn("PLIC already present.\n");
		return -ENXIO;
	}

	plic_regs = of_iomap(node, 0);
	if (WARN_ON(!plic_regs))
		return -EIO;

	error = -EINVAL;
	of_property_read_u32(node, "riscv,ndev", &nr_irqs);
	if (WARN_ON(!nr_irqs))
		goto out_iounmap;

	nr_contexts = of_irq_count(node);
	if (WARN_ON(!nr_contexts))
		goto out_iounmap;
	if (WARN_ON(nr_contexts < num_possible_cpus()))
		goto out_iounmap;

	error = -ENOMEM;
	plic_irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
			&plic_irqdomain_ops, NULL);
	if (WARN_ON(!plic_irqdomain))
		goto out_iounmap;

	for (i = 0; i < nr_contexts; i++) {
		struct of_phandle_args parent;
		struct plic_handler *handler;
		irq_hw_number_t hwirq;
		int cpu, hartid;
		u32 threshold = 0;

		if (of_irq_parse_one(node, i, &parent)) {
			pr_err("failed to parse parent for context %d.\n", i);
			continue;
		}

		/*
		 * Skip contexts other than external interrupts for our
		 * privilege level.
		 */
		if (parent.args[0] != RV_IRQ_EXT)
			continue;

		hartid = plic_find_hart_id(parent.np);
		if (hartid < 0) {
			pr_warn("failed to parse hart ID for context %d.\n", i);
			continue;
		}

		cpu = riscv_hartid_to_cpuid(hartid);
		if (cpu < 0) {
			pr_warn("Invalid cpuid for context %d\n", i);
			continue;
		}

		/*
		 * When running in M-mode we need to ignore the S-mode handler.
		 * Here we assume it always comes later, but that might be a
		 * little fragile.
		 */
		handler = per_cpu_ptr(&plic_handlers, cpu);
		if (handler->present) {
			pr_warn("handler already present for context %d.\n", i);
			threshold = 0xffffffff;
			goto done;
		}

		handler->present = true;
		handler->hart_base =
			plic_regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
		raw_spin_lock_init(&handler->enable_lock);
		handler->enable_base =
			plic_regs + ENABLE_BASE + i * ENABLE_PER_HART;

done:
		/* priority must be > threshold to trigger an interrupt */
		writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
		for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
			plic_toggle(handler, hwirq, 0);
		nr_handlers++;
	}

	pr_info("mapped %d interrupts with %d handlers for %d contexts.\n",
		nr_irqs, nr_handlers, nr_contexts);
	set_handle_irq(plic_handle_irq);
	return 0;

out_iounmap:
	iounmap(plic_regs);
	return error;
}

IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init);
IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */
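
/*
 * For reference, a rough sketch of the devicetree node this driver binds to;
 * the unit address, reg size and interrupt specifier values below are
 * illustrative only, not taken from any particular board:
 *
 *	plic: interrupt-controller@c000000 {
 *		compatible = "sifive,plic-1.0.0";
 *		reg = <0xc000000 0x4000000>;
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *		riscv,ndev = <53>;
 *		interrupts-extended = <&cpu0_intc 11>, <&cpu0_intc 9>;
 *	};
 *
 * plic_init() maps "reg", reads "riscv,ndev" for the number of interrupt
 * sources, and walks the "interrupts-extended" entries as contexts, keeping
 * only those whose specifier matches RV_IRQ_EXT for the running privilege
 * level.
 */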