drivers/irqchip/irq-riscv-imsic-platform.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 * Copyright (C) 2022 Ventana Micro Systems Inc.
 */

#define pr_fmt(fmt) "riscv-imsic: " fmt
#include <linux/acpi.h>
#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/smp.h>

#include "irq-riscv-imsic-state.h"

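/*
 * Translate a (cpu, interrupt file index) pair into the physical address
 * of the per-CPU IMSIC MMIO page that MSI writes must target.
 */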
static bool imsic_cpu_page_phys(unsigned int cpu, unsigned int guest_index,
				phys_addr_t *out_msi_pa)
{
	struct imsic_global_config *global;
	struct imsic_local_config *local;

	global = &imsic->global;
	local = per_cpu_ptr(global->local, cpu);

	if (BIT(global->guest_index_bits) <= guest_index)
		return false;

	if (out_msi_pa)
		*out_msi_pa = local->msi_pa + (guest_index * IMSIC_MMIO_PAGE_SZ);

	return true;
}

static void imsic_irq_mask(struct irq_data *d)
{
	imsic_vector_mask(irq_data_get_irq_chip_data(d));
}

static void imsic_irq_unmask(struct irq_data *d)
{
	imsic_vector_unmask(irq_data_get_irq_chip_data(d));
}

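/*
 * Retrigger an interrupt by writing its local identity to the target
 * CPU's IMSIC MSI page, which makes the hardware deliver it again.
 */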
static int imsic_irq_retrigger(struct irq_data *d)
{
	struct imsic_vector *vec = irq_data_get_irq_chip_data(d);
	struct imsic_local_config *local;

	if (WARN_ON(!vec))
		return -ENOENT;

	local = per_cpu_ptr(imsic->global.local, vec->cpu);
	writel_relaxed(vec->local_id, local->msi_va);
	return 0;
}

static void imsic_irq_compose_vector_msg(struct imsic_vector *vec, struct msi_msg *msg)
{
	phys_addr_t msi_addr;

	if (WARN_ON(!vec))
		return;

	if (WARN_ON(!imsic_cpu_page_phys(vec->cpu, 0, &msi_addr)))
		return;

	msg->address_hi = upper_32_bits(msi_addr);
	msg->address_lo = lower_32_bits(msi_addr);
	msg->data = vec->local_id;
}

static void imsic_irq_compose_msg(struct irq_data *d, struct msi_msg *msg)
{
	imsic_irq_compose_vector_msg(irq_data_get_irq_chip_data(d), msg);
}

#ifdef CONFIG_SMP
static void imsic_msi_update_msg(struct irq_data *d, struct imsic_vector *vec)
{
	struct msi_msg msg = { };

	imsic_irq_compose_vector_msg(vec, &msg);
	irq_data_get_irq_chip(d)->irq_write_msi_msg(d, &msg);
}

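/*
 * Affinity changes are done by allocating a new vector on the requested
 * CPUs, re-pointing the device at it, and then moving the state of the
 * old vector over to the new one.
 */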
static int imsic_irq_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
				  bool force)
{
	struct imsic_vector *old_vec, *new_vec;
	struct irq_data *pd = d->parent_data;

	old_vec = irq_data_get_irq_chip_data(pd);
	if (WARN_ON(!old_vec))
		return -ENOENT;

	/* If old vector cpu belongs to the target cpumask then do nothing */
	if (cpumask_test_cpu(old_vec->cpu, mask_val))
		return IRQ_SET_MASK_OK_DONE;

	/* If move is already in-flight then return failure */
	if (imsic_vector_get_move(old_vec))
		return -EBUSY;

	/* Get a new vector on the desired set of CPUs */
	new_vec = imsic_vector_alloc(old_vec->hwirq, mask_val);
	if (!new_vec)
		return -ENOSPC;

	/* Point device to the new vector */
	imsic_msi_update_msg(d, new_vec);

	/* Update irq descriptors with the new vector */
	pd->chip_data = new_vec;

	/* Update effective affinity of parent irq data */
	irq_data_update_effective_affinity(pd, cpumask_of(new_vec->cpu));

	/* Move state of the old vector to the new vector */
	imsic_vector_move(old_vec, new_vec);

	return IRQ_SET_MASK_OK_DONE;
}
#endif

static struct irq_chip imsic_irq_base_chip = {
	.name = "IMSIC",
	.irq_mask = imsic_irq_mask,
	.irq_unmask = imsic_irq_unmask,
	.irq_retrigger = imsic_irq_retrigger,
	.irq_compose_msi_msg = imsic_irq_compose_msg,
	.flags = IRQCHIP_SKIP_SET_WAKE |
		 IRQCHIP_MASK_ON_SUSPEND,
};

static int imsic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs, void *args)
{
	struct imsic_vector *vec;

	/* Multi-MSI is not supported yet. */
	if (nr_irqs > 1)
		return -EOPNOTSUPP;

	vec = imsic_vector_alloc(virq, cpu_online_mask);
	if (!vec)
		return -ENOSPC;

	irq_domain_set_info(domain, virq, virq, &imsic_irq_base_chip, vec,
			    handle_simple_irq, NULL, NULL);
	irq_set_noprobe(virq);
	irq_set_affinity(virq, cpu_online_mask);
	irq_data_update_effective_affinity(irq_get_irq_data(virq), cpumask_of(vec->cpu));

	return 0;
}

static void imsic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);

	imsic_vector_free(irq_data_get_irq_chip_data(d));
	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}

static int imsic_irq_domain_select(struct irq_domain *domain, struct irq_fwspec *fwspec,
				   enum irq_domain_bus_token bus_token)
{
	const struct msi_parent_ops *ops = domain->msi_parent_ops;
	u32 busmask = BIT(bus_token);

	if (fwspec->fwnode != domain->fwnode || fwspec->param_count != 0)
		return 0;

	/* Handle pure domain searches */
	if (bus_token == ops->bus_select_token)
		return 1;

	return !!(ops->bus_select_mask & busmask);
}

#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
static void imsic_irq_debug_show(struct seq_file *m, struct irq_domain *d,
				 struct irq_data *irqd, int ind)
{
	if (!irqd) {
		imsic_vector_debug_show_summary(m, ind);
		return;
	}

	imsic_vector_debug_show(m, irq_data_get_irq_chip_data(irqd), ind);
}
#endif

static const struct irq_domain_ops imsic_base_domain_ops = {
	.alloc = imsic_irq_domain_alloc,
	.free = imsic_irq_domain_free,
	.select = imsic_irq_domain_select,
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
	.debug_show = imsic_irq_debug_show,
#endif
};

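/*
 * PCI MSI/MSI-X interrupts are masked both at the PCI level and in the
 * IMSIC vector state via the parent chip callbacks.
 */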
#ifdef CONFIG_RISCV_IMSIC_PCI

static void imsic_pci_mask_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void imsic_pci_unmask_irq(struct irq_data *d)
{
	irq_chip_unmask_parent(d);
	pci_msi_unmask_irq(d);
}

#define MATCH_PCI_MSI	BIT(DOMAIN_BUS_PCI_MSI)

#else

#define MATCH_PCI_MSI	0

#endif

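/*
 * msi_parent_ops callback used to configure the per-device MSI domains
 * (PCI and platform) created underneath the IMSIC base domain.
 */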
static bool imsic_init_dev_msi_info(struct device *dev,
				    struct irq_domain *domain,
				    struct irq_domain *real_parent,
				    struct msi_domain_info *info)
{
	const struct msi_parent_ops *pops = real_parent->msi_parent_ops;

	/* MSI parent domain specific settings */
	switch (real_parent->bus_token) {
	case DOMAIN_BUS_NEXUS:
		if (WARN_ON_ONCE(domain != real_parent))
			return false;
#ifdef CONFIG_SMP
		info->chip->irq_set_affinity = imsic_irq_set_affinity;
#endif
		break;
	default:
		WARN_ON_ONCE(1);
		return false;
	}

	/* Is the target supported? */
	switch (info->bus_token) {
#ifdef CONFIG_RISCV_IMSIC_PCI
	case DOMAIN_BUS_PCI_DEVICE_MSI:
	case DOMAIN_BUS_PCI_DEVICE_MSIX:
		info->chip->irq_mask = imsic_pci_mask_irq;
		info->chip->irq_unmask = imsic_pci_unmask_irq;
		break;
#endif
	case DOMAIN_BUS_DEVICE_MSI:
		/*
		 * Per-device MSI should never have any MSI feature bits
		 * set. Its sole purpose is to create a dumb interrupt
		 * chip which has a device specific irq_write_msi_msg()
		 * callback.
		 */
		if (WARN_ON_ONCE(info->flags))
			return false;

		/* Core managed MSI descriptors */
		info->flags |= MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS |
			       MSI_FLAG_FREE_MSI_DESCS;
		break;
	case DOMAIN_BUS_WIRED_TO_MSI:
		break;
	default:
		WARN_ON_ONCE(1);
		return false;
	}

	/* Use hierarchical chip operations for re-trigger */
	info->chip->irq_retrigger = irq_chip_retrigger_hierarchy;

	/*
	 * Mask out the domain specific MSI feature flags which are not
	 * supported by the real parent.
	 */
	info->flags &= pops->supported_flags;

	/* Enforce the required flags */
	info->flags |= pops->required_flags;

	return true;
}

#define MATCH_PLATFORM_MSI	BIT(DOMAIN_BUS_PLATFORM_MSI)

static const struct msi_parent_ops imsic_msi_parent_ops = {
	.supported_flags = MSI_GENERIC_FLAGS_MASK |
			   MSI_FLAG_PCI_MSIX,
	.required_flags = MSI_FLAG_USE_DEF_DOM_OPS |
			  MSI_FLAG_USE_DEF_CHIP_OPS,
	.bus_select_token = DOMAIN_BUS_NEXUS,
	.bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
	.init_dev_msi_info = imsic_init_dev_msi_info,
};

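/*
 * Create the IMSIC base IRQ domain and advertise it as the MSI parent
 * for the per-device PCI and platform MSI domains.
 */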
int imsic_irqdomain_init(void)
{
	struct imsic_global_config *global;

	if (!imsic || !imsic->fwnode) {
		pr_err("early driver not probed\n");
		return -ENODEV;
	}

	if (imsic->base_domain) {
		pr_err("%pfwP: irq domain already created\n", imsic->fwnode);
		return -ENODEV;
	}

	/* Create Base IRQ domain */
	imsic->base_domain = irq_domain_create_tree(imsic->fwnode,
						    &imsic_base_domain_ops, imsic);
	if (!imsic->base_domain) {
		pr_err("%pfwP: failed to create IMSIC base domain\n", imsic->fwnode);
		return -ENOMEM;
	}
	imsic->base_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
	imsic->base_domain->msi_parent_ops = &imsic_msi_parent_ops;

	irq_domain_update_bus_token(imsic->base_domain, DOMAIN_BUS_NEXUS);

	global = &imsic->global;
	pr_info("%pfwP: hart-index-bits: %d, guest-index-bits: %d\n",
		imsic->fwnode, global->hart_index_bits, global->guest_index_bits);
	pr_info("%pfwP: group-index-bits: %d, group-index-shift: %d\n",
		imsic->fwnode, global->group_index_bits, global->group_index_shift);
	pr_info("%pfwP: per-CPU IDs %d at base address %pa\n",
		imsic->fwnode, global->nr_ids, &global->base_addr);
	pr_info("%pfwP: total %d interrupts available\n",
		imsic->fwnode, num_possible_cpus() * (global->nr_ids - 1));

	return 0;
}

static int imsic_platform_probe_common(struct fwnode_handle *fwnode)
{
	if (imsic && imsic->fwnode != fwnode) {
		pr_err("%pfwP: fwnode mismatch\n", fwnode);
		return -ENODEV;
	}

	return imsic_irqdomain_init();
}

static int imsic_platform_dt_probe(struct platform_device *pdev)
{
	return imsic_platform_probe_common(pdev->dev.fwnode);
}

#ifdef CONFIG_ACPI

/*
 * On ACPI based systems, PCI enumeration happens early during boot in
 * acpi_scan_init(). PCI enumeration expects the MSI domain to be set up
 * before it calls pci_set_msi_domain(). Hence, unlike in DT where the
 * imsic-platform driver probe happens late during boot, ACPI based
 * systems need to set up the MSI domain early.
 */
int imsic_platform_acpi_probe(struct fwnode_handle *fwnode)
{
	return imsic_platform_probe_common(fwnode);
}

#endif

static const struct of_device_id imsic_platform_match[] = {
	{ .compatible = "riscv,imsics" },
	{ }
};

static struct platform_driver imsic_platform_driver = {
	.driver = {
		.name = "riscv-imsic",
		.of_match_table = imsic_platform_match,
	},
	.probe = imsic_platform_dt_probe,
};
builtin_platform_driver(imsic_platform_driver);