drivers/irqchip/irq-ls-scfg-msi.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Freescale SCFG MSI(-X) support
 *
 * Copyright (C) 2016 Freescale Semiconductor.
 *
 * Author: Minghuan Lian <Minghuan.Lian@nxp.com>
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/spinlock.h>
#include <linux/dma-iommu.h>
#define MSI_IRQS_PER_MSIR		32
#define MSI_MSIR_OFFSET			4

#define MSI_LS1043V1_1_IRQS_PER_MSIR	8
#define MSI_LS1043V1_1_MSIR_OFFSET	0x10
struct ls_scfg_msi_cfg {
	u32 ibs_shift; /* Shift of interrupt bit select */
	u32 msir_irqs; /* The irq number per MSIR */
	u32 msir_base; /* The base address of MSIR */
};
struct ls_scfg_msir {
	struct ls_scfg_msi *msi_data;
	unsigned int index;
	unsigned int gic_irq;
	unsigned int bit_start;
	unsigned int bit_end;
	unsigned int srs; /* Shared interrupt register select */
	void __iomem *reg;
};
struct ls_scfg_msi {
	spinlock_t lock;
	struct platform_device *pdev;
	struct irq_domain *parent;
	struct irq_domain *msi_domain;
	void __iomem *regs;
	phys_addr_t msiir_addr;
	struct ls_scfg_msi_cfg *cfg;
	u32 msir_num;
	struct ls_scfg_msir *msir;
	u32 irqs_num;
	unsigned long *used;
};
static struct irq_chip ls_scfg_msi_irq_chip = {
	.name		= "MSI",
	.irq_mask	= pci_msi_mask_irq,
	.irq_unmask	= pci_msi_unmask_irq,
};
static struct msi_domain_info ls_scfg_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS |
		   MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX),
	.chip	= &ls_scfg_msi_irq_chip,
};
static int msi_affinity_flag = 1;
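/*
 * MSI affinity steering is enabled by default; it can be turned off on the
 * kernel command line with "lsmsi=no-affinity", parsed by the early_param
 * handler below.
 */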
static int __init early_parse_ls_scfg_msi(char *p)
{
	if (p && strncmp(p, "no-affinity", 11) == 0)
		msi_affinity_flag = 0;
	else
		msi_affinity_flag = 1;

	return 0;
}
early_param("lsmsi", early_parse_ls_scfg_msi);
static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(data);

	msg->address_hi = upper_32_bits(msi_data->msiir_addr);
	msg->address_lo = lower_32_bits(msi_data->msiir_addr);
	msg->data = data->hwirq;
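	/*
	 * With affinity steering enabled the low bits of the MSI data carry
	 * the target CPU number, so the interrupt is delivered through the
	 * MSIR bound to that CPU in ls_scfg_msi_setup_hwirq().
	 */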
	if (msi_affinity_flag) {
		const struct cpumask *mask;

		mask = irq_data_get_effective_affinity_mask(data);
		msg->data |= cpumask_first(mask);
	}

	iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
}
static int ls_scfg_msi_set_affinity(struct irq_data *irq_data,
				    const struct cpumask *mask, bool force)
{
	struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(irq_data);
	u32 cpu;

	if (!msi_affinity_flag)
		return -EINVAL;

	if (!force)
		cpu = cpumask_any_and(mask, cpu_online_mask);
	else
		cpu = cpumask_first(mask);
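	/*
	 * In affinity mode each MSIR is dedicated to the CPU of the same
	 * index, so the chosen CPU number also selects the MSIR; reject CPUs
	 * for which no MSIR or no usable GIC interrupt exists.
	 */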
	if (cpu >= msi_data->msir_num)
		return -EINVAL;

	if (msi_data->msir[cpu].gic_irq <= 0) {
		pr_warn("cannot bind the irq to cpu%d\n", cpu);
		return -EINVAL;
	}

	irq_data_update_effective_affinity(irq_data, cpumask_of(cpu));

	return IRQ_SET_MASK_OK;
}
static struct irq_chip ls_scfg_msi_parent_chip = {
	.name			= "SCFG",
	.irq_compose_msi_msg	= ls_scfg_msi_compose_msg,
	.irq_set_affinity	= ls_scfg_msi_set_affinity,
};
static int ls_scfg_msi_domain_irq_alloc(struct irq_domain *domain,
					unsigned int virq,
					unsigned int nr_irqs,
					void *args)
{
	msi_alloc_info_t *info = args;
	struct ls_scfg_msi *msi_data = domain->host_data;
	int pos, err = 0;

	WARN_ON(nr_irqs != 1);
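	/* Grab the first free hwirq from the allocation bitmap under the lock. */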
	spin_lock(&msi_data->lock);
	pos = find_first_zero_bit(msi_data->used, msi_data->irqs_num);
	if (pos < msi_data->irqs_num)
		__set_bit(pos, msi_data->used);
	else
		err = -ENOSPC;
	spin_unlock(&msi_data->lock);

	if (err)
		return err;

	err = iommu_dma_prepare_msi(info->desc, msi_data->msiir_addr);
	if (err)
		return err;

	irq_domain_set_info(domain, virq, pos,
			    &ls_scfg_msi_parent_chip, msi_data,
			    handle_simple_irq, NULL, NULL);

	return 0;
}
static void ls_scfg_msi_domain_irq_free(struct irq_domain *domain,
					unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(d);
	int pos;

	pos = d->hwirq;
	if (pos < 0 || pos >= msi_data->irqs_num) {
		pr_err("failed to teardown msi. Invalid hwirq %d\n", pos);
		return;
	}

	spin_lock(&msi_data->lock);
	__clear_bit(pos, msi_data->used);
	spin_unlock(&msi_data->lock);
}
static const struct irq_domain_ops ls_scfg_msi_domain_ops = {
	.alloc	= ls_scfg_msi_domain_irq_alloc,
	.free	= ls_scfg_msi_domain_irq_free,
};
static void ls_scfg_msi_irq_handler(struct irq_desc *desc)
{
	struct ls_scfg_msir *msir = irq_desc_get_handler_data(desc);
	struct ls_scfg_msi *msi_data = msir->msi_data;
	unsigned long val;
	int pos, size, virq, hwirq;

	chained_irq_enter(irq_desc_get_chip(desc), desc);

	val = ioread32be(msir->reg);

	pos = msir->bit_start;
	size = msir->bit_end + 1;
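	/*
	 * Each set bit in this MSIR's window is one pending MSI. Rebuild the
	 * hwirq from the interrupt bit select (counted down from bit_end) and
	 * the shared register select, matching the encoding used when the
	 * hwirqs were released in ls_scfg_msi_setup_hwirq().
	 */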
	for_each_set_bit_from(pos, &val, size) {
		hwirq = ((msir->bit_end - pos) << msi_data->cfg->ibs_shift) |
			msir->srs;
		virq = irq_find_mapping(msi_data->parent, hwirq);
		if (virq)
			generic_handle_irq(virq);
	}

	chained_irq_exit(irq_desc_get_chip(desc), desc);
}
static int ls_scfg_msi_domains_init(struct ls_scfg_msi *msi_data)
{
	/* Initialize MSI domain parent */
	msi_data->parent = irq_domain_add_linear(NULL,
						 msi_data->irqs_num,
						 &ls_scfg_msi_domain_ops,
						 msi_data);
	if (!msi_data->parent) {
		dev_err(&msi_data->pdev->dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	msi_data->msi_domain = pci_msi_create_irq_domain(
				of_node_to_fwnode(msi_data->pdev->dev.of_node),
				&ls_scfg_msi_domain_info,
				msi_data->parent);
	if (!msi_data->msi_domain) {
		dev_err(&msi_data->pdev->dev, "failed to create MSI domain\n");
		irq_domain_remove(msi_data->parent);
		return -ENOMEM;
	}

	return 0;
}
static int ls_scfg_msi_setup_hwirq(struct ls_scfg_msi *msi_data, int index)
{
	struct ls_scfg_msir *msir;
	int virq, i, hwirq;

	virq = platform_get_irq(msi_data->pdev, index);
	if (virq <= 0)
		return -ENODEV;

	msir = &msi_data->msir[index];
	msir->index = index;
	msir->msi_data = msi_data;
	msir->gic_irq = virq;
	msir->reg = msi_data->regs + msi_data->cfg->msir_base + 4 * index;
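	/*
	 * LS1043A v1.1 MSIRs are only 8 bits wide, so each MSIR scans an
	 * 8-bit window of the 32-bit read, selected by its index from the
	 * top of the word; all other variants use the full 32 bits.
	 */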
	if (msi_data->cfg->msir_irqs == MSI_LS1043V1_1_IRQS_PER_MSIR) {
		msir->bit_start = 32 - ((msir->index + 1) *
				  MSI_LS1043V1_1_IRQS_PER_MSIR);
		msir->bit_end = msir->bit_start +
				MSI_LS1043V1_1_IRQS_PER_MSIR - 1;
	} else {
		msir->bit_start = 0;
		msir->bit_end = msi_data->cfg->msir_irqs - 1;
	}

	irq_set_chained_handler_and_data(msir->gic_irq,
					 ls_scfg_msi_irq_handler,
					 msir);

	if (msi_affinity_flag) {
		/* Associate MSIR interrupt to the cpu */
		irq_set_affinity(msir->gic_irq, get_cpu_mask(index));
		msir->srs = 0; /* This value is determined by the CPU */
	} else
		msir->srs = index;
	/* Release the hwirqs corresponding to this MSIR */
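	/*
	 * In affinity mode only MSIR 0's hwirq encoding is put into the
	 * allocation pool; the destination MSIR is then chosen at message
	 * compose time by OR-ing the CPU number into the MSI data.
	 */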
	if (!msi_affinity_flag || msir->index == 0) {
		for (i = 0; i < msi_data->cfg->msir_irqs; i++) {
			hwirq = i << msi_data->cfg->ibs_shift | msir->index;
			bitmap_clear(msi_data->used, hwirq, 1);
		}
	}

	return 0;
}
static int ls_scfg_msi_teardown_hwirq(struct ls_scfg_msir *msir)
{
	struct ls_scfg_msi *msi_data = msir->msi_data;
	int i, hwirq;

	if (msir->gic_irq > 0)
		irq_set_chained_handler_and_data(msir->gic_irq, NULL, NULL);

	for (i = 0; i < msi_data->cfg->msir_irqs; i++) {
		hwirq = i << msi_data->cfg->ibs_shift | msir->index;
		bitmap_set(msi_data->used, hwirq, 1);
	}

	return 0;
}
static struct ls_scfg_msi_cfg ls1021_msi_cfg = {
	.ibs_shift = 3,
	.msir_irqs = MSI_IRQS_PER_MSIR,
	.msir_base = MSI_MSIR_OFFSET,
};
static struct ls_scfg_msi_cfg ls1046_msi_cfg = {
	.ibs_shift = 2,
	.msir_irqs = MSI_IRQS_PER_MSIR,
	.msir_base = MSI_MSIR_OFFSET,
};
static struct ls_scfg_msi_cfg ls1043_v1_1_msi_cfg = {
	.ibs_shift = 2,
	.msir_irqs = MSI_LS1043V1_1_IRQS_PER_MSIR,
	.msir_base = MSI_LS1043V1_1_MSIR_OFFSET,
};
static const struct of_device_id ls_scfg_msi_id[] = {
	/* The following two misspelled compatibles are obsolete */
	{ .compatible = "fsl,1s1021a-msi", .data = &ls1021_msi_cfg },
	{ .compatible = "fsl,1s1043a-msi", .data = &ls1021_msi_cfg },

	{ .compatible = "fsl,ls1012a-msi", .data = &ls1021_msi_cfg },
	{ .compatible = "fsl,ls1021a-msi", .data = &ls1021_msi_cfg },
	{ .compatible = "fsl,ls1043a-msi", .data = &ls1021_msi_cfg },
	{ .compatible = "fsl,ls1043a-v1.1-msi", .data = &ls1043_v1_1_msi_cfg },
	{ .compatible = "fsl,ls1046a-msi", .data = &ls1046_msi_cfg },
	{},
};
MODULE_DEVICE_TABLE(of, ls_scfg_msi_id);
static int ls_scfg_msi_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct ls_scfg_msi *msi_data;
	struct resource *res;
	int i, ret;

	match = of_match_device(ls_scfg_msi_id, &pdev->dev);
	if (!match)
		return -ENODEV;

	msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL);
	if (!msi_data)
		return -ENOMEM;
	msi_data->cfg = (struct ls_scfg_msi_cfg *) match->data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	msi_data->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(msi_data->regs)) {
		dev_err(&pdev->dev, "failed to initialize 'regs'\n");
		return PTR_ERR(msi_data->regs);
	}
	msi_data->msiir_addr = res->start;
	msi_data->pdev = pdev;
	spin_lock_init(&msi_data->lock);
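	/*
	 * Size the hwirq space for every (MSIR bit, MSIR select) combination:
	 * MSI_IRQS_PER_MSIR bits times 2^ibs_shift select values.
	 */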
	msi_data->irqs_num = MSI_IRQS_PER_MSIR *
			     (1 << msi_data->cfg->ibs_shift);
	msi_data->used = devm_kcalloc(&pdev->dev,
				      BITS_TO_LONGS(msi_data->irqs_num),
				      sizeof(*msi_data->used),
				      GFP_KERNEL);
	if (!msi_data->used)
		return -ENOMEM;
	/*
	 * Reserve all the hwirqs
	 * The available hwirqs will be released in ls_scfg_msi_setup_hwirq()
	 */
	bitmap_set(msi_data->used, 0, msi_data->irqs_num);
	msi_data->msir_num = of_irq_count(pdev->dev.of_node);
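	/*
	 * For per-CPU steering use at most one MSIR per possible CPU; if
	 * there are fewer MSIRs than CPUs, fall back to non-affinity mode.
	 */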
	if (msi_affinity_flag) {
		u32 cpu_num;

		cpu_num = num_possible_cpus();
		if (msi_data->msir_num >= cpu_num)
			msi_data->msir_num = cpu_num;
		else
			msi_affinity_flag = 0;
	}

	msi_data->msir = devm_kcalloc(&pdev->dev, msi_data->msir_num,
				      sizeof(*msi_data->msir),
				      GFP_KERNEL);
	if (!msi_data->msir)
		return -ENOMEM;
	for (i = 0; i < msi_data->msir_num; i++)
		ls_scfg_msi_setup_hwirq(msi_data, i);

	ret = ls_scfg_msi_domains_init(msi_data);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, msi_data);

	return 0;
}
static int ls_scfg_msi_remove(struct platform_device *pdev)
{
	struct ls_scfg_msi *msi_data = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < msi_data->msir_num; i++)
		ls_scfg_msi_teardown_hwirq(&msi_data->msir[i]);

	irq_domain_remove(msi_data->msi_domain);
	irq_domain_remove(msi_data->parent);

	platform_set_drvdata(pdev, NULL);

	return 0;
}
static struct platform_driver ls_scfg_msi_driver = {
	.driver = {
		.name = "ls-scfg-msi",
		.of_match_table = ls_scfg_msi_id,
	},
	.probe = ls_scfg_msi_probe,
	.remove = ls_scfg_msi_remove,
};

module_platform_driver(ls_scfg_msi_driver);
MODULE_AUTHOR("Minghuan Lian <Minghuan.Lian@nxp.com>");
MODULE_DESCRIPTION("Freescale Layerscape SCFG MSI controller driver");
MODULE_LICENSE("GPL v2");