// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright 2014-2016 Freescale Semiconductor Inc.
 */
#include <linux/types.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/sys_soc.h>

#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>

#include "qbman-portal.h"
#include "dpio.h"
#include "dpio-cmd.h"
26 MODULE_LICENSE("Dual BSD/GPL");
27 MODULE_AUTHOR("Freescale Semiconductor, Inc");
28 MODULE_DESCRIPTION("DPIO Driver");
34 static cpumask_var_t cpus_unused_mask
;
36 static const struct soc_device_attribute ls1088a_soc
[] = {
37 {.family
= "QorIQ LS1088A"},
41 static const struct soc_device_attribute ls2080a_soc
[] = {
42 {.family
= "QorIQ LS2080A"},
46 static const struct soc_device_attribute ls2088a_soc
[] = {
47 {.family
= "QorIQ LS2088A"},
51 static const struct soc_device_attribute lx2160a_soc
[] = {
52 {.family
= "QorIQ LX2160A"},
56 static int dpaa2_dpio_get_cluster_sdest(struct fsl_mc_device
*dpio_dev
, int cpu
)
58 int cluster_base
, cluster_size
;
60 if (soc_device_match(ls1088a_soc
)) {
63 } else if (soc_device_match(ls2080a_soc
) ||
64 soc_device_match(ls2088a_soc
) ||
65 soc_device_match(lx2160a_soc
)) {
69 dev_err(&dpio_dev
->dev
, "unknown SoC version\n");
73 return cluster_base
+ cpu
/ cluster_size
;
76 static irqreturn_t
dpio_irq_handler(int irq_num
, void *arg
)
78 struct device
*dev
= (struct device
*)arg
;
79 struct dpio_priv
*priv
= dev_get_drvdata(dev
);
81 return dpaa2_io_irq(priv
->io
);
84 static void unregister_dpio_irq_handlers(struct fsl_mc_device
*dpio_dev
)
86 struct fsl_mc_device_irq
*irq
;
88 irq
= dpio_dev
->irqs
[0];
90 /* clear the affinity hint */
91 irq_set_affinity_hint(irq
->msi_desc
->irq
, NULL
);
94 static int register_dpio_irq_handlers(struct fsl_mc_device
*dpio_dev
, int cpu
)
97 struct fsl_mc_device_irq
*irq
;
99 irq
= dpio_dev
->irqs
[0];
100 error
= devm_request_irq(&dpio_dev
->dev
,
104 dev_name(&dpio_dev
->dev
),
107 dev_err(&dpio_dev
->dev
,
108 "devm_request_irq() failed: %d\n",
113 /* set the affinity hint */
114 if (irq_set_affinity_hint(irq
->msi_desc
->irq
, cpumask_of(cpu
)))
115 dev_err(&dpio_dev
->dev
,
116 "irq_set_affinity failed irq %d cpu %d\n",
117 irq
->msi_desc
->irq
, cpu
);
122 static int dpaa2_dpio_probe(struct fsl_mc_device
*dpio_dev
)
124 struct dpio_attr dpio_attrs
;
125 struct dpaa2_io_desc desc
;
126 struct dpio_priv
*priv
;
128 struct device
*dev
= &dpio_dev
->dev
;
129 int possible_next_cpu
;
132 priv
= devm_kzalloc(dev
, sizeof(*priv
), GFP_KERNEL
);
136 dev_set_drvdata(dev
, priv
);
138 err
= fsl_mc_portal_allocate(dpio_dev
, 0, &dpio_dev
->mc_io
);
140 dev_dbg(dev
, "MC portal allocation failed\n");
145 err
= dpio_open(dpio_dev
->mc_io
, 0, dpio_dev
->obj_desc
.id
,
146 &dpio_dev
->mc_handle
);
148 dev_err(dev
, "dpio_open() failed\n");
152 err
= dpio_reset(dpio_dev
->mc_io
, 0, dpio_dev
->mc_handle
);
154 dev_err(dev
, "dpio_reset() failed\n");
158 err
= dpio_get_attributes(dpio_dev
->mc_io
, 0, dpio_dev
->mc_handle
,
161 dev_err(dev
, "dpio_get_attributes() failed %d\n", err
);
164 desc
.qman_version
= dpio_attrs
.qbman_version
;
166 err
= dpio_enable(dpio_dev
->mc_io
, 0, dpio_dev
->mc_handle
);
168 dev_err(dev
, "dpio_enable() failed %d\n", err
);
172 /* initialize DPIO descriptor */
173 desc
.receives_notifications
= dpio_attrs
.num_priorities
? 1 : 0;
174 desc
.has_8prio
= dpio_attrs
.num_priorities
== 8 ? 1 : 0;
175 desc
.dpio_id
= dpio_dev
->obj_desc
.id
;
177 /* get the cpu to use for the affinity hint */
178 possible_next_cpu
= cpumask_first(cpus_unused_mask
);
179 if (possible_next_cpu
>= nr_cpu_ids
) {
180 dev_err(dev
, "probe failed. Number of DPIOs exceeds NR_CPUS.\n");
182 goto err_allocate_irqs
;
184 desc
.cpu
= possible_next_cpu
;
185 cpumask_clear_cpu(possible_next_cpu
, cpus_unused_mask
);
187 sdest
= dpaa2_dpio_get_cluster_sdest(dpio_dev
, desc
.cpu
);
189 err
= dpio_set_stashing_destination(dpio_dev
->mc_io
, 0,
193 dev_err(dev
, "dpio_set_stashing_destination failed for cpu%d\n",
197 if (dpio_dev
->obj_desc
.region_count
< 3) {
198 /* No support for DDR backed portals, use classic mapping */
200 * Set the CENA regs to be the cache inhibited area of the
201 * portal to avoid coherency issues if a user migrates to
204 desc
.regs_cena
= devm_memremap(dev
, dpio_dev
->regions
[1].start
,
205 resource_size(&dpio_dev
->regions
[1]),
208 desc
.regs_cena
= devm_memremap(dev
, dpio_dev
->regions
[2].start
,
209 resource_size(&dpio_dev
->regions
[2]),
213 if (IS_ERR(desc
.regs_cena
)) {
214 dev_err(dev
, "devm_memremap failed\n");
215 err
= PTR_ERR(desc
.regs_cena
);
216 goto err_allocate_irqs
;
219 desc
.regs_cinh
= devm_ioremap(dev
, dpio_dev
->regions
[1].start
,
220 resource_size(&dpio_dev
->regions
[1]));
221 if (!desc
.regs_cinh
) {
223 dev_err(dev
, "devm_ioremap failed\n");
224 goto err_allocate_irqs
;
227 err
= fsl_mc_allocate_irqs(dpio_dev
);
229 dev_err(dev
, "fsl_mc_allocate_irqs failed. err=%d\n", err
);
230 goto err_allocate_irqs
;
233 priv
->io
= dpaa2_io_create(&desc
, dev
);
235 dev_err(dev
, "dpaa2_io_create failed\n");
237 goto err_dpaa2_io_create
;
240 err
= register_dpio_irq_handlers(dpio_dev
, desc
.cpu
);
242 goto err_register_dpio_irq
;
244 dev_info(dev
, "probed\n");
245 dev_dbg(dev
, " receives_notifications = %d\n",
246 desc
.receives_notifications
);
247 dpio_close(dpio_dev
->mc_io
, 0, dpio_dev
->mc_handle
);
252 unregister_dpio_irq_handlers(dpio_dev
);
253 err_register_dpio_irq
:
254 fsl_mc_free_irqs(dpio_dev
);
256 dpio_disable(dpio_dev
->mc_io
, 0, dpio_dev
->mc_handle
);
259 dpio_close(dpio_dev
->mc_io
, 0, dpio_dev
->mc_handle
);
261 fsl_mc_portal_free(dpio_dev
->mc_io
);
/* Tear down interrupts for a given DPIO object */
static void dpio_teardown_irqs(struct fsl_mc_device *dpio_dev)
{
	unregister_dpio_irq_handlers(dpio_dev);
	fsl_mc_free_irqs(dpio_dev);
}
273 static int dpaa2_dpio_remove(struct fsl_mc_device
*dpio_dev
)
276 struct dpio_priv
*priv
;
279 dev
= &dpio_dev
->dev
;
280 priv
= dev_get_drvdata(dev
);
281 cpu
= dpaa2_io_get_cpu(priv
->io
);
283 dpaa2_io_down(priv
->io
);
285 dpio_teardown_irqs(dpio_dev
);
287 cpumask_set_cpu(cpu
, cpus_unused_mask
);
289 err
= dpio_open(dpio_dev
->mc_io
, 0, dpio_dev
->obj_desc
.id
,
290 &dpio_dev
->mc_handle
);
292 dev_err(dev
, "dpio_open() failed\n");
296 dpio_disable(dpio_dev
->mc_io
, 0, dpio_dev
->mc_handle
);
298 dpio_close(dpio_dev
->mc_io
, 0, dpio_dev
->mc_handle
);
300 fsl_mc_portal_free(dpio_dev
->mc_io
);
305 fsl_mc_portal_free(dpio_dev
->mc_io
);
310 static const struct fsl_mc_device_id dpaa2_dpio_match_id_table
[] = {
312 .vendor
= FSL_MC_VENDOR_FREESCALE
,
318 static struct fsl_mc_driver dpaa2_dpio_driver
= {
320 .name
= KBUILD_MODNAME
,
321 .owner
= THIS_MODULE
,
323 .probe
= dpaa2_dpio_probe
,
324 .remove
= dpaa2_dpio_remove
,
325 .match_id_table
= dpaa2_dpio_match_id_table
328 static int dpio_driver_init(void)
330 if (!zalloc_cpumask_var(&cpus_unused_mask
, GFP_KERNEL
))
332 cpumask_copy(cpus_unused_mask
, cpu_online_mask
);
334 return fsl_mc_driver_register(&dpaa2_dpio_driver
);
337 static void dpio_driver_exit(void)
339 free_cpumask_var(cpus_unused_mask
);
340 fsl_mc_driver_unregister(&dpaa2_dpio_driver
);
module_init(dpio_driver_init);
module_exit(dpio_driver_exit);