Linux 4.19.133
drivers/soc/fsl/qbman/qman_portal.c
/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "qman_priv.h"

struct qman_portal *qman_dma_portal;
EXPORT_SYMBOL(qman_dma_portal);

/* Enable portal interrupts (as opposed to polling mode) */
#define CONFIG_FSL_DPA_PIRQ_SLOW  1
#define CONFIG_FSL_DPA_PIRQ_FAST  1

static struct cpumask portal_cpus;
/* protect qman global registers and global data shared among portals */
static DEFINE_SPINLOCK(qman_lock);

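/*
 * Configure stashing for the portal on @cpu. With CONFIG_FSL_PAMU this sets
 * up an IOMMU domain with a single DMA window and L1-cache stashing
 * attributes before attaching the portal device; without PAMU, or if any of
 * those steps fail, the stash destination (SDEST) for the portal channel is
 * still programmed.
 */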
static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
{
#ifdef CONFIG_FSL_PAMU
        struct device *dev = pcfg->dev;
        int window_count = 1;
        struct iommu_domain_geometry geom_attr;
        struct pamu_stash_attribute stash_attr;
        int ret;

        pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
        if (!pcfg->iommu_domain) {
                dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__);
                goto no_iommu;
        }
        geom_attr.aperture_start = 0;
        geom_attr.aperture_end =
                ((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1;
        geom_attr.force_aperture = true;
        ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY,
                                    &geom_attr);
        if (ret < 0) {
                dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
                        ret);
                goto out_domain_free;
        }
        ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS,
                                    &window_count);
        if (ret < 0) {
                dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
                        ret);
                goto out_domain_free;
        }
        stash_attr.cpu = cpu;
        stash_attr.cache = PAMU_ATTR_CACHE_L1;
        ret = iommu_domain_set_attr(pcfg->iommu_domain,
                                    DOMAIN_ATTR_FSL_PAMU_STASH,
                                    &stash_attr);
        if (ret < 0) {
                dev_err(dev, "%s(): iommu_domain_set_attr() = %d",
                        __func__, ret);
                goto out_domain_free;
        }
        ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36,
                                         IOMMU_READ | IOMMU_WRITE);
        if (ret < 0) {
                dev_err(dev, "%s(): iommu_domain_window_enable() = %d",
                        __func__, ret);
                goto out_domain_free;
        }
        ret = iommu_attach_device(pcfg->iommu_domain, dev);
        if (ret < 0) {
                dev_err(dev, "%s(): iommu_device_attach() = %d", __func__,
                        ret);
                goto out_domain_free;
        }
        ret = iommu_domain_set_attr(pcfg->iommu_domain,
                                    DOMAIN_ATTR_FSL_PAMU_ENABLE,
                                    &window_count);
        if (ret < 0) {
                dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
                        ret);
                goto out_detach_device;
        }

no_iommu:
#endif
        qman_set_sdest(pcfg->channel, cpu);

        return;

#ifdef CONFIG_FSL_PAMU
out_detach_device:
        iommu_detach_device(pcfg->iommu_domain, NULL);
out_domain_free:
        iommu_domain_free(pcfg->iommu_domain);
        pcfg->iommu_domain = NULL;
#endif
}

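/*
 * Bring up one software portal: apply the LIODN fixup, set the stash/IOMMU
 * cpu, create the affine portal and choose which interrupt sources are
 * IRQ-driven rather than polled. Once every assigned portal has been
 * initialised, the congestion groups (CGRs) are initialised as well.
 */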
static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
{
        struct qman_portal *p;
        u32 irq_sources = 0;

        /* We need the same LIODN offset for all portals */
        qman_liodn_fixup(pcfg->channel);

        pcfg->iommu_domain = NULL;
        portal_set_cpu(pcfg, pcfg->cpu);

        p = qman_create_affine_portal(pcfg, NULL);
        if (!p) {
                dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n",
                         __func__, pcfg->cpu);
                return NULL;
        }

        /* Determine what should be interrupt-vs-poll driven */
#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
        irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI |
                       QM_PIRQ_CSCI;
#endif
#ifdef CONFIG_FSL_DPA_PIRQ_FAST
        irq_sources |= QM_PIRQ_DQRI;
#endif
        qman_p_irqsource_add(p, irq_sources);

        spin_lock(&qman_lock);
        if (cpumask_equal(&portal_cpus, cpu_possible_mask)) {
                /* all assigned portals are initialized now */
                qman_init_cgr_all();
        }

        if (!qman_dma_portal)
                qman_dma_portal = p;

        spin_unlock(&qman_lock);

        dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);

        return p;
}

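/*
 * Re-point the stash destination (and, when a PAMU domain is in use, the
 * PAMU stashing attribute) at @cpu. Used by the CPU hotplug callbacks below
 * when a portal's cpu goes away or comes back.
 */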
static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
                                     unsigned int cpu)
{
#ifdef CONFIG_FSL_PAMU /* TODO */
        struct pamu_stash_attribute stash_attr;
        int ret;

        if (pcfg->iommu_domain) {
                stash_attr.cpu = cpu;
                stash_attr.cache = PAMU_ATTR_CACHE_L1;
                ret = iommu_domain_set_attr(pcfg->iommu_domain,
                                DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr);
                if (ret < 0) {
                        dev_err(pcfg->dev,
                                "Failed to update pamu stash setting\n");
                        return;
                }
        }
#endif
        qman_set_sdest(pcfg->channel, cpu);
}

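/*
 * CPU hotplug callbacks: when a portal's cpu goes offline, its IRQ affinity
 * and stash destination are parked on CPU 0; when the cpu comes back online
 * they are restored to that cpu.
 */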
static int qman_offline_cpu(unsigned int cpu)
{
        struct qman_portal *p;
        const struct qm_portal_config *pcfg;

        p = affine_portals[cpu];
        if (p) {
                pcfg = qman_get_qm_portal_config(p);
                if (pcfg) {
                        irq_set_affinity(pcfg->irq, cpumask_of(0));
                        qman_portal_update_sdest(pcfg, 0);
                }
        }
        return 0;
}

static int qman_online_cpu(unsigned int cpu)
{
        struct qman_portal *p;
        const struct qm_portal_config *pcfg;

        p = affine_portals[cpu];
        if (p) {
                pcfg = qman_get_qm_portal_config(p);
                if (pcfg) {
                        irq_set_affinity(pcfg->irq, cpumask_of(cpu));
                        qman_portal_update_sdest(pcfg, cpu);
                }
        }
        return 0;
}

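/*
 * Probe one "fsl,qman-portal" node: map the cache-enabled (CE) and
 * cache-inhibited (CI) register regions, read the channel from "cell-index",
 * claim the next cpu that does not yet have a portal, and initialise the
 * portal on it. Portals beyond the number of possible cpus are mapped but
 * left uninitialised.
 */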
static int qman_portal_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct device_node *node = dev->of_node;
        struct qm_portal_config *pcfg;
        struct resource *addr_phys[2];
        int irq, cpu, err;
        u32 val;

        err = qman_is_probed();
        if (!err)
                return -EPROBE_DEFER;
        if (err < 0) {
                dev_err(&pdev->dev, "failing probe due to qman probe error\n");
                return -ENODEV;
        }

        pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
        if (!pcfg)
                return -ENOMEM;

        pcfg->dev = dev;

        addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM,
                                             DPAA_PORTAL_CE);
        if (!addr_phys[0]) {
                dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node);
                return -ENXIO;
        }

        addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
                                             DPAA_PORTAL_CI);
        if (!addr_phys[1]) {
                dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node);
                return -ENXIO;
        }

        err = of_property_read_u32(node, "cell-index", &val);
        if (err) {
                dev_err(dev, "Can't get %pOF property 'cell-index'\n", node);
                return err;
        }
        pcfg->channel = val;
        pcfg->cpu = -1;
        irq = platform_get_irq(pdev, 0);
        if (irq <= 0) {
                dev_err(dev, "Can't get %pOF IRQ\n", node);
                return -ENXIO;
        }
        pcfg->irq = irq;

        pcfg->addr_virt_ce = memremap(addr_phys[0]->start,
                                      resource_size(addr_phys[0]),
                                      QBMAN_MEMREMAP_ATTR);
        if (!pcfg->addr_virt_ce) {
                dev_err(dev, "memremap::CE failed\n");
                goto err_ioremap1;
        }

        pcfg->addr_virt_ci = ioremap(addr_phys[1]->start,
                                     resource_size(addr_phys[1]));
        if (!pcfg->addr_virt_ci) {
                dev_err(dev, "ioremap::CI failed\n");
                goto err_ioremap2;
        }

        pcfg->pools = qm_get_pools_sdqcr();

        spin_lock(&qman_lock);
        cpu = cpumask_next_zero(-1, &portal_cpus);
        if (cpu >= nr_cpu_ids) {
                /* unassigned portal, skip init */
                spin_unlock(&qman_lock);
                return 0;
        }

        cpumask_set_cpu(cpu, &portal_cpus);
        spin_unlock(&qman_lock);
        pcfg->cpu = cpu;

        if (dma_set_mask(dev, DMA_BIT_MASK(40))) {
                dev_err(dev, "dma_set_mask() failed\n");
                goto err_portal_init;
        }

        if (!init_pcfg(pcfg)) {
                dev_err(dev, "portal init failed\n");
                goto err_portal_init;
        }

        /* clear irq affinity if assigned cpu is offline */
        if (!cpu_online(cpu))
                qman_offline_cpu(cpu);

        return 0;

err_portal_init:
        iounmap(pcfg->addr_virt_ci);
err_ioremap2:
        memunmap(pcfg->addr_virt_ce);
err_ioremap1:
        return -ENXIO;
}

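/*
 * Matched against portal nodes like the following sketch (values are
 * illustrative only; the two reg entries correspond to the cache-enabled and
 * cache-inhibited windows fetched above as DPAA_PORTAL_CE/DPAA_PORTAL_CI,
 * and "cell-index" supplies the channel number):
 *
 *      qportal0: qman-portal@0 {
 *              compatible = "fsl,qman-portal";
 *              reg = <0x0 0x4000>, <0x1000000 0x1000>;
 *              interrupts = <104 2 0 0>;
 *              cell-index = <0x0>;
 *      };
 */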
static const struct of_device_id qman_portal_ids[] = {
        {
                .compatible = "fsl,qman-portal",
        },
        {}
};
MODULE_DEVICE_TABLE(of, qman_portal_ids);

static struct platform_driver qman_portal_driver = {
        .driver = {
                .name = KBUILD_MODNAME,
                .of_match_table = qman_portal_ids,
        },
        .probe = qman_portal_probe,
};

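/*
 * Registration is wrapped so that the CPU hotplug online/offline callbacks
 * are installed together with the platform driver, and the driver is
 * unregistered again if installing them fails.
 */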
static int __init qman_portal_driver_register(struct platform_driver *drv)
{
        int ret;

        ret = platform_driver_register(drv);
        if (ret < 0)
                return ret;

        ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
                                        "soc/qman_portal:online",
                                        qman_online_cpu, qman_offline_cpu);
        if (ret < 0) {
                pr_err("qman: failed to register hotplug callbacks.\n");
                platform_driver_unregister(drv);
                return ret;
        }
        return 0;
}

module_driver(qman_portal_driver,
              qman_portal_driver_register, platform_driver_unregister);