drivers/dma/qcom/hidma_mgmt.c

/*
 * Qualcomm Technologies HIDMA DMA engine Management interface
 *
 * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/dmaengine.h>
#include <linux/acpi.h>
#include <linux/of.h>
#include <linux/property.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>

#include "hidma_mgmt.h"

#define HIDMA_QOS_N_OFFSET		0x700
#define HIDMA_CFG_OFFSET		0x400
#define HIDMA_MAX_BUS_REQ_LEN_OFFSET	0x41C
#define HIDMA_MAX_XACTIONS_OFFSET	0x420
#define HIDMA_HW_VERSION_OFFSET		0x424
#define HIDMA_CHRESET_TIMEOUT_OFFSET	0x418

#define HIDMA_MAX_WR_XACTIONS_MASK	GENMASK(4, 0)
#define HIDMA_MAX_RD_XACTIONS_MASK	GENMASK(4, 0)
#define HIDMA_WEIGHT_MASK		GENMASK(6, 0)
#define HIDMA_MAX_BUS_REQ_LEN_MASK	GENMASK(15, 0)
#define HIDMA_CHRESET_TIMEOUT_MASK	GENMASK(19, 0)

#define HIDMA_MAX_WR_XACTIONS_BIT_POS	16
#define HIDMA_MAX_BUS_WR_REQ_BIT_POS	16
#define HIDMA_WRR_BIT_POS		8
#define HIDMA_PRIORITY_BIT_POS		15

#define HIDMA_AUTOSUSPEND_TIMEOUT	2000
#define HIDMA_MAX_CHANNEL_WEIGHT	15
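
/*
 * The module parameters below, when set to a non-zero value, override the
 * corresponding ACPI/DT properties at probe time; otherwise they are
 * updated at probe to reflect the firmware-provided defaults.
 */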
static unsigned int max_write_request;
module_param(max_write_request, uint, 0644);
MODULE_PARM_DESC(max_write_request,
	"maximum write burst (default: ACPI/DT value)");

static unsigned int max_read_request;
module_param(max_read_request, uint, 0644);
MODULE_PARM_DESC(max_read_request,
	"maximum read burst (default: ACPI/DT value)");

static unsigned int max_wr_xactions;
module_param(max_wr_xactions, uint, 0644);
MODULE_PARM_DESC(max_wr_xactions,
	"maximum number of write transactions (default: ACPI/DT value)");

static unsigned int max_rd_xactions;
module_param(max_rd_xactions, uint, 0644);
MODULE_PARM_DESC(max_rd_xactions,
	"maximum number of read transactions (default: ACPI/DT value)");
int hidma_mgmt_setup(struct hidma_mgmt_dev *mgmtdev)
{
	unsigned int i;
	u32 val;

	if (!is_power_of_2(mgmtdev->max_write_request) ||
	    (mgmtdev->max_write_request < 128) ||
	    (mgmtdev->max_write_request > 1024)) {
		dev_err(&mgmtdev->pdev->dev, "invalid write request %d\n",
			mgmtdev->max_write_request);
		return -EINVAL;
	}

	if (!is_power_of_2(mgmtdev->max_read_request) ||
	    (mgmtdev->max_read_request < 128) ||
	    (mgmtdev->max_read_request > 1024)) {
		dev_err(&mgmtdev->pdev->dev, "invalid read request %d\n",
			mgmtdev->max_read_request);
		return -EINVAL;
	}

	if (mgmtdev->max_wr_xactions > HIDMA_MAX_WR_XACTIONS_MASK) {
		dev_err(&mgmtdev->pdev->dev,
			"max_wr_xactions cannot be bigger than %ld\n",
			HIDMA_MAX_WR_XACTIONS_MASK);
		return -EINVAL;
	}

	if (mgmtdev->max_rd_xactions > HIDMA_MAX_RD_XACTIONS_MASK) {
		dev_err(&mgmtdev->pdev->dev,
			"max_rd_xactions cannot be bigger than %ld\n",
			HIDMA_MAX_RD_XACTIONS_MASK);
		return -EINVAL;
	}

	for (i = 0; i < mgmtdev->dma_channels; i++) {
		if (mgmtdev->priority[i] > 1) {
			dev_err(&mgmtdev->pdev->dev,
				"priority can be 0 or 1\n");
			return -EINVAL;
		}

		if (mgmtdev->weight[i] > HIDMA_MAX_CHANNEL_WEIGHT) {
			dev_err(&mgmtdev->pdev->dev,
				"max value of weight can be %d.\n",
				HIDMA_MAX_CHANNEL_WEIGHT);
			return -EINVAL;
		}

		/* weight needs to be at least one */
		if (mgmtdev->weight[i] == 0)
			mgmtdev->weight[i] = 1;
	}
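
	/*
	 * Per the masks and bit positions above, the bus request length
	 * register carries the write limit in its upper 16 bits and the
	 * read limit in its lower 16 bits; the transaction-count register
	 * is split the same way at bit 16.
	 */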
	pm_runtime_get_sync(&mgmtdev->pdev->dev);
	val = readl(mgmtdev->virtaddr + HIDMA_MAX_BUS_REQ_LEN_OFFSET);
	val &= ~(HIDMA_MAX_BUS_REQ_LEN_MASK << HIDMA_MAX_BUS_WR_REQ_BIT_POS);
	val |= mgmtdev->max_write_request << HIDMA_MAX_BUS_WR_REQ_BIT_POS;
	val &= ~HIDMA_MAX_BUS_REQ_LEN_MASK;
	val |= mgmtdev->max_read_request;
	writel(val, mgmtdev->virtaddr + HIDMA_MAX_BUS_REQ_LEN_OFFSET);

	val = readl(mgmtdev->virtaddr + HIDMA_MAX_XACTIONS_OFFSET);
	val &= ~(HIDMA_MAX_WR_XACTIONS_MASK << HIDMA_MAX_WR_XACTIONS_BIT_POS);
	val |= mgmtdev->max_wr_xactions << HIDMA_MAX_WR_XACTIONS_BIT_POS;
	val &= ~HIDMA_MAX_RD_XACTIONS_MASK;
	val |= mgmtdev->max_rd_xactions;
	writel(val, mgmtdev->virtaddr + HIDMA_MAX_XACTIONS_OFFSET);

	mgmtdev->hw_version =
	    readl(mgmtdev->virtaddr + HIDMA_HW_VERSION_OFFSET);
	mgmtdev->hw_version_major = (mgmtdev->hw_version >> 28) & 0xF;
	mgmtdev->hw_version_minor = (mgmtdev->hw_version >> 16) & 0xF;
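
	/*
	 * Each channel has its own QoS register at HIDMA_QOS_N_OFFSET + 4 * i:
	 * bit 15 selects high or low priority and bits 8..14 hold the
	 * weighted round robin weight.
	 */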
	for (i = 0; i < mgmtdev->dma_channels; i++) {
		u32 weight = mgmtdev->weight[i];
		u32 priority = mgmtdev->priority[i];

		val = readl(mgmtdev->virtaddr + HIDMA_QOS_N_OFFSET + (4 * i));
		val &= ~(1 << HIDMA_PRIORITY_BIT_POS);
		val |= (priority & 0x1) << HIDMA_PRIORITY_BIT_POS;
		val &= ~(HIDMA_WEIGHT_MASK << HIDMA_WRR_BIT_POS);
		val |= (weight & HIDMA_WEIGHT_MASK) << HIDMA_WRR_BIT_POS;
		writel(val, mgmtdev->virtaddr + HIDMA_QOS_N_OFFSET + (4 * i));
	}

	val = readl(mgmtdev->virtaddr + HIDMA_CHRESET_TIMEOUT_OFFSET);
	val &= ~HIDMA_CHRESET_TIMEOUT_MASK;
	val |= mgmtdev->chreset_timeout_cycles & HIDMA_CHRESET_TIMEOUT_MASK;
	writel(val, mgmtdev->virtaddr + HIDMA_CHRESET_TIMEOUT_OFFSET);

	pm_runtime_mark_last_busy(&mgmtdev->pdev->dev);
	pm_runtime_put_autosuspend(&mgmtdev->pdev->dev);
	return 0;
}
EXPORT_SYMBOL_GPL(hidma_mgmt_setup);
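
/*
 * The probe routine reads its configuration from ACPI or device tree
 * properties. A hypothetical DT node (illustrative only; the property
 * names below are the ones read by the probe code, the values and the
 * reg range are made up) would look roughly like:
 *
 *	hidma-mgmt@f9984000 {
 *		compatible = "qcom,hidma-mgmt-1.0";
 *		reg = <0xf9984000 0x15000>;
 *		dma-channels = <6>;
 *		max-write-burst-bytes = <1024>;
 *		max-read-burst-bytes = <1024>;
 *		max-write-transactions = <31>;
 *		max-read-transactions = <31>;
 *		channel-reset-timeout-cycles = <0x500>;
 *	};
 */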
static int hidma_mgmt_probe(struct platform_device *pdev)
{
	struct hidma_mgmt_dev *mgmtdev;
	struct resource *res;
	void __iomem *virtaddr;
	int irq;
	int rc;
	u32 val;

	pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	virtaddr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(virtaddr)) {
		rc = -ENOMEM;
		goto out;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "irq resources not found\n");
		rc = irq;
		goto out;
	}

	mgmtdev = devm_kzalloc(&pdev->dev, sizeof(*mgmtdev), GFP_KERNEL);
	if (!mgmtdev) {
		rc = -ENOMEM;
		goto out;
	}

	mgmtdev->pdev = pdev;
	mgmtdev->addrsize = resource_size(res);
	mgmtdev->virtaddr = virtaddr;

	rc = device_property_read_u32(&pdev->dev, "dma-channels",
				      &mgmtdev->dma_channels);
	if (rc) {
		dev_err(&pdev->dev, "number of channels missing\n");
		goto out;
	}

	rc = device_property_read_u32(&pdev->dev,
				      "channel-reset-timeout-cycles",
				      &mgmtdev->chreset_timeout_cycles);
	if (rc) {
		dev_err(&pdev->dev, "channel reset timeout missing\n");
		goto out;
	}

	rc = device_property_read_u32(&pdev->dev, "max-write-burst-bytes",
				      &mgmtdev->max_write_request);
	if (rc) {
		dev_err(&pdev->dev, "max-write-burst-bytes missing\n");
		goto out;
	}
	if (max_write_request &&
	    (max_write_request != mgmtdev->max_write_request)) {
		dev_info(&pdev->dev, "overriding max-write-burst-bytes: %d\n",
			 max_write_request);
		mgmtdev->max_write_request = max_write_request;
	} else
		max_write_request = mgmtdev->max_write_request;

	rc = device_property_read_u32(&pdev->dev, "max-read-burst-bytes",
				      &mgmtdev->max_read_request);
	if (rc) {
		dev_err(&pdev->dev, "max-read-burst-bytes missing\n");
		goto out;
	}
	if (max_read_request &&
	    (max_read_request != mgmtdev->max_read_request)) {
		dev_info(&pdev->dev, "overriding max-read-burst-bytes: %d\n",
			 max_read_request);
		mgmtdev->max_read_request = max_read_request;
	} else
		max_read_request = mgmtdev->max_read_request;

	rc = device_property_read_u32(&pdev->dev, "max-write-transactions",
				      &mgmtdev->max_wr_xactions);
	if (rc) {
		dev_err(&pdev->dev, "max-write-transactions missing\n");
		goto out;
	}
	if (max_wr_xactions &&
	    (max_wr_xactions != mgmtdev->max_wr_xactions)) {
		dev_info(&pdev->dev, "overriding max-write-transactions: %d\n",
			 max_wr_xactions);
		mgmtdev->max_wr_xactions = max_wr_xactions;
	} else
		max_wr_xactions = mgmtdev->max_wr_xactions;

	rc = device_property_read_u32(&pdev->dev, "max-read-transactions",
				      &mgmtdev->max_rd_xactions);
	if (rc) {
		dev_err(&pdev->dev, "max-read-transactions missing\n");
		goto out;
	}
	if (max_rd_xactions &&
	    (max_rd_xactions != mgmtdev->max_rd_xactions)) {
		dev_info(&pdev->dev, "overriding max-read-transactions: %d\n",
			 max_rd_xactions);
		mgmtdev->max_rd_xactions = max_rd_xactions;
	} else
		max_rd_xactions = mgmtdev->max_rd_xactions;
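
	/*
	 * The per-channel priority and weight arrays default to zero here;
	 * hidma_mgmt_setup() bumps a zero weight up to one, and the sysfs
	 * nodes created by hidma_mgmt_init_sys() are expected to let these
	 * values be tuned per channel at runtime.
	 */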
	mgmtdev->priority = devm_kcalloc(&pdev->dev,
					 mgmtdev->dma_channels,
					 sizeof(*mgmtdev->priority),
					 GFP_KERNEL);
	if (!mgmtdev->priority) {
		rc = -ENOMEM;
		goto out;
	}

	mgmtdev->weight = devm_kcalloc(&pdev->dev,
				       mgmtdev->dma_channels,
				       sizeof(*mgmtdev->weight), GFP_KERNEL);
	if (!mgmtdev->weight) {
		rc = -ENOMEM;
		goto out;
	}

	rc = hidma_mgmt_setup(mgmtdev);
	if (rc) {
		dev_err(&pdev->dev, "setup failed\n");
		goto out;
	}

	/* start the HW */
	val = readl(mgmtdev->virtaddr + HIDMA_CFG_OFFSET);
	val |= 1;
	writel(val, mgmtdev->virtaddr + HIDMA_CFG_OFFSET);

	rc = hidma_mgmt_init_sys(mgmtdev);
	if (rc) {
		dev_err(&pdev->dev, "sysfs setup failed\n");
		goto out;
	}

	dev_info(&pdev->dev,
		 "HW rev: %d.%d @ %pa with %d physical channels\n",
		 mgmtdev->hw_version_major, mgmtdev->hw_version_minor,
		 &res->start, mgmtdev->dma_channels);

	platform_set_drvdata(pdev, mgmtdev);
	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);
	return 0;
out:
	pm_runtime_put_sync_suspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return rc;
}

#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id hidma_mgmt_acpi_ids[] = {
	{"QCOM8060"},
	{},
};
MODULE_DEVICE_TABLE(acpi, hidma_mgmt_acpi_ids);
#endif

static const struct of_device_id hidma_mgmt_match[] = {
	{.compatible = "qcom,hidma-mgmt-1.0",},
	{},
};
MODULE_DEVICE_TABLE(of, hidma_mgmt_match);

static struct platform_driver hidma_mgmt_driver = {
	.probe = hidma_mgmt_probe,
	.driver = {
		   .name = "hidma-mgmt",
		   .of_match_table = hidma_mgmt_match,
		   .acpi_match_table = ACPI_PTR(hidma_mgmt_acpi_ids),
	},
};

#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
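/*
 * Each child node of the management node describes one DMA channel and is
 * registered below as its own platform device; object_counter provides a
 * unique device id for each of them.
 */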
static int object_counter;

static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
{
	struct platform_device *pdev_parent = of_find_device_by_node(np);
	struct platform_device_info pdevinfo;
	struct device_node *child;
	struct resource *res;
	int ret = 0;

	/* allocate a resource array */
	res = kcalloc(3, sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	for_each_available_child_of_node(np, child) {
		struct platform_device *new_pdev;

		/* of_address_to_resource() returns 0 on success */
		ret = of_address_to_resource(child, 0, &res[0]);
		if (ret)
			goto out;

		ret = of_address_to_resource(child, 1, &res[1]);
		if (ret)
			goto out;

		ret = of_irq_to_resource(child, 0, &res[2]);
		if (ret <= 0)
			goto out;

		memset(&pdevinfo, 0, sizeof(pdevinfo));
		pdevinfo.fwnode = &child->fwnode;
		pdevinfo.parent = pdev_parent ? &pdev_parent->dev : NULL;
		pdevinfo.name = child->name;
		pdevinfo.id = object_counter++;
		pdevinfo.res = res;
		pdevinfo.num_res = 3;
		pdevinfo.data = NULL;
		pdevinfo.size_data = 0;
		pdevinfo.dma_mask = DMA_BIT_MASK(64);
		new_pdev = platform_device_register_full(&pdevinfo);
		if (IS_ERR(new_pdev)) {
			ret = PTR_ERR(new_pdev);
			goto out;
		}
		of_node_get(child);
		new_pdev->dev.of_node = child;
		of_dma_configure(&new_pdev->dev, child);
		/*
		 * It is assumed that calling of_msi_configure is safe on
		 * platforms with or without MSI support.
		 */
		of_msi_configure(&new_pdev->dev, child);
		of_node_put(child);
	}
out:
	kfree(res);

	return ret;
}
#endif

static int __init hidma_mgmt_init(void)
{
#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
	struct device_node *child;

	for_each_matching_node(child, hidma_mgmt_match) {
		/* device tree based firmware here */
		hidma_mgmt_of_populate_channels(child);
	}
#endif
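	/*
	 * The return value of platform_driver_register() is not checked
	 * here; registration is assumed to succeed and init always
	 * reports success.
	 */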
	platform_driver_register(&hidma_mgmt_driver);

	return 0;
}
module_init(hidma_mgmt_init);

MODULE_LICENSE("GPL v2");