/*
 * Platform driver for the Synopsys DesignWare DMA Controller
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 * Copyright (C) 2013 Intel Corporation
 *
 * Some parts of this driver are derived from the original dw_dmac.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>

#include "internal.h"
#define DRV_NAME	"dw_dmac"
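
/*
 * Translate a three-cell DT DMA specifier into a channel. An illustrative
 * client node (the cell values are examples only) would carry:
 *
 *	dmas = <&dmahost 12 0 1>;
 *
 * where the cells map to the request line (args[0], used as both src_id
 * and dst_id), the memory-side master (args[1]) and the peripheral-side
 * master (args[2]).
 */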
static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
					struct of_dma *ofdma)
{
	struct dw_dma *dw = ofdma->of_dma_data;
	struct dw_dma_slave slave = {
		.dma_dev = dw->dma.dev,
	};
	dma_cap_mask_t cap;

	if (dma_spec->args_count != 3)
		return NULL;

	slave.src_id = dma_spec->args[0];
	slave.dst_id = dma_spec->args[0];
	slave.m_master = dma_spec->args[1];
	slave.p_master = dma_spec->args[2];

	if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS ||
		    slave.dst_id >= DW_DMA_MAX_NR_REQUESTS ||
		    slave.m_master >= dw->pdata->nr_masters ||
		    slave.p_master >= dw->pdata->nr_masters))
		return NULL;

	dma_cap_zero(cap);
	dma_cap_set(DMA_SLAVE, cap);

	/* TODO: there should be a simpler way to do this */
	return dma_request_channel(cap, dw_dma_filter, &slave);
}
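
/*
 * On ACPI platforms the channel is described by a FixedDMA resource; the
 * same request line is used for both directions, so slave_id is copied
 * into src_id and dst_id before delegating to dw_dma_filter().
 */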
#ifdef CONFIG_ACPI
static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
{
	struct acpi_dma_spec *dma_spec = param;
	struct dw_dma_slave slave = {
		.dma_dev = dma_spec->dev,
		.src_id = dma_spec->slave_id,
		.dst_id = dma_spec->slave_id,
		.m_master = 0,
		.p_master = 1,
	};

	return dw_dma_filter(chan, &slave);
}
static void dw_dma_acpi_controller_register(struct dw_dma *dw)
{
	struct device *dev = dw->dma.dev;
	struct acpi_dma_filter_info *info;
	int ret;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return;

	dma_cap_zero(info->dma_cap);
	dma_cap_set(DMA_SLAVE, info->dma_cap);
	info->filter_fn = dw_dma_acpi_filter;

	ret = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate,
						info);
	if (ret)
		dev_err(dev, "could not register acpi_dma_controller\n");
}
#else /* !CONFIG_ACPI */
static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {}
#endif /* !CONFIG_ACPI */
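
/*
 * Build a dw_dma_platform_data from DT properties. Missing or out-of-range
 * mandatory properties ("dma-masters", "dma-channels") make this return
 * NULL, in which case the core driver may fall back to reading the
 * hardware autoconfiguration parameters instead.
 */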
#ifdef CONFIG_OF
static struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct dw_dma_platform_data *pdata;
	u32 tmp, arr[DW_DMA_MAX_NR_MASTERS], mb[DW_DMA_MAX_NR_CHANNELS];
	u32 nr_masters;
	u32 nr_channels;

	if (!np) {
		dev_err(&pdev->dev, "Missing DT data\n");
		return NULL;
	}

	if (of_property_read_u32(np, "dma-masters", &nr_masters))
		return NULL;
	if (nr_masters < 1 || nr_masters > DW_DMA_MAX_NR_MASTERS)
		return NULL;

	if (of_property_read_u32(np, "dma-channels", &nr_channels))
		return NULL;
	if (nr_channels > DW_DMA_MAX_NR_CHANNELS)
		return NULL;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	pdata->nr_masters = nr_masters;
	pdata->nr_channels = nr_channels;

	if (of_property_read_bool(np, "is_private"))
		pdata->is_private = true;

	/*
	 * All known devices, which use DT for configuration, support
	 * memory-to-memory transfers. So enable it by default.
	 */
	pdata->is_memcpy = true;

	if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
		pdata->chan_allocation_order = (unsigned char)tmp;

	if (!of_property_read_u32(np, "chan_priority", &tmp))
		pdata->chan_priority = tmp;

	if (!of_property_read_u32(np, "block_size", &tmp))
		pdata->block_size = tmp;

	/* "data-width" is in bytes; legacy "data_width" encodes log2(bytes) */
	if (!of_property_read_u32_array(np, "data-width", arr, nr_masters)) {
		for (tmp = 0; tmp < nr_masters; tmp++)
			pdata->data_width[tmp] = arr[tmp];
	} else if (!of_property_read_u32_array(np, "data_width", arr, nr_masters)) {
		for (tmp = 0; tmp < nr_masters; tmp++)
			pdata->data_width[tmp] = BIT(arr[tmp] & 0x07);
	}

	if (!of_property_read_u32_array(np, "multi-block", mb, nr_channels)) {
		for (tmp = 0; tmp < nr_channels; tmp++)
			pdata->multi_block[tmp] = mb[tmp];
	} else {
		for (tmp = 0; tmp < nr_channels; tmp++)
			pdata->multi_block[tmp] = 1;
	}

	return pdata;
}
#else
static inline struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
	return NULL;
}
#endif
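
/*
 * Probe order: map resources, set the DMA mask, pick up platform data (or
 * parse it from DT), enable the "hclk" interface clock, then hand the chip
 * off to the shared dw_dma_probe() core before registering the OF/ACPI
 * translation helpers.
 */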
static int dw_probe(struct platform_device *pdev)
{
	struct dw_dma_chip *chip;
	struct device *dev = &pdev->dev;
	struct resource *mem;
	const struct dw_dma_platform_data *pdata;
	int err;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->irq = platform_get_irq(pdev, 0);
	if (chip->irq < 0)
		return chip->irq;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	chip->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		return err;

	/* Prefer platform data; fall back to device tree properties */
	pdata = dev_get_platdata(dev);
	if (!pdata)
		pdata = dw_dma_parse_dt(pdev);

	chip->dev = dev;
	chip->pdata = pdata;

	chip->clk = devm_clk_get(chip->dev, "hclk");
	if (IS_ERR(chip->clk))
		return PTR_ERR(chip->clk);
	err = clk_prepare_enable(chip->clk);
	if (err)
		return err;

	pm_runtime_enable(&pdev->dev);

	err = dw_dma_probe(chip);
	if (err)
		goto err_dw_dma_probe;

	platform_set_drvdata(pdev, chip);

	if (pdev->dev.of_node) {
		err = of_dma_controller_register(pdev->dev.of_node,
						 dw_dma_of_xlate, chip->dw);
		if (err)
			dev_dbg(&pdev->dev,
				"could not register of_dma_controller\n");
	}

	if (ACPI_HANDLE(&pdev->dev))
		dw_dma_acpi_controller_register(chip->dw);

	return 0;

err_dw_dma_probe:
	pm_runtime_disable(&pdev->dev);
	clk_disable_unprepare(chip->clk);
	return err;
}
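
/* Tear down in reverse order of what dw_probe() set up. */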
static int dw_remove(struct platform_device *pdev)
{
	struct dw_dma_chip *chip = platform_get_drvdata(pdev);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	dw_dma_remove(chip);
	pm_runtime_disable(&pdev->dev);
	clk_disable_unprepare(chip->clk);

	return 0;
}
static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma_chip *chip = platform_get_drvdata(pdev);

	/*
	 * We have to call dw_dma_disable() to stop any ongoing transfer. On
	 * some platforms we can't do that since DMA device is powered off.
	 * Moreover we have no possibility to check if the platform is affected
	 * or not. That's why we call pm_runtime_get_sync() / pm_runtime_put()
	 * unconditionally. On the other hand we can't use
	 * pm_runtime_suspended() because runtime PM framework is not fully
	 * used by the driver.
	 */
	pm_runtime_get_sync(chip->dev);
	dw_dma_disable(chip);
	pm_runtime_put_sync_suspend(chip->dev);

	clk_disable_unprepare(chip->clk);
}
#ifdef CONFIG_OF
static const struct of_device_id dw_dma_of_id_table[] = {
	{ .compatible = "snps,dma-spear1340" },
	{}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
#endif
#ifdef CONFIG_ACPI
static const struct acpi_device_id dw_dma_acpi_id_table[] = {
	{ "INTL9C60", 0 },
	{ }
};
MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table);
#endif
#ifdef CONFIG_PM_SLEEP
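
/*
 * System sleep: quiesce the controller in the late suspend phase and
 * re-enable it in the early resume phase, bracketing the clock gating.
 */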
static int dw_suspend_late(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma_chip *chip = platform_get_drvdata(pdev);

	dw_dma_disable(chip);
	clk_disable_unprepare(chip->clk);

	return 0;
}
static int dw_resume_early(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma_chip *chip = platform_get_drvdata(pdev);

	clk_prepare_enable(chip->clk);
	return dw_dma_enable(chip);
}
#endif /* CONFIG_PM_SLEEP */
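
/*
 * SET_LATE_SYSTEM_SLEEP_PM_OPS() expands to nothing when CONFIG_PM_SLEEP is
 * disabled, which is why the callbacks above sit under the same #ifdef.
 */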
static const struct dev_pm_ops dw_dev_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(dw_suspend_late, dw_resume_early)
};
static struct platform_driver dw_driver = {
	.probe		= dw_probe,
	.remove		= dw_remove,
	.shutdown	= dw_shutdown,
	.driver = {
		.name	= DRV_NAME,
		.pm	= &dw_dev_pm_ops,
		.of_match_table = of_match_ptr(dw_dma_of_id_table),
		.acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
	},
};
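
/*
 * Register at subsys_initcall() rather than module_init() so the DMA
 * controller is available before client drivers start to probe.
 */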
static int __init dw_init(void)
{
	return platform_driver_register(&dw_driver);
}
subsys_initcall(dw_init);
static void __exit dw_exit(void)
{
	platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver");
MODULE_ALIAS("platform:" DRV_NAME);