/* linux/arch/arm/mach-msm/dma.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <mach/dma.h>

#define MSM_DMOV_CHANNEL_COUNT 16
#define MSM_DMOV_MAX_ADMS 2

#define MODULE_NAME "msm_dmov"
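
/*
 * Debug print levels.  msm_dmov_print_mask selects which of the PRINT_*
 * macros below actually produce output; only errors are printed by default.
 */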
enum {
	MSM_DMOV_PRINT_ERRORS = 1,
	MSM_DMOV_PRINT_IO = 2,
	MSM_DMOV_PRINT_FLOW = 4
};

unsigned int msm_dmov_print_mask = MSM_DMOV_PRINT_ERRORS;

struct msm_dmov_conf {
	void *base;
	struct list_head ready_commands[MSM_DMOV_CHANNEL_COUNT];
	struct list_head active_commands[MSM_DMOV_CHANNEL_COUNT];
	unsigned int channel_active;
	struct clk *clk;
	struct clk *pclk;
	spinlock_t lock;
	unsigned int irq;
};

static struct msm_dmov_conf dmov_conf[MSM_DMOV_MAX_ADMS];

#if defined(CONFIG_ARCH_MSM7X30)
#define DMOV_SD_AARM DMOV_SD2
#define DMOV_SD_SIZE 0x400
#elif defined(CONFIG_ARCH_MSM8X60)
#define DMOV_SD_AARM DMOV_SD1
#define DMOV_SD_SIZE 0x800
#elif defined(CONFIG_ARCH_MSM8960)
#define DMOV_SD_AARM DMOV_SD0
#define DMOV_SD_SIZE 0x800
#else
#define DMOV_SD_AARM DMOV_SD3
#define DMOV_SD_SIZE 0x400
#endif

#define DMOV_ADDR(sd, off, ch) (((sd) * DMOV_SD_SIZE) + (off) + ((ch) << 2))

#define DMOV_SD0(off, ch) DMOV_ADDR(0, off, ch)
#define DMOV_SD1(off, ch) DMOV_ADDR(1, off, ch)
#define DMOV_SD2(off, ch) DMOV_ADDR(2, off, ch)
#define DMOV_SD3(off, ch) DMOV_ADDR(3, off, ch)

#define DMOV_CMD_PTR(ch)	DMOV_SD_AARM(0x000, ch)
#define DMOV_RSLT(ch)		DMOV_SD_AARM(0x040, ch)
#define DMOV_FLUSH0(ch)		DMOV_SD_AARM(0x080, ch)
#define DMOV_FLUSH1(ch)		DMOV_SD_AARM(0x0C0, ch)
#define DMOV_FLUSH2(ch)		DMOV_SD_AARM(0x100, ch)
#define DMOV_FLUSH3(ch)		DMOV_SD_AARM(0x140, ch)
#define DMOV_FLUSH4(ch)		DMOV_SD_AARM(0x180, ch)
#define DMOV_FLUSH5(ch)		DMOV_SD_AARM(0x1C0, ch)
#define DMOV_STATUS(ch)		DMOV_SD_AARM(0x200, ch)
#define DMOV_CONFIG(ch)		DMOV_SD_AARM(0x300, ch)
#define DMOV_ISR		DMOV_SD_AARM(0x380, 0)

#define MSM_DMOV_DPRINTF(mask, format, args...) \
	do { \
		if ((mask) & msm_dmov_print_mask) \
			printk(KERN_ERR format, args); \
	} while (0)
#define PRINT_ERROR(format, args...) \
	MSM_DMOV_DPRINTF(MSM_DMOV_PRINT_ERRORS, format, args);
#define PRINT_IO(format, args...) \
	MSM_DMOV_DPRINTF(MSM_DMOV_PRINT_IO, format, args);
#define PRINT_FLOW(format, args...) \
	MSM_DMOV_DPRINTF(MSM_DMOV_PRINT_FLOW, format, args);

static inline unsigned dmov_readl(unsigned addr, int adm)
{
	return readl(dmov_conf[adm].base + addr);
}

static inline void dmov_writel(unsigned val, unsigned addr, int adm)
{
	writel(val, dmov_conf[adm].base + addr);
}
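
/*
 * A DMOV "id" encodes both the ADM instance and the channel within it:
 * adm = id / MSM_DMOV_CHANNEL_COUNT, ch = id % MSM_DMOV_CHANNEL_COUNT.
 */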
#define DMOV_ID_TO_ADM(id)   ((id) / MSM_DMOV_CHANNEL_COUNT)
#define DMOV_ID_TO_CHAN(id)  ((id) % MSM_DMOV_CHANNEL_COUNT)
#define DMOV_CHAN_ADM_TO_ID(ch, adm) ((ch) + (adm) * MSM_DMOV_CHANNEL_COUNT)
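
/*
 * Stop whatever is running on the channel selected by "id".  The graceful
 * flag is written to bit 31 of the channel's FLUSH0 register.
 */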
int msm_dmov_stop_cmd(unsigned id, struct msm_dmov_cmd *cmd, int graceful)
{
	int adm = DMOV_ID_TO_ADM(id);
	int ch = DMOV_ID_TO_CHAN(id);

	if (!dmov_conf[adm].base)
		return -ENODEV;

	dmov_writel((graceful << 31), DMOV_FLUSH0(ch), adm);
	return 0;
}
EXPORT_SYMBOL(msm_dmov_stop_cmd);

static int msm_dmov_clocks_on(int adm)
{
	int ret = 0;

	if (!IS_ERR(dmov_conf[adm].clk)) {
		ret = clk_enable(dmov_conf[adm].clk);
		if (ret)
			return ret;
	}
	if (!IS_ERR(dmov_conf[adm].pclk)) {
		ret = clk_enable(dmov_conf[adm].pclk);
		if (ret)
			clk_disable(dmov_conf[adm].clk);
	}
	return ret;
}

static void msm_dmov_clocks_off(int adm)
{
	if (!IS_ERR(dmov_conf[adm].clk))
		clk_disable(dmov_conf[adm].clk);
	if (!IS_ERR(dmov_conf[adm].pclk))
		clk_disable(dmov_conf[adm].pclk);
}
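
/*
 * Queue a command on the channel selected by "id".  If the channel's ready
 * list is empty and the hardware reports CMD_PTR_RDY, the command pointer is
 * written to the hardware immediately; otherwise the command is parked on the
 * ready list and issued from the IRQ handler once the channel frees up.
 */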
int msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd)
{
	unsigned long irq_flags;
	unsigned int status;
	int adm = DMOV_ID_TO_ADM(id);
	int ch = DMOV_ID_TO_CHAN(id);

	if (!dmov_conf[adm].base)
		return -ENODEV;

	spin_lock_irqsave(&dmov_conf[adm].lock, irq_flags);
	if (!dmov_conf[adm].channel_active)
		msm_dmov_clocks_on(adm);

	status = dmov_readl(DMOV_STATUS(ch), adm);
	if (list_empty(&dmov_conf[adm].ready_commands[ch]) &&
	    (status & DMOV_STATUS_CMD_PTR_RDY)) {
		if (cmd->execute_func)
			cmd->execute_func(cmd);
		PRINT_IO("msm_dmov_enqueue_cmd(%d), start command, status %x\n",
			id, status);
		list_add_tail(&cmd->list, &dmov_conf[adm].active_commands[ch]);
		if (!dmov_conf[adm].channel_active)
			enable_irq(dmov_conf[adm].irq);
		dmov_conf[adm].channel_active |= 1U << ch;
		dmov_writel(cmd->cmdptr, DMOV_CMD_PTR(ch), adm);
	} else {
		if (!dmov_conf[adm].channel_active)
			msm_dmov_clocks_off(adm);
		if (list_empty(&dmov_conf[adm].active_commands[ch]))
			PRINT_ERROR("msm_dmov_enqueue_cmd(%d), error datamover "
				"stalled, status %x\n", id, status);
		PRINT_IO("msm_dmov_enqueue_cmd(%d), enqueue command, status "
			"%x\n", id, status);
		list_add_tail(&cmd->list, &dmov_conf[adm].ready_commands[ch]);
	}
	spin_unlock_irqrestore(&dmov_conf[adm].lock, irq_flags);
	return 0;
}
EXPORT_SYMBOL(msm_dmov_enqueue_cmd);
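
/* Request a graceful flush of whatever is active on the channel for "id". */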
int msm_dmov_flush(unsigned int id)
{
	unsigned long flags;
	int ch = DMOV_ID_TO_CHAN(id);
	int adm = DMOV_ID_TO_ADM(id);

	if (!dmov_conf[adm].base)
		return -ENODEV;

	spin_lock_irqsave(&dmov_conf[adm].lock, flags);
	/* XXX not checking if flush cmd sent already */
	if (!list_empty(&dmov_conf[adm].active_commands[ch])) {
		PRINT_IO("msm_dmov_flush(%d), send flush cmd\n", id);
		dmov_writel(DMOV_FLUSH_GRACEFUL, DMOV_FLUSH0(ch), adm);
	}
	spin_unlock_irqrestore(&dmov_conf[adm].lock, flags);
	return 0;
}
EXPORT_SYMBOL(msm_dmov_flush);

struct msm_dmov_exec_cmdptr_cmd {
	struct msm_dmov_cmd dmov_cmd;
	struct completion complete;
	unsigned id;
	unsigned int result;
	struct msm_dmov_errdata err;
};

static void
dmov_exec_cmdptr_complete_func(struct msm_dmov_cmd *_cmd,
			       unsigned int result,
			       struct msm_dmov_errdata *err)
{
	struct msm_dmov_exec_cmdptr_cmd *cmd = container_of(_cmd,
		struct msm_dmov_exec_cmdptr_cmd, dmov_cmd);

	cmd->result = result;
	if (result != 0x80000002 && err)
		memcpy(&cmd->err, err, sizeof(struct msm_dmov_errdata));

	complete(&cmd->complete);
}
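
/*
 * Synchronous wrapper around msm_dmov_enqueue_cmd(): build a temporary
 * command on the stack, queue it, and sleep on a completion until the IRQ
 * handler reports a result (0x80000002 is treated as success throughout
 * this file).
 */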
int msm_dmov_exec_cmd(unsigned id, unsigned int cmdptr)
{
	struct msm_dmov_exec_cmdptr_cmd cmd;
	int adm = DMOV_ID_TO_ADM(id);

	if (!dmov_conf[adm].base)
		return -ENODEV;

	PRINT_FLOW("dmov_exec_cmdptr(%d, %x)\n", id, cmdptr);

	cmd.dmov_cmd.cmdptr = cmdptr;
	cmd.dmov_cmd.complete_func = dmov_exec_cmdptr_complete_func;
	cmd.dmov_cmd.execute_func = NULL;
	cmd.id = id;
	init_completion(&cmd.complete);

	msm_dmov_enqueue_cmd(id, &cmd.dmov_cmd);
	wait_for_completion(&cmd.complete);

	if (cmd.result != 0x80000002) {
		PRINT_ERROR("dmov_exec_cmdptr(%d): ERROR, result: %x\n",
			id, cmd.result);
		PRINT_ERROR("dmov_exec_cmdptr(%d): flush: %x %x %x %x\n",
			id, cmd.err.flush[0], cmd.err.flush[1],
			cmd.err.flush[2], cmd.err.flush[3]);
		return -EIO;
	}
	PRINT_FLOW("dmov_exec_cmdptr(%d, %x) done\n", id, cmdptr);
	return 0;
}
EXPORT_SYMBOL(msm_dmov_exec_cmd);

static void msm_dmov_fill_errdata(struct msm_dmov_errdata *errdata, int ch,
				  int adm)
{
	errdata->flush[0] = dmov_readl(DMOV_FLUSH0(ch), adm);
	errdata->flush[1] = dmov_readl(DMOV_FLUSH1(ch), adm);
	errdata->flush[2] = dmov_readl(DMOV_FLUSH2(ch), adm);
	errdata->flush[3] = dmov_readl(DMOV_FLUSH3(ch), adm);
	errdata->flush[4] = dmov_readl(DMOV_FLUSH4(ch), adm);
	errdata->flush[5] = dmov_readl(DMOV_FLUSH5(ch), adm);
}

static int msm_dmov_irq_to_adm(unsigned irq)
{
	int i, nr_adms = ARRAY_SIZE(dmov_conf);

	for (i = 0; i < nr_adms; i++)
		if (dmov_conf[i].irq == irq)
			return i;

	PRINT_ERROR("msm_dmov_irq_to_adm: can't match ADM to IRQ %d\n", irq);
	return -EINVAL;
}
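
/*
 * One interrupt line per ADM.  DMOV_ISR gives a bitmask of channels with
 * pending results; each set bit is serviced in turn, draining that channel's
 * results and issuing the next ready command, if any.  When no channel is
 * left active the IRQ is masked again and the clocks are released.
 */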
static irqreturn_t msm_datamover_irq_handler(int irq, void *dev_id)
{
	unsigned int int_status, mask, id;
	unsigned long irq_flags;
	int ch;
	unsigned int ch_status;
	unsigned int ch_result;
	struct msm_dmov_cmd *cmd;
	int adm = msm_dmov_irq_to_adm(irq);

	spin_lock_irqsave(&dmov_conf[adm].lock, irq_flags);

	int_status = dmov_readl(DMOV_ISR, adm); /* read and clear interrupt */
	PRINT_FLOW("msm_datamover_irq_handler: DMOV_ISR %x\n", int_status);

	while (int_status) {
		mask = int_status & -int_status;
		ch = fls(mask) - 1;
		id = DMOV_CHAN_ADM_TO_ID(ch, adm);
		PRINT_FLOW("msm_datamover_irq_handler %08x %08x id %d\n", int_status, mask, id);
		int_status &= ~mask;
		ch_status = dmov_readl(DMOV_STATUS(ch), adm);
		if (!(ch_status & DMOV_STATUS_RSLT_VALID)) {
			PRINT_FLOW("msm_datamover_irq_handler id %d, result not valid %x\n", id, ch_status);
			continue;
		}
		do {
			ch_result = dmov_readl(DMOV_RSLT(ch), adm);
			if (list_empty(&dmov_conf[adm].active_commands[ch])) {
				PRINT_ERROR("msm_datamover_irq_handler id %d, got result "
					"with no active command, status %x, result %x\n",
					id, ch_status, ch_result);
				cmd = NULL;
			} else {
				cmd = list_entry(dmov_conf[adm].
					active_commands[ch].next, typeof(*cmd),
					list);
			}
			PRINT_FLOW("msm_datamover_irq_handler id %d, status %x,"
				" result %x\n", id, ch_status, ch_result);
			if (ch_result & DMOV_RSLT_DONE) {
				PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n",
					id, ch_status);
				PRINT_IO("msm_datamover_irq_handler id %d, got result "
					"for %p, result %x\n", id, cmd, ch_result);
				if (cmd) {
					list_del(&cmd->list);
					cmd->complete_func(cmd, ch_result, NULL);
				}
			}
			if (ch_result & DMOV_RSLT_FLUSH) {
				struct msm_dmov_errdata errdata;

				msm_dmov_fill_errdata(&errdata, ch, adm);
				PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
				PRINT_FLOW("msm_datamover_irq_handler id %d, flush, result %x, flush0 %x\n", id, ch_result, errdata.flush[0]);
				if (cmd) {
					list_del(&cmd->list);
					cmd->complete_func(cmd, ch_result, &errdata);
				}
			}
			if (ch_result & DMOV_RSLT_ERROR) {
				struct msm_dmov_errdata errdata;

				msm_dmov_fill_errdata(&errdata, ch, adm);
				PRINT_ERROR("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
				PRINT_ERROR("msm_datamover_irq_handler id %d, error, result %x, flush0 %x\n", id, ch_result, errdata.flush[0]);
				if (cmd) {
					list_del(&cmd->list);
					cmd->complete_func(cmd, ch_result, &errdata);
				}
				/* this does not seem to work, once we get an error */
				/* the datamover will no longer accept commands */
				dmov_writel(0, DMOV_FLUSH0(ch), adm);
			}
			ch_status = dmov_readl(DMOV_STATUS(ch), adm);
			PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
			if ((ch_status & DMOV_STATUS_CMD_PTR_RDY) &&
			    !list_empty(&dmov_conf[adm].ready_commands[ch])) {
				cmd = list_entry(dmov_conf[adm].
					ready_commands[ch].next, typeof(*cmd),
					list);
				list_del(&cmd->list);
				list_add_tail(&cmd->list, &dmov_conf[adm].
					active_commands[ch]);
				if (cmd->execute_func)
					cmd->execute_func(cmd);
				PRINT_FLOW("msm_datamover_irq_handler id %d, start command\n", id);
				dmov_writel(cmd->cmdptr, DMOV_CMD_PTR(ch), adm);
			}
		} while (ch_status & DMOV_STATUS_RSLT_VALID);
		if (list_empty(&dmov_conf[adm].active_commands[ch]) &&
		    list_empty(&dmov_conf[adm].ready_commands[ch]))
			dmov_conf[adm].channel_active &= ~(1U << ch);
		PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
	}

	if (!dmov_conf[adm].channel_active) {
		disable_irq_nosync(dmov_conf[adm].irq);
		msm_dmov_clocks_off(adm);
	}

	spin_unlock_irqrestore(&dmov_conf[adm].lock, irq_flags);
	return IRQ_HANDLED;
}

static void __init msm_dmov_deinit_clocks(int adm)
{
	if (!IS_ERR(dmov_conf[adm].clk))
		clk_put(dmov_conf[adm].clk);
	if (!IS_ERR(dmov_conf[adm].pclk))
		clk_put(dmov_conf[adm].pclk);
}
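
/* A platform device id of -1 means the one and only ADM, i.e. ADM 0. */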
#define PDEV_TO_ADM(pdev) \
({ \
	typeof(pdev) _pdev = pdev; \
	(_pdev->id == -1) ? 0 : _pdev->id; \
})

static int __devinit msm_dmov_init_clocks(struct platform_device *pdev)
{
	int ret = 0;
	int adm = PDEV_TO_ADM(pdev);

	dmov_conf[adm].clk = clk_get(&pdev->dev, "adm_clk");
	if (IS_ERR(dmov_conf[adm].clk)) {
		PRINT_ERROR("%s: Error getting adm_clk\n", __func__);
		ret = PTR_ERR(dmov_conf[adm].clk);
	}

	dmov_conf[adm].pclk = clk_get(&pdev->dev, "adm_pclk");
	/* pclk not present on all SoCs, don't return error on failure */

	return ret;
}

static int __devinit msm_dmov_conf_init(struct platform_device *pdev)
{
	int i;
	int adm = PDEV_TO_ADM(pdev);
	struct resource *irqres =
		platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	struct resource *memres =
		platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!irqres || !memres || !irqres->start)
		return -ENODEV;

	dmov_conf[adm].irq = irqres->start;

	dmov_conf[adm].base =
		ioremap_nocache(memres->start, resource_size(memres));
	if (!dmov_conf[adm].base)
		return -ENOMEM;

	dmov_conf[adm].lock = __SPIN_LOCK_UNLOCKED(dmov_lock);
	for (i = 0; i < MSM_DMOV_CHANNEL_COUNT; i++) {
		INIT_LIST_HEAD(&dmov_conf[adm].ready_commands[i]);
		INIT_LIST_HEAD(&dmov_conf[adm].active_commands[i]);
	}

	return 0;
}

static inline void __devinit msm_dmov_conf_free(int adm)
{
	iounmap(dmov_conf[adm].base);
	dmov_conf[adm].base = NULL;
}
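
/*
 * Probe: map registers and init per-channel lists, program each channel's
 * CONFIG register, grab clocks, then request (and immediately disable) the
 * IRQ -- it is re-enabled on demand in msm_dmov_enqueue_cmd().
 */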
static int __devinit msm_dmov_probe(struct platform_device *pdev)
{
	int i;
	int ret;
	int adm = PDEV_TO_ADM(pdev);

	ret = msm_dmov_conf_init(pdev);
	if (ret)
		return ret;

	for (i = 0; i < MSM_DMOV_CHANNEL_COUNT; i++)
		dmov_writel(DMOV_CONFIG_IRQ_EN | DMOV_CONFIG_FORCE_TOP_PTR_RSLT
			| DMOV_CONFIG_FORCE_FLUSH_RSLT, DMOV_CONFIG(i), adm);

	ret = msm_dmov_init_clocks(pdev);
	if (ret)
		goto out_conf;

	ret = request_irq(dmov_conf[adm].irq, msm_datamover_irq_handler, 0,
		"msmdatamover", NULL);
	if (ret)
		goto out_clocks;
	disable_irq(dmov_conf[adm].irq);

	return 0;

out_clocks:
	msm_dmov_deinit_clocks(adm);
out_conf:
	msm_dmov_conf_free(adm);
	return ret;
}

static struct platform_driver msm_dmov_driver = {
	.probe = msm_dmov_probe,
	.driver = {
		.name = MODULE_NAME,
		.owner = THIS_MODULE,
	},
};

static int __init msm_init_datamover(void)
{
	return platform_driver_register(&msm_dmov_driver);
}
arch_initcall(msm_init_datamover);