4 * Implements HSI low level interface driver functionality with DMA support.
6 * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved.
7 * Copyright (C) 2009 Texas Instruments, Inc.
9 * Author: Carlos Chinea <carlos.chinea@nokia.com>
10 * Author: Sebastien JAN <s-jan@ti.com>
12 * This package is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
16 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
18 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
21 #include <linux/dma-mapping.h>
22 #include "hsi_driver.h"
24 #define HSI_SYNC_WRITE 0
25 #define HSI_SYNC_READ 1
26 #define HSI_L3_TPUT 13428 /* 13428 KiB/s => ~110 Mbit/s */
/*
 * Per-direction, per-port, per-channel GDD hardware synchronization codes.
 * Indexed as [HSI_SYNC_WRITE|HSI_SYNC_READ][port - 1][channel]; the value is
 * OR-ed into the GDD CCR register to select the request line (required for
 * SSI, unused on HSI).
 */
static unsigned char hsi_sync_table[2][2][8] = {
	{
		{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08},
		{0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x00}
	}, {
		{0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17},
		{0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f}
	}
};
39 * hsi_is_dma_read_int_pending - Indicates if a DMA read interrupt is pending
40 * @hsi_ctrl - HSI controller of the GDD.
42 * Needs to be called holding the hsi_controller lock
44 * Returns true if DMA read interrupt is pending, else false
46 bool hsi_is_dma_read_int_pending(struct hsi_dev
*hsi_ctrl
)
48 void __iomem
*base
= hsi_ctrl
->base
;
49 unsigned int gdd_lch
= 0;
52 status_reg
= hsi_inl(base
, HSI_SYS_GDD_MPU_IRQ_STATUS_REG
);
53 status_reg
&= hsi_inl(base
, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG
);
57 /* Scan all enabled DMA channels */
58 for (gdd_lch
= 0; gdd_lch
< hsi_ctrl
->gdd_chan_count
; gdd_lch
++) {
59 if (!(status_reg
& HSI_GDD_LCH(gdd_lch
)))
61 for (i
= 0; i
< hsi_ctrl
->max_p
; i
++)
62 for (j
= 0; j
< hsi_ctrl
->hsi_port
[i
].max_ch
; j
++)
63 if (hsi_ctrl
->hsi_port
[i
].
64 hsi_channel
[j
].read_data
.lch
== gdd_lch
)
70 * hsi_get_free_lch - Get a free GDD(DMA) logical channel
71 * @hsi_ctrl - HSI controller of the GDD.
73 * Needs to be called holding the hsi_controller lock
75 * Returns the logical channel number, or -EBUSY if none available
77 static int hsi_get_free_lch(struct hsi_dev
*hsi_ctrl
)
79 unsigned int enable_reg
;
82 enable_reg
= hsi_inl(hsi_ctrl
->base
, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG
);
83 lch
= hsi_ctrl
->last_gdd_lch
;
84 for (i
= 0; i
< hsi_ctrl
->gdd_chan_count
; i
++) {
85 if (++lch
>= hsi_ctrl
->gdd_chan_count
)
87 if ((enable_reg
& HSI_GDD_LCH(lch
)) == 0) {
88 hsi_ctrl
->last_gdd_lch
= lch
;
96 * hsi_driver_write_dma - Program GDD [DMA] to write data from memory to
97 * the hsi channel buffer.
98 * @hsi_channel - pointer to the hsi_channel to write data to.
99 * @data - 32-bit word pointer to the data.
100 * @size - Number of 32bit words to be transfered.
102 * hsi_controller lock must be held before calling this function.
104 * Return 0 on success and < 0 on error.
106 int hsi_driver_write_dma(struct hsi_channel
*hsi_channel
, u32
* data
,
109 struct hsi_dev
*hsi_ctrl
= hsi_channel
->hsi_port
->hsi_controller
;
110 void __iomem
*base
= hsi_ctrl
->base
;
111 unsigned int port
= hsi_channel
->hsi_port
->port_number
;
112 unsigned int channel
= hsi_channel
->channel_number
;
116 dma_addr_t dest_addr
;
120 if ((size
< 1) || (data
== NULL
))
123 lch
= hsi_get_free_lch(hsi_ctrl
);
125 dev_err(hsi_ctrl
->dev
, "No free DMA channels.\n");
126 return -EBUSY
; /* No free GDD logical channels. */
128 dev_dbg(hsi_ctrl
->dev
, "Allocated DMA channel %d for write on"
129 " HSI channel %d.\n", lch
,
130 hsi_channel
->channel_number
);
133 /* NOTE: Getting a free gdd logical channel and
134 * reserve it must be done atomicaly. */
135 hsi_channel
->write_data
.lch
= lch
;
137 /* Sync is required for SSI but not for HSI */
138 sync
= hsi_sync_table
[HSI_SYNC_WRITE
][port
- 1][channel
];
140 src_addr
= dma_map_single(hsi_ctrl
->dev
, data
, size
* 4, DMA_TO_DEVICE
);
141 if (unlikely(dma_mapping_error(hsi_ctrl
->dev
, src_addr
))) {
142 dev_err(hsi_ctrl
->dev
, "Failed to create DMA write mapping.\n");
146 tmp
= HSI_SRC_SINGLE_ACCESS0
|
147 HSI_SRC_MEMORY_PORT
|
148 HSI_DST_SINGLE_ACCESS0
|
149 HSI_DST_PERIPHERAL_PORT
| HSI_DATA_TYPE_S32
;
150 hsi_outw(tmp
, base
, HSI_GDD_CSDP_REG(lch
));
152 tmp
= HSI_SRC_AMODE_POSTINC
| HSI_DST_AMODE_CONST
| sync
;
153 hsi_outw(tmp
, base
, HSI_GDD_CCR_REG(lch
));
155 hsi_outw((HSI_BLOCK_IE
| HSI_TOUT_IE
), base
, HSI_GDD_CCIR_REG(lch
));
157 if (hsi_driver_device_is_hsi(to_platform_device(hsi_ctrl
->dev
))) {
158 fifo
= hsi_fifo_get_id(hsi_ctrl
, channel
, port
);
159 if (unlikely(fifo
< 0)) {
160 dev_err(hsi_ctrl
->dev
, "No valid FIFO id for DMA "
161 "transfer to FIFO.\n");
164 /* HSI CDSA register takes a FIFO ID when copying to FIFO */
165 hsi_outl(fifo
, base
, HSI_GDD_CDSA_REG(lch
));
167 dest_addr
= hsi_ctrl
->phy_base
+ HSI_HST_BUFFER_CH_REG(port
,
169 /* SSI CDSA register always takes a 32-bit address */
170 hsi_outl(dest_addr
, base
, HSI_GDD_CDSA_REG(lch
));
173 /* HSI CSSA register takes a 32-bit address when copying from memory */
174 /* SSI CSSA register always takes a 32-bit address */
175 hsi_outl(src_addr
, base
, HSI_GDD_CSSA_REG(lch
));
176 hsi_outw(size
, base
, HSI_GDD_CEN_REG(lch
));
178 /* TODO : Need to clean interrupt status here to avoid spurious int */
180 hsi_outl_or(HSI_GDD_LCH(lch
), base
, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG
);
181 hsi_outw_or(HSI_CCR_ENABLE
, base
, HSI_GDD_CCR_REG(lch
));
187 * hsi_driver_read_dma - Program GDD [DMA] to write data to memory from
188 * the hsi channel buffer.
189 * @hsi_channel - pointer to the hsi_channel to read data from.
190 * @data - 32-bit word pointer where to store the incoming data.
191 * @size - Number of 32bit words to be transfered to the buffer.
193 * hsi_controller lock must be held before calling this function.
195 * Return 0 on success and < 0 on error.
197 int hsi_driver_read_dma(struct hsi_channel
*hsi_channel
, u32
* data
,
200 struct hsi_dev
*hsi_ctrl
= hsi_channel
->hsi_port
->hsi_controller
;
201 void __iomem
*base
= hsi_ctrl
->base
;
202 unsigned int port
= hsi_channel
->hsi_port
->port_number
;
203 unsigned int channel
= hsi_channel
->channel_number
;
207 dma_addr_t dest_addr
;
211 lch
= hsi_get_free_lch(hsi_ctrl
);
213 dev_err(hsi_ctrl
->dev
, "No free DMA channels.\n");
214 return -EBUSY
; /* No free GDD logical channels. */
216 dev_dbg(hsi_ctrl
->dev
, "Allocated DMA channel %d for read on"
217 " HSI channel %d.\n", lch
,
218 hsi_channel
->channel_number
);
221 /* When DMA is used for Rx, disable the Rx Interrupt.
222 * (else DATAAVAILLABLE event would get triggered on first
223 * received data word)
224 * (Rx interrupt might be active for polling feature)
227 if (omap_readl(0x4A05A810)) {
228 dev_err(hsi_ctrl
->dev
,
229 "READ INTERRUPT IS PENDING DMA() but still disabling %0x\n",
230 omap_readl(0x4A05A810));
233 hsi_driver_disable_read_interrupt(hsi_channel
);
236 * NOTE: Gettting a free gdd logical channel and
237 * reserve it must be done atomicaly.
239 hsi_channel
->read_data
.lch
= lch
;
241 /* Sync is required for SSI but not for HSI */
242 sync
= hsi_sync_table
[HSI_SYNC_READ
][port
- 1][channel
];
244 dest_addr
= dma_map_single(hsi_ctrl
->dev
, data
, count
* 4,
246 if (unlikely(dma_mapping_error(hsi_ctrl
->dev
, dest_addr
))) {
247 dev_err(hsi_ctrl
->dev
, "Failed to create DMA read mapping.\n");
251 tmp
= HSI_DST_SINGLE_ACCESS0
|
252 HSI_DST_MEMORY_PORT
|
253 HSI_SRC_SINGLE_ACCESS0
|
254 HSI_SRC_PERIPHERAL_PORT
| HSI_DATA_TYPE_S32
;
255 hsi_outw(tmp
, base
, HSI_GDD_CSDP_REG(lch
));
257 tmp
= HSI_DST_AMODE_POSTINC
| HSI_SRC_AMODE_CONST
| sync
;
258 hsi_outw(tmp
, base
, HSI_GDD_CCR_REG(lch
));
260 hsi_outw((HSI_BLOCK_IE
| HSI_TOUT_IE
), base
, HSI_GDD_CCIR_REG(lch
));
262 if (hsi_driver_device_is_hsi(to_platform_device(hsi_ctrl
->dev
))) {
263 fifo
= hsi_fifo_get_id(hsi_ctrl
, channel
, port
);
264 if (unlikely(fifo
< 0)) {
265 dev_err(hsi_ctrl
->dev
, "No valid FIFO id for DMA "
266 "transfer from FIFO.\n");
269 /* HSI CSSA register takes a FIFO ID when copying from FIFO */
270 hsi_outl(fifo
, base
, HSI_GDD_CSSA_REG(lch
));
272 src_addr
= hsi_ctrl
->phy_base
+ HSI_HSR_BUFFER_CH_REG(port
,
274 /* SSI CSSA register always takes a 32-bit address */
275 hsi_outl(src_addr
, base
, HSI_GDD_CSSA_REG(lch
));
278 /* HSI CDSA register takes a 32-bit address when copying to memory */
279 /* SSI CDSA register always takes a 32-bit address */
280 hsi_outl(dest_addr
, base
, HSI_GDD_CDSA_REG(lch
));
281 hsi_outw(count
, base
, HSI_GDD_CEN_REG(lch
));
283 /* TODO : Need to clean interrupt status here to avoid spurious int */
285 hsi_outl_or(HSI_GDD_LCH(lch
), base
, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG
);
286 hsi_outw_or(HSI_CCR_ENABLE
, base
, HSI_GDD_CCR_REG(lch
));
292 * hsi_driver_cancel_write_dma - Cancel an ongoing GDD [DMA] write for the
293 * specified hsi channel.
294 * @hsi_ch - pointer to the hsi_channel to cancel DMA write.
296 * hsi_controller lock must be held before calling this function.
298 * Return: -ENXIO : No DMA channel found for specified HSI channel
299 * -ECANCELED : DMA cancel success, data not transfered to TX FIFO
300 * 0 : DMA transfer is already over, data already transfered to TX FIFO
302 * Note: whatever returned value, write callback will not be called after
305 int hsi_driver_cancel_write_dma(struct hsi_channel
*hsi_ch
)
307 int lch
= hsi_ch
->write_data
.lch
;
308 unsigned int port
= hsi_ch
->hsi_port
->port_number
;
309 unsigned int channel
= hsi_ch
->channel_number
;
310 struct hsi_dev
*hsi_ctrl
= hsi_ch
->hsi_port
->hsi_controller
;
317 dev_err(&hsi_ch
->dev
->device
, "hsi_driver_cancel_write_dma( "
318 "channel %d\n", hsi_ch
->channel_number
);
321 dev_err(&hsi_ch
->dev
->device
, "No DMA channel found for HSI "
322 "channel %d\n", hsi_ch
->channel_number
);
325 ccr
= hsi_inw(hsi_ctrl
->base
, HSI_GDD_CCR_REG(lch
));
326 if (!(ccr
& HSI_CCR_ENABLE
)) {
327 dev_dbg(&hsi_ch
->dev
->device
, "Write cancel on not "
328 "enabled logical channel %d CCR REG 0x%04X\n",
331 status_reg
= hsi_inl(hsi_ctrl
->base
, HSI_SYS_GDD_MPU_IRQ_STATUS_REG
);
332 status_reg
&= hsi_inl(hsi_ctrl
->base
, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG
);
333 hsi_outw_and(~HSI_CCR_ENABLE
, hsi_ctrl
->base
, HSI_GDD_CCR_REG(lch
));
335 /* Clear CSR register by reading it, as it is cleared automaticaly */
336 /* by HW after SW read. */
337 gdd_csr
= hsi_inw(hsi_ctrl
->base
, HSI_GDD_CSR_REG(lch
));
338 hsi_outl_and(~HSI_GDD_LCH(lch
), hsi_ctrl
->base
,
339 HSI_SYS_GDD_MPU_IRQ_ENABLE_REG
);
340 hsi_outl(HSI_GDD_LCH(lch
), hsi_ctrl
->base
,
341 HSI_SYS_GDD_MPU_IRQ_STATUS_REG
);
343 /* Unmap DMA region */
344 dma_h
= hsi_inl(hsi_ctrl
->base
, HSI_GDD_CSSA_REG(lch
));
345 size
= hsi_inw(hsi_ctrl
->base
, HSI_GDD_CEN_REG(lch
)) * 4;
346 dma_unmap_single(hsi_ctrl
->dev
, dma_h
, size
, DMA_TO_DEVICE
);
348 buff_offset
= hsi_hst_bufstate_f_reg(hsi_ctrl
, port
, channel
);
349 if (buff_offset
>= 0)
350 hsi_outl_and(~HSI_BUFSTATE_CHANNEL(channel
), hsi_ctrl
->base
,
353 hsi_reset_ch_write(hsi_ch
);
354 return status_reg
& HSI_GDD_LCH(lch
) ? 0 : -ECANCELED
;
358 * hsi_driver_cancel_read_dma - Cancel an ongoing GDD [DMA] read for the
359 * specified hsi channel.
360 * @hsi_ch - pointer to the hsi_channel to cancel DMA read.
362 * hsi_controller lock must be held before calling this function.
364 * Return: -ENXIO : No DMA channel found for specified HSI channel
365 * -ECANCELED : DMA cancel success, data not available at expected
367 * 0 : DMA transfer is already over, data already available at
370 * Note: whatever returned value, read callback will not be called after cancel.
372 int hsi_driver_cancel_read_dma(struct hsi_channel
*hsi_ch
)
374 int lch
= hsi_ch
->read_data
.lch
;
375 struct hsi_dev
*hsi_ctrl
= hsi_ch
->hsi_port
->hsi_controller
;
381 dev_err(&hsi_ch
->dev
->device
, "hsi_driver_cancel_read_dma "
382 "channel %d\n", hsi_ch
->channel_number
);
384 /* Re-enable interrupts for polling if needed */
385 if (hsi_ch
->flags
& HSI_CH_RX_POLL
)
386 hsi_driver_enable_read_interrupt(hsi_ch
, NULL
);
389 dev_err(&hsi_ch
->dev
->device
, "No DMA channel found for HSI "
390 "channel %d\n", hsi_ch
->channel_number
);
394 ccr
= hsi_inw(hsi_ctrl
->base
, HSI_GDD_CCR_REG(lch
));
395 if (!(ccr
& HSI_CCR_ENABLE
)) {
396 dev_dbg(&hsi_ch
->dev
->device
, "Read cancel on not "
397 "enabled logical channel %d CCR REG 0x%04X\n",
401 status_reg
= hsi_inl(hsi_ctrl
->base
, HSI_SYS_GDD_MPU_IRQ_STATUS_REG
);
402 status_reg
&= hsi_inl(hsi_ctrl
->base
, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG
);
403 hsi_outw_and(~HSI_CCR_ENABLE
, hsi_ctrl
->base
, HSI_GDD_CCR_REG(lch
));
405 /* Clear CSR register by reading it, as it is cleared automaticaly */
406 /* by HW after SW read */
407 gdd_csr
= hsi_inw(hsi_ctrl
->base
, HSI_GDD_CSR_REG(lch
));
408 hsi_outl_and(~HSI_GDD_LCH(lch
), hsi_ctrl
->base
,
409 HSI_SYS_GDD_MPU_IRQ_ENABLE_REG
);
410 hsi_outl(HSI_GDD_LCH(lch
), hsi_ctrl
->base
,
411 HSI_SYS_GDD_MPU_IRQ_STATUS_REG
);
413 /* Unmap DMA region - Access to the buffer is now safe */
414 dma_h
= hsi_inl(hsi_ctrl
->base
, HSI_GDD_CDSA_REG(lch
));
415 size
= hsi_inw(hsi_ctrl
->base
, HSI_GDD_CEN_REG(lch
)) * 4;
416 dma_unmap_single(hsi_ctrl
->dev
, dma_h
, size
, DMA_FROM_DEVICE
);
418 hsi_reset_ch_read(hsi_ch
);
419 return status_reg
& HSI_GDD_LCH(lch
) ? 0 : -ECANCELED
;
423 * hsi_get_info_from_gdd_lch - Retrieve channels information from DMA channel
424 * @hsi_ctrl - HSI device control structure
425 * @lch - DMA logical channel
427 * @channel - HSI channel
428 * @is_read_path - channel is used for reading
430 * Updates the port, channel and is_read_path parameters depending on the
431 * lch DMA channel status.
433 * Return 0 on success and < 0 on error.
435 int hsi_get_info_from_gdd_lch(struct hsi_dev
*hsi_ctrl
, unsigned int lch
,
436 unsigned int *port
, unsigned int *channel
,
437 unsigned int *is_read_path
)
442 for (i
= 0; i
< hsi_ctrl
->max_p
; i
++)
443 for (j
= 0; j
< hsi_ctrl
->hsi_port
[i
].max_ch
; j
++)
444 if (hsi_ctrl
->hsi_port
[i
].
445 hsi_channel
[j
].read_data
.lch
== lch
) {
451 } else if (hsi_ctrl
->hsi_port
[i
].
452 hsi_channel
[j
].write_data
.lch
== lch
) {
463 static void do_hsi_gdd_lch(struct hsi_dev
*hsi_ctrl
, unsigned int gdd_lch
)
465 void __iomem
*base
= hsi_ctrl
->base
;
466 struct platform_device
*pdev
= to_platform_device(hsi_ctrl
->dev
);
467 struct hsi_channel
*ch
;
469 unsigned int channel
;
470 unsigned int is_read_path
;
474 int fifo
, fifo_words_avail
;
476 if (hsi_get_info_from_gdd_lch(hsi_ctrl
, gdd_lch
, &port
, &channel
,
477 &is_read_path
) < 0) {
478 dev_err(hsi_ctrl
->dev
, "Unable to match the DMA channel %d with"
479 " an HSI channel\n", gdd_lch
);
482 dev_dbg(hsi_ctrl
->dev
, "DMA event on gdd_lch=%d => port=%d, "
483 "channel=%d, read=%d\n", gdd_lch
, port
, channel
,
487 hsi_outl_and(~HSI_GDD_LCH(gdd_lch
), base
,
488 HSI_SYS_GDD_MPU_IRQ_ENABLE_REG
);
489 /* Warning : CSR register is cleared automaticaly by HW after SW read */
490 gdd_csr
= hsi_inw(base
, HSI_GDD_CSR_REG(gdd_lch
));
492 if (!(gdd_csr
& HSI_CSR_TOUT
)) {
493 if (is_read_path
) { /* Read path */
494 dma_h
= hsi_inl(base
, HSI_GDD_CDSA_REG(gdd_lch
));
495 size
= hsi_inw(base
, HSI_GDD_CEN_REG(gdd_lch
)) * 4;
496 dma_sync_single_for_cpu(hsi_ctrl
->dev
, dma_h
, size
,
498 dma_unmap_single(hsi_ctrl
->dev
, dma_h
, size
,
500 ch
= hsi_ctrl_get_ch(hsi_ctrl
, port
, channel
);
501 hsi_reset_ch_read(ch
);
503 dev_dbg(hsi_ctrl
->dev
, "Calling ch %d read callback "
504 "(size %d).\n", channel
, size
/4);
505 spin_unlock(&hsi_ctrl
->lock
);
506 ch
->read_done(ch
->dev
, size
/ 4);
507 spin_lock(&hsi_ctrl
->lock
);
509 /* Check if FIFO is correctly emptied */
510 if (hsi_driver_device_is_hsi(pdev
)) {
511 fifo
= hsi_fifo_get_id(hsi_ctrl
, channel
, port
);
512 if (unlikely(fifo
< 0)) {
513 dev_err(hsi_ctrl
->dev
, "No valid FIFO "
514 "id found for channel %d.\n",
519 hsi_get_rx_fifo_occupancy(hsi_ctrl
,
521 if (fifo_words_avail
)
522 dev_dbg(hsi_ctrl
->dev
,
523 "WARNING: FIFO %d not empty "
524 "after DMA copy, remaining "
526 fifo
, fifo_words_avail
,
529 /* Re-enable interrupts for polling if needed */
530 if (ch
->flags
& HSI_CH_RX_POLL
)
531 hsi_driver_enable_read_interrupt(ch
, NULL
);
532 } else { /* Write path */
533 dma_h
= hsi_inl(base
, HSI_GDD_CSSA_REG(gdd_lch
));
534 size
= hsi_inw(base
, HSI_GDD_CEN_REG(gdd_lch
)) * 4;
535 dma_unmap_single(hsi_ctrl
->dev
, dma_h
, size
,
537 ch
= hsi_ctrl_get_ch(hsi_ctrl
, port
, channel
);
538 hsi_reset_ch_write(ch
);
540 dev_dbg(hsi_ctrl
->dev
, "Calling ch %d write callback "
541 "(size %d).\n", channel
, size
/4);
542 spin_unlock(&hsi_ctrl
->lock
);
543 ch
->write_done(ch
->dev
, size
/ 4);
544 spin_lock(&hsi_ctrl
->lock
);
547 dev_err(hsi_ctrl
->dev
, "Time-out overflow Error on GDD transfer"
548 " on gdd channel %d\n", gdd_lch
);
549 spin_unlock(&hsi_ctrl
->lock
);
550 /* TODO : need to perform a DMA soft reset */
551 hsi_port_event_handler(&hsi_ctrl
->hsi_port
[port
- 1],
552 HSI_EVENT_ERROR
, NULL
);
553 spin_lock(&hsi_ctrl
->lock
);
557 static u32
hsi_process_dma_event(struct hsi_dev
*hsi_ctrl
)
559 void __iomem
*base
= hsi_ctrl
->base
;
560 unsigned int gdd_lch
= 0;
563 unsigned int gdd_max_count
= hsi_ctrl
->gdd_chan_count
;
565 status_reg
= hsi_inl(base
, HSI_SYS_GDD_MPU_IRQ_STATUS_REG
);
566 status_reg
&= hsi_inl(base
, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG
);
568 dev_dbg(hsi_ctrl
->dev
, "DMA : no event, exit.\n");
572 for (gdd_lch
= 0; gdd_lch
< gdd_max_count
; gdd_lch
++) {
573 if (status_reg
& HSI_GDD_LCH(gdd_lch
)) {
574 do_hsi_gdd_lch(hsi_ctrl
, gdd_lch
);
575 lch_served
|= HSI_GDD_LCH(gdd_lch
);
579 /* Acknowledge interrupt for DMA channel */
580 hsi_outl(lch_served
, base
, HSI_SYS_GDD_MPU_IRQ_STATUS_REG
);
586 static void do_hsi_gdd_tasklet(unsigned long device
)
588 struct hsi_dev
*hsi_ctrl
= (struct hsi_dev
*)device
;
590 dev_dbg(hsi_ctrl
->dev
, "DMA Tasklet : clock_enabled=%d\n",
591 hsi_ctrl
->clock_enabled
);
593 spin_lock(&hsi_ctrl
->lock
);
594 hsi_clocks_enable(hsi_ctrl
->dev
, __func__
);
595 hsi_ctrl
->in_dma_tasklet
= true;
597 hsi_process_dma_event(hsi_ctrl
);
599 hsi_ctrl
->in_dma_tasklet
= false;
600 hsi_clocks_disable(hsi_ctrl
->dev
, __func__
);
601 spin_unlock(&hsi_ctrl
->lock
);
603 enable_irq(hsi_ctrl
->gdd_irq
);
606 static irqreturn_t
hsi_gdd_mpu_handler(int irq
, void *p
)
608 struct hsi_dev
*hsi_ctrl
= p
;
610 tasklet_hi_schedule(&hsi_ctrl
->hsi_gdd_tasklet
);
612 /* Disable interrupt until Bottom Half has cleared the IRQ status */
614 disable_irq_nosync(hsi_ctrl
->gdd_irq
);
619 int __init
hsi_gdd_init(struct hsi_dev
*hsi_ctrl
, const char *irq_name
)
621 tasklet_init(&hsi_ctrl
->hsi_gdd_tasklet
, do_hsi_gdd_tasklet
,
622 (unsigned long)hsi_ctrl
);
624 dev_info(hsi_ctrl
->dev
, "Registering IRQ %s (%d)\n",
625 irq_name
, hsi_ctrl
->gdd_irq
);
627 if (request_irq(hsi_ctrl
->gdd_irq
, hsi_gdd_mpu_handler
,
628 IRQF_NO_SUSPEND
| IRQF_TRIGGER_HIGH
,
629 irq_name
, hsi_ctrl
) < 0) {
630 dev_err(hsi_ctrl
->dev
, "FAILED to request GDD IRQ %d\n",
638 void hsi_gdd_exit(struct hsi_dev
*hsi_ctrl
)
640 tasklet_kill(&hsi_ctrl
->hsi_gdd_tasklet
);
641 free_irq(hsi_ctrl
->gdd_irq
, hsi_ctrl
);