/*
 * ADMA driver for Nvidia's Tegra210 ADMA controller.
 *
 * Copyright (c) 2016, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include "virt-dma.h"
#define ADMA_CH_CMD 0x00
#define ADMA_CH_STATUS 0x0c
#define ADMA_CH_STATUS_XFER_EN BIT(0)

#define ADMA_CH_INT_STATUS 0x10
#define ADMA_CH_INT_STATUS_XFER_DONE BIT(0)

#define ADMA_CH_INT_CLEAR 0x1c
#define ADMA_CH_CTRL 0x24
#define ADMA_CH_CTRL_TX_REQ(val) (((val) & 0xf) << 28)
#define ADMA_CH_CTRL_TX_REQ_MAX 10
#define ADMA_CH_CTRL_RX_REQ(val) (((val) & 0xf) << 24)
#define ADMA_CH_CTRL_RX_REQ_MAX 10
#define ADMA_CH_CTRL_DIR(val) (((val) & 0xf) << 12)
#define ADMA_CH_CTRL_DIR_AHUB2MEM 2
#define ADMA_CH_CTRL_DIR_MEM2AHUB 4
#define ADMA_CH_CTRL_MODE_CONTINUOUS (2 << 8)
#define ADMA_CH_CTRL_FLOWCTRL_EN BIT(1)

#define ADMA_CH_CONFIG 0x28
#define ADMA_CH_CONFIG_SRC_BUF(val) (((val) & 0x7) << 28)
#define ADMA_CH_CONFIG_TRG_BUF(val) (((val) & 0x7) << 24)
#define ADMA_CH_CONFIG_BURST_SIZE(val) (((val) & 0x7) << 20)
#define ADMA_CH_CONFIG_BURST_16 5
#define ADMA_CH_CONFIG_WEIGHT_FOR_WRR(val) ((val) & 0xf)
#define ADMA_CH_CONFIG_MAX_BUFS 8

#define ADMA_CH_FIFO_CTRL 0x2c
#define ADMA_CH_FIFO_CTRL_OVRFW_THRES(val) (((val) & 0xf) << 24)
#define ADMA_CH_FIFO_CTRL_STARV_THRES(val) (((val) & 0xf) << 16)
#define ADMA_CH_FIFO_CTRL_TX_SIZE(val) (((val) & 0xf) << 8)
#define ADMA_CH_FIFO_CTRL_RX_SIZE(val) ((val) & 0xf)

#define ADMA_CH_LOWER_SRC_ADDR 0x34
#define ADMA_CH_LOWER_TRG_ADDR 0x3c
#define ADMA_CH_TC 0x44
#define ADMA_CH_TC_COUNT_MASK 0x3ffffffc

#define ADMA_CH_XFER_STATUS 0x54
#define ADMA_CH_XFER_STATUS_COUNT_MASK 0xffff

#define ADMA_GLOBAL_CMD 0xc00
#define ADMA_GLOBAL_SOFT_RESET 0xc04
#define ADMA_GLOBAL_INT_CLEAR 0xc20
#define ADMA_GLOBAL_CTRL 0xc24

#define ADMA_CH_REG_OFFSET(a) (a * 0x80)

#define ADMA_CH_FIFO_CTRL_DEFAULT (ADMA_CH_FIFO_CTRL_OVRFW_THRES(1) | \
				   ADMA_CH_FIFO_CTRL_STARV_THRES(1) | \
				   ADMA_CH_FIFO_CTRL_TX_SIZE(3)     | \
				   ADMA_CH_FIFO_CTRL_RX_SIZE(3))
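/*
 * Worked example of the register map above (illustrative arithmetic derived
 * from these macros rather than from the TRM): each channel owns a 0x80-byte
 * register window, so channel 2's CTRL register sits at
 * base_addr + ADMA_CH_REG_OFFSET(2) + ADMA_CH_CTRL = base_addr + 0x100 + 0x24.
 */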
/**
 * struct tegra_adma_chip_data - Tegra chip specific data
 * @nr_channels: Number of DMA channels available.
 */
struct tegra_adma_chip_data {
	unsigned int nr_channels;
};
/*
 * struct tegra_adma_chan_regs - Tegra ADMA channel registers
 */
struct tegra_adma_chan_regs {
	unsigned int ctrl;
	unsigned int config;
	unsigned int src_addr;
	unsigned int trg_addr;
	unsigned int fifo_ctrl;
	unsigned int cmd;
	unsigned int tc;
};
/*
 * struct tegra_adma_desc - Tegra ADMA descriptor to manage transfer requests.
 */
struct tegra_adma_desc {
	struct virt_dma_desc vd;
	struct tegra_adma_chan_regs ch_regs;
	size_t buf_len;
	size_t period_len;
	size_t num_periods;
};
/*
 * struct tegra_adma_chan - Tegra ADMA channel information
 */
struct tegra_adma_chan {
	struct virt_dma_chan vc;
	struct tegra_adma_desc *desc;
	struct tegra_adma *tdma;
	int irq;
	void __iomem *chan_addr;

	/* Slave channel configuration info */
	struct dma_slave_config sconfig;
	enum dma_transfer_direction sreq_dir;
	unsigned int sreq_index;
	bool sreq_reserved;
	struct tegra_adma_chan_regs ch_regs;

	/* Transfer count and position info */
	unsigned int tx_buf_count;
	unsigned int tx_buf_pos;
};
/*
 * struct tegra_adma - Tegra ADMA controller information
 */
struct tegra_adma {
	struct dma_device dma_dev;
	struct device *dev;
	void __iomem *base_addr;
	struct clk *ahub_clk;
	unsigned int nr_channels;
	unsigned long rx_requests_reserved;
	unsigned long tx_requests_reserved;

	/* Used to store global command register state when suspending */
	unsigned int global_cmd;

	/* Last member of the structure */
	struct tegra_adma_chan channels[0];
};
static inline void tdma_write(struct tegra_adma *tdma, u32 reg, u32 val)
{
	writel(val, tdma->base_addr + reg);
}
static inline u32 tdma_read(struct tegra_adma *tdma, u32 reg)
{
	return readl(tdma->base_addr + reg);
}
static inline void tdma_ch_write(struct tegra_adma_chan *tdc, u32 reg, u32 val)
{
	writel(val, tdc->chan_addr + reg);
}
static inline u32 tdma_ch_read(struct tegra_adma_chan *tdc, u32 reg)
{
	return readl(tdc->chan_addr + reg);
}
static inline struct tegra_adma_chan *to_tegra_adma_chan(struct dma_chan *dc)
{
	return container_of(dc, struct tegra_adma_chan, vc.chan);
}
static inline struct tegra_adma_desc *to_tegra_adma_desc(
		struct dma_async_tx_descriptor *td)
{
	return container_of(td, struct tegra_adma_desc, vd.tx);
}
static inline struct device *tdc2dev(struct tegra_adma_chan *tdc)
{
	return tdc->tdma->dev;
}
static void tegra_adma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct tegra_adma_desc, vd));
}
static int tegra_adma_slave_config(struct dma_chan *dc,
				   struct dma_slave_config *sconfig)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);

	memcpy(&tdc->sconfig, sconfig, sizeof(*sconfig));

	return 0;
}
static int tegra_adma_init(struct tegra_adma *tdma)
{
	u32 status;
	int ret;

	/* Clear any interrupts */
	tdma_write(tdma, ADMA_GLOBAL_INT_CLEAR, 0x1);

	/* Assert soft reset */
	tdma_write(tdma, ADMA_GLOBAL_SOFT_RESET, 0x1);

	/* Wait for reset to clear */
	ret = readx_poll_timeout(readl,
				 tdma->base_addr + ADMA_GLOBAL_SOFT_RESET,
				 status, status == 0, 20, 10000);
	if (ret)
		return ret;

	/* Enable global ADMA registers */
	tdma_write(tdma, ADMA_GLOBAL_CMD, 1);

	return 0;
}
static int tegra_adma_request_alloc(struct tegra_adma_chan *tdc,
				    enum dma_transfer_direction direction)
{
	struct tegra_adma *tdma = tdc->tdma;
	unsigned int sreq_index = tdc->sreq_index;

	if (tdc->sreq_reserved)
		return tdc->sreq_dir == direction ? 0 : -EINVAL;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		if (sreq_index > ADMA_CH_CTRL_TX_REQ_MAX) {
			dev_err(tdma->dev, "invalid DMA request\n");
			return -EINVAL;
		}

		if (test_and_set_bit(sreq_index, &tdma->tx_requests_reserved)) {
			dev_err(tdma->dev, "DMA request reserved\n");
			return -EINVAL;
		}
		break;

	case DMA_DEV_TO_MEM:
		if (sreq_index > ADMA_CH_CTRL_RX_REQ_MAX) {
			dev_err(tdma->dev, "invalid DMA request\n");
			return -EINVAL;
		}

		if (test_and_set_bit(sreq_index, &tdma->rx_requests_reserved)) {
			dev_err(tdma->dev, "DMA request reserved\n");
			return -EINVAL;
		}
		break;

	default:
		dev_WARN(tdma->dev, "channel %s has invalid transfer type\n",
			 dma_chan_name(&tdc->vc.chan));
		return -EINVAL;
	}

	tdc->sreq_dir = direction;
	tdc->sreq_reserved = true;

	return 0;
}
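/*
 * Reservation note (describing the code above): TX and RX request lines are
 * tracked in the tdma->tx_requests_reserved/rx_requests_reserved bitmaps, so
 * test_and_set_bit() both checks and claims a line in one step; a second
 * channel asking for an already-claimed request index fails with -EINVAL
 * instead of silently sharing the line.
 */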
static void tegra_adma_request_free(struct tegra_adma_chan *tdc)
{
	struct tegra_adma *tdma = tdc->tdma;

	if (!tdc->sreq_reserved)
		return;

	switch (tdc->sreq_dir) {
	case DMA_MEM_TO_DEV:
		clear_bit(tdc->sreq_index, &tdma->tx_requests_reserved);
		break;

	case DMA_DEV_TO_MEM:
		clear_bit(tdc->sreq_index, &tdma->rx_requests_reserved);
		break;

	default:
		dev_WARN(tdma->dev, "channel %s has invalid transfer type\n",
			 dma_chan_name(&tdc->vc.chan));
		return;
	}

	tdc->sreq_reserved = false;
}
static u32 tegra_adma_irq_status(struct tegra_adma_chan *tdc)
{
	u32 status = tdma_ch_read(tdc, ADMA_CH_INT_STATUS);

	return status & ADMA_CH_INT_STATUS_XFER_DONE;
}
static u32 tegra_adma_irq_clear(struct tegra_adma_chan *tdc)
{
	u32 status = tegra_adma_irq_status(tdc);

	if (status)
		tdma_ch_write(tdc, ADMA_CH_INT_CLEAR, status);

	return status;
}
static void tegra_adma_stop(struct tegra_adma_chan *tdc)
{
	u32 status;

	/* Disable ADMA */
	tdma_ch_write(tdc, ADMA_CH_CMD, 0);

	/* Clear interrupt status */
	tegra_adma_irq_clear(tdc);

	if (readx_poll_timeout_atomic(readl, tdc->chan_addr + ADMA_CH_STATUS,
			status, !(status & ADMA_CH_STATUS_XFER_EN),
			20, 10000)) {
		dev_err(tdc2dev(tdc), "unable to stop DMA channel\n");
		return;
	}

	kfree(tdc->desc);
	tdc->desc = NULL;
}
static void tegra_adma_start(struct tegra_adma_chan *tdc)
{
	struct virt_dma_desc *vd = vchan_next_desc(&tdc->vc);
	struct tegra_adma_chan_regs *ch_regs;
	struct tegra_adma_desc *desc;

	if (!vd)
		return;

	desc = to_tegra_adma_desc(&vd->tx);
	if (!desc) {
		dev_warn(tdc2dev(tdc), "unable to start DMA, no descriptor\n");
		return;
	}

	ch_regs = &desc->ch_regs;

	tdc->tx_buf_pos = 0;
	tdc->tx_buf_count = 0;
	tdma_ch_write(tdc, ADMA_CH_TC, ch_regs->tc);
	tdma_ch_write(tdc, ADMA_CH_CTRL, ch_regs->ctrl);
	tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR, ch_regs->src_addr);
	tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR, ch_regs->trg_addr);
	tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_regs->fifo_ctrl);
	tdma_ch_write(tdc, ADMA_CH_CONFIG, ch_regs->config);

	/* Start ADMA */
	tdma_ch_write(tdc, ADMA_CH_CMD, 1);

	tdc->desc = desc;
}
static unsigned int tegra_adma_get_residue(struct tegra_adma_chan *tdc)
{
	struct tegra_adma_desc *desc = tdc->desc;
	unsigned int max = ADMA_CH_XFER_STATUS_COUNT_MASK + 1;
	unsigned int pos = tdma_ch_read(tdc, ADMA_CH_XFER_STATUS);
	unsigned int periods_remaining;

	/*
	 * Handle wrap around of buffer count register
	 */
	if (pos < tdc->tx_buf_pos)
		tdc->tx_buf_count += pos + (max - tdc->tx_buf_pos);
	else
		tdc->tx_buf_count += pos - tdc->tx_buf_pos;

	periods_remaining = tdc->tx_buf_count % desc->num_periods;
	tdc->tx_buf_pos = pos;

	return desc->buf_len - (periods_remaining * desc->period_len);
}
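/*
 * Worked example (illustrative numbers, not taken from hardware docs): with
 * buf_len = 4096, period_len = 1024 and therefore num_periods = 4, a running
 * tx_buf_count of 5 gives periods_remaining = 5 % 4 = 1, so the residue
 * reported for the cyclic buffer is 4096 - (1 * 1024) = 3072 bytes.
 */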
static irqreturn_t tegra_adma_isr(int irq, void *dev_id)
{
	struct tegra_adma_chan *tdc = dev_id;
	unsigned long status;
	unsigned long flags;

	spin_lock_irqsave(&tdc->vc.lock, flags);

	status = tegra_adma_irq_clear(tdc);
	if (status == 0 || !tdc->desc) {
		spin_unlock_irqrestore(&tdc->vc.lock, flags);
		return IRQ_NONE;
	}

	vchan_cyclic_callback(&tdc->desc->vd);

	spin_unlock_irqrestore(&tdc->vc.lock, flags);

	return IRQ_HANDLED;
}
static void tegra_adma_issue_pending(struct dma_chan *dc)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
	unsigned long flags;

	spin_lock_irqsave(&tdc->vc.lock, flags);

	if (vchan_issue_pending(&tdc->vc)) {
		if (!tdc->desc)
			tegra_adma_start(tdc);
	}

	spin_unlock_irqrestore(&tdc->vc.lock, flags);
}
static int tegra_adma_terminate_all(struct dma_chan *dc)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&tdc->vc.lock, flags);

	if (tdc->desc)
		tegra_adma_stop(tdc);

	tegra_adma_request_free(tdc);
	vchan_get_all_descriptors(&tdc->vc, &head);
	spin_unlock_irqrestore(&tdc->vc.lock, flags);
	vchan_dma_desc_free_list(&tdc->vc, &head);

	return 0;
}
static enum dma_status tegra_adma_tx_status(struct dma_chan *dc,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
	struct tegra_adma_desc *desc;
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;
	unsigned int residual;

	ret = dma_cookie_status(dc, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&tdc->vc.lock, flags);

	vd = vchan_find_desc(&tdc->vc, cookie);
	if (vd) {
		desc = to_tegra_adma_desc(&vd->tx);
		residual = desc->ch_regs.tc;
	} else if (tdc->desc && tdc->desc->vd.tx.cookie == cookie) {
		residual = tegra_adma_get_residue(tdc);
	} else {
		residual = 0;
	}

	spin_unlock_irqrestore(&tdc->vc.lock, flags);

	dma_set_residue(txstate, residual);

	return ret;
}
static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc,
				      struct tegra_adma_desc *desc,
				      dma_addr_t buf_addr,
				      enum dma_transfer_direction direction)
{
	struct tegra_adma_chan_regs *ch_regs = &desc->ch_regs;
	unsigned int burst_size, adma_dir;

	if (desc->num_periods > ADMA_CH_CONFIG_MAX_BUFS)
		return -EINVAL;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		adma_dir = ADMA_CH_CTRL_DIR_MEM2AHUB;
		burst_size = fls(tdc->sconfig.dst_maxburst);
		ch_regs->config = ADMA_CH_CONFIG_SRC_BUF(desc->num_periods - 1);
		ch_regs->ctrl = ADMA_CH_CTRL_TX_REQ(tdc->sreq_index);
		ch_regs->src_addr = buf_addr;
		break;

	case DMA_DEV_TO_MEM:
		adma_dir = ADMA_CH_CTRL_DIR_AHUB2MEM;
		burst_size = fls(tdc->sconfig.src_maxburst);
		ch_regs->config = ADMA_CH_CONFIG_TRG_BUF(desc->num_periods - 1);
		ch_regs->ctrl = ADMA_CH_CTRL_RX_REQ(tdc->sreq_index);
		ch_regs->trg_addr = buf_addr;
		break;

	default:
		dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
		return -EINVAL;
	}

	if (!burst_size || burst_size > ADMA_CH_CONFIG_BURST_16)
		burst_size = ADMA_CH_CONFIG_BURST_16;

	ch_regs->ctrl |= ADMA_CH_CTRL_DIR(adma_dir) |
			 ADMA_CH_CTRL_MODE_CONTINUOUS |
			 ADMA_CH_CTRL_FLOWCTRL_EN;
	ch_regs->config |= ADMA_CH_CONFIG_BURST_SIZE(burst_size);
	ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1);
	ch_regs->fifo_ctrl = ADMA_CH_FIFO_CTRL_DEFAULT;
	ch_regs->tc = desc->period_len & ADMA_CH_TC_COUNT_MASK;

	return tegra_adma_request_alloc(tdc, direction);
}
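/*
 * Burst-size note (an illustration of the fls() clamp above, with example
 * values chosen for clarity): a dst_maxburst of 8 words maps to fls(8) = 4,
 * while a maxburst of 16 or more, or an unset maxburst of 0, is clamped to
 * ADMA_CH_CONFIG_BURST_16, the largest burst the BURST_SIZE field encodes.
 */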
static struct dma_async_tx_descriptor *tegra_adma_prep_dma_cyclic(
	struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
	struct tegra_adma_desc *desc = NULL;

	if (!buf_len || !period_len || period_len > ADMA_CH_TC_COUNT_MASK) {
		dev_err(tdc2dev(tdc), "invalid buffer/period len\n");
		return NULL;
	}

	if (buf_len % period_len) {
		dev_err(tdc2dev(tdc), "buf_len not a multiple of period_len\n");
		return NULL;
	}

	if (!IS_ALIGNED(buf_addr, 4)) {
		dev_err(tdc2dev(tdc), "invalid buffer alignment\n");
		return NULL;
	}

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->buf_len = buf_len;
	desc->period_len = period_len;
	desc->num_periods = buf_len / period_len;

	if (tegra_adma_set_xfer_params(tdc, desc, buf_addr, direction)) {
		kfree(desc);
		return NULL;
	}

	return vchan_tx_prep(&tdc->vc, &desc->vd, flags);
}
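/*
 * Illustrative client usage (a sketch using the generic dmaengine API, not
 * code from this driver; "dev", "fifo_phys", "buf_dma", "buf_len" and
 * "period_len" are placeholders supplied by the caller, e.g. an ASoC PCM
 * driver):
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "rx");
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_DEV_TO_MEM,
 *		.src_addr	= fifo_phys,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 8,
 *	};
 *	struct dma_async_tx_descriptor *txd;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	txd = dmaengine_prep_dma_cyclic(chan, buf_dma, buf_len, period_len,
 *					DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */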
static int tegra_adma_alloc_chan_resources(struct dma_chan *dc)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
	int ret;

	ret = request_irq(tdc->irq, tegra_adma_isr, 0, dma_chan_name(dc), tdc);
	if (ret) {
		dev_err(tdc2dev(tdc), "failed to get interrupt for %s\n",
			dma_chan_name(dc));
		return ret;
	}

	ret = pm_runtime_get_sync(tdc2dev(tdc));
	if (ret < 0) {
		free_irq(tdc->irq, tdc);
		return ret;
	}

	dma_cookie_init(&tdc->vc.chan);

	return 0;
}
static void tegra_adma_free_chan_resources(struct dma_chan *dc)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);

	tegra_adma_terminate_all(dc);
	vchan_free_chan_resources(&tdc->vc);
	tasklet_kill(&tdc->vc.task);
	free_irq(tdc->irq, tdc);
	pm_runtime_put(tdc2dev(tdc));

	tdc->sreq_index = 0;
	tdc->sreq_dir = DMA_TRANS_NONE;
}
static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct tegra_adma *tdma = ofdma->of_dma_data;
	struct tegra_adma_chan *tdc;
	struct dma_chan *chan;
	unsigned int sreq_index;

	if (dma_spec->args_count != 1)
		return NULL;

	sreq_index = dma_spec->args[0];

	if (sreq_index == 0) {
		dev_err(tdma->dev, "DMA request must not be 0\n");
		return NULL;
	}

	chan = dma_get_any_slave_channel(&tdma->dma_dev);
	if (!chan)
		return NULL;

	tdc = to_tegra_adma_chan(chan);
	tdc->sreq_index = sreq_index;

	return chan;
}
static int tegra_adma_runtime_suspend(struct device *dev)
{
	struct tegra_adma *tdma = dev_get_drvdata(dev);
	struct tegra_adma_chan_regs *ch_reg;
	struct tegra_adma_chan *tdc;
	int i;

	tdma->global_cmd = tdma_read(tdma, ADMA_GLOBAL_CMD);
	if (!tdma->global_cmd)
		goto clk_disable;

	for (i = 0; i < tdma->nr_channels; i++) {
		tdc = &tdma->channels[i];
		ch_reg = &tdc->ch_regs;
		ch_reg->cmd = tdma_ch_read(tdc, ADMA_CH_CMD);
		/* skip if channel is not active */
		if (!ch_reg->cmd)
			continue;
		ch_reg->tc = tdma_ch_read(tdc, ADMA_CH_TC);
		ch_reg->src_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_SRC_ADDR);
		ch_reg->trg_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_TRG_ADDR);
		ch_reg->ctrl = tdma_ch_read(tdc, ADMA_CH_CTRL);
		ch_reg->fifo_ctrl = tdma_ch_read(tdc, ADMA_CH_FIFO_CTRL);
		ch_reg->config = tdma_ch_read(tdc, ADMA_CH_CONFIG);
	}

clk_disable:
	clk_disable_unprepare(tdma->ahub_clk);

	return 0;
}
static int tegra_adma_runtime_resume(struct device *dev)
{
	struct tegra_adma *tdma = dev_get_drvdata(dev);
	struct tegra_adma_chan_regs *ch_reg;
	struct tegra_adma_chan *tdc;
	int ret, i;

	ret = clk_prepare_enable(tdma->ahub_clk);
	if (ret) {
		dev_err(dev, "ahub clk_enable failed: %d\n", ret);
		return ret;
	}

	tdma_write(tdma, ADMA_GLOBAL_CMD, tdma->global_cmd);

	if (!tdma->global_cmd)
		return 0;

	for (i = 0; i < tdma->nr_channels; i++) {
		tdc = &tdma->channels[i];
		ch_reg = &tdc->ch_regs;
		/* skip if channel was not active earlier */
		if (!ch_reg->cmd)
			continue;
		tdma_ch_write(tdc, ADMA_CH_TC, ch_reg->tc);
		tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR, ch_reg->src_addr);
		tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR, ch_reg->trg_addr);
		tdma_ch_write(tdc, ADMA_CH_CTRL, ch_reg->ctrl);
		tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_reg->fifo_ctrl);
		tdma_ch_write(tdc, ADMA_CH_CONFIG, ch_reg->config);
		tdma_ch_write(tdc, ADMA_CH_CMD, ch_reg->cmd);
	}

	return 0;
}
static const struct tegra_adma_chip_data tegra210_chip_data = {
	.nr_channels = 22,
};

static const struct of_device_id tegra_adma_of_match[] = {
	{ .compatible = "nvidia,tegra210-adma", .data = &tegra210_chip_data },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_adma_of_match);
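/*
 * Illustrative device tree usage (a sketch based only on the compatible
 * string and clock name used in this file; the node label, unit address and
 * omitted properties are placeholders): clients reference a channel with a
 * single cell carrying the ADMA request number, which tegra_dma_of_xlate()
 * above rejects if it is zero.
 *
 *	adma: dma-controller@... {
 *		compatible = "nvidia,tegra210-adma";
 *		...
 *		clock-names = "d_audio";
 *		#dma-cells = <1>;
 *	};
 */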
static int tegra_adma_probe(struct platform_device *pdev)
{
	const struct tegra_adma_chip_data *cdata;
	struct tegra_adma *tdma;
	struct resource *res;
	int ret, i;

	cdata = of_device_get_match_data(&pdev->dev);
	if (!cdata) {
		dev_err(&pdev->dev, "device match data not found\n");
		return -ENODEV;
	}

	tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
			    sizeof(struct tegra_adma_chan), GFP_KERNEL);
	if (!tdma)
		return -ENOMEM;

	tdma->dev = &pdev->dev;
	tdma->nr_channels = cdata->nr_channels;
	platform_set_drvdata(pdev, tdma);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	tdma->base_addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(tdma->base_addr))
		return PTR_ERR(tdma->base_addr);

	tdma->ahub_clk = devm_clk_get(&pdev->dev, "d_audio");
	if (IS_ERR(tdma->ahub_clk)) {
		dev_err(&pdev->dev, "Error: Missing ahub controller clock\n");
		return PTR_ERR(tdma->ahub_clk);
	}

	INIT_LIST_HEAD(&tdma->dma_dev.channels);
	for (i = 0; i < tdma->nr_channels; i++) {
		struct tegra_adma_chan *tdc = &tdma->channels[i];

		tdc->chan_addr = tdma->base_addr + ADMA_CH_REG_OFFSET(i);

		tdc->irq = of_irq_get(pdev->dev.of_node, i);
		if (tdc->irq <= 0) {
			ret = tdc->irq ?: -ENXIO;
			goto irq_dispose;
		}

		vchan_init(&tdc->vc, &tdma->dma_dev);
		tdc->vc.desc_free = tegra_adma_desc_free;
		tdc->tdma = tdma;
	}

	pm_runtime_enable(&pdev->dev);

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		goto rpm_disable;

	ret = tegra_adma_init(tdma);
	if (ret)
		goto rpm_put;

	dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);

	tdma->dma_dev.dev = &pdev->dev;
	tdma->dma_dev.device_alloc_chan_resources =
					tegra_adma_alloc_chan_resources;
	tdma->dma_dev.device_free_chan_resources =
					tegra_adma_free_chan_resources;
	tdma->dma_dev.device_issue_pending = tegra_adma_issue_pending;
	tdma->dma_dev.device_prep_dma_cyclic = tegra_adma_prep_dma_cyclic;
	tdma->dma_dev.device_config = tegra_adma_slave_config;
	tdma->dma_dev.device_tx_status = tegra_adma_tx_status;
	tdma->dma_dev.device_terminate_all = tegra_adma_terminate_all;
	tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;

	ret = dma_async_device_register(&tdma->dma_dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "ADMA registration failed: %d\n", ret);
		goto rpm_put;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 tegra_dma_of_xlate, tdma);
	if (ret < 0) {
		dev_err(&pdev->dev, "ADMA OF registration failed %d\n", ret);
		goto dma_remove;
	}

	pm_runtime_put(&pdev->dev);

	dev_info(&pdev->dev, "Tegra210 ADMA driver registered %d channels\n",
		 tdma->nr_channels);

	return 0;

dma_remove:
	dma_async_device_unregister(&tdma->dma_dev);
rpm_put:
	pm_runtime_put_sync(&pdev->dev);
rpm_disable:
	pm_runtime_disable(&pdev->dev);
irq_dispose:
	while (--i >= 0)
		irq_dispose_mapping(tdma->channels[i].irq);

	return ret;
}
static int tegra_adma_remove(struct platform_device *pdev)
{
	struct tegra_adma *tdma = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&tdma->dma_dev);

	for (i = 0; i < tdma->nr_channels; ++i)
		irq_dispose_mapping(tdma->channels[i].irq);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int tegra_adma_pm_suspend(struct device *dev)
{
	return pm_runtime_suspended(dev) == false;
}
#endif

static const struct dev_pm_ops tegra_adma_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(tegra_adma_runtime_suspend,
			   tegra_adma_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(tegra_adma_pm_suspend, NULL)
};
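/*
 * PM note (summarising the callbacks above): runtime suspend snapshots the
 * global command register and, for every channel that is still active, its
 * per-channel register context before gating the ahub clock; runtime resume
 * re-enables the clock and restores that context. The system sleep hook only
 * refuses to suspend if the device has not already been runtime suspended.
 */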
static struct platform_driver tegra_admac_driver = {
	.driver = {
		.name = "tegra-adma",
		.pm = &tegra_adma_dev_pm_ops,
		.of_match_table = tegra_adma_of_match,
	},
	.probe = tegra_adma_probe,
	.remove = tegra_adma_remove,
};

module_platform_driver(tegra_admac_driver);
MODULE_ALIAS("platform:tegra210-adma");
MODULE_DESCRIPTION("NVIDIA Tegra ADMA driver");
MODULE_AUTHOR("Dara Ramesh <dramesh@nvidia.com>");
MODULE_AUTHOR("Jon Hunter <jonathanh@nvidia.com>");
MODULE_LICENSE("GPL v2");