// SPDX-License-Identifier: GPL-2.0-only
/*
 * ADMA driver for Nvidia's Tegra210 ADMA controller.
 *
 * Copyright (c) 2016, NVIDIA CORPORATION.  All rights reserved.
 */

#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include "virt-dma.h"

#define ADMA_CH_CMD					0x00
#define ADMA_CH_STATUS					0x0c
#define ADMA_CH_STATUS_XFER_EN				BIT(0)
#define ADMA_CH_STATUS_XFER_PAUSED			BIT(1)

#define ADMA_CH_INT_STATUS				0x10
#define ADMA_CH_INT_STATUS_XFER_DONE			BIT(0)

#define ADMA_CH_INT_CLEAR				0x1c
#define ADMA_CH_CTRL					0x24
#define ADMA_CH_CTRL_DIR(val)				(((val) & 0xf) << 12)
#define ADMA_CH_CTRL_DIR_AHUB2MEM			2
#define ADMA_CH_CTRL_DIR_MEM2AHUB			4
#define ADMA_CH_CTRL_MODE_CONTINUOUS			(2 << 8)
#define ADMA_CH_CTRL_FLOWCTRL_EN			BIT(1)
#define ADMA_CH_CTRL_XFER_PAUSE_SHIFT			0

#define ADMA_CH_CONFIG					0x28
#define ADMA_CH_CONFIG_SRC_BUF(val)			(((val) & 0x7) << 28)
#define ADMA_CH_CONFIG_TRG_BUF(val)			(((val) & 0x7) << 24)
#define ADMA_CH_CONFIG_BURST_SIZE_SHIFT			20
#define ADMA_CH_CONFIG_MAX_BURST_SIZE			16
#define ADMA_CH_CONFIG_WEIGHT_FOR_WRR(val)		((val) & 0xf)
#define ADMA_CH_CONFIG_MAX_BUFS				8
#define TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(reqs)	(reqs << 4)

#define ADMA_CH_FIFO_CTRL				0x2c
#define TEGRA210_ADMA_CH_FIFO_CTRL_TXSIZE(val)		(((val) & 0xf) << 8)
#define TEGRA210_ADMA_CH_FIFO_CTRL_RXSIZE(val)		((val) & 0xf)
#define TEGRA186_ADMA_CH_FIFO_CTRL_TXSIZE(val)		(((val) & 0x1f) << 8)
#define TEGRA186_ADMA_CH_FIFO_CTRL_RXSIZE(val)		((val) & 0x1f)

#define ADMA_CH_LOWER_SRC_ADDR				0x34
#define ADMA_CH_LOWER_TRG_ADDR				0x3c
#define ADMA_CH_TC					0x44
#define ADMA_CH_TC_COUNT_MASK				0x3ffffffc

#define ADMA_CH_XFER_STATUS				0x54
#define ADMA_CH_XFER_STATUS_COUNT_MASK			0xffff

#define ADMA_GLOBAL_CMD					0x00
#define ADMA_GLOBAL_SOFT_RESET				0x04

#define TEGRA_ADMA_BURST_COMPLETE_TIME			20

#define TEGRA210_FIFO_CTRL_DEFAULT (TEGRA210_ADMA_CH_FIFO_CTRL_TXSIZE(3) | \
				    TEGRA210_ADMA_CH_FIFO_CTRL_RXSIZE(3))

#define TEGRA186_FIFO_CTRL_DEFAULT (TEGRA186_ADMA_CH_FIFO_CTRL_TXSIZE(3) | \
				    TEGRA186_ADMA_CH_FIFO_CTRL_RXSIZE(3))

#define ADMA_CH_REG_FIELD_VAL(val, mask, shift)	(((val) & mask) << shift)

struct tegra_adma;

/*
 * struct tegra_adma_chip_data - Tegra chip specific data
 * @adma_get_burst_config: Function callback used to set DMA burst size.
 * @global_reg_offset: Register offset of DMA global register.
 * @global_int_clear: Register offset of DMA global interrupt clear.
 * @ch_req_tx_shift: Register offset for AHUB transmit channel select.
 * @ch_req_rx_shift: Register offset for AHUB receive channel select.
 * @ch_base_offset: Register offset of DMA channel registers.
 * @has_outstanding_reqs: If DMA channel can have outstanding requests.
 * @ch_fifo_ctrl: Default value for channel FIFO CTRL register.
 * @ch_req_mask: Mask for Tx or Rx channel select.
 * @ch_req_max: Maximum number of Tx or Rx channels available.
 * @ch_reg_size: Size of DMA channel register space.
 * @nr_channels: Number of DMA channels available.
 */
struct tegra_adma_chip_data {
	unsigned int (*adma_get_burst_config)(unsigned int burst_size);
	unsigned int global_reg_offset;
	unsigned int global_int_clear;
	unsigned int ch_req_tx_shift;
	unsigned int ch_req_rx_shift;
	unsigned int ch_base_offset;
	unsigned int ch_fifo_ctrl;
	unsigned int ch_req_mask;
	unsigned int ch_req_max;
	unsigned int ch_reg_size;
	unsigned int nr_channels;
	bool has_outstanding_reqs;
};

/*
 * struct tegra_adma_chan_regs - Tegra ADMA channel registers
 */
struct tegra_adma_chan_regs {
	unsigned int ctrl;
	unsigned int config;
	unsigned int src_addr;
	unsigned int trg_addr;
	unsigned int fifo_ctrl;
	unsigned int cmd;
	unsigned int tc;
};

/*
 * struct tegra_adma_desc - Tegra ADMA descriptor to manage transfer requests.
 */
struct tegra_adma_desc {
	struct virt_dma_desc		vd;
	struct tegra_adma_chan_regs	ch_regs;
	size_t				buf_len;
	size_t				period_len;
	size_t				num_periods;
};

/*
 * struct tegra_adma_chan - Tegra ADMA channel information
 */
struct tegra_adma_chan {
	struct virt_dma_chan		vc;
	struct tegra_adma_desc		*desc;
	struct tegra_adma		*tdma;
	int				irq;
	void __iomem			*chan_addr;

	/* Slave channel configuration info */
	struct dma_slave_config		sconfig;
	enum dma_transfer_direction	sreq_dir;
	unsigned int			sreq_index;
	bool				sreq_reserved;
	struct tegra_adma_chan_regs	ch_regs;

	/* Transfer count and position info */
	unsigned int			tx_buf_count;
	unsigned int			tx_buf_pos;
};

/*
 * struct tegra_adma - Tegra ADMA controller information
 */
struct tegra_adma {
	struct dma_device		dma_dev;
	struct device			*dev;
	void __iomem			*base_addr;
	struct clk			*ahub_clk;
	unsigned int			nr_channels;
	unsigned long			rx_requests_reserved;
	unsigned long			tx_requests_reserved;

	/* Used to store global command register state when suspending */
	unsigned int			global_cmd;

	const struct tegra_adma_chip_data *cdata;

	/* Last member of the structure */
	struct tegra_adma_chan		channels[];
};

static inline void tdma_write(struct tegra_adma *tdma, u32 reg, u32 val)
{
	writel(val, tdma->base_addr + tdma->cdata->global_reg_offset + reg);
}

static inline u32 tdma_read(struct tegra_adma *tdma, u32 reg)
{
	return readl(tdma->base_addr + tdma->cdata->global_reg_offset + reg);
}

static inline void tdma_ch_write(struct tegra_adma_chan *tdc, u32 reg, u32 val)
{
	writel(val, tdc->chan_addr + reg);
}

static inline u32 tdma_ch_read(struct tegra_adma_chan *tdc, u32 reg)
{
	return readl(tdc->chan_addr + reg);
}

static inline struct tegra_adma_chan *to_tegra_adma_chan(struct dma_chan *dc)
{
	return container_of(dc, struct tegra_adma_chan, vc.chan);
}

static inline struct tegra_adma_desc *to_tegra_adma_desc(
		struct dma_async_tx_descriptor *td)
{
	return container_of(td, struct tegra_adma_desc, vd.tx);
}

static inline struct device *tdc2dev(struct tegra_adma_chan *tdc)
{
	return tdc->tdma->dev;
}

static void tegra_adma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct tegra_adma_desc, vd));
}

static int tegra_adma_slave_config(struct dma_chan *dc,
				   struct dma_slave_config *sconfig)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);

	memcpy(&tdc->sconfig, sconfig, sizeof(*sconfig));

	return 0;
}

static int tegra_adma_init(struct tegra_adma *tdma)
{
	u32 status;
	int ret;

	/* Clear any interrupts */
	tdma_write(tdma, tdma->cdata->global_int_clear, 0x1);

	/* Assert soft reset */
	tdma_write(tdma, ADMA_GLOBAL_SOFT_RESET, 0x1);

	/* Wait for reset to clear */
	ret = readx_poll_timeout(readl,
				 tdma->base_addr +
				 tdma->cdata->global_reg_offset +
				 ADMA_GLOBAL_SOFT_RESET,
				 status, status == 0, 20, 10000);
	if (ret)
		return ret;

	/* Enable global ADMA registers */
	tdma_write(tdma, ADMA_GLOBAL_CMD, 1);

	return 0;
}

static int tegra_adma_request_alloc(struct tegra_adma_chan *tdc,
				    enum dma_transfer_direction direction)
{
	struct tegra_adma *tdma = tdc->tdma;
	unsigned int sreq_index = tdc->sreq_index;

	if (tdc->sreq_reserved)
		return tdc->sreq_dir == direction ? 0 : -EINVAL;

	if (sreq_index > tdma->cdata->ch_req_max) {
		dev_err(tdma->dev, "invalid DMA request\n");
		return -EINVAL;
	}

	switch (direction) {
	case DMA_MEM_TO_DEV:
		if (test_and_set_bit(sreq_index, &tdma->tx_requests_reserved)) {
			dev_err(tdma->dev, "DMA request reserved\n");
			return -EINVAL;
		}
		break;

	case DMA_DEV_TO_MEM:
		if (test_and_set_bit(sreq_index, &tdma->rx_requests_reserved)) {
			dev_err(tdma->dev, "DMA request reserved\n");
			return -EINVAL;
		}
		break;

	default:
		dev_WARN(tdma->dev, "channel %s has invalid transfer type\n",
			 dma_chan_name(&tdc->vc.chan));
		return -EINVAL;
	}

	tdc->sreq_dir = direction;
	tdc->sreq_reserved = true;

	return 0;
}

static void tegra_adma_request_free(struct tegra_adma_chan *tdc)
{
	struct tegra_adma *tdma = tdc->tdma;

	if (!tdc->sreq_reserved)
		return;

	switch (tdc->sreq_dir) {
	case DMA_MEM_TO_DEV:
		clear_bit(tdc->sreq_index, &tdma->tx_requests_reserved);
		break;

	case DMA_DEV_TO_MEM:
		clear_bit(tdc->sreq_index, &tdma->rx_requests_reserved);
		break;

	default:
		dev_WARN(tdma->dev, "channel %s has invalid transfer type\n",
			 dma_chan_name(&tdc->vc.chan));
		return;
	}

	tdc->sreq_reserved = false;
}

static u32 tegra_adma_irq_status(struct tegra_adma_chan *tdc)
{
	u32 status = tdma_ch_read(tdc, ADMA_CH_INT_STATUS);

	return status & ADMA_CH_INT_STATUS_XFER_DONE;
}

static u32 tegra_adma_irq_clear(struct tegra_adma_chan *tdc)
{
	u32 status = tegra_adma_irq_status(tdc);

	if (status)
		tdma_ch_write(tdc, ADMA_CH_INT_CLEAR, status);

	return status;
}

static void tegra_adma_stop(struct tegra_adma_chan *tdc)
{
	unsigned int status;

	/* Disable ADMA */
	tdma_ch_write(tdc, ADMA_CH_CMD, 0);

	/* Clear interrupt status */
	tegra_adma_irq_clear(tdc);

	if (readx_poll_timeout_atomic(readl, tdc->chan_addr + ADMA_CH_STATUS,
				      status,
				      !(status & ADMA_CH_STATUS_XFER_EN),
				      20, 10000)) {
		dev_err(tdc2dev(tdc), "unable to stop DMA channel\n");
		return;
	}

	kfree(tdc->desc);
	tdc->desc = NULL;
}

static void tegra_adma_start(struct tegra_adma_chan *tdc)
{
	struct virt_dma_desc *vd = vchan_next_desc(&tdc->vc);
	struct tegra_adma_chan_regs *ch_regs;
	struct tegra_adma_desc *desc;

	if (!vd)
		return;

	list_del(&vd->node);

	desc = to_tegra_adma_desc(&vd->tx);
	if (!desc) {
		dev_warn(tdc2dev(tdc), "unable to start DMA, no descriptor\n");
		return;
	}

	ch_regs = &desc->ch_regs;

	tdc->tx_buf_pos = 0;
	tdc->tx_buf_count = 0;
	tdma_ch_write(tdc, ADMA_CH_TC, ch_regs->tc);
	tdma_ch_write(tdc, ADMA_CH_CTRL, ch_regs->ctrl);
	tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR, ch_regs->src_addr);
	tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR, ch_regs->trg_addr);
	tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_regs->fifo_ctrl);
	tdma_ch_write(tdc, ADMA_CH_CONFIG, ch_regs->config);

	/* Start ADMA */
	tdma_ch_write(tdc, ADMA_CH_CMD, 1);

	tdc->desc = desc;
}

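/*
 * The XFER_STATUS register reports how many buffers (periods) have
 * completed, wrapping at ADMA_CH_XFER_STATUS_COUNT_MASK + 1.  The residue
 * is derived from the running buffer count modulo the number of periods
 * in the cyclic descriptor.
 */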
static unsigned int tegra_adma_get_residue(struct tegra_adma_chan *tdc)
{
	struct tegra_adma_desc *desc = tdc->desc;
	unsigned int max = ADMA_CH_XFER_STATUS_COUNT_MASK + 1;
	unsigned int pos = tdma_ch_read(tdc, ADMA_CH_XFER_STATUS);
	unsigned int periods_remaining;

	/*
	 * Handle wrap around of buffer count register
	 */
	if (pos < tdc->tx_buf_pos)
		tdc->tx_buf_count += pos + (max - tdc->tx_buf_pos);
	else
		tdc->tx_buf_count += pos - tdc->tx_buf_pos;

	periods_remaining = tdc->tx_buf_count % desc->num_periods;
	tdc->tx_buf_pos = pos;

	return desc->buf_len - (periods_remaining * desc->period_len);
}

static irqreturn_t tegra_adma_isr(int irq, void *dev_id)
{
	struct tegra_adma_chan *tdc = dev_id;
	unsigned long status;

	spin_lock(&tdc->vc.lock);

	status = tegra_adma_irq_clear(tdc);
	if (status == 0 || !tdc->desc) {
		spin_unlock(&tdc->vc.lock);
		return IRQ_NONE;
	}

	vchan_cyclic_callback(&tdc->desc->vd);

	spin_unlock(&tdc->vc.lock);

	return IRQ_HANDLED;
}

static void tegra_adma_issue_pending(struct dma_chan *dc)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
	unsigned long flags;

	spin_lock_irqsave(&tdc->vc.lock, flags);

	if (vchan_issue_pending(&tdc->vc)) {
		if (!tdc->desc)
			tegra_adma_start(tdc);
	}

	spin_unlock_irqrestore(&tdc->vc.lock, flags);
}

static bool tegra_adma_is_paused(struct tegra_adma_chan *tdc)
{
	u32 csts;

	csts = tdma_ch_read(tdc, ADMA_CH_STATUS);
	csts &= ADMA_CH_STATUS_XFER_PAUSED;

	return csts ? true : false;
}

static int tegra_adma_pause(struct dma_chan *dc)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
	struct tegra_adma_desc *desc = tdc->desc;
	struct tegra_adma_chan_regs *ch_regs = &desc->ch_regs;
	int dcnt = 10;

	ch_regs->ctrl = tdma_ch_read(tdc, ADMA_CH_CTRL);
	ch_regs->ctrl |= (1 << ADMA_CH_CTRL_XFER_PAUSE_SHIFT);
	tdma_ch_write(tdc, ADMA_CH_CTRL, ch_regs->ctrl);

	while (dcnt-- && !tegra_adma_is_paused(tdc))
		udelay(TEGRA_ADMA_BURST_COMPLETE_TIME);

	if (dcnt < 0) {
		dev_err(tdc2dev(tdc), "unable to pause DMA channel\n");
		return -EBUSY;
	}

	return 0;
}

static int tegra_adma_resume(struct dma_chan *dc)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
	struct tegra_adma_desc *desc = tdc->desc;
	struct tegra_adma_chan_regs *ch_regs = &desc->ch_regs;

	ch_regs->ctrl = tdma_ch_read(tdc, ADMA_CH_CTRL);
	ch_regs->ctrl &= ~(1 << ADMA_CH_CTRL_XFER_PAUSE_SHIFT);
	tdma_ch_write(tdc, ADMA_CH_CTRL, ch_regs->ctrl);

	return 0;
}

static int tegra_adma_terminate_all(struct dma_chan *dc)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&tdc->vc.lock, flags);

	if (tdc->desc)
		tegra_adma_stop(tdc);

	tegra_adma_request_free(tdc);
	vchan_get_all_descriptors(&tdc->vc, &head);
	spin_unlock_irqrestore(&tdc->vc.lock, flags);
	vchan_dma_desc_free_list(&tdc->vc, &head);

	return 0;
}

static enum dma_status tegra_adma_tx_status(struct dma_chan *dc,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
	struct tegra_adma_desc *desc;
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;
	unsigned int residual;

	ret = dma_cookie_status(dc, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&tdc->vc.lock, flags);

	vd = vchan_find_desc(&tdc->vc, cookie);
	if (vd) {
		desc = to_tegra_adma_desc(&vd->tx);
		residual = desc->ch_regs.tc;
	} else if (tdc->desc && tdc->desc->vd.tx.cookie == cookie) {
		residual = tegra_adma_get_residue(tdc);
	} else {
		residual = 0;
	}

	spin_unlock_irqrestore(&tdc->vc.lock, flags);

	dma_set_residue(txstate, residual);

	return ret;
}

static unsigned int tegra210_adma_get_burst_config(unsigned int burst_size)
{
	if (!burst_size || burst_size > ADMA_CH_CONFIG_MAX_BURST_SIZE)
		burst_size = ADMA_CH_CONFIG_MAX_BURST_SIZE;

	return fls(burst_size) << ADMA_CH_CONFIG_BURST_SIZE_SHIFT;
}

static unsigned int tegra186_adma_get_burst_config(unsigned int burst_size)
{
	if (!burst_size || burst_size > ADMA_CH_CONFIG_MAX_BURST_SIZE)
		burst_size = ADMA_CH_CONFIG_MAX_BURST_SIZE;

	return (burst_size - 1) << ADMA_CH_CONFIG_BURST_SIZE_SHIFT;
}

static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc,
				      struct tegra_adma_desc *desc,
				      dma_addr_t buf_addr,
				      enum dma_transfer_direction direction)
{
	struct tegra_adma_chan_regs *ch_regs = &desc->ch_regs;
	const struct tegra_adma_chip_data *cdata = tdc->tdma->cdata;
	unsigned int burst_size, adma_dir;

	if (desc->num_periods > ADMA_CH_CONFIG_MAX_BUFS)
		return -EINVAL;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		adma_dir = ADMA_CH_CTRL_DIR_MEM2AHUB;
		burst_size = tdc->sconfig.dst_maxburst;
		ch_regs->config = ADMA_CH_CONFIG_SRC_BUF(desc->num_periods - 1);
		ch_regs->ctrl = ADMA_CH_REG_FIELD_VAL(tdc->sreq_index,
						      cdata->ch_req_mask,
						      cdata->ch_req_tx_shift);
		ch_regs->src_addr = buf_addr;
		break;

	case DMA_DEV_TO_MEM:
		adma_dir = ADMA_CH_CTRL_DIR_AHUB2MEM;
		burst_size = tdc->sconfig.src_maxburst;
		ch_regs->config = ADMA_CH_CONFIG_TRG_BUF(desc->num_periods - 1);
		ch_regs->ctrl = ADMA_CH_REG_FIELD_VAL(tdc->sreq_index,
						      cdata->ch_req_mask,
						      cdata->ch_req_rx_shift);
		ch_regs->trg_addr = buf_addr;
		break;

	default:
		dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
		return -EINVAL;
	}

	ch_regs->ctrl |= ADMA_CH_CTRL_DIR(adma_dir) |
			 ADMA_CH_CTRL_MODE_CONTINUOUS |
			 ADMA_CH_CTRL_FLOWCTRL_EN;
	ch_regs->config |= cdata->adma_get_burst_config(burst_size);
	ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1);
	if (cdata->has_outstanding_reqs)
		ch_regs->config |= TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(8);
	ch_regs->fifo_ctrl = cdata->ch_fifo_ctrl;
	ch_regs->tc = desc->period_len & ADMA_CH_TC_COUNT_MASK;

	return tegra_adma_request_alloc(tdc, direction);
}

static struct dma_async_tx_descriptor *tegra_adma_prep_dma_cyclic(
	struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
	struct tegra_adma_desc *desc = NULL;

	if (!buf_len || !period_len || period_len > ADMA_CH_TC_COUNT_MASK) {
		dev_err(tdc2dev(tdc), "invalid buffer/period len\n");
		return NULL;
	}

	if (buf_len % period_len) {
		dev_err(tdc2dev(tdc), "buf_len not a multiple of period_len\n");
		return NULL;
	}

	if (!IS_ALIGNED(buf_addr, 4)) {
		dev_err(tdc2dev(tdc), "invalid buffer alignment\n");
		return NULL;
	}

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->buf_len = buf_len;
	desc->period_len = period_len;
	desc->num_periods = buf_len / period_len;

	if (tegra_adma_set_xfer_params(tdc, desc, buf_addr, direction)) {
		kfree(desc);
		return NULL;
	}

	return vchan_tx_prep(&tdc->vc, &desc->vd, flags);
}

static int tegra_adma_alloc_chan_resources(struct dma_chan *dc)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
	int ret;

	ret = request_irq(tdc->irq, tegra_adma_isr, 0, dma_chan_name(dc), tdc);
	if (ret) {
		dev_err(tdc2dev(tdc), "failed to get interrupt for %s\n",
			dma_chan_name(dc));
		return ret;
	}

	ret = pm_runtime_get_sync(tdc2dev(tdc));
	if (ret < 0) {
		pm_runtime_put_noidle(tdc2dev(tdc));
		free_irq(tdc->irq, tdc);
		return ret;
	}

	dma_cookie_init(&tdc->vc.chan);

	return 0;
}

static void tegra_adma_free_chan_resources(struct dma_chan *dc)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);

	tegra_adma_terminate_all(dc);
	vchan_free_chan_resources(&tdc->vc);
	tasklet_kill(&tdc->vc.task);
	free_irq(tdc->irq, tdc);
	pm_runtime_put(tdc2dev(tdc));

	tdc->sreq_dir = DMA_TRANS_NONE;
}

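/*
 * DT clients reference a channel with a single cell holding the AHUB
 * request (sreq) index; a value of zero is rejected as invalid.
 */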
static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct tegra_adma *tdma = ofdma->of_dma_data;
	struct tegra_adma_chan *tdc;
	struct dma_chan *chan;
	unsigned int sreq_index;

	if (dma_spec->args_count != 1)
		return NULL;

	sreq_index = dma_spec->args[0];

	if (sreq_index == 0) {
		dev_err(tdma->dev, "DMA request must not be 0\n");
		return NULL;
	}

	chan = dma_get_any_slave_channel(&tdma->dma_dev);
	if (!chan)
		return NULL;

	tdc = to_tegra_adma_chan(chan);
	tdc->sreq_index = sreq_index;

	return chan;
}

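/*
 * Runtime PM: the registers of any active channel are saved before the
 * AHUB clock is gated and written back when the controller resumes.
 */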
static int __maybe_unused tegra_adma_runtime_suspend(struct device *dev)
{
	struct tegra_adma *tdma = dev_get_drvdata(dev);
	struct tegra_adma_chan_regs *ch_reg;
	struct tegra_adma_chan *tdc;
	int i;

	tdma->global_cmd = tdma_read(tdma, ADMA_GLOBAL_CMD);
	if (!tdma->global_cmd)
		goto clk_disable;

	for (i = 0; i < tdma->nr_channels; i++) {
		tdc = &tdma->channels[i];
		ch_reg = &tdc->ch_regs;
		ch_reg->cmd = tdma_ch_read(tdc, ADMA_CH_CMD);
		/* skip if channel is not active */
		if (!ch_reg->cmd)
			continue;
		ch_reg->tc = tdma_ch_read(tdc, ADMA_CH_TC);
		ch_reg->src_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_SRC_ADDR);
		ch_reg->trg_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_TRG_ADDR);
		ch_reg->ctrl = tdma_ch_read(tdc, ADMA_CH_CTRL);
		ch_reg->fifo_ctrl = tdma_ch_read(tdc, ADMA_CH_FIFO_CTRL);
		ch_reg->config = tdma_ch_read(tdc, ADMA_CH_CONFIG);
	}

clk_disable:
	clk_disable_unprepare(tdma->ahub_clk);

	return 0;
}

static int __maybe_unused tegra_adma_runtime_resume(struct device *dev)
{
	struct tegra_adma *tdma = dev_get_drvdata(dev);
	struct tegra_adma_chan_regs *ch_reg;
	struct tegra_adma_chan *tdc;
	int ret, i;

	ret = clk_prepare_enable(tdma->ahub_clk);
	if (ret) {
		dev_err(dev, "ahub clk_enable failed: %d\n", ret);
		return ret;
	}
	tdma_write(tdma, ADMA_GLOBAL_CMD, tdma->global_cmd);

	if (!tdma->global_cmd)
		return 0;

	for (i = 0; i < tdma->nr_channels; i++) {
		tdc = &tdma->channels[i];
		ch_reg = &tdc->ch_regs;
		/* skip if channel was not active earlier */
		if (!ch_reg->cmd)
			continue;
		tdma_ch_write(tdc, ADMA_CH_TC, ch_reg->tc);
		tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR, ch_reg->src_addr);
		tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR, ch_reg->trg_addr);
		tdma_ch_write(tdc, ADMA_CH_CTRL, ch_reg->ctrl);
		tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_reg->fifo_ctrl);
		tdma_ch_write(tdc, ADMA_CH_CONFIG, ch_reg->config);
		tdma_ch_write(tdc, ADMA_CH_CMD, ch_reg->cmd);
	}

	return 0;
}

static const struct tegra_adma_chip_data tegra210_chip_data = {
	.adma_get_burst_config	= tegra210_adma_get_burst_config,
	.global_reg_offset	= 0xc00,
	.global_int_clear	= 0x20,
	.ch_req_tx_shift	= 28,
	.ch_req_rx_shift	= 24,
	.ch_base_offset		= 0,
	.has_outstanding_reqs	= false,
	.ch_fifo_ctrl		= TEGRA210_FIFO_CTRL_DEFAULT,
	.ch_req_mask		= 0xf,
	.ch_req_max		= 10,
	.ch_reg_size		= 0x80,
	.nr_channels		= 22,
};

static const struct tegra_adma_chip_data tegra186_chip_data = {
	.adma_get_burst_config	= tegra186_adma_get_burst_config,
	.global_reg_offset	= 0,
	.global_int_clear	= 0x402c,
	.ch_req_tx_shift	= 27,
	.ch_req_rx_shift	= 22,
	.ch_base_offset		= 0x10000,
	.has_outstanding_reqs	= true,
	.ch_fifo_ctrl		= TEGRA186_FIFO_CTRL_DEFAULT,
	.ch_req_mask		= 0x1f,
	.ch_req_max		= 20,
	.ch_reg_size		= 0x100,
	.nr_channels		= 32,
};

static const struct of_device_id tegra_adma_of_match[] = {
	{ .compatible = "nvidia,tegra210-adma", .data = &tegra210_chip_data },
	{ .compatible = "nvidia,tegra186-adma", .data = &tegra186_chip_data },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_adma_of_match);

static int tegra_adma_probe(struct platform_device *pdev)
{
	const struct tegra_adma_chip_data *cdata;
	struct tegra_adma *tdma;
	struct resource *res;
	int ret, i;

	cdata = of_device_get_match_data(&pdev->dev);
	if (!cdata) {
		dev_err(&pdev->dev, "device match data not found\n");
		return -ENODEV;
	}

	tdma = devm_kzalloc(&pdev->dev,
			    struct_size(tdma, channels, cdata->nr_channels),
			    GFP_KERNEL);
	if (!tdma)
		return -ENOMEM;

	tdma->dev = &pdev->dev;
	tdma->cdata = cdata;
	tdma->nr_channels = cdata->nr_channels;
	platform_set_drvdata(pdev, tdma);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	tdma->base_addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(tdma->base_addr))
		return PTR_ERR(tdma->base_addr);

	tdma->ahub_clk = devm_clk_get(&pdev->dev, "d_audio");
	if (IS_ERR(tdma->ahub_clk)) {
		dev_err(&pdev->dev, "Error: Missing ahub controller clock\n");
		return PTR_ERR(tdma->ahub_clk);
	}

	INIT_LIST_HEAD(&tdma->dma_dev.channels);
	for (i = 0; i < tdma->nr_channels; i++) {
		struct tegra_adma_chan *tdc = &tdma->channels[i];

		tdc->chan_addr = tdma->base_addr + cdata->ch_base_offset
				 + (cdata->ch_reg_size * i);

		tdc->irq = of_irq_get(pdev->dev.of_node, i);
		if (tdc->irq <= 0) {
			ret = tdc->irq ?: -ENXIO;
			goto irq_dispose;
		}

		vchan_init(&tdc->vc, &tdma->dma_dev);
		tdc->vc.desc_free = tegra_adma_desc_free;
		tdc->tdma = tdma;
	}

	pm_runtime_enable(&pdev->dev);

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(&pdev->dev);
		goto rpm_disable;
	}

	ret = tegra_adma_init(tdma);
	if (ret)
		goto rpm_put;

	dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);

	tdma->dma_dev.dev = &pdev->dev;
	tdma->dma_dev.device_alloc_chan_resources =
					tegra_adma_alloc_chan_resources;
	tdma->dma_dev.device_free_chan_resources =
					tegra_adma_free_chan_resources;
	tdma->dma_dev.device_issue_pending = tegra_adma_issue_pending;
	tdma->dma_dev.device_prep_dma_cyclic = tegra_adma_prep_dma_cyclic;
	tdma->dma_dev.device_config = tegra_adma_slave_config;
	tdma->dma_dev.device_tx_status = tegra_adma_tx_status;
	tdma->dma_dev.device_terminate_all = tegra_adma_terminate_all;
	tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	tdma->dma_dev.device_pause = tegra_adma_pause;
	tdma->dma_dev.device_resume = tegra_adma_resume;

	ret = dma_async_device_register(&tdma->dma_dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "ADMA registration failed: %d\n", ret);
		goto rpm_put;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 tegra_dma_of_xlate, tdma);
	if (ret < 0) {
		dev_err(&pdev->dev, "ADMA OF registration failed %d\n", ret);
		goto dma_remove;
	}

	pm_runtime_put(&pdev->dev);

	dev_info(&pdev->dev, "Tegra210 ADMA driver registered %d channels\n",
		 tdma->nr_channels);

	return 0;

dma_remove:
	dma_async_device_unregister(&tdma->dma_dev);
rpm_put:
	pm_runtime_put_sync(&pdev->dev);
rpm_disable:
	pm_runtime_disable(&pdev->dev);
irq_dispose:
	while (--i >= 0)
		irq_dispose_mapping(tdma->channels[i].irq);

	return ret;
}

static int tegra_adma_remove(struct platform_device *pdev)
{
	struct tegra_adma *tdma = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&tdma->dma_dev);

	for (i = 0; i < tdma->nr_channels; ++i)
		irq_dispose_mapping(tdma->channels[i].irq);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

static const struct dev_pm_ops tegra_adma_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(tegra_adma_runtime_suspend,
			   tegra_adma_runtime_resume, NULL)
	SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				     pm_runtime_force_resume)
};

static struct platform_driver tegra_admac_driver = {
	.driver = {
		.name		= "tegra-adma",
		.pm		= &tegra_adma_dev_pm_ops,
		.of_match_table	= tegra_adma_of_match,
	},
	.probe		= tegra_adma_probe,
	.remove		= tegra_adma_remove,
};

module_platform_driver(tegra_admac_driver);

MODULE_ALIAS("platform:tegra210-adma");
MODULE_DESCRIPTION("NVIDIA Tegra ADMA driver");
MODULE_AUTHOR("Dara Ramesh <dramesh@nvidia.com>");
MODULE_AUTHOR("Jon Hunter <jonathanh@nvidia.com>");
MODULE_LICENSE("GPL v2");