// SPDX-License-Identifier: GPL-2.0-only
/*
 * ADMA driver for Nvidia's Tegra210 ADMA controller.
 *
 * Copyright (c) 2016, NVIDIA CORPORATION.  All rights reserved.
 */
#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include "virt-dma.h"
20 #define ADMA_CH_CMD 0x00
21 #define ADMA_CH_STATUS 0x0c
22 #define ADMA_CH_STATUS_XFER_EN BIT(0)
23 #define ADMA_CH_STATUS_XFER_PAUSED BIT(1)
25 #define ADMA_CH_INT_STATUS 0x10
26 #define ADMA_CH_INT_STATUS_XFER_DONE BIT(0)
28 #define ADMA_CH_INT_CLEAR 0x1c
29 #define ADMA_CH_CTRL 0x24
30 #define ADMA_CH_CTRL_DIR(val) (((val) & 0xf) << 12)
31 #define ADMA_CH_CTRL_DIR_AHUB2MEM 2
32 #define ADMA_CH_CTRL_DIR_MEM2AHUB 4
33 #define ADMA_CH_CTRL_MODE_CONTINUOUS (2 << 8)
34 #define ADMA_CH_CTRL_FLOWCTRL_EN BIT(1)
35 #define ADMA_CH_CTRL_XFER_PAUSE_SHIFT 0
37 #define ADMA_CH_CONFIG 0x28
38 #define ADMA_CH_CONFIG_SRC_BUF(val) (((val) & 0x7) << 28)
39 #define ADMA_CH_CONFIG_TRG_BUF(val) (((val) & 0x7) << 24)
40 #define ADMA_CH_CONFIG_BURST_SIZE_SHIFT 20
41 #define ADMA_CH_CONFIG_MAX_BURST_SIZE 16
42 #define ADMA_CH_CONFIG_WEIGHT_FOR_WRR(val) ((val) & 0xf)
43 #define ADMA_CH_CONFIG_MAX_BUFS 8
44 #define TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(reqs) (reqs << 4)
46 #define ADMA_CH_FIFO_CTRL 0x2c
47 #define ADMA_CH_TX_FIFO_SIZE_SHIFT 8
48 #define ADMA_CH_RX_FIFO_SIZE_SHIFT 0
50 #define ADMA_CH_LOWER_SRC_ADDR 0x34
51 #define ADMA_CH_LOWER_TRG_ADDR 0x3c
52 #define ADMA_CH_TC 0x44
53 #define ADMA_CH_TC_COUNT_MASK 0x3ffffffc
55 #define ADMA_CH_XFER_STATUS 0x54
56 #define ADMA_CH_XFER_STATUS_COUNT_MASK 0xffff
58 #define ADMA_GLOBAL_CMD 0x00
59 #define ADMA_GLOBAL_SOFT_RESET 0x04
61 #define TEGRA_ADMA_BURST_COMPLETE_TIME 20
63 #define ADMA_CH_REG_FIELD_VAL(val, mask, shift) (((val) & mask) << shift)
68 * struct tegra_adma_chip_data - Tegra chip specific data
69 * @adma_get_burst_config: Function callback used to set DMA burst size.
70 * @global_reg_offset: Register offset of DMA global register.
71 * @global_int_clear: Register offset of DMA global interrupt clear.
72 * @ch_req_tx_shift: Register offset for AHUB transmit channel select.
73 * @ch_req_rx_shift: Register offset for AHUB receive channel select.
74 * @ch_base_offset: Register offset of DMA channel registers.
75 * @ch_fifo_ctrl: Default value for channel FIFO CTRL register.
76 * @ch_req_mask: Mask for Tx or Rx channel select.
77 * @ch_req_max: Maximum number of Tx or Rx channels available.
78 * @ch_reg_size: Size of DMA channel register space.
79 * @nr_channels: Number of DMA channels available.
80 * @ch_fifo_size_mask: Mask for FIFO size field.
81 * @sreq_index_offset: Slave channel index offset.
82 * @has_outstanding_reqs: If DMA channel can have outstanding requests.
84 struct tegra_adma_chip_data
{
85 unsigned int (*adma_get_burst_config
)(unsigned int burst_size
);
86 unsigned int global_reg_offset
;
87 unsigned int global_int_clear
;
88 unsigned int ch_req_tx_shift
;
89 unsigned int ch_req_rx_shift
;
90 unsigned int ch_base_offset
;
91 unsigned int ch_fifo_ctrl
;
92 unsigned int ch_req_mask
;
93 unsigned int ch_req_max
;
94 unsigned int ch_reg_size
;
95 unsigned int nr_channels
;
96 unsigned int ch_fifo_size_mask
;
97 unsigned int sreq_index_offset
;
98 bool has_outstanding_reqs
;
102 * struct tegra_adma_chan_regs - Tegra ADMA channel registers
104 struct tegra_adma_chan_regs
{
107 unsigned int src_addr
;
108 unsigned int trg_addr
;
109 unsigned int fifo_ctrl
;
115 * struct tegra_adma_desc - Tegra ADMA descriptor to manage transfer requests.
117 struct tegra_adma_desc
{
118 struct virt_dma_desc vd
;
119 struct tegra_adma_chan_regs ch_regs
;
126 * struct tegra_adma_chan - Tegra ADMA channel information
128 struct tegra_adma_chan
{
129 struct virt_dma_chan vc
;
130 struct tegra_adma_desc
*desc
;
131 struct tegra_adma
*tdma
;
133 void __iomem
*chan_addr
;
135 /* Slave channel configuration info */
136 struct dma_slave_config sconfig
;
137 enum dma_transfer_direction sreq_dir
;
138 unsigned int sreq_index
;
140 struct tegra_adma_chan_regs ch_regs
;
142 /* Transfer count and position info */
143 unsigned int tx_buf_count
;
144 unsigned int tx_buf_pos
;
148 * struct tegra_adma - Tegra ADMA controller information
151 struct dma_device dma_dev
;
153 void __iomem
*base_addr
;
154 struct clk
*ahub_clk
;
155 unsigned int nr_channels
;
156 unsigned long *dma_chan_mask
;
157 unsigned long rx_requests_reserved
;
158 unsigned long tx_requests_reserved
;
160 /* Used to store global command register state when suspending */
161 unsigned int global_cmd
;
163 const struct tegra_adma_chip_data
*cdata
;
165 /* Last member of the structure */
166 struct tegra_adma_chan channels
[] __counted_by(nr_channels
);
169 static inline void tdma_write(struct tegra_adma
*tdma
, u32 reg
, u32 val
)
171 writel(val
, tdma
->base_addr
+ tdma
->cdata
->global_reg_offset
+ reg
);
174 static inline u32
tdma_read(struct tegra_adma
*tdma
, u32 reg
)
176 return readl(tdma
->base_addr
+ tdma
->cdata
->global_reg_offset
+ reg
);
179 static inline void tdma_ch_write(struct tegra_adma_chan
*tdc
, u32 reg
, u32 val
)
181 writel(val
, tdc
->chan_addr
+ reg
);
184 static inline u32
tdma_ch_read(struct tegra_adma_chan
*tdc
, u32 reg
)
186 return readl(tdc
->chan_addr
+ reg
);
189 static inline struct tegra_adma_chan
*to_tegra_adma_chan(struct dma_chan
*dc
)
191 return container_of(dc
, struct tegra_adma_chan
, vc
.chan
);
194 static inline struct tegra_adma_desc
*to_tegra_adma_desc(
195 struct dma_async_tx_descriptor
*td
)
197 return container_of(td
, struct tegra_adma_desc
, vd
.tx
);
200 static inline struct device
*tdc2dev(struct tegra_adma_chan
*tdc
)
202 return tdc
->tdma
->dev
;
205 static void tegra_adma_desc_free(struct virt_dma_desc
*vd
)
207 kfree(container_of(vd
, struct tegra_adma_desc
, vd
));
210 static int tegra_adma_slave_config(struct dma_chan
*dc
,
211 struct dma_slave_config
*sconfig
)
213 struct tegra_adma_chan
*tdc
= to_tegra_adma_chan(dc
);
215 memcpy(&tdc
->sconfig
, sconfig
, sizeof(*sconfig
));
220 static int tegra_adma_init(struct tegra_adma
*tdma
)
225 /* Clear any interrupts */
226 tdma_write(tdma
, tdma
->cdata
->ch_base_offset
+ tdma
->cdata
->global_int_clear
, 0x1);
228 /* Assert soft reset */
229 tdma_write(tdma
, ADMA_GLOBAL_SOFT_RESET
, 0x1);
231 /* Wait for reset to clear */
232 ret
= readx_poll_timeout(readl
,
234 tdma
->cdata
->global_reg_offset
+
235 ADMA_GLOBAL_SOFT_RESET
,
236 status
, status
== 0, 20, 10000);
240 /* Enable global ADMA registers */
241 tdma_write(tdma
, ADMA_GLOBAL_CMD
, 1);
246 static int tegra_adma_request_alloc(struct tegra_adma_chan
*tdc
,
247 enum dma_transfer_direction direction
)
249 struct tegra_adma
*tdma
= tdc
->tdma
;
250 unsigned int sreq_index
= tdc
->sreq_index
;
252 if (tdc
->sreq_reserved
)
253 return tdc
->sreq_dir
== direction
? 0 : -EINVAL
;
255 if (sreq_index
> tdma
->cdata
->ch_req_max
) {
256 dev_err(tdma
->dev
, "invalid DMA request\n");
262 if (test_and_set_bit(sreq_index
, &tdma
->tx_requests_reserved
)) {
263 dev_err(tdma
->dev
, "DMA request reserved\n");
269 if (test_and_set_bit(sreq_index
, &tdma
->rx_requests_reserved
)) {
270 dev_err(tdma
->dev
, "DMA request reserved\n");
276 dev_WARN(tdma
->dev
, "channel %s has invalid transfer type\n",
277 dma_chan_name(&tdc
->vc
.chan
));
281 tdc
->sreq_dir
= direction
;
282 tdc
->sreq_reserved
= true;
287 static void tegra_adma_request_free(struct tegra_adma_chan
*tdc
)
289 struct tegra_adma
*tdma
= tdc
->tdma
;
291 if (!tdc
->sreq_reserved
)
294 switch (tdc
->sreq_dir
) {
296 clear_bit(tdc
->sreq_index
, &tdma
->tx_requests_reserved
);
300 clear_bit(tdc
->sreq_index
, &tdma
->rx_requests_reserved
);
304 dev_WARN(tdma
->dev
, "channel %s has invalid transfer type\n",
305 dma_chan_name(&tdc
->vc
.chan
));
309 tdc
->sreq_reserved
= false;
312 static u32
tegra_adma_irq_status(struct tegra_adma_chan
*tdc
)
314 u32 status
= tdma_ch_read(tdc
, ADMA_CH_INT_STATUS
);
316 return status
& ADMA_CH_INT_STATUS_XFER_DONE
;
319 static u32
tegra_adma_irq_clear(struct tegra_adma_chan
*tdc
)
321 u32 status
= tegra_adma_irq_status(tdc
);
324 tdma_ch_write(tdc
, ADMA_CH_INT_CLEAR
, status
);
329 static void tegra_adma_stop(struct tegra_adma_chan
*tdc
)
334 tdma_ch_write(tdc
, ADMA_CH_CMD
, 0);
336 /* Clear interrupt status */
337 tegra_adma_irq_clear(tdc
);
339 if (readx_poll_timeout_atomic(readl
, tdc
->chan_addr
+ ADMA_CH_STATUS
,
340 status
, !(status
& ADMA_CH_STATUS_XFER_EN
),
342 dev_err(tdc2dev(tdc
), "unable to stop DMA channel\n");
350 static void tegra_adma_start(struct tegra_adma_chan
*tdc
)
352 struct virt_dma_desc
*vd
= vchan_next_desc(&tdc
->vc
);
353 struct tegra_adma_chan_regs
*ch_regs
;
354 struct tegra_adma_desc
*desc
;
361 desc
= to_tegra_adma_desc(&vd
->tx
);
364 dev_warn(tdc2dev(tdc
), "unable to start DMA, no descriptor\n");
368 ch_regs
= &desc
->ch_regs
;
371 tdc
->tx_buf_count
= 0;
372 tdma_ch_write(tdc
, ADMA_CH_TC
, ch_regs
->tc
);
373 tdma_ch_write(tdc
, ADMA_CH_CTRL
, ch_regs
->ctrl
);
374 tdma_ch_write(tdc
, ADMA_CH_LOWER_SRC_ADDR
, ch_regs
->src_addr
);
375 tdma_ch_write(tdc
, ADMA_CH_LOWER_TRG_ADDR
, ch_regs
->trg_addr
);
376 tdma_ch_write(tdc
, ADMA_CH_FIFO_CTRL
, ch_regs
->fifo_ctrl
);
377 tdma_ch_write(tdc
, ADMA_CH_CONFIG
, ch_regs
->config
);
380 tdma_ch_write(tdc
, ADMA_CH_CMD
, 1);
385 static unsigned int tegra_adma_get_residue(struct tegra_adma_chan
*tdc
)
387 struct tegra_adma_desc
*desc
= tdc
->desc
;
388 unsigned int max
= ADMA_CH_XFER_STATUS_COUNT_MASK
+ 1;
389 unsigned int pos
= tdma_ch_read(tdc
, ADMA_CH_XFER_STATUS
);
390 unsigned int periods_remaining
;
393 * Handle wrap around of buffer count register
395 if (pos
< tdc
->tx_buf_pos
)
396 tdc
->tx_buf_count
+= pos
+ (max
- tdc
->tx_buf_pos
);
398 tdc
->tx_buf_count
+= pos
- tdc
->tx_buf_pos
;
400 periods_remaining
= tdc
->tx_buf_count
% desc
->num_periods
;
401 tdc
->tx_buf_pos
= pos
;
403 return desc
->buf_len
- (periods_remaining
* desc
->period_len
);
406 static irqreturn_t
tegra_adma_isr(int irq
, void *dev_id
)
408 struct tegra_adma_chan
*tdc
= dev_id
;
409 unsigned long status
;
411 spin_lock(&tdc
->vc
.lock
);
413 status
= tegra_adma_irq_clear(tdc
);
414 if (status
== 0 || !tdc
->desc
) {
415 spin_unlock(&tdc
->vc
.lock
);
419 vchan_cyclic_callback(&tdc
->desc
->vd
);
421 spin_unlock(&tdc
->vc
.lock
);
426 static void tegra_adma_issue_pending(struct dma_chan
*dc
)
428 struct tegra_adma_chan
*tdc
= to_tegra_adma_chan(dc
);
431 spin_lock_irqsave(&tdc
->vc
.lock
, flags
);
433 if (vchan_issue_pending(&tdc
->vc
)) {
435 tegra_adma_start(tdc
);
438 spin_unlock_irqrestore(&tdc
->vc
.lock
, flags
);
441 static bool tegra_adma_is_paused(struct tegra_adma_chan
*tdc
)
445 csts
= tdma_ch_read(tdc
, ADMA_CH_STATUS
);
446 csts
&= ADMA_CH_STATUS_XFER_PAUSED
;
448 return csts
? true : false;
451 static int tegra_adma_pause(struct dma_chan
*dc
)
453 struct tegra_adma_chan
*tdc
= to_tegra_adma_chan(dc
);
454 struct tegra_adma_desc
*desc
= tdc
->desc
;
455 struct tegra_adma_chan_regs
*ch_regs
= &desc
->ch_regs
;
458 ch_regs
->ctrl
= tdma_ch_read(tdc
, ADMA_CH_CTRL
);
459 ch_regs
->ctrl
|= (1 << ADMA_CH_CTRL_XFER_PAUSE_SHIFT
);
460 tdma_ch_write(tdc
, ADMA_CH_CTRL
, ch_regs
->ctrl
);
462 while (dcnt
-- && !tegra_adma_is_paused(tdc
))
463 udelay(TEGRA_ADMA_BURST_COMPLETE_TIME
);
466 dev_err(tdc2dev(tdc
), "unable to pause DMA channel\n");
473 static int tegra_adma_resume(struct dma_chan
*dc
)
475 struct tegra_adma_chan
*tdc
= to_tegra_adma_chan(dc
);
476 struct tegra_adma_desc
*desc
= tdc
->desc
;
477 struct tegra_adma_chan_regs
*ch_regs
= &desc
->ch_regs
;
479 ch_regs
->ctrl
= tdma_ch_read(tdc
, ADMA_CH_CTRL
);
480 ch_regs
->ctrl
&= ~(1 << ADMA_CH_CTRL_XFER_PAUSE_SHIFT
);
481 tdma_ch_write(tdc
, ADMA_CH_CTRL
, ch_regs
->ctrl
);
486 static int tegra_adma_terminate_all(struct dma_chan
*dc
)
488 struct tegra_adma_chan
*tdc
= to_tegra_adma_chan(dc
);
492 spin_lock_irqsave(&tdc
->vc
.lock
, flags
);
495 tegra_adma_stop(tdc
);
497 tegra_adma_request_free(tdc
);
498 vchan_get_all_descriptors(&tdc
->vc
, &head
);
499 spin_unlock_irqrestore(&tdc
->vc
.lock
, flags
);
500 vchan_dma_desc_free_list(&tdc
->vc
, &head
);
505 static enum dma_status
tegra_adma_tx_status(struct dma_chan
*dc
,
507 struct dma_tx_state
*txstate
)
509 struct tegra_adma_chan
*tdc
= to_tegra_adma_chan(dc
);
510 struct tegra_adma_desc
*desc
;
511 struct virt_dma_desc
*vd
;
514 unsigned int residual
;
516 ret
= dma_cookie_status(dc
, cookie
, txstate
);
517 if (ret
== DMA_COMPLETE
|| !txstate
)
520 spin_lock_irqsave(&tdc
->vc
.lock
, flags
);
522 vd
= vchan_find_desc(&tdc
->vc
, cookie
);
524 desc
= to_tegra_adma_desc(&vd
->tx
);
525 residual
= desc
->ch_regs
.tc
;
526 } else if (tdc
->desc
&& tdc
->desc
->vd
.tx
.cookie
== cookie
) {
527 residual
= tegra_adma_get_residue(tdc
);
532 spin_unlock_irqrestore(&tdc
->vc
.lock
, flags
);
534 dma_set_residue(txstate
, residual
);
539 static unsigned int tegra210_adma_get_burst_config(unsigned int burst_size
)
541 if (!burst_size
|| burst_size
> ADMA_CH_CONFIG_MAX_BURST_SIZE
)
542 burst_size
= ADMA_CH_CONFIG_MAX_BURST_SIZE
;
544 return fls(burst_size
) << ADMA_CH_CONFIG_BURST_SIZE_SHIFT
;
547 static unsigned int tegra186_adma_get_burst_config(unsigned int burst_size
)
549 if (!burst_size
|| burst_size
> ADMA_CH_CONFIG_MAX_BURST_SIZE
)
550 burst_size
= ADMA_CH_CONFIG_MAX_BURST_SIZE
;
552 return (burst_size
- 1) << ADMA_CH_CONFIG_BURST_SIZE_SHIFT
;
555 static int tegra_adma_set_xfer_params(struct tegra_adma_chan
*tdc
,
556 struct tegra_adma_desc
*desc
,
558 enum dma_transfer_direction direction
)
560 struct tegra_adma_chan_regs
*ch_regs
= &desc
->ch_regs
;
561 const struct tegra_adma_chip_data
*cdata
= tdc
->tdma
->cdata
;
562 unsigned int burst_size
, adma_dir
, fifo_size_shift
;
564 if (desc
->num_periods
> ADMA_CH_CONFIG_MAX_BUFS
)
569 fifo_size_shift
= ADMA_CH_TX_FIFO_SIZE_SHIFT
;
570 adma_dir
= ADMA_CH_CTRL_DIR_MEM2AHUB
;
571 burst_size
= tdc
->sconfig
.dst_maxburst
;
572 ch_regs
->config
= ADMA_CH_CONFIG_SRC_BUF(desc
->num_periods
- 1);
573 ch_regs
->ctrl
= ADMA_CH_REG_FIELD_VAL(tdc
->sreq_index
,
575 cdata
->ch_req_tx_shift
);
576 ch_regs
->src_addr
= buf_addr
;
580 fifo_size_shift
= ADMA_CH_RX_FIFO_SIZE_SHIFT
;
581 adma_dir
= ADMA_CH_CTRL_DIR_AHUB2MEM
;
582 burst_size
= tdc
->sconfig
.src_maxburst
;
583 ch_regs
->config
= ADMA_CH_CONFIG_TRG_BUF(desc
->num_periods
- 1);
584 ch_regs
->ctrl
= ADMA_CH_REG_FIELD_VAL(tdc
->sreq_index
,
586 cdata
->ch_req_rx_shift
);
587 ch_regs
->trg_addr
= buf_addr
;
591 dev_err(tdc2dev(tdc
), "DMA direction is not supported\n");
595 ch_regs
->ctrl
|= ADMA_CH_CTRL_DIR(adma_dir
) |
596 ADMA_CH_CTRL_MODE_CONTINUOUS
|
597 ADMA_CH_CTRL_FLOWCTRL_EN
;
598 ch_regs
->config
|= cdata
->adma_get_burst_config(burst_size
);
599 ch_regs
->config
|= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1);
600 if (cdata
->has_outstanding_reqs
)
601 ch_regs
->config
|= TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(8);
604 * 'sreq_index' represents the current ADMAIF channel number and as per
605 * HW recommendation its FIFO size should match with the corresponding
608 * ADMA FIFO size is set as per below (based on default ADMAIF channel
610 * fifo_size = 0x2 (sreq_index > sreq_index_offset)
611 * fifo_size = 0x3 (sreq_index <= sreq_index_offset)
614 if (tdc
->sreq_index
> cdata
->sreq_index_offset
)
616 ADMA_CH_REG_FIELD_VAL(2, cdata
->ch_fifo_size_mask
,
620 ADMA_CH_REG_FIELD_VAL(3, cdata
->ch_fifo_size_mask
,
623 ch_regs
->tc
= desc
->period_len
& ADMA_CH_TC_COUNT_MASK
;
625 return tegra_adma_request_alloc(tdc
, direction
);
628 static struct dma_async_tx_descriptor
*tegra_adma_prep_dma_cyclic(
629 struct dma_chan
*dc
, dma_addr_t buf_addr
, size_t buf_len
,
630 size_t period_len
, enum dma_transfer_direction direction
,
633 struct tegra_adma_chan
*tdc
= to_tegra_adma_chan(dc
);
634 struct tegra_adma_desc
*desc
= NULL
;
636 if (!buf_len
|| !period_len
|| period_len
> ADMA_CH_TC_COUNT_MASK
) {
637 dev_err(tdc2dev(tdc
), "invalid buffer/period len\n");
641 if (buf_len
% period_len
) {
642 dev_err(tdc2dev(tdc
), "buf_len not a multiple of period_len\n");
646 if (!IS_ALIGNED(buf_addr
, 4)) {
647 dev_err(tdc2dev(tdc
), "invalid buffer alignment\n");
651 desc
= kzalloc(sizeof(*desc
), GFP_NOWAIT
);
655 desc
->buf_len
= buf_len
;
656 desc
->period_len
= period_len
;
657 desc
->num_periods
= buf_len
/ period_len
;
659 if (tegra_adma_set_xfer_params(tdc
, desc
, buf_addr
, direction
)) {
664 return vchan_tx_prep(&tdc
->vc
, &desc
->vd
, flags
);
667 static int tegra_adma_alloc_chan_resources(struct dma_chan
*dc
)
669 struct tegra_adma_chan
*tdc
= to_tegra_adma_chan(dc
);
672 ret
= request_irq(tdc
->irq
, tegra_adma_isr
, 0, dma_chan_name(dc
), tdc
);
674 dev_err(tdc2dev(tdc
), "failed to get interrupt for %s\n",
679 ret
= pm_runtime_resume_and_get(tdc2dev(tdc
));
681 free_irq(tdc
->irq
, tdc
);
685 dma_cookie_init(&tdc
->vc
.chan
);
690 static void tegra_adma_free_chan_resources(struct dma_chan
*dc
)
692 struct tegra_adma_chan
*tdc
= to_tegra_adma_chan(dc
);
694 tegra_adma_terminate_all(dc
);
695 vchan_free_chan_resources(&tdc
->vc
);
696 tasklet_kill(&tdc
->vc
.task
);
697 free_irq(tdc
->irq
, tdc
);
698 pm_runtime_put(tdc2dev(tdc
));
701 tdc
->sreq_dir
= DMA_TRANS_NONE
;
704 static struct dma_chan
*tegra_dma_of_xlate(struct of_phandle_args
*dma_spec
,
705 struct of_dma
*ofdma
)
707 struct tegra_adma
*tdma
= ofdma
->of_dma_data
;
708 struct tegra_adma_chan
*tdc
;
709 struct dma_chan
*chan
;
710 unsigned int sreq_index
;
712 if (dma_spec
->args_count
!= 1)
715 sreq_index
= dma_spec
->args
[0];
717 if (sreq_index
== 0) {
718 dev_err(tdma
->dev
, "DMA request must not be 0\n");
722 chan
= dma_get_any_slave_channel(&tdma
->dma_dev
);
726 tdc
= to_tegra_adma_chan(chan
);
727 tdc
->sreq_index
= sreq_index
;
732 static int __maybe_unused
tegra_adma_runtime_suspend(struct device
*dev
)
734 struct tegra_adma
*tdma
= dev_get_drvdata(dev
);
735 struct tegra_adma_chan_regs
*ch_reg
;
736 struct tegra_adma_chan
*tdc
;
739 tdma
->global_cmd
= tdma_read(tdma
, ADMA_GLOBAL_CMD
);
740 if (!tdma
->global_cmd
)
743 for (i
= 0; i
< tdma
->nr_channels
; i
++) {
744 tdc
= &tdma
->channels
[i
];
745 /* skip for reserved channels */
749 ch_reg
= &tdc
->ch_regs
;
750 ch_reg
->cmd
= tdma_ch_read(tdc
, ADMA_CH_CMD
);
751 /* skip if channel is not active */
754 ch_reg
->tc
= tdma_ch_read(tdc
, ADMA_CH_TC
);
755 ch_reg
->src_addr
= tdma_ch_read(tdc
, ADMA_CH_LOWER_SRC_ADDR
);
756 ch_reg
->trg_addr
= tdma_ch_read(tdc
, ADMA_CH_LOWER_TRG_ADDR
);
757 ch_reg
->ctrl
= tdma_ch_read(tdc
, ADMA_CH_CTRL
);
758 ch_reg
->fifo_ctrl
= tdma_ch_read(tdc
, ADMA_CH_FIFO_CTRL
);
759 ch_reg
->config
= tdma_ch_read(tdc
, ADMA_CH_CONFIG
);
763 clk_disable_unprepare(tdma
->ahub_clk
);
768 static int __maybe_unused
tegra_adma_runtime_resume(struct device
*dev
)
770 struct tegra_adma
*tdma
= dev_get_drvdata(dev
);
771 struct tegra_adma_chan_regs
*ch_reg
;
772 struct tegra_adma_chan
*tdc
;
775 ret
= clk_prepare_enable(tdma
->ahub_clk
);
777 dev_err(dev
, "ahub clk_enable failed: %d\n", ret
);
780 tdma_write(tdma
, ADMA_GLOBAL_CMD
, tdma
->global_cmd
);
782 if (!tdma
->global_cmd
)
785 for (i
= 0; i
< tdma
->nr_channels
; i
++) {
786 tdc
= &tdma
->channels
[i
];
787 /* skip for reserved channels */
790 ch_reg
= &tdc
->ch_regs
;
791 /* skip if channel was not active earlier */
794 tdma_ch_write(tdc
, ADMA_CH_TC
, ch_reg
->tc
);
795 tdma_ch_write(tdc
, ADMA_CH_LOWER_SRC_ADDR
, ch_reg
->src_addr
);
796 tdma_ch_write(tdc
, ADMA_CH_LOWER_TRG_ADDR
, ch_reg
->trg_addr
);
797 tdma_ch_write(tdc
, ADMA_CH_CTRL
, ch_reg
->ctrl
);
798 tdma_ch_write(tdc
, ADMA_CH_FIFO_CTRL
, ch_reg
->fifo_ctrl
);
799 tdma_ch_write(tdc
, ADMA_CH_CONFIG
, ch_reg
->config
);
800 tdma_ch_write(tdc
, ADMA_CH_CMD
, ch_reg
->cmd
);
806 static const struct tegra_adma_chip_data tegra210_chip_data
= {
807 .adma_get_burst_config
= tegra210_adma_get_burst_config
,
808 .global_reg_offset
= 0xc00,
809 .global_int_clear
= 0x20,
810 .ch_req_tx_shift
= 28,
811 .ch_req_rx_shift
= 24,
817 .ch_fifo_size_mask
= 0xf,
818 .sreq_index_offset
= 2,
819 .has_outstanding_reqs
= false,
822 static const struct tegra_adma_chip_data tegra186_chip_data
= {
823 .adma_get_burst_config
= tegra186_adma_get_burst_config
,
824 .global_reg_offset
= 0,
825 .global_int_clear
= 0x402c,
826 .ch_req_tx_shift
= 27,
827 .ch_req_rx_shift
= 22,
828 .ch_base_offset
= 0x10000,
831 .ch_reg_size
= 0x100,
833 .ch_fifo_size_mask
= 0x1f,
834 .sreq_index_offset
= 4,
835 .has_outstanding_reqs
= true,
838 static const struct of_device_id tegra_adma_of_match
[] = {
839 { .compatible
= "nvidia,tegra210-adma", .data
= &tegra210_chip_data
},
840 { .compatible
= "nvidia,tegra186-adma", .data
= &tegra186_chip_data
},
843 MODULE_DEVICE_TABLE(of
, tegra_adma_of_match
);
845 static int tegra_adma_probe(struct platform_device
*pdev
)
847 const struct tegra_adma_chip_data
*cdata
;
848 struct tegra_adma
*tdma
;
851 cdata
= of_device_get_match_data(&pdev
->dev
);
853 dev_err(&pdev
->dev
, "device match data not found\n");
857 tdma
= devm_kzalloc(&pdev
->dev
,
858 struct_size(tdma
, channels
, cdata
->nr_channels
),
863 tdma
->dev
= &pdev
->dev
;
865 tdma
->nr_channels
= cdata
->nr_channels
;
866 platform_set_drvdata(pdev
, tdma
);
868 tdma
->base_addr
= devm_platform_ioremap_resource(pdev
, 0);
869 if (IS_ERR(tdma
->base_addr
))
870 return PTR_ERR(tdma
->base_addr
);
872 tdma
->ahub_clk
= devm_clk_get(&pdev
->dev
, "d_audio");
873 if (IS_ERR(tdma
->ahub_clk
)) {
874 dev_err(&pdev
->dev
, "Error: Missing ahub controller clock\n");
875 return PTR_ERR(tdma
->ahub_clk
);
878 tdma
->dma_chan_mask
= devm_kzalloc(&pdev
->dev
,
879 BITS_TO_LONGS(tdma
->nr_channels
) * sizeof(unsigned long),
881 if (!tdma
->dma_chan_mask
)
884 /* Enable all channels by default */
885 bitmap_fill(tdma
->dma_chan_mask
, tdma
->nr_channels
);
887 ret
= of_property_read_u32_array(pdev
->dev
.of_node
, "dma-channel-mask",
888 (u32
*)tdma
->dma_chan_mask
,
889 BITS_TO_U32(tdma
->nr_channels
));
890 if (ret
< 0 && (ret
!= -EINVAL
)) {
891 dev_err(&pdev
->dev
, "dma-channel-mask is not complete.\n");
895 INIT_LIST_HEAD(&tdma
->dma_dev
.channels
);
896 for (i
= 0; i
< tdma
->nr_channels
; i
++) {
897 struct tegra_adma_chan
*tdc
= &tdma
->channels
[i
];
899 /* skip for reserved channels */
900 if (!test_bit(i
, tdma
->dma_chan_mask
))
903 tdc
->chan_addr
= tdma
->base_addr
+ cdata
->ch_base_offset
904 + (cdata
->ch_reg_size
* i
);
906 tdc
->irq
= of_irq_get(pdev
->dev
.of_node
, i
);
908 ret
= tdc
->irq
?: -ENXIO
;
912 vchan_init(&tdc
->vc
, &tdma
->dma_dev
);
913 tdc
->vc
.desc_free
= tegra_adma_desc_free
;
917 pm_runtime_enable(&pdev
->dev
);
919 ret
= pm_runtime_resume_and_get(&pdev
->dev
);
923 ret
= tegra_adma_init(tdma
);
927 dma_cap_set(DMA_SLAVE
, tdma
->dma_dev
.cap_mask
);
928 dma_cap_set(DMA_PRIVATE
, tdma
->dma_dev
.cap_mask
);
929 dma_cap_set(DMA_CYCLIC
, tdma
->dma_dev
.cap_mask
);
931 tdma
->dma_dev
.dev
= &pdev
->dev
;
932 tdma
->dma_dev
.device_alloc_chan_resources
=
933 tegra_adma_alloc_chan_resources
;
934 tdma
->dma_dev
.device_free_chan_resources
=
935 tegra_adma_free_chan_resources
;
936 tdma
->dma_dev
.device_issue_pending
= tegra_adma_issue_pending
;
937 tdma
->dma_dev
.device_prep_dma_cyclic
= tegra_adma_prep_dma_cyclic
;
938 tdma
->dma_dev
.device_config
= tegra_adma_slave_config
;
939 tdma
->dma_dev
.device_tx_status
= tegra_adma_tx_status
;
940 tdma
->dma_dev
.device_terminate_all
= tegra_adma_terminate_all
;
941 tdma
->dma_dev
.src_addr_widths
= BIT(DMA_SLAVE_BUSWIDTH_4_BYTES
);
942 tdma
->dma_dev
.dst_addr_widths
= BIT(DMA_SLAVE_BUSWIDTH_4_BYTES
);
943 tdma
->dma_dev
.directions
= BIT(DMA_DEV_TO_MEM
) | BIT(DMA_MEM_TO_DEV
);
944 tdma
->dma_dev
.residue_granularity
= DMA_RESIDUE_GRANULARITY_SEGMENT
;
945 tdma
->dma_dev
.device_pause
= tegra_adma_pause
;
946 tdma
->dma_dev
.device_resume
= tegra_adma_resume
;
948 ret
= dma_async_device_register(&tdma
->dma_dev
);
950 dev_err(&pdev
->dev
, "ADMA registration failed: %d\n", ret
);
954 ret
= of_dma_controller_register(pdev
->dev
.of_node
,
955 tegra_dma_of_xlate
, tdma
);
957 dev_err(&pdev
->dev
, "ADMA OF registration failed %d\n", ret
);
961 pm_runtime_put(&pdev
->dev
);
963 dev_info(&pdev
->dev
, "Tegra210 ADMA driver registered %d channels\n",
969 dma_async_device_unregister(&tdma
->dma_dev
);
971 pm_runtime_put_sync(&pdev
->dev
);
973 pm_runtime_disable(&pdev
->dev
);
976 irq_dispose_mapping(tdma
->channels
[i
].irq
);
981 static void tegra_adma_remove(struct platform_device
*pdev
)
983 struct tegra_adma
*tdma
= platform_get_drvdata(pdev
);
986 of_dma_controller_free(pdev
->dev
.of_node
);
987 dma_async_device_unregister(&tdma
->dma_dev
);
989 for (i
= 0; i
< tdma
->nr_channels
; ++i
) {
990 if (tdma
->channels
[i
].irq
)
991 irq_dispose_mapping(tdma
->channels
[i
].irq
);
994 pm_runtime_disable(&pdev
->dev
);
997 static const struct dev_pm_ops tegra_adma_dev_pm_ops
= {
998 SET_RUNTIME_PM_OPS(tegra_adma_runtime_suspend
,
999 tegra_adma_runtime_resume
, NULL
)
1000 SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend
,
1001 pm_runtime_force_resume
)
1004 static struct platform_driver tegra_admac_driver
= {
1006 .name
= "tegra-adma",
1007 .pm
= &tegra_adma_dev_pm_ops
,
1008 .of_match_table
= tegra_adma_of_match
,
1010 .probe
= tegra_adma_probe
,
1011 .remove
= tegra_adma_remove
,
1014 module_platform_driver(tegra_admac_driver
);
1016 MODULE_ALIAS("platform:tegra210-adma");
1017 MODULE_DESCRIPTION("NVIDIA Tegra ADMA driver");
1018 MODULE_AUTHOR("Dara Ramesh <dramesh@nvidia.com>");
1019 MODULE_AUTHOR("Jon Hunter <jonathanh@nvidia.com>");
1020 MODULE_LICENSE("GPL v2");