// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA driver for NVIDIA Tegra GPC DMA controller.
 *
 * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/bitfield.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <dt-bindings/memory/tegra186-mc.h>

#include "virt-dma.h"

/* CSR register */
#define TEGRA_GPCDMA_CHAN_CSR			0x00
#define TEGRA_GPCDMA_CSR_ENB			BIT(31)
#define TEGRA_GPCDMA_CSR_IE_EOC			BIT(30)
#define TEGRA_GPCDMA_CSR_ONCE			BIT(27)

#define TEGRA_GPCDMA_CSR_FC_MODE		GENMASK(25, 24)
#define TEGRA_GPCDMA_CSR_FC_MODE_NO_MMIO	\
		FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 0)
#define TEGRA_GPCDMA_CSR_FC_MODE_ONE_MMIO	\
		FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 1)
#define TEGRA_GPCDMA_CSR_FC_MODE_TWO_MMIO	\
		FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 2)
#define TEGRA_GPCDMA_CSR_FC_MODE_FOUR_MMIO	\
		FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 3)

#define TEGRA_GPCDMA_CSR_DMA			GENMASK(23, 21)
#define TEGRA_GPCDMA_CSR_DMA_IO2MEM_NO_FC	\
		FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 0)
#define TEGRA_GPCDMA_CSR_DMA_IO2MEM_FC		\
		FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 1)
#define TEGRA_GPCDMA_CSR_DMA_MEM2IO_NO_FC	\
		FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 2)
#define TEGRA_GPCDMA_CSR_DMA_MEM2IO_FC		\
		FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 3)
#define TEGRA_GPCDMA_CSR_DMA_MEM2MEM		\
		FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 4)
#define TEGRA_GPCDMA_CSR_DMA_FIXED_PAT		\
		FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 6)

#define TEGRA_GPCDMA_CSR_REQ_SEL_MASK		GENMASK(20, 16)
#define TEGRA_GPCDMA_CSR_REQ_SEL_UNUSED		\
		FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, 4)
#define TEGRA_GPCDMA_CSR_IRQ_MASK		BIT(15)
#define TEGRA_GPCDMA_CSR_WEIGHT			GENMASK(13, 10)

/* STATUS register */
#define TEGRA_GPCDMA_CHAN_STATUS		0x004
#define TEGRA_GPCDMA_STATUS_BUSY		BIT(31)
#define TEGRA_GPCDMA_STATUS_ISE_EOC		BIT(30)
#define TEGRA_GPCDMA_STATUS_PING_PONG		BIT(28)
#define TEGRA_GPCDMA_STATUS_DMA_ACTIVITY	BIT(27)
#define TEGRA_GPCDMA_STATUS_CHANNEL_PAUSE	BIT(26)
#define TEGRA_GPCDMA_STATUS_CHANNEL_RX		BIT(25)
#define TEGRA_GPCDMA_STATUS_CHANNEL_TX		BIT(24)
#define TEGRA_GPCDMA_STATUS_IRQ_INTR_STA	BIT(23)
#define TEGRA_GPCDMA_STATUS_IRQ_STA		BIT(21)
#define TEGRA_GPCDMA_STATUS_IRQ_TRIG_STA	BIT(20)

#define TEGRA_GPCDMA_CHAN_CSRE			0x008
#define TEGRA_GPCDMA_CHAN_CSRE_PAUSE		BIT(31)

/* Source address */
#define TEGRA_GPCDMA_CHAN_SRC_PTR		0x00C

/* Destination address */
#define TEGRA_GPCDMA_CHAN_DST_PTR		0x010

/* High address pointer */
#define TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR		0x014
#define TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR		GENMASK(7, 0)
#define TEGRA_GPCDMA_HIGH_ADDR_DST_PTR		GENMASK(23, 16)

/* MC sequence register */
#define TEGRA_GPCDMA_CHAN_MCSEQ			0x18
#define TEGRA_GPCDMA_MCSEQ_DATA_SWAP		BIT(31)
#define TEGRA_GPCDMA_MCSEQ_REQ_COUNT		GENMASK(30, 25)
#define TEGRA_GPCDMA_MCSEQ_BURST		GENMASK(24, 23)
#define TEGRA_GPCDMA_MCSEQ_BURST_2		\
		FIELD_PREP(TEGRA_GPCDMA_MCSEQ_BURST, 0)
#define TEGRA_GPCDMA_MCSEQ_BURST_16		\
		FIELD_PREP(TEGRA_GPCDMA_MCSEQ_BURST, 3)
#define TEGRA_GPCDMA_MCSEQ_WRAP1		GENMASK(22, 20)
#define TEGRA_GPCDMA_MCSEQ_WRAP0		GENMASK(19, 17)
#define TEGRA_GPCDMA_MCSEQ_WRAP_NONE		0

#define TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK	GENMASK(13, 7)
#define TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK	GENMASK(6, 0)

/* MMIO sequence register */
#define TEGRA_GPCDMA_CHAN_MMIOSEQ		0x01c
#define TEGRA_GPCDMA_MMIOSEQ_DBL_BUF		BIT(31)
#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH		GENMASK(30, 28)
#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_8	\
		FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 0)
#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_16	\
		FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 1)
#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_32	\
		FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 2)
#define TEGRA_GPCDMA_MMIOSEQ_DATA_SWAP		BIT(27)
#define TEGRA_GPCDMA_MMIOSEQ_BURST_SHIFT	23
#define TEGRA_GPCDMA_MMIOSEQ_BURST_MIN		2U
#define TEGRA_GPCDMA_MMIOSEQ_BURST_MAX		32U
#define TEGRA_GPCDMA_MMIOSEQ_BURST(bs)	\
		(GENMASK((fls(bs) - 2), 0) << TEGRA_GPCDMA_MMIOSEQ_BURST_SHIFT)
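/*
 * Worked example for the burst encoding above: for a power-of-two burst
 * of bs words, fls(bs) - 2 equals log2(bs) - 1, so GENMASK(fls(bs) - 2, 0)
 * evaluates to bs - 1. A burst of 8 words therefore places 0x7 at bit 23;
 * a non-power-of-two bs rounds down to the nearest power of two (bs = 12
 * also encodes as 0x7).
 */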
#define TEGRA_GPCDMA_MMIOSEQ_MASTER_ID		GENMASK(22, 19)
#define TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD		GENMASK(18, 16)
#define TEGRA_GPCDMA_MMIOSEQ_MMIO_PROT		GENMASK(8, 7)

/* Channel WCOUNT */
#define TEGRA_GPCDMA_CHAN_WCOUNT		0x20

/* Transfer count */
#define TEGRA_GPCDMA_CHAN_XFER_COUNT		0x24

/* DMA byte count status */
#define TEGRA_GPCDMA_CHAN_DMA_BYTE_STATUS	0x28

/* Error Status Register */
#define TEGRA_GPCDMA_CHAN_ERR_STATUS		0x30
#define TEGRA_GPCDMA_CHAN_ERR_TYPE_SHIFT	8
#define TEGRA_GPCDMA_CHAN_ERR_TYPE_MASK		0xF
#define TEGRA_GPCDMA_CHAN_ERR_TYPE(err)	(			\
		((err) >> TEGRA_GPCDMA_CHAN_ERR_TYPE_SHIFT) &	\
		TEGRA_GPCDMA_CHAN_ERR_TYPE_MASK)
#define TEGRA_DMA_BM_FIFO_FULL_ERR		0xF
#define TEGRA_DMA_PERIPH_FIFO_FULL_ERR		0xE
#define TEGRA_DMA_PERIPH_ID_ERR			0xD
#define TEGRA_DMA_STREAM_ID_ERR			0xC
#define TEGRA_DMA_MC_SLAVE_ERR			0xB
#define TEGRA_DMA_MMIO_SLAVE_ERR		0xA
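/*
 * Decode example: an ERR_STATUS readout of 0xB00 yields
 * TEGRA_GPCDMA_CHAN_ERR_TYPE(0xB00) = (0xB00 >> 8) & 0xF = 0xB,
 * i.e. TEGRA_DMA_MC_SLAVE_ERR.
 */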
/* Fixed Pattern */
#define TEGRA_GPCDMA_CHAN_FIXED_PATTERN		0x34

#define TEGRA_GPCDMA_CHAN_TZ			0x38
#define TEGRA_GPCDMA_CHAN_TZ_MMIO_PROT_1	BIT(0)
#define TEGRA_GPCDMA_CHAN_TZ_MC_PROT_1		BIT(1)

#define TEGRA_GPCDMA_CHAN_SPARE			0x3c
#define TEGRA_GPCDMA_CHAN_SPARE_EN_LEGACY_FC	BIT(16)

/*
 * If a burst is in flight when the DMA is paused, this is the time allowed
 * for the in-flight burst to complete and the DMA status register to update.
 */
#define TEGRA_GPCDMA_BURST_COMPLETE_TIME	10
#define TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT	5000 /* 5 msec */

/* Channel base address offset from GPCDMA base address */
#define TEGRA_GPCDMA_CHANNEL_BASE_ADDR_OFFSET	0x10000

/* Default channel mask reserving channel0 */
#define TEGRA_GPCDMA_DEFAULT_CHANNEL_MASK	0xfffffffe

struct tegra_dma;
struct tegra_dma_channel;

/*
 * tegra_dma_chip_data Tegra chip specific DMA data
 * @nr_channels: Number of channels available in the controller.
 * @channel_reg_size: Channel register size.
 * @max_dma_count: Maximum DMA transfer count supported by DMA controller.
 * @hw_support_pause: Whether the DMA HW engine supports pausing a channel.
 * @terminate: Chip specific channel-terminate callback.
 */
struct tegra_dma_chip_data {
	bool hw_support_pause;
	unsigned int nr_channels;
	unsigned int channel_reg_size;
	unsigned int max_dma_count;
	int (*terminate)(struct tegra_dma_channel *tdc);
};

/* DMA channel registers */
struct tegra_dma_channel_regs {
	u32 csr;
	u32 src_ptr;
	u32 dst_ptr;
	u32 high_addr_ptr;
	u32 fixed_pattern;
	u32 mc_seq;
	u32 mmio_seq;
	u32 wcount;
};

/*
 * tegra_dma_sg_req: DMA request details to configure hardware. This
 * contains the details for one transfer to configure DMA hw.
 * The client's request for data transfer can be broken into multiple
 * sub-transfers as per requester details and hw support. These sub-transfers
 * are added as an array in the Tegra DMA desc which manages the transfer
 * details.
 */
struct tegra_dma_sg_req {
	unsigned int len;
	struct tegra_dma_channel_regs ch_regs;
};

/*
 * tegra_dma_desc: Tegra DMA descriptor which uses virt_dma_desc to
 * manage client requests and keep track of transfer status, callbacks
 * and request counts etc.
 */
struct tegra_dma_desc {
	bool cyclic;
	unsigned int bytes_req;
	unsigned int bytes_xfer;
	unsigned int sg_idx;
	unsigned int sg_count;
	struct virt_dma_desc vd;
	struct tegra_dma_channel *tdc;
	struct tegra_dma_sg_req sg_req[] __counted_by(sg_count);
};

/*
 * tegra_dma_channel: Channel specific information
 */
struct tegra_dma_channel {
	bool config_init;
	char name[30];
	enum dma_transfer_direction sid_dir;
	int id;
	int irq;
	int slave_id;
	struct tegra_dma *tdma;
	struct virt_dma_chan vc;
	struct tegra_dma_desc *dma_desc;
	struct dma_slave_config dma_sconfig;
	unsigned int stream_id;
	unsigned long chan_base_offset;
};

/*
 * tegra_dma: Tegra DMA specific information
 */
struct tegra_dma {
	const struct tegra_dma_chip_data *chip_data;
	unsigned long sid_m2d_reserved;
	unsigned long sid_d2m_reserved;
	u32 chan_mask;
	void __iomem *base_addr;
	struct device *dev;
	struct dma_device dma_dev;
	struct reset_control *rst;
	struct tegra_dma_channel channels[];
};

static inline void tdc_write(struct tegra_dma_channel *tdc,
			     u32 reg, u32 val)
{
	writel_relaxed(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg);
}

static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
{
	return readl_relaxed(tdc->tdma->base_addr + tdc->chan_base_offset + reg);
}
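/*
 * Per-channel registers live at a fixed stride above the controller base:
 * channel i's window starts at
 * base_addr + TEGRA_GPCDMA_CHANNEL_BASE_ADDR_OFFSET + i * channel_reg_size
 * (chan_base_offset is precomputed in probe). With the 64 KiB stride used
 * by the supported chips, channel 2's CSR sits at
 * base + 0x10000 + 2 * 0x10000 + 0x00 = base + 0x30000.
 */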
static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
{
	return container_of(dc, struct tegra_dma_channel, vc.chan);
}

static inline struct tegra_dma_desc *vd_to_tegra_dma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct tegra_dma_desc, vd);
}

static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
{
	return tdc->vc.chan.device->dev;
}

static void tegra_dma_dump_chan_regs(struct tegra_dma_channel *tdc)
{
	dev_dbg(tdc2dev(tdc), "DMA Channel %d name %s register dump:\n",
		tdc->id, tdc->name);
	dev_dbg(tdc2dev(tdc), "CSR %x STA %x CSRE %x SRC %x DST %x\n",
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR),
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS),
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE),
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR),
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_DST_PTR)
	);
	dev_dbg(tdc2dev(tdc), "MCSEQ %x IOSEQ %x WCNT %x XFER %x BSTA %x\n",
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ),
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_MMIOSEQ),
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_WCOUNT),
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_XFER_COUNT),
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_DMA_BYTE_STATUS)
	);
	dev_dbg(tdc2dev(tdc), "DMA ERR_STA %x\n",
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS));
}

static int tegra_dma_sid_reserve(struct tegra_dma_channel *tdc,
				 enum dma_transfer_direction direction)
{
	struct tegra_dma *tdma = tdc->tdma;
	int sid = tdc->slave_id;

	if (!is_slave_direction(direction))
		return 0;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		if (test_and_set_bit(sid, &tdma->sid_m2d_reserved)) {
			dev_err(tdma->dev, "slave id already in use\n");
			return -EINVAL;
		}
		break;
	case DMA_DEV_TO_MEM:
		if (test_and_set_bit(sid, &tdma->sid_d2m_reserved)) {
			dev_err(tdma->dev, "slave id already in use\n");
			return -EINVAL;
		}
		break;
	default:
		break;
	}

	tdc->sid_dir = direction;

	return 0;
}

static void tegra_dma_sid_free(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;
	int sid = tdc->slave_id;

	switch (tdc->sid_dir) {
	case DMA_MEM_TO_DEV:
		clear_bit(sid, &tdma->sid_m2d_reserved);
		break;
	case DMA_DEV_TO_MEM:
		clear_bit(sid, &tdma->sid_d2m_reserved);
		break;
	default:
		break;
	}

	tdc->sid_dir = DMA_TRANS_NONE;
}

static void tegra_dma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct tegra_dma_desc, vd));
}

static int tegra_dma_slave_config(struct dma_chan *dc,
				  struct dma_slave_config *sconfig)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
	tdc->config_init = true;

	return 0;
}

static int tegra_dma_pause(struct tegra_dma_channel *tdc)
{
	int ret;
	u32 val;

	val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE);
	val |= TEGRA_GPCDMA_CHAN_CSRE_PAUSE;
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, val);

	/* Wait until busy bit is de-asserted */
	ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr +
			tdc->chan_base_offset + TEGRA_GPCDMA_CHAN_STATUS,
			val,
			!(val & TEGRA_GPCDMA_STATUS_BUSY),
			TEGRA_GPCDMA_BURST_COMPLETE_TIME,
			TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT);

	if (ret) {
		dev_err(tdc2dev(tdc), "DMA pause timed out\n");
		tegra_dma_dump_chan_regs(tdc);
	}

	return ret;
}

static int tegra_dma_device_pause(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned long flags;
	int ret;

	if (!tdc->tdma->chip_data->hw_support_pause)
		return -ENOSYS;

	spin_lock_irqsave(&tdc->vc.lock, flags);
	ret = tegra_dma_pause(tdc);
	spin_unlock_irqrestore(&tdc->vc.lock, flags);

	return ret;
}

static void tegra_dma_resume(struct tegra_dma_channel *tdc)
{
	u32 val;

	val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE);
	val &= ~TEGRA_GPCDMA_CHAN_CSRE_PAUSE;
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, val);
}

static int tegra_dma_device_resume(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned long flags;

	if (!tdc->tdma->chip_data->hw_support_pause)
		return -ENOSYS;

	spin_lock_irqsave(&tdc->vc.lock, flags);
	tegra_dma_resume(tdc);
	spin_unlock_irqrestore(&tdc->vc.lock, flags);

	return 0;
}

static inline int tegra_dma_pause_noerr(struct tegra_dma_channel *tdc)
{
	/*
	 * Return 0 irrespective of PAUSE status.
	 * This is useful to recover channels that can exit out of flush
	 * state when the channel is disabled.
	 */
	tegra_dma_pause(tdc);

	return 0;
}

static void tegra_dma_disable(struct tegra_dma_channel *tdc)
{
	u32 csr, status;

	csr = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR);

	/* Disable interrupts */
	csr &= ~TEGRA_GPCDMA_CSR_IE_EOC;

	/* Disable DMA */
	csr &= ~TEGRA_GPCDMA_CSR_ENB;
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, csr);

	/* Clear interrupt status if it is there */
	status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS);
	if (status & TEGRA_GPCDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
		tdc_write(tdc, TEGRA_GPCDMA_CHAN_STATUS, status);
	}
}

static void tegra_dma_configure_next_sg(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc = tdc->dma_desc;
	struct tegra_dma_channel_regs *ch_regs;
	int ret;
	u32 val;

	dma_desc->sg_idx++;

	/* Reset the sg index for cyclic transfers */
	if (dma_desc->sg_idx == dma_desc->sg_count)
		dma_desc->sg_idx = 0;

	/* Configure next transfer immediately after DMA is busy */
	ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr +
			tdc->chan_base_offset + TEGRA_GPCDMA_CHAN_STATUS,
			val,
			(val & TEGRA_GPCDMA_STATUS_BUSY), 0,
			TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT);
	if (ret)
		return;

	ch_regs = &dma_desc->sg_req[dma_desc->sg_idx].ch_regs;

	tdc_write(tdc, TEGRA_GPCDMA_CHAN_WCOUNT, ch_regs->wcount);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR, ch_regs->src_ptr);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_DST_PTR, ch_regs->dst_ptr);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR, ch_regs->high_addr_ptr);

	/* Start DMA */
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR,
		  ch_regs->csr | TEGRA_GPCDMA_CSR_ENB);
}

static void tegra_dma_start(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc = tdc->dma_desc;
	struct tegra_dma_channel_regs *ch_regs;
	struct virt_dma_desc *vdesc;

	if (!dma_desc) {
		vdesc = vchan_next_desc(&tdc->vc);
		if (!vdesc)
			return;

		dma_desc = vd_to_tegra_dma_desc(vdesc);
		list_del(&vdesc->node);
		dma_desc->tdc = tdc;
		tdc->dma_desc = dma_desc;

		tegra_dma_resume(tdc);
	}

	ch_regs = &dma_desc->sg_req[dma_desc->sg_idx].ch_regs;

	tdc_write(tdc, TEGRA_GPCDMA_CHAN_WCOUNT, ch_regs->wcount);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, 0);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR, ch_regs->src_ptr);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_DST_PTR, ch_regs->dst_ptr);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR, ch_regs->high_addr_ptr);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_FIXED_PATTERN, ch_regs->fixed_pattern);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_MMIOSEQ, ch_regs->mmio_seq);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_MCSEQ, ch_regs->mc_seq);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, ch_regs->csr);

	/* Start DMA */
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR,
		  ch_regs->csr | TEGRA_GPCDMA_CSR_ENB);
}

static void tegra_dma_xfer_complete(struct tegra_dma_channel *tdc)
{
	vchan_cookie_complete(&tdc->dma_desc->vd);

	tegra_dma_sid_free(tdc);
	tdc->dma_desc = NULL;
}

static void tegra_dma_chan_decode_error(struct tegra_dma_channel *tdc,
					unsigned int err_status)
{
	switch (TEGRA_GPCDMA_CHAN_ERR_TYPE(err_status)) {
	case TEGRA_DMA_BM_FIFO_FULL_ERR:
		dev_err(tdc->tdma->dev,
			"GPCDMA CH%d bm fifo full\n", tdc->id);
		break;

	case TEGRA_DMA_PERIPH_FIFO_FULL_ERR:
		dev_err(tdc->tdma->dev,
			"GPCDMA CH%d peripheral fifo full\n", tdc->id);
		break;

	case TEGRA_DMA_PERIPH_ID_ERR:
		dev_err(tdc->tdma->dev,
			"GPCDMA CH%d illegal peripheral id\n", tdc->id);
		break;

	case TEGRA_DMA_STREAM_ID_ERR:
		dev_err(tdc->tdma->dev,
			"GPCDMA CH%d illegal stream id\n", tdc->id);
		break;

	case TEGRA_DMA_MC_SLAVE_ERR:
		dev_err(tdc->tdma->dev,
			"GPCDMA CH%d mc slave error\n", tdc->id);
		break;

	case TEGRA_DMA_MMIO_SLAVE_ERR:
		dev_err(tdc->tdma->dev,
			"GPCDMA CH%d mmio slave error\n", tdc->id);
		break;

	default:
		dev_err(tdc->tdma->dev,
			"GPCDMA CH%d security violation %x\n", tdc->id,
			err_status);
	}
}

static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
{
	struct tegra_dma_channel *tdc = dev_id;
	struct tegra_dma_desc *dma_desc = tdc->dma_desc;
	struct tegra_dma_sg_req *sg_req;
	u32 status;

	/* Check channel error status register */
	status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS);
	if (status) {
		tegra_dma_chan_decode_error(tdc, status);
		tegra_dma_dump_chan_regs(tdc);
		tdc_write(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS, 0xFFFFFFFF);
	}

	spin_lock(&tdc->vc.lock);
	status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS);
	if (!(status & TEGRA_GPCDMA_STATUS_ISE_EOC))
		goto irq_done;

	tdc_write(tdc, TEGRA_GPCDMA_CHAN_STATUS,
		  TEGRA_GPCDMA_STATUS_ISE_EOC);

	if (!dma_desc)
		goto irq_done;

	sg_req = dma_desc->sg_req;
	dma_desc->bytes_xfer += sg_req[dma_desc->sg_idx].len;

	if (dma_desc->cyclic) {
		vchan_cyclic_callback(&dma_desc->vd);
		tegra_dma_configure_next_sg(tdc);
	} else {
		dma_desc->sg_idx++;
		if (dma_desc->sg_idx == dma_desc->sg_count)
			tegra_dma_xfer_complete(tdc);
		else
			tegra_dma_start(tdc);
	}

irq_done:
	spin_unlock(&tdc->vc.lock);
	return IRQ_HANDLED;
}

static void tegra_dma_issue_pending(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned long flags;

	if (tdc->dma_desc)
		return;

	spin_lock_irqsave(&tdc->vc.lock, flags);
	if (vchan_issue_pending(&tdc->vc))
		tegra_dma_start(tdc);

	/*
	 * For cyclic DMA transfers, program the second
	 * transfer parameters as soon as the first DMA
	 * transfer is started in order for the DMA
	 * controller to trigger the second transfer
	 * with the correct parameters.
	 */
	if (tdc->dma_desc && tdc->dma_desc->cyclic)
		tegra_dma_configure_next_sg(tdc);

	spin_unlock_irqrestore(&tdc->vc.lock, flags);
}

static int tegra_dma_stop_client(struct tegra_dma_channel *tdc)
{
	int ret;
	u32 status, csr;

	/*
	 * Change the client associated with the DMA channel
	 * to stop DMA engine from starting any more bursts for
	 * the given client and wait for in flight bursts to complete.
	 */
	csr = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR);
	csr &= ~(TEGRA_GPCDMA_CSR_REQ_SEL_MASK);
	csr |= TEGRA_GPCDMA_CSR_REQ_SEL_UNUSED;
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, csr);

	/* Wait for in flight data transfer to finish */
	udelay(TEGRA_GPCDMA_BURST_COMPLETE_TIME);

	/* If the TX/RX path is still active, wait until it becomes inactive */
	ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr +
				tdc->chan_base_offset +
				TEGRA_GPCDMA_CHAN_STATUS,
				status,
				!(status & (TEGRA_GPCDMA_STATUS_CHANNEL_TX |
				TEGRA_GPCDMA_STATUS_CHANNEL_RX)),
				5,
				TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT);
	if (ret) {
		dev_err(tdc2dev(tdc), "Timeout waiting for DMA burst completion!\n");
		tegra_dma_dump_chan_regs(tdc);
	}

	return ret;
}

static int tegra_dma_terminate_all(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned long flags;
	LIST_HEAD(head);
	int err;

	spin_lock_irqsave(&tdc->vc.lock, flags);

	if (tdc->dma_desc) {
		err = tdc->tdma->chip_data->terminate(tdc);
		if (err) {
			spin_unlock_irqrestore(&tdc->vc.lock, flags);
			return err;
		}

		vchan_terminate_vdesc(&tdc->dma_desc->vd);
		tegra_dma_disable(tdc);
		tdc->dma_desc = NULL;
	}

	tegra_dma_sid_free(tdc);
	vchan_get_all_descriptors(&tdc->vc, &head);
	spin_unlock_irqrestore(&tdc->vc.lock, flags);

	vchan_dma_desc_free_list(&tdc->vc, &head);

	return 0;
}

static int tegra_dma_get_residual(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc = tdc->dma_desc;
	struct tegra_dma_sg_req *sg_req = dma_desc->sg_req;
	unsigned int bytes_xfer, residual;
	u32 wcount = 0, status;

	wcount = tdc_read(tdc, TEGRA_GPCDMA_CHAN_XFER_COUNT);

	/*
	 * Set wcount = 0 if EOC bit is set. The transfer would have
	 * already completed and the CHAN_XFER_COUNT could have updated
	 * for the next transfer, specifically in case of cyclic transfers.
	 */
	status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS);
	if (status & TEGRA_GPCDMA_STATUS_ISE_EOC)
		wcount = 0;

	bytes_xfer = dma_desc->bytes_xfer +
		     sg_req[dma_desc->sg_idx].len - (wcount * 4);

	if (dma_desc->bytes_req == bytes_xfer)
		return 0;

	residual = dma_desc->bytes_req - (bytes_xfer % dma_desc->bytes_req);

	return residual;
}
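/*
 * Worked example for the residue math above: with bytes_req = 4096,
 * bytes_xfer = 3072 accumulated from completed periods, a current sg of
 * len = 1024 and XFER_COUNT reading 128 words (512 bytes outstanding),
 * bytes_xfer becomes 3072 + 1024 - 512 = 3584 and the residue is
 * 4096 - (3584 % 4096) = 512 bytes.
 */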
static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
					   dma_cookie_t cookie,
					   struct dma_tx_state *txstate)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	struct virt_dma_desc *vd;
	unsigned int residual;
	unsigned long flags;
	enum dma_status ret;

	ret = dma_cookie_status(dc, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&tdc->vc.lock, flags);
	vd = vchan_find_desc(&tdc->vc, cookie);
	if (vd) {
		dma_desc = vd_to_tegra_dma_desc(vd);
		residual = dma_desc->bytes_req;
		dma_set_residue(txstate, residual);
	} else if (tdc->dma_desc && tdc->dma_desc->vd.tx.cookie == cookie) {
		residual = tegra_dma_get_residual(tdc);
		dma_set_residue(txstate, residual);
	} else {
		dev_err(tdc2dev(tdc), "cookie %d is not found\n", cookie);
	}
	spin_unlock_irqrestore(&tdc->vc.lock, flags);

	return ret;
}

static inline int get_bus_width(struct tegra_dma_channel *tdc,
				enum dma_slave_buswidth slave_bw)
{
	switch (slave_bw) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_8;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_16;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_32;
	default:
		dev_err(tdc2dev(tdc), "given slave bus width is not supported\n");
		return -EINVAL;
	}
}

static unsigned int get_burst_size(struct tegra_dma_channel *tdc,
				   u32 burst_size, enum dma_slave_buswidth slave_bw,
				   int len)
{
	unsigned int burst_mmio_width, burst_byte;

	/*
	 * burst_size from the client is in terms of the bus_width;
	 * convert that into words.
	 * If burst_size is not specified by the client, then use
	 * len to calculate the optimum burst size.
	 */
	burst_byte = burst_size ? burst_size * slave_bw : len;
	burst_mmio_width = burst_byte / 4;

	if (burst_mmio_width < TEGRA_GPCDMA_MMIOSEQ_BURST_MIN)
		return 0;

	burst_mmio_width = min(burst_mmio_width, TEGRA_GPCDMA_MMIOSEQ_BURST_MAX);

	return TEGRA_GPCDMA_MMIOSEQ_BURST(burst_mmio_width);
}
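/*
 * Example: with a 4-byte slave bus width and dst_maxburst = 8, the burst
 * is 8 * 4 = 32 bytes = 8 words, which lies within [2, 32] words and
 * encodes as TEGRA_GPCDMA_MMIOSEQ_BURST(8). A client that leaves maxburst
 * at 0 gets the burst derived from the transfer length instead.
 */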
static int get_transfer_param(struct tegra_dma_channel *tdc,
			      enum dma_transfer_direction direction,
			      u32 *apb_addr,
			      u32 *mmio_seq,
			      u32 *csr,
			      unsigned int *burst_size,
			      enum dma_slave_buswidth *slave_bw)
{
	switch (direction) {
	case DMA_MEM_TO_DEV:
		*apb_addr = tdc->dma_sconfig.dst_addr;
		*mmio_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
		*burst_size = tdc->dma_sconfig.dst_maxburst;
		*slave_bw = tdc->dma_sconfig.dst_addr_width;
		*csr = TEGRA_GPCDMA_CSR_DMA_MEM2IO_FC;
		return 0;
	case DMA_DEV_TO_MEM:
		*apb_addr = tdc->dma_sconfig.src_addr;
		*mmio_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
		*burst_size = tdc->dma_sconfig.src_maxburst;
		*slave_bw = tdc->dma_sconfig.src_addr_width;
		*csr = TEGRA_GPCDMA_CSR_DMA_IO2MEM_FC;
		return 0;
	default:
		dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
	}

	return -EINVAL;
}

static struct dma_async_tx_descriptor *
tegra_dma_prep_dma_memset(struct dma_chan *dc, dma_addr_t dest, int value,
			  size_t len, unsigned long flags)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned int max_dma_count = tdc->tdma->chip_data->max_dma_count;
	struct tegra_dma_sg_req *sg_req;
	struct tegra_dma_desc *dma_desc;
	u32 csr, mc_seq;

	if ((len & 3) || (dest & 3) || len > max_dma_count) {
		dev_err(tdc2dev(tdc),
			"DMA length/memory address is not supported\n");
		return NULL;
	}

	/* Set DMA mode to fixed pattern */
	csr = TEGRA_GPCDMA_CSR_DMA_FIXED_PAT;
	/* Enable once or continuous mode */
	csr |= TEGRA_GPCDMA_CSR_ONCE;
	/* Enable IRQ mask */
	csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
	/* Enable the DMA interrupt */
	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_GPCDMA_CSR_IE_EOC;
	/* Configure default priority weight for the channel */
	csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);

	mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
	/* retain stream-id and clean rest */
	mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK;

	/* Set the address wrapping */
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);

	/* Program outstanding MC requests */
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);
	/* Set burst size */
	mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;

	dma_desc = kzalloc(struct_size(dma_desc, sg_req, 1), GFP_NOWAIT);
	if (!dma_desc)
		return NULL;

	dma_desc->bytes_req = len;
	dma_desc->sg_count = 1;
	sg_req = dma_desc->sg_req;

	sg_req[0].ch_regs.src_ptr = 0;
	sg_req[0].ch_regs.dst_ptr = dest;
	sg_req[0].ch_regs.high_addr_ptr =
			FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (dest >> 32));
	sg_req[0].ch_regs.fixed_pattern = value;
	/* Word count reg takes value as (N + 1) words */
	sg_req[0].ch_regs.wcount = ((len - 4) >> 2);
	sg_req[0].ch_regs.csr = csr;
	sg_req[0].ch_regs.mmio_seq = 0;
	sg_req[0].ch_regs.mc_seq = mc_seq;
	sg_req[0].len = len;

	dma_desc->cyclic = false;
	return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
}
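/*
 * Illustrative only (not part of this driver): a dmaengine client would
 * reach the prep callback above through the generic API, e.g.
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_memset(chan, dma_buf, 0x5a, SZ_4K,
 *				       DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 *
 * Both the length and the destination must be word aligned, per the check
 * at the top of this function; dma_buf stands in for a real DMA address.
 */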
static struct dma_async_tx_descriptor *
tegra_dma_prep_dma_memcpy(struct dma_chan *dc, dma_addr_t dest,
			  dma_addr_t src, size_t len, unsigned long flags)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_sg_req *sg_req;
	struct tegra_dma_desc *dma_desc;
	unsigned int max_dma_count;
	u32 csr, mc_seq;

	max_dma_count = tdc->tdma->chip_data->max_dma_count;
	if ((len & 3) || (src & 3) || (dest & 3) || len > max_dma_count) {
		dev_err(tdc2dev(tdc),
			"DMA length/memory address is not supported\n");
		return NULL;
	}

	/* Set DMA mode to memory to memory transfer */
	csr = TEGRA_GPCDMA_CSR_DMA_MEM2MEM;
	/* Enable once or continuous mode */
	csr |= TEGRA_GPCDMA_CSR_ONCE;
	/* Enable IRQ mask */
	csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
	/* Enable the DMA interrupt */
	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_GPCDMA_CSR_IE_EOC;
	/* Configure default priority weight for the channel */
	csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);

	mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
	/* retain stream-id and clean rest */
	mc_seq &= (TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK) |
		  (TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK);

	/* Set the address wrapping */
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);

	/* Program outstanding MC requests */
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);
	/* Set burst size */
	mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;

	dma_desc = kzalloc(struct_size(dma_desc, sg_req, 1), GFP_NOWAIT);
	if (!dma_desc)
		return NULL;

	dma_desc->bytes_req = len;
	dma_desc->sg_count = 1;
	sg_req = dma_desc->sg_req;

	sg_req[0].ch_regs.src_ptr = src;
	sg_req[0].ch_regs.dst_ptr = dest;
	sg_req[0].ch_regs.high_addr_ptr =
		FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (src >> 32));
	sg_req[0].ch_regs.high_addr_ptr |=
		FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (dest >> 32));
	/* Word count reg takes value as (N + 1) words */
	sg_req[0].ch_regs.wcount = ((len - 4) >> 2);
	sg_req[0].ch_regs.csr = csr;
	sg_req[0].ch_regs.mmio_seq = 0;
	sg_req[0].ch_regs.mc_seq = mc_seq;
	sg_req[0].len = len;

	dma_desc->cyclic = false;
	return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
}

static struct dma_async_tx_descriptor *
tegra_dma_prep_slave_sg(struct dma_chan *dc, struct scatterlist *sgl,
			unsigned int sg_len, enum dma_transfer_direction direction,
			unsigned long flags, void *context)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned int max_dma_count = tdc->tdma->chip_data->max_dma_count;
	enum dma_slave_buswidth slave_bw = DMA_SLAVE_BUSWIDTH_UNDEFINED;
	u32 csr, mc_seq, apb_ptr = 0, mmio_seq = 0;
	struct tegra_dma_sg_req *sg_req;
	struct tegra_dma_desc *dma_desc;
	struct scatterlist *sg;
	u32 burst_size;
	unsigned int i;
	int ret;

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "DMA channel is not configured\n");
		return NULL;
	}
	if (sg_len < 1) {
		dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len);
		return NULL;
	}

	ret = tegra_dma_sid_reserve(tdc, direction);
	if (ret)
		return NULL;

	ret = get_transfer_param(tdc, direction, &apb_ptr, &mmio_seq, &csr,
				 &burst_size, &slave_bw);
	if (ret < 0)
		return NULL;

	/* Enable once or continuous mode */
	csr |= TEGRA_GPCDMA_CSR_ONCE;
	/* Program the slave id in requestor select */
	csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, tdc->slave_id);
	/* Enable IRQ mask */
	csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
	/* Configure default priority weight for the channel */
	csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);

	/* Enable the DMA interrupt */
	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_GPCDMA_CSR_IE_EOC;

	mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
	/* retain stream-id and clean rest */
	mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK;

	/* Set the address wrapping on both MC and MMIO side */
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
	mmio_seq |= FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD, 1);

	/* Program 2 MC outstanding requests by default. */
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);

	/* Setting MC burst size depending on MMIO burst size */
	if (burst_size == 64)
		mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;
	else
		mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_2;

	dma_desc = kzalloc(struct_size(dma_desc, sg_req, sg_len), GFP_NOWAIT);
	if (!dma_desc)
		return NULL;

	dma_desc->sg_count = sg_len;
	sg_req = dma_desc->sg_req;

	/* Make transfer requests */
	for_each_sg(sgl, sg, sg_len, i) {
		u32 len;
		dma_addr_t mem;

		mem = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((len & 3) || (mem & 3) || len > max_dma_count) {
			dev_err(tdc2dev(tdc),
				"DMA length/memory address is not supported\n");
			kfree(dma_desc);
			return NULL;
		}

		mmio_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		dma_desc->bytes_req += len;

		if (direction == DMA_MEM_TO_DEV) {
			sg_req[i].ch_regs.src_ptr = mem;
			sg_req[i].ch_regs.dst_ptr = apb_ptr;
			sg_req[i].ch_regs.high_addr_ptr =
				FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (mem >> 32));
		} else if (direction == DMA_DEV_TO_MEM) {
			sg_req[i].ch_regs.src_ptr = apb_ptr;
			sg_req[i].ch_regs.dst_ptr = mem;
			sg_req[i].ch_regs.high_addr_ptr =
				FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (mem >> 32));
		}

		/*
		 * Word count register takes input in words. Writing a value
		 * of N into word count register means a req of (N+1) words.
		 */
		sg_req[i].ch_regs.wcount = ((len - 4) >> 2);
		sg_req[i].ch_regs.csr = csr;
		sg_req[i].ch_regs.mmio_seq = mmio_seq;
		sg_req[i].ch_regs.mc_seq = mc_seq;
		sg_req[i].len = len;
	}

	dma_desc->cyclic = false;
	return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
}
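/*
 * Illustrative only (not part of this driver): a peripheral driver sets up
 * the channel before the prep callback above runs, e.g.
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys,	// hypothetical FIFO address
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	tx = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				     DMA_PREP_INTERRUPT);
 *
 * fifo_phys stands in for the real peripheral register address.
 */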
static struct dma_async_tx_descriptor *
tegra_dma_prep_dma_cyclic(struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
			  size_t period_len, enum dma_transfer_direction direction,
			  unsigned long flags)
{
	enum dma_slave_buswidth slave_bw = DMA_SLAVE_BUSWIDTH_UNDEFINED;
	u32 csr, mc_seq, apb_ptr = 0, mmio_seq = 0, burst_size;
	unsigned int max_dma_count, len, period_count, i;
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	dma_addr_t mem = buf_addr;
	int ret;

	if (!buf_len || !period_len) {
		dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
		return NULL;
	}

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
		return NULL;
	}

	ret = tegra_dma_sid_reserve(tdc, direction);
	if (ret)
		return NULL;

	/*
	 * We only support cyclic transfers when buf_len is a multiple of
	 * period_len.
	 */
	if (buf_len % period_len) {
		dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
		return NULL;
	}

	len = period_len;
	max_dma_count = tdc->tdma->chip_data->max_dma_count;
	if ((len & 3) || (buf_addr & 3) || len > max_dma_count) {
		dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
		return NULL;
	}

	ret = get_transfer_param(tdc, direction, &apb_ptr, &mmio_seq, &csr,
				 &burst_size, &slave_bw);
	if (ret < 0)
		return NULL;

	/* Enable once or continuous mode */
	csr &= ~TEGRA_GPCDMA_CSR_ONCE;
	/* Program the slave id in requestor select */
	csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, tdc->slave_id);
	/* Enable IRQ mask */
	csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
	/* Configure default priority weight for the channel */
	csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);

	/* Enable the DMA interrupt */
	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_GPCDMA_CSR_IE_EOC;

	mmio_seq |= FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD, 1);

	mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
	/* retain stream-id and clean rest */
	mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK;

	/* Set the address wrapping on both MC and MMIO side */
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);

	/* Program 2 MC outstanding requests by default. */
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);
	/* Setting MC burst size depending on MMIO burst size */
	if (burst_size == 64)
		mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;
	else
		mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_2;

	period_count = buf_len / period_len;
	dma_desc = kzalloc(struct_size(dma_desc, sg_req, period_count),
			   GFP_NOWAIT);
	if (!dma_desc)
		return NULL;

	dma_desc->bytes_req = buf_len;
	dma_desc->sg_count = period_count;
	sg_req = dma_desc->sg_req;

	/* Split transfer equal to period size */
	for (i = 0; i < period_count; i++) {
		mmio_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		if (direction == DMA_MEM_TO_DEV) {
			sg_req[i].ch_regs.src_ptr = mem;
			sg_req[i].ch_regs.dst_ptr = apb_ptr;
			sg_req[i].ch_regs.high_addr_ptr =
				FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (mem >> 32));
		} else if (direction == DMA_DEV_TO_MEM) {
			sg_req[i].ch_regs.src_ptr = apb_ptr;
			sg_req[i].ch_regs.dst_ptr = mem;
			sg_req[i].ch_regs.high_addr_ptr =
				FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (mem >> 32));
		}

		/*
		 * Word count register takes input in words. Writing a value
		 * of N into word count register means a req of (N+1) words.
		 */
		sg_req[i].ch_regs.wcount = ((len - 4) >> 2);
		sg_req[i].ch_regs.csr = csr;
		sg_req[i].ch_regs.mmio_seq = mmio_seq;
		sg_req[i].ch_regs.mc_seq = mc_seq;
		sg_req[i].len = len;

		mem += len;
	}

	dma_desc->cyclic = true;

	return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
}
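/*
 * Illustrative only: a 4 KiB ring split into four 1 KiB periods yields
 * period_count = 4 sg entries at buf_addr, buf_addr + 0x400, + 0x800 and
 * + 0xc00. The ISR fires once per period, and tegra_dma_configure_next_sg()
 * wraps sg_idx back to 0 so the ring replays until the channel is
 * terminated.
 */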
static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	int ret;

	ret = request_irq(tdc->irq, tegra_dma_isr, 0, tdc->name, tdc);
	if (ret) {
		dev_err(tdc2dev(tdc), "request_irq failed for %s\n", tdc->name);
		return ret;
	}

	dma_cookie_init(&tdc->vc.chan);
	tdc->config_init = false;

	return 0;
}

static void tegra_dma_chan_synchronize(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	synchronize_irq(tdc->irq);
	vchan_synchronize(&tdc->vc);
}

static void tegra_dma_free_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);

	tegra_dma_terminate_all(dc);
	synchronize_irq(tdc->irq);

	tasklet_kill(&tdc->vc.task);
	tdc->config_init = false;
	tdc->slave_id = -1;
	tdc->sid_dir = DMA_TRANS_NONE;
	free_irq(tdc->irq, tdc);

	vchan_free_chan_resources(&tdc->vc);
}

static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct tegra_dma *tdma = ofdma->of_dma_data;
	struct tegra_dma_channel *tdc;
	struct dma_chan *chan;

	chan = dma_get_any_slave_channel(&tdma->dma_dev);
	if (!chan)
		return NULL;

	tdc = to_tegra_dma_chan(chan);
	tdc->slave_id = dma_spec->args[0];

	return chan;
}
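/*
 * Illustrative only: with the single DMA cell consumed above, a client
 * device-tree node references a channel as, e.g.,
 *
 *	dmas = <&gpcdma 24>;
 *	dma-names = "tx";
 *
 * where 24 is a hypothetical requester/slave ID for that peripheral.
 */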
static const struct tegra_dma_chip_data tegra186_dma_chip_data = {
	.nr_channels = 31,
	.channel_reg_size = SZ_64K,
	.max_dma_count = SZ_1G,
	.hw_support_pause = false,
	.terminate = tegra_dma_stop_client,
};

static const struct tegra_dma_chip_data tegra194_dma_chip_data = {
	.nr_channels = 31,
	.channel_reg_size = SZ_64K,
	.max_dma_count = SZ_1G,
	.hw_support_pause = true,
	.terminate = tegra_dma_pause,
};

static const struct tegra_dma_chip_data tegra234_dma_chip_data = {
	.nr_channels = 31,
	.channel_reg_size = SZ_64K,
	.max_dma_count = SZ_1G,
	.hw_support_pause = true,
	.terminate = tegra_dma_pause_noerr,
};

static const struct of_device_id tegra_dma_of_match[] = {
	{
		.compatible = "nvidia,tegra186-gpcdma",
		.data = &tegra186_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra194-gpcdma",
		.data = &tegra194_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra234-gpcdma",
		.data = &tegra234_dma_chip_data,
	}, {
	},
};
MODULE_DEVICE_TABLE(of, tegra_dma_of_match);

static int tegra_dma_program_sid(struct tegra_dma_channel *tdc, int stream_id)
{
	unsigned int reg_val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);

	reg_val &= ~(TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK);
	reg_val &= ~(TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK);

	reg_val |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK, stream_id);
	reg_val |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK, stream_id);

	tdc_write(tdc, TEGRA_GPCDMA_CHAN_MCSEQ, reg_val);

	return 0;
}
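/*
 * Both MCSEQ stream-ID fields are programmed with the same IOMMU stream
 * ID here, so all of the channel's memory traffic is translated in a
 * single SMMU context regardless of which field the hardware uses for a
 * given access.
 */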
static int tegra_dma_probe(struct platform_device *pdev)
{
	const struct tegra_dma_chip_data *cdata = NULL;
	unsigned int i;
	u32 stream_id;
	struct tegra_dma *tdma;
	int ret;

	cdata = of_device_get_match_data(&pdev->dev);

	tdma = devm_kzalloc(&pdev->dev,
			    struct_size(tdma, channels, cdata->nr_channels),
			    GFP_KERNEL);
	if (!tdma)
		return -ENOMEM;

	tdma->dev = &pdev->dev;
	tdma->chip_data = cdata;
	platform_set_drvdata(pdev, tdma);

	tdma->base_addr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(tdma->base_addr))
		return PTR_ERR(tdma->base_addr);

	tdma->rst = devm_reset_control_get_exclusive(&pdev->dev, "gpcdma");
	if (IS_ERR(tdma->rst)) {
		return dev_err_probe(&pdev->dev, PTR_ERR(tdma->rst),
				     "Missing controller reset\n");
	}
	reset_control_reset(tdma->rst);

	tdma->dma_dev.dev = &pdev->dev;

	if (!tegra_dev_iommu_get_stream_id(&pdev->dev, &stream_id)) {
		dev_err(&pdev->dev, "Missing iommu stream-id\n");
		return -EINVAL;
	}

	ret = device_property_read_u32(&pdev->dev, "dma-channel-mask",
				       &tdma->chan_mask);
	if (ret) {
		dev_warn(&pdev->dev,
			 "Missing dma-channel-mask property, using default channel mask %#x\n",
			 TEGRA_GPCDMA_DEFAULT_CHANNEL_MASK);
		tdma->chan_mask = TEGRA_GPCDMA_DEFAULT_CHANNEL_MASK;
	}

	INIT_LIST_HEAD(&tdma->dma_dev.channels);
	for (i = 0; i < cdata->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];

		/* Check for channel mask */
		if (!(tdma->chan_mask & BIT(i)))
			continue;

		tdc->irq = platform_get_irq(pdev, i);
		if (tdc->irq < 0)
			return tdc->irq;

		tdc->chan_base_offset = TEGRA_GPCDMA_CHANNEL_BASE_ADDR_OFFSET +
					i * cdata->channel_reg_size;
		snprintf(tdc->name, sizeof(tdc->name), "gpcdma.%d", i);
		tdc->tdma = tdma;
		tdc->id = i;
		tdc->slave_id = -1;

		vchan_init(&tdc->vc, &tdma->dma_dev);
		tdc->vc.desc_free = tegra_dma_desc_free;

		/* program stream-id for this channel */
		tegra_dma_program_sid(tdc, stream_id);
		tdc->stream_id = stream_id;
	}

	dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_MEMCPY, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_MEMSET, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);

	/*
	 * Only word aligned transfers are supported. Set the copy
	 * alignment shift.
	 */
	tdma->dma_dev.copy_align = 2;
	tdma->dma_dev.fill_align = 2;
	tdma->dma_dev.device_alloc_chan_resources =
			tegra_dma_alloc_chan_resources;
	tdma->dma_dev.device_free_chan_resources =
			tegra_dma_free_chan_resources;
	tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
	tdma->dma_dev.device_prep_dma_memcpy = tegra_dma_prep_dma_memcpy;
	tdma->dma_dev.device_prep_dma_memset = tegra_dma_prep_dma_memset;
	tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
	tdma->dma_dev.device_config = tegra_dma_slave_config;
	tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all;
	tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
	tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;
	tdma->dma_dev.device_pause = tegra_dma_device_pause;
	tdma->dma_dev.device_resume = tegra_dma_device_resume;
	tdma->dma_dev.device_synchronize = tegra_dma_chan_synchronize;
	tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	ret = dma_async_device_register(&tdma->dma_dev);
	if (ret < 0) {
		dev_err_probe(&pdev->dev, ret,
			      "GPC DMA driver registration failed\n");
		return ret;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 tegra_dma_of_xlate, tdma);
	if (ret < 0) {
		dev_err_probe(&pdev->dev, ret,
			      "GPC DMA OF registration failed\n");

		dma_async_device_unregister(&tdma->dma_dev);
		return ret;
	}

	dev_info(&pdev->dev, "GPC DMA driver registered %lu channels\n",
		 hweight_long(tdma->chan_mask));

	return 0;
}

static void tegra_dma_remove(struct platform_device *pdev)
{
	struct tegra_dma *tdma = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&tdma->dma_dev);
}

static int __maybe_unused tegra_dma_pm_suspend(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);
	unsigned int i;

	for (i = 0; i < tdma->chip_data->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];

		if (!(tdma->chan_mask & BIT(i)))
			continue;

		if (tdc->dma_desc) {
			dev_err(tdma->dev, "channel %u busy\n", i);
			return -EBUSY;
		}
	}

	return 0;
}

static int __maybe_unused tegra_dma_pm_resume(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);
	unsigned int i;

	reset_control_reset(tdma->rst);

	for (i = 0; i < tdma->chip_data->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];

		if (!(tdma->chan_mask & BIT(i)))
			continue;

		tegra_dma_program_sid(tdc, tdc->stream_id);
	}

	return 0;
}

static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume)
};

static struct platform_driver tegra_dma_driver = {
	.driver = {
		.name	= "tegra-gpcdma",
		.pm	= &tegra_dma_dev_pm_ops,
		.of_match_table = tegra_dma_of_match,
	},
	.probe		= tegra_dma_probe,
	.remove		= tegra_dma_remove,
};

module_platform_driver(tegra_dma_driver);

MODULE_DESCRIPTION("NVIDIA Tegra GPC DMA Controller driver");
MODULE_AUTHOR("Pavan Kunapuli <pkunapuli@nvidia.com>");
MODULE_AUTHOR("Rajesh Gumasta <rgumasta@nvidia.com>");
MODULE_LICENSE("GPL");