// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA driver for NVIDIA Tegra GPC DMA controller.
 *
 * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/bitfield.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <dt-bindings/memory/tegra186-mc.h>

#include "virt-dma.h"
/* CSR register */
#define TEGRA_GPCDMA_CHAN_CSR			0x00
#define TEGRA_GPCDMA_CSR_ENB			BIT(31)
#define TEGRA_GPCDMA_CSR_IE_EOC			BIT(30)
#define TEGRA_GPCDMA_CSR_ONCE			BIT(27)

#define TEGRA_GPCDMA_CSR_FC_MODE		GENMASK(25, 24)
#define TEGRA_GPCDMA_CSR_FC_MODE_NO_MMIO	\
		FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 0)
#define TEGRA_GPCDMA_CSR_FC_MODE_ONE_MMIO	\
		FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 1)
#define TEGRA_GPCDMA_CSR_FC_MODE_TWO_MMIO	\
		FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 2)
#define TEGRA_GPCDMA_CSR_FC_MODE_FOUR_MMIO	\
		FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 3)

#define TEGRA_GPCDMA_CSR_DMA			GENMASK(23, 21)
#define TEGRA_GPCDMA_CSR_DMA_IO2MEM_NO_FC	\
		FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 0)
#define TEGRA_GPCDMA_CSR_DMA_IO2MEM_FC		\
		FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 1)
#define TEGRA_GPCDMA_CSR_DMA_MEM2IO_NO_FC	\
		FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 2)
#define TEGRA_GPCDMA_CSR_DMA_MEM2IO_FC		\
		FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 3)
#define TEGRA_GPCDMA_CSR_DMA_MEM2MEM		\
		FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 4)
#define TEGRA_GPCDMA_CSR_DMA_FIXED_PAT		\
		FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 6)

#define TEGRA_GPCDMA_CSR_REQ_SEL_MASK		GENMASK(20, 16)
#define TEGRA_GPCDMA_CSR_REQ_SEL_UNUSED		\
		FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, 4)
#define TEGRA_GPCDMA_CSR_IRQ_MASK		BIT(15)
#define TEGRA_GPCDMA_CSR_WEIGHT			GENMASK(13, 10)

/* STATUS register */
#define TEGRA_GPCDMA_CHAN_STATUS		0x004
#define TEGRA_GPCDMA_STATUS_BUSY		BIT(31)
#define TEGRA_GPCDMA_STATUS_ISE_EOC		BIT(30)
#define TEGRA_GPCDMA_STATUS_PING_PONG		BIT(28)
#define TEGRA_GPCDMA_STATUS_DMA_ACTIVITY	BIT(27)
#define TEGRA_GPCDMA_STATUS_CHANNEL_PAUSE	BIT(26)
#define TEGRA_GPCDMA_STATUS_CHANNEL_RX		BIT(25)
#define TEGRA_GPCDMA_STATUS_CHANNEL_TX		BIT(24)
#define TEGRA_GPCDMA_STATUS_IRQ_INTR_STA	BIT(23)
#define TEGRA_GPCDMA_STATUS_IRQ_STA		BIT(21)
#define TEGRA_GPCDMA_STATUS_IRQ_TRIG_STA	BIT(20)

#define TEGRA_GPCDMA_CHAN_CSRE			0x008
#define TEGRA_GPCDMA_CHAN_CSRE_PAUSE		BIT(31)

/* Source address */
#define TEGRA_GPCDMA_CHAN_SRC_PTR		0x00C

/* Destination address */
#define TEGRA_GPCDMA_CHAN_DST_PTR		0x010

/* High address pointer */
#define TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR		0x014
#define TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR		GENMASK(7, 0)
#define TEGRA_GPCDMA_HIGH_ADDR_DST_PTR		GENMASK(23, 16)

/* MC sequence register */
#define TEGRA_GPCDMA_CHAN_MCSEQ			0x18
#define TEGRA_GPCDMA_MCSEQ_DATA_SWAP		BIT(31)
#define TEGRA_GPCDMA_MCSEQ_REQ_COUNT		GENMASK(30, 25)
#define TEGRA_GPCDMA_MCSEQ_BURST		GENMASK(24, 23)
#define TEGRA_GPCDMA_MCSEQ_BURST_2		\
		FIELD_PREP(TEGRA_GPCDMA_MCSEQ_BURST, 0)
#define TEGRA_GPCDMA_MCSEQ_BURST_16		\
		FIELD_PREP(TEGRA_GPCDMA_MCSEQ_BURST, 3)
#define TEGRA_GPCDMA_MCSEQ_WRAP1		GENMASK(22, 20)
#define TEGRA_GPCDMA_MCSEQ_WRAP0		GENMASK(19, 17)
#define TEGRA_GPCDMA_MCSEQ_WRAP_NONE		0

#define TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK	GENMASK(13, 7)
#define TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK	GENMASK(6, 0)

/* MMIO sequence register */
#define TEGRA_GPCDMA_CHAN_MMIOSEQ		0x01c
#define TEGRA_GPCDMA_MMIOSEQ_DBL_BUF		BIT(31)
#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH		GENMASK(30, 28)
#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_8	\
		FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 0)
#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_16	\
		FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 1)
#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_32	\
		FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 2)
#define TEGRA_GPCDMA_MMIOSEQ_DATA_SWAP		BIT(27)
#define TEGRA_GPCDMA_MMIOSEQ_BURST_SHIFT	23
#define TEGRA_GPCDMA_MMIOSEQ_BURST_MIN		2U
#define TEGRA_GPCDMA_MMIOSEQ_BURST_MAX		32U
#define TEGRA_GPCDMA_MMIOSEQ_BURST(bs)	\
		(GENMASK((fls(bs) - 2), 0) << TEGRA_GPCDMA_MMIOSEQ_BURST_SHIFT)
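/*
 * Note on the encoding above: for a power-of-two burst size bs (in words),
 * fls(bs) - 1 == log2(bs), so TEGRA_GPCDMA_MMIOSEQ_BURST(bs) places a run
 * of log2(bs) one-bits into the burst field starting at bit 23; for
 * power-of-two bs this equals (bs - 1). For example, bs = 8 yields
 * GENMASK(2, 0) = 0x7 shifted left by 23, i.e. bits 25:23 set.
 */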
#define TEGRA_GPCDMA_MMIOSEQ_MASTER_ID		GENMASK(22, 19)
#define TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD		GENMASK(18, 16)
#define TEGRA_GPCDMA_MMIOSEQ_MMIO_PROT		GENMASK(8, 7)

/* Channel WCOUNT */
#define TEGRA_GPCDMA_CHAN_WCOUNT		0x20

/* Transfer count */
#define TEGRA_GPCDMA_CHAN_XFER_COUNT		0x24

/* DMA byte count status */
#define TEGRA_GPCDMA_CHAN_DMA_BYTE_STATUS	0x28

/* Error Status Register */
#define TEGRA_GPCDMA_CHAN_ERR_STATUS		0x30
#define TEGRA_GPCDMA_CHAN_ERR_TYPE_SHIFT	8
#define TEGRA_GPCDMA_CHAN_ERR_TYPE_MASK		0xF
#define TEGRA_GPCDMA_CHAN_ERR_TYPE(err)	( \
		((err) >> TEGRA_GPCDMA_CHAN_ERR_TYPE_SHIFT) & \
		TEGRA_GPCDMA_CHAN_ERR_TYPE_MASK)
#define TEGRA_DMA_BM_FIFO_FULL_ERR		0xF
#define TEGRA_DMA_PERIPH_FIFO_FULL_ERR		0xE
#define TEGRA_DMA_PERIPH_ID_ERR			0xD
#define TEGRA_DMA_STREAM_ID_ERR			0xC
#define TEGRA_DMA_MC_SLAVE_ERR			0xB
#define TEGRA_DMA_MMIO_SLAVE_ERR		0xA

/* Fixed Pattern */
#define TEGRA_GPCDMA_CHAN_FIXED_PATTERN		0x34

#define TEGRA_GPCDMA_CHAN_TZ			0x38
#define TEGRA_GPCDMA_CHAN_TZ_MMIO_PROT_1	BIT(0)
#define TEGRA_GPCDMA_CHAN_TZ_MC_PROT_1		BIT(1)

#define TEGRA_GPCDMA_CHAN_SPARE			0x3c
#define TEGRA_GPCDMA_CHAN_SPARE_EN_LEGACY_FC	BIT(16)

/*
 * If a burst is in flight when the DMA is paused, this is the time needed
 * to complete the in-flight burst and update the DMA status register.
 */
#define TEGRA_GPCDMA_BURST_COMPLETE_TIME	10
#define TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT	5000 /* 5 msec */

/* Channel base address offset from GPCDMA base address */
#define TEGRA_GPCDMA_CHANNEL_BASE_ADDR_OFFSET	0x10000

/* Default channel mask reserving channel0 */
#define TEGRA_GPCDMA_DEFAULT_CHANNEL_MASK	0xfffffffe
struct tegra_dma_channel;

/*
 * tegra_dma_chip_data: Tegra chip-specific DMA data
 * @nr_channels: Number of channels available in the controller.
 * @channel_reg_size: Channel register size.
 * @max_dma_count: Maximum DMA transfer count supported by the DMA controller.
 * @hw_support_pause: Whether the DMA HW engine supports pausing a channel.
 * @terminate: Chip-specific channel-terminate callback.
 */
struct tegra_dma_chip_data {
	bool hw_support_pause;
	unsigned int nr_channels;
	unsigned int channel_reg_size;
	unsigned int max_dma_count;
	int (*terminate)(struct tegra_dma_channel *tdc);
};
/* DMA channel registers */
struct tegra_dma_channel_regs {
	u32 csr;
	u32 src_ptr;
	u32 dst_ptr;
	u32 high_addr_ptr;
	u32 mc_seq;
	u32 mmio_seq;
	u32 wcount;
	u32 fixed_pattern;
};

/*
 * tegra_dma_sg_req: DMA request details used to configure the hardware.
 * This holds the details of one sub-transfer to program into the DMA HW.
 * A client's data transfer request can be broken into multiple
 * sub-transfers, as dictated by the requester details and HW support.
 * These sub-transfers are stored as an array in the Tegra DMA desc,
 * which manages the overall transfer.
 */
struct tegra_dma_sg_req {
	unsigned int len;
	struct tegra_dma_channel_regs ch_regs;
};
/*
 * tegra_dma_desc: Tegra DMA descriptor, which uses virt_dma_desc to
 * manage the client request and keep track of transfer status,
 * callbacks, request counts, etc.
 */
struct tegra_dma_desc {
	bool cyclic;
	unsigned int bytes_req;
	unsigned int bytes_xfer;
	unsigned int sg_idx;
	unsigned int sg_count;
	struct virt_dma_desc vd;
	struct tegra_dma_channel *tdc;
	struct tegra_dma_sg_req sg_req[] __counted_by(sg_count);
};
/*
 * tegra_dma_channel: Channel-specific information
 */
struct tegra_dma_channel {
	bool config_init;
	char name[30];
	enum dma_transfer_direction sid_dir;
	enum dma_status status;
	int id;
	int irq;
	int slave_id;
	struct tegra_dma *tdma;
	struct virt_dma_chan vc;
	struct tegra_dma_desc *dma_desc;
	struct dma_slave_config dma_sconfig;
	unsigned int stream_id;
	unsigned long chan_base_offset;
};
/*
 * tegra_dma: Tegra DMA-specific information
 */
struct tegra_dma {
	const struct tegra_dma_chip_data *chip_data;
	unsigned long sid_m2d_reserved;
	unsigned long sid_d2m_reserved;
	u32 chan_mask;
	void __iomem *base_addr;
	struct device *dev;
	struct dma_device dma_dev;
	struct reset_control *rst;
	struct tegra_dma_channel channels[];
};
static inline void tdc_write(struct tegra_dma_channel *tdc,
			     u32 reg, u32 val)
{
	writel_relaxed(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg);
}

static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
{
	return readl_relaxed(tdc->tdma->base_addr + tdc->chan_base_offset + reg);
}
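/*
 * Each channel owns a register window at base_addr + chan_base_offset,
 * where chan_base_offset = TEGRA_GPCDMA_CHANNEL_BASE_ADDR_OFFSET +
 * id * channel_reg_size (set up in probe). The accessors above simply
 * add the register offset within that per-channel window.
 */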
static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
{
	return container_of(dc, struct tegra_dma_channel, vc.chan);
}

static inline struct tegra_dma_desc *vd_to_tegra_dma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct tegra_dma_desc, vd);
}

static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
{
	return tdc->vc.chan.device->dev;
}
static void tegra_dma_dump_chan_regs(struct tegra_dma_channel *tdc)
{
	dev_dbg(tdc2dev(tdc), "DMA Channel %d name %s register dump:\n",
		tdc->id, tdc->name);
	dev_dbg(tdc2dev(tdc), "CSR %x STA %x CSRE %x SRC %x DST %x\n",
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR),
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS),
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE),
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR),
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_DST_PTR));
	dev_dbg(tdc2dev(tdc), "MCSEQ %x IOSEQ %x WCNT %x XFER %x BSTA %x\n",
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ),
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_MMIOSEQ),
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_WCOUNT),
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_XFER_COUNT),
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_DMA_BYTE_STATUS));
	dev_dbg(tdc2dev(tdc), "DMA ERR_STA %x\n",
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS));
}
static int tegra_dma_sid_reserve(struct tegra_dma_channel *tdc,
				 enum dma_transfer_direction direction)
{
	struct tegra_dma *tdma = tdc->tdma;
	int sid = tdc->slave_id;

	if (!is_slave_direction(direction))
		return 0;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		if (test_and_set_bit(sid, &tdma->sid_m2d_reserved)) {
			dev_err(tdma->dev, "slave id already in use\n");
			return -EINVAL;
		}
		break;
	case DMA_DEV_TO_MEM:
		if (test_and_set_bit(sid, &tdma->sid_d2m_reserved)) {
			dev_err(tdma->dev, "slave id already in use\n");
			return -EINVAL;
		}
		break;
	default:
		break;
	}

	tdc->sid_dir = direction;

	return 0;
}
static void tegra_dma_sid_free(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;
	int sid = tdc->slave_id;

	switch (tdc->sid_dir) {
	case DMA_MEM_TO_DEV:
		clear_bit(sid, &tdma->sid_m2d_reserved);
		break;
	case DMA_DEV_TO_MEM:
		clear_bit(sid, &tdma->sid_d2m_reserved);
		break;
	default:
		break;
	}

	tdc->sid_dir = DMA_TRANS_NONE;
}
static void tegra_dma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct tegra_dma_desc, vd));
}
static int tegra_dma_slave_config(struct dma_chan *dc,
				  struct dma_slave_config *sconfig)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
	tdc->config_init = true;

	return 0;
}
static int tegra_dma_pause(struct tegra_dma_channel *tdc)
{
	int ret;
	u32 val;

	val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE);
	val |= TEGRA_GPCDMA_CHAN_CSRE_PAUSE;
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, val);

	/* Wait until busy bit is de-asserted */
	ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr +
			tdc->chan_base_offset + TEGRA_GPCDMA_CHAN_STATUS,
			val,
			!(val & TEGRA_GPCDMA_STATUS_BUSY),
			TEGRA_GPCDMA_BURST_COMPLETE_TIME,
			TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT);
	if (ret) {
		dev_err(tdc2dev(tdc), "DMA pause timed out\n");
		tegra_dma_dump_chan_regs(tdc);
	}

	tdc->status = DMA_PAUSED;

	return ret;
}
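/*
 * Pausing is a two-step handshake: setting CSRE_PAUSE asks the engine to
 * stop issuing new bursts, then the STATUS busy bit is polled so that any
 * burst already in flight can drain (see the BURST_COMPLETE_TIME and
 * BURST_COMPLETION_TIMEOUT definitions above).
 */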
static int tegra_dma_device_pause(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned long flags;
	int ret;

	if (!tdc->tdma->chip_data->hw_support_pause)
		return -ENOSYS;

	spin_lock_irqsave(&tdc->vc.lock, flags);
	ret = tegra_dma_pause(tdc);
	spin_unlock_irqrestore(&tdc->vc.lock, flags);

	return ret;
}
static void tegra_dma_resume(struct tegra_dma_channel *tdc)
{
	u32 val;

	val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE);
	val &= ~TEGRA_GPCDMA_CHAN_CSRE_PAUSE;
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, val);

	tdc->status = DMA_IN_PROGRESS;
}
static int tegra_dma_device_resume(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned long flags;

	if (!tdc->tdma->chip_data->hw_support_pause)
		return -ENOSYS;

	spin_lock_irqsave(&tdc->vc.lock, flags);
	tegra_dma_resume(tdc);
	spin_unlock_irqrestore(&tdc->vc.lock, flags);

	return 0;
}
static inline int tegra_dma_pause_noerr(struct tegra_dma_channel *tdc)
{
	/*
	 * Return 0 irrespective of PAUSE status.
	 * This is useful to recover channels that can exit out of flush
	 * state when the channel is disabled.
	 */
	tegra_dma_pause(tdc);
	return 0;
}
static void tegra_dma_disable(struct tegra_dma_channel *tdc)
{
	u32 csr, status;

	csr = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR);

	/* Disable interrupts */
	csr &= ~TEGRA_GPCDMA_CSR_IE_EOC;

	/* Disable DMA */
	csr &= ~TEGRA_GPCDMA_CSR_ENB;
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, csr);

	/* Clear interrupt status if it is there */
	status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS);
	if (status & TEGRA_GPCDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s(): clearing interrupt\n", __func__);
		tdc_write(tdc, TEGRA_GPCDMA_CHAN_STATUS, status);
	}
}
static void tegra_dma_configure_next_sg(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc = tdc->dma_desc;
	struct tegra_dma_channel_regs *ch_regs;
	int ret;
	u32 val;

	dma_desc->sg_idx++;

	/* Reset the sg index for cyclic transfers */
	if (dma_desc->sg_idx == dma_desc->sg_count)
		dma_desc->sg_idx = 0;

	/* Configure next transfer immediately after DMA is busy */
	ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr +
			tdc->chan_base_offset + TEGRA_GPCDMA_CHAN_STATUS,
			val,
			(val & TEGRA_GPCDMA_STATUS_BUSY), 0,
			TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT);
	if (ret)
		return;

	ch_regs = &dma_desc->sg_req[dma_desc->sg_idx].ch_regs;

	tdc_write(tdc, TEGRA_GPCDMA_CHAN_WCOUNT, ch_regs->wcount);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR, ch_regs->src_ptr);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_DST_PTR, ch_regs->dst_ptr);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR, ch_regs->high_addr_ptr);

	/* Start DMA */
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR,
		  ch_regs->csr | TEGRA_GPCDMA_CSR_ENB);
}
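/*
 * For cyclic operation the next period is programmed while the current one
 * is still in flight: the poll above waits until the channel reports busy,
 * after which the newly written parameters (and the CSR ENB write) apply to
 * the transfer that follows, as described in tegra_dma_issue_pending().
 */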
static void tegra_dma_start(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc = tdc->dma_desc;
	struct tegra_dma_channel_regs *ch_regs;
	struct virt_dma_desc *vdesc;

	if (!dma_desc) {
		vdesc = vchan_next_desc(&tdc->vc);
		if (!vdesc)
			return;

		dma_desc = vd_to_tegra_dma_desc(vdesc);
		list_del(&vdesc->node);
		dma_desc->tdc = tdc;
		tdc->dma_desc = dma_desc;

		tegra_dma_resume(tdc);
	}

	ch_regs = &dma_desc->sg_req[dma_desc->sg_idx].ch_regs;

	tdc_write(tdc, TEGRA_GPCDMA_CHAN_WCOUNT, ch_regs->wcount);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, 0);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR, ch_regs->src_ptr);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_DST_PTR, ch_regs->dst_ptr);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR, ch_regs->high_addr_ptr);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_FIXED_PATTERN, ch_regs->fixed_pattern);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_MMIOSEQ, ch_regs->mmio_seq);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_MCSEQ, ch_regs->mc_seq);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, ch_regs->csr);

	/* Start DMA */
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR,
		  ch_regs->csr | TEGRA_GPCDMA_CSR_ENB);
}
static void tegra_dma_xfer_complete(struct tegra_dma_channel *tdc)
{
	vchan_cookie_complete(&tdc->dma_desc->vd);

	tegra_dma_sid_free(tdc);
	tdc->dma_desc = NULL;
	tdc->status = DMA_COMPLETE;
}
static void tegra_dma_chan_decode_error(struct tegra_dma_channel *tdc,
					unsigned int err_status)
{
	switch (TEGRA_GPCDMA_CHAN_ERR_TYPE(err_status)) {
	case TEGRA_DMA_BM_FIFO_FULL_ERR:
		dev_err(tdc->tdma->dev,
			"GPCDMA CH%d bm fifo full\n", tdc->id);
		break;
	case TEGRA_DMA_PERIPH_FIFO_FULL_ERR:
		dev_err(tdc->tdma->dev,
			"GPCDMA CH%d peripheral fifo full\n", tdc->id);
		break;
	case TEGRA_DMA_PERIPH_ID_ERR:
		dev_err(tdc->tdma->dev,
			"GPCDMA CH%d illegal peripheral id\n", tdc->id);
		break;
	case TEGRA_DMA_STREAM_ID_ERR:
		dev_err(tdc->tdma->dev,
			"GPCDMA CH%d illegal stream id\n", tdc->id);
		break;
	case TEGRA_DMA_MC_SLAVE_ERR:
		dev_err(tdc->tdma->dev,
			"GPCDMA CH%d mc slave error\n", tdc->id);
		break;
	case TEGRA_DMA_MMIO_SLAVE_ERR:
		dev_err(tdc->tdma->dev,
			"GPCDMA CH%d mmio slave error\n", tdc->id);
		break;
	default:
		dev_err(tdc->tdma->dev,
			"GPCDMA CH%d security violation %x\n", tdc->id,
			err_status);
	}
}
static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
{
	struct tegra_dma_channel *tdc = dev_id;
	struct tegra_dma_desc *dma_desc = tdc->dma_desc;
	struct tegra_dma_sg_req *sg_req;
	u32 status;

	/* Check channel error status register */
	status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS);
	if (status) {
		tegra_dma_chan_decode_error(tdc, status);
		tegra_dma_dump_chan_regs(tdc);
		tdc_write(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS, 0xFFFFFFFF);
	}

	spin_lock(&tdc->vc.lock);
	status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS);
	if (!(status & TEGRA_GPCDMA_STATUS_ISE_EOC))
		goto irq_done;

	tdc_write(tdc, TEGRA_GPCDMA_CHAN_STATUS,
		  TEGRA_GPCDMA_STATUS_ISE_EOC);

	if (!dma_desc)
		goto irq_done;

	sg_req = dma_desc->sg_req;
	dma_desc->bytes_xfer += sg_req[dma_desc->sg_idx].len;

	if (dma_desc->cyclic) {
		vchan_cyclic_callback(&dma_desc->vd);
		tegra_dma_configure_next_sg(tdc);
	} else {
		dma_desc->sg_idx++;
		if (dma_desc->sg_idx == dma_desc->sg_count)
			tegra_dma_xfer_complete(tdc);
		else
			tegra_dma_start(tdc);
	}

irq_done:
	spin_unlock(&tdc->vc.lock);
	return IRQ_HANDLED;
}
static void tegra_dma_issue_pending(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned long flags;

	spin_lock_irqsave(&tdc->vc.lock, flags);
	if (vchan_issue_pending(&tdc->vc))
		tegra_dma_start(tdc);

	/*
	 * For cyclic DMA transfers, program the second
	 * transfer parameters as soon as the first DMA
	 * transfer is started, in order for the DMA
	 * controller to trigger the second transfer
	 * with the correct parameters.
	 */
	if (tdc->dma_desc && tdc->dma_desc->cyclic)
		tegra_dma_configure_next_sg(tdc);

	spin_unlock_irqrestore(&tdc->vc.lock, flags);
}
static int tegra_dma_stop_client(struct tegra_dma_channel *tdc)
{
	int ret;
	u32 status, csr;

	/*
	 * Change the client associated with the DMA channel
	 * to stop the DMA engine from starting any more bursts for
	 * the given client, and wait for in-flight bursts to complete.
	 */
	csr = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR);
	csr &= ~(TEGRA_GPCDMA_CSR_REQ_SEL_MASK);
	csr |= TEGRA_GPCDMA_CSR_REQ_SEL_UNUSED;
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, csr);

	/* Wait for in-flight data transfer to finish */
	udelay(TEGRA_GPCDMA_BURST_COMPLETE_TIME);

	/* If the TX/RX path is still active, wait until it becomes inactive */
	ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr +
				tdc->chan_base_offset +
				TEGRA_GPCDMA_CHAN_STATUS,
				status,
				!(status & (TEGRA_GPCDMA_STATUS_CHANNEL_TX |
				TEGRA_GPCDMA_STATUS_CHANNEL_RX)),
				5,
				TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT);
	if (ret) {
		dev_err(tdc2dev(tdc), "Timeout waiting for DMA burst completion!\n");
		tegra_dma_dump_chan_regs(tdc);
	}

	return ret;
}
static int tegra_dma_terminate_all(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned long flags;
	LIST_HEAD(head);
	int err;

	spin_lock_irqsave(&tdc->vc.lock, flags);

	if (tdc->dma_desc) {
		err = tdc->tdma->chip_data->terminate(tdc);
		if (err) {
			spin_unlock_irqrestore(&tdc->vc.lock, flags);
			return err;
		}

		vchan_terminate_vdesc(&tdc->dma_desc->vd);
		tegra_dma_disable(tdc);
		tdc->dma_desc = NULL;
	}

	tdc->status = DMA_COMPLETE;
	tegra_dma_sid_free(tdc);
	vchan_get_all_descriptors(&tdc->vc, &head);
	spin_unlock_irqrestore(&tdc->vc.lock, flags);

	vchan_dma_desc_free_list(&tdc->vc, &head);

	return 0;
}
static int tegra_dma_get_residual(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc = tdc->dma_desc;
	struct tegra_dma_sg_req *sg_req = dma_desc->sg_req;
	unsigned int bytes_xfer, residual;
	u32 wcount = 0, status;

	wcount = tdc_read(tdc, TEGRA_GPCDMA_CHAN_XFER_COUNT);

	/*
	 * Set wcount = 0 if the EOC bit is set. The transfer would have
	 * already completed and CHAN_XFER_COUNT could already have been
	 * updated for the next transfer, specifically in case of cyclic
	 * transfers.
	 */
	status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS);
	if (status & TEGRA_GPCDMA_STATUS_ISE_EOC)
		wcount = 0;

	bytes_xfer = dma_desc->bytes_xfer +
		     sg_req[dma_desc->sg_idx].len - (wcount * 4);

	if (dma_desc->bytes_req == bytes_xfer)
		return 0;

	residual = dma_desc->bytes_req - (bytes_xfer % dma_desc->bytes_req);

	return residual;
}
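/*
 * Worked example of the residual math above: for a cyclic buffer with
 * bytes_req = 4096 and bytes_xfer accumulated to 5120 (one full pass plus
 * 1024 bytes into the next), 5120 % 4096 = 1024, so the reported residual
 * is 4096 - 1024 = 3072 bytes remaining in the current pass.
 */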
static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
					   dma_cookie_t cookie,
					   struct dma_tx_state *txstate)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	struct virt_dma_desc *vd;
	unsigned int residual;
	unsigned long flags;
	enum dma_status ret;

	ret = dma_cookie_status(dc, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (tdc->status == DMA_PAUSED)
		ret = DMA_PAUSED;

	spin_lock_irqsave(&tdc->vc.lock, flags);
	vd = vchan_find_desc(&tdc->vc, cookie);
	if (vd) {
		dma_desc = vd_to_tegra_dma_desc(vd);
		residual = dma_desc->bytes_req;
		dma_set_residue(txstate, residual);
	} else if (tdc->dma_desc && tdc->dma_desc->vd.tx.cookie == cookie) {
		residual = tegra_dma_get_residual(tdc);
		dma_set_residue(txstate, residual);
	} else {
		dev_err(tdc2dev(tdc), "cookie %d is not found\n", cookie);
	}
	spin_unlock_irqrestore(&tdc->vc.lock, flags);

	return ret;
}
static inline int get_bus_width(struct tegra_dma_channel *tdc,
				enum dma_slave_buswidth slave_bw)
{
	switch (slave_bw) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_8;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_16;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_32;
	default:
		dev_err(tdc2dev(tdc), "given slave bus width is not supported\n");
		return -EINVAL;
	}
}
static unsigned int get_burst_size(struct tegra_dma_channel *tdc,
				   u32 burst_size, enum dma_slave_buswidth slave_bw,
				   int len)
{
	unsigned int burst_mmio_width, burst_byte;

	/*
	 * The client's burst_size is in units of the bus width;
	 * convert that into words.
	 * If the client did not specify a burst_size, use len
	 * to calculate the optimum burst size.
	 */
	burst_byte = burst_size ? burst_size * slave_bw : len;
	burst_mmio_width = burst_byte / 4;

	if (burst_mmio_width < TEGRA_GPCDMA_MMIOSEQ_BURST_MIN)
		return 0;

	burst_mmio_width = min(burst_mmio_width, TEGRA_GPCDMA_MMIOSEQ_BURST_MAX);

	return TEGRA_GPCDMA_MMIOSEQ_BURST(burst_mmio_width);
}
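/*
 * Worked example: a client requesting dst_maxburst = 8 on a 4-byte bus
 * yields burst_byte = 32, i.e. burst_mmio_width = 8 words, which lies
 * within [BURST_MIN, BURST_MAX] and is encoded via
 * TEGRA_GPCDMA_MMIOSEQ_BURST(8). A width below two words returns 0,
 * leaving the burst field unset.
 */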
static int get_transfer_param(struct tegra_dma_channel *tdc,
			      enum dma_transfer_direction direction,
			      u32 *apb_addr,
			      u32 *mmio_seq,
			      u32 *csr,
			      unsigned int *burst_size,
			      enum dma_slave_buswidth *slave_bw)
{
	switch (direction) {
	case DMA_MEM_TO_DEV:
		*apb_addr = tdc->dma_sconfig.dst_addr;
		*mmio_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
		*burst_size = tdc->dma_sconfig.dst_maxburst;
		*slave_bw = tdc->dma_sconfig.dst_addr_width;
		*csr = TEGRA_GPCDMA_CSR_DMA_MEM2IO_FC;
		return 0;
	case DMA_DEV_TO_MEM:
		*apb_addr = tdc->dma_sconfig.src_addr;
		*mmio_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
		*burst_size = tdc->dma_sconfig.src_maxburst;
		*slave_bw = tdc->dma_sconfig.src_addr_width;
		*csr = TEGRA_GPCDMA_CSR_DMA_IO2MEM_FC;
		return 0;
	default:
		dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
		return -EINVAL;
	}
}
static struct dma_async_tx_descriptor *
tegra_dma_prep_dma_memset(struct dma_chan *dc, dma_addr_t dest, int value,
			  size_t len, unsigned long flags)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned int max_dma_count = tdc->tdma->chip_data->max_dma_count;
	struct tegra_dma_sg_req *sg_req;
	struct tegra_dma_desc *dma_desc;
	u32 csr, mc_seq;

	if ((len & 3) || (dest & 3) || len > max_dma_count) {
		dev_err(tdc2dev(tdc),
			"DMA length/memory address is not supported\n");
		return NULL;
	}

	/* Set DMA mode to fixed pattern */
	csr = TEGRA_GPCDMA_CSR_DMA_FIXED_PAT;
	/* Enable once or continuous mode */
	csr |= TEGRA_GPCDMA_CSR_ONCE;
	/* Enable IRQ mask */
	csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
	/* Enable the DMA interrupt */
	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_GPCDMA_CSR_IE_EOC;
	/* Configure default priority weight for the channel */
	csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);

	mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
	/* retain stream-id and clean rest */
	mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK;

	/* Set the address wrapping */
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);

	/* Program outstanding MC requests */
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);
	/* Set burst size */
	mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;

	dma_desc = kzalloc(struct_size(dma_desc, sg_req, 1), GFP_NOWAIT);
	if (!dma_desc)
		return NULL;

	dma_desc->bytes_req = len;
	dma_desc->sg_count = 1;
	sg_req = dma_desc->sg_req;

	sg_req[0].ch_regs.src_ptr = 0;
	sg_req[0].ch_regs.dst_ptr = dest;
	sg_req[0].ch_regs.high_addr_ptr =
			FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (dest >> 32));
	sg_req[0].ch_regs.fixed_pattern = value;
	/* Word count reg takes value as (N + 1) words */
	sg_req[0].ch_regs.wcount = ((len - 4) >> 2);
	sg_req[0].ch_regs.csr = csr;
	sg_req[0].ch_regs.mmio_seq = 0;
	sg_req[0].ch_regs.mc_seq = mc_seq;
	sg_req[0].len = len;

	dma_desc->cyclic = false;
	return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
}
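/*
 * The word-count register is N-1 encoded: programming N requests (N + 1)
 * words. For example, len = 64 bytes gives wcount = (64 - 4) >> 2 = 15,
 * i.e. a 16-word (64-byte) transfer. The same encoding is used in the
 * memcpy, slave-sg, and cyclic prep callbacks below.
 */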
static struct dma_async_tx_descriptor *
tegra_dma_prep_dma_memcpy(struct dma_chan *dc, dma_addr_t dest,
			  dma_addr_t src, size_t len, unsigned long flags)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_sg_req *sg_req;
	struct tegra_dma_desc *dma_desc;
	unsigned int max_dma_count;
	u32 csr, mc_seq;

	max_dma_count = tdc->tdma->chip_data->max_dma_count;
	if ((len & 3) || (src & 3) || (dest & 3) || len > max_dma_count) {
		dev_err(tdc2dev(tdc),
			"DMA length/memory address is not supported\n");
		return NULL;
	}

	/* Set DMA mode to memory-to-memory transfer */
	csr = TEGRA_GPCDMA_CSR_DMA_MEM2MEM;
	/* Enable once or continuous mode */
	csr |= TEGRA_GPCDMA_CSR_ONCE;
	/* Enable IRQ mask */
	csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
	/* Enable the DMA interrupt */
	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_GPCDMA_CSR_IE_EOC;
	/* Configure default priority weight for the channel */
	csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);

	mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
	/* retain stream-id and clean rest */
	mc_seq &= (TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK) |
		  (TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK);

	/* Set the address wrapping */
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);

	/* Program outstanding MC requests */
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);
	/* Set burst size */
	mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;

	dma_desc = kzalloc(struct_size(dma_desc, sg_req, 1), GFP_NOWAIT);
	if (!dma_desc)
		return NULL;

	dma_desc->bytes_req = len;
	dma_desc->sg_count = 1;
	sg_req = dma_desc->sg_req;

	sg_req[0].ch_regs.src_ptr = src;
	sg_req[0].ch_regs.dst_ptr = dest;
	sg_req[0].ch_regs.high_addr_ptr =
			FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (src >> 32));
	sg_req[0].ch_regs.high_addr_ptr |=
			FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (dest >> 32));
	/* Word count reg takes value as (N + 1) words */
	sg_req[0].ch_regs.wcount = ((len - 4) >> 2);
	sg_req[0].ch_regs.csr = csr;
	sg_req[0].ch_regs.mmio_seq = 0;
	sg_req[0].ch_regs.mc_seq = mc_seq;
	sg_req[0].len = len;

	dma_desc->cyclic = false;
	return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
}
static struct dma_async_tx_descriptor *
tegra_dma_prep_slave_sg(struct dma_chan *dc, struct scatterlist *sgl,
			unsigned int sg_len, enum dma_transfer_direction direction,
			unsigned long flags, void *context)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned int max_dma_count = tdc->tdma->chip_data->max_dma_count;
	enum dma_slave_buswidth slave_bw = DMA_SLAVE_BUSWIDTH_UNDEFINED;
	u32 csr, mc_seq, apb_ptr = 0, mmio_seq = 0;
	struct tegra_dma_sg_req *sg_req;
	struct tegra_dma_desc *dma_desc;
	struct scatterlist *sg;
	unsigned int burst_size;
	unsigned int i;
	int ret;

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "DMA channel is not configured\n");
		return NULL;
	}
	if (sg_len < 1) {
		dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len);
		return NULL;
	}

	ret = tegra_dma_sid_reserve(tdc, direction);
	if (ret)
		return NULL;

	ret = get_transfer_param(tdc, direction, &apb_ptr, &mmio_seq, &csr,
				 &burst_size, &slave_bw);
	if (ret < 0)
		return NULL;

	/* Enable once or continuous mode */
	csr |= TEGRA_GPCDMA_CSR_ONCE;
	/* Program the slave id in requestor select */
	csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, tdc->slave_id);
	/* Enable IRQ mask */
	csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
	/* Configure default priority weight for the channel */
	csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);

	/* Enable the DMA interrupt */
	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_GPCDMA_CSR_IE_EOC;

	mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
	/* retain stream-id and clean rest */
	mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK;

	/* Set the address wrapping on both MC and MMIO side */
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
	mmio_seq |= FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD, 1);

	/* Program 2 MC outstanding requests by default. */
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);

	/* Setting MC burst size depending on MMIO burst size */
	if (burst_size == 64)
		mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;
	else
		mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_2;

	dma_desc = kzalloc(struct_size(dma_desc, sg_req, sg_len), GFP_NOWAIT);
	if (!dma_desc)
		return NULL;

	dma_desc->sg_count = sg_len;
	sg_req = dma_desc->sg_req;

	/* Make transfer requests */
	for_each_sg(sgl, sg, sg_len, i) {
		u32 len;
		dma_addr_t mem;

		mem = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((len & 3) || (mem & 3) || len > max_dma_count) {
			dev_err(tdc2dev(tdc),
				"DMA length/memory address is not supported\n");
			kfree(dma_desc);
			return NULL;
		}

		mmio_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		dma_desc->bytes_req += len;

		if (direction == DMA_MEM_TO_DEV) {
			sg_req[i].ch_regs.src_ptr = mem;
			sg_req[i].ch_regs.dst_ptr = apb_ptr;
			sg_req[i].ch_regs.high_addr_ptr =
				FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (mem >> 32));
		} else if (direction == DMA_DEV_TO_MEM) {
			sg_req[i].ch_regs.src_ptr = apb_ptr;
			sg_req[i].ch_regs.dst_ptr = mem;
			sg_req[i].ch_regs.high_addr_ptr =
				FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (mem >> 32));
		}

		/*
		 * Word count register takes input in words. Writing a value
		 * of N into the word count register means a request of
		 * (N + 1) words.
		 */
		sg_req[i].ch_regs.wcount = ((len - 4) >> 2);
		sg_req[i].ch_regs.csr = csr;
		sg_req[i].ch_regs.mmio_seq = mmio_seq;
		sg_req[i].ch_regs.mc_seq = mc_seq;
		sg_req[i].len = len;
	}

	dma_desc->cyclic = false;
	return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
}
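/*
 * Typical client usage (a sketch, not part of this driver): a peripheral
 * driver obtains a channel and prepares a slave transfer through the
 * generic dmaengine API, e.g.:
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys_addr,	// hypothetical FIFO address
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */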
static struct dma_async_tx_descriptor *
tegra_dma_prep_dma_cyclic(struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
			  size_t period_len, enum dma_transfer_direction direction,
			  unsigned long flags)
{
	enum dma_slave_buswidth slave_bw = DMA_SLAVE_BUSWIDTH_UNDEFINED;
	u32 csr, mc_seq, apb_ptr = 0, mmio_seq = 0, burst_size;
	unsigned int max_dma_count, len, period_count, i;
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	dma_addr_t mem = buf_addr;
	int ret;

	if (!buf_len || !period_len) {
		dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
		return NULL;
	}

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
		return NULL;
	}

	ret = tegra_dma_sid_reserve(tdc, direction);
	if (ret)
		return NULL;

	/*
	 * We only support cyclic transfers when buf_len is a multiple of
	 * period_len.
	 */
	if (buf_len % period_len) {
		dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
		return NULL;
	}

	len = period_len;
	max_dma_count = tdc->tdma->chip_data->max_dma_count;
	if ((len & 3) || (buf_addr & 3) || len > max_dma_count) {
		dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
		return NULL;
	}

	ret = get_transfer_param(tdc, direction, &apb_ptr, &mmio_seq, &csr,
				 &burst_size, &slave_bw);
	if (ret < 0)
		return NULL;

	/* Enable once or continuous mode */
	csr &= ~TEGRA_GPCDMA_CSR_ONCE;
	/* Program the slave id in requestor select */
	csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, tdc->slave_id);
	/* Enable IRQ mask */
	csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
	/* Configure default priority weight for the channel */
	csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);

	/* Enable the DMA interrupt */
	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_GPCDMA_CSR_IE_EOC;

	mmio_seq |= FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD, 1);

	mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
	/* retain stream-id and clean rest */
	mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK;

	/* Set the address wrapping on both MC and MMIO side */
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);

	/* Program 2 MC outstanding requests by default. */
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);
	/* Setting MC burst size depending on MMIO burst size */
	if (burst_size == 64)
		mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;
	else
		mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_2;

	period_count = buf_len / period_len;
	dma_desc = kzalloc(struct_size(dma_desc, sg_req, period_count),
			   GFP_NOWAIT);
	if (!dma_desc)
		return NULL;

	dma_desc->bytes_req = buf_len;
	dma_desc->sg_count = period_count;
	sg_req = dma_desc->sg_req;

	/* Split the transfer into period-size sub-transfers */
	for (i = 0; i < period_count; i++) {
		mmio_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		if (direction == DMA_MEM_TO_DEV) {
			sg_req[i].ch_regs.src_ptr = mem;
			sg_req[i].ch_regs.dst_ptr = apb_ptr;
			sg_req[i].ch_regs.high_addr_ptr =
				FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (mem >> 32));
		} else if (direction == DMA_DEV_TO_MEM) {
			sg_req[i].ch_regs.src_ptr = apb_ptr;
			sg_req[i].ch_regs.dst_ptr = mem;
			sg_req[i].ch_regs.high_addr_ptr =
				FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (mem >> 32));
		}

		/*
		 * Word count register takes input in words. Writing a value
		 * of N into the word count register means a request of
		 * (N + 1) words.
		 */
		sg_req[i].ch_regs.wcount = ((len - 4) >> 2);
		sg_req[i].ch_regs.csr = csr;
		sg_req[i].ch_regs.mmio_seq = mmio_seq;
		sg_req[i].ch_regs.mc_seq = mc_seq;
		sg_req[i].len = len;

		mem += len;
	}

	dma_desc->cyclic = true;

	return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
}
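/*
 * In cyclic mode the CSR ONCE bit is cleared above, so the channel keeps
 * running: each EOC interrupt fires vchan_cyclic_callback() and
 * tegra_dma_configure_next_sg() re-programs the following period, walking
 * the sg_req array and wrapping sg_idx back to 0 at sg_count.
 */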
static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	int ret;

	ret = request_irq(tdc->irq, tegra_dma_isr, 0, tdc->name, tdc);
	if (ret) {
		dev_err(tdc2dev(tdc), "request_irq failed for %s\n", tdc->name);
		return ret;
	}

	dma_cookie_init(&tdc->vc.chan);
	tdc->config_init = false;
	return 0;
}
static void tegra_dma_chan_synchronize(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	synchronize_irq(tdc->irq);
	vchan_synchronize(&tdc->vc);
}
static void tegra_dma_free_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);

	tegra_dma_terminate_all(dc);
	synchronize_irq(tdc->irq);

	tasklet_kill(&tdc->vc.task);
	tdc->config_init = false;
	tdc->slave_id = -1;
	tdc->sid_dir = DMA_TRANS_NONE;
	free_irq(tdc->irq, tdc);

	vchan_free_chan_resources(&tdc->vc);
}
static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct tegra_dma *tdma = ofdma->of_dma_data;
	struct tegra_dma_channel *tdc;
	struct dma_chan *chan;

	chan = dma_get_any_slave_channel(&tdma->dma_dev);
	if (!chan)
		return NULL;

	tdc = to_tegra_dma_chan(chan);
	tdc->slave_id = dma_spec->args[0];

	return chan;
}
static const struct tegra_dma_chip_data tegra186_dma_chip_data = {
	.nr_channels = 31,
	.channel_reg_size = SZ_64K,
	.max_dma_count = SZ_1G,
	.hw_support_pause = false,
	.terminate = tegra_dma_stop_client,
};

static const struct tegra_dma_chip_data tegra194_dma_chip_data = {
	.nr_channels = 31,
	.channel_reg_size = SZ_64K,
	.max_dma_count = SZ_1G,
	.hw_support_pause = true,
	.terminate = tegra_dma_pause,
};

static const struct tegra_dma_chip_data tegra234_dma_chip_data = {
	.nr_channels = 31,
	.channel_reg_size = SZ_64K,
	.max_dma_count = SZ_1G,
	.hw_support_pause = true,
	.terminate = tegra_dma_pause_noerr,
};

static const struct of_device_id tegra_dma_of_match[] = {
	{
		.compatible = "nvidia,tegra186-gpcdma",
		.data = &tegra186_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra194-gpcdma",
		.data = &tegra194_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra234-gpcdma",
		.data = &tegra234_dma_chip_data,
	}, {
	},
};
MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
static int tegra_dma_program_sid(struct tegra_dma_channel *tdc, int stream_id)
{
	unsigned int reg_val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);

	reg_val &= ~(TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK);
	reg_val &= ~(TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK);

	reg_val |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK, stream_id);
	reg_val |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK, stream_id);

	tdc_write(tdc, TEGRA_GPCDMA_CHAN_MCSEQ, reg_val);
	return 0;
}
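/*
 * Both STREAM_ID0 and STREAM_ID1 fields are programmed with the same IOMMU
 * stream ID obtained in probe; the value is cached in tdc->stream_id and
 * re-programmed from tegra_dma_pm_resume() after the controller has been
 * reset across a suspend cycle.
 */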
static int tegra_dma_probe(struct platform_device *pdev)
{
	const struct tegra_dma_chip_data *cdata = NULL;
	unsigned int i;
	u32 stream_id;
	struct tegra_dma *tdma;
	int ret;

	cdata = of_device_get_match_data(&pdev->dev);

	tdma = devm_kzalloc(&pdev->dev,
			    struct_size(tdma, channels, cdata->nr_channels),
			    GFP_KERNEL);
	if (!tdma)
		return -ENOMEM;

	tdma->dev = &pdev->dev;
	tdma->chip_data = cdata;
	platform_set_drvdata(pdev, tdma);

	tdma->base_addr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(tdma->base_addr))
		return PTR_ERR(tdma->base_addr);

	tdma->rst = devm_reset_control_get_exclusive(&pdev->dev, "gpcdma");
	if (IS_ERR(tdma->rst)) {
		return dev_err_probe(&pdev->dev, PTR_ERR(tdma->rst),
				     "Missing controller reset\n");
	}
	reset_control_reset(tdma->rst);

	tdma->dma_dev.dev = &pdev->dev;

	if (!tegra_dev_iommu_get_stream_id(&pdev->dev, &stream_id)) {
		dev_err(&pdev->dev, "Missing iommu stream-id\n");
		return -EINVAL;
	}

	ret = device_property_read_u32(&pdev->dev, "dma-channel-mask",
				       &tdma->chan_mask);
	if (ret) {
		dev_warn(&pdev->dev,
			 "Missing dma-channel-mask property, using default channel mask %#x\n",
			 TEGRA_GPCDMA_DEFAULT_CHANNEL_MASK);
		tdma->chan_mask = TEGRA_GPCDMA_DEFAULT_CHANNEL_MASK;
	}

	INIT_LIST_HEAD(&tdma->dma_dev.channels);
	for (i = 0; i < cdata->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];

		/* Check for channel mask */
		if (!(tdma->chan_mask & BIT(i)))
			continue;

		tdc->irq = platform_get_irq(pdev, i);
		if (tdc->irq < 0)
			return tdc->irq;

		tdc->chan_base_offset = TEGRA_GPCDMA_CHANNEL_BASE_ADDR_OFFSET +
					i * cdata->channel_reg_size;
		snprintf(tdc->name, sizeof(tdc->name), "gpcdma.%d", i);
		tdc->tdma = tdma;
		tdc->id = i;
		tdc->slave_id = -1;

		vchan_init(&tdc->vc, &tdma->dma_dev);
		tdc->vc.desc_free = tegra_dma_desc_free;

		/* program stream-id for this channel */
		tegra_dma_program_sid(tdc, stream_id);
		tdc->stream_id = stream_id;
	}

	dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_MEMCPY, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_MEMSET, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);

	/*
	 * Only word-aligned transfers are supported. Set the copy
	 * and fill alignment to 4 bytes.
	 */
	tdma->dma_dev.copy_align = 2;
	tdma->dma_dev.fill_align = 2;
	tdma->dma_dev.device_alloc_chan_resources =
			tegra_dma_alloc_chan_resources;
	tdma->dma_dev.device_free_chan_resources =
			tegra_dma_free_chan_resources;
	tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
	tdma->dma_dev.device_prep_dma_memcpy = tegra_dma_prep_dma_memcpy;
	tdma->dma_dev.device_prep_dma_memset = tegra_dma_prep_dma_memset;
	tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
	tdma->dma_dev.device_config = tegra_dma_slave_config;
	tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all;
	tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
	tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;
	tdma->dma_dev.device_pause = tegra_dma_device_pause;
	tdma->dma_dev.device_resume = tegra_dma_device_resume;
	tdma->dma_dev.device_synchronize = tegra_dma_chan_synchronize;
	tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	ret = dma_async_device_register(&tdma->dma_dev);
	if (ret < 0) {
		dev_err_probe(&pdev->dev, ret,
			      "GPC DMA driver registration failed\n");
		return ret;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 tegra_dma_of_xlate, tdma);
	if (ret < 0) {
		dev_err_probe(&pdev->dev, ret,
			      "GPC DMA OF registration failed\n");
		dma_async_device_unregister(&tdma->dma_dev);
		return ret;
	}

	dev_info(&pdev->dev, "GPC DMA driver registered %lu channels\n",
		 hweight_long(tdma->chan_mask));

	return 0;
}
static void tegra_dma_remove(struct platform_device *pdev)
{
	struct tegra_dma *tdma = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&tdma->dma_dev);
}
static int __maybe_unused tegra_dma_pm_suspend(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);
	unsigned int i;

	for (i = 0; i < tdma->chip_data->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];

		if (!(tdma->chan_mask & BIT(i)))
			continue;

		if (tdc->dma_desc) {
			dev_err(tdma->dev, "channel %u busy\n", i);
			return -EBUSY;
		}
	}

	return 0;
}
static int __maybe_unused tegra_dma_pm_resume(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);
	unsigned int i;

	reset_control_reset(tdma->rst);

	for (i = 0; i < tdma->chip_data->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];

		if (!(tdma->chan_mask & BIT(i)))
			continue;

		tegra_dma_program_sid(tdc, tdc->stream_id);
	}

	return 0;
}
static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume)
};

static struct platform_driver tegra_dma_driver = {
	.driver = {
		.name = "tegra-gpcdma",
		.pm = &tegra_dma_dev_pm_ops,
		.of_match_table = tegra_dma_of_match,
	},
	.probe = tegra_dma_probe,
	.remove = tegra_dma_remove,
};

module_platform_driver(tegra_dma_driver);

MODULE_DESCRIPTION("NVIDIA Tegra GPC DMA Controller driver");
MODULE_AUTHOR("Pavan Kunapuli <pkunapuli@nvidia.com>");
MODULE_AUTHOR("Rajesh Gumasta <rgumasta@nvidia.com>");
MODULE_LICENSE("GPL");