// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
 *
 * Copyright (C) 2008 Atmel Corporation
 * Copyright (C) 2022 Microchip Technology, Inc. and its subsidiaries
 *
 * This supports the Atmel AHB DMA Controller found in several Atmel SoCs.
 * The only Atmel DMA Controller that is not covered by this driver is the one
 * found on AT91SAM9263.
 */
#include <dt-bindings/dma/at91.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/overflow.h>
#include <linux/of_platform.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dmaengine.h"
#include "virt-dma.h"
/*
 * at_hdmac		: Name of the ATmel AHB DMA Controller
 * at_dma_ / atdma	: ATmel DMA controller entity related
 * atc_ / atchan	: ATmel DMA Channel entity related
 */

#define	AT_DMA_MAX_NR_CHANNELS	8
/* Global Configuration Register */
#define AT_DMA_GCFG		0x00
#define AT_DMA_IF_BIGEND(i)	BIT((i))	/* AHB-Lite Interface i in Big-endian mode */
#define AT_DMA_ARB_CFG		BIT(4)		/* Arbiter mode. */

/* Controller Enable Register */
#define AT_DMA_EN		0x04
#define AT_DMA_ENABLE		BIT(0)

/* Software Single Request Register */
#define AT_DMA_SREQ		0x08
#define AT_DMA_SSREQ(x)		BIT((x) << 1)		/* Request a source single transfer on channel x */
#define AT_DMA_DSREQ(x)		BIT(1 + ((x) << 1))	/* Request a destination single transfer on channel x */

/* Software Chunk Transfer Request Register */
#define AT_DMA_CREQ		0x0c
#define AT_DMA_SCREQ(x)		BIT((x) << 1)		/* Request a source chunk transfer on channel x */
#define AT_DMA_DCREQ(x)		BIT(1 + ((x) << 1))	/* Request a destination chunk transfer on channel x */

/* Software Last Transfer Flag Register */
#define AT_DMA_LAST		0x10
#define AT_DMA_SLAST(x)		BIT((x) << 1)		/* This src rq is last tx of buffer on channel x */
#define AT_DMA_DLAST(x)		BIT(1 + ((x) << 1))	/* This dst rq is last tx of buffer on channel x */
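/*
 * For instance, for channel 2: AT_DMA_SSREQ(2) == BIT(4) and
 * AT_DMA_DSREQ(2) == BIT(5). Source and destination request bits for a
 * channel interleave in adjacent even/odd bit positions, which is why the
 * macros shift the channel number left by one.
 */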
/* Request Synchronization Register */
#define AT_DMA_SYNC		0x14
#define AT_DMA_SYR(h)		BIT((h))	/* Synchronize handshake line h */

/* Error, Chained Buffer transfer completed and Buffer transfer completed Interrupt registers */
#define AT_DMA_EBCIER		0x18	/* Enable register */
#define AT_DMA_EBCIDR		0x1c	/* Disable register */
#define AT_DMA_EBCIMR		0x20	/* Mask Register */
#define AT_DMA_EBCISR		0x24	/* Status Register */
#define AT_DMA_CBTC_OFFSET	8
#define AT_DMA_ERR_OFFSET	16
#define AT_DMA_BTC(x)		BIT((x))
#define AT_DMA_CBTC(x)		BIT(AT_DMA_CBTC_OFFSET + (x))
#define AT_DMA_ERR(x)		BIT(AT_DMA_ERR_OFFSET + (x))

/* Channel Handler Enable Register */
#define AT_DMA_CHER		0x28
#define AT_DMA_ENA(x)		BIT((x))
#define AT_DMA_SUSP(x)		BIT(8 + (x))
#define AT_DMA_KEEP(x)		BIT(24 + (x))

/* Channel Handler Disable Register */
#define AT_DMA_CHDR		0x2c
#define AT_DMA_DIS(x)		BIT(x)
#define AT_DMA_RES(x)		BIT(8 + (x))

/* Channel Handler Status Register */
#define AT_DMA_CHSR		0x30
#define AT_DMA_EMPT(x)		BIT(16 + (x))
#define AT_DMA_STAL(x)		BIT(24 + (x))

/* Channel registers base address */
#define AT_DMA_CH_REGS_BASE	0x3c
#define ch_regs(x)		(AT_DMA_CH_REGS_BASE + (x) * 0x28)	/* Channel x base addr */
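/*
 * For instance, channel 2's register window starts at
 * ch_regs(2) = 0x3c + 2 * 0x28 = 0x8c: each channel occupies a 0x28-byte
 * window following the global registers.
 */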
/* Hardware register offset for each channel */
#define ATC_SADDR_OFFSET	0x00	/* Source Address Register */
#define ATC_DADDR_OFFSET	0x04	/* Destination Address Register */
#define ATC_DSCR_OFFSET		0x08	/* Descriptor Address Register */
#define ATC_CTRLA_OFFSET	0x0c	/* Control A Register */
#define ATC_CTRLB_OFFSET	0x10	/* Control B Register */
#define ATC_CFG_OFFSET		0x14	/* Configuration Register */
#define ATC_SPIP_OFFSET		0x18	/* Src PIP Configuration Register */
#define ATC_DPIP_OFFSET		0x1c	/* Dst PIP Configuration Register */
/* Bitfield definitions */

/* Bitfields in DSCR */
#define ATC_DSCR_IF		GENMASK(1, 0)	/* Dsc fetched via AHB-Lite Interface */

/* Bitfields in CTRLA */
#define ATC_BTSIZE_MAX		GENMASK(15, 0)	/* Maximum Buffer Transfer Size */
#define ATC_BTSIZE		GENMASK(15, 0)	/* Buffer Transfer Size */
#define ATC_SCSIZE		GENMASK(18, 16)	/* Source Chunk Transfer Size */
#define ATC_DCSIZE		GENMASK(22, 20)	/* Destination Chunk Transfer Size */
#define ATC_SRC_WIDTH		GENMASK(25, 24)	/* Source Single Transfer Size */
#define ATC_DST_WIDTH		GENMASK(29, 28)	/* Destination Single Transfer Size */
#define ATC_DONE		BIT(31)	/* Tx Done (only written back in descriptor) */

/* Bitfields in CTRLB */
#define ATC_SIF			GENMASK(1, 0)	/* Src tx done via AHB-Lite Interface i */
#define ATC_DIF			GENMASK(5, 4)	/* Dst tx done via AHB-Lite Interface i */
#define AT_DMA_MEM_IF		0x0	/* interface 0 as memory interface */
#define AT_DMA_PER_IF		0x1	/* interface 1 as peripheral interface */
#define ATC_SRC_PIP		BIT(8)		/* Source Picture-in-Picture enabled */
#define ATC_DST_PIP		BIT(12)		/* Destination Picture-in-Picture enabled */
#define ATC_SRC_DSCR_DIS	BIT(16)		/* Src Descriptor fetch disable */
#define ATC_DST_DSCR_DIS	BIT(20)		/* Dst Descriptor fetch disable */
#define ATC_FC			GENMASK(23, 21)	/* Choose Flow Controller */
#define ATC_FC_MEM2MEM		0x0		/* Mem-to-Mem (DMA) */
#define ATC_FC_MEM2PER		0x1		/* Mem-to-Periph (DMA) */
#define ATC_FC_PER2MEM		0x2		/* Periph-to-Mem (DMA) */
#define ATC_FC_PER2PER		0x3		/* Periph-to-Periph (DMA) */
#define ATC_FC_PER2MEM_PER	0x4		/* Periph-to-Mem (Peripheral) */
#define ATC_FC_MEM2PER_PER	0x5		/* Mem-to-Periph (Peripheral) */
#define ATC_FC_PER2PER_SRCPER	0x6		/* Periph-to-Periph (Src Peripheral) */
#define ATC_FC_PER2PER_DSTPER	0x7		/* Periph-to-Periph (Dst Peripheral) */
#define ATC_SRC_ADDR_MODE	GENMASK(25, 24)
#define ATC_SRC_ADDR_MODE_INCR	0x0		/* Incrementing Mode */
#define ATC_SRC_ADDR_MODE_DECR	0x1		/* Decrementing Mode */
#define ATC_SRC_ADDR_MODE_FIXED	0x2		/* Fixed Mode */
#define ATC_DST_ADDR_MODE	GENMASK(29, 28)
#define ATC_DST_ADDR_MODE_INCR	0x0		/* Incrementing Mode */
#define ATC_DST_ADDR_MODE_DECR	0x1		/* Decrementing Mode */
#define ATC_DST_ADDR_MODE_FIXED	0x2		/* Fixed Mode */
#define ATC_IEN			BIT(30)		/* BTC interrupt enable (active low) */
#define ATC_AUTO		BIT(31)		/* Auto multiple buffer tx enable */

/* Bitfields in CFG */
#define ATC_SRC_PER		GENMASK(3, 0)	/* Channel src rq associated with periph handshaking ifc h */
#define ATC_DST_PER		GENMASK(7, 4)	/* Channel dst rq associated with periph handshaking ifc h */
#define ATC_SRC_REP		BIT(8)		/* Source Replay Mode */
#define ATC_SRC_H2SEL		BIT(9)		/* Source Handshaking Mode */
#define ATC_SRC_PER_MSB		GENMASK(11, 10)	/* Channel src rq (most significant bits) */
#define ATC_DST_REP		BIT(12)		/* Destination Replay Mode */
#define ATC_DST_H2SEL		BIT(13)		/* Destination Handshaking Mode */
#define ATC_DST_PER_MSB		GENMASK(15, 14)	/* Channel dst rq (most significant bits) */
#define ATC_SOD			BIT(16)		/* Stop On Done */
#define ATC_LOCK_IF		BIT(20)		/* Interface Lock */
#define ATC_LOCK_B		BIT(21)		/* AHB Bus Lock */
#define ATC_LOCK_IF_L		BIT(22)		/* Master Interface Arbiter Lock */
#define ATC_AHB_PROT		GENMASK(26, 24)	/* AHB Protection */
#define ATC_FIFOCFG		GENMASK(29, 28)	/* FIFO Request Configuration */
#define ATC_FIFOCFG_LARGESTBURST	0x0
#define ATC_FIFOCFG_HALFFIFO		0x1
#define ATC_FIFOCFG_ENOUGHSPACE		0x2

/* Bitfields in SPIP */
#define ATC_SPIP_HOLE		GENMASK(15, 0)
#define ATC_SPIP_BOUNDARY	GENMASK(25, 16)

/* Bitfields in DPIP */
#define ATC_DPIP_HOLE		GENMASK(15, 0)
#define ATC_DPIP_BOUNDARY	GENMASK(25, 16)

#define ATC_PER_MSB		GENMASK(5, 4)	/* Extract MSBs of a handshaking identifier */
#define ATC_SRC_PER_ID(id)					       \
	({ typeof(id) _id = (id);				       \
	   FIELD_PREP(ATC_SRC_PER_MSB, FIELD_GET(ATC_PER_MSB, _id)) | \
	   FIELD_PREP(ATC_SRC_PER, _id); })
#define ATC_DST_PER_ID(id)					       \
	({ typeof(id) _id = (id);				       \
	   FIELD_PREP(ATC_DST_PER_MSB, FIELD_GET(ATC_PER_MSB, _id)) | \
	   FIELD_PREP(ATC_DST_PER, _id); })
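/*
 * For instance, a 6-bit handshaking identifier of 0x2b has MSBs 0b10
 * (bits 5:4) and LSBs 0xb (bits 3:0), so
 * ATC_SRC_PER_ID(0x2b) == FIELD_PREP(ATC_SRC_PER_MSB, 0x2) |
 *			   FIELD_PREP(ATC_SRC_PER, 0xb)
 *		        == (0x2 << 10) | 0xb == 0x80b.
 */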
/*--  descriptors  -----------------------------------------------------*/

/* LLI == Linked List Item; aka DMA buffer descriptor */
struct at_lli {
	/* values that are not changed by hardware */
	u32 saddr;
	u32 daddr;
	/* value that may get written back: */
	u32 ctrla;
	/* more values that are not changed by hardware */
	u32 ctrlb;
	u32 dscr;	/* chain to next lli */
};

/**
 * struct atdma_sg - atdma scatter gather entry
 * @len: length of the current Linked List Item.
 * @lli: linked list item that is passed to the DMA controller
 * @lli_phys: physical address of the LLI.
 */
struct atdma_sg {
	size_t len;
	struct at_lli *lli;
	dma_addr_t lli_phys;
};

/**
 * struct at_desc - software descriptor
 * @vd: pointer to the virtual dma descriptor.
 * @atchan: pointer to the atmel dma channel.
 * @total_len: total transaction byte count
 * @sglen: number of sg entries.
 * @sg: array of sgs.
 * @boundary: number of transfers to perform before the automatic address increment operation
 * @dst_hole: value to add to the destination address when the boundary has been reached
 * @src_hole: value to add to the source address when the boundary has been reached
 * @memset_buffer: buffer used for the memset operation
 * @memset_paddr: physical address of the buffer used for the memset operation
 * @memset_vaddr: virtual address of the buffer used for the memset operation
 */
struct at_desc {
	struct virt_dma_desc vd;
	struct at_dma_chan *atchan;
	size_t total_len;
	unsigned int sglen;

	/* Interleaved data */
	size_t boundary;
	size_t dst_hole;
	size_t src_hole;

	/* Memset temporary buffer */
	bool memset_buffer;
	dma_addr_t memset_paddr;
	int *memset_vaddr;
	struct atdma_sg sg[] __counted_by(sglen);
};
/*--  Channels  --------------------------------------------------------*/

/**
 * enum atc_status - information bits stored in channel status flag
 *
 * @ATC_IS_PAUSED: If channel is paused
 * @ATC_IS_CYCLIC: If channel is cyclic
 *
 * Manipulated with atomic operations.
 */
enum atc_status {
	ATC_IS_PAUSED = 1,
	ATC_IS_CYCLIC = 24,
};

/**
 * struct at_dma_chan - internal representation of an Atmel HDMAC channel
 * @vc: virtual dma channel entry.
 * @atdma: pointer to the driver data.
 * @ch_regs: memory mapped register base
 * @mask: channel index in a mask
 * @per_if: peripheral interface
 * @mem_if: memory interface
 * @status: transmit status information from irq/prep* functions
 *	    to tasklet (use atomic operations)
 * @save_cfg: configuration register that is saved on suspend/resume cycle
 * @save_dscr: for cyclic operations, preserve next descriptor address in
 *	       the cyclic list on suspend/resume cycle
 * @dma_sconfig: configuration for slave transfers, passed via
 *		 .device_config
 * @desc: pointer to the atmel dma descriptor.
 */
struct at_dma_chan {
	struct virt_dma_chan vc;
	struct at_dma *atdma;
	void __iomem *ch_regs;
	u8 mask;
	u8 per_if;
	u8 mem_if;
	unsigned long status;
	u32 save_cfg;
	u32 save_dscr;
	struct dma_slave_config dma_sconfig;
	struct at_desc *desc;
};
#define	channel_readl(atchan, name) \
	__raw_readl((atchan)->ch_regs + ATC_##name##_OFFSET)

#define	channel_writel(atchan, name, val) \
	__raw_writel((val), (atchan)->ch_regs + ATC_##name##_OFFSET)
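/*
 * The token pasting above turns e.g. channel_readl(atchan, CTRLA) into
 * __raw_readl(atchan->ch_regs + ATC_CTRLA_OFFSET), i.e. a read of the
 * channel's Control A register at offset 0x0c of its register window.
 */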
/*
 * Fix sconfig's burst size according to at_hdmac. We need to convert them as:
 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3, 32 -> 4, 64 -> 5, 128 -> 6, 256 -> 7.
 *
 * This can be done by finding most significant bit set.
 */
static inline void convert_burst(u32 *maxburst)
{
	if (*maxburst > 1)
		*maxburst = fls(*maxburst) - 2;
	else
		*maxburst = 0;
}
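/*
 * For instance, a requested maxburst of 16 becomes fls(16) - 2 = 5 - 2 = 3,
 * matching the table above; maxburst values of 0 and 1 both map to the
 * hardware encoding 0 (single transfer).
 */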
/*
 * Fix sconfig's bus width according to at_hdmac.
 * 1 byte -> 0, 2 bytes -> 1, 4 bytes -> 2.
 */
static inline u8 convert_buswidth(enum dma_slave_buswidth addr_width)
{
	switch (addr_width) {
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return 1;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return 2;
	default:
		/* For 1 byte width or fallback */
		return 0;
	}
}
/*--  Controller  ------------------------------------------------------*/

/**
 * struct at_dma - internal representation of an Atmel HDMA Controller
 * @dma_device: dmaengine dma_device object members
 * @regs: memory mapped register base
 * @clk: dma controller clock
 * @save_imr: interrupt mask register that is saved on suspend/resume cycle
 * @all_chan_mask: all channels available in a mask
 * @lli_pool: hw lli table
 * @memset_pool: hw memset pool
 * @chan: channels table to store at_dma_chan structures
 */
struct at_dma {
	struct dma_device dma_device;
	void __iomem *regs;
	struct clk *clk;
	u32 save_imr;
	u8 all_chan_mask;
	struct dma_pool *lli_pool;
	struct dma_pool *memset_pool;
	/* AT THE END channels table */
	struct at_dma_chan chan[];
};
#define	dma_readl(atdma, name) \
	__raw_readl((atdma)->regs + AT_DMA_##name)
#define	dma_writel(atdma, name, val) \
	__raw_writel((val), (atdma)->regs + AT_DMA_##name)
static inline struct at_desc *to_atdma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct at_desc, vd.tx);
}

static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct at_dma_chan, vc.chan);
}

static inline struct at_dma *to_at_dma(struct dma_device *ddev)
{
	return container_of(ddev, struct at_dma, dma_device);
}
/*--  Helper functions  ------------------------------------------------*/

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
#if defined(VERBOSE_DEBUG)
static void vdbg_dump_regs(struct at_dma_chan *atchan)
{
	struct at_dma *atdma = to_at_dma(atchan->vc.chan.device);

	dev_err(chan2dev(&atchan->vc.chan),
		"  channel %d : imr = 0x%x, chsr = 0x%x\n",
		atchan->vc.chan.chan_id,
		dma_readl(atdma, EBCIMR),
		dma_readl(atdma, CHSR));

	dev_err(chan2dev(&atchan->vc.chan),
		"  channel: s0x%x d0x%x ctrl0x%x:0x%x cfg0x%x l0x%x\n",
		channel_readl(atchan, SADDR),
		channel_readl(atchan, DADDR),
		channel_readl(atchan, CTRLA),
		channel_readl(atchan, CTRLB),
		channel_readl(atchan, CFG),
		channel_readl(atchan, DSCR));
}
#else
static void vdbg_dump_regs(struct at_dma_chan *atchan) {}
#endif
static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli)
{
	dev_crit(chan2dev(&atchan->vc.chan),
		 "desc: s%pad d%pad ctrl0x%x:0x%x l%pad\n",
		 &lli->saddr, &lli->daddr,
		 lli->ctrla, lli->ctrlb, &lli->dscr);
}
static void atc_setup_irq(struct at_dma *atdma, int chan_id, int on)
{
	u32 ebci;

	/* enable interrupts on buffer transfer completion & error */
	ebci = AT_DMA_BTC(chan_id) | AT_DMA_ERR(chan_id);
	if (on)
		dma_writel(atdma, EBCIER, ebci);
	else
		dma_writel(atdma, EBCIDR, ebci);
}

static void atc_enable_chan_irq(struct at_dma *atdma, int chan_id)
{
	atc_setup_irq(atdma, chan_id, 1);
}

static void atc_disable_chan_irq(struct at_dma *atdma, int chan_id)
{
	atc_setup_irq(atdma, chan_id, 0);
}
/**
 * atc_chan_is_enabled - test if given channel is enabled
 * @atchan: channel we want to test status
 */
static inline int atc_chan_is_enabled(struct at_dma_chan *atchan)
{
	struct at_dma *atdma = to_at_dma(atchan->vc.chan.device);

	return !!(dma_readl(atdma, CHSR) & atchan->mask);
}

/**
 * atc_chan_is_paused - test channel pause/resume status
 * @atchan: channel we want to test status
 */
static inline int atc_chan_is_paused(struct at_dma_chan *atchan)
{
	return test_bit(ATC_IS_PAUSED, &atchan->status);
}

/**
 * atc_chan_is_cyclic - test if given channel has cyclic property set
 * @atchan: channel we want to test status
 */
static inline int atc_chan_is_cyclic(struct at_dma_chan *atchan)
{
	return test_bit(ATC_IS_CYCLIC, &atchan->status);
}
/**
 * set_lli_eol - set end-of-link to descriptor so it will end transfer
 * @desc: descriptor, single or at the end of a chain, to end chain on
 * @i: index of the atmel scatter gather entry that is at the end of the chain.
 */
static void set_lli_eol(struct at_desc *desc, unsigned int i)
{
	u32 ctrlb = desc->sg[i].lli->ctrlb;

	ctrlb &= ~ATC_IEN;
	ctrlb |= ATC_SRC_DSCR_DIS | ATC_DST_DSCR_DIS;

	desc->sg[i].lli->ctrlb = ctrlb;
	desc->sg[i].lli->dscr = 0;
}
#define	ATC_DEFAULT_CFG		FIELD_PREP(ATC_FIFOCFG, ATC_FIFOCFG_HALFFIFO)
#define	ATC_DEFAULT_CTRLB	(FIELD_PREP(ATC_SIF, AT_DMA_MEM_IF) | \
				 FIELD_PREP(ATC_DIF, AT_DMA_MEM_IF))
#define ATC_DMA_BUSWIDTHS\
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

#define ATC_MAX_DSCR_TRIALS	10

/*
 * Initial number of descriptors to allocate for each channel. This could
 * be increased during dma usage.
 */
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");
/**
 * struct at_dma_platform_data - Controller configuration parameters
 * @nr_channels: Number of channels supported by hardware (max 8)
 * @cap_mask: dma_capability flags supported by the platform
 */
struct at_dma_platform_data {
	unsigned int	nr_channels;
	dma_cap_mask_t	cap_mask;
};
/**
 * struct at_dma_slave - Controller-specific information about a slave
 * @dma_dev: required DMA master device
 * @cfg: Platform-specific initializer for the CFG register
 */
struct at_dma_slave {
	struct device	*dma_dev;
	u32		cfg;
};
static inline unsigned int atc_get_xfer_width(dma_addr_t src, dma_addr_t dst,
					      size_t len)
{
	unsigned int width;

	if (!((src | dst | len) & 3))
		width = 2;
	else if (!((src | dst | len) & 1))
		width = 1;
	else
		width = 0;

	return width;
}
static void atdma_lli_chain(struct at_desc *desc, unsigned int i)
{
	struct atdma_sg *atdma_sg = &desc->sg[i];

	if (i)
		desc->sg[i - 1].lli->dscr = atdma_sg->lli_phys;
}
/**
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 */
static void atc_dostart(struct at_dma_chan *atchan)
{
	struct virt_dma_desc *vd = vchan_next_desc(&atchan->vc);
	struct at_desc *desc;

	if (!vd) {
		atchan->desc = NULL;
		return;
	}

	vdbg_dump_regs(atchan);

	list_del(&vd->node);
	atchan->desc = desc = to_atdma_desc(&vd->tx);

	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, desc->sg[0].lli_phys);
	channel_writel(atchan, SPIP,
		       FIELD_PREP(ATC_SPIP_HOLE, desc->src_hole) |
		       FIELD_PREP(ATC_SPIP_BOUNDARY, desc->boundary));
	channel_writel(atchan, DPIP,
		       FIELD_PREP(ATC_DPIP_HOLE, desc->dst_hole) |
		       FIELD_PREP(ATC_DPIP_BOUNDARY, desc->boundary));

	/* Don't allow CPU to reorder channel enable. */
	wmb();
	dma_writel(atchan->atdma, CHER, atchan->mask);

	vdbg_dump_regs(atchan);
}
static void atdma_desc_free(struct virt_dma_desc *vd)
{
	struct at_dma *atdma = to_at_dma(vd->tx.chan->device);
	struct at_desc *desc = to_atdma_desc(&vd->tx);
	unsigned int i;

	for (i = 0; i < desc->sglen; i++) {
		if (desc->sg[i].lli)
			dma_pool_free(atdma->lli_pool, desc->sg[i].lli,
				      desc->sg[i].lli_phys);
	}

	/* If the transfer was a memset, free our temporary buffer */
	if (desc->memset_buffer) {
		dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
			      desc->memset_paddr);
		desc->memset_buffer = false;
	}

	kfree(desc);
}
/*
 * atc_calc_bytes_left - calculates the number of bytes left according to the
 * value read from CTRLA.
 *
 * @current_len: the number of bytes left before reading CTRLA
 * @ctrla: the value of CTRLA
 */
static inline u32 atc_calc_bytes_left(u32 current_len, u32 ctrla)
{
	u32 btsize = FIELD_GET(ATC_BTSIZE, ctrla);
	u32 src_width = FIELD_GET(ATC_SRC_WIDTH, ctrla);

	/*
	 * According to the datasheet, when reading the Control A Register
	 * (ctrla), the Buffer Transfer Size (btsize) bitfield refers to the
	 * number of transfers completed on the Source Interface.
	 * So btsize is always a number of source width transfers.
	 */
	return current_len - (btsize << src_width);
}
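/*
 * Worked example: with current_len = 512 bytes, if CTRLA reports
 * btsize = 96 and a source width encoding of 1 (16-bit transfers), then
 * 96 << 1 = 192 bytes have already been read from the source, so
 * 512 - 192 = 320 bytes are left.
 */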
/*
 * atc_get_llis_residue - Get residue for a hardware linked list transfer
 * @atchan: pointer to an atmel hdmac channel.
 * @desc: pointer to the descriptor for which the residue is calculated.
 * @residue: residue to be set to dma_tx_state.
 *
 * Calculate the residue by removing the length of the Linked List Item (LLI)
 * already transferred from the total length. To get the current LLI we can use
 * the value of the channel's DSCR register and compare it against the DSCR
 * value of each LLI.
 *
 * The CTRLA register provides us with the amount of data already read from the
 * source for the LLI. So we can compute a more accurate residue by also
 * removing the number of bytes corresponding to this amount of data.
 *
 * However, the DSCR and CTRLA registers cannot both be read atomically. Hence a
 * race condition may occur: the first read register may refer to one LLI
 * whereas the second read may refer to a later LLI in the list because of the
 * DMA transfer progression in between the two reads.
 *
 * One solution could have been to pause the DMA transfer, read the DSCR and
 * CTRLA then resume the DMA transfer. Nonetheless, this approach presents some
 * drawbacks:
 * - If the DMA transfer is paused, RX overruns or TX underruns are more likely
 *   to occur depending on the system latency. Taking the USART driver as an
 *   example, it uses a cyclic DMA transfer to read data from the Receive
 *   Holding Register (RHR) to avoid RX overruns since the RHR is not protected
 *   by any FIFO on most Atmel SoCs. So pausing the DMA transfer to compute the
 *   residue would break the USART driver design.
 * - The atc_pause() function masks interrupts but we'd rather avoid to do so
 *   for system latency purpose.
 *
 * Then we'd rather use another solution: the DSCR is read a first time, the
 * CTRLA is read in turn, next the DSCR is read a second time. If the two
 * consecutive read values of the DSCR are the same then we assume both refer
 * to the very same LLI, as does the CTRLA value read in between. For cyclic
 * transfers, the assumption is that a full loop is "not so fast". If the two
 * DSCR values are different, we read again the CTRLA then the DSCR until two
 * consecutive read values are equal or until the maximum number of trials is
 * reached. This algorithm is very unlikely not to find a stable value for
 * DSCR.
 *
 * Returns: %0 on success, -errno otherwise.
 */
static int atc_get_llis_residue(struct at_dma_chan *atchan,
				struct at_desc *desc, u32 *residue)
{
	u32 len, ctrla, dscr;
	unsigned int i;

	len = desc->total_len;
	dscr = channel_readl(atchan, DSCR);
	rmb(); /* ensure DSCR is read before CTRLA */
	ctrla = channel_readl(atchan, CTRLA);
	for (i = 0; i < ATC_MAX_DSCR_TRIALS; ++i) {
		u32 new_dscr;

		rmb(); /* ensure DSCR is read after CTRLA */
		new_dscr = channel_readl(atchan, DSCR);

		/*
		 * If the DSCR register value has not changed inside the DMA
		 * controller since the previous read, we assume that both the
		 * dscr and ctrla values refer to the very same descriptor.
		 */
		if (likely(new_dscr == dscr))
			break;

		/*
		 * DSCR has changed inside the DMA controller, so the previously
		 * read value of CTRLA may refer to an already processed
		 * descriptor hence could be outdated. We need to update ctrla
		 * to match the current descriptor.
		 */
		dscr = new_dscr;
		rmb(); /* ensure DSCR is read before CTRLA */
		ctrla = channel_readl(atchan, CTRLA);
	}
	if (unlikely(i == ATC_MAX_DSCR_TRIALS))
		return -ETIMEDOUT;

	/* For the first descriptor we can be more accurate. */
	if (desc->sg[0].lli->dscr == dscr) {
		*residue = atc_calc_bytes_left(len, ctrla);
		return 0;
	}

	len -= desc->sg[0].len;
	for (i = 1; i < desc->sglen; i++) {
		if (desc->sg[i].lli && desc->sg[i].lli->dscr == dscr)
			break;
		len -= desc->sg[i].len;
	}

	/*
	 * For the current LLI in the chain we can calculate the remaining bytes
	 * using the channel's CTRLA register.
	 */
	*residue = atc_calc_bytes_left(len, ctrla);
	return 0;
}
/*
 * atc_get_residue - get the number of bytes residue for a cookie.
 * The residue is passed by address and updated on success.
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @residue: residue to be updated.
 *
 * Return: %0 on success, -errno otherwise.
 */
static int atc_get_residue(struct dma_chan *chan, dma_cookie_t cookie,
			   u32 *residue)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct virt_dma_desc *vd;
	struct at_desc *desc = NULL;
	u32 len, ctrla;

	vd = vchan_find_desc(&atchan->vc, cookie);
	if (vd)
		desc = to_atdma_desc(&vd->tx);
	else if (atchan->desc && atchan->desc->vd.tx.cookie == cookie)
		desc = atchan->desc;

	if (!desc)
		return -EINVAL;

	if (desc->sg[0].lli->dscr)
		/* hardware linked list transfer */
		return atc_get_llis_residue(atchan, desc, residue);

	/* single transfer */
	len = desc->total_len;
	ctrla = channel_readl(atchan, CTRLA);
	*residue = atc_calc_bytes_left(len, ctrla);
	return 0;
}
/**
 * atc_handle_error - handle errors reported by DMA controller
 * @atchan: channel where error occurs.
 * @i: channel index
 */
static void atc_handle_error(struct at_dma_chan *atchan, unsigned int i)
{
	struct at_desc *desc = atchan->desc;

	/* Disable channel on AHB error */
	dma_writel(atchan->atdma, CHDR, AT_DMA_RES(i) | atchan->mask);

	/*
	 * KERN_CRITICAL may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_crit(chan2dev(&atchan->vc.chan), "Bad descriptor submitted for DMA!\n");
	dev_crit(chan2dev(&atchan->vc.chan), "cookie: %d\n",
		 desc->vd.tx.cookie);
	for (i = 0; i < desc->sglen; i++)
		atc_dump_lli(atchan, desc->sg[i].lli);
}
static void atdma_handle_chan_done(struct at_dma_chan *atchan, u32 pending,
				   unsigned int i)
{
	struct at_desc *desc;

	spin_lock(&atchan->vc.lock);
	desc = atchan->desc;

	if (desc) {
		if (pending & AT_DMA_ERR(i)) {
			atc_handle_error(atchan, i);
			/* Pretend the descriptor completed successfully */
		}

		if (atc_chan_is_cyclic(atchan)) {
			vchan_cyclic_callback(&desc->vd);
		} else {
			vchan_cookie_complete(&desc->vd);
			atchan->desc = NULL;
			if (!(atc_chan_is_enabled(atchan)))
				atc_dostart(atchan);
		}
	}
	spin_unlock(&atchan->vc.lock);
}
static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
{
	struct at_dma *atdma = dev_id;
	struct at_dma_chan *atchan;
	int i;
	u32 status, pending, imr;
	int ret = IRQ_NONE;

	do {
		imr = dma_readl(atdma, EBCIMR);
		status = dma_readl(atdma, EBCISR);
		pending = status & imr;

		if (!pending)
			break;

		ret = IRQ_HANDLED;

		dev_vdbg(atdma->dma_device.dev,
			 "interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
			 status, imr, pending);

		for (i = 0; i < atdma->dma_device.chancnt; i++) {
			atchan = &atdma->chan[i];
			if (!(pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))))
				continue;
			atdma_handle_chan_done(atchan, pending, i);
		}
	} while (pending);

	return ret;
}
/*--  DMA Engine API  --------------------------------------------------*/

/**
 * atc_prep_dma_interleaved - prepare memory to memory interleaved operation
 * @chan: the channel to prepare operation on
 * @xt: Interleaved transfer template
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_interleaved(struct dma_chan *chan,
			 struct dma_interleaved_template *xt,
			 unsigned long flags)
{
	struct at_dma *atdma = to_at_dma(chan->device);
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct data_chunk *first;
	struct atdma_sg *atdma_sg;
	struct at_desc *desc;
	struct at_lli *lli;
	size_t xfer_count;
	unsigned int dwidth;
	u32 ctrla;
	u32 ctrlb;
	size_t len = 0;
	int i;

	if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
		return NULL;

	first = xt->sgl;

	dev_info(chan2dev(chan),
		 "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
		 __func__, &xt->src_start, &xt->dst_start, xt->numf,
		 xt->frame_size, flags);

	/*
	 * The controller can only "skip" X bytes every Y bytes, so we
	 * need to make sure we are given a template that fits that
	 * description, ie a template with chunks that always have the
	 * same size, with the same ICGs.
	 */
	for (i = 0; i < xt->frame_size; i++) {
		struct data_chunk *chunk = xt->sgl + i;

		if ((chunk->size != xt->sgl->size) ||
		    (dmaengine_get_dst_icg(xt, chunk) != dmaengine_get_dst_icg(xt, first)) ||
		    (dmaengine_get_src_icg(xt, chunk) != dmaengine_get_src_icg(xt, first))) {
			dev_err(chan2dev(chan),
				"%s: the controller can transfer only identical chunks\n",
				__func__);
			return NULL;
		}

		len += chunk->size;
	}

	dwidth = atc_get_xfer_width(xt->src_start, xt->dst_start, len);

	xfer_count = len >> dwidth;
	if (xfer_count > ATC_BTSIZE_MAX) {
		dev_err(chan2dev(chan), "%s: buffer is too big\n", __func__);
		return NULL;
	}

	ctrla = FIELD_PREP(ATC_SRC_WIDTH, dwidth) |
		FIELD_PREP(ATC_DST_WIDTH, dwidth);

	ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN |
		FIELD_PREP(ATC_SRC_ADDR_MODE, ATC_SRC_ADDR_MODE_INCR) |
		FIELD_PREP(ATC_DST_ADDR_MODE, ATC_DST_ADDR_MODE_INCR) |
		ATC_SRC_PIP | ATC_DST_PIP |
		FIELD_PREP(ATC_FC, ATC_FC_MEM2MEM);

	desc = kzalloc(struct_size(desc, sg, 1), GFP_ATOMIC);
	if (!desc)
		return NULL;
	desc->sglen = 1;

	atdma_sg = desc->sg;
	atdma_sg->lli = dma_pool_alloc(atdma->lli_pool, GFP_NOWAIT,
				       &atdma_sg->lli_phys);
	if (!atdma_sg->lli) {
		kfree(desc);
		return NULL;
	}
	lli = atdma_sg->lli;

	lli->saddr = xt->src_start;
	lli->daddr = xt->dst_start;
	lli->ctrla = ctrla | xfer_count;
	lli->ctrlb = ctrlb;

	desc->boundary = first->size >> dwidth;
	desc->dst_hole = (dmaengine_get_dst_icg(xt, first) >> dwidth) + 1;
	desc->src_hole = (dmaengine_get_src_icg(xt, first) >> dwidth) + 1;

	atdma_sg->len = len;
	desc->total_len = len;

	set_lli_eol(desc, 0);
	return vchan_tx_prep(&atchan->vc, &desc->vd, flags);
}
/**
 * atc_prep_dma_memcpy - prepare a memcpy operation
 * @chan: the channel to prepare operation on
 * @dest: operation virtual destination address
 * @src: operation virtual source address
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		    size_t len, unsigned long flags)
{
	struct at_dma *atdma = to_at_dma(chan->device);
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_desc *desc = NULL;
	size_t xfer_count;
	size_t offset;
	size_t sg_len;
	unsigned int src_width;
	unsigned int dst_width;
	unsigned int i;
	u32 ctrla;
	u32 ctrlb;

	dev_dbg(chan2dev(chan), "prep_dma_memcpy: d%pad s%pad l0x%zx f0x%lx\n",
		&dest, &src, len, flags);

	if (unlikely(!len)) {
		dev_err(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	sg_len = DIV_ROUND_UP(len, ATC_BTSIZE_MAX);
	desc = kzalloc(struct_size(desc, sg, sg_len), GFP_ATOMIC);
	if (!desc)
		return NULL;
	desc->sglen = sg_len;

	ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN |
		FIELD_PREP(ATC_SRC_ADDR_MODE, ATC_SRC_ADDR_MODE_INCR) |
		FIELD_PREP(ATC_DST_ADDR_MODE, ATC_DST_ADDR_MODE_INCR) |
		FIELD_PREP(ATC_FC, ATC_FC_MEM2MEM);

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	src_width = dst_width = atc_get_xfer_width(src, dest, len);

	ctrla = FIELD_PREP(ATC_SRC_WIDTH, src_width) |
		FIELD_PREP(ATC_DST_WIDTH, dst_width);

	for (offset = 0, i = 0; offset < len;
	     offset += xfer_count << src_width, i++) {
		struct atdma_sg *atdma_sg = &desc->sg[i];
		struct at_lli *lli;

		atdma_sg->lli = dma_pool_alloc(atdma->lli_pool, GFP_NOWAIT,
					       &atdma_sg->lli_phys);
		if (!atdma_sg->lli)
			goto err_desc_get;
		lli = atdma_sg->lli;

		xfer_count = min_t(size_t, (len - offset) >> src_width,
				   ATC_BTSIZE_MAX);

		lli->saddr = src + offset;
		lli->daddr = dest + offset;
		lli->ctrla = ctrla | xfer_count;
		lli->ctrlb = ctrlb;

		desc->sg[i].len = xfer_count << src_width;

		atdma_lli_chain(desc, i);
	}

	desc->total_len = len;

	/* set end-of-link to the last link descriptor of list */
	set_lli_eol(desc, i - 1);

	return vchan_tx_prep(&atchan->vc, &desc->vd, flags);

err_desc_get:
	atdma_desc_free(&desc->vd);
	return NULL;
}
static int atdma_create_memset_lli(struct dma_chan *chan,
				   struct atdma_sg *atdma_sg,
				   dma_addr_t psrc, dma_addr_t pdst, size_t len)
{
	struct at_dma *atdma = to_at_dma(chan->device);
	struct at_lli *lli;
	size_t xfer_count;
	u32 ctrla = FIELD_PREP(ATC_SRC_WIDTH, 2) | FIELD_PREP(ATC_DST_WIDTH, 2);
	u32 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN |
		    FIELD_PREP(ATC_SRC_ADDR_MODE, ATC_SRC_ADDR_MODE_FIXED) |
		    FIELD_PREP(ATC_DST_ADDR_MODE, ATC_DST_ADDR_MODE_INCR) |
		    FIELD_PREP(ATC_FC, ATC_FC_MEM2MEM);

	xfer_count = len >> 2;
	if (xfer_count > ATC_BTSIZE_MAX) {
		dev_err(chan2dev(chan), "%s: buffer is too big\n", __func__);
		return -EINVAL;
	}

	atdma_sg->lli = dma_pool_alloc(atdma->lli_pool, GFP_NOWAIT,
				       &atdma_sg->lli_phys);
	if (!atdma_sg->lli)
		return -ENOMEM;
	lli = atdma_sg->lli;

	lli->saddr = psrc;
	lli->daddr = pdst;
	lli->ctrla = ctrla | xfer_count;
	lli->ctrlb = ctrlb;

	atdma_sg->len = len;

	return 0;
}
/**
 * atc_prep_dma_memset - prepare a memset operation
 * @chan: the channel to prepare operation on
 * @dest: operation virtual destination address
 * @value: value to set memory buffer to
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
		    size_t len, unsigned long flags)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma *atdma = to_at_dma(chan->device);
	struct at_desc *desc;
	void __iomem *vaddr;
	dma_addr_t paddr;
	char fill_pattern;
	int ret;

	dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__,
		 &dest, value, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
		return NULL;
	}

	if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
		dev_dbg(chan2dev(chan), "%s: buffer is not aligned\n",
			__func__);
		return NULL;
	}

	vaddr = dma_pool_alloc(atdma->memset_pool, GFP_NOWAIT, &paddr);
	if (!vaddr) {
		dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
			__func__);
		return NULL;
	}

	/* Only the first byte of value is to be used according to dmaengine */
	fill_pattern = (char)value;

	*(u32 *)vaddr = (fill_pattern << 24) |
			(fill_pattern << 16) |
			(fill_pattern << 8) |
			fill_pattern;

	desc = kzalloc(struct_size(desc, sg, 1), GFP_ATOMIC);
	if (!desc)
		goto err_free_buffer;
	desc->sglen = 1;

	ret = atdma_create_memset_lli(chan, desc->sg, paddr, dest, len);
	if (ret)
		goto err_free_desc;

	desc->memset_paddr = paddr;
	desc->memset_vaddr = vaddr;
	desc->memset_buffer = true;

	desc->total_len = len;

	/* set end-of-link on the descriptor */
	set_lli_eol(desc, 0);

	return vchan_tx_prep(&atchan->vc, &desc->vd, flags);

err_free_desc:
	kfree(desc);
err_free_buffer:
	dma_pool_free(atdma->memset_pool, vaddr, paddr);
	return NULL;
}
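/*
 * Example of the fill-pattern replication above: a memset value of
 * 0x12345678 keeps only its least significant byte, 0x78, so the scratch
 * word becomes 0x78787878, which the controller then copies repeatedly
 * from the fixed source address into the destination buffer.
 */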
static struct dma_async_tx_descriptor *
atc_prep_dma_memset_sg(struct dma_chan *chan,
		       struct scatterlist *sgl,
		       unsigned int sg_len, int value,
		       unsigned long flags)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma *atdma = to_at_dma(chan->device);
	struct at_desc *desc;
	struct scatterlist *sg;
	void __iomem *vaddr;
	dma_addr_t paddr;
	size_t total_len = 0;
	int i;
	int ret;

	dev_vdbg(chan2dev(chan), "%s: v0x%x l0x%zx f0x%lx\n", __func__,
		 value, sg_len, flags);

	if (unlikely(!sgl || !sg_len)) {
		dev_dbg(chan2dev(chan), "%s: scatterlist is empty!\n",
			__func__);
		return NULL;
	}

	vaddr = dma_pool_alloc(atdma->memset_pool, GFP_NOWAIT, &paddr);
	if (!vaddr) {
		dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
			__func__);
		return NULL;
	}
	*(u32 *)vaddr = value;

	desc = kzalloc(struct_size(desc, sg, sg_len), GFP_ATOMIC);
	if (!desc)
		goto err_free_dma_buf;
	desc->sglen = sg_len;

	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t dest = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		dev_vdbg(chan2dev(chan), "%s: d%pad, l0x%zx\n",
			 __func__, &dest, len);

		if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
			dev_err(chan2dev(chan), "%s: buffer is not aligned\n",
				__func__);
			goto err_free_desc;
		}

		ret = atdma_create_memset_lli(chan, &desc->sg[i], paddr, dest,
					      len);
		if (ret)
			goto err_free_desc;

		atdma_lli_chain(desc, i);
		total_len += len;
	}

	desc->memset_paddr = paddr;
	desc->memset_vaddr = vaddr;
	desc->memset_buffer = true;

	desc->total_len = total_len;

	/* set end-of-link on the descriptor */
	set_lli_eol(desc, i - 1);

	return vchan_tx_prep(&atchan->vc, &desc->vd, flags);

err_free_desc:
	atdma_desc_free(&desc->vd);
err_free_dma_buf:
	dma_pool_free(atdma->memset_pool, vaddr, paddr);
	return NULL;
}
/**
 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: tx descriptor status flags
 * @context: transaction context (ignored)
 */
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		  unsigned int sg_len, enum dma_transfer_direction direction,
		  unsigned long flags, void *context)
{
	struct at_dma *atdma = to_at_dma(chan->device);
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma_slave *atslave = chan->private;
	struct dma_slave_config *sconfig = &atchan->dma_sconfig;
	struct at_desc *desc;
	u32 ctrla;
	u32 ctrlb;
	dma_addr_t reg;
	unsigned int reg_width;
	unsigned int mem_width;
	unsigned int i;
	struct scatterlist *sg;
	size_t total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
		 sg_len,
		 direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
		 flags);

	if (unlikely(!atslave || !sg_len)) {
		dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
		return NULL;
	}

	desc = kzalloc(struct_size(desc, sg, sg_len), GFP_ATOMIC);
	if (!desc)
		return NULL;
	desc->sglen = sg_len;

	ctrla = FIELD_PREP(ATC_SCSIZE, sconfig->src_maxburst) |
		FIELD_PREP(ATC_DCSIZE, sconfig->dst_maxburst);
	ctrlb = ATC_IEN;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = convert_buswidth(sconfig->dst_addr_width);
		ctrla |= FIELD_PREP(ATC_DST_WIDTH, reg_width);
		ctrlb |= FIELD_PREP(ATC_DST_ADDR_MODE,
				    ATC_DST_ADDR_MODE_FIXED) |
			 FIELD_PREP(ATC_SRC_ADDR_MODE, ATC_SRC_ADDR_MODE_INCR) |
			 FIELD_PREP(ATC_FC, ATC_FC_MEM2PER) |
			 FIELD_PREP(ATC_SIF, atchan->mem_if) |
			 FIELD_PREP(ATC_DIF, atchan->per_if);
		reg = sconfig->dst_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct atdma_sg *atdma_sg = &desc->sg[i];
			struct at_lli *lli;
			u32 len;
			u32 mem;

			atdma_sg->lli = dma_pool_alloc(atdma->lli_pool,
						       GFP_NOWAIT,
						       &atdma_sg->lli_phys);
			if (!atdma_sg->lli)
				goto err_desc_get;
			lli = atdma_sg->lli;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (unlikely(!len)) {
				dev_dbg(chan2dev(chan),
					"prep_slave_sg: sg(%d) data length is zero\n", i);
				goto err;
			}
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			lli->saddr = mem;
			lli->daddr = reg;
			lli->ctrla = ctrla |
				     FIELD_PREP(ATC_SRC_WIDTH, mem_width) |
				     len >> mem_width;
			lli->ctrlb = ctrlb;

			atdma_sg->len = len;
			total_len += len;

			desc->sg[i].len = len;
			atdma_lli_chain(desc, i);
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = convert_buswidth(sconfig->src_addr_width);
		ctrla |= FIELD_PREP(ATC_SRC_WIDTH, reg_width);
		ctrlb |= FIELD_PREP(ATC_DST_ADDR_MODE, ATC_DST_ADDR_MODE_INCR) |
			 FIELD_PREP(ATC_SRC_ADDR_MODE,
				    ATC_SRC_ADDR_MODE_FIXED) |
			 FIELD_PREP(ATC_FC, ATC_FC_PER2MEM) |
			 FIELD_PREP(ATC_SIF, atchan->per_if) |
			 FIELD_PREP(ATC_DIF, atchan->mem_if);

		reg = sconfig->src_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct atdma_sg *atdma_sg = &desc->sg[i];
			struct at_lli *lli;
			u32 len;
			u32 mem;

			atdma_sg->lli = dma_pool_alloc(atdma->lli_pool,
						       GFP_NOWAIT,
						       &atdma_sg->lli_phys);
			if (!atdma_sg->lli)
				goto err_desc_get;
			lli = atdma_sg->lli;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (unlikely(!len)) {
				dev_dbg(chan2dev(chan),
					"prep_slave_sg: sg(%d) data length is zero\n", i);
				goto err;
			}
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			lli->saddr = reg;
			lli->daddr = mem;
			lli->ctrla = ctrla |
				     FIELD_PREP(ATC_DST_WIDTH, mem_width) |
				     len >> mem_width;
			lli->ctrlb = ctrlb;

			desc->sg[i].len = len;
			total_len += len;

			atdma_lli_chain(desc, i);
		}
		break;
	default:
		return NULL;
	}

	/* set end-of-link to the last link descriptor of list */
	set_lli_eol(desc, i - 1);

	desc->total_len = total_len;

	return vchan_tx_prep(&atchan->vc, &desc->vd, flags);

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
err:
	atdma_desc_free(&desc->vd);
	return NULL;
}
/*
 * atc_dma_cyclic_check_values
 * Check for too big/unaligned periods and unaligned DMA buffer
 */
static int
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
			    size_t period_len)
{
	if (period_len > (ATC_BTSIZE_MAX << reg_width))
		goto err_out;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto err_out;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto err_out;

	return 0;

err_out:
	return -EINVAL;
}
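/*
 * For instance, with 32-bit transfers (reg_width = 2) a period may span at
 * most ATC_BTSIZE_MAX << 2 = 0xffff * 4 bytes, and both buf_addr and
 * period_len must be 4-byte aligned, i.e. have their two LSBs clear.
 */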
/*
 * atc_dma_cyclic_fill_desc - Fill one period descriptor
 */
static int
atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
			 unsigned int i, dma_addr_t buf_addr,
			 unsigned int reg_width, size_t period_len,
			 enum dma_transfer_direction direction)
{
	struct at_dma *atdma = to_at_dma(chan->device);
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct dma_slave_config *sconfig = &atchan->dma_sconfig;
	struct atdma_sg *atdma_sg = &desc->sg[i];
	struct at_lli *lli;

	atdma_sg->lli = dma_pool_alloc(atdma->lli_pool, GFP_ATOMIC,
				       &atdma_sg->lli_phys);
	if (!atdma_sg->lli)
		return -ENOMEM;
	lli = atdma_sg->lli;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		lli->saddr = buf_addr + (period_len * i);
		lli->daddr = sconfig->dst_addr;
		lli->ctrlb = FIELD_PREP(ATC_DST_ADDR_MODE,
					ATC_DST_ADDR_MODE_FIXED) |
			     FIELD_PREP(ATC_SRC_ADDR_MODE,
					ATC_SRC_ADDR_MODE_INCR) |
			     FIELD_PREP(ATC_FC, ATC_FC_MEM2PER) |
			     FIELD_PREP(ATC_SIF, atchan->mem_if) |
			     FIELD_PREP(ATC_DIF, atchan->per_if);
		break;

	case DMA_DEV_TO_MEM:
		lli->saddr = sconfig->src_addr;
		lli->daddr = buf_addr + (period_len * i);
		lli->ctrlb = FIELD_PREP(ATC_DST_ADDR_MODE,
					ATC_DST_ADDR_MODE_INCR) |
			     FIELD_PREP(ATC_SRC_ADDR_MODE,
					ATC_SRC_ADDR_MODE_FIXED) |
			     FIELD_PREP(ATC_FC, ATC_FC_PER2MEM) |
			     FIELD_PREP(ATC_SIF, atchan->per_if) |
			     FIELD_PREP(ATC_DIF, atchan->mem_if);
		break;

	default:
		return -EINVAL;
	}

	lli->ctrla = FIELD_PREP(ATC_SCSIZE, sconfig->src_maxburst) |
		     FIELD_PREP(ATC_DCSIZE, sconfig->dst_maxburst) |
		     FIELD_PREP(ATC_DST_WIDTH, reg_width) |
		     FIELD_PREP(ATC_SRC_WIDTH, reg_width) |
		     period_len >> reg_width;
	desc->sg[i].len = period_len;

	return 0;
}
/**
 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		    size_t period_len, enum dma_transfer_direction direction,
		    unsigned long flags)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma_slave *atslave = chan->private;
	struct dma_slave_config *sconfig = &atchan->dma_sconfig;
	struct at_desc *desc;
	unsigned long was_cyclic;
	unsigned int reg_width;
	unsigned int periods = buf_len / period_len;
	unsigned int i;

	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@%pad - %d (%d/%d)\n",
		 direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
		 &buf_addr,
		 periods, buf_len, period_len);

	if (unlikely(!atslave || !buf_len || !period_len)) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
		return NULL;
	}

	was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
	if (was_cyclic) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
		return NULL;
	}

	if (unlikely(!is_slave_direction(direction)))
		goto err_out;

	if (direction == DMA_MEM_TO_DEV)
		reg_width = convert_buswidth(sconfig->dst_addr_width);
	else
		reg_width = convert_buswidth(sconfig->src_addr_width);

	/* Check for too big/unaligned periods and unaligned DMA buffer */
	if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len))
		goto err_out;

	desc = kzalloc(struct_size(desc, sg, periods), GFP_ATOMIC);
	if (!desc)
		goto err_out;
	desc->sglen = periods;

	/* build cyclic linked list */
	for (i = 0; i < periods; i++) {
		if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
					     reg_width, period_len, direction))
			goto err_fill_desc;
		atdma_lli_chain(desc, i);
	}
	desc->total_len = buf_len;
	/* let's make a cyclic list */
	desc->sg[i - 1].lli->dscr = desc->sg[0].lli_phys;

	return vchan_tx_prep(&atchan->vc, &desc->vd, flags);

err_fill_desc:
	atdma_desc_free(&desc->vd);
err_out:
	clear_bit(ATC_IS_CYCLIC, &atchan->status);
	return NULL;
}
static int atc_config(struct dma_chan *chan,
		      struct dma_slave_config *sconfig)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/* Check if the channel is configured for slave transfers */
	if (!chan->private)
		return -EINVAL;

	memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));

	convert_burst(&atchan->dma_sconfig.src_maxburst);
	convert_burst(&atchan->dma_sconfig.dst_maxburst);

	return 0;
}
static int atc_pause(struct dma_chan *chan)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma *atdma = to_at_dma(chan->device);
	int chan_id = atchan->vc.chan.chan_id;
	unsigned long flags;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	spin_lock_irqsave(&atchan->vc.lock, flags);

	dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
	set_bit(ATC_IS_PAUSED, &atchan->status);

	spin_unlock_irqrestore(&atchan->vc.lock, flags);

	return 0;
}
static int atc_resume(struct dma_chan *chan)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma *atdma = to_at_dma(chan->device);
	int chan_id = atchan->vc.chan.chan_id;
	unsigned long flags;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	if (!atc_chan_is_paused(atchan))
		return 0;

	spin_lock_irqsave(&atchan->vc.lock, flags);

	dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
	clear_bit(ATC_IS_PAUSED, &atchan->status);

	spin_unlock_irqrestore(&atchan->vc.lock, flags);

	return 0;
}
static int atc_terminate_all(struct dma_chan *chan)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma *atdma = to_at_dma(chan->device);
	int chan_id = atchan->vc.chan.chan_id;
	unsigned long flags;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/*
	 * This is only called when something went wrong elsewhere, so
	 * we don't really care about the data. Just disable the
	 * channel. We still have to poll the channel enable bit due
	 * to AHB/HSB limitations.
	 */
	spin_lock_irqsave(&atchan->vc.lock, flags);

	/* disabling channel: must also remove suspend state */
	dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);

	/* confirm that this channel is disabled */
	while (dma_readl(atdma, CHSR) & atchan->mask)
		cpu_relax();

	if (atchan->desc) {
		vchan_terminate_vdesc(&atchan->desc->vd);
		atchan->desc = NULL;
	}

	vchan_get_all_descriptors(&atchan->vc, &list);

	clear_bit(ATC_IS_PAUSED, &atchan->status);
	/* if channel dedicated to cyclic operations, free it */
	clear_bit(ATC_IS_CYCLIC, &atchan->status);

	spin_unlock_irqrestore(&atchan->vc.lock, flags);

	vchan_dma_desc_free_list(&atchan->vc, &list);

	return 0;
}
/**
 * atc_tx_status - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @txstate: if not %NULL updated with transaction state
 *
 * If @txstate is passed in, upon return it reflects the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static enum dma_status
atc_tx_status(struct dma_chan *chan,
	      dma_cookie_t cookie,
	      struct dma_tx_state *txstate)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	unsigned long flags;
	enum dma_status dma_status;
	u32 residue;
	int ret;

	dma_status = dma_cookie_status(chan, cookie, txstate);
	if (dma_status == DMA_COMPLETE || !txstate)
		return dma_status;

	spin_lock_irqsave(&atchan->vc.lock, flags);
	/* Get number of bytes left in the active transactions */
	ret = atc_get_residue(chan, cookie, &residue);
	spin_unlock_irqrestore(&atchan->vc.lock, flags);

	if (unlikely(ret < 0)) {
		dev_vdbg(chan2dev(chan), "get residual bytes error\n");
		return DMA_ERROR;
	} else {
		dma_set_residue(txstate, residue);
	}

	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %u\n",
		 dma_status, cookie, residue);

	return dma_status;
}
static void atc_issue_pending(struct dma_chan *chan)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&atchan->vc.lock, flags);
	if (vchan_issue_pending(&atchan->vc) && !atchan->desc) {
		if (!(atc_chan_is_enabled(atchan)))
			atc_dostart(atchan);
	}
	spin_unlock_irqrestore(&atchan->vc.lock, flags);
}
/**
 * atc_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
 *
 * Return: %0 on success, -%EIO if the channel is not idle
 */
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma *atdma = to_at_dma(chan->device);
	struct at_dma_slave *atslave;
	u32 cfg;

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT:  channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	cfg = ATC_DEFAULT_CFG;

	atslave = chan->private;
	if (atslave) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_device.dev);

		/* if cfg configuration specified take it instead of default */
		if (atslave->cfg)
			cfg = atslave->cfg;
	}

	/* channel parameters */
	channel_writel(atchan, CFG, cfg);

	return 0;
}
/**
 * atc_free_chan_resources - free all channel resources
 * @chan: DMA channel
 */
static void atc_free_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);

	BUG_ON(atc_chan_is_enabled(atchan));

	vchan_free_chan_resources(to_virt_chan(chan));
	atchan->status = 0;

	/*
	 * Free atslave allocated in at_dma_xlate()
	 */
	kfree(chan->private);
	chan->private = NULL;

	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}
#ifdef CONFIG_OF
static bool at_dma_filter(struct dma_chan *chan, void *slave)
{
	struct at_dma_slave *atslave = slave;

	if (atslave->dma_dev == chan->device->dev) {
		chan->private = atslave;
		return true;
	} else {
		return false;
	}
}
static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *of_dma)
{
	struct dma_chan *chan;
	struct at_dma_chan *atchan;
	struct at_dma_slave *atslave;
	dma_cap_mask_t mask;
	unsigned int per_id;
	struct platform_device *dmac_pdev;

	if (dma_spec->args_count != 2)
		return NULL;

	dmac_pdev = of_find_device_by_node(dma_spec->np);
	if (!dmac_pdev)
		return NULL;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	atslave = kmalloc(sizeof(*atslave), GFP_KERNEL);
	if (!atslave) {
		put_device(&dmac_pdev->dev);
		return NULL;
	}

	atslave->cfg = ATC_DST_H2SEL | ATC_SRC_H2SEL;
	/*
	 * We can fill both SRC_PER and DST_PER, one of these fields will be
	 * ignored depending on DMA transfer direction.
	 */
	per_id = dma_spec->args[1] & AT91_DMA_CFG_PER_ID_MASK;
	atslave->cfg |= ATC_DST_PER_ID(per_id) | ATC_SRC_PER_ID(per_id);
	/*
	 * We have to translate the value we get from the device tree since
	 * the half FIFO configuration value had to be 0 to keep backward
	 * compatibility.
	 */
	switch (dma_spec->args[1] & AT91_DMA_CFG_FIFOCFG_MASK) {
	case AT91_DMA_CFG_FIFOCFG_ALAP:
		atslave->cfg |= FIELD_PREP(ATC_FIFOCFG,
					   ATC_FIFOCFG_LARGESTBURST);
		break;
	case AT91_DMA_CFG_FIFOCFG_ASAP:
		atslave->cfg |= FIELD_PREP(ATC_FIFOCFG,
					   ATC_FIFOCFG_ENOUGHSPACE);
		break;
	case AT91_DMA_CFG_FIFOCFG_HALF:
	default:
		atslave->cfg |= FIELD_PREP(ATC_FIFOCFG, ATC_FIFOCFG_HALFFIFO);
	}
	atslave->dma_dev = &dmac_pdev->dev;

	chan = dma_request_channel(mask, at_dma_filter, atslave);
	if (!chan) {
		put_device(&dmac_pdev->dev);
		kfree(atslave);
		return NULL;
	}

	atchan = to_at_dma_chan(chan);
	atchan->per_if = dma_spec->args[0] & 0xff;
	atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff;

	return chan;
}
#else
static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *of_dma)
{
	return NULL;
}
#endif
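/*
 * Device tree usage sketch (illustrative only; exact cell values are board
 * specific and defined by the atmel DMA binding). A client node references
 * the controller with two cells:
 *
 *	dmas = <&dma0 2 (AT91_DMA_CFG_PER_ID(12) | AT91_DMA_CFG_FIFOCFG_HALF)>;
 *
 * at_dma_xlate() above decodes the first cell as the peripheral interface
 * (bits 7:0) and the memory interface (bits 23:16), and the second cell as
 * the peripheral handshaking ID (AT91_DMA_CFG_PER_ID_MASK) plus the FIFO
 * configuration (AT91_DMA_CFG_FIFOCFG_MASK).
 */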
/*--  Module Management  -----------------------------------------------*/

/* cap_mask is a multi-u32 bitfield, fill it with proper C code. */
static struct at_dma_platform_data at91sam9rl_config = {
	.nr_channels = 2,
};

static struct at_dma_platform_data at91sam9g45_config = {
	.nr_channels = 8,
};

#if defined(CONFIG_OF)
static const struct of_device_id atmel_dma_dt_ids[] = {
	{
		.compatible = "atmel,at91sam9rl-dma",
		.data = &at91sam9rl_config,
	}, {
		.compatible = "atmel,at91sam9g45-dma",
		.data = &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
#endif

static const struct platform_device_id atdma_devtypes[] = {
	{
		.name = "at91sam9rl_dma",
		.driver_data = (unsigned long) &at91sam9rl_config,
	}, {
		.name = "at91sam9g45_dma",
		.driver_data = (unsigned long) &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};
static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
						struct platform_device *pdev)
{
	if (pdev->dev.of_node) {
		const struct of_device_id *match;

		match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
		if (match == NULL)
			return NULL;
		return match->data;
	}
	return (struct at_dma_platform_data *)
			platform_get_device_id(pdev)->driver_data;
}
/**
 * at_dma_off - disable DMA controller
 * @atdma: the Atmel HDMAC device
 */
static void at_dma_off(struct at_dma *atdma)
{
	dma_writel(atdma, EN, 0);

	/* disable all interrupts */
	dma_writel(atdma, EBCIDR, -1L);

	/* confirm that all channels are disabled */
	while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
		cpu_relax();
}
static int __init at_dma_probe(struct platform_device *pdev)
{
	struct at_dma *atdma;
	int irq;
	int err;
	int i;
	const struct at_dma_platform_data *plat_dat;

	/* setup platform data for each SoC */
	dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_MEMSET_SG, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);

	/* get DMA parameters from controller type */
	plat_dat = at_dma_get_driver_data(pdev);
	if (!plat_dat)
		return -ENODEV;

	atdma = devm_kzalloc(&pdev->dev,
			     struct_size(atdma, chan, plat_dat->nr_channels),
			     GFP_KERNEL);
	if (!atdma)
		return -ENOMEM;

	atdma->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(atdma->regs))
		return PTR_ERR(atdma->regs);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* discover transaction capabilities */
	atdma->dma_device.cap_mask = plat_dat->cap_mask;
	atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;

	atdma->clk = devm_clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atdma->clk))
		return PTR_ERR(atdma->clk);

	err = clk_prepare_enable(atdma->clk);
	if (err)
		return err;

	/* force dma off, just in case */
	at_dma_off(atdma);

	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, atdma);

	/* create a pool of consistent memory blocks for hardware descriptors */
	atdma->lli_pool = dma_pool_create("at_hdmac_lli_pool",
					  &pdev->dev, sizeof(struct at_lli),
					  4 /* word alignment */, 0);
	if (!atdma->lli_pool) {
		dev_err(&pdev->dev, "Unable to allocate DMA LLI descriptor pool\n");
		err = -ENOMEM;
		goto err_desc_pool_create;
	}

	/* create a pool of consistent memory blocks for memset blocks */
	atdma->memset_pool = dma_pool_create("at_hdmac_memset_pool",
					     &pdev->dev, sizeof(int), 4, 0);
	if (!atdma->memset_pool) {
		dev_err(&pdev->dev, "No memory for memset dma pool\n");
		err = -ENOMEM;
		goto err_memset_pool_create;
	}

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* initialize channels related values */
	INIT_LIST_HEAD(&atdma->dma_device.channels);
	for (i = 0; i < plat_dat->nr_channels; i++) {
		struct at_dma_chan *atchan = &atdma->chan[i];

		atchan->mem_if = AT_DMA_MEM_IF;
		atchan->per_if = AT_DMA_PER_IF;

		atchan->ch_regs = atdma->regs + ch_regs(i);
		atchan->mask = 1 << i;

		atchan->atdma = atdma;
		atchan->vc.desc_free = atdma_desc_free;
		vchan_init(&atchan->vc, &atdma->dma_device);
		atc_enable_chan_irq(atdma, i);
	}

	/* set base routines */
	atdma->dma_device.device_alloc_chan_resources = atc_alloc_chan_resources;
	atdma->dma_device.device_free_chan_resources = atc_free_chan_resources;
	atdma->dma_device.device_tx_status = atc_tx_status;
	atdma->dma_device.device_issue_pending = atc_issue_pending;
	atdma->dma_device.dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_INTERLEAVE, atdma->dma_device.cap_mask))
		atdma->dma_device.device_prep_interleaved_dma = atc_prep_dma_interleaved;

	if (dma_has_cap(DMA_MEMCPY, atdma->dma_device.cap_mask))
		atdma->dma_device.device_prep_dma_memcpy = atc_prep_dma_memcpy;

	if (dma_has_cap(DMA_MEMSET, atdma->dma_device.cap_mask)) {
		atdma->dma_device.device_prep_dma_memset = atc_prep_dma_memset;
		atdma->dma_device.device_prep_dma_memset_sg = atc_prep_dma_memset_sg;
		atdma->dma_device.fill_align = DMAENGINE_ALIGN_4_BYTES;
	}

	if (dma_has_cap(DMA_SLAVE, atdma->dma_device.cap_mask)) {
		atdma->dma_device.device_prep_slave_sg = atc_prep_slave_sg;
		/* controller can do slave DMA: can trigger cyclic transfers */
		dma_cap_set(DMA_CYCLIC, atdma->dma_device.cap_mask);
		atdma->dma_device.device_prep_dma_cyclic = atc_prep_dma_cyclic;
		atdma->dma_device.device_config = atc_config;
		atdma->dma_device.device_pause = atc_pause;
		atdma->dma_device.device_resume = atc_resume;
		atdma->dma_device.device_terminate_all = atc_terminate_all;
		atdma->dma_device.src_addr_widths = ATC_DMA_BUSWIDTHS;
		atdma->dma_device.dst_addr_widths = ATC_DMA_BUSWIDTHS;
		atdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
		atdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	}

	dma_writel(atdma, EN, AT_DMA_ENABLE);

	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n",
		 dma_has_cap(DMA_MEMCPY, atdma->dma_device.cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_MEMSET, atdma->dma_device.cap_mask) ? "set " : "",
		 dma_has_cap(DMA_SLAVE, atdma->dma_device.cap_mask) ? "slave " : "",
		 plat_dat->nr_channels);

	err = dma_async_device_register(&atdma->dma_device);
	if (err) {
		dev_err(&pdev->dev, "Unable to register: %d.\n", err);
		goto err_dma_async_device_register;
	}

	/*
	 * Do not return an error if the dmac node is not present in order to
	 * not break the existing way of requesting channel with
	 * dma_request_channel().
	 */
	if (pdev->dev.of_node) {
		err = of_dma_controller_register(pdev->dev.of_node,
						 at_dma_xlate, atdma);
		if (err) {
			dev_err(&pdev->dev, "could not register of_dma_controller\n");
			goto err_of_dma_controller_register;
		}
	}

	return 0;

err_of_dma_controller_register:
	dma_async_device_unregister(&atdma->dma_device);
err_dma_async_device_register:
	dma_pool_destroy(atdma->memset_pool);
err_memset_pool_create:
	dma_pool_destroy(atdma->lli_pool);
err_desc_pool_create:
	free_irq(platform_get_irq(pdev, 0), atdma);
err_irq:
	clk_disable_unprepare(atdma->clk);
	return err;
}
static void at_dma_remove(struct platform_device *pdev)
{
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	at_dma_off(atdma);
	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&atdma->dma_device);

	dma_pool_destroy(atdma->memset_pool);
	dma_pool_destroy(atdma->lli_pool);
	free_irq(platform_get_irq(pdev, 0), atdma);

	list_for_each_entry_safe(chan, _chan, &atdma->dma_device.channels,
				 device_node) {
		/* Disable interrupts */
		atc_disable_chan_irq(atdma, chan->chan_id);
		list_del(&chan->device_node);
	}

	clk_disable_unprepare(atdma->clk);
}
static void at_dma_shutdown(struct platform_device *pdev)
{
	struct at_dma *atdma = platform_get_drvdata(pdev);

	at_dma_off(platform_get_drvdata(pdev));
	clk_disable_unprepare(atdma->clk);
}
static int at_dma_prepare(struct device *dev)
{
	struct at_dma *atdma = dev_get_drvdata(dev);
	struct dma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atdma->dma_device.channels,
				 device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);
		/* wait for transaction completion (except in cyclic case) */
		if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
			return -EAGAIN;
	}
	return 0;
}
static void atc_suspend_cyclic(struct at_dma_chan *atchan)
{
	struct dma_chan *chan = &atchan->vc.chan;

	/* Channel should be paused by user
	 * do it anyway even if it is not done already */
	if (!atc_chan_is_paused(atchan)) {
		dev_warn(chan2dev(chan),
			 "cyclic channel not paused, should be done by channel user\n");
		atc_pause(chan);
	}

	/* now preserve additional data for cyclic operations */
	/* next descriptor address in the cyclic list */
	atchan->save_dscr = channel_readl(atchan, DSCR);

	vdbg_dump_regs(atchan);
}
static int at_dma_suspend_noirq(struct device *dev)
{
	struct at_dma *atdma = dev_get_drvdata(dev);
	struct dma_chan *chan, *_chan;

	/* preserve data */
	list_for_each_entry_safe(chan, _chan, &atdma->dma_device.channels,
				 device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		if (atc_chan_is_cyclic(atchan))
			atc_suspend_cyclic(atchan);
		atchan->save_cfg = channel_readl(atchan, CFG);
	}
	atdma->save_imr = dma_readl(atdma, EBCIMR);

	/* disable DMA controller */
	at_dma_off(atdma);
	clk_disable_unprepare(atdma->clk);
	return 0;
}
static void atc_resume_cyclic(struct at_dma_chan *atchan)
{
	struct at_dma *atdma = to_at_dma(atchan->vc.chan.device);

	/* restore channel status for cyclic descriptors list:
	 * next descriptor in the cyclic list at the time of suspend */
	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, atchan->save_dscr);
	dma_writel(atdma, CHER, atchan->mask);

	/* channel pause status should be removed by channel user
	 * We cannot take the initiative to do it here */

	vdbg_dump_regs(atchan);
}
static int at_dma_resume_noirq(struct device *dev)
{
	struct at_dma *atdma = dev_get_drvdata(dev);
	struct dma_chan *chan, *_chan;

	/* bring back DMA controller */
	clk_prepare_enable(atdma->clk);
	dma_writel(atdma, EN, AT_DMA_ENABLE);

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* restore saved data */
	dma_writel(atdma, EBCIER, atdma->save_imr);
	list_for_each_entry_safe(chan, _chan, &atdma->dma_device.channels,
				 device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		channel_writel(atchan, CFG, atchan->save_cfg);
		if (atc_chan_is_cyclic(atchan))
			atc_resume_cyclic(atchan);
	}
	return 0;
}
= {
2247 .prepare
= at_dma_prepare
,
2248 .suspend_noirq
= at_dma_suspend_noirq
,
2249 .resume_noirq
= at_dma_resume_noirq
,
2252 static struct platform_driver at_dma_driver
= {
2253 .remove
= at_dma_remove
,
2254 .shutdown
= at_dma_shutdown
,
2255 .id_table
= atdma_devtypes
,
2258 .pm
= pm_ptr(&at_dma_dev_pm_ops
),
2259 .of_match_table
= of_match_ptr(atmel_dma_dt_ids
),
2263 static int __init
at_dma_init(void)
2265 return platform_driver_probe(&at_dma_driver
, at_dma_probe
);
2267 subsys_initcall(at_dma_init
);
2269 static void __exit
at_dma_exit(void)
2271 platform_driver_unregister(&at_dma_driver
);
2273 module_exit(at_dma_exit
);
2275 MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
2276 MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
2277 MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@microchip.com>");
2278 MODULE_LICENSE("GPL");
2279 MODULE_ALIAS("platform:at_hdmac");