// SPDX-License-Identifier: GPL-2.0
// (C) 2017-2018 Synopsys, Inc. (www.synopsys.com)

/*
 * Synopsys DesignWare AXI DMA Controller driver.
 *
 * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "dw-axi-dmac.h"
#include "../dmaengine.h"
#include "../virt-dma.h"

/*
 * The set of bus widths supported by the DMA controller. DW AXI DMAC supports
 * master data bus width up to 512 bits (for both AXI master interfaces), but
 * it depends on IP block configuration.
 */
#define AXI_DMA_BUSWIDTHS		  \
	(DMA_SLAVE_BUSWIDTH_1_BYTE	| \
	DMA_SLAVE_BUSWIDTH_2_BYTES	| \
	DMA_SLAVE_BUSWIDTH_4_BYTES	| \
	DMA_SLAVE_BUSWIDTH_8_BYTES	| \
	DMA_SLAVE_BUSWIDTH_16_BYTES	| \
	DMA_SLAVE_BUSWIDTH_32_BYTES	| \
	DMA_SLAVE_BUSWIDTH_64_BYTES)
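
/*
 * Platform-specific quirk flags: they are passed as OF match data through
 * dw_dma_of_id_table below and evaluated in dw_probe().
 */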
#define AXI_DMA_FLAG_HAS_APB_REGS	BIT(0)
#define AXI_DMA_FLAG_HAS_RESETS		BIT(1)
#define AXI_DMA_FLAG_USE_CFG2		BIT(2)

static inline void
axi_dma_iowrite32(struct axi_dma_chip *chip, u32 reg, u32 val)
{
	iowrite32(val, chip->regs + reg);
}

static inline u32 axi_dma_ioread32(struct axi_dma_chip *chip, u32 reg)
{
	return ioread32(chip->regs + reg);
}

static inline void
axi_dma_iowrite64(struct axi_dma_chip *chip, u32 reg, u64 val)
{
	iowrite64(val, chip->regs + reg);
}

static inline u64 axi_dma_ioread64(struct axi_dma_chip *chip, u32 reg)
{
	return ioread64(chip->regs + reg);
}

static inline void
axi_chan_iowrite32(struct axi_dma_chan *chan, u32 reg, u32 val)
{
	iowrite32(val, chan->chan_regs + reg);
}

static inline u32 axi_chan_ioread32(struct axi_dma_chan *chan, u32 reg)
{
	return ioread32(chan->chan_regs + reg);
}

static inline void
axi_chan_iowrite64(struct axi_dma_chan *chan, u32 reg, u64 val)
{
	/*
	 * Split the one 64-bit write into two 32-bit writes, as some hardware
	 * doesn't support 64-bit access.
	 */
	iowrite32(lower_32_bits(val), chan->chan_regs + reg);
	iowrite32(upper_32_bits(val), chan->chan_regs + reg + 4);
}

static inline void axi_chan_config_write(struct axi_dma_chan *chan,
					 struct axi_dma_chan_config *config)
{
	u32 cfg_lo, cfg_hi;
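
	/*
	 * Controllers with the legacy 8-channel register map (and without the
	 * CFG2 quirk) keep the handshake interface and priority fields in
	 * CH_CFG_H; otherwise the per-channel handshake numbers live in
	 * CH_CFG_L and the remaining fields use the CFG2 bit layout.
	 */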
	cfg_lo = (config->dst_multblk_type << CH_CFG_L_DST_MULTBLK_TYPE_POS |
		  config->src_multblk_type << CH_CFG_L_SRC_MULTBLK_TYPE_POS);
	if (chan->chip->dw->hdata->reg_map_8_channels &&
	    !chan->chip->dw->hdata->use_cfg2) {
		cfg_hi = config->tt_fc << CH_CFG_H_TT_FC_POS |
			 config->hs_sel_src << CH_CFG_H_HS_SEL_SRC_POS |
			 config->hs_sel_dst << CH_CFG_H_HS_SEL_DST_POS |
			 config->src_per << CH_CFG_H_SRC_PER_POS |
			 config->dst_per << CH_CFG_H_DST_PER_POS |
			 config->prior << CH_CFG_H_PRIORITY_POS;
	} else {
		cfg_lo |= config->src_per << CH_CFG2_L_SRC_PER_POS |
			  config->dst_per << CH_CFG2_L_DST_PER_POS;
		cfg_hi = config->tt_fc << CH_CFG2_H_TT_FC_POS |
			 config->hs_sel_src << CH_CFG2_H_HS_SEL_SRC_POS |
			 config->hs_sel_dst << CH_CFG2_H_HS_SEL_DST_POS |
			 config->prior << CH_CFG2_H_PRIORITY_POS;
	}
	axi_chan_iowrite32(chan, CH_CFG_L, cfg_lo);
	axi_chan_iowrite32(chan, CH_CFG_H, cfg_hi);
}

static inline void axi_dma_disable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val &= ~DMAC_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_enable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val |= DMAC_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_irq_disable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val &= ~INT_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_irq_enable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val |= INT_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_chan_irq_disable(struct axi_dma_chan *chan, u32 irq_mask)
{
	u32 val;

	if (likely(irq_mask == DWAXIDMAC_IRQ_ALL)) {
		axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, DWAXIDMAC_IRQ_NONE);
	} else {
		val = axi_chan_ioread32(chan, CH_INTSTATUS_ENA);
		val &= ~irq_mask;
		axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, val);
	}
}

static inline void axi_chan_irq_set(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, irq_mask);
}

static inline void axi_chan_irq_sig_set(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTSIGNAL_ENA, irq_mask);
}

static inline void axi_chan_irq_clear(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTCLEAR, irq_mask);
}

static inline u32 axi_chan_irq_read(struct axi_dma_chan *chan)
{
	return axi_chan_ioread32(chan, CH_INTSTATUS);
}
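
/*
 * On controllers with 16 or more channels DMAC_CHEN is accessed as a 64-bit
 * register and the bits for channels 16 and up are shifted into the upper
 * half by DMAC_CHAN_BLOCK_SHIFT. Each enable bit is paired with a
 * write-enable bit, which the helpers below always set for the channel they
 * touch so the other channels are left untouched.
 */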

static inline void axi_chan_disable(struct axi_dma_chan *chan)
{
	u64 val;

	if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) {
		val = axi_dma_ioread64(chan->chip, DMAC_CHEN);
		if (chan->id >= DMAC_CHAN_16) {
			val &= ~((u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_EN_SHIFT + DMAC_CHAN_BLOCK_SHIFT));
			val |= (u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_EN2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT);
		} else {
			val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
			val |= BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
		}
		axi_dma_iowrite64(chan->chip, DMAC_CHEN, val);
	} else {
		val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
		val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
		if (chan->chip->dw->hdata->reg_map_8_channels)
			val |= BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
		else
			val |= BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
		axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val);
	}
}

static inline void axi_chan_enable(struct axi_dma_chan *chan)
{
	u64 val;

	if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) {
		val = axi_dma_ioread64(chan->chip, DMAC_CHEN);
		if (chan->id >= DMAC_CHAN_16) {
			val |= (u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_EN_SHIFT + DMAC_CHAN_BLOCK_SHIFT) |
				(u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_EN2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT);
		} else {
			val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
			       BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
		}
		axi_dma_iowrite64(chan->chip, DMAC_CHEN, val);
	} else {
		val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
		if (chan->chip->dw->hdata->reg_map_8_channels) {
			val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
			       BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
		} else {
			val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
			       BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
		}
		axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val);
	}
}

static inline bool axi_chan_is_hw_enable(struct axi_dma_chan *chan)
{
	u64 val;

	if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16)
		val = axi_dma_ioread64(chan->chip, DMAC_CHEN);
	else
		val = axi_dma_ioread32(chan->chip, DMAC_CHEN);

	if (chan->id >= DMAC_CHAN_16)
		return !!(val & ((u64)(BIT(chan->id) >> DMAC_CHAN_16) << DMAC_CHAN_BLOCK_SHIFT));
	else
		return !!(val & (BIT(chan->id) << DMAC_CHAN_EN_SHIFT));
}

static void axi_dma_hw_init(struct axi_dma_chip *chip)
{
	int ret;
	u32 i;

	for (i = 0; i < chip->dw->hdata->nr_channels; i++) {
		axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
		axi_chan_disable(&chip->dw->chan[i]);
	}
	ret = dma_set_mask_and_coherent(chip->dev, DMA_BIT_MASK(64));
	if (ret)
		dev_warn(chip->dev, "Unable to set coherent mask\n");
}

static u32 axi_chan_get_xfer_width(struct axi_dma_chan *chan, dma_addr_t src,
				   dma_addr_t dst, size_t len)
{
	u32 max_width = chan->chip->dw->hdata->m_data_width;

	return __ffs(src | dst | len | BIT(max_width));
}

static inline const char *axi_chan_name(struct axi_dma_chan *chan)
{
	return dma_chan_name(&chan->vc.chan);
}

static struct axi_dma_desc *axi_desc_alloc(u32 num)
{
	struct axi_dma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->hw_desc = kcalloc(num, sizeof(*desc->hw_desc), GFP_NOWAIT);
	if (!desc->hw_desc) {
		kfree(desc);
		return NULL;
	}
	desc->nr_hw_descs = num;

	return desc;
}

static struct axi_dma_lli *axi_desc_get(struct axi_dma_chan *chan,
					dma_addr_t *addr)
{
	struct axi_dma_lli *lli;
	dma_addr_t phys;

	lli = dma_pool_zalloc(chan->desc_pool, GFP_NOWAIT, &phys);
	if (unlikely(!lli)) {
		dev_err(chan2dev(chan), "%s: not enough descriptors available\n",
			axi_chan_name(chan));
		return NULL;
	}

	atomic_inc(&chan->descs_allocated);
	*addr = phys;

	return lli;
}

static void axi_desc_put(struct axi_dma_desc *desc)
{
	struct axi_dma_chan *chan = desc->chan;
	int count = desc->nr_hw_descs;
	struct axi_dma_hw_desc *hw_desc;
	int descs_put;

	for (descs_put = 0; descs_put < count; descs_put++) {
		hw_desc = &desc->hw_desc[descs_put];
		dma_pool_free(chan->desc_pool, hw_desc->lli, hw_desc->llp);
	}

	kfree(desc->hw_desc);
	kfree(desc);
	atomic_sub(descs_put, &chan->descs_allocated);
	dev_vdbg(chan2dev(chan), "%s: %d descs put, %d still allocated\n",
		axi_chan_name(chan), descs_put,
		atomic_read(&chan->descs_allocated));
}

static void vchan_desc_put(struct virt_dma_desc *vdesc)
{
	axi_desc_put(vd_to_axi_desc(vdesc));
}

static enum dma_status
dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	u32 completed_length;
	unsigned long flags;
	u32 completed_blocks;
	size_t bytes = 0;
	u32 length;
	u32 len;

	status = dma_cookie_status(dchan, cookie, txstate);
	if (status == DMA_COMPLETE || !txstate)
		return status;

	spin_lock_irqsave(&chan->vc.lock, flags);
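
	/*
	 * Residue is the part of the descriptor that has not completed yet:
	 * the total descriptor length minus completed_blocks times the
	 * per-block length (hw_desc[0].len). If the cookie is no longer on
	 * the virtual channel list, bytes stays 0.
	 */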
	vdesc = vchan_find_desc(&chan->vc, cookie);
	if (vdesc) {
		length = vd_to_axi_desc(vdesc)->length;
		completed_blocks = vd_to_axi_desc(vdesc)->completed_blocks;
		len = vd_to_axi_desc(vdesc)->hw_desc[0].len;
		completed_length = completed_blocks * len;
		bytes = length - completed_length;
	}

	spin_unlock_irqrestore(&chan->vc.lock, flags);
	dma_set_residue(txstate, bytes);

	return status;
}

static void write_desc_llp(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
	desc->lli->llp = cpu_to_le64(adr);
}

static void write_chan_llp(struct axi_dma_chan *chan, dma_addr_t adr)
{
	axi_chan_iowrite64(chan, CH_LLP, adr);
}

static void dw_axi_dma_set_byte_halfword(struct axi_dma_chan *chan, bool set)
{
	u32 offset = DMAC_APB_BYTE_WR_CH_EN;
	u32 reg_width, val;

	if (!chan->chip->apb_regs) {
		dev_dbg(chan->chip->dev, "apb_regs not initialized\n");
		return;
	}

	reg_width = __ffs(chan->config.dst_addr_width);
	if (reg_width == DWAXIDMAC_TRANS_WIDTH_16)
		offset = DMAC_APB_HALFWORD_WR_CH_EN;

	val = ioread32(chan->chip->apb_regs + offset);

	if (set)
		val |= BIT(chan->id);
	else
		val &= ~BIT(chan->id);

	iowrite32(val, chan->chip->apb_regs + offset);
}

/* Called in chan locked context */
static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
				      struct axi_dma_desc *first)
{
	u32 priority = chan->chip->dw->hdata->priority[chan->id];
	struct axi_dma_chan_config config = {};
	u32 irq_mask;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	if (unlikely(axi_chan_is_hw_enable(chan))) {
		dev_err(chan2dev(chan), "%s is non-idle!\n",
			axi_chan_name(chan));

		return;
	}

	axi_dma_enable(chan->chip);

	config.dst_multblk_type = DWAXIDMAC_MBLK_TYPE_LL;
	config.src_multblk_type = DWAXIDMAC_MBLK_TYPE_LL;
	config.tt_fc = DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC;
	config.prior = priority;
	config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW;
	config.hs_sel_src = DWAXIDMAC_HS_SEL_HW;
	switch (chan->direction) {
	case DMA_MEM_TO_DEV:
		dw_axi_dma_set_byte_halfword(chan, true);
		config.tt_fc = chan->config.device_fc ?
				DWAXIDMAC_TT_FC_MEM_TO_PER_DST :
				DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC;
		if (chan->chip->apb_regs)
			config.dst_per = chan->id;
		else
			config.dst_per = chan->hw_handshake_num;
		break;
	case DMA_DEV_TO_MEM:
		config.tt_fc = chan->config.device_fc ?
				DWAXIDMAC_TT_FC_PER_TO_MEM_SRC :
				DWAXIDMAC_TT_FC_PER_TO_MEM_DMAC;
		if (chan->chip->apb_regs)
			config.src_per = chan->id;
		else
			config.src_per = chan->hw_handshake_num;
		break;
	default:
		break;
	}
	axi_chan_config_write(chan, &config);

	write_chan_llp(chan, first->hw_desc[0].llp | lms);

	irq_mask = DWAXIDMAC_IRQ_DMA_TRF | DWAXIDMAC_IRQ_ALL_ERR;
	axi_chan_irq_sig_set(chan, irq_mask);

	/* Generate 'suspend' status but don't generate interrupt */
	irq_mask |= DWAXIDMAC_IRQ_SUSPENDED;
	axi_chan_irq_set(chan, irq_mask);

	axi_chan_enable(chan);
}

static void axi_chan_start_first_queued(struct axi_dma_chan *chan)
{
	struct axi_dma_desc *desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd)
		return;

	desc = vd_to_axi_desc(vd);
	dev_vdbg(chan2dev(chan), "%s: started %u\n", axi_chan_name(chan),
		vd->tx.cookie);
	axi_chan_block_xfer_start(chan, desc);
}

static void dma_chan_issue_pending(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (vchan_issue_pending(&chan->vc))
		axi_chan_start_first_queued(chan);
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static void dw_axi_dma_synchronize(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	vchan_synchronize(&chan->vc);
}

static int dma_chan_alloc_chan_resources(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	/* ASSERT: channel is idle */
	if (axi_chan_is_hw_enable(chan)) {
		dev_err(chan2dev(chan), "%s is non-idle!\n",
			axi_chan_name(chan));
		return -EBUSY;
	}

	/* LLI address must be aligned to a 64-byte boundary */
	chan->desc_pool = dma_pool_create(dev_name(chan2dev(chan)),
					  chan->chip->dev,
					  sizeof(struct axi_dma_lli),
					  64, 0);
	if (!chan->desc_pool) {
		dev_err(chan2dev(chan), "No memory for descriptors\n");
		return -ENOMEM;
	}
	dev_vdbg(dchan2dev(dchan), "%s: allocating\n", axi_chan_name(chan));

	pm_runtime_get(chan->chip->dev);

	return 0;
}

static void dma_chan_free_chan_resources(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	/* ASSERT: channel is idle */
	if (axi_chan_is_hw_enable(chan))
		dev_err(dchan2dev(dchan), "%s is non-idle!\n",
			axi_chan_name(chan));

	axi_chan_disable(chan);
	axi_chan_irq_disable(chan, DWAXIDMAC_IRQ_ALL);

	vchan_free_chan_resources(&chan->vc);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
	dev_vdbg(dchan2dev(dchan),
		 "%s: free resources, descriptor still allocated: %u\n",
		 axi_chan_name(chan), atomic_read(&chan->descs_allocated));

	pm_runtime_put(chan->chip->dev);
}

static void dw_axi_dma_set_hw_channel(struct axi_dma_chan *chan, bool set)
{
	struct axi_dma_chip *chip = chan->chip;
	unsigned long reg_value, val;

	if (!chip->apb_regs) {
		dev_err(chip->dev, "apb_regs not initialized\n");
		return;
	}

	/*
	 * An unused DMA channel has a default value of 0x3F.
	 * Lock the DMA channel by assigning a handshake number to it;
	 * unlock it by writing 0x3F back.
	 */
	if (set)
		val = chan->hw_handshake_num;
	else
		val = UNUSED_CHANNEL;

	reg_value = lo_hi_readq(chip->apb_regs + DMAC_APB_HW_HS_SEL_0);

	/*
	 * The channel is already allocated, so set the handshake field that
	 * matches the channel ID; a single 64-bit write covers all 8 channels.
	 */
	reg_value &= ~(DMA_APB_HS_SEL_MASK <<
			(chan->id * DMA_APB_HS_SEL_BIT_SIZE));
	reg_value |= (val << (chan->id * DMA_APB_HS_SEL_BIT_SIZE));
	lo_hi_writeq(reg_value, chip->apb_regs + DMAC_APB_HW_HS_SEL_0);

	return;
}

/*
 * If DW_axi_dmac sees the CHx_CTL.ShadowReg_Or_LLI_Last bit of the fetched
 * LLI as 1, it understands that the current block is the final block in the
 * transfer and completes the DMA transfer operation at the end of the
 * current block transfer.
 */
static void set_desc_last(struct axi_dma_hw_desc *desc)
{
	u32 val;

	val = le32_to_cpu(desc->lli->ctl_hi);
	val |= CH_CTL_H_LLI_LAST;
	desc->lli->ctl_hi = cpu_to_le32(val);
}

static void write_desc_sar(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
	desc->lli->sar = cpu_to_le64(adr);
}

static void write_desc_dar(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
	desc->lli->dar = cpu_to_le64(adr);
}

static void set_desc_src_master(struct axi_dma_hw_desc *desc)
{
	u32 val;

	/* Select AXI0 for source master */
	val = le32_to_cpu(desc->lli->ctl_lo);
	val &= ~CH_CTL_L_SRC_MAST;
	desc->lli->ctl_lo = cpu_to_le32(val);
}

static void set_desc_dest_master(struct axi_dma_hw_desc *hw_desc,
				 struct axi_dma_desc *desc)
{
	u32 val;

	/* Select AXI1 for destination master if available */
	val = le32_to_cpu(hw_desc->lli->ctl_lo);
	if (desc->chan->chip->dw->hdata->nr_masters > 1)
		val |= CH_CTL_L_DST_MAST;
	else
		val &= ~CH_CTL_L_DST_MAST;

	hw_desc->lli->ctl_lo = cpu_to_le32(val);
}

static int dw_axi_dma_set_hw_desc(struct axi_dma_chan *chan,
				  struct axi_dma_hw_desc *hw_desc,
				  dma_addr_t mem_addr, size_t len)
{
	unsigned int data_width = BIT(chan->chip->dw->hdata->m_data_width);
	unsigned int reg_width;
	unsigned int mem_width;
	dma_addr_t device_addr;
	size_t axi_block_ts;
	size_t block_ts;
	u32 ctllo, ctlhi;
	u32 burst_len;

	axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];
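
	/*
	 * The memory-side transfer width is encoded as log2(bytes): pick the
	 * widest width that the buffer address, the length and the master
	 * data bus all allow, and cap it at 32 bits.
	 */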
	mem_width = __ffs(data_width | mem_addr | len);
	if (mem_width > DWAXIDMAC_TRANS_WIDTH_32)
		mem_width = DWAXIDMAC_TRANS_WIDTH_32;

	if (!IS_ALIGNED(mem_addr, 4)) {
		dev_err(chan->chip->dev, "invalid buffer alignment\n");
		return -EINVAL;
	}

	switch (chan->direction) {
	case DMA_MEM_TO_DEV:
		reg_width = __ffs(chan->config.dst_addr_width);
		device_addr = chan->config.dst_addr;
		ctllo = reg_width << CH_CTL_L_DST_WIDTH_POS |
			mem_width << CH_CTL_L_SRC_WIDTH_POS |
			DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_DST_INC_POS |
			DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS;
		block_ts = len >> mem_width;
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __ffs(chan->config.src_addr_width);
		device_addr = chan->config.src_addr;
		ctllo = reg_width << CH_CTL_L_SRC_WIDTH_POS |
			mem_width << CH_CTL_L_DST_WIDTH_POS |
			DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
			DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_SRC_INC_POS;
		block_ts = len >> reg_width;
		break;
	default:
		return -EINVAL;
	}

	if (block_ts > axi_block_ts)
		return -EINVAL;

	hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
	if (unlikely(!hw_desc->lli))
		return -ENOMEM;

	ctlhi = CH_CTL_H_LLI_VALID;

	if (chan->chip->dw->hdata->restrict_axi_burst_len) {
		burst_len = chan->chip->dw->hdata->axi_rw_burst_len;
		ctlhi |= CH_CTL_H_ARLEN_EN | CH_CTL_H_AWLEN_EN |
			 burst_len << CH_CTL_H_ARLEN_POS |
			 burst_len << CH_CTL_H_AWLEN_POS;
	}

	hw_desc->lli->ctl_hi = cpu_to_le32(ctlhi);

	if (chan->direction == DMA_MEM_TO_DEV) {
		write_desc_sar(hw_desc, mem_addr);
		write_desc_dar(hw_desc, device_addr);
	} else {
		write_desc_sar(hw_desc, device_addr);
		write_desc_dar(hw_desc, mem_addr);
	}

	hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);

	ctllo |= DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
		 DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS;
	hw_desc->lli->ctl_lo = cpu_to_le32(ctllo);

	set_desc_src_master(hw_desc);

	hw_desc->len = len;
	return 0;
}

static size_t calculate_block_len(struct axi_dma_chan *chan,
				  dma_addr_t dma_addr, size_t buf_len,
				  enum dma_transfer_direction direction)
{
	u32 data_width, reg_width, mem_width;
	size_t axi_block_ts, block_len;

	axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];

	switch (direction) {
	case DMA_MEM_TO_DEV:
		data_width = BIT(chan->chip->dw->hdata->m_data_width);
		mem_width = __ffs(data_width | dma_addr | buf_len);
		if (mem_width > DWAXIDMAC_TRANS_WIDTH_32)
			mem_width = DWAXIDMAC_TRANS_WIDTH_32;

		block_len = axi_block_ts << mem_width;
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __ffs(chan->config.src_addr_width);
		block_len = axi_block_ts << reg_width;
		break;
	default:
		block_len = 0;
	}

	return block_len;
}

static struct dma_async_tx_descriptor *
dw_axi_dma_chan_prep_cyclic(struct dma_chan *dchan, dma_addr_t dma_addr,
			    size_t buf_len, size_t period_len,
			    enum dma_transfer_direction direction,
			    unsigned long flags)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	struct axi_dma_hw_desc *hw_desc = NULL;
	struct axi_dma_desc *desc = NULL;
	dma_addr_t src_addr = dma_addr;
	u32 num_periods, num_segments;
	size_t axi_block_len;
	u32 total_segments;
	u32 segment_len;
	unsigned int i;
	int status;
	u64 llp = 0;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	num_periods = buf_len / period_len;

	axi_block_len = calculate_block_len(chan, dma_addr, buf_len, direction);
	if (axi_block_len == 0)
		return NULL;
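
	/*
	 * A period may be larger than the maximum block size the channel can
	 * handle, so split each period into num_segments blocks and chain
	 * every block of every period through the LLI list.
	 */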
	num_segments = DIV_ROUND_UP(period_len, axi_block_len);
	segment_len = DIV_ROUND_UP(period_len, num_segments);

	total_segments = num_periods * num_segments;

	desc = axi_desc_alloc(total_segments);
	if (unlikely(!desc))
		goto err_desc_get;

	chan->direction = direction;
	desc->chan = chan;
	chan->cyclic = true;
	desc->length = 0;
	desc->period_len = period_len;

	for (i = 0; i < total_segments; i++) {
		hw_desc = &desc->hw_desc[i];

		status = dw_axi_dma_set_hw_desc(chan, hw_desc, src_addr,
						segment_len);
		if (status < 0)
			goto err_desc_get;

		desc->length += hw_desc->len;
		/*
		 * Set end-of-link on every linked descriptor, so that the
		 * cyclic callback can be triggered from the interrupt handler.
		 */
		set_desc_last(hw_desc);

		src_addr += segment_len;
	}

	llp = desc->hw_desc[0].llp;

	/* Managed transfer list */
	do {
		hw_desc = &desc->hw_desc[--total_segments];
		write_desc_llp(hw_desc, llp | lms);
		llp = hw_desc->llp;
	} while (total_segments);

	dw_axi_dma_set_hw_channel(chan, true);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);

err_desc_get:
	if (desc)
		axi_desc_put(desc);

	return NULL;
}

static struct dma_async_tx_descriptor *
dw_axi_dma_chan_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
			      unsigned int sg_len,
			      enum dma_transfer_direction direction,
			      unsigned long flags, void *context)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	struct axi_dma_hw_desc *hw_desc = NULL;
	struct axi_dma_desc *desc = NULL;
	u32 num_segments, segment_len;
	unsigned int loop = 0;
	struct scatterlist *sg;
	size_t axi_block_len;
	u32 len, num_sgs = 0;
	unsigned int i;
	dma_addr_t mem;
	int status;
	u64 llp = 0;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	if (unlikely(!is_slave_direction(direction) || !sg_len))
		return NULL;

	mem = sg_dma_address(sgl);
	len = sg_dma_len(sgl);

	axi_block_len = calculate_block_len(chan, mem, len, direction);
	if (axi_block_len == 0)
		return NULL;
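
	/*
	 * Count the hardware descriptors first: every scatterlist entry may
	 * need to be split into several blocks of at most axi_block_len
	 * bytes, and each block gets its own LLI.
	 */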
	for_each_sg(sgl, sg, sg_len, i)
		num_sgs += DIV_ROUND_UP(sg_dma_len(sg), axi_block_len);

	desc = axi_desc_alloc(num_sgs);
	if (unlikely(!desc))
		goto err_desc_get;

	desc->chan = chan;
	desc->length = 0;
	chan->direction = direction;

	for_each_sg(sgl, sg, sg_len, i) {
		mem = sg_dma_address(sg);
		len = sg_dma_len(sg);
		num_segments = DIV_ROUND_UP(sg_dma_len(sg), axi_block_len);
		segment_len = DIV_ROUND_UP(sg_dma_len(sg), num_segments);

		do {
			hw_desc = &desc->hw_desc[loop++];
			status = dw_axi_dma_set_hw_desc(chan, hw_desc, mem, segment_len);
			if (status < 0)
				goto err_desc_get;

			desc->length += hw_desc->len;
			len -= segment_len;
			mem += segment_len;
		} while (len >= segment_len);
	}

	/* Set end-of-link to the last link descriptor of list */
	set_desc_last(&desc->hw_desc[num_sgs - 1]);

	/* Managed transfer list */
	do {
		hw_desc = &desc->hw_desc[--num_sgs];
		write_desc_llp(hw_desc, llp | lms);
		llp = hw_desc->llp;
	} while (num_sgs);

	dw_axi_dma_set_hw_channel(chan, true);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);

err_desc_get:
	if (desc)
		axi_desc_put(desc);

	return NULL;
}

static struct dma_async_tx_descriptor *
dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
			 dma_addr_t src_adr, size_t len, unsigned long flags)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	size_t block_ts, max_block_ts, xfer_len;
	struct axi_dma_hw_desc *hw_desc = NULL;
	struct axi_dma_desc *desc = NULL;
	u32 xfer_width, reg, num;
	u64 llp = 0;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	dev_dbg(chan2dev(chan), "%s: memcpy: src: %pad dst: %pad length: %zd flags: %#lx",
		axi_chan_name(chan), &src_adr, &dst_adr, len, flags);

	max_block_ts = chan->chip->dw->hdata->block_size[chan->id];
	xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, len);
	num = DIV_ROUND_UP(len, max_block_ts << xfer_width);
	desc = axi_desc_alloc(num);
	if (unlikely(!desc))
		goto err_desc_get;

	desc->chan = chan;
	num = 0;
	desc->length = 0;
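
	/*
	 * Split the copy into blocks of at most max_block_ts data items. The
	 * transfer width is re-evaluated on every iteration because the
	 * source and destination addresses advance by xfer_len each time.
	 */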
	while (len) {
		xfer_len = len;

		hw_desc = &desc->hw_desc[num];
		/*
		 * Take care of the alignment.
		 * Actually source and destination widths can be different, but
		 * make them the same to keep things simple.
		 */
		xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, xfer_len);

		/*
		 * block_ts indicates the total number of data items of width
		 * xfer_width to be transferred in a DMA block transfer.
		 * The BLOCK_TS register should be set to block_ts - 1.
		 */
		block_ts = xfer_len >> xfer_width;
		if (block_ts > max_block_ts) {
			block_ts = max_block_ts;
			xfer_len = max_block_ts << xfer_width;
		}

		hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
		if (unlikely(!hw_desc->lli))
			goto err_desc_get;

		write_desc_sar(hw_desc, src_adr);
		write_desc_dar(hw_desc, dst_adr);
		hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);

		reg = CH_CTL_H_LLI_VALID;
		if (chan->chip->dw->hdata->restrict_axi_burst_len) {
			u32 burst_len = chan->chip->dw->hdata->axi_rw_burst_len;

			reg |= (CH_CTL_H_ARLEN_EN |
				burst_len << CH_CTL_H_ARLEN_POS |
				CH_CTL_H_AWLEN_EN |
				burst_len << CH_CTL_H_AWLEN_POS);
		}
		hw_desc->lli->ctl_hi = cpu_to_le32(reg);

		reg = (DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
		       DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS |
		       xfer_width << CH_CTL_L_DST_WIDTH_POS |
		       xfer_width << CH_CTL_L_SRC_WIDTH_POS |
		       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
		       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS);
		hw_desc->lli->ctl_lo = cpu_to_le32(reg);

		set_desc_src_master(hw_desc);
		set_desc_dest_master(hw_desc, desc);

		hw_desc->len = xfer_len;
		desc->length += hw_desc->len;
		/* Update the length and addresses for the next loop cycle */
		len -= xfer_len;
		dst_adr += xfer_len;
		src_adr += xfer_len;
		num++;
	}

	/* Set end-of-link to the last link descriptor of list */
	set_desc_last(&desc->hw_desc[num - 1]);
	/* Managed transfer list */
	do {
		hw_desc = &desc->hw_desc[--num];
		write_desc_llp(hw_desc, llp | lms);
		llp = hw_desc->llp;
	} while (num);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);

err_desc_get:
	if (desc)
		axi_desc_put(desc);
	return NULL;
}

static int dw_axi_dma_chan_slave_config(struct dma_chan *dchan,
					struct dma_slave_config *config)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	memcpy(&chan->config, config, sizeof(*config));

	return 0;
}

static void axi_chan_dump_lli(struct axi_dma_chan *chan,
			      struct axi_dma_hw_desc *desc)
{
	if (!desc->lli) {
		dev_err(dchan2dev(&chan->vc.chan), "NULL LLI\n");
		return;
	}

	dev_err(dchan2dev(&chan->vc.chan),
		"SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x",
		le64_to_cpu(desc->lli->sar),
		le64_to_cpu(desc->lli->dar),
		le64_to_cpu(desc->lli->llp),
		le32_to_cpu(desc->lli->block_ts_lo),
		le32_to_cpu(desc->lli->ctl_hi),
		le32_to_cpu(desc->lli->ctl_lo));
}

static void axi_chan_list_dump_lli(struct axi_dma_chan *chan,
				   struct axi_dma_desc *desc_head)
{
	int count = atomic_read(&chan->descs_allocated);
	int i;

	for (i = 0; i < count; i++)
		axi_chan_dump_lli(chan, &desc_head->hw_desc[i]);
}

static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
{
	struct virt_dma_desc *vd;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);

	axi_chan_disable(chan);

	/* The bad descriptor currently is in the head of vc list */
	vd = vchan_next_desc(&chan->vc);
	if (!vd) {
		dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
			axi_chan_name(chan));
		goto out;
	}
	/* Remove the completed descriptor from issued list */
	list_del(&vd->node);

	/* WARN about bad descriptor */
	dev_err(chan2dev(chan),
		"Bad descriptor submitted for %s, cookie: %d, irq: 0x%08x\n",
		axi_chan_name(chan), vd->tx.cookie, status);
	axi_chan_list_dump_lli(chan, vd_to_axi_desc(vd));

	vchan_cookie_complete(vd);

	/* Try to restart the controller */
	axi_chan_start_first_queued(chan);

out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
{
	int count = atomic_read(&chan->descs_allocated);
	struct axi_dma_hw_desc *hw_desc;
	struct axi_dma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;
	u64 llp;
	int i;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (unlikely(axi_chan_is_hw_enable(chan))) {
		dev_err(chan2dev(chan), "BUG: %s caught DWAXIDMAC_IRQ_DMA_TRF, but channel not idle!\n",
			axi_chan_name(chan));
		axi_chan_disable(chan);
	}

	/* The completed descriptor currently is in the head of vc list */
	vd = vchan_next_desc(&chan->vc);
	if (!vd) {
		dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
			axi_chan_name(chan));
		goto out;
	}
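
	/*
	 * For a cyclic transfer the channel keeps running: look up the LLI
	 * the hardware will fetch next (CH_LLP), re-validate the block that
	 * just completed, fire the period callback whenever a whole period
	 * has been transferred, and re-enable the channel. Non-cyclic
	 * descriptors are simply completed and removed from the issued list.
	 */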
	if (chan->cyclic) {
		desc = vd_to_axi_desc(vd);
		if (desc) {
			llp = lo_hi_readq(chan->chan_regs + CH_LLP);
			for (i = 0; i < count; i++) {
				hw_desc = &desc->hw_desc[i];
				if (hw_desc->llp == llp) {
					axi_chan_irq_clear(chan, hw_desc->lli->status_lo);
					hw_desc->lli->ctl_hi |= CH_CTL_H_LLI_VALID;
					desc->completed_blocks = i;

					if (((hw_desc->len * (i + 1)) % desc->period_len) == 0)
						vchan_cyclic_callback(vd);
					break;
				}
			}

			axi_chan_enable(chan);
		}
	} else {
		/* Remove the completed descriptor from issued list before completing */
		list_del(&vd->node);
		vchan_cookie_complete(vd);
	}

out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id)
{
	struct axi_dma_chip *chip = dev_id;
	struct dw_axi_dma *dw = chip->dw;
	struct axi_dma_chan *chan;

	u32 status, i;

	/* Disable DMAC interrupts. We'll enable them after processing channels */
	axi_dma_irq_disable(chip);

	/* Poll, clear and process every channel interrupt status */
	for (i = 0; i < dw->hdata->nr_channels; i++) {
		chan = &dw->chan[i];
		status = axi_chan_irq_read(chan);
		axi_chan_irq_clear(chan, status);

		dev_vdbg(chip->dev, "%s %u IRQ status: 0x%08x\n",
			axi_chan_name(chan), i, status);

		if (status & DWAXIDMAC_IRQ_ALL_ERR)
			axi_chan_handle_err(chan, status);
		else if (status & DWAXIDMAC_IRQ_DMA_TRF)
			axi_chan_block_xfer_complete(chan);
	}

	/* Re-enable interrupts */
	axi_dma_irq_enable(chip);

	return IRQ_HANDLED;
}

static int dma_chan_terminate_all(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	u32 chan_active = BIT(chan->id) << DMAC_CHAN_EN_SHIFT;
	unsigned long flags;
	u32 val;
	int ret;
	LIST_HEAD(head);

	axi_chan_disable(chan);

	ret = readl_poll_timeout_atomic(chan->chip->regs + DMAC_CHEN, val,
					!(val & chan_active), 1000, 50000);
	if (ret == -ETIMEDOUT)
		dev_warn(dchan2dev(dchan),
			 "%s failed to stop\n", axi_chan_name(chan));

	if (chan->direction != DMA_MEM_TO_MEM)
		dw_axi_dma_set_hw_channel(chan, false);
	if (chan->direction == DMA_MEM_TO_DEV)
		dw_axi_dma_set_byte_halfword(chan, false);

	spin_lock_irqsave(&chan->vc.lock, flags);

	vchan_get_all_descriptors(&chan->vc, &head);

	chan->cyclic = false;
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	vchan_dma_desc_free_list(&chan->vc, &head);

	dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan));

	return 0;
}

static int dma_chan_pause(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;
	unsigned int timeout = 20; /* timeout iterations */
	u64 val;

	spin_lock_irqsave(&chan->vc.lock, flags);

	if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) {
		val = axi_dma_ioread64(chan->chip, DMAC_CHSUSPREG);
		if (chan->id >= DMAC_CHAN_16) {
			val |= (u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_SUSP2_SHIFT + DMAC_CHAN_BLOCK_SHIFT) |
				(u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_SUSP2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT);
		} else {
			val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |
			       BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;
		}
		axi_dma_iowrite64(chan->chip, DMAC_CHSUSPREG, val);
	} else {
		if (chan->chip->dw->hdata->reg_map_8_channels) {
			val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
			val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT |
			       BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
			axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val);
		} else {
			val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
			val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |
			       BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;
			axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, (u32)val);
		}
	}
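
	/*
	 * The write above only requests a suspend; poll the SUSPENDED status
	 * bit for up to ~40 us (20 iterations x 2 us). -EAGAIN is returned if
	 * the hardware never acknowledges the request.
	 */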
	do {
		if (axi_chan_irq_read(chan) & DWAXIDMAC_IRQ_SUSPENDED)
			break;

		udelay(2);
	} while (--timeout);

	axi_chan_irq_clear(chan, DWAXIDMAC_IRQ_SUSPENDED);

	chan->is_paused = true;

	spin_unlock_irqrestore(&chan->vc.lock, flags);

	return timeout ? 0 : -EAGAIN;
}

/* Called in chan locked context */
static inline void axi_chan_resume(struct axi_dma_chan *chan)
{
	u64 val;

	if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) {
		val = axi_dma_ioread64(chan->chip, DMAC_CHSUSPREG);
		if (chan->id >= DMAC_CHAN_16) {
			val &= ~((u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_SUSP2_SHIFT + DMAC_CHAN_BLOCK_SHIFT));
			val |= ((u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_SUSP2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT));
		} else {
			val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT);
			val |= (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT);
		}
		axi_dma_iowrite64(chan->chip, DMAC_CHSUSPREG, val);
	} else {
		if (chan->chip->dw->hdata->reg_map_8_channels) {
			val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
			val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT);
			val |= (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
			axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val);
		} else {
			val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
			val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT);
			val |= (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT);
			axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, (u32)val);
		}
	}

	chan->is_paused = false;
}

static int dma_chan_resume(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);

	if (chan->is_paused)
		axi_chan_resume(chan);

	spin_unlock_irqrestore(&chan->vc.lock, flags);

	return 0;
}

static int axi_dma_suspend(struct axi_dma_chip *chip)
{
	axi_dma_irq_disable(chip);
	axi_dma_disable(chip);

	clk_disable_unprepare(chip->core_clk);
	clk_disable_unprepare(chip->cfgr_clk);

	return 0;
}

static int axi_dma_resume(struct axi_dma_chip *chip)
{
	int ret;

	ret = clk_prepare_enable(chip->cfgr_clk);
	if (ret < 0)
		return ret;

	ret = clk_prepare_enable(chip->core_clk);
	if (ret < 0)
		return ret;

	axi_dma_enable(chip);
	axi_dma_irq_enable(chip);

	return 0;
}

static int __maybe_unused axi_dma_runtime_suspend(struct device *dev)
{
	struct axi_dma_chip *chip = dev_get_drvdata(dev);

	return axi_dma_suspend(chip);
}

static int __maybe_unused axi_dma_runtime_resume(struct device *dev)
{
	struct axi_dma_chip *chip = dev_get_drvdata(dev);

	return axi_dma_resume(chip);
}

static struct dma_chan *dw_axi_dma_of_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct dw_axi_dma *dw = ofdma->of_dma_data;
	struct axi_dma_chan *chan;
	struct dma_chan *dchan;

	dchan = dma_get_any_slave_channel(&dw->dma);
	if (!dchan)
		return NULL;

	chan = dchan_to_axi_dma_chan(dchan);
	chan->hw_handshake_num = dma_spec->args[0];
	return dchan;
}

static int parse_device_properties(struct axi_dma_chip *chip)
{
	struct device *dev = chip->dev;
	u32 tmp, carr[DMAC_MAX_CHANNELS];
	int ret;

	ret = device_property_read_u32(dev, "dma-channels", &tmp);
	if (ret)
		return ret;
	if (tmp == 0 || tmp > DMAC_MAX_CHANNELS)
		return -EINVAL;

	chip->dw->hdata->nr_channels = tmp;
	if (tmp <= DMA_REG_MAP_CH_REF)
		chip->dw->hdata->reg_map_8_channels = true;

	ret = device_property_read_u32(dev, "snps,dma-masters", &tmp);
	if (ret)
		return ret;
	if (tmp == 0 || tmp > DMAC_MAX_MASTERS)
		return -EINVAL;

	chip->dw->hdata->nr_masters = tmp;

	ret = device_property_read_u32(dev, "snps,data-width", &tmp);
	if (ret)
		return ret;
	if (tmp > DWAXIDMAC_TRANS_WIDTH_MAX)
		return -EINVAL;

	chip->dw->hdata->m_data_width = tmp;

	ret = device_property_read_u32_array(dev, "snps,block-size", carr,
					     chip->dw->hdata->nr_channels);
	if (ret)
		return ret;
	for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
		if (carr[tmp] == 0 || carr[tmp] > DMAC_MAX_BLK_SIZE)
			return -EINVAL;

		chip->dw->hdata->block_size[tmp] = carr[tmp];
	}

	ret = device_property_read_u32_array(dev, "snps,priority", carr,
					     chip->dw->hdata->nr_channels);
	if (ret)
		return ret;
	/* Priority value must be programmed within [0:nr_channels-1] range */
	for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
		if (carr[tmp] >= chip->dw->hdata->nr_channels)
			return -EINVAL;

		chip->dw->hdata->priority[tmp] = carr[tmp];
	}

	/* axi-max-burst-len is an optional property */
	ret = device_property_read_u32(dev, "snps,axi-max-burst-len", &tmp);
	if (!ret) {
		if (tmp > DWAXIDMAC_ARWLEN_MAX + 1)
			return -EINVAL;
		if (tmp < DWAXIDMAC_ARWLEN_MIN + 1)
			return -EINVAL;

		chip->dw->hdata->restrict_axi_burst_len = true;
		chip->dw->hdata->axi_rw_burst_len = tmp;
	}

	return 0;
}

static int axi_req_irqs(struct platform_device *pdev, struct axi_dma_chip *chip)
{
	int irq_count = platform_irq_count(pdev);
	int ret;

	for (int i = 0; i < irq_count; i++) {
		chip->irq[i] = platform_get_irq(pdev, i);
		if (chip->irq[i] < 0)
			return chip->irq[i];
		ret = devm_request_irq(chip->dev, chip->irq[i], dw_axi_dma_interrupt,
				       IRQF_SHARED, KBUILD_MODNAME, chip);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static int dw_probe(struct platform_device *pdev)
{
	struct axi_dma_chip *chip;
	struct dw_axi_dma *dw;
	struct dw_axi_dma_hcfg *hdata;
	struct reset_control *resets;
	unsigned int flags;
	u32 i;
	int ret;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	dw = devm_kzalloc(&pdev->dev, sizeof(*dw), GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	hdata = devm_kzalloc(&pdev->dev, sizeof(*hdata), GFP_KERNEL);
	if (!hdata)
		return -ENOMEM;

	chip->dw = dw;
	chip->dev = &pdev->dev;
	chip->dw->hdata = hdata;

	chip->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	flags = (uintptr_t)of_device_get_match_data(&pdev->dev);
	if (flags & AXI_DMA_FLAG_HAS_APB_REGS) {
		chip->apb_regs = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(chip->apb_regs))
			return PTR_ERR(chip->apb_regs);
	}

	if (flags & AXI_DMA_FLAG_HAS_RESETS) {
		resets = devm_reset_control_array_get_exclusive(&pdev->dev);
		if (IS_ERR(resets))
			return PTR_ERR(resets);

		ret = reset_control_deassert(resets);
		if (ret)
			return ret;
	}

	chip->dw->hdata->use_cfg2 = !!(flags & AXI_DMA_FLAG_USE_CFG2);

	chip->core_clk = devm_clk_get(chip->dev, "core-clk");
	if (IS_ERR(chip->core_clk))
		return PTR_ERR(chip->core_clk);

	chip->cfgr_clk = devm_clk_get(chip->dev, "cfgr-clk");
	if (IS_ERR(chip->cfgr_clk))
		return PTR_ERR(chip->cfgr_clk);

	ret = parse_device_properties(chip);
	if (ret)
		return ret;

	dw->chan = devm_kcalloc(chip->dev, hdata->nr_channels,
				sizeof(*dw->chan), GFP_KERNEL);
	if (!dw->chan)
		return -ENOMEM;

	ret = axi_req_irqs(pdev, chip);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < hdata->nr_channels; i++) {
		struct axi_dma_chan *chan = &dw->chan[i];

		chan->chip = chip;
		chan->id = i;
		chan->chan_regs = chip->regs + COMMON_REG_LEN + i * CHAN_REG_LEN;
		atomic_set(&chan->descs_allocated, 0);

		chan->vc.desc_free = vchan_desc_put;
		vchan_init(&chan->vc, &dw->dma);
	}

	/* Set capabilities */
	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	dma_cap_set(DMA_CYCLIC, dw->dma.cap_mask);

	/* DMA capabilities */
	dw->dma.max_burst = hdata->axi_rw_burst_len;
	dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS;
	dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;
	dw->dma.directions = BIT(DMA_MEM_TO_MEM);
	dw->dma.directions |= BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	dw->dma.dev = chip->dev;
	dw->dma.device_tx_status = dma_chan_tx_status;
	dw->dma.device_issue_pending = dma_chan_issue_pending;
	dw->dma.device_terminate_all = dma_chan_terminate_all;
	dw->dma.device_pause = dma_chan_pause;
	dw->dma.device_resume = dma_chan_resume;

	dw->dma.device_alloc_chan_resources = dma_chan_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dma_chan_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dma_chan_prep_dma_memcpy;
	dw->dma.device_synchronize = dw_axi_dma_synchronize;
	dw->dma.device_config = dw_axi_dma_chan_slave_config;
	dw->dma.device_prep_slave_sg = dw_axi_dma_chan_prep_slave_sg;
	dw->dma.device_prep_dma_cyclic = dw_axi_dma_chan_prep_cyclic;

	/*
	 * The Synopsys DesignWare AXI DMA datasheet states that the maximum
	 * number of supported blocks is 1024 and the device register width is
	 * 4 bytes, so constrain the maximum segment size to 1024 * 4.
	 */
	dw->dma.dev->dma_parms = &dw->dma_parms;
	dma_set_max_seg_size(&pdev->dev, MAX_BLOCK_SIZE);
	platform_set_drvdata(pdev, chip);

	pm_runtime_enable(chip->dev);

	/*
	 * We can't just call pm_runtime_get here instead of
	 * pm_runtime_get_noresume + axi_dma_resume because we need the
	 * driver to work also without Runtime PM.
	 */
	pm_runtime_get_noresume(chip->dev);
	ret = axi_dma_resume(chip);
	if (ret < 0)
		goto err_pm_disable;

	axi_dma_hw_init(chip);

	pm_runtime_put(chip->dev);

	ret = dmaenginem_async_device_register(&dw->dma);
	if (ret)
		goto err_pm_disable;

	/* Register with OF helpers for DMA lookups */
	ret = of_dma_controller_register(pdev->dev.of_node,
					 dw_axi_dma_of_xlate, dw);
	if (ret < 0)
		dev_warn(&pdev->dev,
			 "Failed to register OF DMA controller, fallback to MEM_TO_MEM mode\n");

	dev_info(chip->dev, "DesignWare AXI DMA Controller, %d channels\n",
		 dw->hdata->nr_channels);

	return 0;

err_pm_disable:
	pm_runtime_disable(chip->dev);

	return ret;
}

static void dw_remove(struct platform_device *pdev)
{
	struct axi_dma_chip *chip = platform_get_drvdata(pdev);
	struct dw_axi_dma *dw = chip->dw;
	struct axi_dma_chan *chan, *_chan;
	u32 i;

	/* Enable the clocks before accessing the registers */
	clk_prepare_enable(chip->cfgr_clk);
	clk_prepare_enable(chip->core_clk);
	axi_dma_irq_disable(chip);
	for (i = 0; i < dw->hdata->nr_channels; i++) {
		axi_chan_disable(&chip->dw->chan[i]);
		axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
	}
	axi_dma_disable(chip);

	pm_runtime_disable(chip->dev);
	axi_dma_suspend(chip);

	for (i = 0; i < DMAC_MAX_CHANNELS; i++)
		if (chip->irq[i] > 0)
			devm_free_irq(chip->dev, chip->irq[i], chip);

	of_dma_controller_free(chip->dev->of_node);

	list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
			vc.chan.device_node) {
		list_del(&chan->vc.chan.device_node);
		tasklet_kill(&chan->vc.task);
	}
}

static const struct dev_pm_ops dw_axi_dma_pm_ops = {
	SET_RUNTIME_PM_OPS(axi_dma_runtime_suspend, axi_dma_runtime_resume, NULL)
};

static const struct of_device_id dw_dma_of_id_table[] = {
	{
		.compatible = "snps,axi-dma-1.01a"
	}, {
		.compatible = "intel,kmb-axi-dma",
		.data = (void *)AXI_DMA_FLAG_HAS_APB_REGS,
	}, {
		.compatible = "starfive,jh7110-axi-dma",
		.data = (void *)(AXI_DMA_FLAG_HAS_RESETS | AXI_DMA_FLAG_USE_CFG2),
	}, {
		.compatible = "starfive,jh8100-axi-dma",
		.data = (void *)AXI_DMA_FLAG_HAS_RESETS,
	},
	{}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);

static struct platform_driver dw_driver = {
	.probe		= dw_probe,
	.remove		= dw_remove,
	.driver = {
		.name	= KBUILD_MODNAME,
		.of_match_table = dw_dma_of_id_table,
		.pm = &dw_axi_dma_pm_ops,
	},
};
module_platform_driver(dw_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare AXI DMA Controller platform driver");
MODULE_AUTHOR("Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>");