// SPDX-License-Identifier: GPL-2.0
// (C) 2017-2018 Synopsys, Inc. (www.synopsys.com)

/*
 * Synopsys DesignWare AXI DMA Controller driver.
 *
 * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
 */
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/types.h>
#include "dw-axi-dmac.h"
#include "../dmaengine.h"
#include "../virt-dma.h"
/*
 * The set of bus widths supported by the DMA controller. DW AXI DMAC supports
 * master data bus width up to 512 bits (for both AXI master interfaces), but
 * it depends on IP block configuration.
 */
#define AXI_DMA_BUSWIDTHS                 \
        (DMA_SLAVE_BUSWIDTH_1_BYTE      | \
         DMA_SLAVE_BUSWIDTH_2_BYTES     | \
         DMA_SLAVE_BUSWIDTH_4_BYTES     | \
         DMA_SLAVE_BUSWIDTH_8_BYTES     | \
         DMA_SLAVE_BUSWIDTH_16_BYTES    | \
         DMA_SLAVE_BUSWIDTH_32_BYTES    | \
         DMA_SLAVE_BUSWIDTH_64_BYTES)
static inline void
axi_dma_iowrite32(struct axi_dma_chip *chip, u32 reg, u32 val)
{
        iowrite32(val, chip->regs + reg);
}

static inline u32 axi_dma_ioread32(struct axi_dma_chip *chip, u32 reg)
{
        return ioread32(chip->regs + reg);
}

static inline void
axi_chan_iowrite32(struct axi_dma_chan *chan, u32 reg, u32 val)
{
        iowrite32(val, chan->chan_regs + reg);
}

static inline u32 axi_chan_ioread32(struct axi_dma_chan *chan, u32 reg)
{
        return ioread32(chan->chan_regs + reg);
}

static inline void
axi_chan_iowrite64(struct axi_dma_chan *chan, u32 reg, u64 val)
{
        /*
         * We split one 64-bit write into two 32-bit writes as some HW
         * doesn't support 64-bit access.
         */
        iowrite32(lower_32_bits(val), chan->chan_regs + reg);
        iowrite32(upper_32_bits(val), chan->chan_regs + reg + 4);
}
static inline void axi_dma_disable(struct axi_dma_chip *chip)
{
        u32 val;

        val = axi_dma_ioread32(chip, DMAC_CFG);
        val &= ~DMAC_EN_MASK;
        axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_enable(struct axi_dma_chip *chip)
{
        u32 val;

        val = axi_dma_ioread32(chip, DMAC_CFG);
        val |= DMAC_EN_MASK;
        axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_irq_disable(struct axi_dma_chip *chip)
{
        u32 val;

        val = axi_dma_ioread32(chip, DMAC_CFG);
        val &= ~INT_EN_MASK;
        axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_irq_enable(struct axi_dma_chip *chip)
{
        u32 val;

        val = axi_dma_ioread32(chip, DMAC_CFG);
        val |= INT_EN_MASK;
        axi_dma_iowrite32(chip, DMAC_CFG, val);
}
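
/*
 * Note: DMAC_CFG carries the controller-wide enable (DMAC_EN_MASK) and the
 * global interrupt enable (INT_EN_MASK) used by the helpers above.
 * Per-channel interrupt masking is done separately through the channel
 * CH_INTSTATUS_ENA and CH_INTSIGNAL_ENA registers handled below.
 */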
static inline void axi_chan_irq_disable(struct axi_dma_chan *chan, u32 irq_mask)
{
        u32 val;

        if (likely(irq_mask == DWAXIDMAC_IRQ_ALL)) {
                axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, DWAXIDMAC_IRQ_NONE);
        } else {
                val = axi_chan_ioread32(chan, CH_INTSTATUS_ENA);
                val &= ~irq_mask;
                axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, val);
        }
}

static inline void axi_chan_irq_set(struct axi_dma_chan *chan, u32 irq_mask)
{
        axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, irq_mask);
}

static inline void axi_chan_irq_sig_set(struct axi_dma_chan *chan, u32 irq_mask)
{
        axi_chan_iowrite32(chan, CH_INTSIGNAL_ENA, irq_mask);
}

static inline void axi_chan_irq_clear(struct axi_dma_chan *chan, u32 irq_mask)
{
        axi_chan_iowrite32(chan, CH_INTCLEAR, irq_mask);
}
static inline u32 axi_chan_irq_read(struct axi_dma_chan *chan)
{
        return axi_chan_ioread32(chan, CH_INTSTATUS);
}
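
/*
 * DMAC_CHEN pairs each channel's enable (and suspend) bit with a matching
 * write-enable bit (the *_WE_SHIFT fields used below), so a write only takes
 * effect for the channel whose write-enable bit is set and does not disturb
 * the other channels' state.
 */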
static inline void axi_chan_disable(struct axi_dma_chan *chan)
{
        u32 val;

        val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
        val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
        val |= BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
        axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
}

static inline void axi_chan_enable(struct axi_dma_chan *chan)
{
        u32 val;

        val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
        val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
               BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
        axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
}

static inline bool axi_chan_is_hw_enable(struct axi_dma_chan *chan)
{
        u32 val;

        val = axi_dma_ioread32(chan->chip, DMAC_CHEN);

        return !!(val & (BIT(chan->id) << DMAC_CHAN_EN_SHIFT));
}
static void axi_dma_hw_init(struct axi_dma_chip *chip)
{
        u32 i;

        for (i = 0; i < chip->dw->hdata->nr_channels; i++) {
                axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
                axi_chan_disable(&chip->dw->chan[i]);
        }
}
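
/*
 * axi_chan_get_xfer_width() below picks the largest power-of-two width that
 * src, dst and len are all aligned to, capped at the configured master data
 * width: __ffs() of the OR-ed values returns the lowest set bit. For example,
 * src/dst/len that are all multiples of 4 but not of 8 yield a width encoding
 * of 2, i.e. 32-bit items.
 */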
static u32 axi_chan_get_xfer_width(struct axi_dma_chan *chan, dma_addr_t src,
                                   dma_addr_t dst, size_t len)
{
        u32 max_width = chan->chip->dw->hdata->m_data_width;

        return __ffs(src | dst | len | BIT(max_width));
}
static inline const char *axi_chan_name(struct axi_dma_chan *chan)
{
        return dma_chan_name(&chan->vc.chan);
}
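
/*
 * Descriptor bookkeeping: each axi_dma_desc comes from the shared DMA pool,
 * chan->descs_allocated counts live descriptors per channel, and multi-block
 * transfers chain their extra descriptors on the first descriptor's xfer_list.
 */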
static struct axi_dma_desc *axi_desc_get(struct axi_dma_chan *chan)
{
        struct dw_axi_dma *dw = chan->chip->dw;
        struct axi_dma_desc *desc;
        dma_addr_t phys;

        desc = dma_pool_zalloc(dw->desc_pool, GFP_NOWAIT, &phys);
        if (unlikely(!desc)) {
                dev_err(chan2dev(chan), "%s: not enough descriptors available\n",
                        axi_chan_name(chan));
                return NULL;
        }

        atomic_inc(&chan->descs_allocated);
        INIT_LIST_HEAD(&desc->xfer_list);
        desc->vd.tx.phys = phys;
        desc->chan = chan;

        return desc;
}
static void axi_desc_put(struct axi_dma_desc *desc)
{
        struct axi_dma_chan *chan = desc->chan;
        struct dw_axi_dma *dw = chan->chip->dw;
        struct axi_dma_desc *child, *_next;
        unsigned int descs_put = 0;

        list_for_each_entry_safe(child, _next, &desc->xfer_list, xfer_list) {
                list_del(&child->xfer_list);
                dma_pool_free(dw->desc_pool, child, child->vd.tx.phys);
                descs_put++;
        }

        dma_pool_free(dw->desc_pool, desc, desc->vd.tx.phys);
        descs_put++;

        atomic_sub(descs_put, &chan->descs_allocated);
        dev_vdbg(chan2dev(chan), "%s: %d descs put, %d still allocated\n",
                 axi_chan_name(chan), descs_put,
                 atomic_read(&chan->descs_allocated));
}
static void vchan_desc_put(struct virt_dma_desc *vdesc)
{
        axi_desc_put(vd_to_axi_desc(vdesc));
}
static enum dma_status
dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
                   struct dma_tx_state *txstate)
{
        struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
        enum dma_status ret;

        ret = dma_cookie_status(dchan, cookie, txstate);

        if (chan->is_paused && ret == DMA_IN_PROGRESS)
                ret = DMA_PAUSED;

        return ret;
}
static void write_desc_llp(struct axi_dma_desc *desc, dma_addr_t adr)
{
        desc->lli.llp = cpu_to_le64(adr);
}

static void write_chan_llp(struct axi_dma_chan *chan, dma_addr_t adr)
{
        axi_chan_iowrite64(chan, CH_LLP, adr);
}
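
/*
 * Starting a block transfer below: CH_CFG_L selects linked-list multi-block
 * mode for both directions, CH_CFG_H selects mem-to-mem with the DMAC as flow
 * controller, then the LLI chain address is written to CH_LLP before the
 * channel is enabled. DWAXIDMAC_IRQ_SUSPENDED is enabled as a status bit only
 * (not routed to the interrupt line) so that dma_chan_pause() can poll it.
 */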
/* Called in chan locked context */
static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
                                      struct axi_dma_desc *first)
{
        u32 priority = chan->chip->dw->hdata->priority[chan->id];
        u32 reg, irq_mask;
        u8 lms = 0; /* Select AXI0 master for LLI fetching */

        if (unlikely(axi_chan_is_hw_enable(chan))) {
                dev_err(chan2dev(chan), "%s is non-idle!\n",
                        axi_chan_name(chan));

                return;
        }

        axi_dma_enable(chan->chip);

        reg = (DWAXIDMAC_MBLK_TYPE_LL << CH_CFG_L_DST_MULTBLK_TYPE_POS |
               DWAXIDMAC_MBLK_TYPE_LL << CH_CFG_L_SRC_MULTBLK_TYPE_POS);
        axi_chan_iowrite32(chan, CH_CFG_L, reg);

        reg = (DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC << CH_CFG_H_TT_FC_POS |
               priority << CH_CFG_H_PRIORITY_POS |
               DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_DST_POS |
               DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_SRC_POS);
        axi_chan_iowrite32(chan, CH_CFG_H, reg);

        write_chan_llp(chan, first->vd.tx.phys | lms);

        irq_mask = DWAXIDMAC_IRQ_DMA_TRF | DWAXIDMAC_IRQ_ALL_ERR;
        axi_chan_irq_sig_set(chan, irq_mask);

        /* Generate 'suspend' status but don't generate interrupt */
        irq_mask |= DWAXIDMAC_IRQ_SUSPENDED;
        axi_chan_irq_set(chan, irq_mask);

        axi_chan_enable(chan);
}
static void axi_chan_start_first_queued(struct axi_dma_chan *chan)
{
        struct axi_dma_desc *desc;
        struct virt_dma_desc *vd;

        vd = vchan_next_desc(&chan->vc);
        if (!vd)
                return;

        desc = vd_to_axi_desc(vd);
        dev_vdbg(chan2dev(chan), "%s: started %u\n", axi_chan_name(chan),
                 vd->tx.cookie);
        axi_chan_block_xfer_start(chan, desc);
}
static void dma_chan_issue_pending(struct dma_chan *dchan)
{
        struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
        unsigned long flags;

        spin_lock_irqsave(&chan->vc.lock, flags);
        if (vchan_issue_pending(&chan->vc))
                axi_chan_start_first_queued(chan);
        spin_unlock_irqrestore(&chan->vc.lock, flags);
}
static int dma_chan_alloc_chan_resources(struct dma_chan *dchan)
{
        struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

        /* ASSERT: channel is idle */
        if (axi_chan_is_hw_enable(chan)) {
                dev_err(chan2dev(chan), "%s is non-idle!\n",
                        axi_chan_name(chan));
                return -EBUSY;
        }

        dev_vdbg(dchan2dev(dchan), "%s: allocating\n", axi_chan_name(chan));

        pm_runtime_get(chan->chip->dev);

        return 0;
}
static void dma_chan_free_chan_resources(struct dma_chan *dchan)
{
        struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

        /* ASSERT: channel is idle */
        if (axi_chan_is_hw_enable(chan))
                dev_err(dchan2dev(dchan), "%s is non-idle!\n",
                        axi_chan_name(chan));

        axi_chan_disable(chan);
        axi_chan_irq_disable(chan, DWAXIDMAC_IRQ_ALL);

        vchan_free_chan_resources(&chan->vc);

        dev_vdbg(dchan2dev(dchan),
                 "%s: free resources, descriptor still allocated: %u\n",
                 axi_chan_name(chan), atomic_read(&chan->descs_allocated));

        pm_runtime_put(chan->chip->dev);
}
/*
 * If DW_axi_dmac sees the CHx_CTL.ShadowReg_Or_LLI_Last bit of the fetched
 * LLI as 1, it understands that the current block is the final block in the
 * transfer and completes the DMA transfer operation at the end of the current
 * block transfer.
 */
static void set_desc_last(struct axi_dma_desc *desc)
{
        u32 val;

        val = le32_to_cpu(desc->lli.ctl_hi);
        val |= CH_CTL_H_LLI_LAST;
        desc->lli.ctl_hi = cpu_to_le32(val);
}
static void write_desc_sar(struct axi_dma_desc *desc, dma_addr_t adr)
{
        desc->lli.sar = cpu_to_le64(adr);
}

static void write_desc_dar(struct axi_dma_desc *desc, dma_addr_t adr)
{
        desc->lli.dar = cpu_to_le64(adr);
}
static void set_desc_src_master(struct axi_dma_desc *desc)
{
        u32 val;

        /* Select AXI0 for source master */
        val = le32_to_cpu(desc->lli.ctl_lo);
        val &= ~CH_CTL_L_SRC_MAST;
        desc->lli.ctl_lo = cpu_to_le32(val);
}
static void set_desc_dest_master(struct axi_dma_desc *desc)
{
        u32 val;

        /* Select AXI1 for destination master if available */
        val = le32_to_cpu(desc->lli.ctl_lo);
        if (desc->chan->chip->dw->hdata->nr_masters > 1)
                val |= CH_CTL_L_DST_MAST;
        else
                val &= ~CH_CTL_L_DST_MAST;

        desc->lli.ctl_lo = cpu_to_le32(val);
}
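
/*
 * dma_chan_prep_dma_memcpy() below splits the copy into hardware blocks:
 * each LLI moves at most max_block_ts items of the chosen transfer width.
 * Illustrative numbers (not from the databook): with src/dst/len aligned to
 * 4 bytes but not 8 (xfer_width = 2, i.e. 4-byte items) and
 * max_block_ts = 1024, a 10000-byte copy becomes 4096 + 4096 + 1808 bytes
 * spread over three LLIs chained through their llp pointers.
 */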
static struct dma_async_tx_descriptor *
dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
                         dma_addr_t src_adr, size_t len, unsigned long flags)
{
        struct axi_dma_desc *first = NULL, *desc = NULL, *prev = NULL;
        struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
        size_t block_ts, max_block_ts, xfer_len;
        u32 xfer_width, reg;
        u8 lms = 0; /* Select AXI0 master for LLI fetching */

        dev_dbg(chan2dev(chan), "%s: memcpy: src: %pad dst: %pad length: %zd flags: %#lx",
                axi_chan_name(chan), &src_adr, &dst_adr, len, flags);

        max_block_ts = chan->chip->dw->hdata->block_size[chan->id];

        while (len) {
                xfer_len = len;

                /*
                 * Take care of the alignment.
                 * Actually source and destination widths can be different, but
                 * make them the same to keep things simpler.
                 */
                xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, xfer_len);

                /*
                 * block_ts indicates the total number of data items of width
                 * to be transferred in a DMA block transfer.
                 * BLOCK_TS register should be set to block_ts - 1
                 */
                block_ts = xfer_len >> xfer_width;
                if (block_ts > max_block_ts) {
                        block_ts = max_block_ts;
                        xfer_len = max_block_ts << xfer_width;
                }

                desc = axi_desc_get(chan);
                if (unlikely(!desc))
                        goto err_desc_get;

                write_desc_sar(desc, src_adr);
                write_desc_dar(desc, dst_adr);
                desc->lli.block_ts_lo = cpu_to_le32(block_ts - 1);

                reg = CH_CTL_H_LLI_VALID;
                if (chan->chip->dw->hdata->restrict_axi_burst_len) {
                        u32 burst_len = chan->chip->dw->hdata->axi_rw_burst_len;

                        reg |= (CH_CTL_H_ARLEN_EN |
                                burst_len << CH_CTL_H_ARLEN_POS |
                                CH_CTL_H_AWLEN_EN |
                                burst_len << CH_CTL_H_AWLEN_POS);
                }
                desc->lli.ctl_hi = cpu_to_le32(reg);

                reg = (DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
                       DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS |
                       xfer_width << CH_CTL_L_DST_WIDTH_POS |
                       xfer_width << CH_CTL_L_SRC_WIDTH_POS |
                       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
                       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS);
                desc->lli.ctl_lo = cpu_to_le32(reg);

                set_desc_src_master(desc);
                set_desc_dest_master(desc);

                /* Manage transfer list (xfer_list) */
                if (!first) {
                        first = desc;
                } else {
                        list_add_tail(&desc->xfer_list, &first->xfer_list);
                        write_desc_llp(prev, desc->vd.tx.phys | lms);
                }
                prev = desc;

                /* update the length and addresses for the next loop cycle */
                len -= xfer_len;
                dst_adr += xfer_len;
                src_adr += xfer_len;
        }

        /* Total len of src/dest sg == 0, so no descriptors were allocated */
        if (unlikely(!first))
                return NULL;

        /* Set end-of-link to the last link descriptor of list */
        set_desc_last(desc);

        return vchan_tx_prep(&chan->vc, &first->vd, flags);

err_desc_get:
        if (first)
                axi_desc_put(first);
        return NULL;
}
static void axi_chan_dump_lli(struct axi_dma_chan *chan,
                              struct axi_dma_desc *desc)
{
        dev_err(dchan2dev(&chan->vc.chan),
                "SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x",
                le64_to_cpu(desc->lli.sar),
                le64_to_cpu(desc->lli.dar),
                le64_to_cpu(desc->lli.llp),
                le32_to_cpu(desc->lli.block_ts_lo),
                le32_to_cpu(desc->lli.ctl_hi),
                le32_to_cpu(desc->lli.ctl_lo));
}
static void axi_chan_list_dump_lli(struct axi_dma_chan *chan,
                                   struct axi_dma_desc *desc_head)
{
        struct axi_dma_desc *desc;

        axi_chan_dump_lli(chan, desc_head);
        list_for_each_entry(desc, &desc_head->xfer_list, xfer_list)
                axi_chan_dump_lli(chan, desc);
}
static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
{
        struct virt_dma_desc *vd;
        unsigned long flags;

        spin_lock_irqsave(&chan->vc.lock, flags);

        axi_chan_disable(chan);

        /* The bad descriptor currently is in the head of vc list */
        vd = vchan_next_desc(&chan->vc);
        /* Remove the completed descriptor from issued list */
        list_del(&vd->node);

        /* WARN about bad descriptor */
        dev_err(chan2dev(chan),
                "Bad descriptor submitted for %s, cookie: %d, irq: 0x%08x\n",
                axi_chan_name(chan), vd->tx.cookie, status);
        axi_chan_list_dump_lli(chan, vd_to_axi_desc(vd));

        vchan_cookie_complete(vd);

        /* Try to restart the controller */
        axi_chan_start_first_queued(chan);

        spin_unlock_irqrestore(&chan->vc.lock, flags);
}
static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
{
        struct virt_dma_desc *vd;
        unsigned long flags;

        spin_lock_irqsave(&chan->vc.lock, flags);
        if (unlikely(axi_chan_is_hw_enable(chan))) {
                dev_err(chan2dev(chan), "BUG: %s caught DWAXIDMAC_IRQ_DMA_TRF, but channel not idle!\n",
                        axi_chan_name(chan));
                axi_chan_disable(chan);
        }

        /* The completed descriptor currently is in the head of vc list */
        vd = vchan_next_desc(&chan->vc);
        /* Remove the completed descriptor from issued list before completing */
        list_del(&vd->node);
        vchan_cookie_complete(vd);

        /* Submit queued descriptors after processing the completed ones */
        axi_chan_start_first_queued(chan);

        spin_unlock_irqrestore(&chan->vc.lock, flags);
}
static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id)
{
        struct axi_dma_chip *chip = dev_id;
        struct dw_axi_dma *dw = chip->dw;
        struct axi_dma_chan *chan;

        u32 status, i;

        /* Disable DMAC interrupts. We'll enable them after processing channels */
        axi_dma_irq_disable(chip);

        /* Poll, clear and process every channel interrupt status */
        for (i = 0; i < dw->hdata->nr_channels; i++) {
                chan = &dw->chan[i];
                status = axi_chan_irq_read(chan);
                axi_chan_irq_clear(chan, status);

                dev_vdbg(chip->dev, "%s %u IRQ status: 0x%08x\n",
                         axi_chan_name(chan), i, status);

                if (status & DWAXIDMAC_IRQ_ALL_ERR)
                        axi_chan_handle_err(chan, status);
                else if (status & DWAXIDMAC_IRQ_DMA_TRF)
                        axi_chan_block_xfer_complete(chan);
        }

        /* Re-enable interrupts */
        axi_dma_irq_enable(chip);

        return IRQ_HANDLED;
}
static int dma_chan_terminate_all(struct dma_chan *dchan)
{
        struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&chan->vc.lock, flags);

        axi_chan_disable(chan);

        vchan_get_all_descriptors(&chan->vc, &head);

        /*
         * As vchan_dma_desc_free_list can access the desc_allocated list
         * we need to call it in vc.lock context.
         */
        vchan_dma_desc_free_list(&chan->vc, &head);

        spin_unlock_irqrestore(&chan->vc.lock, flags);

        dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan));

        return 0;
}
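
/*
 * Pausing works by writing the channel's SUSP bit together with its SUSP_WE
 * write-enable bit to DMAC_CHEN, then polling CH_INTSTATUS for the
 * DWAXIDMAC_IRQ_SUSPENDED status bit that axi_chan_block_xfer_start() enabled
 * as status-only; the hardware gets a bounded number of poll iterations
 * before the pause is reported as failed.
 */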
static int dma_chan_pause(struct dma_chan *dchan)
{
        struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
        unsigned long flags;
        unsigned int timeout = 20; /* timeout iterations */
        u32 val;

        spin_lock_irqsave(&chan->vc.lock, flags);

        val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
        val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT |
               BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
        axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);

        do {
                if (axi_chan_irq_read(chan) & DWAXIDMAC_IRQ_SUSPENDED)
                        break;

                udelay(2);
        } while (--timeout);

        axi_chan_irq_clear(chan, DWAXIDMAC_IRQ_SUSPENDED);

        chan->is_paused = true;

        spin_unlock_irqrestore(&chan->vc.lock, flags);

        return timeout ? 0 : -EAGAIN;
}
/* Called in chan locked context */
static inline void axi_chan_resume(struct axi_dma_chan *chan)
{
        u32 val;

        val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
        val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT);
        val |=  (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
        axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);

        chan->is_paused = false;
}
static int dma_chan_resume(struct dma_chan *dchan)
{
        struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
        unsigned long flags;

        spin_lock_irqsave(&chan->vc.lock, flags);

        if (chan->is_paused)
                axi_chan_resume(chan);

        spin_unlock_irqrestore(&chan->vc.lock, flags);

        return 0;
}
static int axi_dma_suspend(struct axi_dma_chip *chip)
{
        axi_dma_irq_disable(chip);
        axi_dma_disable(chip);

        clk_disable_unprepare(chip->core_clk);
        clk_disable_unprepare(chip->cfgr_clk);

        return 0;
}
static int axi_dma_resume(struct axi_dma_chip *chip)
{
        int ret;

        ret = clk_prepare_enable(chip->cfgr_clk);
        if (ret < 0)
                return ret;

        ret = clk_prepare_enable(chip->core_clk);
        if (ret < 0)
                return ret;

        axi_dma_enable(chip);
        axi_dma_irq_enable(chip);

        return 0;
}
static int __maybe_unused axi_dma_runtime_suspend(struct device *dev)
{
        struct axi_dma_chip *chip = dev_get_drvdata(dev);

        return axi_dma_suspend(chip);
}

static int __maybe_unused axi_dma_runtime_resume(struct device *dev)
{
        struct axi_dma_chip *chip = dev_get_drvdata(dev);

        return axi_dma_resume(chip);
}
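
/*
 * Hardware description comes from firmware properties: "dma-channels",
 * "snps,dma-masters", "snps,data-width", the per-channel "snps,block-size"
 * and "snps,priority" arrays, and the optional "snps,axi-max-burst-len"
 * limit, all validated against the driver's DMAC_MAX_* / DWAXIDMAC_* bounds.
 */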
static int parse_device_properties(struct axi_dma_chip *chip)
{
        struct device *dev = chip->dev;
        u32 tmp, carr[DMAC_MAX_CHANNELS];
        int ret;

        ret = device_property_read_u32(dev, "dma-channels", &tmp);
        if (ret)
                return ret;
        if (tmp == 0 || tmp > DMAC_MAX_CHANNELS)
                return -EINVAL;

        chip->dw->hdata->nr_channels = tmp;

        ret = device_property_read_u32(dev, "snps,dma-masters", &tmp);
        if (ret)
                return ret;
        if (tmp == 0 || tmp > DMAC_MAX_MASTERS)
                return -EINVAL;

        chip->dw->hdata->nr_masters = tmp;

        ret = device_property_read_u32(dev, "snps,data-width", &tmp);
        if (ret)
                return ret;
        if (tmp > DWAXIDMAC_TRANS_WIDTH_MAX)
                return -EINVAL;

        chip->dw->hdata->m_data_width = tmp;

        ret = device_property_read_u32_array(dev, "snps,block-size", carr,
                                             chip->dw->hdata->nr_channels);
        if (ret)
                return ret;
        for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
                if (carr[tmp] == 0 || carr[tmp] > DMAC_MAX_BLK_SIZE)
                        return -EINVAL;

                chip->dw->hdata->block_size[tmp] = carr[tmp];
        }

        ret = device_property_read_u32_array(dev, "snps,priority", carr,
                                             chip->dw->hdata->nr_channels);
        if (ret)
                return ret;
        /* Priority value must be programmed within [0:nr_channels-1] range */
        for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
                if (carr[tmp] >= chip->dw->hdata->nr_channels)
                        return -EINVAL;

                chip->dw->hdata->priority[tmp] = carr[tmp];
        }

        /* axi-max-burst-len is an optional property */
        ret = device_property_read_u32(dev, "snps,axi-max-burst-len", &tmp);
        if (!ret) {
                if (tmp > DWAXIDMAC_ARWLEN_MAX + 1)
                        return -EINVAL;
                if (tmp < DWAXIDMAC_ARWLEN_MIN + 1)
                        return -EINVAL;

                chip->dw->hdata->restrict_axi_burst_len = true;
                chip->dw->hdata->axi_rw_burst_len = tmp - 1;
        }

        return 0;
}
static int dw_probe(struct platform_device *pdev)
{
        struct axi_dma_chip *chip;
        struct resource *mem;
        struct dw_axi_dma *dw;
        struct dw_axi_dma_hcfg *hdata;
        u32 i;
        int ret;

        chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
        if (!chip)
                return -ENOMEM;

        dw = devm_kzalloc(&pdev->dev, sizeof(*dw), GFP_KERNEL);
        if (!dw)
                return -ENOMEM;

        hdata = devm_kzalloc(&pdev->dev, sizeof(*hdata), GFP_KERNEL);
        if (!hdata)
                return -ENOMEM;

        chip->dw = dw;
        chip->dev = &pdev->dev;
        chip->dw->hdata = hdata;

        chip->irq = platform_get_irq(pdev, 0);
        if (chip->irq < 0)
                return chip->irq;

        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        chip->regs = devm_ioremap_resource(chip->dev, mem);
        if (IS_ERR(chip->regs))
                return PTR_ERR(chip->regs);

        chip->core_clk = devm_clk_get(chip->dev, "core-clk");
        if (IS_ERR(chip->core_clk))
                return PTR_ERR(chip->core_clk);

        chip->cfgr_clk = devm_clk_get(chip->dev, "cfgr-clk");
        if (IS_ERR(chip->cfgr_clk))
                return PTR_ERR(chip->cfgr_clk);

        ret = parse_device_properties(chip);
        if (ret)
                return ret;

        dw->chan = devm_kcalloc(chip->dev, hdata->nr_channels,
                                sizeof(*dw->chan), GFP_KERNEL);
        if (!dw->chan)
                return -ENOMEM;

        ret = devm_request_irq(chip->dev, chip->irq, dw_axi_dma_interrupt,
                               IRQF_SHARED, KBUILD_MODNAME, chip);
        if (ret)
                return ret;

        /* Lli address must be aligned to a 64-byte boundary */
        dw->desc_pool = dmam_pool_create(KBUILD_MODNAME, chip->dev,
                                         sizeof(struct axi_dma_desc), 64, 0);
        if (!dw->desc_pool) {
                dev_err(chip->dev, "No memory for descriptors dma pool\n");
                return -ENOMEM;
        }

        INIT_LIST_HEAD(&dw->dma.channels);
        for (i = 0; i < hdata->nr_channels; i++) {
                struct axi_dma_chan *chan = &dw->chan[i];

                chan->chip = chip;
                chan->id = i;
                chan->chan_regs = chip->regs + COMMON_REG_LEN + i * CHAN_REG_LEN;
                atomic_set(&chan->descs_allocated, 0);

                chan->vc.desc_free = vchan_desc_put;
                vchan_init(&chan->vc, &dw->dma);
        }

        /* Set capabilities */
        dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);

        /* DMA capabilities */
        dw->dma.chancnt = hdata->nr_channels;
        dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS;
        dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;
        dw->dma.directions = BIT(DMA_MEM_TO_MEM);
        dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

        dw->dma.dev = chip->dev;
        dw->dma.device_tx_status = dma_chan_tx_status;
        dw->dma.device_issue_pending = dma_chan_issue_pending;
        dw->dma.device_terminate_all = dma_chan_terminate_all;
        dw->dma.device_pause = dma_chan_pause;
        dw->dma.device_resume = dma_chan_resume;

        dw->dma.device_alloc_chan_resources = dma_chan_alloc_chan_resources;
        dw->dma.device_free_chan_resources = dma_chan_free_chan_resources;

        dw->dma.device_prep_dma_memcpy = dma_chan_prep_dma_memcpy;

        platform_set_drvdata(pdev, chip);

        pm_runtime_enable(chip->dev);

        /*
         * We can't just call pm_runtime_get here instead of
         * pm_runtime_get_noresume + axi_dma_resume because we need the
         * driver to work also without Runtime PM support.
         */
        pm_runtime_get_noresume(chip->dev);
        ret = axi_dma_resume(chip);
        if (ret < 0)
                goto err_pm_disable;

        axi_dma_hw_init(chip);

        pm_runtime_put(chip->dev);

        ret = dma_async_device_register(&dw->dma);
        if (ret)
                goto err_pm_disable;

        dev_info(chip->dev, "DesignWare AXI DMA Controller, %d channels\n",
                 dw->hdata->nr_channels);

        return 0;

err_pm_disable:
        pm_runtime_disable(chip->dev);

        return ret;
}
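
/*
 * Removal mirrors probe: the clocks are re-enabled so the registers can be
 * reached, every channel and the controller interrupts are shut off, runtime
 * PM is disabled, the IRQ handler is released before the channel tasklets are
 * killed, and only then is the dmaengine device unregistered.
 */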
static int dw_remove(struct platform_device *pdev)
{
        struct axi_dma_chip *chip = platform_get_drvdata(pdev);
        struct dw_axi_dma *dw = chip->dw;
        struct axi_dma_chan *chan, *_chan;
        u32 i;

        /* Enable clk before accessing registers */
        clk_prepare_enable(chip->cfgr_clk);
        clk_prepare_enable(chip->core_clk);
        axi_dma_irq_disable(chip);
        for (i = 0; i < dw->hdata->nr_channels; i++) {
                axi_chan_disable(&chip->dw->chan[i]);
                axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
        }
        axi_dma_disable(chip);

        pm_runtime_disable(chip->dev);
        axi_dma_suspend(chip);

        devm_free_irq(chip->dev, chip->irq, chip);

        list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
                                 vc.chan.device_node) {
                list_del(&chan->vc.chan.device_node);
                tasklet_kill(&chan->vc.task);
        }

        dma_async_device_unregister(&dw->dma);

        return 0;
}
static const struct dev_pm_ops dw_axi_dma_pm_ops = {
        SET_RUNTIME_PM_OPS(axi_dma_runtime_suspend, axi_dma_runtime_resume, NULL)
};
static const struct of_device_id dw_dma_of_id_table[] = {
        { .compatible = "snps,axi-dma-1.01a" },
        {}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
static struct platform_driver dw_driver = {
        .probe          = dw_probe,
        .remove         = dw_remove,
        .driver = {
                .name   = KBUILD_MODNAME,
                .of_match_table = of_match_ptr(dw_dma_of_id_table),
                .pm = &dw_axi_dma_pm_ops,
        },
};
module_platform_driver(dw_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare AXI DMA Controller platform driver");
MODULE_AUTHOR("Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>");