// SPDX-License-Identifier: GPL-2.0
// (C) 2017-2018 Synopsys, Inc. (www.synopsys.com)

/*
 * Synopsys DesignWare AXI DMA Controller driver.
 *
 * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/types.h>

#include "dw-axi-dmac.h"
#include "../dmaengine.h"
#include "../virt-dma.h"
/*
 * The set of bus widths supported by the DMA controller. DW AXI DMAC supports
 * master data bus width up to 512 bits (for both AXI master interfaces), but
 * it depends on IP block configuration.
 */
#define AXI_DMA_BUSWIDTHS		  \
	(DMA_SLAVE_BUSWIDTH_1_BYTE	| \
	DMA_SLAVE_BUSWIDTH_2_BYTES	| \
	DMA_SLAVE_BUSWIDTH_4_BYTES	| \
	DMA_SLAVE_BUSWIDTH_8_BYTES	| \
	DMA_SLAVE_BUSWIDTH_16_BYTES	| \
	DMA_SLAVE_BUSWIDTH_32_BYTES	| \
	DMA_SLAVE_BUSWIDTH_64_BYTES)
static inline void
axi_dma_iowrite32(struct axi_dma_chip *chip, u32 reg, u32 val)
{
	iowrite32(val, chip->regs + reg);
}

static inline u32 axi_dma_ioread32(struct axi_dma_chip *chip, u32 reg)
{
	return ioread32(chip->regs + reg);
}
static inline void
axi_chan_iowrite32(struct axi_dma_chan *chan, u32 reg, u32 val)
{
	iowrite32(val, chan->chan_regs + reg);
}

static inline u32 axi_chan_ioread32(struct axi_dma_chan *chan, u32 reg)
{
	return ioread32(chan->chan_regs + reg);
}
static inline void
axi_chan_iowrite64(struct axi_dma_chan *chan, u32 reg, u64 val)
{
	/*
	 * We split one 64 bit write into two 32 bit writes as some HW
	 * doesn't support 64 bit access.
	 */
	iowrite32(lower_32_bits(val), chan->chan_regs + reg);
	iowrite32(upper_32_bits(val), chan->chan_regs + reg + 4);
}
static inline void axi_dma_disable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val &= ~DMAC_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_enable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val |= DMAC_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_irq_disable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val &= ~INT_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_irq_enable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val |= INT_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}
static inline void axi_chan_irq_disable(struct axi_dma_chan *chan, u32 irq_mask)
{
	u32 val;

	if (likely(irq_mask == DWAXIDMAC_IRQ_ALL)) {
		axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, DWAXIDMAC_IRQ_NONE);
	} else {
		val = axi_chan_ioread32(chan, CH_INTSTATUS_ENA);
		val &= ~irq_mask;
		axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, val);
	}
}
static inline void axi_chan_irq_set(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, irq_mask);
}

static inline void axi_chan_irq_sig_set(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTSIGNAL_ENA, irq_mask);
}

static inline void axi_chan_irq_clear(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTCLEAR, irq_mask);
}

static inline u32 axi_chan_irq_read(struct axi_dma_chan *chan)
{
	return axi_chan_ioread32(chan, CH_INTSTATUS);
}
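
/*
 * DMAC_CHEN pairs each channel enable bit with a write-enable bit: only
 * channels whose WE bit is set in the same write take the new value.  The
 * helpers below rely on that to update a single channel without disturbing
 * the others.
 */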
static inline void axi_chan_disable(struct axi_dma_chan *chan)
{
	u32 val;

	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
	val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
	val |=   BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
	axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
}

static inline void axi_chan_enable(struct axi_dma_chan *chan)
{
	u32 val;

	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
	val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
	       BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
	axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
}
static inline bool axi_chan_is_hw_enable(struct axi_dma_chan *chan)
{
	u32 val;

	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);

	return !!(val & (BIT(chan->id) << DMAC_CHAN_EN_SHIFT));
}
static void axi_dma_hw_init(struct axi_dma_chip *chip)
{
	u32 i;

	for (i = 0; i < chip->dw->hdata->nr_channels; i++) {
		axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
		axi_chan_disable(&chip->dw->chan[i]);
	}
}
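
/*
 * The xfer width is the largest power-of-2 number of bytes (log2 encoded)
 * that divides the source address, the destination address and the length,
 * capped by the master data bus width.  E.g. buffers and length that are
 * all 4-byte aligned yield a width of 2 (32-bit items).
 */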
static u32 axi_chan_get_xfer_width(struct axi_dma_chan *chan, dma_addr_t src,
				   dma_addr_t dst, size_t len)
{
	u32 max_width = chan->chip->dw->hdata->m_data_width;

	return __ffs(src | dst | len | BIT(max_width));
}
static inline const char *axi_chan_name(struct axi_dma_chan *chan)
{
	return dma_chan_name(&chan->vc.chan);
}
static struct axi_dma_desc *axi_desc_get(struct axi_dma_chan *chan)
{
	struct dw_axi_dma *dw = chan->chip->dw;
	struct axi_dma_desc *desc;
	dma_addr_t phys;

	desc = dma_pool_zalloc(dw->desc_pool, GFP_NOWAIT, &phys);
	if (unlikely(!desc)) {
		dev_err(chan2dev(chan), "%s: not enough descriptors available\n",
			axi_chan_name(chan));
		return NULL;
	}

	atomic_inc(&chan->descs_allocated);
	INIT_LIST_HEAD(&desc->xfer_list);
	desc->vd.tx.phys = phys;
	desc->chan = chan;

	return desc;
}
static void axi_desc_put(struct axi_dma_desc *desc)
{
	struct axi_dma_chan *chan = desc->chan;
	struct dw_axi_dma *dw = chan->chip->dw;
	struct axi_dma_desc *child, *_next;
	unsigned int descs_put = 0;

	list_for_each_entry_safe(child, _next, &desc->xfer_list, xfer_list) {
		list_del(&child->xfer_list);
		dma_pool_free(dw->desc_pool, child, child->vd.tx.phys);
		descs_put++;
	}

	dma_pool_free(dw->desc_pool, desc, desc->vd.tx.phys);
	descs_put++;

	atomic_sub(descs_put, &chan->descs_allocated);
	dev_vdbg(chan2dev(chan), "%s: %d descs put, %d still allocated\n",
		 axi_chan_name(chan), descs_put,
		 atomic_read(&chan->descs_allocated));
}
static void vchan_desc_put(struct virt_dma_desc *vdesc)
{
	axi_desc_put(vd_to_axi_desc(vdesc));
}
static enum dma_status
dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	enum dma_status ret;

	ret = dma_cookie_status(dchan, cookie, txstate);

	if (chan->is_paused && ret == DMA_IN_PROGRESS)
		ret = DMA_PAUSED;

	return ret;
}
static void write_desc_llp(struct axi_dma_desc *desc, dma_addr_t adr)
{
	desc->lli.llp = cpu_to_le64(adr);
}

static void write_chan_llp(struct axi_dma_chan *chan, dma_addr_t adr)
{
	axi_chan_iowrite64(chan, CH_LLP, adr);
}
/* Called in chan locked context */
static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
				      struct axi_dma_desc *first)
{
	u32 priority = chan->chip->dw->hdata->priority[chan->id];
	u32 reg, irq_mask;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	if (unlikely(axi_chan_is_hw_enable(chan))) {
		dev_err(chan2dev(chan), "%s is non-idle!\n",
			axi_chan_name(chan));

		return;
	}

	axi_dma_enable(chan->chip);

	reg = (DWAXIDMAC_MBLK_TYPE_LL << CH_CFG_L_DST_MULTBLK_TYPE_POS |
	       DWAXIDMAC_MBLK_TYPE_LL << CH_CFG_L_SRC_MULTBLK_TYPE_POS);
	axi_chan_iowrite32(chan, CH_CFG_L, reg);

	reg = (DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC << CH_CFG_H_TT_FC_POS |
	       priority << CH_CFG_H_PRIORITY_POS |
	       DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_DST_POS |
	       DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_SRC_POS);
	axi_chan_iowrite32(chan, CH_CFG_H, reg);

	write_chan_llp(chan, first->vd.tx.phys | lms);

	irq_mask = DWAXIDMAC_IRQ_DMA_TRF | DWAXIDMAC_IRQ_ALL_ERR;
	axi_chan_irq_sig_set(chan, irq_mask);

	/* Generate 'suspend' status but don't generate interrupt */
	irq_mask |= DWAXIDMAC_IRQ_SUSPENDED;
	axi_chan_irq_set(chan, irq_mask);

	axi_chan_enable(chan);
}
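
/* Must be called with chan->vc.lock held */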
static void axi_chan_start_first_queued(struct axi_dma_chan *chan)
{
	struct axi_dma_desc *desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd)
		return;

	desc = vd_to_axi_desc(vd);
	dev_vdbg(chan2dev(chan), "%s: started %u\n", axi_chan_name(chan),
		 vd->tx.cookie);
	axi_chan_block_xfer_start(chan, desc);
}
static void dma_chan_issue_pending(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (vchan_issue_pending(&chan->vc))
		axi_chan_start_first_queued(chan);
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}
static int dma_chan_alloc_chan_resources(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	/* ASSERT: channel is idle */
	if (axi_chan_is_hw_enable(chan)) {
		dev_err(chan2dev(chan), "%s is non-idle!\n",
			axi_chan_name(chan));
		return -EBUSY;
	}

	dev_vdbg(dchan2dev(dchan), "%s: allocating\n", axi_chan_name(chan));

	pm_runtime_get(chan->chip->dev);

	return 0;
}
static void dma_chan_free_chan_resources(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	/* ASSERT: channel is idle */
	if (axi_chan_is_hw_enable(chan))
		dev_err(dchan2dev(dchan), "%s is non-idle!\n",
			axi_chan_name(chan));

	axi_chan_disable(chan);
	axi_chan_irq_disable(chan, DWAXIDMAC_IRQ_ALL);

	vchan_free_chan_resources(&chan->vc);

	dev_vdbg(dchan2dev(dchan),
		 "%s: free resources, descriptor still allocated: %u\n",
		 axi_chan_name(chan), atomic_read(&chan->descs_allocated));

	pm_runtime_put(chan->chip->dev);
}
/*
 * If DW_axi_dmac sees the CHx_CTL.ShadowReg_Or_LLI_Last bit of the fetched
 * LLI as 1, it understands that the current block is the final block in the
 * transfer and completes the DMA transfer operation at the end of the
 * current block transfer.
 */
static void set_desc_last(struct axi_dma_desc *desc)
{
	u32 val;

	val = le32_to_cpu(desc->lli.ctl_hi);
	val |= CH_CTL_H_LLI_LAST;
	desc->lli.ctl_hi = cpu_to_le32(val);
}
static void write_desc_sar(struct axi_dma_desc *desc, dma_addr_t adr)
{
	desc->lli.sar = cpu_to_le64(adr);
}

static void write_desc_dar(struct axi_dma_desc *desc, dma_addr_t adr)
{
	desc->lli.dar = cpu_to_le64(adr);
}
static void set_desc_src_master(struct axi_dma_desc *desc)
{
	u32 val;

	/* Select AXI0 for source master */
	val = le32_to_cpu(desc->lli.ctl_lo);
	val &= ~CH_CTL_L_SRC_MAST;
	desc->lli.ctl_lo = cpu_to_le32(val);
}
static void set_desc_dest_master(struct axi_dma_desc *desc)
{
	u32 val;

	/* Select AXI1 for destination master if available, AXI0 otherwise */
	val = le32_to_cpu(desc->lli.ctl_lo);
	if (desc->chan->chip->dw->hdata->nr_masters > 1)
		val |= CH_CTL_L_DST_MAST;
	else
		val &= ~CH_CTL_L_DST_MAST;

	desc->lli.ctl_lo = cpu_to_le32(val);
}
static struct dma_async_tx_descriptor *
dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
			 dma_addr_t src_adr, size_t len, unsigned long flags)
{
	struct axi_dma_desc *first = NULL, *desc = NULL, *prev = NULL;
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	size_t block_ts, max_block_ts, xfer_len;
	u32 xfer_width, reg;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	dev_dbg(chan2dev(chan), "%s: memcpy: src: %pad dst: %pad length: %zd flags: %#lx",
		axi_chan_name(chan), &src_adr, &dst_adr, len, flags);

	max_block_ts = chan->chip->dw->hdata->block_size[chan->id];

	while (len) {
		xfer_len = len;

		/*
		 * Take care of the alignment.
		 * Actually source and destination widths can be different,
		 * but make them the same to keep things simpler.
		 */
		xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, xfer_len);

		/*
		 * block_ts indicates the total number of data items of
		 * xfer_width to be transferred in a DMA block transfer.
		 * The BLOCK_TS register should be set to block_ts - 1.
		 */
		block_ts = xfer_len >> xfer_width;
		if (block_ts > max_block_ts) {
			block_ts = max_block_ts;
			xfer_len = max_block_ts << xfer_width;
		}
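
		/*
		 * E.g. a 3 MiB copy with 4-byte aligned buffers gives
		 * xfer_width = 2; with max_block_ts = 4096 items each block
		 * then moves 16 KiB, so this loop chains 192 descriptors.
		 */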

		desc = axi_desc_get(chan);
		if (unlikely(!desc))
			goto err_desc_get;

		write_desc_sar(desc, src_adr);
		write_desc_dar(desc, dst_adr);
		desc->lli.block_ts_lo = cpu_to_le32(block_ts - 1);

		reg = CH_CTL_H_LLI_VALID;
		if (chan->chip->dw->hdata->restrict_axi_burst_len) {
			u32 burst_len = chan->chip->dw->hdata->axi_rw_burst_len;

			reg |= (CH_CTL_H_ARLEN_EN |
				burst_len << CH_CTL_H_ARLEN_POS |
				CH_CTL_H_AWLEN_EN |
				burst_len << CH_CTL_H_AWLEN_POS);
		}
		desc->lli.ctl_hi = cpu_to_le32(reg);

		reg = (DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
		       DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS |
		       xfer_width << CH_CTL_L_DST_WIDTH_POS |
		       xfer_width << CH_CTL_L_SRC_WIDTH_POS |
		       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
		       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS);
		desc->lli.ctl_lo = cpu_to_le32(reg);

		set_desc_src_master(desc);
		set_desc_dest_master(desc);

		/* Manage transfer list (xfer_list) */
		if (!first) {
			first = desc;
		} else {
			list_add_tail(&desc->xfer_list, &first->xfer_list);
			write_desc_llp(prev, desc->vd.tx.phys | lms);
		}
		prev = desc;

		/* Update the length and addresses for the next loop cycle */
		len -= xfer_len;
		dst_adr += xfer_len;
		src_adr += xfer_len;
	}

	/* Total len of src/dest sg == 0, so no descriptors were allocated */
	if (unlikely(!first))
		return NULL;

	/* Set end-of-link to the last link descriptor of the list */
	set_desc_last(desc);

	return vchan_tx_prep(&chan->vc, &first->vd, flags);

err_desc_get:
	if (first)
		axi_desc_put(first);
	return NULL;
}
static void axi_chan_dump_lli(struct axi_dma_chan *chan,
			      struct axi_dma_desc *desc)
{
	dev_err(dchan2dev(&chan->vc.chan),
		"SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x",
		le64_to_cpu(desc->lli.sar),
		le64_to_cpu(desc->lli.dar),
		le64_to_cpu(desc->lli.llp),
		le32_to_cpu(desc->lli.block_ts_lo),
		le32_to_cpu(desc->lli.ctl_hi),
		le32_to_cpu(desc->lli.ctl_lo));
}
static void axi_chan_list_dump_lli(struct axi_dma_chan *chan,
				   struct axi_dma_desc *desc_head)
{
	struct axi_dma_desc *desc;

	axi_chan_dump_lli(chan, desc_head);
	list_for_each_entry(desc, &desc_head->xfer_list, xfer_list)
		axi_chan_dump_lli(chan, desc);
}
static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
{
	struct virt_dma_desc *vd;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);

	axi_chan_disable(chan);

	/* The bad descriptor currently is in the head of vc list */
	vd = vchan_next_desc(&chan->vc);
	/* Remove the completed descriptor from issued list */
	list_del(&vd->node);

	/* WARN about bad descriptor */
	dev_err(chan2dev(chan),
		"Bad descriptor submitted for %s, cookie: %d, irq: 0x%08x\n",
		axi_chan_name(chan), vd->tx.cookie, status);
	axi_chan_list_dump_lli(chan, vd_to_axi_desc(vd));

	vchan_cookie_complete(vd);

	/* Try to restart the controller */
	axi_chan_start_first_queued(chan);

	spin_unlock_irqrestore(&chan->vc.lock, flags);
}
static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
{
	struct virt_dma_desc *vd;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (unlikely(axi_chan_is_hw_enable(chan))) {
		dev_err(chan2dev(chan), "BUG: %s caught DWAXIDMAC_IRQ_DMA_TRF, but channel not idle!\n",
			axi_chan_name(chan));
		axi_chan_disable(chan);
	}

	/* The completed descriptor currently is in the head of vc list */
	vd = vchan_next_desc(&chan->vc);
	/* Remove the completed descriptor from issued list before completing */
	list_del(&vd->node);
	vchan_cookie_complete(vd);

	/* Submit queued descriptors after processing the completed ones */
	axi_chan_start_first_queued(chan);

	spin_unlock_irqrestore(&chan->vc.lock, flags);
}
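
/*
 * Hard IRQ handler shared by all channels of the controller.  Completion
 * callbacks are not run here: they are deferred to the virt-dma tasklet by
 * vchan_cookie_complete().
 */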
static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id)
{
	struct axi_dma_chip *chip = dev_id;
	struct dw_axi_dma *dw = chip->dw;
	struct axi_dma_chan *chan;
	u32 status, i;

	/* Disable DMAC interrupts. We'll enable them after processing channels */
	axi_dma_irq_disable(chip);

	/* Poll, clear and process every channel interrupt status */
	for (i = 0; i < dw->hdata->nr_channels; i++) {
		chan = &dw->chan[i];
		status = axi_chan_irq_read(chan);
		axi_chan_irq_clear(chan, status);

		dev_vdbg(chip->dev, "%s %u IRQ status: 0x%08x\n",
			 axi_chan_name(chan), i, status);

		if (status & DWAXIDMAC_IRQ_ALL_ERR)
			axi_chan_handle_err(chan, status);
		else if (status & DWAXIDMAC_IRQ_DMA_TRF)
			axi_chan_block_xfer_complete(chan);
	}

	/* Re-enable interrupts */
	axi_dma_irq_enable(chip);

	return IRQ_HANDLED;
}
static int dma_chan_terminate_all(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vc.lock, flags);

	axi_chan_disable(chan);

	vchan_get_all_descriptors(&chan->vc, &head);

	spin_unlock_irqrestore(&chan->vc.lock, flags);

	vchan_dma_desc_free_list(&chan->vc, &head);

	dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan));

	return 0;
}
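
/*
 * Pause sets the channel's CH_SUSP bit (with its write-enable bit) in
 * DMAC_CHEN, then polls CH_INTSTATUS for DWAXIDMAC_IRQ_SUSPENDED for a
 * bounded number of iterations before giving up with -EAGAIN.
 */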
static int dma_chan_pause(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;
	unsigned int timeout = 20; /* timeout iterations */
	u32 val;

	spin_lock_irqsave(&chan->vc.lock, flags);

	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
	val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT |
	       BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
	axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);

	do {
		if (axi_chan_irq_read(chan) & DWAXIDMAC_IRQ_SUSPENDED)
			break;

		udelay(2);
	} while (--timeout);

	axi_chan_irq_clear(chan, DWAXIDMAC_IRQ_SUSPENDED);

	chan->is_paused = true;

	spin_unlock_irqrestore(&chan->vc.lock, flags);

	return timeout ? 0 : -EAGAIN;
}
/* Called in chan locked context */
static inline void axi_chan_resume(struct axi_dma_chan *chan)
{
	u32 val;

	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
	val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT);
	val |=  (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
	axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);

	chan->is_paused = false;
}
static int dma_chan_resume(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);

	if (chan->is_paused)
		axi_chan_resume(chan);

	spin_unlock_irqrestore(&chan->vc.lock, flags);

	return 0;
}
static int axi_dma_suspend(struct axi_dma_chip *chip)
{
	axi_dma_irq_disable(chip);
	axi_dma_disable(chip);

	clk_disable_unprepare(chip->core_clk);
	clk_disable_unprepare(chip->cfgr_clk);

	return 0;
}
static int axi_dma_resume(struct axi_dma_chip *chip)
{
	int ret;

	ret = clk_prepare_enable(chip->cfgr_clk);
	if (ret < 0)
		return ret;

	ret = clk_prepare_enable(chip->core_clk);
	if (ret < 0)
		return ret;

	axi_dma_enable(chip);
	axi_dma_irq_enable(chip);

	return 0;
}
static int __maybe_unused axi_dma_runtime_suspend(struct device *dev)
{
	struct axi_dma_chip *chip = dev_get_drvdata(dev);

	return axi_dma_suspend(chip);
}

static int __maybe_unused axi_dma_runtime_resume(struct device *dev)
{
	struct axi_dma_chip *chip = dev_get_drvdata(dev);

	return axi_dma_resume(chip);
}
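
/*
 * All hardware parameters are taken from firmware properties (DT/ACPI);
 * nothing is read back from the IP's configuration registers here.
 */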
static int parse_device_properties(struct axi_dma_chip *chip)
{
	struct device *dev = chip->dev;
	u32 tmp, carr[DMAC_MAX_CHANNELS];
	int ret;

	ret = device_property_read_u32(dev, "dma-channels", &tmp);
	if (ret)
		return ret;
	if (tmp == 0 || tmp > DMAC_MAX_CHANNELS)
		return -EINVAL;

	chip->dw->hdata->nr_channels = tmp;

	ret = device_property_read_u32(dev, "snps,dma-masters", &tmp);
	if (ret)
		return ret;
	if (tmp == 0 || tmp > DMAC_MAX_MASTERS)
		return -EINVAL;

	chip->dw->hdata->nr_masters = tmp;

	ret = device_property_read_u32(dev, "snps,data-width", &tmp);
	if (ret)
		return ret;
	if (tmp > DWAXIDMAC_TRANS_WIDTH_MAX)
		return -EINVAL;

	chip->dw->hdata->m_data_width = tmp;

	ret = device_property_read_u32_array(dev, "snps,block-size", carr,
					     chip->dw->hdata->nr_channels);
	if (ret)
		return ret;
	for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
		if (carr[tmp] == 0 || carr[tmp] > DMAC_MAX_BLK_SIZE)
			return -EINVAL;

		chip->dw->hdata->block_size[tmp] = carr[tmp];
	}

	ret = device_property_read_u32_array(dev, "snps,priority", carr,
					     chip->dw->hdata->nr_channels);
	if (ret)
		return ret;
	/* Priority value must be programmed within [0:nr_channels-1] range */
	for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
		if (carr[tmp] >= chip->dw->hdata->nr_channels)
			return -EINVAL;

		chip->dw->hdata->priority[tmp] = carr[tmp];
	}

	/* axi-max-burst-len is an optional property */
	ret = device_property_read_u32(dev, "snps,axi-max-burst-len", &tmp);
	if (!ret) {
		if (tmp > DWAXIDMAC_ARWLEN_MAX + 1)
			return -EINVAL;
		if (tmp < DWAXIDMAC_ARWLEN_MIN + 1)
			return -EINVAL;

		chip->dw->hdata->restrict_axi_burst_len = true;
		chip->dw->hdata->axi_rw_burst_len = tmp - 1;
	}

	return 0;
}
static int dw_probe(struct platform_device *pdev)
{
	struct axi_dma_chip *chip;
	struct resource *mem;
	struct dw_axi_dma *dw;
	struct dw_axi_dma_hcfg *hdata;
	u32 i;
	int ret;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	dw = devm_kzalloc(&pdev->dev, sizeof(*dw), GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	hdata = devm_kzalloc(&pdev->dev, sizeof(*hdata), GFP_KERNEL);
	if (!hdata)
		return -ENOMEM;

	chip->dw = dw;
	chip->dev = &pdev->dev;
	chip->dw->hdata = hdata;

	chip->irq = platform_get_irq(pdev, 0);
	if (chip->irq < 0)
		return chip->irq;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	chip->regs = devm_ioremap_resource(chip->dev, mem);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	chip->core_clk = devm_clk_get(chip->dev, "core-clk");
	if (IS_ERR(chip->core_clk))
		return PTR_ERR(chip->core_clk);

	chip->cfgr_clk = devm_clk_get(chip->dev, "cfgr-clk");
	if (IS_ERR(chip->cfgr_clk))
		return PTR_ERR(chip->cfgr_clk);

	ret = parse_device_properties(chip);
	if (ret)
		return ret;

	dw->chan = devm_kcalloc(chip->dev, hdata->nr_channels,
				sizeof(*dw->chan), GFP_KERNEL);
	if (!dw->chan)
		return -ENOMEM;

	ret = devm_request_irq(chip->dev, chip->irq, dw_axi_dma_interrupt,
			       IRQF_SHARED, KBUILD_MODNAME, chip);
	if (ret)
		return ret;

	/* LLI address must be aligned to a 64-byte boundary */
	dw->desc_pool = dmam_pool_create(KBUILD_MODNAME, chip->dev,
					 sizeof(struct axi_dma_desc), 64, 0);
	if (!dw->desc_pool) {
		dev_err(chip->dev, "No memory for descriptors dma pool\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < hdata->nr_channels; i++) {
		struct axi_dma_chan *chan = &dw->chan[i];

		chan->chip = chip;
		chan->id = i;
		chan->chan_regs = chip->regs + COMMON_REG_LEN + i * CHAN_REG_LEN;
		atomic_set(&chan->descs_allocated, 0);

		chan->vc.desc_free = vchan_desc_put;
		vchan_init(&chan->vc, &dw->dma);
	}

	/* Set capabilities */
	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);

	/* DMA capabilities */
	dw->dma.chancnt = hdata->nr_channels;
	dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS;
	dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;
	dw->dma.directions = BIT(DMA_MEM_TO_MEM);
	dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	dw->dma.dev = chip->dev;
	dw->dma.device_tx_status = dma_chan_tx_status;
	dw->dma.device_issue_pending = dma_chan_issue_pending;
	dw->dma.device_terminate_all = dma_chan_terminate_all;
	dw->dma.device_pause = dma_chan_pause;
	dw->dma.device_resume = dma_chan_resume;

	dw->dma.device_alloc_chan_resources = dma_chan_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dma_chan_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dma_chan_prep_dma_memcpy;

	platform_set_drvdata(pdev, chip);

	pm_runtime_enable(chip->dev);

	/*
	 * We can't just call pm_runtime_get here instead of
	 * pm_runtime_get_noresume + axi_dma_resume because we need the
	 * driver to also work without Runtime PM.
	 */
	pm_runtime_get_noresume(chip->dev);
	ret = axi_dma_resume(chip);
	if (ret < 0)
		goto err_pm_disable;

	axi_dma_hw_init(chip);

	pm_runtime_put(chip->dev);

	ret = dmaenginem_async_device_register(&dw->dma);
	if (ret)
		goto err_pm_disable;

	dev_info(chip->dev, "DesignWare AXI DMA Controller, %d channels\n",
		 dw->hdata->nr_channels);

	return 0;

err_pm_disable:
	pm_runtime_disable(chip->dev);

	return ret;
}
static int dw_remove(struct platform_device *pdev)
{
	struct axi_dma_chip *chip = platform_get_drvdata(pdev);
	struct dw_axi_dma *dw = chip->dw;
	struct axi_dma_chan *chan, *_chan;
	u32 i;

	/* Enable clocks before accessing registers */
	clk_prepare_enable(chip->cfgr_clk);
	clk_prepare_enable(chip->core_clk);
	axi_dma_irq_disable(chip);
	for (i = 0; i < dw->hdata->nr_channels; i++) {
		axi_chan_disable(&chip->dw->chan[i]);
		axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
	}
	axi_dma_disable(chip);

	pm_runtime_disable(chip->dev);
	axi_dma_suspend(chip);

	devm_free_irq(chip->dev, chip->irq, chip);

	list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
				 vc.chan.device_node) {
		list_del(&chan->vc.chan.device_node);
		tasklet_kill(&chan->vc.task);
	}

	return 0;
}
static const struct dev_pm_ops dw_axi_dma_pm_ops = {
	SET_RUNTIME_PM_OPS(axi_dma_runtime_suspend, axi_dma_runtime_resume, NULL)
};

static const struct of_device_id dw_dma_of_id_table[] = {
	{ .compatible = "snps,axi-dma-1.01a" },
	{}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);

static struct platform_driver dw_driver = {
	.probe		= dw_probe,
	.remove		= dw_remove,
	.driver = {
		.name		= KBUILD_MODNAME,
		.of_match_table	= dw_dma_of_id_table,
		.pm		= &dw_axi_dma_pm_ops,
	},
};
module_platform_driver(dw_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare AXI DMA Controller platform driver");
MODULE_AUTHOR("Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>");