// SPDX-License-Identifier: GPL-2.0
/*
 * Lightning Mountain centralized DMA controller driver
 *
 * Copyright (c) 2016 - 2020 Intel Corporation.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

#include "../dmaengine.h"
#include "../virt-dma.h"

#define DRIVER_NAME		"lgm-dma"

#define DMA_ID			0x0008
#define DMA_ID_REV		GENMASK(7, 0)
#define DMA_ID_PNR		GENMASK(19, 16)
#define DMA_ID_CHNR		GENMASK(26, 20)
#define DMA_ID_DW_128B		BIT(27)
#define DMA_ID_AW_36B		BIT(28)
#define DMA_VER32		0x32
#define DMA_VER31		0x31
#define DMA_VER22		0x0A

#define DMA_CTRL		0x0010
#define DMA_CTRL_RST		BIT(0)
#define DMA_CTRL_DSRAM_PATH	BIT(1)
#define DMA_CTRL_DBURST_WR	BIT(3)
#define DMA_CTRL_VLD_DF_ACK	BIT(4)
#define DMA_CTRL_CH_FL		BIT(6)
#define DMA_CTRL_DS_FOD		BIT(7)
#define DMA_CTRL_DRB		BIT(8)
#define DMA_CTRL_ENBE		BIT(9)
#define DMA_CTRL_DESC_TMOUT_CNT_V31	GENMASK(27, 16)
#define DMA_CTRL_DESC_TMOUT_EN_V31	BIT(30)
#define DMA_CTRL_PKTARB		BIT(31)

#define DMA_CPOLL		0x0014
#define DMA_CPOLL_CNT		GENMASK(15, 4)
#define DMA_CPOLL_EN		BIT(31)

#define DMA_CS			0x0018
#define DMA_CS_MASK		GENMASK(5, 0)
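
/*
 * Per-channel registers (DMA_CCTRL, DMA_CDBA, DMA_CIS/CIE, ...) are windowed:
 * the channel number is written to DMA_CS first and the following accesses
 * then target that channel. This is why the helpers below hold d->dev_lock
 * around the CS write and the subsequent register access.
 */
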
#define DMA_CCTRL		0x001C
#define DMA_CCTRL_ON		BIT(0)
#define DMA_CCTRL_RST		BIT(1)
#define DMA_CCTRL_CH_POLL_EN	BIT(2)
#define DMA_CCTRL_CH_ABC	BIT(3) /* Adaptive Burst Chop */
#define DMA_CDBA_MSB		GENMASK(7, 4)
#define DMA_CCTRL_DIR_TX	BIT(8)
#define DMA_CCTRL_CLASS		GENMASK(11, 9)
#define DMA_CCTRL_CLASSH	GENMASK(19, 18)
#define DMA_CCTRL_WR_NP_EN	BIT(21)
#define DMA_CCTRL_PDEN		BIT(23)
#define DMA_MAX_CLASS		(SZ_32 - 1)

#define DMA_CDBA		0x0020
#define DMA_CDLEN		0x0024
#define DMA_CIS			0x0028
#define DMA_CIE			0x002C
#define DMA_CI_EOP		BIT(1)
#define DMA_CI_DUR		BIT(2)
#define DMA_CI_DESCPT		BIT(3)
#define DMA_CI_CHOFF		BIT(4)
#define DMA_CI_RDERR		BIT(5)
#define DMA_CI_ALL \
	(DMA_CI_EOP | DMA_CI_DUR | DMA_CI_DESCPT | DMA_CI_CHOFF | DMA_CI_RDERR)

#define DMA_PS			0x0040
#define DMA_PCTRL		0x0044
#define DMA_PCTRL_RXBL16	BIT(0)
#define DMA_PCTRL_TXBL16	BIT(1)
#define DMA_PCTRL_RXBL		GENMASK(3, 2)
#define DMA_PCTRL_RXBL_8	3
#define DMA_PCTRL_TXBL		GENMASK(5, 4)
#define DMA_PCTRL_TXBL_8	3
#define DMA_PCTRL_PDEN		BIT(6)
#define DMA_PCTRL_RXBL32	BIT(7)
#define DMA_PCTRL_RXENDI	GENMASK(9, 8)
#define DMA_PCTRL_TXENDI	GENMASK(11, 10)
#define DMA_PCTRL_TXBL32	BIT(15)
#define DMA_PCTRL_MEM_FLUSH	BIT(16)

#define DMA_IRNEN1		0x00E8
#define DMA_IRNCR1		0x00EC
#define DMA_IRNEN		0x00F4
#define DMA_IRNCR		0x00F8
#define DMA_C_DP_TICK		0x100
#define DMA_C_DP_TICK_TIKNARB	GENMASK(15, 0)
#define DMA_C_DP_TICK_TIKARB	GENMASK(31, 16)

#define DMA_C_HDRM		0x110

/*
 * If header mode is set in the DMA descriptor:
 *   If bit 30 is disabled, HDR_LEN must be configured according to the
 *   channel requirement.
 *   If bit 30 is enabled (checksum with header mode), HDR_LEN does not need
 *   to be configured; it enables checksum offload for the switch.
 * If header mode is not set in the DMA descriptor,
 * this register setting doesn't matter.
 */
#define DMA_C_HDRM_HDR_SUM	BIT(30)

#define DMA_C_BOFF		0x120
#define DMA_C_BOFF_BOF_LEN	GENMASK(7, 0)
#define DMA_C_BOFF_EN		BIT(31)

#define DMA_ORRC		0x190
#define DMA_ORRC_ORRCNT		GENMASK(8, 4)
#define DMA_ORRC_EN		BIT(31)

#define DMA_C_ENDIAN		0x200
#define DMA_C_END_DATAENDI	GENMASK(1, 0)
#define DMA_C_END_DE_EN		BIT(7)
#define DMA_C_END_DESENDI	GENMASK(9, 8)
#define DMA_C_END_DES_EN	BIT(16)

/* DMA controller capability */
#define DMA_ADDR_36BIT		BIT(0)
#define DMA_DATA_128BIT		BIT(1)
#define DMA_CHAN_FLOW_CTL	BIT(2)
#define DMA_DESC_FOD		BIT(3)
#define DMA_DESC_IN_SRAM	BIT(4)
#define DMA_EN_BYTE_EN		BIT(5)
#define DMA_DBURST_WR		BIT(6)
#define DMA_VALID_DESC_FETCH_ACK	BIT(7)
#define DMA_DFT_DRB		BIT(8)

#define DMA_ORRC_MAX_CNT	(SZ_32 - 1)
#define DMA_DFT_POLL_CNT	SZ_4
#define DMA_DFT_BURST_V22	SZ_2
#define DMA_BURSTL_8DW		SZ_8
#define DMA_BURSTL_16DW		SZ_16
#define DMA_BURSTL_32DW		SZ_32
#define DMA_DFT_BURST		DMA_BURSTL_16DW
#define DMA_MAX_DESC_NUM	(SZ_8K - 1)
#define DMA_CHAN_BOFF_MAX	(SZ_256 - 1)
#define DMA_DFT_ENDIAN		0

#define DMA_DFT_DESC_TCNT	50
#define DMA_HDR_LEN_MAX		(SZ_16K - 1)

#define DMA_TX_CH		BIT(0)
#define DMA_RX_CH		BIT(1)
#define DEVICE_ALLOC_DESC	BIT(2)
#define CHAN_IN_USE		BIT(3)
#define DMA_HW_DESC		BIT(4)

/* Descriptor fields */
#define DESC_DATA_LEN		GENMASK(15, 0)
#define DESC_BYTE_OFF		GENMASK(25, 23)
#define DESC_EOP		BIT(28)
#define DESC_SOP		BIT(29)
#define DESC_C			BIT(30)
#define DESC_OWN		BIT(31)
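
/*
 * DW2 descriptor layout as implied by the masks above: one 32-bit control
 * word (DESC_DATA_LEN in bits 15:0, DESC_BYTE_OFF in bits 25:23, and
 * EOP/SOP/C/OWN in bits 28..31) followed by a 32-bit buffer address.
 * Illustrative only (see ldma_prep_slave_sg() for the real sequence, which
 * also inserts barriers before handing ownership to the hardware):
 *
 *	field = FIELD_PREP(DESC_DATA_LEN, len) |
 *		FIELD_PREP(DESC_SOP, 1) | FIELD_PREP(DESC_EOP, 1) |
 *		FIELD_PREP(DESC_OWN, DMA_OWN);
 */
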
#define DMA_CHAN_RST		1
#define DMA_MAX_SIZE		(BIT(16) - 1)
#define MAX_LOWER_CHANS		32
#define MASK_LOWER_CHANS	GENMASK(4, 0)
#define DMA_OWN			1
#define HIGH_4_BITS		GENMASK(3, 0)
#define DMA_DFT_DESC_NUM	1
#define DMA_PKT_DROP_DIS	0

enum ldma_chan_on_off {
	DMA_CH_OFF = 0,
	DMA_CH_ON = 1,
};

struct ldma_chan {
	struct virt_dma_chan vchan;
	struct ldma_port *port; /* back pointer */
	char name[8]; /* Channel name */
	int nr; /* Channel id in hardware */
	u32 flags; /* central way or channel based way */
	enum ldma_chan_on_off onoff;
	dma_addr_t desc_phys;
	void *desc_base; /* Virtual address */
	u32 desc_cnt; /* Number of descriptors */
	int rst;
	u32 hdrm_len;
	bool hdrm_csum;
	u32 boff_len;
	u32 data_endian;
	u32 desc_endian;
	bool pden;
	bool desc_rx_np;
	bool data_endian_en;
	bool desc_endian_en;
	bool abc_en;
	bool desc_init;
	struct dma_pool *desc_pool; /* Descriptors pool */
	u32 desc_num; /* Number of descriptors */
	struct dw2_desc_sw *ds;
	struct work_struct work;
	struct dma_slave_config config;
};

struct ldma_port {
	struct ldma_dev *ldev; /* back pointer */
	u32 portid;
	u32 rxbl;
	u32 txbl;
	u32 rxendi;
	u32 txendi;
	u32 pkt_drop;
};

/* Instance specific data */
struct ldma_inst_data {
	bool desc_in_sram;
	bool chan_fc;
	bool desc_fod; /* Fetch On Demand */
	bool valid_desc_fetch_ack;
	u32 orrc; /* Outstanding read count */
	const char *name;
	u32 type;
};

struct ldma_dev {
	struct device *dev;
	void __iomem *base;
	struct reset_control *rst;
	struct clk *core_clk;
	struct dma_device dma_dev;
	u32 ver;
	int irq;
	struct ldma_port *ports;
	struct ldma_chan *chans; /* channel list on this DMA or port */
	spinlock_t dev_lock; /* Controller register exclusive */
	u32 chan_nrs;
	u32 port_nrs;
	u32 channels_mask;
	u32 flags;
	u32 pollcnt;
	const struct ldma_inst_data *inst;
	struct workqueue_struct *wq;
};

struct dw2_desc {
	u32 field;
	u32 addr;
} __packed __aligned(8);

struct dw2_desc_sw {
	struct virt_dma_desc vdesc;
	struct ldma_chan *chan;
	dma_addr_t desc_phys;
	size_t desc_cnt;
	size_t size;
	struct dw2_desc *desc_hw;
};

static void
ldma_update_bits(struct ldma_dev *d, u32 mask, u32 val, u32 ofs)
{
	u32 old_val, new_val;

	old_val = readl(d->base + ofs);
	new_val = (old_val & ~mask) | (val & mask);

	if (new_val != old_val)
		writel(new_val, d->base + ofs);
}

static inline struct ldma_chan *to_ldma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct ldma_chan, vchan.chan);
}

static inline struct ldma_dev *to_ldma_dev(struct dma_device *dma_dev)
{
	return container_of(dma_dev, struct ldma_dev, dma_dev);
}

static inline struct dw2_desc_sw *to_lgm_dma_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct dw2_desc_sw, vdesc);
}

static inline bool ldma_chan_tx(struct ldma_chan *c)
{
	return !!(c->flags & DMA_TX_CH);
}

static inline bool ldma_chan_is_hw_desc(struct ldma_chan *c)
{
	return !!(c->flags & DMA_HW_DESC);
}
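
/*
 * The ldma_dev_*_cfg() helpers below each toggle one feature bit (or field)
 * in the global DMA_CTRL/DMA_CPOLL/DMA_ORRC registers under d->dev_lock.
 * Several of them are no-ops for controller instance types that do not
 * support the corresponding feature.
 */
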
static void ldma_dev_reset(struct ldma_dev *d)
{
	unsigned long flags;

	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_update_bits(d, DMA_CTRL_RST, DMA_CTRL_RST, DMA_CTRL);
	spin_unlock_irqrestore(&d->dev_lock, flags);
}

static void ldma_dev_pkt_arb_cfg(struct ldma_dev *d, bool enable)
{
	unsigned long flags;
	u32 mask = DMA_CTRL_PKTARB;
	u32 val = enable ? DMA_CTRL_PKTARB : 0;

	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_update_bits(d, mask, val, DMA_CTRL);
	spin_unlock_irqrestore(&d->dev_lock, flags);
}

static void ldma_dev_sram_desc_cfg(struct ldma_dev *d, bool enable)
{
	unsigned long flags;
	u32 mask = DMA_CTRL_DSRAM_PATH;
	u32 val = enable ? DMA_CTRL_DSRAM_PATH : 0;

	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_update_bits(d, mask, val, DMA_CTRL);
	spin_unlock_irqrestore(&d->dev_lock, flags);
}

static void ldma_dev_chan_flow_ctl_cfg(struct ldma_dev *d, bool enable)
{
	unsigned long flags;
	u32 mask, val;

	if (d->inst->type != DMA_TYPE_TX)
		return;

	mask = DMA_CTRL_CH_FL;
	val = enable ? DMA_CTRL_CH_FL : 0;

	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_update_bits(d, mask, val, DMA_CTRL);
	spin_unlock_irqrestore(&d->dev_lock, flags);
}

static void ldma_dev_global_polling_enable(struct ldma_dev *d)
{
	unsigned long flags;
	u32 mask = DMA_CPOLL_EN | DMA_CPOLL_CNT;
	u32 val = DMA_CPOLL_EN;

	val |= FIELD_PREP(DMA_CPOLL_CNT, d->pollcnt);

	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_update_bits(d, mask, val, DMA_CPOLL);
	spin_unlock_irqrestore(&d->dev_lock, flags);
}

static void ldma_dev_desc_fetch_on_demand_cfg(struct ldma_dev *d, bool enable)
{
	unsigned long flags;
	u32 mask, val;

	if (d->inst->type == DMA_TYPE_MCPY)
		return;

	mask = DMA_CTRL_DS_FOD;
	val = enable ? DMA_CTRL_DS_FOD : 0;

	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_update_bits(d, mask, val, DMA_CTRL);
	spin_unlock_irqrestore(&d->dev_lock, flags);
}

static void ldma_dev_byte_enable_cfg(struct ldma_dev *d, bool enable)
{
	unsigned long flags;
	u32 mask = DMA_CTRL_ENBE;
	u32 val = enable ? DMA_CTRL_ENBE : 0;

	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_update_bits(d, mask, val, DMA_CTRL);
	spin_unlock_irqrestore(&d->dev_lock, flags);
}

static void ldma_dev_orrc_cfg(struct ldma_dev *d)
{
	unsigned long flags;
	u32 val = 0;
	u32 mask;

	if (d->inst->type == DMA_TYPE_RX)
		return;

	mask = DMA_ORRC_EN | DMA_ORRC_ORRCNT;
	if (d->inst->orrc > 0 && d->inst->orrc <= DMA_ORRC_MAX_CNT)
		val = DMA_ORRC_EN | FIELD_PREP(DMA_ORRC_ORRCNT, d->inst->orrc);

	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_update_bits(d, mask, val, DMA_ORRC);
	spin_unlock_irqrestore(&d->dev_lock, flags);
}

static void ldma_dev_df_tout_cfg(struct ldma_dev *d, bool enable, int tcnt)
{
	u32 mask = DMA_CTRL_DESC_TMOUT_CNT_V31;
	unsigned long flags;
	u32 val;

	if (enable)
		val = DMA_CTRL_DESC_TMOUT_EN_V31 |
		      FIELD_PREP(DMA_CTRL_DESC_TMOUT_CNT_V31, tcnt);
	else
		val = 0;

	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_update_bits(d, mask, val, DMA_CTRL);
	spin_unlock_irqrestore(&d->dev_lock, flags);
}

static void ldma_dev_dburst_wr_cfg(struct ldma_dev *d, bool enable)
{
	unsigned long flags;
	u32 mask, val;

	if (d->inst->type != DMA_TYPE_RX && d->inst->type != DMA_TYPE_MCPY)
		return;

	mask = DMA_CTRL_DBURST_WR;
	val = enable ? DMA_CTRL_DBURST_WR : 0;

	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_update_bits(d, mask, val, DMA_CTRL);
	spin_unlock_irqrestore(&d->dev_lock, flags);
}

static void ldma_dev_vld_fetch_ack_cfg(struct ldma_dev *d, bool enable)
{
	unsigned long flags;
	u32 mask, val;

	if (d->inst->type != DMA_TYPE_TX)
		return;

	mask = DMA_CTRL_VLD_DF_ACK;
	val = enable ? DMA_CTRL_VLD_DF_ACK : 0;

	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_update_bits(d, mask, val, DMA_CTRL);
	spin_unlock_irqrestore(&d->dev_lock, flags);
}

static void ldma_dev_drb_cfg(struct ldma_dev *d, int enable)
{
	unsigned long flags;
	u32 mask = DMA_CTRL_DRB;
	u32 val = enable ? DMA_CTRL_DRB : 0;

	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_update_bits(d, mask, val, DMA_CTRL);
	spin_unlock_irqrestore(&d->dev_lock, flags);
}

static int ldma_dev_cfg(struct ldma_dev *d)
{
	bool enable;

	ldma_dev_pkt_arb_cfg(d, true);
	ldma_dev_global_polling_enable(d);

	enable = !!(d->flags & DMA_DFT_DRB);
	ldma_dev_drb_cfg(d, enable);

	enable = !!(d->flags & DMA_EN_BYTE_EN);
	ldma_dev_byte_enable_cfg(d, enable);

	enable = !!(d->flags & DMA_CHAN_FLOW_CTL);
	ldma_dev_chan_flow_ctl_cfg(d, enable);

	enable = !!(d->flags & DMA_DESC_FOD);
	ldma_dev_desc_fetch_on_demand_cfg(d, enable);

	enable = !!(d->flags & DMA_DESC_IN_SRAM);
	ldma_dev_sram_desc_cfg(d, enable);

	enable = !!(d->flags & DMA_DBURST_WR);
	ldma_dev_dburst_wr_cfg(d, enable);

	enable = !!(d->flags & DMA_VALID_DESC_FETCH_ACK);
	ldma_dev_vld_fetch_ack_cfg(d, enable);

	if (d->ver > DMA_VER22) {
		ldma_dev_orrc_cfg(d);
		ldma_dev_df_tout_cfg(d, true, DMA_DFT_DESC_TCNT);
	}

	dev_dbg(d->dev, "%s Controller 0x%08x configuration done\n",
		d->inst->name, readl(d->base + DMA_CTRL));

	return 0;
}
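
/*
 * Capability-flag to register-bit mapping used above:
 * DMA_DFT_DRB -> DMA_CTRL_DRB, DMA_EN_BYTE_EN -> DMA_CTRL_ENBE,
 * DMA_CHAN_FLOW_CTL -> DMA_CTRL_CH_FL, DMA_DESC_FOD -> DMA_CTRL_DS_FOD,
 * DMA_DESC_IN_SRAM -> DMA_CTRL_DSRAM_PATH, DMA_DBURST_WR -> DMA_CTRL_DBURST_WR,
 * DMA_VALID_DESC_FETCH_ACK -> DMA_CTRL_VLD_DF_ACK.
 */
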
static int ldma_chan_cctrl_cfg(struct ldma_chan *c, u32 val)
{
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	u32 class_low, class_high;
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
	reg = readl(d->base + DMA_CCTRL);
	/* Read from hardware */
	if (reg & DMA_CCTRL_DIR_TX)
		c->flags |= DMA_TX_CH;
	else
		c->flags |= DMA_RX_CH;

	/* Keep the class value unchanged */
	class_low = FIELD_GET(DMA_CCTRL_CLASS, reg);
	class_high = FIELD_GET(DMA_CCTRL_CLASSH, reg);
	val &= ~DMA_CCTRL_CLASS;
	val |= FIELD_PREP(DMA_CCTRL_CLASS, class_low);
	val &= ~DMA_CCTRL_CLASSH;
	val |= FIELD_PREP(DMA_CCTRL_CLASSH, class_high);
	writel(val, d->base + DMA_CCTRL);
	spin_unlock_irqrestore(&d->dev_lock, flags);

	return 0;
}

static void ldma_chan_irq_init(struct ldma_chan *c)
{
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	unsigned long flags;
	u32 enofs, crofs;
	u32 cn_bit;

	if (c->nr < MAX_LOWER_CHANS) {
		enofs = DMA_IRNEN;
		crofs = DMA_IRNCR;
	} else {
		enofs = DMA_IRNEN1;
		crofs = DMA_IRNCR1;
	}

	cn_bit = BIT(c->nr & MASK_LOWER_CHANS);
	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);

	/* Clear all interrupts and disable them */
	writel(0, d->base + DMA_CIE);
	writel(DMA_CI_ALL, d->base + DMA_CIS);

	ldma_update_bits(d, cn_bit, 0, enofs);
	writel(cn_bit, d->base + crofs);
	spin_unlock_irqrestore(&d->dev_lock, flags);
}

static void ldma_chan_set_class(struct ldma_chan *c, u32 val)
{
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	u32 class_val;

	if (d->inst->type == DMA_TYPE_MCPY || val > DMA_MAX_CLASS)
		return;

	/* 3 low bits of the class */
	class_val = FIELD_PREP(DMA_CCTRL_CLASS, val & 0x7);
	/* 2 high bits of the class */
	class_val |= FIELD_PREP(DMA_CCTRL_CLASSH, (val >> 3) & 0x3);

	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
	ldma_update_bits(d, DMA_CCTRL_CLASS | DMA_CCTRL_CLASSH, class_val,
			 DMA_CCTRL);
}

static int ldma_chan_on(struct ldma_chan *c)
{
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	unsigned long flags;

	/* If descriptors are not configured, do not allow the channel on */
	if (WARN_ON(!c->desc_init))
		return -EINVAL;

	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
	ldma_update_bits(d, DMA_CCTRL_ON, DMA_CCTRL_ON, DMA_CCTRL);
	spin_unlock_irqrestore(&d->dev_lock, flags);

	c->onoff = DMA_CH_ON;

	return 0;
}

static int ldma_chan_off(struct ldma_chan *c)
{
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	unsigned long flags;
	u32 val;
	int ret;

	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
	ldma_update_bits(d, DMA_CCTRL_ON, 0, DMA_CCTRL);
	spin_unlock_irqrestore(&d->dev_lock, flags);

	ret = readl_poll_timeout_atomic(d->base + DMA_CCTRL, val,
					!(val & DMA_CCTRL_ON), 0, 10000);
	if (ret)
		return ret;

	c->onoff = DMA_CH_OFF;

	return 0;
}

static void ldma_chan_desc_hw_cfg(struct ldma_chan *c, dma_addr_t desc_base,
				  int desc_num)
{
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	unsigned long flags;

	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
	writel(lower_32_bits(desc_base), d->base + DMA_CDBA);

	/* Higher 4 bits of 36 bit addressing */
	if (IS_ENABLED(CONFIG_64BIT)) {
		u32 hi = upper_32_bits(desc_base) & HIGH_4_BITS;

		ldma_update_bits(d, DMA_CDBA_MSB,
				 FIELD_PREP(DMA_CDBA_MSB, hi), DMA_CCTRL);
	}

	writel(desc_num, d->base + DMA_CDLEN);
	spin_unlock_irqrestore(&d->dev_lock, flags);

	c->desc_init = true;
}

static struct dma_async_tx_descriptor *
ldma_chan_desc_cfg(struct dma_chan *chan, dma_addr_t desc_base, int desc_num)
{
	struct ldma_chan *c = to_ldma_chan(chan);
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	struct dma_async_tx_descriptor *tx;
	struct dw2_desc_sw *ds;

	if (!desc_num) {
		dev_err(d->dev, "Channel %d must allocate descriptor first\n",
			c->nr);
		return NULL;
	}

	if (desc_num > DMA_MAX_DESC_NUM) {
		dev_err(d->dev, "Channel %d descriptor number out of range %d\n",
			c->nr, desc_num);
		return NULL;
	}

	ldma_chan_desc_hw_cfg(c, desc_base, desc_num);

	c->flags |= DMA_HW_DESC;
	c->desc_cnt = desc_num;
	c->desc_phys = desc_base;

	ds = kzalloc(sizeof(*ds), GFP_NOWAIT);
	if (!ds)
		return NULL;

	tx = &ds->vdesc.tx;
	dma_async_tx_descriptor_init(tx, chan);

	return tx;
}

static int ldma_chan_reset(struct ldma_chan *c)
{
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	unsigned long flags;
	u32 val;
	int ret;

	ret = ldma_chan_off(c);
	if (ret)
		return ret;

	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
	ldma_update_bits(d, DMA_CCTRL_RST, DMA_CCTRL_RST, DMA_CCTRL);
	spin_unlock_irqrestore(&d->dev_lock, flags);

	ret = readl_poll_timeout_atomic(d->base + DMA_CCTRL, val,
					!(val & DMA_CCTRL_RST), 0, 10000);
	if (ret)
		return ret;

	c->desc_init = false;

	return 0;
}

static void ldma_chan_byte_offset_cfg(struct ldma_chan *c, u32 boff_len)
{
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	u32 mask = DMA_C_BOFF_EN | DMA_C_BOFF_BOF_LEN;
	u32 val;

	if (boff_len > 0 && boff_len <= DMA_CHAN_BOFF_MAX)
		val = FIELD_PREP(DMA_C_BOFF_BOF_LEN, boff_len) | DMA_C_BOFF_EN;
	else
		val = 0;

	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
	ldma_update_bits(d, mask, val, DMA_C_BOFF);
}

static void ldma_chan_data_endian_cfg(struct ldma_chan *c, bool enable,
				      u32 endian_type)
{
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	u32 mask = DMA_C_END_DE_EN | DMA_C_END_DATAENDI;
	u32 val;

	if (enable)
		val = DMA_C_END_DE_EN | FIELD_PREP(DMA_C_END_DATAENDI, endian_type);
	else
		val = 0;

	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
	ldma_update_bits(d, mask, val, DMA_C_ENDIAN);
}

static void ldma_chan_desc_endian_cfg(struct ldma_chan *c, bool enable,
				      u32 endian_type)
{
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	u32 mask = DMA_C_END_DES_EN | DMA_C_END_DESENDI;
	u32 val;

	if (enable)
		val = DMA_C_END_DES_EN | FIELD_PREP(DMA_C_END_DESENDI, endian_type);
	else
		val = 0;

	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
	ldma_update_bits(d, mask, val, DMA_C_ENDIAN);
}

static void ldma_chan_hdr_mode_cfg(struct ldma_chan *c, u32 hdr_len, bool csum)
{
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	u32 mask, val;

	/* NB: when checksum is disabled, the header length must be provided */
	if (!csum && (!hdr_len || hdr_len > DMA_HDR_LEN_MAX))
		return;

	mask = DMA_C_HDRM_HDR_SUM;
	val = DMA_C_HDRM_HDR_SUM;

	if (!csum && hdr_len)
		val = hdr_len;

	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
	ldma_update_bits(d, mask, val, DMA_C_HDRM);
}

static void ldma_chan_rxwr_np_cfg(struct ldma_chan *c, bool enable)
{
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	u32 mask, val;

	/* Only valid for RX channel */
	if (ldma_chan_tx(c))
		return;

	mask = DMA_CCTRL_WR_NP_EN;
	val = enable ? DMA_CCTRL_WR_NP_EN : 0;

	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
	ldma_update_bits(d, mask, val, DMA_CCTRL);
}

static void ldma_chan_abc_cfg(struct ldma_chan *c, bool enable)
{
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	u32 mask, val;

	if (d->ver < DMA_VER32 || ldma_chan_tx(c))
		return;

	mask = DMA_CCTRL_CH_ABC;
	val = enable ? DMA_CCTRL_CH_ABC : 0;

	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
	ldma_update_bits(d, mask, val, DMA_CCTRL);
}
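
/*
 * Port burst-length encoding differs between hardware revisions: DMA 2.2
 * programs the 2-bit TXBL/RXBL fields directly (log2 encoded by the callers),
 * while later revisions use the dedicated BL16/BL32 bits and fall back to the
 * 8 DWORD setting otherwise, as handled in ldma_port_cfg() below.
 */
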
static int ldma_port_cfg(struct ldma_port *p)
{
	unsigned long flags;
	struct ldma_dev *d;
	u32 reg;

	d = p->ldev;
	reg = FIELD_PREP(DMA_PCTRL_TXENDI, p->txendi);
	reg |= FIELD_PREP(DMA_PCTRL_RXENDI, p->rxendi);

	if (d->ver == DMA_VER22) {
		reg |= FIELD_PREP(DMA_PCTRL_TXBL, p->txbl);
		reg |= FIELD_PREP(DMA_PCTRL_RXBL, p->rxbl);
	} else {
		reg |= FIELD_PREP(DMA_PCTRL_PDEN, p->pkt_drop);

		if (p->txbl == DMA_BURSTL_32DW)
			reg |= DMA_PCTRL_TXBL32;
		else if (p->txbl == DMA_BURSTL_16DW)
			reg |= DMA_PCTRL_TXBL16;
		else
			reg |= FIELD_PREP(DMA_PCTRL_TXBL, DMA_PCTRL_TXBL_8);

		if (p->rxbl == DMA_BURSTL_32DW)
			reg |= DMA_PCTRL_RXBL32;
		else if (p->rxbl == DMA_BURSTL_16DW)
			reg |= DMA_PCTRL_RXBL16;
		else
			reg |= FIELD_PREP(DMA_PCTRL_RXBL, DMA_PCTRL_RXBL_8);
	}

	spin_lock_irqsave(&d->dev_lock, flags);
	writel(p->portid, d->base + DMA_PS);
	writel(reg, d->base + DMA_PCTRL);
	spin_unlock_irqrestore(&d->dev_lock, flags);

	reg = readl(d->base + DMA_PCTRL); /* read back */
	dev_dbg(d->dev, "Port Control 0x%08x configuration done\n", reg);

	return 0;
}

static int ldma_chan_cfg(struct ldma_chan *c)
{
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	unsigned long flags;
	u32 reg;

	reg = c->pden ? DMA_CCTRL_PDEN : 0;
	reg |= c->onoff ? DMA_CCTRL_ON : 0;
	reg |= c->rst ? DMA_CCTRL_RST : 0;

	ldma_chan_cctrl_cfg(c, reg);
	ldma_chan_irq_init(c);

	if (d->ver <= DMA_VER22)
		return 0;

	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_chan_set_class(c, c->nr);
	ldma_chan_byte_offset_cfg(c, c->boff_len);
	ldma_chan_data_endian_cfg(c, c->data_endian_en, c->data_endian);
	ldma_chan_desc_endian_cfg(c, c->desc_endian_en, c->desc_endian);
	ldma_chan_hdr_mode_cfg(c, c->hdrm_len, c->hdrm_csum);
	ldma_chan_rxwr_np_cfg(c, c->desc_rx_np);
	ldma_chan_abc_cfg(c, c->abc_en);
	spin_unlock_irqrestore(&d->dev_lock, flags);

	if (ldma_chan_is_hw_desc(c))
		ldma_chan_desc_hw_cfg(c, c->desc_phys, c->desc_cnt);

	return 0;
}

static void ldma_dev_init(struct ldma_dev *d)
{
	unsigned long ch_mask = (unsigned long)d->channels_mask;
	struct ldma_port *p;
	struct ldma_chan *c;
	int i;
	u32 j;

	spin_lock_init(&d->dev_lock);
	ldma_dev_reset(d);
	ldma_dev_cfg(d);

	/* DMA port initialization */
	for (i = 0; i < d->port_nrs; i++) {
		p = &d->ports[i];
		ldma_port_cfg(p);
	}

	/* DMA channel initialization */
	for_each_set_bit(j, &ch_mask, d->chan_nrs) {
		c = &d->chans[j];
		ldma_chan_cfg(c);
	}
}

static int ldma_parse_dt(struct ldma_dev *d)
{
	struct fwnode_handle *fwnode = dev_fwnode(d->dev);
	struct ldma_port *p;
	int i;

	if (fwnode_property_read_bool(fwnode, "intel,dma-byte-en"))
		d->flags |= DMA_EN_BYTE_EN;

	if (fwnode_property_read_bool(fwnode, "intel,dma-dburst-wr"))
		d->flags |= DMA_DBURST_WR;

	if (fwnode_property_read_bool(fwnode, "intel,dma-drb"))
		d->flags |= DMA_DFT_DRB;

	if (fwnode_property_read_u32(fwnode, "intel,dma-poll-cnt",
				     &d->pollcnt))
		d->pollcnt = DMA_DFT_POLL_CNT;

	if (d->inst->chan_fc)
		d->flags |= DMA_CHAN_FLOW_CTL;

	if (d->inst->desc_fod)
		d->flags |= DMA_DESC_FOD;

	if (d->inst->desc_in_sram)
		d->flags |= DMA_DESC_IN_SRAM;

	if (d->inst->valid_desc_fetch_ack)
		d->flags |= DMA_VALID_DESC_FETCH_ACK;

	if (d->ver > DMA_VER22) {
		if (!d->port_nrs)
			return -EINVAL;

		for (i = 0; i < d->port_nrs; i++) {
			p = &d->ports[i];
			p->rxendi = DMA_DFT_ENDIAN;
			p->txendi = DMA_DFT_ENDIAN;
			p->rxbl = DMA_DFT_BURST;
			p->txbl = DMA_DFT_BURST;
			p->pkt_drop = DMA_PKT_DROP_DIS;
		}
	}

	return 0;
}

static void dma_free_desc_resource(struct virt_dma_desc *vdesc)
{
	struct dw2_desc_sw *ds = to_lgm_dma_desc(vdesc);
	struct ldma_chan *c = ds->chan;

	dma_pool_free(c->desc_pool, ds->desc_hw, ds->desc_phys);
	kfree(ds);
}

static struct dw2_desc_sw *
dma_alloc_desc_resource(int num, struct ldma_chan *c)
{
	struct device *dev = c->vchan.chan.device->dev;
	struct dw2_desc_sw *ds;

	if (num > c->desc_num) {
		dev_err(dev, "sg num %d exceed max %d\n", num, c->desc_num);
		return NULL;
	}

	ds = kzalloc(sizeof(*ds), GFP_NOWAIT);
	if (!ds)
		return NULL;

	ds->chan = c;
	ds->desc_hw = dma_pool_zalloc(c->desc_pool, GFP_ATOMIC,
				      &ds->desc_phys);
	if (!ds->desc_hw) {
		dev_dbg(dev, "out of memory for link descriptor\n");
		kfree(ds);
		return NULL;
	}
	ds->desc_cnt = num;

	return ds;
}

static void ldma_chan_irq_en(struct ldma_chan *c)
{
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	unsigned long flags;

	spin_lock_irqsave(&d->dev_lock, flags);
	writel(c->nr, d->base + DMA_CS);
	writel(DMA_CI_EOP, d->base + DMA_CIE);
	writel(BIT(c->nr), d->base + DMA_IRNEN);
	spin_unlock_irqrestore(&d->dev_lock, flags);
}
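
/*
 * For DMA 2.2 the descriptor base of the next pending virtual descriptor is
 * programmed from issue_pending(): the per-channel EOP interrupt is enabled
 * and completion handling is deferred to dma_work() via the ordered
 * workqueue.
 */
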
static void ldma_issue_pending(struct dma_chan *chan)
{
	struct ldma_chan *c = to_ldma_chan(chan);
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	unsigned long flags;

	if (d->ver == DMA_VER22) {
		spin_lock_irqsave(&c->vchan.lock, flags);
		if (vchan_issue_pending(&c->vchan)) {
			struct virt_dma_desc *vdesc;

			/* Get the next descriptor */
			vdesc = vchan_next_desc(&c->vchan);
			if (!vdesc) {
				c->ds = NULL;
				spin_unlock_irqrestore(&c->vchan.lock, flags);
				return;
			}
			list_del(&vdesc->node);
			c->ds = to_lgm_dma_desc(vdesc);
			ldma_chan_desc_hw_cfg(c, c->ds->desc_phys, c->ds->desc_cnt);
			ldma_chan_irq_en(c);
		}
		spin_unlock_irqrestore(&c->vchan.lock, flags);
	}
	ldma_chan_on(c);
}

static void ldma_synchronize(struct dma_chan *chan)
{
	struct ldma_chan *c = to_ldma_chan(chan);

	/*
	 * Clear any pending work if there is any. In that case the
	 * descriptor resource needs to be freed here.
	 */
	cancel_work_sync(&c->work);
	vchan_synchronize(&c->vchan);
	if (c->ds)
		dma_free_desc_resource(&c->ds->vdesc);
}

static int ldma_terminate_all(struct dma_chan *chan)
{
	struct ldma_chan *c = to_ldma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vchan.lock, flags);
	vchan_get_all_descriptors(&c->vchan, &head);
	spin_unlock_irqrestore(&c->vchan.lock, flags);
	vchan_dma_desc_free_list(&c->vchan, &head);

	return ldma_chan_reset(c);
}

static int ldma_resume_chan(struct dma_chan *chan)
{
	struct ldma_chan *c = to_ldma_chan(chan);

	return ldma_chan_on(c);
}

static int ldma_pause_chan(struct dma_chan *chan)
{
	struct ldma_chan *c = to_ldma_chan(chan);

	return ldma_chan_off(c);
}

static enum dma_status
ldma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	       struct dma_tx_state *txstate)
{
	struct ldma_chan *c = to_ldma_chan(chan);
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	enum dma_status status = DMA_COMPLETE;

	if (d->ver == DMA_VER22)
		status = dma_cookie_status(chan, cookie, txstate);

	return status;
}

static void dma_chan_irq(int irq, void *data)
{
	struct ldma_chan *c = data;
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	u32 stat;

	/* Disable channel interrupts */
	writel(c->nr, d->base + DMA_CS);
	stat = readl(d->base + DMA_CIS);
	if (!stat)
		return;

	writel(readl(d->base + DMA_CIE) & ~DMA_CI_ALL, d->base + DMA_CIE);
	writel(stat, d->base + DMA_CIS);
	queue_work(d->wq, &c->work);
}
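
/*
 * Top half / bottom half split: dma_interrupt() masks and acknowledges the
 * per-channel bits in DMA_IRNEN/DMA_IRNCR, dma_chan_irq() clears the channel
 * interrupt status, and the actual completion work runs from the workqueue.
 */
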
static irqreturn_t dma_interrupt(int irq, void *dev_id)
{
	struct ldma_dev *d = dev_id;
	struct ldma_chan *c;
	unsigned long irncr;
	u32 cid;

	irncr = readl(d->base + DMA_IRNCR);
	if (!irncr) {
		dev_err(d->dev, "dummy interrupt\n");
		return IRQ_NONE;
	}

	for_each_set_bit(cid, &irncr, d->chan_nrs) {
		/* Mask the channel interrupt */
		writel(readl(d->base + DMA_IRNEN) & ~BIT(cid), d->base + DMA_IRNEN);
		/* Acknowledge it */
		writel(readl(d->base + DMA_IRNCR) | BIT(cid), d->base + DMA_IRNCR);

		c = &d->chans[cid];
		dma_chan_irq(irq, c);
	}

	return IRQ_HANDLED;
}

static void prep_slave_burst_len(struct ldma_chan *c)
{
	struct ldma_port *p = c->port;
	struct dma_slave_config *cfg = &c->config;

	if (cfg->dst_maxburst)
		cfg->src_maxburst = cfg->dst_maxburst;

	/* TX and RX have the same burst length */
	p->txbl = ilog2(cfg->src_maxburst);
	p->rxbl = p->txbl;
}
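
/*
 * Scatterlist mapping for DMA 2.2: segments larger than DMA_MAX_SIZE are
 * split into multiple hardware descriptors, SOP is set only on the first
 * descriptor and EOP only on the last one, and only 32-bit buffer addresses
 * are supported. For newer controllers the descriptor base is simply handed
 * to the hardware via ldma_chan_desc_cfg().
 */
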
static struct dma_async_tx_descriptor *
ldma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		   unsigned int sglen, enum dma_transfer_direction dir,
		   unsigned long flags, void *context)
{
	struct ldma_chan *c = to_ldma_chan(chan);
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	size_t len, avail, total = 0;
	struct dw2_desc *hw_ds;
	struct dw2_desc_sw *ds;
	struct scatterlist *sg;
	int num = sglen, i;
	dma_addr_t addr;

	if (!sgl)
		return NULL;

	if (d->ver > DMA_VER22)
		return ldma_chan_desc_cfg(chan, sgl->dma_address, sglen);

	for_each_sg(sgl, sg, sglen, i) {
		avail = sg_dma_len(sg);
		if (avail > DMA_MAX_SIZE)
			num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
	}

	ds = dma_alloc_desc_resource(num, c);
	if (!ds)
		return NULL;

	c->ds = ds;
	num = 0;

	/* SOP and EOP have to be handled carefully */
	for_each_sg(sgl, sg, sglen, i) {
		addr = sg_dma_address(sg);
		avail = sg_dma_len(sg);
		total += avail;

		do {
			len = min_t(size_t, avail, DMA_MAX_SIZE);

			hw_ds = &ds->desc_hw[num];
			switch (sglen) {
			case 1:
				hw_ds->field &= ~DESC_SOP;
				hw_ds->field |= FIELD_PREP(DESC_SOP, 1);

				hw_ds->field &= ~DESC_EOP;
				hw_ds->field |= FIELD_PREP(DESC_EOP, 1);
				break;
			default:
				if (num == 0) {
					hw_ds->field &= ~DESC_SOP;
					hw_ds->field |= FIELD_PREP(DESC_SOP, 1);

					hw_ds->field &= ~DESC_EOP;
					hw_ds->field |= FIELD_PREP(DESC_EOP, 0);
				} else if (num == (sglen - 1)) {
					hw_ds->field &= ~DESC_SOP;
					hw_ds->field |= FIELD_PREP(DESC_SOP, 0);
					hw_ds->field &= ~DESC_EOP;
					hw_ds->field |= FIELD_PREP(DESC_EOP, 1);
				} else {
					hw_ds->field &= ~DESC_SOP;
					hw_ds->field |= FIELD_PREP(DESC_SOP, 0);

					hw_ds->field &= ~DESC_EOP;
					hw_ds->field |= FIELD_PREP(DESC_EOP, 0);
				}
				break;
			}

			/* Only 32 bit address supported */
			hw_ds->addr = (u32)addr;

			hw_ds->field &= ~DESC_DATA_LEN;
			hw_ds->field |= FIELD_PREP(DESC_DATA_LEN, len);

			hw_ds->field &= ~DESC_C;
			hw_ds->field |= FIELD_PREP(DESC_C, 0);

			hw_ds->field &= ~DESC_BYTE_OFF;
			hw_ds->field |= FIELD_PREP(DESC_BYTE_OFF, addr & 0x3);

			/* Ensure data ready before ownership change */
			wmb();
			hw_ds->field &= ~DESC_OWN;
			hw_ds->field |= FIELD_PREP(DESC_OWN, DMA_OWN);

			/* Ensure ownership changed before moving forward */
			wmb();
			num++;
			addr += len;
			avail -= len;
		} while (avail);
	}

	ds->size = total;
	prep_slave_burst_len(c);

	return vchan_tx_prep(&c->vchan, &ds->vdesc, DMA_CTRL_ACK);
}

static int
ldma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
{
	struct ldma_chan *c = to_ldma_chan(chan);

	memcpy(&c->config, cfg, sizeof(c->config));

	return 0;
}

static int ldma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ldma_chan *c = to_ldma_chan(chan);
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	struct device *dev = c->vchan.chan.device->dev;
	size_t desc_sz;

	if (d->ver > DMA_VER22) {
		c->flags |= CHAN_IN_USE;
		return 0;
	}

	if (c->desc_pool)
		return c->desc_num;

	desc_sz = c->desc_num * sizeof(struct dw2_desc);
	c->desc_pool = dma_pool_create(c->name, dev, desc_sz,
				       __alignof__(struct dw2_desc), 0);

	if (!c->desc_pool) {
		dev_err(dev, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	return c->desc_num;
}

static void ldma_free_chan_resources(struct dma_chan *chan)
{
	struct ldma_chan *c = to_ldma_chan(chan);
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);

	if (d->ver == DMA_VER22) {
		dma_pool_destroy(c->desc_pool);
		c->desc_pool = NULL;
		vchan_free_chan_resources(to_virt_chan(chan));
	} else {
		c->flags &= ~CHAN_IN_USE;
	}
}

static void dma_work(struct work_struct *work)
{
	struct ldma_chan *c = container_of(work, struct ldma_chan, work);
	struct dma_async_tx_descriptor *tx = &c->ds->vdesc.tx;
	struct virt_dma_chan *vc = &c->vchan;
	struct dmaengine_desc_callback cb;
	struct virt_dma_desc *vd, *_vd;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vchan.lock, flags);
	list_splice_tail_init(&vc->desc_completed, &head);
	spin_unlock_irqrestore(&c->vchan.lock, flags);
	dmaengine_desc_get_callback(tx, &cb);
	dma_cookie_complete(tx);
	dmaengine_desc_callback_invoke(&cb, NULL);

	list_for_each_entry_safe(vd, _vd, &head, node) {
		dmaengine_desc_get_callback(tx, &cb);
		dma_cookie_complete(tx);
		list_del(&vd->node);
		dmaengine_desc_callback_invoke(&cb, NULL);

		vchan_vdesc_fini(vd);
	}
	c->ds = NULL;
}

static void
update_burst_len_v22(struct ldma_chan *c, struct ldma_port *p, u32 burst)
{
	if (ldma_chan_tx(c))
		p->txbl = ilog2(burst);
	else
		p->rxbl = ilog2(burst);
}

static void
update_burst_len_v3X(struct ldma_chan *c, struct ldma_port *p, u32 burst)
{
	if (ldma_chan_tx(c))
		p->txbl = burst;
	else
		p->rxbl = burst;
}

static int
update_client_configs(struct of_dma *ofdma, struct of_phandle_args *spec)
{
	struct ldma_dev *d = ofdma->of_dma_data;
	u32 chan_id = spec->args[0];
	u32 port_id = spec->args[1];
	u32 burst = spec->args[2];
	struct ldma_port *p;
	struct ldma_chan *c;

	if (chan_id >= d->chan_nrs || port_id >= d->port_nrs)
		return 0;

	p = &d->ports[port_id];
	c = &d->chans[chan_id];
	c->port = p;

	if (d->ver == DMA_VER22)
		update_burst_len_v22(c, p, burst);
	else
		update_burst_len_v3X(c, p, burst);

	ldma_port_cfg(p);

	return 1;
}

static struct dma_chan *ldma_xlate(struct of_phandle_args *spec,
				   struct of_dma *ofdma)
{
	struct ldma_dev *d = ofdma->of_dma_data;
	u32 chan_id = spec->args[0];
	int ret;

	if (!spec->args_count)
		return NULL;

	/* if args_count is 1, the driver uses the default settings */
	if (spec->args_count > 1) {
		ret = update_client_configs(ofdma, spec);
		if (!ret)
			return NULL;
	}

	return dma_get_slave_channel(&d->chans[chan_id].vchan.chan);
}
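
/*
 * Illustrative client binding sketch (not taken from the bindings document):
 * based on ldma_xlate() and update_client_configs(), a consumer references a
 * channel as
 *
 *	dmas = <&dma_ctrl CHAN PORT BURST>;
 *
 * where only the channel cell is mandatory; when more than one cell is
 * supplied, the port and burst length are applied via
 * update_client_configs().
 */
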
static void ldma_dma_init_v22(int i, struct ldma_dev *d)
{
	struct ldma_chan *c;

	c = &d->chans[i];
	c->nr = i; /* Real channel number */
	c->rst = DMA_CHAN_RST;
	c->desc_num = DMA_DFT_DESC_NUM;
	snprintf(c->name, sizeof(c->name), "chan%d", c->nr);
	INIT_WORK(&c->work, dma_work);
	c->vchan.desc_free = dma_free_desc_resource;
	vchan_init(&c->vchan, &d->dma_dev);
}

static void ldma_dma_init_v3X(int i, struct ldma_dev *d)
{
	struct ldma_chan *c;

	c = &d->chans[i];
	c->nr = i; /* Real channel number */
	c->data_endian = DMA_DFT_ENDIAN;
	c->desc_endian = DMA_DFT_ENDIAN;
	c->data_endian_en = false;
	c->desc_endian_en = false;
	c->desc_rx_np = false;
	c->flags |= DEVICE_ALLOC_DESC;
	c->onoff = DMA_CH_OFF;
	c->rst = DMA_CHAN_RST;
	c->hdrm_csum = false;
	c->vchan.desc_free = dma_free_desc_resource;
	vchan_init(&c->vchan, &d->dma_dev);
}

static int ldma_init_v22(struct ldma_dev *d, struct platform_device *pdev)
{
	int ret;

	ret = device_property_read_u32(d->dev, "dma-channels", &d->chan_nrs);
	if (ret < 0) {
		dev_err(d->dev, "unable to read dma-channels property\n");
		return ret;
	}

	d->irq = platform_get_irq(pdev, 0);
	if (d->irq < 0)
		return d->irq;

	ret = devm_request_irq(&pdev->dev, d->irq, dma_interrupt, 0,
			       DRIVER_NAME, d);
	if (ret)
		return ret;

	d->wq = alloc_ordered_workqueue("dma_wq", WQ_MEM_RECLAIM |
					WQ_HIGHPRI);
	if (!d->wq)
		return -ENOMEM;

	return 0;
}

static void ldma_clk_disable(void *data)
{
	struct ldma_dev *d = data;

	clk_disable_unprepare(d->core_clk);
	reset_control_assert(d->rst);
}

static const struct ldma_inst_data dma0 = {
	.desc_in_sram = false,
	.valid_desc_fetch_ack = false,
};

static const struct ldma_inst_data dma2tx = {
	.type = DMA_TYPE_TX,
	.desc_in_sram = true,
	.valid_desc_fetch_ack = true,
};

static const struct ldma_inst_data dma1rx = {
	.type = DMA_TYPE_RX,
	.desc_in_sram = true,
	.valid_desc_fetch_ack = false,
};

static const struct ldma_inst_data dma1tx = {
	.type = DMA_TYPE_TX,
	.desc_in_sram = true,
	.valid_desc_fetch_ack = true,
};

static const struct ldma_inst_data dma0tx = {
	.type = DMA_TYPE_TX,
	.desc_in_sram = true,
	.valid_desc_fetch_ack = true,
};

static const struct ldma_inst_data dma3 = {
	.type = DMA_TYPE_MCPY,
	.desc_in_sram = true,
	.valid_desc_fetch_ack = false,
};

static const struct ldma_inst_data toe_dma30 = {
	.name = "toe_dma30",
	.type = DMA_TYPE_MCPY,
	.desc_in_sram = true,
	.valid_desc_fetch_ack = true,
};

static const struct ldma_inst_data toe_dma31 = {
	.name = "toe_dma31",
	.type = DMA_TYPE_MCPY,
	.desc_in_sram = true,
	.valid_desc_fetch_ack = true,
};
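
/*
 * Each ldma_inst_data instance describes the fixed capabilities of one DMA
 * controller instance (type, descriptors in SRAM, valid descriptor fetch
 * acknowledge, ...); it is selected through the of_device_id table below and
 * consumed by ldma_parse_dt() and the ldma_dev_*_cfg() helpers.
 */
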
static const struct of_device_id intel_ldma_match[] = {
	{ .compatible = "intel,lgm-cdma", .data = &dma0},
	{ .compatible = "intel,lgm-dma2tx", .data = &dma2tx},
	{ .compatible = "intel,lgm-dma1rx", .data = &dma1rx},
	{ .compatible = "intel,lgm-dma1tx", .data = &dma1tx},
	{ .compatible = "intel,lgm-dma0tx", .data = &dma0tx},
	{ .compatible = "intel,lgm-dma3", .data = &dma3},
	{ .compatible = "intel,lgm-toe-dma30", .data = &toe_dma30},
	{ .compatible = "intel,lgm-toe-dma31", .data = &toe_dma31},
	{}
};

static int intel_ldma_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dma_device *dma_dev;
	unsigned long ch_mask;
	struct ldma_chan *c;
	struct ldma_port *p;
	struct ldma_dev *d;
	u32 id, bitn = 32, j;
	int i, ret;

	d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	/* Link controller to platform device */
	d->dev = &pdev->dev;

	d->inst = device_get_match_data(dev);
	if (!d->inst) {
		dev_err(dev, "No device match found\n");
		return -ENODEV;
	}

	d->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(d->base))
		return PTR_ERR(d->base);

	/* Power up and reset the dma engine, some DMAs always on?? */
	d->core_clk = devm_clk_get_optional(dev, NULL);
	if (IS_ERR(d->core_clk))
		return PTR_ERR(d->core_clk);

	d->rst = devm_reset_control_get_optional(dev, NULL);
	if (IS_ERR(d->rst))
		return PTR_ERR(d->rst);

	clk_prepare_enable(d->core_clk);
	reset_control_deassert(d->rst);

	ret = devm_add_action_or_reset(dev, ldma_clk_disable, d);
	if (ret) {
		dev_err(dev, "Failed to devm_add_action_or_reset, %d\n", ret);
		return ret;
	}

	id = readl(d->base + DMA_ID);
	d->chan_nrs = FIELD_GET(DMA_ID_CHNR, id);
	d->port_nrs = FIELD_GET(DMA_ID_PNR, id);
	d->ver = FIELD_GET(DMA_ID_REV, id);

	if (id & DMA_ID_AW_36B)
		d->flags |= DMA_ADDR_36BIT;

	if (IS_ENABLED(CONFIG_64BIT) && (id & DMA_ID_AW_36B))
		bitn = 36;

	if (id & DMA_ID_DW_128B)
		d->flags |= DMA_DATA_128BIT;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(bitn));
	if (ret) {
		dev_err(dev, "No usable DMA configuration\n");
		return ret;
	}

	if (d->ver == DMA_VER22) {
		ret = ldma_init_v22(d, pdev);
		if (ret)
			return ret;
	}

	ret = device_property_read_u32(dev, "dma-channel-mask", &d->channels_mask);
	if (ret < 0)
		d->channels_mask = GENMASK(d->chan_nrs - 1, 0);

	dma_dev = &d->dma_dev;

	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

	/* Channel initializations */
	INIT_LIST_HEAD(&dma_dev->channels);

	/* Port Initializations */
	d->ports = devm_kcalloc(dev, d->port_nrs, sizeof(*p), GFP_KERNEL);
	if (!d->ports)
		return -ENOMEM;

	/* Channels Initializations */
	d->chans = devm_kcalloc(d->dev, d->chan_nrs, sizeof(*c), GFP_KERNEL);
	if (!d->chans)
		return -ENOMEM;

	for (i = 0; i < d->port_nrs; i++) {
		p = &d->ports[i];
		p->portid = i;
		p->ldev = d;
	}

	dma_dev->dev = &pdev->dev;

	ch_mask = (unsigned long)d->channels_mask;
	for_each_set_bit(j, &ch_mask, d->chan_nrs) {
		if (d->ver == DMA_VER22)
			ldma_dma_init_v22(j, d);
		else
			ldma_dma_init_v3X(j, d);
	}

	ret = ldma_parse_dt(d);
	if (ret)
		return ret;

	dma_dev->device_alloc_chan_resources = ldma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = ldma_free_chan_resources;
	dma_dev->device_terminate_all = ldma_terminate_all;
	dma_dev->device_issue_pending = ldma_issue_pending;
	dma_dev->device_tx_status = ldma_tx_status;
	dma_dev->device_resume = ldma_resume_chan;
	dma_dev->device_pause = ldma_pause_chan;
	dma_dev->device_prep_slave_sg = ldma_prep_slave_sg;

	if (d->ver == DMA_VER22) {
		dma_dev->device_config = ldma_slave_config;
		dma_dev->device_synchronize = ldma_synchronize;
		dma_dev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
		dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
		dma_dev->directions = BIT(DMA_MEM_TO_DEV) |
				      BIT(DMA_DEV_TO_MEM);
		dma_dev->residue_granularity =
				DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	}

	platform_set_drvdata(pdev, d);

	ldma_dev_init(d);

	ret = dma_async_device_register(dma_dev);
	if (ret) {
		dev_err(dev, "Failed to register slave DMA engine device\n");
		return ret;
	}

	ret = of_dma_controller_register(pdev->dev.of_node, ldma_xlate, d);
	if (ret) {
		dev_err(dev, "Failed to register of DMA controller\n");
		dma_async_device_unregister(dma_dev);
		return ret;
	}

	dev_info(dev, "Init done - rev: %x, ports: %d channels: %d\n", d->ver,
		 d->port_nrs, d->chan_nrs);

	return 0;
}

static struct platform_driver intel_ldma_driver = {
	.probe = intel_ldma_probe,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = intel_ldma_match,
	},
};

/*
 * Register this driver at device_initcall level to make sure initialization
 * happens before its DMA clients, some of which are platform specific, and
 * also to provide registered DMA channels and DMA capabilities to those
 * clients before their own initialization.
 */
builtin_platform_driver(intel_ldma_driver);