1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
4 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/delay.h>
10 #include <linux/dmaengine.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/dmapool.h>
13 #include <linux/err.h>
14 #include <linux/init.h>
15 #include <linux/interrupt.h>
16 #include <linux/list.h>
17 #include <linux/platform_device.h>
18 #include <linux/slab.h>
19 #include <linux/spinlock.h>
20 #include <linux/sys_soc.h>
22 #include <linux/of_dma.h>
23 #include <linux/of_irq.h>
24 #include <linux/workqueue.h>
25 #include <linux/completion.h>
26 #include <linux/soc/ti/k3-ringacc.h>
27 #include <linux/soc/ti/ti_sci_protocol.h>
28 #include <linux/soc/ti/ti_sci_inta_msi.h>
29 #include <linux/dma/k3-event-router.h>
30 #include <linux/dma/ti-cppi5.h>
32 #include "../virt-dma.h"
34 #include "k3-psil-priv.h"
36 struct udma_static_tr
{
37 u8 elsize
; /* RPSTR0 */
38 u16 elcnt
; /* RPSTR0 */
39 u16 bstcnt
; /* RPSTR1 */
42 #define K3_UDMA_MAX_RFLOWS 1024
43 #define K3_UDMA_DEFAULT_RING_SIZE 16
45 /* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
46 #define UDMA_RFLOW_SRCTAG_NONE 0
47 #define UDMA_RFLOW_SRCTAG_CFG_TAG 1
48 #define UDMA_RFLOW_SRCTAG_FLOW_ID 2
49 #define UDMA_RFLOW_SRCTAG_SRC_TAG 4
51 #define UDMA_RFLOW_DSTTAG_NONE 0
52 #define UDMA_RFLOW_DSTTAG_CFG_TAG 1
53 #define UDMA_RFLOW_DSTTAG_FLOW_ID 2
54 #define UDMA_RFLOW_DSTTAG_DST_TAG_LO 4
55 #define UDMA_RFLOW_DSTTAG_DST_TAG_HI 5
73 static const char * const mmr_names
[] = {
75 [MMR_BCHANRT
] = "bchanrt",
76 [MMR_RCHANRT
] = "rchanrt",
77 [MMR_TCHANRT
] = "tchanrt",
84 struct k3_ring
*t_ring
; /* Transmit ring */
85 struct k3_ring
*tc_ring
; /* Transmit Completion ring */
86 int tflow_id
; /* applicable only for PKTDMA */
90 #define udma_bchan udma_tchan
94 struct k3_ring
*fd_ring
; /* Free Descriptor ring */
95 struct k3_ring
*r_ring
; /* Receive ring */
104 struct udma_oes_offsets
{
105 /* K3 UDMA Output Event Offset */
108 /* BCDMA Output Event Offsets */
109 u32 bcdma_bchan_data
;
110 u32 bcdma_bchan_ring
;
111 u32 bcdma_tchan_data
;
112 u32 bcdma_tchan_ring
;
113 u32 bcdma_rchan_data
;
114 u32 bcdma_rchan_ring
;
116 /* PKTDMA Output Event Offsets */
117 u32 pktdma_tchan_flow
;
118 u32 pktdma_rchan_flow
;
121 #define UDMA_FLAG_PDMA_ACC32 BIT(0)
122 #define UDMA_FLAG_PDMA_BURST BIT(1)
123 #define UDMA_FLAG_TDTYPE BIT(2)
124 #define UDMA_FLAG_BURST_SIZE BIT(3)
125 #define UDMA_FLAGS_J7_CLASS (UDMA_FLAG_PDMA_ACC32 | \
126 UDMA_FLAG_PDMA_BURST | \
128 UDMA_FLAG_BURST_SIZE)
130 struct udma_match_data
{
131 enum k3_dma_type type
;
133 bool enable_memcpy_support
;
137 struct udma_soc_data
*soc_data
;
140 struct udma_soc_data
{
141 struct udma_oes_offsets oes
;
142 u32 bcdma_trigger_event_offset
;
146 size_t cppi5_desc_size
;
147 void *cppi5_desc_vaddr
;
148 dma_addr_t cppi5_desc_paddr
;
150 /* TR descriptor internal pointers */
152 struct cppi5_tr_resp_t
*tr_resp_base
;
155 struct udma_rx_flush
{
156 struct udma_hwdesc hwdescs
[2];
160 dma_addr_t buffer_paddr
;
169 struct dma_device ddev
;
171 void __iomem
*mmrs
[MMR_LAST
];
172 const struct udma_match_data
*match_data
;
173 const struct udma_soc_data
*soc_data
;
175 struct udma_tpl bchan_tpl
;
176 struct udma_tpl tchan_tpl
;
177 struct udma_tpl rchan_tpl
;
179 size_t desc_align
; /* alignment to use for descriptors */
181 struct udma_tisci_rm tisci_rm
;
183 struct k3_ringacc
*ringacc
;
185 struct work_struct purge_work
;
186 struct list_head desc_to_purge
;
189 struct udma_rx_flush rx_flush
;
197 unsigned long *bchan_map
;
198 unsigned long *tchan_map
;
199 unsigned long *rchan_map
;
200 unsigned long *rflow_gp_map
;
201 unsigned long *rflow_gp_map_allocated
;
202 unsigned long *rflow_in_use
;
203 unsigned long *tflow_map
;
205 struct udma_bchan
*bchans
;
206 struct udma_tchan
*tchans
;
207 struct udma_rchan
*rchans
;
208 struct udma_rflow
*rflows
;
210 struct udma_chan
*channels
;
217 struct virt_dma_desc vd
;
221 enum dma_transfer_direction dir
;
223 struct udma_static_tr static_tr
;
227 unsigned int desc_idx
; /* Only used for cyclic in packet mode */
231 void *metadata
; /* pointer to provided metadata buffer (EPIP, PSdata) */
233 unsigned int hwdesc_count
;
234 struct udma_hwdesc hwdesc
[];
237 enum udma_chan_state
{
238 UDMA_CHAN_IS_IDLE
= 0, /* not active, no teardown is in progress */
239 UDMA_CHAN_IS_ACTIVE
, /* Normal operation */
240 UDMA_CHAN_IS_TERMINATING
, /* channel is being terminated */
243 struct udma_tx_drain
{
244 struct delayed_work work
;
249 struct udma_chan_config
{
250 bool pkt_mode
; /* TR or packet */
251 bool needs_epib
; /* EPIB is needed for the communication or not */
252 u32 psd_size
; /* size of Protocol Specific Data */
253 u32 metadata_size
; /* (needs_epib ? 16:0) + psd_size */
254 u32 hdesc_size
; /* Size of a packet descriptor in packet mode */
255 bool notdpkt
; /* Suppress sending TDC packet */
256 int remote_thread_id
;
261 enum psil_endpoint_type ep_type
;
264 enum udma_tp_level channel_tpl
; /* Channel Throughput Level */
267 unsigned long tx_flags
;
269 /* PKDMA mapped channel */
270 int mapped_channel_id
;
271 /* PKTDMA default tflow or rflow for mapped channel */
274 enum dma_transfer_direction dir
;
278 struct virt_dma_chan vc
;
279 struct dma_slave_config cfg
;
281 struct device
*dma_dev
;
282 struct udma_desc
*desc
;
283 struct udma_desc
*terminated_desc
;
284 struct udma_static_tr static_tr
;
287 struct udma_bchan
*bchan
;
288 struct udma_tchan
*tchan
;
289 struct udma_rchan
*rchan
;
290 struct udma_rflow
*rflow
;
300 enum udma_chan_state state
;
301 struct completion teardown_completed
;
303 struct udma_tx_drain tx_drain
;
305 /* Channel configuration parameters */
306 struct udma_chan_config config
;
307 /* Channel configuration parameters (backup) */
308 struct udma_chan_config backup_config
;
310 /* dmapool for packet mode descriptors */
312 struct dma_pool
*hdesc_pool
;
317 static inline struct udma_dev
*to_udma_dev(struct dma_device
*d
)
319 return container_of(d
, struct udma_dev
, ddev
);
322 static inline struct udma_chan
*to_udma_chan(struct dma_chan
*c
)
324 return container_of(c
, struct udma_chan
, vc
.chan
);
327 static inline struct udma_desc
*to_udma_desc(struct dma_async_tx_descriptor
*t
)
329 return container_of(t
, struct udma_desc
, vd
.tx
);
332 /* Generic register access functions */
333 static inline u32
udma_read(void __iomem
*base
, int reg
)
335 return readl(base
+ reg
);
338 static inline void udma_write(void __iomem
*base
, int reg
, u32 val
)
340 writel(val
, base
+ reg
);
343 static inline void udma_update_bits(void __iomem
*base
, int reg
,
348 orig
= readl(base
+ reg
);
353 writel(tmp
, base
+ reg
);
357 static inline u32
udma_tchanrt_read(struct udma_chan
*uc
, int reg
)
361 return udma_read(uc
->tchan
->reg_rt
, reg
);
364 static inline void udma_tchanrt_write(struct udma_chan
*uc
, int reg
, u32 val
)
368 udma_write(uc
->tchan
->reg_rt
, reg
, val
);
371 static inline void udma_tchanrt_update_bits(struct udma_chan
*uc
, int reg
,
376 udma_update_bits(uc
->tchan
->reg_rt
, reg
, mask
, val
);
380 static inline u32
udma_rchanrt_read(struct udma_chan
*uc
, int reg
)
384 return udma_read(uc
->rchan
->reg_rt
, reg
);
387 static inline void udma_rchanrt_write(struct udma_chan
*uc
, int reg
, u32 val
)
391 udma_write(uc
->rchan
->reg_rt
, reg
, val
);
394 static inline void udma_rchanrt_update_bits(struct udma_chan
*uc
, int reg
,
399 udma_update_bits(uc
->rchan
->reg_rt
, reg
, mask
, val
);
402 static int navss_psil_pair(struct udma_dev
*ud
, u32 src_thread
, u32 dst_thread
)
404 struct udma_tisci_rm
*tisci_rm
= &ud
->tisci_rm
;
406 dst_thread
|= K3_PSIL_DST_THREAD_ID_OFFSET
;
407 return tisci_rm
->tisci_psil_ops
->pair(tisci_rm
->tisci
,
408 tisci_rm
->tisci_navss_dev_id
,
409 src_thread
, dst_thread
);
412 static int navss_psil_unpair(struct udma_dev
*ud
, u32 src_thread
,
415 struct udma_tisci_rm
*tisci_rm
= &ud
->tisci_rm
;
417 dst_thread
|= K3_PSIL_DST_THREAD_ID_OFFSET
;
418 return tisci_rm
->tisci_psil_ops
->unpair(tisci_rm
->tisci
,
419 tisci_rm
->tisci_navss_dev_id
,
420 src_thread
, dst_thread
);
423 static void k3_configure_chan_coherency(struct dma_chan
*chan
, u32 asel
)
425 struct device
*chan_dev
= &chan
->dev
->device
;
428 /* No special handling for the channel */
429 chan
->dev
->chan_dma_dev
= false;
431 chan_dev
->dma_coherent
= false;
432 chan_dev
->dma_parms
= NULL
;
433 } else if (asel
== 14 || asel
== 15) {
434 chan
->dev
->chan_dma_dev
= true;
436 chan_dev
->dma_coherent
= true;
437 dma_coerce_mask_and_coherent(chan_dev
, DMA_BIT_MASK(48));
438 chan_dev
->dma_parms
= chan_dev
->parent
->dma_parms
;
440 dev_warn(chan
->device
->dev
, "Invalid ASEL value: %u\n", asel
);
442 chan_dev
->dma_coherent
= false;
443 chan_dev
->dma_parms
= NULL
;
447 static u8
udma_get_chan_tpl_index(struct udma_tpl
*tpl_map
, int chan_id
)
451 for (i
= 0; i
< tpl_map
->levels
; i
++) {
452 if (chan_id
>= tpl_map
->start_idx
[i
])
459 static void udma_reset_uchan(struct udma_chan
*uc
)
461 memset(&uc
->config
, 0, sizeof(uc
->config
));
462 uc
->config
.remote_thread_id
= -1;
463 uc
->config
.mapped_channel_id
= -1;
464 uc
->config
.default_flow_id
= -1;
465 uc
->state
= UDMA_CHAN_IS_IDLE
;
468 static void udma_dump_chan_stdata(struct udma_chan
*uc
)
470 struct device
*dev
= uc
->ud
->dev
;
474 if (uc
->config
.dir
== DMA_MEM_TO_DEV
|| uc
->config
.dir
== DMA_MEM_TO_MEM
) {
475 dev_dbg(dev
, "TCHAN State data:\n");
476 for (i
= 0; i
< 32; i
++) {
477 offset
= UDMA_CHAN_RT_STDATA_REG
+ i
* 4;
478 dev_dbg(dev
, "TRT_STDATA[%02d]: 0x%08x\n", i
,
479 udma_tchanrt_read(uc
, offset
));
483 if (uc
->config
.dir
== DMA_DEV_TO_MEM
|| uc
->config
.dir
== DMA_MEM_TO_MEM
) {
484 dev_dbg(dev
, "RCHAN State data:\n");
485 for (i
= 0; i
< 32; i
++) {
486 offset
= UDMA_CHAN_RT_STDATA_REG
+ i
* 4;
487 dev_dbg(dev
, "RRT_STDATA[%02d]: 0x%08x\n", i
,
488 udma_rchanrt_read(uc
, offset
));
493 static inline dma_addr_t
udma_curr_cppi5_desc_paddr(struct udma_desc
*d
,
496 return d
->hwdesc
[idx
].cppi5_desc_paddr
;
499 static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc
*d
, int idx
)
501 return d
->hwdesc
[idx
].cppi5_desc_vaddr
;
504 static struct udma_desc
*udma_udma_desc_from_paddr(struct udma_chan
*uc
,
507 struct udma_desc
*d
= uc
->terminated_desc
;
510 dma_addr_t desc_paddr
= udma_curr_cppi5_desc_paddr(d
,
513 if (desc_paddr
!= paddr
)
520 dma_addr_t desc_paddr
= udma_curr_cppi5_desc_paddr(d
,
523 if (desc_paddr
!= paddr
)
531 static void udma_free_hwdesc(struct udma_chan
*uc
, struct udma_desc
*d
)
533 if (uc
->use_dma_pool
) {
536 for (i
= 0; i
< d
->hwdesc_count
; i
++) {
537 if (!d
->hwdesc
[i
].cppi5_desc_vaddr
)
540 dma_pool_free(uc
->hdesc_pool
,
541 d
->hwdesc
[i
].cppi5_desc_vaddr
,
542 d
->hwdesc
[i
].cppi5_desc_paddr
);
544 d
->hwdesc
[i
].cppi5_desc_vaddr
= NULL
;
546 } else if (d
->hwdesc
[0].cppi5_desc_vaddr
) {
547 dma_free_coherent(uc
->dma_dev
, d
->hwdesc
[0].cppi5_desc_size
,
548 d
->hwdesc
[0].cppi5_desc_vaddr
,
549 d
->hwdesc
[0].cppi5_desc_paddr
);
551 d
->hwdesc
[0].cppi5_desc_vaddr
= NULL
;
555 static void udma_purge_desc_work(struct work_struct
*work
)
557 struct udma_dev
*ud
= container_of(work
, typeof(*ud
), purge_work
);
558 struct virt_dma_desc
*vd
, *_vd
;
562 spin_lock_irqsave(&ud
->lock
, flags
);
563 list_splice_tail_init(&ud
->desc_to_purge
, &head
);
564 spin_unlock_irqrestore(&ud
->lock
, flags
);
566 list_for_each_entry_safe(vd
, _vd
, &head
, node
) {
567 struct udma_chan
*uc
= to_udma_chan(vd
->tx
.chan
);
568 struct udma_desc
*d
= to_udma_desc(&vd
->tx
);
570 udma_free_hwdesc(uc
, d
);
575 /* If more to purge, schedule the work again */
576 if (!list_empty(&ud
->desc_to_purge
))
577 schedule_work(&ud
->purge_work
);
580 static void udma_desc_free(struct virt_dma_desc
*vd
)
582 struct udma_dev
*ud
= to_udma_dev(vd
->tx
.chan
->device
);
583 struct udma_chan
*uc
= to_udma_chan(vd
->tx
.chan
);
584 struct udma_desc
*d
= to_udma_desc(&vd
->tx
);
587 if (uc
->terminated_desc
== d
)
588 uc
->terminated_desc
= NULL
;
590 if (uc
->use_dma_pool
) {
591 udma_free_hwdesc(uc
, d
);
596 spin_lock_irqsave(&ud
->lock
, flags
);
597 list_add_tail(&vd
->node
, &ud
->desc_to_purge
);
598 spin_unlock_irqrestore(&ud
->lock
, flags
);
600 schedule_work(&ud
->purge_work
);
603 static bool udma_is_chan_running(struct udma_chan
*uc
)
609 trt_ctl
= udma_tchanrt_read(uc
, UDMA_CHAN_RT_CTL_REG
);
611 rrt_ctl
= udma_rchanrt_read(uc
, UDMA_CHAN_RT_CTL_REG
);
613 if (trt_ctl
& UDMA_CHAN_RT_CTL_EN
|| rrt_ctl
& UDMA_CHAN_RT_CTL_EN
)
619 static bool udma_is_chan_paused(struct udma_chan
*uc
)
623 switch (uc
->config
.dir
) {
625 val
= udma_rchanrt_read(uc
, UDMA_CHAN_RT_PEER_RT_EN_REG
);
626 pause_mask
= UDMA_PEER_RT_EN_PAUSE
;
629 val
= udma_tchanrt_read(uc
, UDMA_CHAN_RT_PEER_RT_EN_REG
);
630 pause_mask
= UDMA_PEER_RT_EN_PAUSE
;
633 val
= udma_tchanrt_read(uc
, UDMA_CHAN_RT_CTL_REG
);
634 pause_mask
= UDMA_CHAN_RT_CTL_PAUSE
;
640 if (val
& pause_mask
)
646 static inline dma_addr_t
udma_get_rx_flush_hwdesc_paddr(struct udma_chan
*uc
)
648 return uc
->ud
->rx_flush
.hwdescs
[uc
->config
.pkt_mode
].cppi5_desc_paddr
;
651 static int udma_push_to_ring(struct udma_chan
*uc
, int idx
)
653 struct udma_desc
*d
= uc
->desc
;
654 struct k3_ring
*ring
= NULL
;
657 switch (uc
->config
.dir
) {
659 ring
= uc
->rflow
->fd_ring
;
663 ring
= uc
->tchan
->t_ring
;
669 /* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */
671 paddr
= udma_get_rx_flush_hwdesc_paddr(uc
);
673 paddr
= udma_curr_cppi5_desc_paddr(d
, idx
);
675 wmb(); /* Ensure that writes are not moved over this point */
678 return k3_ringacc_ring_push(ring
, &paddr
);
681 static bool udma_desc_is_rx_flush(struct udma_chan
*uc
, dma_addr_t addr
)
683 if (uc
->config
.dir
!= DMA_DEV_TO_MEM
)
686 if (addr
== udma_get_rx_flush_hwdesc_paddr(uc
))
692 static int udma_pop_from_ring(struct udma_chan
*uc
, dma_addr_t
*addr
)
694 struct k3_ring
*ring
= NULL
;
697 switch (uc
->config
.dir
) {
699 ring
= uc
->rflow
->r_ring
;
703 ring
= uc
->tchan
->tc_ring
;
709 ret
= k3_ringacc_ring_pop(ring
, addr
);
713 rmb(); /* Ensure that reads are not moved before this point */
715 /* Teardown completion */
716 if (cppi5_desc_is_tdcm(*addr
))
719 /* Check for flush descriptor */
720 if (udma_desc_is_rx_flush(uc
, *addr
))
726 static void udma_reset_rings(struct udma_chan
*uc
)
728 struct k3_ring
*ring1
= NULL
;
729 struct k3_ring
*ring2
= NULL
;
731 switch (uc
->config
.dir
) {
734 ring1
= uc
->rflow
->fd_ring
;
735 ring2
= uc
->rflow
->r_ring
;
741 ring1
= uc
->tchan
->t_ring
;
742 ring2
= uc
->tchan
->tc_ring
;
750 k3_ringacc_ring_reset_dma(ring1
,
751 k3_ringacc_ring_get_occ(ring1
));
753 k3_ringacc_ring_reset(ring2
);
755 /* make sure we are not leaking memory by stalled descriptor */
756 if (uc
->terminated_desc
) {
757 udma_desc_free(&uc
->terminated_desc
->vd
);
758 uc
->terminated_desc
= NULL
;
762 static void udma_decrement_byte_counters(struct udma_chan
*uc
, u32 val
)
764 if (uc
->desc
->dir
== DMA_DEV_TO_MEM
) {
765 udma_rchanrt_write(uc
, UDMA_CHAN_RT_BCNT_REG
, val
);
766 udma_rchanrt_write(uc
, UDMA_CHAN_RT_SBCNT_REG
, val
);
767 if (uc
->config
.ep_type
!= PSIL_EP_NATIVE
)
768 udma_rchanrt_write(uc
, UDMA_CHAN_RT_PEER_BCNT_REG
, val
);
770 udma_tchanrt_write(uc
, UDMA_CHAN_RT_BCNT_REG
, val
);
771 udma_tchanrt_write(uc
, UDMA_CHAN_RT_SBCNT_REG
, val
);
772 if (!uc
->bchan
&& uc
->config
.ep_type
!= PSIL_EP_NATIVE
)
773 udma_tchanrt_write(uc
, UDMA_CHAN_RT_PEER_BCNT_REG
, val
);
777 static void udma_reset_counters(struct udma_chan
*uc
)
782 val
= udma_tchanrt_read(uc
, UDMA_CHAN_RT_BCNT_REG
);
783 udma_tchanrt_write(uc
, UDMA_CHAN_RT_BCNT_REG
, val
);
785 val
= udma_tchanrt_read(uc
, UDMA_CHAN_RT_SBCNT_REG
);
786 udma_tchanrt_write(uc
, UDMA_CHAN_RT_SBCNT_REG
, val
);
788 val
= udma_tchanrt_read(uc
, UDMA_CHAN_RT_PCNT_REG
);
789 udma_tchanrt_write(uc
, UDMA_CHAN_RT_PCNT_REG
, val
);
792 val
= udma_tchanrt_read(uc
, UDMA_CHAN_RT_PEER_BCNT_REG
);
793 udma_tchanrt_write(uc
, UDMA_CHAN_RT_PEER_BCNT_REG
, val
);
798 val
= udma_rchanrt_read(uc
, UDMA_CHAN_RT_BCNT_REG
);
799 udma_rchanrt_write(uc
, UDMA_CHAN_RT_BCNT_REG
, val
);
801 val
= udma_rchanrt_read(uc
, UDMA_CHAN_RT_SBCNT_REG
);
802 udma_rchanrt_write(uc
, UDMA_CHAN_RT_SBCNT_REG
, val
);
804 val
= udma_rchanrt_read(uc
, UDMA_CHAN_RT_PCNT_REG
);
805 udma_rchanrt_write(uc
, UDMA_CHAN_RT_PCNT_REG
, val
);
807 val
= udma_rchanrt_read(uc
, UDMA_CHAN_RT_PEER_BCNT_REG
);
808 udma_rchanrt_write(uc
, UDMA_CHAN_RT_PEER_BCNT_REG
, val
);
812 static int udma_reset_chan(struct udma_chan
*uc
, bool hard
)
814 switch (uc
->config
.dir
) {
816 udma_rchanrt_write(uc
, UDMA_CHAN_RT_PEER_RT_EN_REG
, 0);
817 udma_rchanrt_write(uc
, UDMA_CHAN_RT_CTL_REG
, 0);
820 udma_tchanrt_write(uc
, UDMA_CHAN_RT_CTL_REG
, 0);
821 udma_tchanrt_write(uc
, UDMA_CHAN_RT_PEER_RT_EN_REG
, 0);
824 udma_rchanrt_write(uc
, UDMA_CHAN_RT_CTL_REG
, 0);
825 udma_tchanrt_write(uc
, UDMA_CHAN_RT_CTL_REG
, 0);
831 /* Reset all counters */
832 udma_reset_counters(uc
);
834 /* Hard reset: re-initialize the channel to reset */
836 struct udma_chan_config ucc_backup
;
839 memcpy(&ucc_backup
, &uc
->config
, sizeof(uc
->config
));
840 uc
->ud
->ddev
.device_free_chan_resources(&uc
->vc
.chan
);
842 /* restore the channel configuration */
843 memcpy(&uc
->config
, &ucc_backup
, sizeof(uc
->config
));
844 ret
= uc
->ud
->ddev
.device_alloc_chan_resources(&uc
->vc
.chan
);
849 * Setting forced teardown after forced reset helps recovering
852 if (uc
->config
.dir
== DMA_DEV_TO_MEM
)
853 udma_rchanrt_write(uc
, UDMA_CHAN_RT_CTL_REG
,
854 UDMA_CHAN_RT_CTL_EN
|
855 UDMA_CHAN_RT_CTL_TDOWN
|
856 UDMA_CHAN_RT_CTL_FTDOWN
);
858 uc
->state
= UDMA_CHAN_IS_IDLE
;
863 static void udma_start_desc(struct udma_chan
*uc
)
865 struct udma_chan_config
*ucc
= &uc
->config
;
867 if (uc
->ud
->match_data
->type
== DMA_TYPE_UDMA
&& ucc
->pkt_mode
&&
868 (uc
->cyclic
|| ucc
->dir
== DMA_DEV_TO_MEM
)) {
872 * UDMA only: Push all descriptors to ring for packet mode
874 * PKTDMA supports pre-linked descriptor and cyclic is not
877 for (i
= 0; i
< uc
->desc
->sglen
; i
++)
878 udma_push_to_ring(uc
, i
);
880 udma_push_to_ring(uc
, 0);
884 static bool udma_chan_needs_reconfiguration(struct udma_chan
*uc
)
886 /* Only PDMAs have staticTR */
887 if (uc
->config
.ep_type
== PSIL_EP_NATIVE
)
890 /* Check if the staticTR configuration has changed for TX */
891 if (memcmp(&uc
->static_tr
, &uc
->desc
->static_tr
, sizeof(uc
->static_tr
)))
897 static int udma_start(struct udma_chan
*uc
)
899 struct virt_dma_desc
*vd
= vchan_next_desc(&uc
->vc
);
908 uc
->desc
= to_udma_desc(&vd
->tx
);
910 /* Channel is already running and does not need reconfiguration */
911 if (udma_is_chan_running(uc
) && !udma_chan_needs_reconfiguration(uc
)) {
916 /* Make sure that we clear the teardown bit, if it is set */
917 udma_reset_chan(uc
, false);
919 /* Push descriptors before we start the channel */
922 switch (uc
->desc
->dir
) {
924 /* Config remote TR */
925 if (uc
->config
.ep_type
== PSIL_EP_PDMA_XY
) {
926 u32 val
= PDMA_STATIC_TR_Y(uc
->desc
->static_tr
.elcnt
) |
927 PDMA_STATIC_TR_X(uc
->desc
->static_tr
.elsize
);
928 const struct udma_match_data
*match_data
=
931 if (uc
->config
.enable_acc32
)
932 val
|= PDMA_STATIC_TR_XY_ACC32
;
933 if (uc
->config
.enable_burst
)
934 val
|= PDMA_STATIC_TR_XY_BURST
;
936 udma_rchanrt_write(uc
,
937 UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG
,
940 udma_rchanrt_write(uc
,
941 UDMA_CHAN_RT_PEER_STATIC_TR_Z_REG
,
942 PDMA_STATIC_TR_Z(uc
->desc
->static_tr
.bstcnt
,
943 match_data
->statictr_z_mask
));
945 /* save the current staticTR configuration */
946 memcpy(&uc
->static_tr
, &uc
->desc
->static_tr
,
947 sizeof(uc
->static_tr
));
950 udma_rchanrt_write(uc
, UDMA_CHAN_RT_CTL_REG
,
951 UDMA_CHAN_RT_CTL_EN
);
954 udma_rchanrt_write(uc
, UDMA_CHAN_RT_PEER_RT_EN_REG
,
955 UDMA_PEER_RT_EN_ENABLE
);
959 /* Config remote TR */
960 if (uc
->config
.ep_type
== PSIL_EP_PDMA_XY
) {
961 u32 val
= PDMA_STATIC_TR_Y(uc
->desc
->static_tr
.elcnt
) |
962 PDMA_STATIC_TR_X(uc
->desc
->static_tr
.elsize
);
964 if (uc
->config
.enable_acc32
)
965 val
|= PDMA_STATIC_TR_XY_ACC32
;
966 if (uc
->config
.enable_burst
)
967 val
|= PDMA_STATIC_TR_XY_BURST
;
969 udma_tchanrt_write(uc
,
970 UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG
,
973 /* save the current staticTR configuration */
974 memcpy(&uc
->static_tr
, &uc
->desc
->static_tr
,
975 sizeof(uc
->static_tr
));
979 udma_tchanrt_write(uc
, UDMA_CHAN_RT_PEER_RT_EN_REG
,
980 UDMA_PEER_RT_EN_ENABLE
);
982 udma_tchanrt_write(uc
, UDMA_CHAN_RT_CTL_REG
,
983 UDMA_CHAN_RT_CTL_EN
);
987 udma_rchanrt_write(uc
, UDMA_CHAN_RT_CTL_REG
,
988 UDMA_CHAN_RT_CTL_EN
);
989 udma_tchanrt_write(uc
, UDMA_CHAN_RT_CTL_REG
,
990 UDMA_CHAN_RT_CTL_EN
);
997 uc
->state
= UDMA_CHAN_IS_ACTIVE
;
1003 static int udma_stop(struct udma_chan
*uc
)
1005 enum udma_chan_state old_state
= uc
->state
;
1007 uc
->state
= UDMA_CHAN_IS_TERMINATING
;
1008 reinit_completion(&uc
->teardown_completed
);
1010 switch (uc
->config
.dir
) {
1011 case DMA_DEV_TO_MEM
:
1012 if (!uc
->cyclic
&& !uc
->desc
)
1013 udma_push_to_ring(uc
, -1);
1015 udma_rchanrt_write(uc
, UDMA_CHAN_RT_PEER_RT_EN_REG
,
1016 UDMA_PEER_RT_EN_ENABLE
|
1017 UDMA_PEER_RT_EN_TEARDOWN
);
1019 case DMA_MEM_TO_DEV
:
1020 udma_tchanrt_write(uc
, UDMA_CHAN_RT_PEER_RT_EN_REG
,
1021 UDMA_PEER_RT_EN_ENABLE
|
1022 UDMA_PEER_RT_EN_FLUSH
);
1023 udma_tchanrt_write(uc
, UDMA_CHAN_RT_CTL_REG
,
1024 UDMA_CHAN_RT_CTL_EN
|
1025 UDMA_CHAN_RT_CTL_TDOWN
);
1027 case DMA_MEM_TO_MEM
:
1028 udma_tchanrt_write(uc
, UDMA_CHAN_RT_CTL_REG
,
1029 UDMA_CHAN_RT_CTL_EN
|
1030 UDMA_CHAN_RT_CTL_TDOWN
);
1033 uc
->state
= old_state
;
1034 complete_all(&uc
->teardown_completed
);
1041 static void udma_cyclic_packet_elapsed(struct udma_chan
*uc
)
1043 struct udma_desc
*d
= uc
->desc
;
1044 struct cppi5_host_desc_t
*h_desc
;
1046 h_desc
= d
->hwdesc
[d
->desc_idx
].cppi5_desc_vaddr
;
1047 cppi5_hdesc_reset_to_original(h_desc
);
1048 udma_push_to_ring(uc
, d
->desc_idx
);
1049 d
->desc_idx
= (d
->desc_idx
+ 1) % d
->sglen
;
1052 static inline void udma_fetch_epib(struct udma_chan
*uc
, struct udma_desc
*d
)
1054 struct cppi5_host_desc_t
*h_desc
= d
->hwdesc
[0].cppi5_desc_vaddr
;
1056 memcpy(d
->metadata
, h_desc
->epib
, d
->metadata_size
);
1059 static bool udma_is_desc_really_done(struct udma_chan
*uc
, struct udma_desc
*d
)
1061 u32 peer_bcnt
, bcnt
;
1064 * Only TX towards PDMA is affected.
1065 * If DMA_PREP_INTERRUPT is not set by consumer then skip the transfer
1066 * completion calculation, consumer must ensure that there is no stale
1067 * data in DMA fabric in this case.
1069 if (uc
->config
.ep_type
== PSIL_EP_NATIVE
||
1070 uc
->config
.dir
!= DMA_MEM_TO_DEV
|| !(uc
->config
.tx_flags
& DMA_PREP_INTERRUPT
))
1073 peer_bcnt
= udma_tchanrt_read(uc
, UDMA_CHAN_RT_PEER_BCNT_REG
);
1074 bcnt
= udma_tchanrt_read(uc
, UDMA_CHAN_RT_BCNT_REG
);
1076 /* Transfer is incomplete, store current residue and time stamp */
1077 if (peer_bcnt
< bcnt
) {
1078 uc
->tx_drain
.residue
= bcnt
- peer_bcnt
;
1079 uc
->tx_drain
.tstamp
= ktime_get();
1086 static void udma_check_tx_completion(struct work_struct
*work
)
1088 struct udma_chan
*uc
= container_of(work
, typeof(*uc
),
1089 tx_drain
.work
.work
);
1090 bool desc_done
= true;
1093 unsigned long delay
;
1097 /* Get previous residue and time stamp */
1098 residue_diff
= uc
->tx_drain
.residue
;
1099 time_diff
= uc
->tx_drain
.tstamp
;
1101 * Get current residue and time stamp or see if
1102 * transfer is complete
1104 desc_done
= udma_is_desc_really_done(uc
, uc
->desc
);
1109 * Find the time delta and residue delta w.r.t
1112 time_diff
= ktime_sub(uc
->tx_drain
.tstamp
,
1114 residue_diff
-= uc
->tx_drain
.residue
;
1117 * Try to guess when we should check
1118 * next time by calculating rate at
1119 * which data is being drained at the
1122 delay
= (time_diff
/ residue_diff
) *
1123 uc
->tx_drain
.residue
;
1125 /* No progress, check again in 1 second */
1126 schedule_delayed_work(&uc
->tx_drain
.work
, HZ
);
1130 usleep_range(ktime_to_us(delay
),
1131 ktime_to_us(delay
) + 10);
1136 struct udma_desc
*d
= uc
->desc
;
1138 udma_decrement_byte_counters(uc
, d
->residue
);
1140 vchan_cookie_complete(&d
->vd
);
1148 static irqreturn_t
udma_ring_irq_handler(int irq
, void *data
)
1150 struct udma_chan
*uc
= data
;
1151 struct udma_desc
*d
;
1152 dma_addr_t paddr
= 0;
1154 if (udma_pop_from_ring(uc
, &paddr
) || !paddr
)
1157 spin_lock(&uc
->vc
.lock
);
1159 /* Teardown completion message */
1160 if (cppi5_desc_is_tdcm(paddr
)) {
1161 complete_all(&uc
->teardown_completed
);
1163 if (uc
->terminated_desc
) {
1164 udma_desc_free(&uc
->terminated_desc
->vd
);
1165 uc
->terminated_desc
= NULL
;
1174 d
= udma_udma_desc_from_paddr(uc
, paddr
);
1177 dma_addr_t desc_paddr
= udma_curr_cppi5_desc_paddr(d
,
1179 if (desc_paddr
!= paddr
) {
1180 dev_err(uc
->ud
->dev
, "not matching descriptors!\n");
1184 if (d
== uc
->desc
) {
1185 /* active descriptor */
1187 udma_cyclic_packet_elapsed(uc
);
1188 vchan_cyclic_callback(&d
->vd
);
1190 if (udma_is_desc_really_done(uc
, d
)) {
1191 udma_decrement_byte_counters(uc
, d
->residue
);
1193 vchan_cookie_complete(&d
->vd
);
1195 schedule_delayed_work(&uc
->tx_drain
.work
,
1201 * terminated descriptor, mark the descriptor as
1202 * completed to update the channel's cookie marker
1204 dma_cookie_complete(&d
->vd
.tx
);
1208 spin_unlock(&uc
->vc
.lock
);
1213 static irqreturn_t
udma_udma_irq_handler(int irq
, void *data
)
1215 struct udma_chan
*uc
= data
;
1216 struct udma_desc
*d
;
1218 spin_lock(&uc
->vc
.lock
);
1221 d
->tr_idx
= (d
->tr_idx
+ 1) % d
->sglen
;
1224 vchan_cyclic_callback(&d
->vd
);
1226 /* TODO: figure out the real amount of data */
1227 udma_decrement_byte_counters(uc
, d
->residue
);
1229 vchan_cookie_complete(&d
->vd
);
1233 spin_unlock(&uc
->vc
.lock
);
1239 * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
1241 * @from: Start the search from this flow id number
1242 * @cnt: Number of consecutive flow ids to allocate
1244 * Allocate range of RX flow ids for future use, those flows can be requested
1245 * only using explicit flow id number. if @from is set to -1 it will try to find
1246 * first free range. if @from is positive value it will force allocation only
1247 * of the specified range of flows.
1249 * Returns -ENOMEM if can't find free range.
1250 * -EEXIST if requested range is busy.
1251 * -EINVAL if wrong input values passed.
1252 * Returns flow id on success.
1254 static int __udma_alloc_gp_rflow_range(struct udma_dev
*ud
, int from
, int cnt
)
1256 int start
, tmp_from
;
1257 DECLARE_BITMAP(tmp
, K3_UDMA_MAX_RFLOWS
);
1261 tmp_from
= ud
->rchan_cnt
;
1262 /* default flows can't be allocated and accessible only by id */
1263 if (tmp_from
< ud
->rchan_cnt
)
1266 if (tmp_from
+ cnt
> ud
->rflow_cnt
)
1269 bitmap_or(tmp
, ud
->rflow_gp_map
, ud
->rflow_gp_map_allocated
,
1272 start
= bitmap_find_next_zero_area(tmp
,
1275 if (start
>= ud
->rflow_cnt
)
1278 if (from
>= 0 && start
!= from
)
1281 bitmap_set(ud
->rflow_gp_map_allocated
, start
, cnt
);
1285 static int __udma_free_gp_rflow_range(struct udma_dev
*ud
, int from
, int cnt
)
1287 if (from
< ud
->rchan_cnt
)
1289 if (from
+ cnt
> ud
->rflow_cnt
)
1292 bitmap_clear(ud
->rflow_gp_map_allocated
, from
, cnt
);
1296 static struct udma_rflow
*__udma_get_rflow(struct udma_dev
*ud
, int id
)
1299 * Attempt to request rflow by ID can be made for any rflow
1300 * if not in use with assumption that caller knows what's doing.
1301 * TI-SCI FW will perform additional permission check ant way, it's
1305 if (id
< 0 || id
>= ud
->rflow_cnt
)
1306 return ERR_PTR(-ENOENT
);
1308 if (test_bit(id
, ud
->rflow_in_use
))
1309 return ERR_PTR(-ENOENT
);
1311 if (ud
->rflow_gp_map
) {
1312 /* GP rflow has to be allocated first */
1313 if (!test_bit(id
, ud
->rflow_gp_map
) &&
1314 !test_bit(id
, ud
->rflow_gp_map_allocated
))
1315 return ERR_PTR(-EINVAL
);
1318 dev_dbg(ud
->dev
, "get rflow%d\n", id
);
1319 set_bit(id
, ud
->rflow_in_use
);
1320 return &ud
->rflows
[id
];
1323 static void __udma_put_rflow(struct udma_dev
*ud
, struct udma_rflow
*rflow
)
1325 if (!test_bit(rflow
->id
, ud
->rflow_in_use
)) {
1326 dev_err(ud
->dev
, "attempt to put unused rflow%d\n", rflow
->id
);
1330 dev_dbg(ud
->dev
, "put rflow%d\n", rflow
->id
);
1331 clear_bit(rflow
->id
, ud
->rflow_in_use
);
1334 #define UDMA_RESERVE_RESOURCE(res) \
1335 static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
1336 enum udma_tp_level tpl, \
1340 if (test_bit(id, ud->res##_map)) { \
1341 dev_err(ud->dev, "res##%d is in use\n", id); \
1342 return ERR_PTR(-ENOENT); \
1347 if (tpl >= ud->res##_tpl.levels) \
1348 tpl = ud->res##_tpl.levels - 1; \
1350 start = ud->res##_tpl.start_idx[tpl]; \
1352 id = find_next_zero_bit(ud->res##_map, ud->res##_cnt, \
1354 if (id == ud->res##_cnt) { \
1355 return ERR_PTR(-ENOENT); \
1359 set_bit(id, ud->res##_map); \
1360 return &ud->res##s[id]; \
1363 UDMA_RESERVE_RESOURCE(bchan
);
1364 UDMA_RESERVE_RESOURCE(tchan
);
1365 UDMA_RESERVE_RESOURCE(rchan
);
1367 static int bcdma_get_bchan(struct udma_chan
*uc
)
1369 struct udma_dev
*ud
= uc
->ud
;
1370 enum udma_tp_level tpl
;
1374 dev_dbg(ud
->dev
, "chan%d: already have bchan%d allocated\n",
1375 uc
->id
, uc
->bchan
->id
);
1380 * Use normal channels for peripherals, and highest TPL channel for
1383 if (uc
->config
.tr_trigger_type
)
1386 tpl
= ud
->bchan_tpl
.levels
- 1;
1388 uc
->bchan
= __udma_reserve_bchan(ud
, tpl
, -1);
1389 if (IS_ERR(uc
->bchan
)) {
1390 ret
= PTR_ERR(uc
->bchan
);
1395 uc
->tchan
= uc
->bchan
;
1400 static int udma_get_tchan(struct udma_chan
*uc
)
1402 struct udma_dev
*ud
= uc
->ud
;
1406 dev_dbg(ud
->dev
, "chan%d: already have tchan%d allocated\n",
1407 uc
->id
, uc
->tchan
->id
);
1412 * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
1413 * For PKTDMA mapped channels it is configured to a channel which must
1414 * be used to service the peripheral.
1416 uc
->tchan
= __udma_reserve_tchan(ud
, uc
->config
.channel_tpl
,
1417 uc
->config
.mapped_channel_id
);
1418 if (IS_ERR(uc
->tchan
)) {
1419 ret
= PTR_ERR(uc
->tchan
);
1424 if (ud
->tflow_cnt
) {
1427 /* Only PKTDMA have support for tx flows */
1428 if (uc
->config
.default_flow_id
>= 0)
1429 tflow_id
= uc
->config
.default_flow_id
;
1431 tflow_id
= uc
->tchan
->id
;
1433 if (test_bit(tflow_id
, ud
->tflow_map
)) {
1434 dev_err(ud
->dev
, "tflow%d is in use\n", tflow_id
);
1435 clear_bit(uc
->tchan
->id
, ud
->tchan_map
);
1440 uc
->tchan
->tflow_id
= tflow_id
;
1441 set_bit(tflow_id
, ud
->tflow_map
);
1443 uc
->tchan
->tflow_id
= -1;
1449 static int udma_get_rchan(struct udma_chan
*uc
)
1451 struct udma_dev
*ud
= uc
->ud
;
1455 dev_dbg(ud
->dev
, "chan%d: already have rchan%d allocated\n",
1456 uc
->id
, uc
->rchan
->id
);
1461 * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
1462 * For PKTDMA mapped channels it is configured to a channel which must
1463 * be used to service the peripheral.
1465 uc
->rchan
= __udma_reserve_rchan(ud
, uc
->config
.channel_tpl
,
1466 uc
->config
.mapped_channel_id
);
1467 if (IS_ERR(uc
->rchan
)) {
1468 ret
= PTR_ERR(uc
->rchan
);
1476 static int udma_get_chan_pair(struct udma_chan
*uc
)
1478 struct udma_dev
*ud
= uc
->ud
;
1481 if ((uc
->tchan
&& uc
->rchan
) && uc
->tchan
->id
== uc
->rchan
->id
) {
1482 dev_info(ud
->dev
, "chan%d: already have %d pair allocated\n",
1483 uc
->id
, uc
->tchan
->id
);
1488 dev_err(ud
->dev
, "chan%d: already have tchan%d allocated\n",
1489 uc
->id
, uc
->tchan
->id
);
1491 } else if (uc
->rchan
) {
1492 dev_err(ud
->dev
, "chan%d: already have rchan%d allocated\n",
1493 uc
->id
, uc
->rchan
->id
);
1497 /* Can be optimized, but let's have it like this for now */
1498 end
= min(ud
->tchan_cnt
, ud
->rchan_cnt
);
1500 * Try to use the highest TPL channel pair for MEM_TO_MEM channels
1501 * Note: in UDMAP the channel TPL is symmetric between tchan and rchan
1503 chan_id
= ud
->tchan_tpl
.start_idx
[ud
->tchan_tpl
.levels
- 1];
1504 for (; chan_id
< end
; chan_id
++) {
1505 if (!test_bit(chan_id
, ud
->tchan_map
) &&
1506 !test_bit(chan_id
, ud
->rchan_map
))
1513 set_bit(chan_id
, ud
->tchan_map
);
1514 set_bit(chan_id
, ud
->rchan_map
);
1515 uc
->tchan
= &ud
->tchans
[chan_id
];
1516 uc
->rchan
= &ud
->rchans
[chan_id
];
1518 /* UDMA does not use tx flows */
1519 uc
->tchan
->tflow_id
= -1;
1524 static int udma_get_rflow(struct udma_chan
*uc
, int flow_id
)
1526 struct udma_dev
*ud
= uc
->ud
;
1530 dev_err(ud
->dev
, "chan%d: does not have rchan??\n", uc
->id
);
1535 dev_dbg(ud
->dev
, "chan%d: already have rflow%d allocated\n",
1536 uc
->id
, uc
->rflow
->id
);
1540 uc
->rflow
= __udma_get_rflow(ud
, flow_id
);
1541 if (IS_ERR(uc
->rflow
)) {
1542 ret
= PTR_ERR(uc
->rflow
);
1550 static void bcdma_put_bchan(struct udma_chan
*uc
)
1552 struct udma_dev
*ud
= uc
->ud
;
1555 dev_dbg(ud
->dev
, "chan%d: put bchan%d\n", uc
->id
,
1557 clear_bit(uc
->bchan
->id
, ud
->bchan_map
);
1563 static void udma_put_rchan(struct udma_chan
*uc
)
1565 struct udma_dev
*ud
= uc
->ud
;
1568 dev_dbg(ud
->dev
, "chan%d: put rchan%d\n", uc
->id
,
1570 clear_bit(uc
->rchan
->id
, ud
->rchan_map
);
1575 static void udma_put_tchan(struct udma_chan
*uc
)
1577 struct udma_dev
*ud
= uc
->ud
;
1580 dev_dbg(ud
->dev
, "chan%d: put tchan%d\n", uc
->id
,
1582 clear_bit(uc
->tchan
->id
, ud
->tchan_map
);
1584 if (uc
->tchan
->tflow_id
>= 0)
1585 clear_bit(uc
->tchan
->tflow_id
, ud
->tflow_map
);
1591 static void udma_put_rflow(struct udma_chan
*uc
)
1593 struct udma_dev
*ud
= uc
->ud
;
1596 dev_dbg(ud
->dev
, "chan%d: put rflow%d\n", uc
->id
,
1598 __udma_put_rflow(ud
, uc
->rflow
);
1603 static void bcdma_free_bchan_resources(struct udma_chan
*uc
)
1608 k3_ringacc_ring_free(uc
->bchan
->tc_ring
);
1609 k3_ringacc_ring_free(uc
->bchan
->t_ring
);
1610 uc
->bchan
->tc_ring
= NULL
;
1611 uc
->bchan
->t_ring
= NULL
;
1612 k3_configure_chan_coherency(&uc
->vc
.chan
, 0);
1614 bcdma_put_bchan(uc
);
1617 static int bcdma_alloc_bchan_resources(struct udma_chan
*uc
)
1619 struct k3_ring_cfg ring_cfg
;
1620 struct udma_dev
*ud
= uc
->ud
;
1623 ret
= bcdma_get_bchan(uc
);
1627 ret
= k3_ringacc_request_rings_pair(ud
->ringacc
, uc
->bchan
->id
, -1,
1629 &uc
->bchan
->tc_ring
);
1635 memset(&ring_cfg
, 0, sizeof(ring_cfg
));
1636 ring_cfg
.size
= K3_UDMA_DEFAULT_RING_SIZE
;
1637 ring_cfg
.elm_size
= K3_RINGACC_RING_ELSIZE_8
;
1638 ring_cfg
.mode
= K3_RINGACC_RING_MODE_RING
;
1640 k3_configure_chan_coherency(&uc
->vc
.chan
, ud
->asel
);
1641 ring_cfg
.asel
= ud
->asel
;
1642 ring_cfg
.dma_dev
= dmaengine_get_dma_device(&uc
->vc
.chan
);
1644 ret
= k3_ringacc_ring_cfg(uc
->bchan
->t_ring
, &ring_cfg
);
1651 k3_ringacc_ring_free(uc
->bchan
->tc_ring
);
1652 uc
->bchan
->tc_ring
= NULL
;
1653 k3_ringacc_ring_free(uc
->bchan
->t_ring
);
1654 uc
->bchan
->t_ring
= NULL
;
1655 k3_configure_chan_coherency(&uc
->vc
.chan
, 0);
1657 bcdma_put_bchan(uc
);
1662 static void udma_free_tx_resources(struct udma_chan
*uc
)
1667 k3_ringacc_ring_free(uc
->tchan
->t_ring
);
1668 k3_ringacc_ring_free(uc
->tchan
->tc_ring
);
1669 uc
->tchan
->t_ring
= NULL
;
1670 uc
->tchan
->tc_ring
= NULL
;
1675 static int udma_alloc_tx_resources(struct udma_chan
*uc
)
1677 struct k3_ring_cfg ring_cfg
;
1678 struct udma_dev
*ud
= uc
->ud
;
1679 struct udma_tchan
*tchan
;
1682 ret
= udma_get_tchan(uc
);
1687 if (tchan
->tflow_id
>= 0)
1688 ring_idx
= tchan
->tflow_id
;
1690 ring_idx
= ud
->bchan_cnt
+ tchan
->id
;
1692 ret
= k3_ringacc_request_rings_pair(ud
->ringacc
, ring_idx
, -1,
1700 memset(&ring_cfg
, 0, sizeof(ring_cfg
));
1701 ring_cfg
.size
= K3_UDMA_DEFAULT_RING_SIZE
;
1702 ring_cfg
.elm_size
= K3_RINGACC_RING_ELSIZE_8
;
1703 if (ud
->match_data
->type
== DMA_TYPE_UDMA
) {
1704 ring_cfg
.mode
= K3_RINGACC_RING_MODE_MESSAGE
;
1706 ring_cfg
.mode
= K3_RINGACC_RING_MODE_RING
;
1708 k3_configure_chan_coherency(&uc
->vc
.chan
, uc
->config
.asel
);
1709 ring_cfg
.asel
= uc
->config
.asel
;
1710 ring_cfg
.dma_dev
= dmaengine_get_dma_device(&uc
->vc
.chan
);
1713 ret
= k3_ringacc_ring_cfg(tchan
->t_ring
, &ring_cfg
);
1714 ret
|= k3_ringacc_ring_cfg(tchan
->tc_ring
, &ring_cfg
);
1722 k3_ringacc_ring_free(uc
->tchan
->tc_ring
);
1723 uc
->tchan
->tc_ring
= NULL
;
1724 k3_ringacc_ring_free(uc
->tchan
->t_ring
);
1725 uc
->tchan
->t_ring
= NULL
;
1732 static void udma_free_rx_resources(struct udma_chan
*uc
)
1738 struct udma_rflow
*rflow
= uc
->rflow
;
1740 k3_ringacc_ring_free(rflow
->fd_ring
);
1741 k3_ringacc_ring_free(rflow
->r_ring
);
1742 rflow
->fd_ring
= NULL
;
1743 rflow
->r_ring
= NULL
;
1751 static int udma_alloc_rx_resources(struct udma_chan
*uc
)
1753 struct udma_dev
*ud
= uc
->ud
;
1754 struct k3_ring_cfg ring_cfg
;
1755 struct udma_rflow
*rflow
;
1759 ret
= udma_get_rchan(uc
);
1763 /* For MEM_TO_MEM we don't need rflow or rings */
1764 if (uc
->config
.dir
== DMA_MEM_TO_MEM
)
1767 if (uc
->config
.default_flow_id
>= 0)
1768 ret
= udma_get_rflow(uc
, uc
->config
.default_flow_id
);
1770 ret
= udma_get_rflow(uc
, uc
->rchan
->id
);
1779 fd_ring_id
= ud
->tflow_cnt
+ rflow
->id
;
1781 fd_ring_id
= ud
->bchan_cnt
+ ud
->tchan_cnt
+ ud
->echan_cnt
+
1784 ret
= k3_ringacc_request_rings_pair(ud
->ringacc
, fd_ring_id
, -1,
1785 &rflow
->fd_ring
, &rflow
->r_ring
);
1791 memset(&ring_cfg
, 0, sizeof(ring_cfg
));
1793 ring_cfg
.elm_size
= K3_RINGACC_RING_ELSIZE_8
;
1794 if (ud
->match_data
->type
== DMA_TYPE_UDMA
) {
1795 if (uc
->config
.pkt_mode
)
1796 ring_cfg
.size
= SG_MAX_SEGMENTS
;
1798 ring_cfg
.size
= K3_UDMA_DEFAULT_RING_SIZE
;
1800 ring_cfg
.mode
= K3_RINGACC_RING_MODE_MESSAGE
;
1802 ring_cfg
.size
= K3_UDMA_DEFAULT_RING_SIZE
;
1803 ring_cfg
.mode
= K3_RINGACC_RING_MODE_RING
;
1805 k3_configure_chan_coherency(&uc
->vc
.chan
, uc
->config
.asel
);
1806 ring_cfg
.asel
= uc
->config
.asel
;
1807 ring_cfg
.dma_dev
= dmaengine_get_dma_device(&uc
->vc
.chan
);
1810 ret
= k3_ringacc_ring_cfg(rflow
->fd_ring
, &ring_cfg
);
1812 ring_cfg
.size
= K3_UDMA_DEFAULT_RING_SIZE
;
1813 ret
|= k3_ringacc_ring_cfg(rflow
->r_ring
, &ring_cfg
);
1821 k3_ringacc_ring_free(rflow
->r_ring
);
1822 rflow
->r_ring
= NULL
;
1823 k3_ringacc_ring_free(rflow
->fd_ring
);
1824 rflow
->fd_ring
= NULL
;
1833 #define TISCI_BCDMA_BCHAN_VALID_PARAMS ( \
1834 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1835 TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID)
1837 #define TISCI_BCDMA_TCHAN_VALID_PARAMS ( \
1838 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1839 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID)
1841 #define TISCI_BCDMA_RCHAN_VALID_PARAMS ( \
1842 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID)
1844 #define TISCI_UDMA_TCHAN_VALID_PARAMS ( \
1845 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1846 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \
1847 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \
1848 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
1849 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | \
1850 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
1851 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
1852 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1854 #define TISCI_UDMA_RCHAN_VALID_PARAMS ( \
1855 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1856 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
1857 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
1858 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
1859 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID | \
1860 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \
1861 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | \
1862 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID | \
1863 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1865 static int udma_tisci_m2m_channel_config(struct udma_chan
*uc
)
1867 struct udma_dev
*ud
= uc
->ud
;
1868 struct udma_tisci_rm
*tisci_rm
= &ud
->tisci_rm
;
1869 const struct ti_sci_rm_udmap_ops
*tisci_ops
= tisci_rm
->tisci_udmap_ops
;
1870 struct udma_tchan
*tchan
= uc
->tchan
;
1871 struct udma_rchan
*rchan
= uc
->rchan
;
1876 /* Non synchronized - mem to mem type of transfer */
1877 int tc_ring
= k3_ringacc_get_ring_id(tchan
->tc_ring
);
1878 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx
= { 0 };
1879 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx
= { 0 };
1881 if (ud
->match_data
->flags
& UDMA_FLAG_BURST_SIZE
) {
1882 tpl
= udma_get_chan_tpl_index(&ud
->tchan_tpl
, tchan
->id
);
1884 burst_size
= ud
->match_data
->burst_size
[tpl
];
1887 req_tx
.valid_params
= TISCI_UDMA_TCHAN_VALID_PARAMS
;
1888 req_tx
.nav_id
= tisci_rm
->tisci_dev_id
;
1889 req_tx
.index
= tchan
->id
;
1890 req_tx
.tx_chan_type
= TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR
;
1891 req_tx
.tx_fetch_size
= sizeof(struct cppi5_desc_hdr_t
) >> 2;
1892 req_tx
.txcq_qnum
= tc_ring
;
1893 req_tx
.tx_atype
= ud
->atype
;
1895 req_tx
.valid_params
|= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID
;
1896 req_tx
.tx_burst_size
= burst_size
;
1899 ret
= tisci_ops
->tx_ch_cfg(tisci_rm
->tisci
, &req_tx
);
1901 dev_err(ud
->dev
, "tchan%d cfg failed %d\n", tchan
->id
, ret
);
1905 req_rx
.valid_params
= TISCI_UDMA_RCHAN_VALID_PARAMS
;
1906 req_rx
.nav_id
= tisci_rm
->tisci_dev_id
;
1907 req_rx
.index
= rchan
->id
;
1908 req_rx
.rx_fetch_size
= sizeof(struct cppi5_desc_hdr_t
) >> 2;
1909 req_rx
.rxcq_qnum
= tc_ring
;
1910 req_rx
.rx_chan_type
= TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR
;
1911 req_rx
.rx_atype
= ud
->atype
;
1913 req_rx
.valid_params
|= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID
;
1914 req_rx
.rx_burst_size
= burst_size
;
1917 ret
= tisci_ops
->rx_ch_cfg(tisci_rm
->tisci
, &req_rx
);
1919 dev_err(ud
->dev
, "rchan%d alloc failed %d\n", rchan
->id
, ret
);
1924 static int bcdma_tisci_m2m_channel_config(struct udma_chan
*uc
)
1926 struct udma_dev
*ud
= uc
->ud
;
1927 struct udma_tisci_rm
*tisci_rm
= &ud
->tisci_rm
;
1928 const struct ti_sci_rm_udmap_ops
*tisci_ops
= tisci_rm
->tisci_udmap_ops
;
1929 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx
= { 0 };
1930 struct udma_bchan
*bchan
= uc
->bchan
;
1935 if (ud
->match_data
->flags
& UDMA_FLAG_BURST_SIZE
) {
1936 tpl
= udma_get_chan_tpl_index(&ud
->bchan_tpl
, bchan
->id
);
1938 burst_size
= ud
->match_data
->burst_size
[tpl
];
1941 req_tx
.valid_params
= TISCI_BCDMA_BCHAN_VALID_PARAMS
;
1942 req_tx
.nav_id
= tisci_rm
->tisci_dev_id
;
1943 req_tx
.extended_ch_type
= TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN
;
1944 req_tx
.index
= bchan
->id
;
1946 req_tx
.valid_params
|= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID
;
1947 req_tx
.tx_burst_size
= burst_size
;
1950 ret
= tisci_ops
->tx_ch_cfg(tisci_rm
->tisci
, &req_tx
);
1952 dev_err(ud
->dev
, "bchan%d cfg failed %d\n", bchan
->id
, ret
);
1957 static int udma_tisci_tx_channel_config(struct udma_chan
*uc
)
1959 struct udma_dev
*ud
= uc
->ud
;
1960 struct udma_tisci_rm
*tisci_rm
= &ud
->tisci_rm
;
1961 const struct ti_sci_rm_udmap_ops
*tisci_ops
= tisci_rm
->tisci_udmap_ops
;
1962 struct udma_tchan
*tchan
= uc
->tchan
;
1963 int tc_ring
= k3_ringacc_get_ring_id(tchan
->tc_ring
);
1964 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx
= { 0 };
1965 u32 mode
, fetch_size
;
1968 if (uc
->config
.pkt_mode
) {
1969 mode
= TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR
;
1970 fetch_size
= cppi5_hdesc_calc_size(uc
->config
.needs_epib
,
1971 uc
->config
.psd_size
, 0);
1973 mode
= TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR
;
1974 fetch_size
= sizeof(struct cppi5_desc_hdr_t
);
1977 req_tx
.valid_params
= TISCI_UDMA_TCHAN_VALID_PARAMS
;
1978 req_tx
.nav_id
= tisci_rm
->tisci_dev_id
;
1979 req_tx
.index
= tchan
->id
;
1980 req_tx
.tx_chan_type
= mode
;
1981 req_tx
.tx_supr_tdpkt
= uc
->config
.notdpkt
;
1982 req_tx
.tx_fetch_size
= fetch_size
>> 2;
1983 req_tx
.txcq_qnum
= tc_ring
;
1984 req_tx
.tx_atype
= uc
->config
.atype
;
1985 if (uc
->config
.ep_type
== PSIL_EP_PDMA_XY
&&
1986 ud
->match_data
->flags
& UDMA_FLAG_TDTYPE
) {
1987 /* wait for peer to complete the teardown for PDMAs */
1988 req_tx
.valid_params
|=
1989 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID
;
1990 req_tx
.tx_tdtype
= 1;
1993 ret
= tisci_ops
->tx_ch_cfg(tisci_rm
->tisci
, &req_tx
);
1995 dev_err(ud
->dev
, "tchan%d cfg failed %d\n", tchan
->id
, ret
);
2000 static int bcdma_tisci_tx_channel_config(struct udma_chan
*uc
)
2002 struct udma_dev
*ud
= uc
->ud
;
2003 struct udma_tisci_rm
*tisci_rm
= &ud
->tisci_rm
;
2004 const struct ti_sci_rm_udmap_ops
*tisci_ops
= tisci_rm
->tisci_udmap_ops
;
2005 struct udma_tchan
*tchan
= uc
->tchan
;
2006 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx
= { 0 };
2009 req_tx
.valid_params
= TISCI_BCDMA_TCHAN_VALID_PARAMS
;
2010 req_tx
.nav_id
= tisci_rm
->tisci_dev_id
;
2011 req_tx
.index
= tchan
->id
;
2012 req_tx
.tx_supr_tdpkt
= uc
->config
.notdpkt
;
2013 if (ud
->match_data
->flags
& UDMA_FLAG_TDTYPE
) {
2014 /* wait for peer to complete the teardown for PDMAs */
2015 req_tx
.valid_params
|=
2016 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID
;
2017 req_tx
.tx_tdtype
= 1;
2020 ret
= tisci_ops
->tx_ch_cfg(tisci_rm
->tisci
, &req_tx
);
2022 dev_err(ud
->dev
, "tchan%d cfg failed %d\n", tchan
->id
, ret
);
2027 #define pktdma_tisci_tx_channel_config bcdma_tisci_tx_channel_config
2029 static int udma_tisci_rx_channel_config(struct udma_chan
*uc
)
2031 struct udma_dev
*ud
= uc
->ud
;
2032 struct udma_tisci_rm
*tisci_rm
= &ud
->tisci_rm
;
2033 const struct ti_sci_rm_udmap_ops
*tisci_ops
= tisci_rm
->tisci_udmap_ops
;
2034 struct udma_rchan
*rchan
= uc
->rchan
;
2035 int fd_ring
= k3_ringacc_get_ring_id(uc
->rflow
->fd_ring
);
2036 int rx_ring
= k3_ringacc_get_ring_id(uc
->rflow
->r_ring
);
2037 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx
= { 0 };
2038 struct ti_sci_msg_rm_udmap_flow_cfg flow_req
= { 0 };
2039 u32 mode
, fetch_size
;
2042 if (uc
->config
.pkt_mode
) {
2043 mode
= TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR
;
2044 fetch_size
= cppi5_hdesc_calc_size(uc
->config
.needs_epib
,
2045 uc
->config
.psd_size
, 0);
2047 mode
= TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR
;
2048 fetch_size
= sizeof(struct cppi5_desc_hdr_t
);
2051 req_rx
.valid_params
= TISCI_UDMA_RCHAN_VALID_PARAMS
;
2052 req_rx
.nav_id
= tisci_rm
->tisci_dev_id
;
2053 req_rx
.index
= rchan
->id
;
2054 req_rx
.rx_fetch_size
= fetch_size
>> 2;
2055 req_rx
.rxcq_qnum
= rx_ring
;
2056 req_rx
.rx_chan_type
= mode
;
2057 req_rx
.rx_atype
= uc
->config
.atype
;
2059 ret
= tisci_ops
->rx_ch_cfg(tisci_rm
->tisci
, &req_rx
);
2061 dev_err(ud
->dev
, "rchan%d cfg failed %d\n", rchan
->id
, ret
);
2065 flow_req
.valid_params
=
2066 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID
|
2067 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID
|
2068 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID
|
2069 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID
|
2070 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID
|
2071 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID
|
2072 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID
|
2073 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID
|
2074 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID
|
2075 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID
|
2076 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID
|
2077 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID
|
2078 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID
;
2080 flow_req
.nav_id
= tisci_rm
->tisci_dev_id
;
2081 flow_req
.flow_index
= rchan
->id
;
2083 if (uc
->config
.needs_epib
)
2084 flow_req
.rx_einfo_present
= 1;
2086 flow_req
.rx_einfo_present
= 0;
2087 if (uc
->config
.psd_size
)
2088 flow_req
.rx_psinfo_present
= 1;
2090 flow_req
.rx_psinfo_present
= 0;
2091 flow_req
.rx_error_handling
= 1;
2092 flow_req
.rx_dest_qnum
= rx_ring
;
2093 flow_req
.rx_src_tag_hi_sel
= UDMA_RFLOW_SRCTAG_NONE
;
2094 flow_req
.rx_src_tag_lo_sel
= UDMA_RFLOW_SRCTAG_SRC_TAG
;
2095 flow_req
.rx_dest_tag_hi_sel
= UDMA_RFLOW_DSTTAG_DST_TAG_HI
;
2096 flow_req
.rx_dest_tag_lo_sel
= UDMA_RFLOW_DSTTAG_DST_TAG_LO
;
2097 flow_req
.rx_fdq0_sz0_qnum
= fd_ring
;
2098 flow_req
.rx_fdq1_qnum
= fd_ring
;
2099 flow_req
.rx_fdq2_qnum
= fd_ring
;
2100 flow_req
.rx_fdq3_qnum
= fd_ring
;
2102 ret
= tisci_ops
->rx_flow_cfg(tisci_rm
->tisci
, &flow_req
);
2105 dev_err(ud
->dev
, "flow%d config failed: %d\n", rchan
->id
, ret
);
2110 static int bcdma_tisci_rx_channel_config(struct udma_chan
*uc
)
2112 struct udma_dev
*ud
= uc
->ud
;
2113 struct udma_tisci_rm
*tisci_rm
= &ud
->tisci_rm
;
2114 const struct ti_sci_rm_udmap_ops
*tisci_ops
= tisci_rm
->tisci_udmap_ops
;
2115 struct udma_rchan
*rchan
= uc
->rchan
;
2116 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx
= { 0 };
2119 req_rx
.valid_params
= TISCI_BCDMA_RCHAN_VALID_PARAMS
;
2120 req_rx
.nav_id
= tisci_rm
->tisci_dev_id
;
2121 req_rx
.index
= rchan
->id
;
2123 ret
= tisci_ops
->rx_ch_cfg(tisci_rm
->tisci
, &req_rx
);
2125 dev_err(ud
->dev
, "rchan%d cfg failed %d\n", rchan
->id
, ret
);
2130 static int pktdma_tisci_rx_channel_config(struct udma_chan
*uc
)
2132 struct udma_dev
*ud
= uc
->ud
;
2133 struct udma_tisci_rm
*tisci_rm
= &ud
->tisci_rm
;
2134 const struct ti_sci_rm_udmap_ops
*tisci_ops
= tisci_rm
->tisci_udmap_ops
;
2135 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx
= { 0 };
2136 struct ti_sci_msg_rm_udmap_flow_cfg flow_req
= { 0 };
2139 req_rx
.valid_params
= TISCI_BCDMA_RCHAN_VALID_PARAMS
;
2140 req_rx
.nav_id
= tisci_rm
->tisci_dev_id
;
2141 req_rx
.index
= uc
->rchan
->id
;
2143 ret
= tisci_ops
->rx_ch_cfg(tisci_rm
->tisci
, &req_rx
);
2145 dev_err(ud
->dev
, "rchan%d cfg failed %d\n", uc
->rchan
->id
, ret
);
2149 flow_req
.valid_params
=
2150 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID
|
2151 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID
|
2152 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID
;
2154 flow_req
.nav_id
= tisci_rm
->tisci_dev_id
;
2155 flow_req
.flow_index
= uc
->rflow
->id
;
2157 if (uc
->config
.needs_epib
)
2158 flow_req
.rx_einfo_present
= 1;
2160 flow_req
.rx_einfo_present
= 0;
2161 if (uc
->config
.psd_size
)
2162 flow_req
.rx_psinfo_present
= 1;
2164 flow_req
.rx_psinfo_present
= 0;
2165 flow_req
.rx_error_handling
= 1;
2167 ret
= tisci_ops
->rx_flow_cfg(tisci_rm
->tisci
, &flow_req
);
2170 dev_err(ud
->dev
, "flow%d config failed: %d\n", uc
->rflow
->id
,
2176 static int udma_alloc_chan_resources(struct dma_chan
*chan
)
2178 struct udma_chan
*uc
= to_udma_chan(chan
);
2179 struct udma_dev
*ud
= to_udma_dev(chan
->device
);
2180 const struct udma_soc_data
*soc_data
= ud
->soc_data
;
2181 struct k3_ring
*irq_ring
;
2185 uc
->dma_dev
= ud
->dev
;
2187 if (uc
->config
.pkt_mode
|| uc
->config
.dir
== DMA_MEM_TO_MEM
) {
2188 uc
->use_dma_pool
= true;
2189 /* in case of MEM_TO_MEM we have maximum of two TRs */
2190 if (uc
->config
.dir
== DMA_MEM_TO_MEM
) {
2191 uc
->config
.hdesc_size
= cppi5_trdesc_calc_size(
2192 sizeof(struct cppi5_tr_type15_t
), 2);
2193 uc
->config
.pkt_mode
= false;
2197 if (uc
->use_dma_pool
) {
2198 uc
->hdesc_pool
= dma_pool_create(uc
->name
, ud
->ddev
.dev
,
2199 uc
->config
.hdesc_size
,
2202 if (!uc
->hdesc_pool
) {
2203 dev_err(ud
->ddev
.dev
,
2204 "Descriptor pool allocation failed\n");
2205 uc
->use_dma_pool
= false;
2212 * Make sure that the completion is in a known state:
2213 * No teardown, the channel is idle
2215 reinit_completion(&uc
->teardown_completed
);
2216 complete_all(&uc
->teardown_completed
);
2217 uc
->state
= UDMA_CHAN_IS_IDLE
;
2219 switch (uc
->config
.dir
) {
2220 case DMA_MEM_TO_MEM
:
2221 /* Non synchronized - mem to mem type of transfer */
2222 dev_dbg(uc
->ud
->dev
, "%s: chan%d as MEM-to-MEM\n", __func__
,
2225 ret
= udma_get_chan_pair(uc
);
2229 ret
= udma_alloc_tx_resources(uc
);
2235 ret
= udma_alloc_rx_resources(uc
);
2237 udma_free_tx_resources(uc
);
2241 uc
->config
.src_thread
= ud
->psil_base
+ uc
->tchan
->id
;
2242 uc
->config
.dst_thread
= (ud
->psil_base
+ uc
->rchan
->id
) |
2243 K3_PSIL_DST_THREAD_ID_OFFSET
;
2245 irq_ring
= uc
->tchan
->tc_ring
;
2246 irq_udma_idx
= uc
->tchan
->id
;
2248 ret
= udma_tisci_m2m_channel_config(uc
);
2250 case DMA_MEM_TO_DEV
:
2251 /* Slave transfer synchronized - mem to dev (TX) trasnfer */
2252 dev_dbg(uc
->ud
->dev
, "%s: chan%d as MEM-to-DEV\n", __func__
,
2255 ret
= udma_alloc_tx_resources(uc
);
2259 uc
->config
.src_thread
= ud
->psil_base
+ uc
->tchan
->id
;
2260 uc
->config
.dst_thread
= uc
->config
.remote_thread_id
;
2261 uc
->config
.dst_thread
|= K3_PSIL_DST_THREAD_ID_OFFSET
;
2263 irq_ring
= uc
->tchan
->tc_ring
;
2264 irq_udma_idx
= uc
->tchan
->id
;
2266 ret
= udma_tisci_tx_channel_config(uc
);
2268 case DMA_DEV_TO_MEM
:
2269 /* Slave transfer synchronized - dev to mem (RX) trasnfer */
2270 dev_dbg(uc
->ud
->dev
, "%s: chan%d as DEV-to-MEM\n", __func__
,
2273 ret
= udma_alloc_rx_resources(uc
);
2277 uc
->config
.src_thread
= uc
->config
.remote_thread_id
;
2278 uc
->config
.dst_thread
= (ud
->psil_base
+ uc
->rchan
->id
) |
2279 K3_PSIL_DST_THREAD_ID_OFFSET
;
2281 irq_ring
= uc
->rflow
->r_ring
;
2282 irq_udma_idx
= soc_data
->oes
.udma_rchan
+ uc
->rchan
->id
;
2284 ret
= udma_tisci_rx_channel_config(uc
);
2287 /* Can not happen */
2288 dev_err(uc
->ud
->dev
, "%s: chan%d invalid direction (%u)\n",
2289 __func__
, uc
->id
, uc
->config
.dir
);
2295 /* check if the channel configuration was successful */
2299 if (udma_is_chan_running(uc
)) {
2300 dev_warn(ud
->dev
, "chan%d: is running!\n", uc
->id
);
2301 udma_reset_chan(uc
, false);
2302 if (udma_is_chan_running(uc
)) {
2303 dev_err(ud
->dev
, "chan%d: won't stop!\n", uc
->id
);
2310 ret
= navss_psil_pair(ud
, uc
->config
.src_thread
, uc
->config
.dst_thread
);
2312 dev_err(ud
->dev
, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2313 uc
->config
.src_thread
, uc
->config
.dst_thread
);
2317 uc
->psil_paired
= true;
2319 uc
->irq_num_ring
= k3_ringacc_get_ring_irq_num(irq_ring
);
2320 if (uc
->irq_num_ring
<= 0) {
2321 dev_err(ud
->dev
, "Failed to get ring irq (index: %u)\n",
2322 k3_ringacc_get_ring_id(irq_ring
));
2327 ret
= request_irq(uc
->irq_num_ring
, udma_ring_irq_handler
,
2328 IRQF_TRIGGER_HIGH
, uc
->name
, uc
);
2330 dev_err(ud
->dev
, "chan%d: ring irq request failed\n", uc
->id
);
2334 /* Event from UDMA (TR events) only needed for slave TR mode channels */
2335 if (is_slave_direction(uc
->config
.dir
) && !uc
->config
.pkt_mode
) {
2336 uc
->irq_num_udma
= msi_get_virq(ud
->dev
, irq_udma_idx
);
2337 if (uc
->irq_num_udma
<= 0) {
2338 dev_err(ud
->dev
, "Failed to get udma irq (index: %u)\n",
2340 free_irq(uc
->irq_num_ring
, uc
);
2345 ret
= request_irq(uc
->irq_num_udma
, udma_udma_irq_handler
, 0,
2348 dev_err(ud
->dev
, "chan%d: UDMA irq request failed\n",
2350 free_irq(uc
->irq_num_ring
, uc
);
2354 uc
->irq_num_udma
= 0;
2357 udma_reset_rings(uc
);
2362 uc
->irq_num_ring
= 0;
2363 uc
->irq_num_udma
= 0;
2365 navss_psil_unpair(ud
, uc
->config
.src_thread
, uc
->config
.dst_thread
);
2366 uc
->psil_paired
= false;
2368 udma_free_tx_resources(uc
);
2369 udma_free_rx_resources(uc
);
2371 udma_reset_uchan(uc
);
2373 if (uc
->use_dma_pool
) {
2374 dma_pool_destroy(uc
->hdesc_pool
);
2375 uc
->use_dma_pool
= false;
2381 static int bcdma_alloc_chan_resources(struct dma_chan
*chan
)
2383 struct udma_chan
*uc
= to_udma_chan(chan
);
2384 struct udma_dev
*ud
= to_udma_dev(chan
->device
);
2385 const struct udma_oes_offsets
*oes
= &ud
->soc_data
->oes
;
2386 u32 irq_udma_idx
, irq_ring_idx
;
2389 /* Only TR mode is supported */
2390 uc
->config
.pkt_mode
= false;
2393 * Make sure that the completion is in a known state:
2394 * No teardown, the channel is idle
2396 reinit_completion(&uc
->teardown_completed
);
2397 complete_all(&uc
->teardown_completed
);
2398 uc
->state
= UDMA_CHAN_IS_IDLE
;
2400 switch (uc
->config
.dir
) {
2401 case DMA_MEM_TO_MEM
:
2402 /* Non synchronized - mem to mem type of transfer */
2403 dev_dbg(uc
->ud
->dev
, "%s: chan%d as MEM-to-MEM\n", __func__
,
2406 ret
= bcdma_alloc_bchan_resources(uc
);
2410 irq_ring_idx
= uc
->bchan
->id
+ oes
->bcdma_bchan_ring
;
2411 irq_udma_idx
= uc
->bchan
->id
+ oes
->bcdma_bchan_data
;
2413 ret
= bcdma_tisci_m2m_channel_config(uc
);
2415 case DMA_MEM_TO_DEV
:
2416 /* Slave transfer synchronized - mem to dev (TX) trasnfer */
2417 dev_dbg(uc
->ud
->dev
, "%s: chan%d as MEM-to-DEV\n", __func__
,
2420 ret
= udma_alloc_tx_resources(uc
);
2422 uc
->config
.remote_thread_id
= -1;
2426 uc
->config
.src_thread
= ud
->psil_base
+ uc
->tchan
->id
;
2427 uc
->config
.dst_thread
= uc
->config
.remote_thread_id
;
2428 uc
->config
.dst_thread
|= K3_PSIL_DST_THREAD_ID_OFFSET
;
2430 irq_ring_idx
= uc
->tchan
->id
+ oes
->bcdma_tchan_ring
;
2431 irq_udma_idx
= uc
->tchan
->id
+ oes
->bcdma_tchan_data
;
2433 ret
= bcdma_tisci_tx_channel_config(uc
);
2435 case DMA_DEV_TO_MEM
:
2436 /* Slave transfer synchronized - dev to mem (RX) trasnfer */
2437 dev_dbg(uc
->ud
->dev
, "%s: chan%d as DEV-to-MEM\n", __func__
,
2440 ret
= udma_alloc_rx_resources(uc
);
2442 uc
->config
.remote_thread_id
= -1;
2446 uc
->config
.src_thread
= uc
->config
.remote_thread_id
;
2447 uc
->config
.dst_thread
= (ud
->psil_base
+ uc
->rchan
->id
) |
2448 K3_PSIL_DST_THREAD_ID_OFFSET
;
2450 irq_ring_idx
= uc
->rchan
->id
+ oes
->bcdma_rchan_ring
;
2451 irq_udma_idx
= uc
->rchan
->id
+ oes
->bcdma_rchan_data
;
2453 ret
= bcdma_tisci_rx_channel_config(uc
);
2456 /* Can not happen */
2457 dev_err(uc
->ud
->dev
, "%s: chan%d invalid direction (%u)\n",
2458 __func__
, uc
->id
, uc
->config
.dir
);
2462 /* check if the channel configuration was successful */
2466 if (udma_is_chan_running(uc
)) {
2467 dev_warn(ud
->dev
, "chan%d: is running!\n", uc
->id
);
2468 udma_reset_chan(uc
, false);
2469 if (udma_is_chan_running(uc
)) {
2470 dev_err(ud
->dev
, "chan%d: won't stop!\n", uc
->id
);
2476 uc
->dma_dev
= dmaengine_get_dma_device(chan
);
2477 if (uc
->config
.dir
== DMA_MEM_TO_MEM
&& !uc
->config
.tr_trigger_type
) {
2478 uc
->config
.hdesc_size
= cppi5_trdesc_calc_size(
2479 sizeof(struct cppi5_tr_type15_t
), 2);
2481 uc
->hdesc_pool
= dma_pool_create(uc
->name
, ud
->ddev
.dev
,
2482 uc
->config
.hdesc_size
,
2485 if (!uc
->hdesc_pool
) {
2486 dev_err(ud
->ddev
.dev
,
2487 "Descriptor pool allocation failed\n");
2488 uc
->use_dma_pool
= false;
2493 uc
->use_dma_pool
= true;
2494 } else if (uc
->config
.dir
!= DMA_MEM_TO_MEM
) {
2496 ret
= navss_psil_pair(ud
, uc
->config
.src_thread
,
2497 uc
->config
.dst_thread
);
2500 "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2501 uc
->config
.src_thread
, uc
->config
.dst_thread
);
2505 uc
->psil_paired
= true;
2508 uc
->irq_num_ring
= msi_get_virq(ud
->dev
, irq_ring_idx
);
2509 if (uc
->irq_num_ring
<= 0) {
2510 dev_err(ud
->dev
, "Failed to get ring irq (index: %u)\n",
2516 ret
= request_irq(uc
->irq_num_ring
, udma_ring_irq_handler
,
2517 IRQF_TRIGGER_HIGH
, uc
->name
, uc
);
2519 dev_err(ud
->dev
, "chan%d: ring irq request failed\n", uc
->id
);
2523 /* Event from BCDMA (TR events) only needed for slave channels */
2524 if (is_slave_direction(uc
->config
.dir
)) {
2525 uc
->irq_num_udma
= msi_get_virq(ud
->dev
, irq_udma_idx
);
2526 if (uc
->irq_num_udma
<= 0) {
2527 dev_err(ud
->dev
, "Failed to get bcdma irq (index: %u)\n",
2529 free_irq(uc
->irq_num_ring
, uc
);
2534 ret
= request_irq(uc
->irq_num_udma
, udma_udma_irq_handler
, 0,
2537 dev_err(ud
->dev
, "chan%d: BCDMA irq request failed\n",
2539 free_irq(uc
->irq_num_ring
, uc
);
2543 uc
->irq_num_udma
= 0;
2546 udma_reset_rings(uc
);
2548 INIT_DELAYED_WORK_ONSTACK(&uc
->tx_drain
.work
,
2549 udma_check_tx_completion
);
2553 uc
->irq_num_ring
= 0;
2554 uc
->irq_num_udma
= 0;
2556 if (uc
->psil_paired
)
2557 navss_psil_unpair(ud
, uc
->config
.src_thread
,
2558 uc
->config
.dst_thread
);
2559 uc
->psil_paired
= false;
2561 bcdma_free_bchan_resources(uc
);
2562 udma_free_tx_resources(uc
);
2563 udma_free_rx_resources(uc
);
2565 udma_reset_uchan(uc
);
2567 if (uc
->use_dma_pool
) {
2568 dma_pool_destroy(uc
->hdesc_pool
);
2569 uc
->use_dma_pool
= false;
static int bcdma_router_config(struct dma_chan *chan)
{
	struct k3_event_route_data *router_data = chan->route_data;
	struct udma_chan *uc = to_udma_chan(chan);
	u32 trigger_event;

	if (uc->config.tr_trigger_type != 1 && uc->config.tr_trigger_type != 2)
		return -EINVAL;

	trigger_event = uc->ud->soc_data->bcdma_trigger_event_offset;
	trigger_event += (uc->bchan->id * 2) + uc->config.tr_trigger_type - 1;

	return router_data->set_event(router_data->priv, trigger_event);
}
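/*
 * Illustrative note (not part of the original source): the event-router slot
 * follows directly from the computation above. Each bchan owns two global
 * trigger slots, so with the AM64 trigger offset of 0xc400 (see am64_soc_data
 * further below), bchan 1 used with tr_trigger_type 2 would resolve to
 * 0xc400 + (1 * 2) + 2 - 1 = 0xc403.
 */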
2593 static int pktdma_alloc_chan_resources(struct dma_chan
*chan
)
2595 struct udma_chan
*uc
= to_udma_chan(chan
);
2596 struct udma_dev
*ud
= to_udma_dev(chan
->device
);
2597 const struct udma_oes_offsets
*oes
= &ud
->soc_data
->oes
;
2602 * Make sure that the completion is in a known state:
2603 * No teardown, the channel is idle
2605 reinit_completion(&uc
->teardown_completed
);
2606 complete_all(&uc
->teardown_completed
);
2607 uc
->state
= UDMA_CHAN_IS_IDLE
;
2609 switch (uc
->config
.dir
) {
2610 case DMA_MEM_TO_DEV
:
2611 /* Slave transfer synchronized - mem to dev (TX) transfer */
2612 dev_dbg(uc
->ud
->dev
, "%s: chan%d as MEM-to-DEV\n", __func__
,
2615 ret
= udma_alloc_tx_resources(uc
);
2617 uc
->config
.remote_thread_id
= -1;
2621 uc
->config
.src_thread
= ud
->psil_base
+ uc
->tchan
->id
;
2622 uc
->config
.dst_thread
= uc
->config
.remote_thread_id
;
2623 uc
->config
.dst_thread
|= K3_PSIL_DST_THREAD_ID_OFFSET
;
2625 irq_ring_idx
= uc
->tchan
->tflow_id
+ oes
->pktdma_tchan_flow
;
2627 ret
= pktdma_tisci_tx_channel_config(uc
);
2629 case DMA_DEV_TO_MEM
:
2630 /* Slave transfer synchronized - dev to mem (RX) transfer */
2631 dev_dbg(uc
->ud
->dev
, "%s: chan%d as DEV-to-MEM\n", __func__
,
2634 ret
= udma_alloc_rx_resources(uc
);
2636 uc
->config
.remote_thread_id
= -1;
2640 uc
->config
.src_thread
= uc
->config
.remote_thread_id
;
2641 uc
->config
.dst_thread
= (ud
->psil_base
+ uc
->rchan
->id
) |
2642 K3_PSIL_DST_THREAD_ID_OFFSET
;
2644 irq_ring_idx
= uc
->rflow
->id
+ oes
->pktdma_rchan_flow
;
2646 ret
= pktdma_tisci_rx_channel_config(uc
);
2649 /* Can not happen */
2650 dev_err(uc
->ud
->dev
, "%s: chan%d invalid direction (%u)\n",
2651 __func__
, uc
->id
, uc
->config
.dir
);
2655 /* check if the channel configuration was successful */
2659 if (udma_is_chan_running(uc
)) {
2660 dev_warn(ud
->dev
, "chan%d: is running!\n", uc
->id
);
2661 udma_reset_chan(uc
, false);
2662 if (udma_is_chan_running(uc
)) {
2663 dev_err(ud
->dev
, "chan%d: won't stop!\n", uc
->id
);
2669 uc
->dma_dev
= dmaengine_get_dma_device(chan
);
2670 uc
->hdesc_pool
= dma_pool_create(uc
->name
, uc
->dma_dev
,
2671 uc
->config
.hdesc_size
, ud
->desc_align
,
2673 if (!uc
->hdesc_pool
) {
2674 dev_err(ud
->ddev
.dev
,
2675 "Descriptor pool allocation failed\n");
2676 uc
->use_dma_pool
= false;
2681 uc
->use_dma_pool
= true;
2684 ret
= navss_psil_pair(ud
, uc
->config
.src_thread
, uc
->config
.dst_thread
);
2686 dev_err(ud
->dev
, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2687 uc
->config
.src_thread
, uc
->config
.dst_thread
);
2691 uc
->psil_paired
= true;
2693 uc
->irq_num_ring
= msi_get_virq(ud
->dev
, irq_ring_idx
);
2694 if (uc
->irq_num_ring
<= 0) {
2695 dev_err(ud
->dev
, "Failed to get ring irq (index: %u)\n",
2701 ret
= request_irq(uc
->irq_num_ring
, udma_ring_irq_handler
,
2702 IRQF_TRIGGER_HIGH
, uc
->name
, uc
);
2704 dev_err(ud
->dev
, "chan%d: ring irq request failed\n", uc
->id
);
2708 uc
->irq_num_udma
= 0;
2710 udma_reset_rings(uc
);
2712 INIT_DELAYED_WORK_ONSTACK(&uc
->tx_drain
.work
,
2713 udma_check_tx_completion
);
2717 "chan%d: tchan%d, tflow%d, Remote thread: 0x%04x\n",
2718 uc
->id
, uc
->tchan
->id
, uc
->tchan
->tflow_id
,
2719 uc
->config
.remote_thread_id
);
2722 "chan%d: rchan%d, rflow%d, Remote thread: 0x%04x\n",
2723 uc
->id
, uc
->rchan
->id
, uc
->rflow
->id
,
2724 uc
->config
.remote_thread_id
);
2728 uc
->irq_num_ring
= 0;
2730 navss_psil_unpair(ud
, uc
->config
.src_thread
, uc
->config
.dst_thread
);
2731 uc
->psil_paired
= false;
2733 udma_free_tx_resources(uc
);
2734 udma_free_rx_resources(uc
);
2736 udma_reset_uchan(uc
);
2738 dma_pool_destroy(uc
->hdesc_pool
);
2739 uc
->use_dma_pool
= false;
static int udma_slave_config(struct dma_chan *chan,
			     struct dma_slave_config *cfg)
{
	struct udma_chan *uc = to_udma_chan(chan);

	memcpy(&uc->cfg, cfg, sizeof(uc->cfg));

	return 0;
}
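/*
 * Note (added for clarity, not in the original source): the slave
 * configuration is only cached here and is evaluated later in the
 * prep_slave_sg/prep_dma_cyclic paths. A client is expected to call
 * dmaengine_slave_config() with the peripheral address, bus width and
 * maxburst before preparing descriptors on this channel.
 */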
2754 static struct udma_desc
*udma_alloc_tr_desc(struct udma_chan
*uc
,
2755 size_t tr_size
, int tr_count
,
2756 enum dma_transfer_direction dir
)
2758 struct udma_hwdesc
*hwdesc
;
2759 struct cppi5_desc_hdr_t
*tr_desc
;
2760 struct udma_desc
*d
;
2761 u32 reload_count
= 0;
2771 dev_err(uc
->ud
->dev
, "Unsupported TR size of %zu\n", tr_size
);
2775 /* We have only one descriptor containing multiple TRs */
2776 d
= kzalloc(sizeof(*d
) + sizeof(d
->hwdesc
[0]), GFP_NOWAIT
);
2780 d
->sglen
= tr_count
;
2782 d
->hwdesc_count
= 1;
2783 hwdesc
= &d
->hwdesc
[0];
2785 /* Allocate memory for DMA ring descriptor */
2786 if (uc
->use_dma_pool
) {
2787 hwdesc
->cppi5_desc_size
= uc
->config
.hdesc_size
;
2788 hwdesc
->cppi5_desc_vaddr
= dma_pool_zalloc(uc
->hdesc_pool
,
2790 &hwdesc
->cppi5_desc_paddr
);
2792 hwdesc
->cppi5_desc_size
= cppi5_trdesc_calc_size(tr_size
,
2794 hwdesc
->cppi5_desc_size
= ALIGN(hwdesc
->cppi5_desc_size
,
2795 uc
->ud
->desc_align
);
2796 hwdesc
->cppi5_desc_vaddr
= dma_alloc_coherent(uc
->ud
->dev
,
2797 hwdesc
->cppi5_desc_size
,
2798 &hwdesc
->cppi5_desc_paddr
,
2802 if (!hwdesc
->cppi5_desc_vaddr
) {
2807 /* Start of the TR req records */
2808 hwdesc
->tr_req_base
= hwdesc
->cppi5_desc_vaddr
+ tr_size
;
2809 /* Start address of the TR response array */
2810 hwdesc
->tr_resp_base
= hwdesc
->tr_req_base
+ tr_size
* tr_count
;
2812 tr_desc
= hwdesc
->cppi5_desc_vaddr
;
2815 reload_count
= CPPI5_INFO0_TRDESC_RLDCNT_INFINITE
;
2817 if (dir
== DMA_DEV_TO_MEM
)
2818 ring_id
= k3_ringacc_get_ring_id(uc
->rflow
->r_ring
);
2820 ring_id
= k3_ringacc_get_ring_id(uc
->tchan
->tc_ring
);
2822 cppi5_trdesc_init(tr_desc
, tr_count
, tr_size
, 0, reload_count
);
2823 cppi5_desc_set_pktids(tr_desc
, uc
->id
,
2824 CPPI5_INFO1_DESC_FLOWID_DEFAULT
);
2825 cppi5_desc_set_retpolicy(tr_desc
, 0, ring_id
);
/**
 * udma_get_tr_counters - calculate TR counters for a given length
 * @len: Length of the transfer
 * @align_to: Preferred alignment
 * @tr0_cnt0: First TR icnt0
 * @tr0_cnt1: First TR icnt1
 * @tr1_cnt0: Second (if used) TR icnt0
 *
 * For len < SZ_64K only one TR is enough, tr1_cnt0 is not updated
 * For len >= SZ_64K two TRs are used in a simple way:
 * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1)
 * Second TR: the remaining length (tr1_cnt0)
 *
 * Returns the number of TRs the length needs (1 or 2)
 * -EINVAL if the length can not be supported
 */
static int udma_get_tr_counters(size_t len, unsigned long align_to,
				u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0)
{
	if (len < SZ_64K) {
		*tr0_cnt0 = len;
		*tr0_cnt1 = 1;

		return 1;
	}

	if (align_to > 3)
		align_to = 3;

realign:
	*tr0_cnt0 = SZ_64K - BIT(align_to);
	if (len / *tr0_cnt0 >= SZ_64K) {
		if (align_to) {
			align_to--;
			goto realign;
		}
		return -EINVAL;
	}

	*tr0_cnt1 = len / *tr0_cnt0;
	*tr1_cnt0 = len % *tr0_cnt0;

	return 2;
}
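/*
 * Worked example (illustrative, not from the original source): for
 * len = 200000 bytes on a 4-byte aligned address (align_to = 2),
 * tr0_cnt0 = SZ_64K - BIT(2) = 65532, tr0_cnt1 = 200000 / 65532 = 3 and
 * tr1_cnt0 = 200000 % 65532 = 3404, so the transfer needs two TRs: a
 * 3 x 65532 byte block followed by a 3404 byte remainder.
 */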
2875 static struct udma_desc
*
2876 udma_prep_slave_sg_tr(struct udma_chan
*uc
, struct scatterlist
*sgl
,
2877 unsigned int sglen
, enum dma_transfer_direction dir
,
2878 unsigned long tx_flags
, void *context
)
2880 struct scatterlist
*sgent
;
2881 struct udma_desc
*d
;
2882 struct cppi5_tr_type1_t
*tr_req
= NULL
;
2883 u16 tr0_cnt0
, tr0_cnt1
, tr1_cnt0
;
2890 /* estimate the number of TRs we will need */
2891 for_each_sg(sgl
, sgent
, sglen
, i
) {
2892 if (sg_dma_len(sgent
) < SZ_64K
)
2898 /* Now allocate and setup the descriptor. */
2899 tr_size
= sizeof(struct cppi5_tr_type1_t
);
2900 d
= udma_alloc_tr_desc(uc
, tr_size
, num_tr
, dir
);
2906 if (uc
->ud
->match_data
->type
== DMA_TYPE_UDMA
)
2909 asel
= (u64
)uc
->config
.asel
<< K3_ADDRESS_ASEL_SHIFT
;
2911 tr_req
= d
->hwdesc
[0].tr_req_base
;
2912 for_each_sg(sgl
, sgent
, sglen
, i
) {
2913 dma_addr_t sg_addr
= sg_dma_address(sgent
);
2915 num_tr
= udma_get_tr_counters(sg_dma_len(sgent
), __ffs(sg_addr
),
2916 &tr0_cnt0
, &tr0_cnt1
, &tr1_cnt0
);
2918 dev_err(uc
->ud
->dev
, "size %u is not supported\n",
2920 udma_free_hwdesc(uc
, d
);
2925 cppi5_tr_init(&tr_req
[tr_idx
].flags
, CPPI5_TR_TYPE1
, false,
2926 false, CPPI5_TR_EVENT_SIZE_COMPLETION
, 0);
2927 cppi5_tr_csf_set(&tr_req
[tr_idx
].flags
, CPPI5_TR_CSF_SUPR_EVT
);
2930 tr_req
[tr_idx
].addr
= sg_addr
;
2931 tr_req
[tr_idx
].icnt0
= tr0_cnt0
;
2932 tr_req
[tr_idx
].icnt1
= tr0_cnt1
;
2933 tr_req
[tr_idx
].dim1
= tr0_cnt0
;
2937 cppi5_tr_init(&tr_req
[tr_idx
].flags
, CPPI5_TR_TYPE1
,
2939 CPPI5_TR_EVENT_SIZE_COMPLETION
, 0);
2940 cppi5_tr_csf_set(&tr_req
[tr_idx
].flags
,
2941 CPPI5_TR_CSF_SUPR_EVT
);
2943 tr_req
[tr_idx
].addr
= sg_addr
+ tr0_cnt1
* tr0_cnt0
;
2944 tr_req
[tr_idx
].icnt0
= tr1_cnt0
;
2945 tr_req
[tr_idx
].icnt1
= 1;
2946 tr_req
[tr_idx
].dim1
= tr1_cnt0
;
2950 d
->residue
+= sg_dma_len(sgent
);
2953 cppi5_tr_csf_set(&tr_req
[tr_idx
- 1].flags
,
2954 CPPI5_TR_CSF_SUPR_EVT
| CPPI5_TR_CSF_EOP
);
2959 static struct udma_desc
*
2960 udma_prep_slave_sg_triggered_tr(struct udma_chan
*uc
, struct scatterlist
*sgl
,
2962 enum dma_transfer_direction dir
,
2963 unsigned long tx_flags
, void *context
)
2965 struct scatterlist
*sgent
;
2966 struct cppi5_tr_type15_t
*tr_req
= NULL
;
2967 enum dma_slave_buswidth dev_width
;
2968 u32 csf
= CPPI5_TR_CSF_SUPR_EVT
;
2969 u16 tr_cnt0
, tr_cnt1
;
2970 dma_addr_t dev_addr
;
2971 struct udma_desc
*d
;
2973 size_t tr_size
, sg_len
;
2976 u32 burst
, trigger_size
, port_window
;
2979 if (dir
== DMA_DEV_TO_MEM
) {
2980 dev_addr
= uc
->cfg
.src_addr
;
2981 dev_width
= uc
->cfg
.src_addr_width
;
2982 burst
= uc
->cfg
.src_maxburst
;
2983 port_window
= uc
->cfg
.src_port_window_size
;
2984 } else if (dir
== DMA_MEM_TO_DEV
) {
2985 dev_addr
= uc
->cfg
.dst_addr
;
2986 dev_width
= uc
->cfg
.dst_addr_width
;
2987 burst
= uc
->cfg
.dst_maxburst
;
2988 port_window
= uc
->cfg
.dst_port_window_size
;
2990 dev_err(uc
->ud
->dev
, "%s: bad direction?\n", __func__
);
2998 if (port_window
!= burst
) {
2999 dev_err(uc
->ud
->dev
,
3000 "The burst must be equal to port_window\n");
3004 tr_cnt0
= dev_width
* port_window
;
3007 tr_cnt0
= dev_width
;
3010 trigger_size
= tr_cnt0
* tr_cnt1
;
3012 /* estimate the number of TRs we will need */
3013 for_each_sg(sgl
, sgent
, sglen
, i
) {
3014 sg_len
= sg_dma_len(sgent
);
3016 if (sg_len
% trigger_size
) {
3017 dev_err(uc
->ud
->dev
,
3018 "Not aligned SG entry (%zu for %u)\n", sg_len
,
3023 if (sg_len
/ trigger_size
< SZ_64K
)
3029 /* Now allocate and setup the descriptor. */
3030 tr_size
= sizeof(struct cppi5_tr_type15_t
);
3031 d
= udma_alloc_tr_desc(uc
, tr_size
, num_tr
, dir
);
3037 if (uc
->ud
->match_data
->type
== DMA_TYPE_UDMA
) {
3039 csf
|= CPPI5_TR_CSF_EOL_ICNT0
;
3041 asel
= (u64
)uc
->config
.asel
<< K3_ADDRESS_ASEL_SHIFT
;
3045 tr_req
= d
->hwdesc
[0].tr_req_base
;
3046 for_each_sg(sgl
, sgent
, sglen
, i
) {
3047 u16 tr0_cnt2
, tr0_cnt3
, tr1_cnt2
;
3048 dma_addr_t sg_addr
= sg_dma_address(sgent
);
3050 sg_len
= sg_dma_len(sgent
);
3051 num_tr
= udma_get_tr_counters(sg_len
/ trigger_size
, 0,
3052 &tr0_cnt2
, &tr0_cnt3
, &tr1_cnt2
);
3054 dev_err(uc
->ud
->dev
, "size %zu is not supported\n",
3056 udma_free_hwdesc(uc
, d
);
3061 cppi5_tr_init(&tr_req
[tr_idx
].flags
, CPPI5_TR_TYPE15
, false,
3062 true, CPPI5_TR_EVENT_SIZE_COMPLETION
, 0);
3063 cppi5_tr_csf_set(&tr_req
[tr_idx
].flags
, csf
);
3064 cppi5_tr_set_trigger(&tr_req
[tr_idx
].flags
,
3065 uc
->config
.tr_trigger_type
,
3066 CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC
, 0, 0);
3069 if (dir
== DMA_DEV_TO_MEM
) {
3070 tr_req
[tr_idx
].addr
= dev_addr
;
3071 tr_req
[tr_idx
].icnt0
= tr_cnt0
;
3072 tr_req
[tr_idx
].icnt1
= tr_cnt1
;
3073 tr_req
[tr_idx
].icnt2
= tr0_cnt2
;
3074 tr_req
[tr_idx
].icnt3
= tr0_cnt3
;
3075 tr_req
[tr_idx
].dim1
= (-1) * tr_cnt0
;
3077 tr_req
[tr_idx
].daddr
= sg_addr
;
3078 tr_req
[tr_idx
].dicnt0
= tr_cnt0
;
3079 tr_req
[tr_idx
].dicnt1
= tr_cnt1
;
3080 tr_req
[tr_idx
].dicnt2
= tr0_cnt2
;
3081 tr_req
[tr_idx
].dicnt3
= tr0_cnt3
;
3082 tr_req
[tr_idx
].ddim1
= tr_cnt0
;
3083 tr_req
[tr_idx
].ddim2
= trigger_size
;
3084 tr_req
[tr_idx
].ddim3
= trigger_size
* tr0_cnt2
;
3086 tr_req
[tr_idx
].addr
= sg_addr
;
3087 tr_req
[tr_idx
].icnt0
= tr_cnt0
;
3088 tr_req
[tr_idx
].icnt1
= tr_cnt1
;
3089 tr_req
[tr_idx
].icnt2
= tr0_cnt2
;
3090 tr_req
[tr_idx
].icnt3
= tr0_cnt3
;
3091 tr_req
[tr_idx
].dim1
= tr_cnt0
;
3092 tr_req
[tr_idx
].dim2
= trigger_size
;
3093 tr_req
[tr_idx
].dim3
= trigger_size
* tr0_cnt2
;
3095 tr_req
[tr_idx
].daddr
= dev_addr
;
3096 tr_req
[tr_idx
].dicnt0
= tr_cnt0
;
3097 tr_req
[tr_idx
].dicnt1
= tr_cnt1
;
3098 tr_req
[tr_idx
].dicnt2
= tr0_cnt2
;
3099 tr_req
[tr_idx
].dicnt3
= tr0_cnt3
;
3100 tr_req
[tr_idx
].ddim1
= (-1) * tr_cnt0
;
3106 cppi5_tr_init(&tr_req
[tr_idx
].flags
, CPPI5_TR_TYPE15
,
3108 CPPI5_TR_EVENT_SIZE_COMPLETION
, 0);
3109 cppi5_tr_csf_set(&tr_req
[tr_idx
].flags
, csf
);
3110 cppi5_tr_set_trigger(&tr_req
[tr_idx
].flags
,
3111 uc
->config
.tr_trigger_type
,
3112 CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC
,
3115 sg_addr
+= trigger_size
* tr0_cnt2
* tr0_cnt3
;
3116 if (dir
== DMA_DEV_TO_MEM
) {
3117 tr_req
[tr_idx
].addr
= dev_addr
;
3118 tr_req
[tr_idx
].icnt0
= tr_cnt0
;
3119 tr_req
[tr_idx
].icnt1
= tr_cnt1
;
3120 tr_req
[tr_idx
].icnt2
= tr1_cnt2
;
3121 tr_req
[tr_idx
].icnt3
= 1;
3122 tr_req
[tr_idx
].dim1
= (-1) * tr_cnt0
;
3124 tr_req
[tr_idx
].daddr
= sg_addr
;
3125 tr_req
[tr_idx
].dicnt0
= tr_cnt0
;
3126 tr_req
[tr_idx
].dicnt1
= tr_cnt1
;
3127 tr_req
[tr_idx
].dicnt2
= tr1_cnt2
;
3128 tr_req
[tr_idx
].dicnt3
= 1;
3129 tr_req
[tr_idx
].ddim1
= tr_cnt0
;
3130 tr_req
[tr_idx
].ddim2
= trigger_size
;
3132 tr_req
[tr_idx
].addr
= sg_addr
;
3133 tr_req
[tr_idx
].icnt0
= tr_cnt0
;
3134 tr_req
[tr_idx
].icnt1
= tr_cnt1
;
3135 tr_req
[tr_idx
].icnt2
= tr1_cnt2
;
3136 tr_req
[tr_idx
].icnt3
= 1;
3137 tr_req
[tr_idx
].dim1
= tr_cnt0
;
3138 tr_req
[tr_idx
].dim2
= trigger_size
;
3140 tr_req
[tr_idx
].daddr
= dev_addr
;
3141 tr_req
[tr_idx
].dicnt0
= tr_cnt0
;
3142 tr_req
[tr_idx
].dicnt1
= tr_cnt1
;
3143 tr_req
[tr_idx
].dicnt2
= tr1_cnt2
;
3144 tr_req
[tr_idx
].dicnt3
= 1;
3145 tr_req
[tr_idx
].ddim1
= (-1) * tr_cnt0
;
3150 d
->residue
+= sg_len
;
3153 cppi5_tr_csf_set(&tr_req
[tr_idx
- 1].flags
, csf
| CPPI5_TR_CSF_EOP
);
static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
				   enum dma_slave_buswidth dev_width,
				   u16 elcnt)
{
	if (uc->config.ep_type != PSIL_EP_PDMA_XY)
		return 0;

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		d->static_tr.elsize = 0;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		d->static_tr.elsize = 1;
		break;
	case DMA_SLAVE_BUSWIDTH_3_BYTES:
		d->static_tr.elsize = 2;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		d->static_tr.elsize = 3;
		break;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		d->static_tr.elsize = 4;
		break;
	default: /* not reached */
		return -EINVAL;
	}

	d->static_tr.elcnt = elcnt;

	if (uc->config.pkt_mode || !uc->cyclic) {
		/*
		 * PDMA must close the packet when the channel is in packet mode.
		 * For TR mode when the channel is not cyclic we also need PDMA
		 * to close the packet otherwise the transfer will stall because
		 * PDMA holds on the data it has received from the peripheral.
		 */
		unsigned int div = dev_width * elcnt;

		if (uc->cyclic)
			d->static_tr.bstcnt = d->residue / d->sglen / div;
		else
			d->static_tr.bstcnt = d->residue / div;
	} else if (uc->ud->match_data->type == DMA_TYPE_BCDMA &&
		   uc->config.dir == DMA_DEV_TO_MEM &&
		   uc->cyclic) {
		/*
		 * For cyclic mode with BCDMA we have to set EOP in each TR to
		 * prevent short packet errors seen on channel teardown. So the
		 * PDMA must close the packet after every TR transfer by setting
		 * burst count equal to the number of bytes transferred.
		 */
		struct cppi5_tr_type1_t *tr_req = d->hwdesc[0].tr_req_base;

		d->static_tr.bstcnt =
			(tr_req->icnt0 * tr_req->icnt1) / dev_width;
	} else {
		d->static_tr.bstcnt = 0;
	}

	if (uc->config.dir == DMA_DEV_TO_MEM &&
	    d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
		return -EINVAL;

	return 0;
}
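/*
 * Worked example (illustrative, not from the original source): a PDMA
 * peripheral with a 4-byte bus width and maxburst of 8 gives elsize = 3 and
 * elcnt = 8. For a single, non-cyclic 4096 byte SG entry the divisor is
 * 4 * 8 = 32, so d->static_tr.bstcnt = 4096 / 32 = 128 before PDMA closes
 * the packet.
 */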
3225 static struct udma_desc
*
3226 udma_prep_slave_sg_pkt(struct udma_chan
*uc
, struct scatterlist
*sgl
,
3227 unsigned int sglen
, enum dma_transfer_direction dir
,
3228 unsigned long tx_flags
, void *context
)
3230 struct scatterlist
*sgent
;
3231 struct cppi5_host_desc_t
*h_desc
= NULL
;
3232 struct udma_desc
*d
;
3237 d
= kzalloc(struct_size(d
, hwdesc
, sglen
), GFP_NOWAIT
);
3242 d
->hwdesc_count
= sglen
;
3244 if (dir
== DMA_DEV_TO_MEM
)
3245 ring_id
= k3_ringacc_get_ring_id(uc
->rflow
->r_ring
);
3247 ring_id
= k3_ringacc_get_ring_id(uc
->tchan
->tc_ring
);
3249 if (uc
->ud
->match_data
->type
== DMA_TYPE_UDMA
)
3252 asel
= (u64
)uc
->config
.asel
<< K3_ADDRESS_ASEL_SHIFT
;
3254 for_each_sg(sgl
, sgent
, sglen
, i
) {
3255 struct udma_hwdesc
*hwdesc
= &d
->hwdesc
[i
];
3256 dma_addr_t sg_addr
= sg_dma_address(sgent
);
3257 struct cppi5_host_desc_t
*desc
;
3258 size_t sg_len
= sg_dma_len(sgent
);
3260 hwdesc
->cppi5_desc_vaddr
= dma_pool_zalloc(uc
->hdesc_pool
,
3262 &hwdesc
->cppi5_desc_paddr
);
3263 if (!hwdesc
->cppi5_desc_vaddr
) {
3264 dev_err(uc
->ud
->dev
,
3265 "descriptor%d allocation failed\n", i
);
3267 udma_free_hwdesc(uc
, d
);
3272 d
->residue
+= sg_len
;
3273 hwdesc
->cppi5_desc_size
= uc
->config
.hdesc_size
;
3274 desc
= hwdesc
->cppi5_desc_vaddr
;
3277 cppi5_hdesc_init(desc
, 0, 0);
3278 /* Flow and Packet ID */
3279 cppi5_desc_set_pktids(&desc
->hdr
, uc
->id
,
3280 CPPI5_INFO1_DESC_FLOWID_DEFAULT
);
3281 cppi5_desc_set_retpolicy(&desc
->hdr
, 0, ring_id
);
3283 cppi5_hdesc_reset_hbdesc(desc
);
3284 cppi5_desc_set_retpolicy(&desc
->hdr
, 0, 0xffff);
3287 /* attach the sg buffer to the descriptor */
3289 cppi5_hdesc_attach_buf(desc
, sg_addr
, sg_len
, sg_addr
, sg_len
);
3291 /* Attach link as host buffer descriptor */
3293 cppi5_hdesc_link_hbdesc(h_desc
,
3294 hwdesc
->cppi5_desc_paddr
| asel
);
3296 if (uc
->ud
->match_data
->type
== DMA_TYPE_PKTDMA
||
3297 dir
== DMA_MEM_TO_DEV
)
3301 if (d
->residue
>= SZ_4M
) {
3302 dev_err(uc
->ud
->dev
,
3303 "%s: Transfer size %u is over the supported 4M range\n",
3304 __func__
, d
->residue
);
3305 udma_free_hwdesc(uc
, d
);
3310 h_desc
= d
->hwdesc
[0].cppi5_desc_vaddr
;
3311 cppi5_hdesc_set_pktlen(h_desc
, d
->residue
);
static int udma_attach_metadata(struct dma_async_tx_descriptor *desc,
				void *data, size_t len)
{
	struct udma_desc *d = to_udma_desc(desc);
	struct udma_chan *uc = to_udma_chan(desc->chan);
	struct cppi5_host_desc_t *h_desc;
	u32 psd_size = len;
	u32 flags = 0;

	if (!uc->config.pkt_mode || !uc->config.metadata_size)
		return -ENOTSUPP;

	if (!data || len > uc->config.metadata_size)
		return -EINVAL;

	if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE)
		return -EINVAL;

	h_desc = d->hwdesc[0].cppi5_desc_vaddr;
	if (d->dir == DMA_MEM_TO_DEV)
		memcpy(h_desc->epib, data, len);

	if (uc->config.needs_epib)
		psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;

	d->metadata_size = len;
	if (uc->config.needs_epib)
		flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;

	cppi5_hdesc_update_flags(h_desc, flags);
	cppi5_hdesc_update_psdata_size(h_desc, psd_size);

	return 0;
}
static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
				   size_t *payload_len, size_t *max_len)
{
	struct udma_desc *d = to_udma_desc(desc);
	struct udma_chan *uc = to_udma_chan(desc->chan);
	struct cppi5_host_desc_t *h_desc;

	if (!uc->config.pkt_mode || !uc->config.metadata_size)
		return ERR_PTR(-ENOTSUPP);

	h_desc = d->hwdesc[0].cppi5_desc_vaddr;

	*max_len = uc->config.metadata_size;

	*payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ?
		       CPPI5_INFO0_HDESC_EPIB_SIZE : 0;
	*payload_len += cppi5_hdesc_get_psdata_size(h_desc);

	return h_desc->epib;
}
static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc,
				 size_t payload_len)
{
	struct udma_desc *d = to_udma_desc(desc);
	struct udma_chan *uc = to_udma_chan(desc->chan);
	struct cppi5_host_desc_t *h_desc;
	u32 psd_size = payload_len;
	u32 flags = 0;

	if (!uc->config.pkt_mode || !uc->config.metadata_size)
		return -ENOTSUPP;

	if (payload_len > uc->config.metadata_size)
		return -EINVAL;

	if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE)
		return -EINVAL;

	h_desc = d->hwdesc[0].cppi5_desc_vaddr;

	if (uc->config.needs_epib) {
		psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
		flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
	}

	cppi5_hdesc_update_flags(h_desc, flags);
	cppi5_hdesc_update_psdata_size(h_desc, psd_size);

	return 0;
}
static struct dma_descriptor_metadata_ops metadata_ops = {
	.attach = udma_attach_metadata,
	.get_ptr = udma_get_metadata_ptr,
	.set_len = udma_set_metadata_len,
};
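/*
 * Usage note (added for clarity, not in the original source): these hooks
 * back the generic dmaengine client-side metadata helpers, so a packet-mode
 * client would typically call dmaengine_desc_attach_metadata(), or
 * dmaengine_desc_get_metadata_ptr() / dmaengine_desc_set_metadata_len() for
 * pointer mode, on the descriptor returned by the prep call before issuing
 * it.
 */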
3410 static struct dma_async_tx_descriptor
*
3411 udma_prep_slave_sg(struct dma_chan
*chan
, struct scatterlist
*sgl
,
3412 unsigned int sglen
, enum dma_transfer_direction dir
,
3413 unsigned long tx_flags
, void *context
)
3415 struct udma_chan
*uc
= to_udma_chan(chan
);
3416 enum dma_slave_buswidth dev_width
;
3417 struct udma_desc
*d
;
3420 if (dir
!= uc
->config
.dir
&&
3421 (uc
->config
.dir
== DMA_MEM_TO_MEM
&& !uc
->config
.tr_trigger_type
)) {
3422 dev_err(chan
->device
->dev
,
3423 "%s: chan%d is for %s, not supporting %s\n",
3425 dmaengine_get_direction_text(uc
->config
.dir
),
3426 dmaengine_get_direction_text(dir
));
3430 if (dir
== DMA_DEV_TO_MEM
) {
3431 dev_width
= uc
->cfg
.src_addr_width
;
3432 burst
= uc
->cfg
.src_maxburst
;
3433 } else if (dir
== DMA_MEM_TO_DEV
) {
3434 dev_width
= uc
->cfg
.dst_addr_width
;
3435 burst
= uc
->cfg
.dst_maxburst
;
3437 dev_err(chan
->device
->dev
, "%s: bad direction?\n", __func__
);
3444 uc
->config
.tx_flags
= tx_flags
;
3446 if (uc
->config
.pkt_mode
)
3447 d
= udma_prep_slave_sg_pkt(uc
, sgl
, sglen
, dir
, tx_flags
,
3449 else if (is_slave_direction(uc
->config
.dir
))
3450 d
= udma_prep_slave_sg_tr(uc
, sgl
, sglen
, dir
, tx_flags
,
3453 d
= udma_prep_slave_sg_triggered_tr(uc
, sgl
, sglen
, dir
,
3463 /* static TR for remote PDMA */
3464 if (udma_configure_statictr(uc
, d
, dev_width
, burst
)) {
3465 dev_err(uc
->ud
->dev
,
3466 "%s: StaticTR Z is limited to maximum %u (%u)\n",
3467 __func__
, uc
->ud
->match_data
->statictr_z_mask
,
3468 d
->static_tr
.bstcnt
);
3470 udma_free_hwdesc(uc
, d
);
3475 if (uc
->config
.metadata_size
)
3476 d
->vd
.tx
.metadata_ops
= &metadata_ops
;
3478 return vchan_tx_prep(&uc
->vc
, &d
->vd
, tx_flags
);
3481 static struct udma_desc
*
3482 udma_prep_dma_cyclic_tr(struct udma_chan
*uc
, dma_addr_t buf_addr
,
3483 size_t buf_len
, size_t period_len
,
3484 enum dma_transfer_direction dir
, unsigned long flags
)
3486 struct udma_desc
*d
;
3487 size_t tr_size
, period_addr
;
3488 struct cppi5_tr_type1_t
*tr_req
;
3489 unsigned int periods
= buf_len
/ period_len
;
3490 u16 tr0_cnt0
, tr0_cnt1
, tr1_cnt0
;
3495 num_tr
= udma_get_tr_counters(period_len
, __ffs(buf_addr
), &tr0_cnt0
,
3496 &tr0_cnt1
, &tr1_cnt0
);
3498 dev_err(uc
->ud
->dev
, "size %zu is not supported\n",
3503 /* Now allocate and setup the descriptor. */
3504 tr_size
= sizeof(struct cppi5_tr_type1_t
);
3505 d
= udma_alloc_tr_desc(uc
, tr_size
, periods
* num_tr
, dir
);
3509 tr_req
= d
->hwdesc
[0].tr_req_base
;
3510 if (uc
->ud
->match_data
->type
== DMA_TYPE_UDMA
)
3511 period_addr
= buf_addr
;
3513 period_addr
= buf_addr
|
3514 ((u64
)uc
->config
.asel
<< K3_ADDRESS_ASEL_SHIFT
);
3517 * For BCDMA <-> PDMA transfers, the EOP flag needs to be set on the
3518 * last TR of a descriptor, to mark the packet as complete.
3519 * This is required for getting the teardown completion message in case
3520 * of TX, and to avoid short-packet error in case of RX.
3522 * As we are in cyclic mode, we do not know which period might be the
3523 * last one, so set the flag for each period.
3525 if (uc
->config
.ep_type
== PSIL_EP_PDMA_XY
&&
3526 uc
->ud
->match_data
->type
== DMA_TYPE_BCDMA
) {
3527 period_csf
= CPPI5_TR_CSF_EOP
;
3530 for (i
= 0; i
< periods
; i
++) {
3531 int tr_idx
= i
* num_tr
;
3533 cppi5_tr_init(&tr_req
[tr_idx
].flags
, CPPI5_TR_TYPE1
, false,
3534 false, CPPI5_TR_EVENT_SIZE_COMPLETION
, 0);
3536 tr_req
[tr_idx
].addr
= period_addr
;
3537 tr_req
[tr_idx
].icnt0
= tr0_cnt0
;
3538 tr_req
[tr_idx
].icnt1
= tr0_cnt1
;
3539 tr_req
[tr_idx
].dim1
= tr0_cnt0
;
3542 cppi5_tr_csf_set(&tr_req
[tr_idx
].flags
,
3543 CPPI5_TR_CSF_SUPR_EVT
);
3546 cppi5_tr_init(&tr_req
[tr_idx
].flags
, CPPI5_TR_TYPE1
,
3548 CPPI5_TR_EVENT_SIZE_COMPLETION
, 0);
3550 tr_req
[tr_idx
].addr
= period_addr
+ tr0_cnt1
* tr0_cnt0
;
3551 tr_req
[tr_idx
].icnt0
= tr1_cnt0
;
3552 tr_req
[tr_idx
].icnt1
= 1;
3553 tr_req
[tr_idx
].dim1
= tr1_cnt0
;
3556 if (!(flags
& DMA_PREP_INTERRUPT
))
3557 period_csf
|= CPPI5_TR_CSF_SUPR_EVT
;
3560 cppi5_tr_csf_set(&tr_req
[tr_idx
].flags
, period_csf
);
3562 period_addr
+= period_len
;
3568 static struct udma_desc
*
3569 udma_prep_dma_cyclic_pkt(struct udma_chan
*uc
, dma_addr_t buf_addr
,
3570 size_t buf_len
, size_t period_len
,
3571 enum dma_transfer_direction dir
, unsigned long flags
)
3573 struct udma_desc
*d
;
3576 int periods
= buf_len
/ period_len
;
3578 if (periods
> (K3_UDMA_DEFAULT_RING_SIZE
- 1))
3581 if (period_len
>= SZ_4M
)
3584 d
= kzalloc(struct_size(d
, hwdesc
, periods
), GFP_NOWAIT
);
3588 d
->hwdesc_count
= periods
;
3590 /* TODO: re-check this... */
3591 if (dir
== DMA_DEV_TO_MEM
)
3592 ring_id
= k3_ringacc_get_ring_id(uc
->rflow
->r_ring
);
3594 ring_id
= k3_ringacc_get_ring_id(uc
->tchan
->tc_ring
);
3596 if (uc
->ud
->match_data
->type
!= DMA_TYPE_UDMA
)
3597 buf_addr
|= (u64
)uc
->config
.asel
<< K3_ADDRESS_ASEL_SHIFT
;
3599 for (i
= 0; i
< periods
; i
++) {
3600 struct udma_hwdesc
*hwdesc
= &d
->hwdesc
[i
];
3601 dma_addr_t period_addr
= buf_addr
+ (period_len
* i
);
3602 struct cppi5_host_desc_t
*h_desc
;
3604 hwdesc
->cppi5_desc_vaddr
= dma_pool_zalloc(uc
->hdesc_pool
,
3606 &hwdesc
->cppi5_desc_paddr
);
3607 if (!hwdesc
->cppi5_desc_vaddr
) {
3608 dev_err(uc
->ud
->dev
,
3609 "descriptor%d allocation failed\n", i
);
3611 udma_free_hwdesc(uc
, d
);
3616 hwdesc
->cppi5_desc_size
= uc
->config
.hdesc_size
;
3617 h_desc
= hwdesc
->cppi5_desc_vaddr
;
3619 cppi5_hdesc_init(h_desc
, 0, 0);
3620 cppi5_hdesc_set_pktlen(h_desc
, period_len
);
3622 /* Flow and Packet ID */
3623 cppi5_desc_set_pktids(&h_desc
->hdr
, uc
->id
,
3624 CPPI5_INFO1_DESC_FLOWID_DEFAULT
);
3625 cppi5_desc_set_retpolicy(&h_desc
->hdr
, 0, ring_id
);
3627 /* attach each period to a new descriptor */
3628 cppi5_hdesc_attach_buf(h_desc
,
3629 period_addr
, period_len
,
3630 period_addr
, period_len
);
3636 static struct dma_async_tx_descriptor
*
3637 udma_prep_dma_cyclic(struct dma_chan
*chan
, dma_addr_t buf_addr
, size_t buf_len
,
3638 size_t period_len
, enum dma_transfer_direction dir
,
3639 unsigned long flags
)
3641 struct udma_chan
*uc
= to_udma_chan(chan
);
3642 enum dma_slave_buswidth dev_width
;
3643 struct udma_desc
*d
;
3646 if (dir
!= uc
->config
.dir
) {
3647 dev_err(chan
->device
->dev
,
3648 "%s: chan%d is for %s, not supporting %s\n",
3650 dmaengine_get_direction_text(uc
->config
.dir
),
3651 dmaengine_get_direction_text(dir
));
3657 if (dir
== DMA_DEV_TO_MEM
) {
3658 dev_width
= uc
->cfg
.src_addr_width
;
3659 burst
= uc
->cfg
.src_maxburst
;
3660 } else if (dir
== DMA_MEM_TO_DEV
) {
3661 dev_width
= uc
->cfg
.dst_addr_width
;
3662 burst
= uc
->cfg
.dst_maxburst
;
3664 dev_err(uc
->ud
->dev
, "%s: bad direction?\n", __func__
);
3671 if (uc
->config
.pkt_mode
)
3672 d
= udma_prep_dma_cyclic_pkt(uc
, buf_addr
, buf_len
, period_len
,
3675 d
= udma_prep_dma_cyclic_tr(uc
, buf_addr
, buf_len
, period_len
,
3681 d
->sglen
= buf_len
/ period_len
;
3684 d
->residue
= buf_len
;
3686 /* static TR for remote PDMA */
3687 if (udma_configure_statictr(uc
, d
, dev_width
, burst
)) {
3688 dev_err(uc
->ud
->dev
,
3689 "%s: StaticTR Z is limited to maximum %u (%u)\n",
3690 __func__
, uc
->ud
->match_data
->statictr_z_mask
,
3691 d
->static_tr
.bstcnt
);
3693 udma_free_hwdesc(uc
, d
);
3698 if (uc
->config
.metadata_size
)
3699 d
->vd
.tx
.metadata_ops
= &metadata_ops
;
3701 return vchan_tx_prep(&uc
->vc
, &d
->vd
, flags
);
3704 static struct dma_async_tx_descriptor
*
3705 udma_prep_dma_memcpy(struct dma_chan
*chan
, dma_addr_t dest
, dma_addr_t src
,
3706 size_t len
, unsigned long tx_flags
)
3708 struct udma_chan
*uc
= to_udma_chan(chan
);
3709 struct udma_desc
*d
;
3710 struct cppi5_tr_type15_t
*tr_req
;
3712 size_t tr_size
= sizeof(struct cppi5_tr_type15_t
);
3713 u16 tr0_cnt0
, tr0_cnt1
, tr1_cnt0
;
3714 u32 csf
= CPPI5_TR_CSF_SUPR_EVT
;
3716 if (uc
->config
.dir
!= DMA_MEM_TO_MEM
) {
3717 dev_err(chan
->device
->dev
,
3718 "%s: chan%d is for %s, not supporting %s\n",
3720 dmaengine_get_direction_text(uc
->config
.dir
),
3721 dmaengine_get_direction_text(DMA_MEM_TO_MEM
));
3725 num_tr
= udma_get_tr_counters(len
, __ffs(src
| dest
), &tr0_cnt0
,
3726 &tr0_cnt1
, &tr1_cnt0
);
3728 dev_err(uc
->ud
->dev
, "size %zu is not supported\n",
3733 d
= udma_alloc_tr_desc(uc
, tr_size
, num_tr
, DMA_MEM_TO_MEM
);
3737 d
->dir
= DMA_MEM_TO_MEM
;
3742 if (uc
->ud
->match_data
->type
!= DMA_TYPE_UDMA
) {
3743 src
|= (u64
)uc
->ud
->asel
<< K3_ADDRESS_ASEL_SHIFT
;
3744 dest
|= (u64
)uc
->ud
->asel
<< K3_ADDRESS_ASEL_SHIFT
;
3746 csf
|= CPPI5_TR_CSF_EOL_ICNT0
;
3749 tr_req
= d
->hwdesc
[0].tr_req_base
;
3751 cppi5_tr_init(&tr_req
[0].flags
, CPPI5_TR_TYPE15
, false, true,
3752 CPPI5_TR_EVENT_SIZE_COMPLETION
, 0);
3753 cppi5_tr_csf_set(&tr_req
[0].flags
, csf
);
3755 tr_req
[0].addr
= src
;
3756 tr_req
[0].icnt0
= tr0_cnt0
;
3757 tr_req
[0].icnt1
= tr0_cnt1
;
3758 tr_req
[0].icnt2
= 1;
3759 tr_req
[0].icnt3
= 1;
3760 tr_req
[0].dim1
= tr0_cnt0
;
3762 tr_req
[0].daddr
= dest
;
3763 tr_req
[0].dicnt0
= tr0_cnt0
;
3764 tr_req
[0].dicnt1
= tr0_cnt1
;
3765 tr_req
[0].dicnt2
= 1;
3766 tr_req
[0].dicnt3
= 1;
3767 tr_req
[0].ddim1
= tr0_cnt0
;
3770 cppi5_tr_init(&tr_req
[1].flags
, CPPI5_TR_TYPE15
, false, true,
3771 CPPI5_TR_EVENT_SIZE_COMPLETION
, 0);
3772 cppi5_tr_csf_set(&tr_req
[1].flags
, csf
);
3774 tr_req
[1].addr
= src
+ tr0_cnt1
* tr0_cnt0
;
3775 tr_req
[1].icnt0
= tr1_cnt0
;
3776 tr_req
[1].icnt1
= 1;
3777 tr_req
[1].icnt2
= 1;
3778 tr_req
[1].icnt3
= 1;
3780 tr_req
[1].daddr
= dest
+ tr0_cnt1
* tr0_cnt0
;
3781 tr_req
[1].dicnt0
= tr1_cnt0
;
3782 tr_req
[1].dicnt1
= 1;
3783 tr_req
[1].dicnt2
= 1;
3784 tr_req
[1].dicnt3
= 1;
3787 cppi5_tr_csf_set(&tr_req
[num_tr
- 1].flags
, csf
| CPPI5_TR_CSF_EOP
);
3789 if (uc
->config
.metadata_size
)
3790 d
->vd
.tx
.metadata_ops
= &metadata_ops
;
3792 return vchan_tx_prep(&uc
->vc
, &d
->vd
, tx_flags
);
static void udma_issue_pending(struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&uc->vc.lock, flags);

	/* If we have something pending and no active descriptor, then */
	if (vchan_issue_pending(&uc->vc) && !uc->desc) {
		/*
		 * start a descriptor if the channel is NOT [marked as
		 * terminating _and_ it is still running (teardown has not
		 * completed yet)].
		 */
		if (!(uc->state == UDMA_CHAN_IS_TERMINATING &&
		      udma_is_chan_running(uc)))
			udma_start(uc);
	}

	spin_unlock_irqrestore(&uc->vc.lock, flags);
}
3817 static enum dma_status
udma_tx_status(struct dma_chan
*chan
,
3818 dma_cookie_t cookie
,
3819 struct dma_tx_state
*txstate
)
3821 struct udma_chan
*uc
= to_udma_chan(chan
);
3822 enum dma_status ret
;
3823 unsigned long flags
;
3825 spin_lock_irqsave(&uc
->vc
.lock
, flags
);
3827 ret
= dma_cookie_status(chan
, cookie
, txstate
);
3829 if (!udma_is_chan_running(uc
))
3832 if (ret
== DMA_IN_PROGRESS
&& udma_is_chan_paused(uc
))
3835 if (ret
== DMA_COMPLETE
|| !txstate
)
3838 if (uc
->desc
&& uc
->desc
->vd
.tx
.cookie
== cookie
) {
3841 u32 residue
= uc
->desc
->residue
;
3844 if (uc
->desc
->dir
== DMA_MEM_TO_DEV
) {
3845 bcnt
= udma_tchanrt_read(uc
, UDMA_CHAN_RT_SBCNT_REG
);
3847 if (uc
->config
.ep_type
!= PSIL_EP_NATIVE
) {
3848 peer_bcnt
= udma_tchanrt_read(uc
,
3849 UDMA_CHAN_RT_PEER_BCNT_REG
);
3851 if (bcnt
> peer_bcnt
)
3852 delay
= bcnt
- peer_bcnt
;
3854 } else if (uc
->desc
->dir
== DMA_DEV_TO_MEM
) {
3855 bcnt
= udma_rchanrt_read(uc
, UDMA_CHAN_RT_BCNT_REG
);
3857 if (uc
->config
.ep_type
!= PSIL_EP_NATIVE
) {
3858 peer_bcnt
= udma_rchanrt_read(uc
,
3859 UDMA_CHAN_RT_PEER_BCNT_REG
);
3861 if (peer_bcnt
> bcnt
)
3862 delay
= peer_bcnt
- bcnt
;
3865 bcnt
= udma_tchanrt_read(uc
, UDMA_CHAN_RT_BCNT_REG
);
3868 if (bcnt
&& !(bcnt
% uc
->desc
->residue
))
3871 residue
-= bcnt
% uc
->desc
->residue
;
3873 if (!residue
&& (uc
->config
.dir
== DMA_DEV_TO_MEM
|| !delay
)) {
3878 dma_set_residue(txstate
, residue
);
3879 dma_set_in_flight_bytes(txstate
, delay
);
3886 spin_unlock_irqrestore(&uc
->vc
.lock
, flags
);
static int udma_pause(struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);

	/* pause the channel */
	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
					 UDMA_PEER_RT_EN_PAUSE,
					 UDMA_PEER_RT_EN_PAUSE);
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
					 UDMA_PEER_RT_EN_PAUSE,
					 UDMA_PEER_RT_EN_PAUSE);
		break;
	case DMA_MEM_TO_MEM:
		udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
					 UDMA_CHAN_RT_CTL_PAUSE,
					 UDMA_CHAN_RT_CTL_PAUSE);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
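/*
 * Note (added for clarity, not in the original source): for the slave
 * directions the pause is requested from the PSI-L peer (the PDMA side) via
 * UDMA_CHAN_RT_PEER_RT_EN_REG, while MEM_TO_MEM pauses the channel itself
 * through UDMA_CHAN_RT_CTL_REG; udma_resume() below clears the same bits.
 */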
static int udma_resume(struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);

	/* resume the channel */
	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
					 UDMA_PEER_RT_EN_PAUSE, 0);
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
					 UDMA_PEER_RT_EN_PAUSE, 0);
		break;
	case DMA_MEM_TO_MEM:
		udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
					 UDMA_CHAN_RT_CTL_PAUSE, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int udma_terminate_all(struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&uc->vc.lock, flags);

	if (udma_is_chan_running(uc))
		udma_stop(uc);

	if (uc->desc) {
		uc->terminated_desc = uc->desc;
		uc->desc = NULL;
		uc->terminated_desc->terminated = true;
		cancel_delayed_work(&uc->tx_drain.work);
	}

	vchan_get_all_descriptors(&uc->vc, &head);
	spin_unlock_irqrestore(&uc->vc.lock, flags);
	vchan_dma_desc_free_list(&uc->vc, &head);

	return 0;
}
static void udma_synchronize(struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	unsigned long timeout = msecs_to_jiffies(1000);

	vchan_synchronize(&uc->vc);

	if (uc->state == UDMA_CHAN_IS_TERMINATING) {
		timeout = wait_for_completion_timeout(&uc->teardown_completed,
						      timeout);
		if (!timeout) {
			dev_warn(uc->ud->dev, "chan%d teardown timeout!\n",
				 uc->id);
			udma_dump_chan_stdata(uc);
			udma_reset_chan(uc, true);
		}
	}

	udma_reset_chan(uc, false);
	if (udma_is_chan_running(uc))
		dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);

	cancel_delayed_work_sync(&uc->tx_drain.work);
	udma_reset_rings(uc);
}
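/*
 * Note (added for clarity, not in the original source): terminate_all above
 * only queues the teardown and reclaims descriptors; udma_synchronize() is
 * what waits (up to one second) for the teardown completion, drains the
 * tx_drain work and resets the rings, so only after it returns is it safe
 * for a client to release its buffers.
 */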
3997 static void udma_desc_pre_callback(struct virt_dma_chan
*vc
,
3998 struct virt_dma_desc
*vd
,
3999 struct dmaengine_result
*result
)
4001 struct udma_chan
*uc
= to_udma_chan(&vc
->chan
);
4002 struct udma_desc
*d
;
4008 d
= to_udma_desc(&vd
->tx
);
4010 if (d
->metadata_size
)
4011 udma_fetch_epib(uc
, d
);
4014 void *desc_vaddr
= udma_curr_cppi5_desc_vaddr(d
, d
->desc_idx
);
4016 if (cppi5_desc_get_type(desc_vaddr
) ==
4017 CPPI5_INFO0_DESC_TYPE_VAL_HOST
) {
4018 /* Provide residue information for the client */
4019 result
->residue
= d
->residue
-
4020 cppi5_hdesc_get_pktlen(desc_vaddr
);
4021 if (result
->residue
)
4022 result
->result
= DMA_TRANS_ABORTED
;
4024 result
->result
= DMA_TRANS_NOERROR
;
4026 result
->residue
= 0;
4027 /* Propagate TR Response errors to the client */
4028 status
= d
->hwdesc
[0].tr_resp_base
->status
;
4030 result
->result
= DMA_TRANS_ABORTED
;
4032 result
->result
= DMA_TRANS_NOERROR
;
4038 * This tasklet handles the completion of a DMA descriptor by
4039 * calling its callback and freeing it.
4041 static void udma_vchan_complete(struct tasklet_struct
*t
)
4043 struct virt_dma_chan
*vc
= from_tasklet(vc
, t
, task
);
4044 struct virt_dma_desc
*vd
, *_vd
;
4045 struct dmaengine_desc_callback cb
;
4048 spin_lock_irq(&vc
->lock
);
4049 list_splice_tail_init(&vc
->desc_completed
, &head
);
4053 dmaengine_desc_get_callback(&vd
->tx
, &cb
);
4055 memset(&cb
, 0, sizeof(cb
));
4057 spin_unlock_irq(&vc
->lock
);
4059 udma_desc_pre_callback(vc
, vd
, NULL
);
4060 dmaengine_desc_callback_invoke(&cb
, NULL
);
4062 list_for_each_entry_safe(vd
, _vd
, &head
, node
) {
4063 struct dmaengine_result result
;
4065 dmaengine_desc_get_callback(&vd
->tx
, &cb
);
4067 list_del(&vd
->node
);
4069 udma_desc_pre_callback(vc
, vd
, &result
);
4070 dmaengine_desc_callback_invoke(&cb
, &result
);
4072 vchan_vdesc_fini(vd
);
4076 static void udma_free_chan_resources(struct dma_chan
*chan
)
4078 struct udma_chan
*uc
= to_udma_chan(chan
);
4079 struct udma_dev
*ud
= to_udma_dev(chan
->device
);
4081 udma_terminate_all(chan
);
4082 if (uc
->terminated_desc
) {
4083 udma_reset_chan(uc
, false);
4084 udma_reset_rings(uc
);
4087 cancel_delayed_work_sync(&uc
->tx_drain
.work
);
4089 if (uc
->irq_num_ring
> 0) {
4090 free_irq(uc
->irq_num_ring
, uc
);
4092 uc
->irq_num_ring
= 0;
4094 if (uc
->irq_num_udma
> 0) {
4095 free_irq(uc
->irq_num_udma
, uc
);
4097 uc
->irq_num_udma
= 0;
4100 /* Release PSI-L pairing */
4101 if (uc
->psil_paired
) {
4102 navss_psil_unpair(ud
, uc
->config
.src_thread
,
4103 uc
->config
.dst_thread
);
4104 uc
->psil_paired
= false;
4107 vchan_free_chan_resources(&uc
->vc
);
4108 tasklet_kill(&uc
->vc
.task
);
4110 bcdma_free_bchan_resources(uc
);
4111 udma_free_tx_resources(uc
);
4112 udma_free_rx_resources(uc
);
4113 udma_reset_uchan(uc
);
4115 if (uc
->use_dma_pool
) {
4116 dma_pool_destroy(uc
->hdesc_pool
);
4117 uc
->use_dma_pool
= false;
static struct platform_driver udma_driver;
static struct platform_driver bcdma_driver;
static struct platform_driver pktdma_driver;

struct udma_filter_param {
	int remote_thread_id;
	u32 atype;
	u32 asel;
	u32 tr_trigger_type;
};
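/*
 * Note (added for clarity, not in the original source): these fields mirror
 * the cells of the DT dma-spec as decoded in udma_of_xlate() below:
 * remote_thread_id selects the PSI-L peer, atype/asel qualify the address,
 * and tr_trigger_type (BCDMA only) selects a global trigger for a
 * block-copy channel.
 */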
4132 static bool udma_dma_filter_fn(struct dma_chan
*chan
, void *param
)
4134 struct udma_chan_config
*ucc
;
4135 struct psil_endpoint_config
*ep_config
;
4136 struct udma_filter_param
*filter_param
;
4137 struct udma_chan
*uc
;
4138 struct udma_dev
*ud
;
4140 if (chan
->device
->dev
->driver
!= &udma_driver
.driver
&&
4141 chan
->device
->dev
->driver
!= &bcdma_driver
.driver
&&
4142 chan
->device
->dev
->driver
!= &pktdma_driver
.driver
)
4145 uc
= to_udma_chan(chan
);
4148 filter_param
= param
;
4150 if (filter_param
->atype
> 2) {
4151 dev_err(ud
->dev
, "Invalid channel atype: %u\n",
4152 filter_param
->atype
);
4156 if (filter_param
->asel
> 15) {
4157 dev_err(ud
->dev
, "Invalid channel asel: %u\n",
4158 filter_param
->asel
);
4162 ucc
->remote_thread_id
= filter_param
->remote_thread_id
;
4163 ucc
->atype
= filter_param
->atype
;
4164 ucc
->asel
= filter_param
->asel
;
4165 ucc
->tr_trigger_type
= filter_param
->tr_trigger_type
;
4167 if (ucc
->tr_trigger_type
) {
4168 ucc
->dir
= DMA_MEM_TO_MEM
;
4169 goto triggered_bchan
;
4170 } else if (ucc
->remote_thread_id
& K3_PSIL_DST_THREAD_ID_OFFSET
) {
4171 ucc
->dir
= DMA_MEM_TO_DEV
;
4173 ucc
->dir
= DMA_DEV_TO_MEM
;
4176 ep_config
= psil_get_ep_config(ucc
->remote_thread_id
);
4177 if (IS_ERR(ep_config
)) {
4178 dev_err(ud
->dev
, "No configuration for psi-l thread 0x%04x\n",
4179 ucc
->remote_thread_id
);
4180 ucc
->dir
= DMA_MEM_TO_MEM
;
4181 ucc
->remote_thread_id
= -1;
4187 if (ud
->match_data
->type
== DMA_TYPE_BCDMA
&&
4188 ep_config
->pkt_mode
) {
4190 "Only TR mode is supported (psi-l thread 0x%04x)\n",
4191 ucc
->remote_thread_id
);
4192 ucc
->dir
= DMA_MEM_TO_MEM
;
4193 ucc
->remote_thread_id
= -1;
4199 ucc
->pkt_mode
= ep_config
->pkt_mode
;
4200 ucc
->channel_tpl
= ep_config
->channel_tpl
;
4201 ucc
->notdpkt
= ep_config
->notdpkt
;
4202 ucc
->ep_type
= ep_config
->ep_type
;
4204 if (ud
->match_data
->type
== DMA_TYPE_PKTDMA
&&
4205 ep_config
->mapped_channel_id
>= 0) {
4206 ucc
->mapped_channel_id
= ep_config
->mapped_channel_id
;
4207 ucc
->default_flow_id
= ep_config
->default_flow_id
;
4209 ucc
->mapped_channel_id
= -1;
4210 ucc
->default_flow_id
= -1;
4213 if (ucc
->ep_type
!= PSIL_EP_NATIVE
) {
4214 const struct udma_match_data
*match_data
= ud
->match_data
;
4216 if (match_data
->flags
& UDMA_FLAG_PDMA_ACC32
)
4217 ucc
->enable_acc32
= ep_config
->pdma_acc32
;
4218 if (match_data
->flags
& UDMA_FLAG_PDMA_BURST
)
4219 ucc
->enable_burst
= ep_config
->pdma_burst
;
4222 ucc
->needs_epib
= ep_config
->needs_epib
;
4223 ucc
->psd_size
= ep_config
->psd_size
;
4224 ucc
->metadata_size
=
4225 (ucc
->needs_epib
? CPPI5_INFO0_HDESC_EPIB_SIZE
: 0) +
4229 ucc
->hdesc_size
= ALIGN(sizeof(struct cppi5_host_desc_t
) +
4230 ucc
->metadata_size
, ud
->desc_align
);
4232 dev_dbg(ud
->dev
, "chan%d: Remote thread: 0x%04x (%s)\n", uc
->id
,
4233 ucc
->remote_thread_id
, dmaengine_get_direction_text(ucc
->dir
));
4238 dev_dbg(ud
->dev
, "chan%d: triggered channel (type: %u)\n", uc
->id
,
4239 ucc
->tr_trigger_type
);
static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *ofdma)
{
	struct udma_dev *ud = ofdma->of_dma_data;
	dma_cap_mask_t mask = ud->ddev.cap_mask;
	struct udma_filter_param filter_param;
	struct dma_chan *chan;

	if (ud->match_data->type == DMA_TYPE_BCDMA) {
		if (dma_spec->args_count != 3)
			return NULL;

		filter_param.tr_trigger_type = dma_spec->args[0];
		filter_param.remote_thread_id = dma_spec->args[1];
		filter_param.asel = dma_spec->args[2];
		filter_param.atype = 0;
	} else {
		if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
			return NULL;

		filter_param.remote_thread_id = dma_spec->args[0];
		filter_param.tr_trigger_type = 0;
		if (dma_spec->args_count == 2) {
			if (ud->match_data->type == DMA_TYPE_UDMA) {
				filter_param.atype = dma_spec->args[1];
				filter_param.asel = 0;
			} else {
				filter_param.atype = 0;
				filter_param.asel = dma_spec->args[1];
			}
		} else {
			filter_param.atype = 0;
			filter_param.asel = 0;
		}
	}

	chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
				     ofdma->of_node);
	if (!chan) {
		dev_err(ud->dev, "get channel fail in %s.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	return chan;
}
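/*
 * Illustrative consumer bindings (hypothetical node names and thread IDs,
 * not from the original source), matching the argument decoding above:
 *
 *   UDMA/PKTDMA: dmas = <&main_udmap 0xc400>;      (arg0: PSI-L thread ID)
 *   UDMA:        dmas = <&main_udmap 0xc400 1>;    (arg1: atype)
 *   PKTDMA:      dmas = <&main_pktdma 0xc400 0>;   (arg1: asel)
 *   BCDMA:       dmas = <&main_bcdma 1 0 0>;       (trigger, thread, asel)
 */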
4291 static struct udma_match_data am654_main_data
= {
4292 .type
= DMA_TYPE_UDMA
,
4293 .psil_base
= 0x1000,
4294 .enable_memcpy_support
= true,
4295 .statictr_z_mask
= GENMASK(11, 0),
4297 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES
, /* Normal Channels */
4298 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES
, /* H Channels */
4299 0, /* No UH Channels */
4303 static struct udma_match_data am654_mcu_data
= {
4304 .type
= DMA_TYPE_UDMA
,
4305 .psil_base
= 0x6000,
4306 .enable_memcpy_support
= false,
4307 .statictr_z_mask
= GENMASK(11, 0),
4309 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES
, /* Normal Channels */
4310 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES
, /* H Channels */
4311 0, /* No UH Channels */
4315 static struct udma_match_data j721e_main_data
= {
4316 .type
= DMA_TYPE_UDMA
,
4317 .psil_base
= 0x1000,
4318 .enable_memcpy_support
= true,
4319 .flags
= UDMA_FLAGS_J7_CLASS
,
4320 .statictr_z_mask
= GENMASK(23, 0),
4322 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES
, /* Normal Channels */
4323 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES
, /* H Channels */
4324 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES
, /* UH Channels */
4328 static struct udma_match_data j721e_mcu_data
= {
4329 .type
= DMA_TYPE_UDMA
,
4330 .psil_base
= 0x6000,
4331 .enable_memcpy_support
= false, /* MEM_TO_MEM is slow via MCU UDMA */
4332 .flags
= UDMA_FLAGS_J7_CLASS
,
4333 .statictr_z_mask
= GENMASK(23, 0),
4335 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES
, /* Normal Channels */
4336 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES
, /* H Channels */
4337 0, /* No UH Channels */
4341 static struct udma_soc_data am62a_dmss_csi_soc_data
= {
4343 .bcdma_rchan_data
= 0xe00,
4344 .bcdma_rchan_ring
= 0x1000,
4348 static struct udma_soc_data j721s2_bcdma_csi_soc_data
= {
4350 .bcdma_tchan_data
= 0x800,
4351 .bcdma_tchan_ring
= 0xa00,
4352 .bcdma_rchan_data
= 0xe00,
4353 .bcdma_rchan_ring
= 0x1000,
4357 static struct udma_match_data am62a_bcdma_csirx_data
= {
4358 .type
= DMA_TYPE_BCDMA
,
4359 .psil_base
= 0x3100,
4360 .enable_memcpy_support
= false,
4362 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES
, /* Normal Channels */
4363 0, /* No H Channels */
4364 0, /* No UH Channels */
4366 .soc_data
= &am62a_dmss_csi_soc_data
,
4369 static struct udma_match_data am64_bcdma_data
= {
4370 .type
= DMA_TYPE_BCDMA
,
4371 .psil_base
= 0x2000, /* for tchan and rchan, not applicable to bchan */
4372 .enable_memcpy_support
= true, /* Supported via bchan */
4373 .flags
= UDMA_FLAGS_J7_CLASS
,
4374 .statictr_z_mask
= GENMASK(23, 0),
4376 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES
, /* Normal Channels */
4377 0, /* No H Channels */
4378 0, /* No UH Channels */
4382 static struct udma_match_data am64_pktdma_data
= {
4383 .type
= DMA_TYPE_PKTDMA
,
4384 .psil_base
= 0x1000,
4385 .enable_memcpy_support
= false, /* PKTDMA does not support MEM_TO_MEM */
4386 .flags
= UDMA_FLAGS_J7_CLASS
,
4387 .statictr_z_mask
= GENMASK(23, 0),
4389 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES
, /* Normal Channels */
4390 0, /* No H Channels */
4391 0, /* No UH Channels */
4395 static struct udma_match_data j721s2_bcdma_csi_data
= {
4396 .type
= DMA_TYPE_BCDMA
,
4397 .psil_base
= 0x2000,
4398 .enable_memcpy_support
= false,
4400 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES
, /* Normal Channels */
4401 0, /* No H Channels */
4402 0, /* No UH Channels */
4404 .soc_data
= &j721s2_bcdma_csi_soc_data
,
static const struct of_device_id udma_of_match[] = {
	{
		.compatible = "ti,am654-navss-main-udmap",
		.data = &am654_main_data,
	},
	{
		.compatible = "ti,am654-navss-mcu-udmap",
		.data = &am654_mcu_data,
	},
	{
		.compatible = "ti,j721e-navss-main-udmap",
		.data = &j721e_main_data,
	},
	{
		.compatible = "ti,j721e-navss-mcu-udmap",
		.data = &j721e_mcu_data,
	},
	{
		.compatible = "ti,am64-dmss-bcdma",
		.data = &am64_bcdma_data,
	},
	{
		.compatible = "ti,am64-dmss-pktdma",
		.data = &am64_pktdma_data,
	},
	{
		.compatible = "ti,am62a-dmss-bcdma-csirx",
		.data = &am62a_bcdma_csirx_data,
	},
	{
		.compatible = "ti,j721s2-dmss-bcdma-csi",
		.data = &j721s2_bcdma_csi_data,
	},
	{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, udma_of_match);
4442 static struct udma_soc_data am654_soc_data
= {
4444 .udma_rchan
= 0x200,
4448 static struct udma_soc_data j721e_soc_data
= {
4450 .udma_rchan
= 0x400,
4454 static struct udma_soc_data j7200_soc_data
= {
4460 static struct udma_soc_data am64_soc_data
= {
4462 .bcdma_bchan_data
= 0x2200,
4463 .bcdma_bchan_ring
= 0x2400,
4464 .bcdma_tchan_data
= 0x2800,
4465 .bcdma_tchan_ring
= 0x2a00,
4466 .bcdma_rchan_data
= 0x2e00,
4467 .bcdma_rchan_ring
= 0x3000,
4468 .pktdma_tchan_flow
= 0x1200,
4469 .pktdma_rchan_flow
= 0x1600,
4471 .bcdma_trigger_event_offset
= 0xc400,
4474 static const struct soc_device_attribute k3_soc_devices
[] = {
4475 { .family
= "AM65X", .data
= &am654_soc_data
},
4476 { .family
= "J721E", .data
= &j721e_soc_data
},
4477 { .family
= "J7200", .data
= &j7200_soc_data
},
4478 { .family
= "AM64X", .data
= &am64_soc_data
},
4479 { .family
= "J721S2", .data
= &j721e_soc_data
},
4480 { .family
= "AM62X", .data
= &am64_soc_data
},
4481 { .family
= "AM62AX", .data
= &am64_soc_data
},
4482 { .family
= "J784S4", .data
= &j721e_soc_data
},
4483 { .family
= "AM62PX", .data
= &am64_soc_data
},
4484 { .family
= "J722S", .data
= &am64_soc_data
},
4488 static int udma_get_mmrs(struct platform_device
*pdev
, struct udma_dev
*ud
)
4490 u32 cap2
, cap3
, cap4
;
4493 ud
->mmrs
[MMR_GCFG
] = devm_platform_ioremap_resource_byname(pdev
, mmr_names
[MMR_GCFG
]);
4494 if (IS_ERR(ud
->mmrs
[MMR_GCFG
]))
4495 return PTR_ERR(ud
->mmrs
[MMR_GCFG
]);
4497 cap2
= udma_read(ud
->mmrs
[MMR_GCFG
], 0x28);
4498 cap3
= udma_read(ud
->mmrs
[MMR_GCFG
], 0x2c);
4500 switch (ud
->match_data
->type
) {
4502 ud
->rflow_cnt
= UDMA_CAP3_RFLOW_CNT(cap3
);
4503 ud
->tchan_cnt
= UDMA_CAP2_TCHAN_CNT(cap2
);
4504 ud
->echan_cnt
= UDMA_CAP2_ECHAN_CNT(cap2
);
4505 ud
->rchan_cnt
= UDMA_CAP2_RCHAN_CNT(cap2
);
4507 case DMA_TYPE_BCDMA
:
4508 ud
->bchan_cnt
= BCDMA_CAP2_BCHAN_CNT(cap2
) +
4509 BCDMA_CAP3_HBCHAN_CNT(cap3
) +
4510 BCDMA_CAP3_UBCHAN_CNT(cap3
);
4511 ud
->tchan_cnt
= BCDMA_CAP2_TCHAN_CNT(cap2
);
4512 ud
->rchan_cnt
= BCDMA_CAP2_RCHAN_CNT(cap2
);
4513 ud
->rflow_cnt
= ud
->rchan_cnt
;
4515 case DMA_TYPE_PKTDMA
:
4516 cap4
= udma_read(ud
->mmrs
[MMR_GCFG
], 0x30);
4517 ud
->tchan_cnt
= UDMA_CAP2_TCHAN_CNT(cap2
);
4518 ud
->rchan_cnt
= UDMA_CAP2_RCHAN_CNT(cap2
);
4519 ud
->rflow_cnt
= UDMA_CAP3_RFLOW_CNT(cap3
);
4520 ud
->tflow_cnt
= PKTDMA_CAP4_TFLOW_CNT(cap4
);
4526 for (i
= 1; i
< MMR_LAST
; i
++) {
4527 if (i
== MMR_BCHANRT
&& ud
->bchan_cnt
== 0)
4529 if (i
== MMR_TCHANRT
&& ud
->tchan_cnt
== 0)
4531 if (i
== MMR_RCHANRT
&& ud
->rchan_cnt
== 0)
4534 ud
->mmrs
[i
] = devm_platform_ioremap_resource_byname(pdev
, mmr_names
[i
]);
4535 if (IS_ERR(ud
->mmrs
[i
]))
4536 return PTR_ERR(ud
->mmrs
[i
]);
static void udma_mark_resource_ranges(struct udma_dev *ud, unsigned long *map,
				      struct ti_sci_resource_desc *rm_desc,
				      char *name)
{
	bitmap_clear(map, rm_desc->start, rm_desc->num);
	bitmap_clear(map, rm_desc->start_sec, rm_desc->num_sec);
	dev_dbg(ud->dev, "ti_sci resource range for %s: %d:%d | %d:%d\n", name,
		rm_desc->start, rm_desc->num, rm_desc->start_sec,
		rm_desc->num_sec);
}

static const char * const range_names[] = {
	[RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan",
	[RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan",
	[RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan",
	[RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow",
	[RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow",
};
4561 static int udma_setup_resources(struct udma_dev
*ud
)
4564 struct device
*dev
= ud
->dev
;
4565 struct ti_sci_resource
*rm_res
, irq_res
;
4566 struct udma_tisci_rm
*tisci_rm
= &ud
->tisci_rm
;
4569 /* Set up the throughput level start indexes */
4570 cap3
= udma_read(ud
->mmrs
[MMR_GCFG
], 0x2c);
4571 if (of_device_is_compatible(dev
->of_node
,
4572 "ti,am654-navss-main-udmap")) {
4573 ud
->tchan_tpl
.levels
= 2;
4574 ud
->tchan_tpl
.start_idx
[0] = 8;
4575 } else if (of_device_is_compatible(dev
->of_node
,
4576 "ti,am654-navss-mcu-udmap")) {
4577 ud
->tchan_tpl
.levels
= 2;
4578 ud
->tchan_tpl
.start_idx
[0] = 2;
4579 } else if (UDMA_CAP3_UCHAN_CNT(cap3
)) {
4580 ud
->tchan_tpl
.levels
= 3;
4581 ud
->tchan_tpl
.start_idx
[1] = UDMA_CAP3_UCHAN_CNT(cap3
);
4582 ud
->tchan_tpl
.start_idx
[0] = UDMA_CAP3_HCHAN_CNT(cap3
);
4583 } else if (UDMA_CAP3_HCHAN_CNT(cap3
)) {
4584 ud
->tchan_tpl
.levels
= 2;
4585 ud
->tchan_tpl
.start_idx
[0] = UDMA_CAP3_HCHAN_CNT(cap3
);
4587 ud
->tchan_tpl
.levels
= 1;
4590 ud
->rchan_tpl
.levels
= ud
->tchan_tpl
.levels
;
4591 ud
->rchan_tpl
.start_idx
[0] = ud
->tchan_tpl
.start_idx
[0];
4592 ud
->rchan_tpl
.start_idx
[1] = ud
->tchan_tpl
.start_idx
[1];
4594 ud
->tchan_map
= devm_kmalloc_array(dev
, BITS_TO_LONGS(ud
->tchan_cnt
),
4595 sizeof(unsigned long), GFP_KERNEL
);
4596 ud
->tchans
= devm_kcalloc(dev
, ud
->tchan_cnt
, sizeof(*ud
->tchans
),
4598 ud
->rchan_map
= devm_kmalloc_array(dev
, BITS_TO_LONGS(ud
->rchan_cnt
),
4599 sizeof(unsigned long), GFP_KERNEL
);
4600 ud
->rchans
= devm_kcalloc(dev
, ud
->rchan_cnt
, sizeof(*ud
->rchans
),
4602 ud
->rflow_gp_map
= devm_kmalloc_array(dev
, BITS_TO_LONGS(ud
->rflow_cnt
),
4603 sizeof(unsigned long),
4605 ud
->rflow_gp_map_allocated
= devm_kcalloc(dev
,
4606 BITS_TO_LONGS(ud
->rflow_cnt
),
4607 sizeof(unsigned long),
4609 ud
->rflow_in_use
= devm_kcalloc(dev
, BITS_TO_LONGS(ud
->rflow_cnt
),
4610 sizeof(unsigned long),
4612 ud
->rflows
= devm_kcalloc(dev
, ud
->rflow_cnt
, sizeof(*ud
->rflows
),
4615 if (!ud
->tchan_map
|| !ud
->rchan_map
|| !ud
->rflow_gp_map
||
4616 !ud
->rflow_gp_map_allocated
|| !ud
->tchans
|| !ud
->rchans
||
4617 !ud
->rflows
|| !ud
->rflow_in_use
)
4621 * RX flows with the same Ids as RX channels are reserved to be used
4622 * as default flows if remote HW can't generate flow_ids. Those
4623 * RX flows can be requested only explicitly by id.
4625 bitmap_set(ud
->rflow_gp_map_allocated
, 0, ud
->rchan_cnt
);
4627 /* by default no GP rflows are assigned to Linux */
4628 bitmap_set(ud
->rflow_gp_map
, 0, ud
->rflow_cnt
);
4630 /* Get resource ranges from tisci */
4631 for (i
= 0; i
< RM_RANGE_LAST
; i
++) {
4632 if (i
== RM_RANGE_BCHAN
|| i
== RM_RANGE_TFLOW
)
4635 tisci_rm
->rm_ranges
[i
] =
4636 devm_ti_sci_get_of_resource(tisci_rm
->tisci
, dev
,
4637 tisci_rm
->tisci_dev_id
,
4638 (char *)range_names
[i
]);
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
		irq_res.sets = 1;
	} else {
		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->tchan_map,
						  &rm_res->desc[i], "tchan");
		irq_res.sets = rm_res->sets;
	}

	/* rchan and matching default flow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
		irq_res.sets++;
	} else {
		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->rchan_map,
						  &rm_res->desc[i], "rchan");
		irq_res.sets += rm_res->sets;
	}

	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
	if (!irq_res.desc)
		return -ENOMEM;
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		irq_res.desc[0].start = 0;
		irq_res.desc[0].num = ud->tchan_cnt;
		i = 1;
	} else {
		for (i = 0; i < rm_res->sets; i++) {
			irq_res.desc[i].start = rm_res->desc[i].start;
			irq_res.desc[i].num = rm_res->desc[i].num;
			irq_res.desc[i].start_sec = rm_res->desc[i].start_sec;
			irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
		}
	}
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		irq_res.desc[i].start = 0;
		irq_res.desc[i].num = ud->rchan_cnt;
	} else {
		for (j = 0; j < rm_res->sets; j++, i++) {
			if (rm_res->desc[j].num) {
				irq_res.desc[i].start = rm_res->desc[j].start +
					ud->soc_data->oes.udma_rchan;
				irq_res.desc[i].num = rm_res->desc[j].num;
			}
			if (rm_res->desc[j].num_sec) {
				irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
					ud->soc_data->oes.udma_rchan;
				irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
			}
		}
	}
	ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
	kfree(irq_res.desc);
	if (ret) {
		dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
		return ret;
	}

	/* GP rflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		/* all gp flows are assigned exclusively to Linux */
		bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
			     ud->rflow_cnt - ud->rchan_cnt);
	} else {
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->rflow_gp_map,
						  &rm_res->desc[i], "gp-rflow");
	}

	return 0;
}
static int bcdma_setup_resources(struct udma_dev *ud)
{
	int ret, i, j;
	struct device *dev = ud->dev;
	struct ti_sci_resource *rm_res, irq_res;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct udma_oes_offsets *oes = &ud->soc_data->oes;
	u32 cap;

	/* Set up the throughput level start indexes */
	cap = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
	if (BCDMA_CAP3_UBCHAN_CNT(cap)) {
		ud->bchan_tpl.levels = 3;
		ud->bchan_tpl.start_idx[1] = BCDMA_CAP3_UBCHAN_CNT(cap);
		ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
	} else if (BCDMA_CAP3_HBCHAN_CNT(cap)) {
		ud->bchan_tpl.levels = 2;
		ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
	} else {
		ud->bchan_tpl.levels = 1;
	}

	cap = udma_read(ud->mmrs[MMR_GCFG], 0x30);
	if (BCDMA_CAP4_URCHAN_CNT(cap)) {
		ud->rchan_tpl.levels = 3;
		ud->rchan_tpl.start_idx[1] = BCDMA_CAP4_URCHAN_CNT(cap);
		ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
	} else if (BCDMA_CAP4_HRCHAN_CNT(cap)) {
		ud->rchan_tpl.levels = 2;
		ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
	} else {
		ud->rchan_tpl.levels = 1;
	}

	if (BCDMA_CAP4_UTCHAN_CNT(cap)) {
		ud->tchan_tpl.levels = 3;
		ud->tchan_tpl.start_idx[1] = BCDMA_CAP4_UTCHAN_CNT(cap);
		ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
	} else if (BCDMA_CAP4_HTCHAN_CNT(cap)) {
		ud->tchan_tpl.levels = 2;
		ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
	} else {
		ud->tchan_tpl.levels = 1;
	}

	ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans),
				  GFP_KERNEL);
	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	/* BCDMA does not really have flows, but the driver expects them */
	ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rchan_cnt),
					sizeof(unsigned long),
					GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);

	if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map ||
	    !ud->rflow_in_use || !ud->bchans || !ud->tchans || !ud->rchans ||
	    !ud->rflows)
		return -ENOMEM;

	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++) {
		if (i == RM_RANGE_RFLOW || i == RM_RANGE_TFLOW)
			continue;
		if (i == RM_RANGE_BCHAN && ud->bchan_cnt == 0)
			continue;
		if (i == RM_RANGE_TCHAN && ud->tchan_cnt == 0)
			continue;
		if (i == RM_RANGE_RCHAN && ud->rchan_cnt == 0)
			continue;

		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);
	}

	irq_res.sets = 0;

	/* bchan ranges */
	if (ud->bchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
		if (IS_ERR(rm_res)) {
			bitmap_zero(ud->bchan_map, ud->bchan_cnt);
			irq_res.sets++;
		} else {
			bitmap_fill(ud->bchan_map, ud->bchan_cnt);
			for (i = 0; i < rm_res->sets; i++)
				udma_mark_resource_ranges(ud, ud->bchan_map,
							  &rm_res->desc[i],
							  "bchan");
			irq_res.sets += rm_res->sets;
		}
	}

	/* tchan ranges */
	if (ud->tchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
		if (IS_ERR(rm_res)) {
			bitmap_zero(ud->tchan_map, ud->tchan_cnt);
			irq_res.sets += 2;
		} else {
			bitmap_fill(ud->tchan_map, ud->tchan_cnt);
			for (i = 0; i < rm_res->sets; i++)
				udma_mark_resource_ranges(ud, ud->tchan_map,
							  &rm_res->desc[i],
							  "tchan");
			irq_res.sets += rm_res->sets * 2;
		}
	}

	/* rchan ranges */
	if (ud->rchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
		if (IS_ERR(rm_res)) {
			bitmap_zero(ud->rchan_map, ud->rchan_cnt);
			irq_res.sets += 2;
		} else {
			bitmap_fill(ud->rchan_map, ud->rchan_cnt);
			for (i = 0; i < rm_res->sets; i++)
				udma_mark_resource_ranges(ud, ud->rchan_map,
							  &rm_res->desc[i],
							  "rchan");
			irq_res.sets += rm_res->sets * 2;
		}
	}

	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
	if (!irq_res.desc)
		return -ENOMEM;
	if (ud->bchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
		if (IS_ERR(rm_res)) {
			irq_res.desc[0].start = oes->bcdma_bchan_ring;
			irq_res.desc[0].num = ud->bchan_cnt;
			i = 1;
		} else {
			for (i = 0; i < rm_res->sets; i++) {
				irq_res.desc[i].start = rm_res->desc[i].start +
							oes->bcdma_bchan_ring;
				irq_res.desc[i].num = rm_res->desc[i].num;
			}
		}
	} else {
		i = 0;
	}

	if (ud->tchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
		if (IS_ERR(rm_res)) {
			irq_res.desc[i].start = oes->bcdma_tchan_data;
			irq_res.desc[i].num = ud->tchan_cnt;
			irq_res.desc[i + 1].start = oes->bcdma_tchan_ring;
			irq_res.desc[i + 1].num = ud->tchan_cnt;
			i += 2;
		} else {
			for (j = 0; j < rm_res->sets; j++, i += 2) {
				irq_res.desc[i].start = rm_res->desc[j].start +
							oes->bcdma_tchan_data;
				irq_res.desc[i].num = rm_res->desc[j].num;

				irq_res.desc[i + 1].start = rm_res->desc[j].start +
							oes->bcdma_tchan_ring;
				irq_res.desc[i + 1].num = rm_res->desc[j].num;
			}
		}
	}
	if (ud->rchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
		if (IS_ERR(rm_res)) {
			irq_res.desc[i].start = oes->bcdma_rchan_data;
			irq_res.desc[i].num = ud->rchan_cnt;
			irq_res.desc[i + 1].start = oes->bcdma_rchan_ring;
			irq_res.desc[i + 1].num = ud->rchan_cnt;
			i += 2;
		} else {
			for (j = 0; j < rm_res->sets; j++, i += 2) {
				irq_res.desc[i].start = rm_res->desc[j].start +
							oes->bcdma_rchan_data;
				irq_res.desc[i].num = rm_res->desc[j].num;

				irq_res.desc[i + 1].start = rm_res->desc[j].start +
							oes->bcdma_rchan_ring;
				irq_res.desc[i + 1].num = rm_res->desc[j].num;
			}
		}
	}

	ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
	kfree(irq_res.desc);
	if (ret) {
		dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
		return ret;
	}

	return 0;
}
static int pktdma_setup_resources(struct udma_dev *ud)
{
	int ret, i, j;
	struct device *dev = ud->dev;
	struct ti_sci_resource *rm_res, irq_res;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct udma_oes_offsets *oes = &ud->soc_data->oes;
	u32 cap3;

	/* Set up the throughput level start indexes */
	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
	if (UDMA_CAP3_UCHAN_CNT(cap3)) {
		ud->tchan_tpl.levels = 3;
		ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
		ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
	} else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
		ud->tchan_tpl.levels = 2;
		ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
	} else {
		ud->tchan_tpl.levels = 1;
	}

	ud->rchan_tpl.levels = ud->tchan_tpl.levels;
	ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
	ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];

	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
					sizeof(unsigned long),
					GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);
	ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt),
					   sizeof(unsigned long), GFP_KERNEL);

	if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans ||
	    !ud->rchans || !ud->rflows || !ud->rflow_in_use)
		return -ENOMEM;

	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++) {
		if (i == RM_RANGE_BCHAN)
			continue;

		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);
	}

	/* tchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
	} else {
		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->tchan_map,
						  &rm_res->desc[i], "tchan");
	}

	/* rchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
	} else {
		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->rchan_map,
						  &rm_res->desc[i], "rchan");
	}

	/* rflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		/* all rflows are assigned exclusively to Linux */
		bitmap_zero(ud->rflow_in_use, ud->rflow_cnt);
		irq_res.sets = 1;
	} else {
		bitmap_fill(ud->rflow_in_use, ud->rflow_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->rflow_in_use,
						  &rm_res->desc[i], "rflow");
		irq_res.sets = rm_res->sets;
	}

	/* tflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
	if (IS_ERR(rm_res)) {
		/* all tflows are assigned exclusively to Linux */
		bitmap_zero(ud->tflow_map, ud->tflow_cnt);
		irq_res.sets++;
	} else {
		bitmap_fill(ud->tflow_map, ud->tflow_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->tflow_map,
						  &rm_res->desc[i], "tflow");
		irq_res.sets += rm_res->sets;
	}

	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
	if (!irq_res.desc)
		return -ENOMEM;
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
	if (IS_ERR(rm_res)) {
		irq_res.desc[0].start = oes->pktdma_tchan_flow;
		irq_res.desc[0].num = ud->tflow_cnt;
		i = 1;
	} else {
		for (i = 0; i < rm_res->sets; i++) {
			irq_res.desc[i].start = rm_res->desc[i].start +
						oes->pktdma_tchan_flow;
			irq_res.desc[i].num = rm_res->desc[i].num;
		}
	}
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		irq_res.desc[i].start = oes->pktdma_rchan_flow;
		irq_res.desc[i].num = ud->rflow_cnt;
	} else {
		for (j = 0; j < rm_res->sets; j++, i++) {
			irq_res.desc[i].start = rm_res->desc[j].start +
						oes->pktdma_rchan_flow;
			irq_res.desc[i].num = rm_res->desc[j].num;
		}
	}
	ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
	kfree(irq_res.desc);
	if (ret) {
		dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
		return ret;
	}

	return 0;
}
static int setup_resources(struct udma_dev *ud)
{
	struct device *dev = ud->dev;
	int ch_count, ret;

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		ret = udma_setup_resources(ud);
		break;
	case DMA_TYPE_BCDMA:
		ret = bcdma_setup_resources(ud);
		break;
	case DMA_TYPE_PKTDMA:
		ret = pktdma_setup_resources(ud);
		break;
	default:
		return -EINVAL;
	}

	if (ret)
		return ret;

	ch_count = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt;
	if (ud->bchan_cnt)
		ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt);
	ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
	ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
	if (!ch_count)
		return -ENODEV;

	ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
				    GFP_KERNEL);
	if (!ud->channels)
		return -ENOMEM;

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		dev_info(dev,
			 "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
			 ch_count,
			 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
						       ud->tchan_cnt),
			 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
						       ud->rchan_cnt),
			 ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
						       ud->rflow_cnt));
		break;
	case DMA_TYPE_BCDMA:
		dev_info(dev,
			 "Channels: %d (bchan: %u, tchan: %u, rchan: %u)\n",
			 ch_count,
			 ud->bchan_cnt - bitmap_weight(ud->bchan_map,
						       ud->bchan_cnt),
			 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
						       ud->tchan_cnt),
			 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
						       ud->rchan_cnt));
		break;
	case DMA_TYPE_PKTDMA:
		dev_info(dev,
			 "Channels: %d (tchan: %u, rchan: %u)\n",
			 ch_count,
			 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
						       ud->tchan_cnt),
			 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
						       ud->rchan_cnt));
		break;
	default:
		break;
	}

	return ch_count;
}
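
/*
 * udma_setup_rx_flush() prepares the resources used to drain an RX channel
 * during teardown: a single 1K bounce buffer plus two pre-built, DMA mapped
 * descriptors pointing at it (a TR type1 descriptor and a host packet
 * descriptor), so that whichever mode the channel runs in, stale data can
 * be flushed without allocating anything in the teardown path.
 */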
static int udma_setup_rx_flush(struct udma_dev *ud)
{
	struct udma_rx_flush *rx_flush = &ud->rx_flush;
	struct cppi5_desc_hdr_t *tr_desc;
	struct cppi5_tr_type1_t *tr_req;
	struct cppi5_host_desc_t *desc;
	struct device *dev = ud->dev;
	struct udma_hwdesc *hwdesc;
	size_t tr_size;

	/* Allocate 1K buffer for discarded data on RX channel teardown */
	rx_flush->buffer_size = SZ_1K;
	rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size,
					      GFP_KERNEL);
	if (!rx_flush->buffer_vaddr)
		return -ENOMEM;

	rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr,
						rx_flush->buffer_size,
						DMA_TO_DEVICE);
	if (dma_mapping_error(dev, rx_flush->buffer_paddr))
		return -ENOMEM;

	/* Set up descriptor to be used for TR mode */
	hwdesc = &rx_flush->hwdescs[0];
	tr_size = sizeof(struct cppi5_tr_type1_t);
	hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1);
	hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
					ud->desc_align);

	hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
						GFP_KERNEL);
	if (!hwdesc->cppi5_desc_vaddr)
		return -ENOMEM;

	hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
						  hwdesc->cppi5_desc_size,
						  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
		return -ENOMEM;

	/* Start of the TR req records */
	hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
	/* Start address of the TR response array */
	hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size;

	tr_desc = hwdesc->cppi5_desc_vaddr;
	cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0);
	cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
	cppi5_desc_set_retpolicy(tr_desc, 0, 0);

	tr_req = hwdesc->tr_req_base;
	cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false,
		      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
	cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT);

	tr_req->addr = rx_flush->buffer_paddr;
	tr_req->icnt0 = rx_flush->buffer_size;
	tr_req->icnt1 = 1;

	dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
				   hwdesc->cppi5_desc_size, DMA_TO_DEVICE);

	/* Set up descriptor to be used for packet mode */
	hwdesc = &rx_flush->hwdescs[1];
	hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
					CPPI5_INFO0_HDESC_EPIB_SIZE +
					CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE,
					ud->desc_align);

	hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
						GFP_KERNEL);
	if (!hwdesc->cppi5_desc_vaddr)
		return -ENOMEM;

	hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
						  hwdesc->cppi5_desc_size,
						  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
		return -ENOMEM;

	desc = hwdesc->cppi5_desc_vaddr;
	cppi5_hdesc_init(desc, 0, 0);
	cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
	cppi5_desc_set_retpolicy(&desc->hdr, 0, 0);

	cppi5_hdesc_attach_buf(desc,
			       rx_flush->buffer_paddr, rx_flush->buffer_size,
			       rx_flush->buffer_paddr, rx_flush->buffer_size);

	dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
				   hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
	return 0;
}
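
/*
 * debugfs support: the dmaengine core's summary file calls back into the
 * driver through the dbg_summary_show hook so each in-use channel can be
 * annotated with its UDMA specifics (channel/flow ids, PSI-L thread pair,
 * endpoint type and packet vs TR mode) in addition to the generic
 * dmaengine information.
 */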
#ifdef CONFIG_DEBUG_FS
static void udma_dbg_summary_show_chan(struct seq_file *s,
				       struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	struct udma_chan_config *ucc = &uc->config;

	seq_printf(s, " %-13s| %s", dma_chan_name(chan),
		   chan->dbg_client_name ?: "in-use");
	if (ucc->tr_trigger_type)
		seq_puts(s, " (triggered, ");
	else
		seq_printf(s, " (%s, ",
			   dmaengine_get_direction_text(uc->config.dir));

	switch (uc->config.dir) {
	case DMA_MEM_TO_MEM:
		if (uc->ud->match_data->type == DMA_TYPE_BCDMA) {
			seq_printf(s, "bchan%d)\n", uc->bchan->id);
			return;
		}

		seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id,
			   ucc->src_thread, ucc->dst_thread);
		break;
	case DMA_DEV_TO_MEM:
		seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id,
			   ucc->src_thread, ucc->dst_thread);
		if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
			seq_printf(s, "rflow%d, ", uc->rflow->id);
		break;
	case DMA_MEM_TO_DEV:
		seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id,
			   ucc->src_thread, ucc->dst_thread);
		if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
			seq_printf(s, "tflow%d, ", uc->tchan->tflow_id);
		break;
	default:
		seq_printf(s, ")\n");
		return;
	}

	if (ucc->ep_type == PSIL_EP_NATIVE) {
		seq_printf(s, "PSI-L Native");
		if (ucc->metadata_size) {
			seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : "");
			if (ucc->psd_size)
				seq_printf(s, " PSDsize:%u", ucc->psd_size);
			seq_printf(s, " ]");
		}
	} else {
		seq_printf(s, "PDMA");
		if (ucc->enable_acc32 || ucc->enable_burst)
			seq_printf(s, "[%s%s ]",
				   ucc->enable_acc32 ? " ACC32" : "",
				   ucc->enable_burst ? " BURST" : "");
	}

	seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode");
}

static void udma_dbg_summary_show(struct seq_file *s,
				  struct dma_device *dma_dev)
{
	struct dma_chan *chan;

	list_for_each_entry(chan, &dma_dev->channels, device_node) {
		if (chan->client_count)
			udma_dbg_summary_show_chan(s, chan);
	}
}
#endif /* CONFIG_DEBUG_FS */
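
/*
 * The dmaengine copy_align advertised to memcpy users is derived from the
 * largest burst size configured for the highest available throughput
 * level, so memcpy buffers can be kept burst aligned. Without memcpy
 * support (or without suitable channels) the default of 8 byte alignment
 * is reported.
 */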
static enum dmaengine_alignment udma_get_copy_align(struct udma_dev *ud)
{
	const struct udma_match_data *match_data = ud->match_data;
	u8 tpl;

	if (!match_data->enable_memcpy_support)
		return DMAENGINE_ALIGN_8_BYTES;

	/* Get the highest TPL level the device supports for memcpy */
	if (ud->bchan_cnt)
		tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, 0);
	else if (ud->tchan_cnt)
		tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, 0);
	else
		return DMAENGINE_ALIGN_8_BYTES;

	switch (match_data->burst_size[tpl]) {
	case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES:
		return DMAENGINE_ALIGN_256_BYTES;
	case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES:
		return DMAENGINE_ALIGN_128_BYTES;
	case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES:
	default:
		return DMAENGINE_ALIGN_64_BYTES;
	}
}

#define TI_UDMAC_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
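
/*
 * Probe sequence shared by UDMA, BCDMA and PKTDMA: set the 48-bit DMA
 * mask, match the SoC/compatible data, map the MMR regions, look up the
 * TISCI handle and device ids from DT, set up the ring accelerator and the
 * INTA MSI domain, register the dmaengine callbacks for the given flavour,
 * carve up the channel/flow resources, and finally register with the
 * dmaengine core and of_dma so clients can request channels by phandle.
 */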
static int udma_probe(struct platform_device *pdev)
{
	struct device_node *navss_node = pdev->dev.parent->of_node;
	const struct soc_device_attribute *soc;
	struct device *dev = &pdev->dev;
	struct udma_dev *ud;
	const struct of_device_id *match;
	int i, ret;
	int ch_count;

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (ret)
		dev_err(dev, "failed to set dma mask stuff\n");

	ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
	if (!ud)
		return -ENOMEM;

	match = of_match_node(udma_of_match, dev->of_node);
	if (!match) {
		dev_err(dev, "No compatible match found\n");
		return -ENODEV;
	}
	ud->match_data = match->data;

	ud->soc_data = ud->match_data->soc_data;
	if (!ud->soc_data) {
		soc = soc_device_match(k3_soc_devices);
		if (!soc) {
			dev_err(dev, "No compatible SoC found\n");
			return -ENODEV;
		}
		ud->soc_data = soc->data;
	}

	ret = udma_get_mmrs(pdev, ud);
	if (ret)
		return ret;

	ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
	if (IS_ERR(ud->tisci_rm.tisci))
		return PTR_ERR(ud->tisci_rm.tisci);

	ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id",
				   &ud->tisci_rm.tisci_dev_id);
	if (ret) {
		dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
		return ret;
	}
	pdev->id = ud->tisci_rm.tisci_dev_id;

	ret = of_property_read_u32(navss_node, "ti,sci-dev-id",
				   &ud->tisci_rm.tisci_navss_dev_id);
	if (ret) {
		dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret);
		return ret;
	}

	if (ud->match_data->type == DMA_TYPE_UDMA) {
		ret = of_property_read_u32(dev->of_node, "ti,udma-atype",
					   &ud->atype);
		if (!ret && ud->atype > 2) {
			dev_err(dev, "Invalid atype: %u\n", ud->atype);
			return -EINVAL;
		}
	} else {
		ret = of_property_read_u32(dev->of_node, "ti,asel",
					   &ud->asel);
		if (!ret && ud->asel > 15) {
			dev_err(dev, "Invalid asel: %u\n", ud->asel);
			return -EINVAL;
		}
	}

	ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
	ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;

	if (ud->match_data->type == DMA_TYPE_UDMA) {
		ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
	} else {
		struct k3_ringacc_init_data ring_init_data;

		ring_init_data.tisci = ud->tisci_rm.tisci;
		ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id;
		if (ud->match_data->type == DMA_TYPE_BCDMA) {
			ring_init_data.num_rings = ud->bchan_cnt +
						   ud->tchan_cnt +
						   ud->rchan_cnt;
		} else {
			ring_init_data.num_rings = ud->rflow_cnt +
						   ud->tflow_cnt;
		}

		ud->ringacc = k3_ringacc_dmarings_init(pdev, &ring_init_data);
	}

	if (IS_ERR(ud->ringacc))
		return PTR_ERR(ud->ringacc);

	dev->msi.domain = of_msi_get_domain(dev, dev->of_node,
					    DOMAIN_BUS_TI_SCI_INTA_MSI);
	if (!dev->msi.domain) {
		return -EPROBE_DEFER;
	}

	dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
	/* cyclic operation is not supported via PKTDMA */
	if (ud->match_data->type != DMA_TYPE_PKTDMA) {
		dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
		ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
	}

	ud->ddev.device_config = udma_slave_config;
	ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
	ud->ddev.device_issue_pending = udma_issue_pending;
	ud->ddev.device_tx_status = udma_tx_status;
	ud->ddev.device_pause = udma_pause;
	ud->ddev.device_resume = udma_resume;
	ud->ddev.device_terminate_all = udma_terminate_all;
	ud->ddev.device_synchronize = udma_synchronize;
#ifdef CONFIG_DEBUG_FS
	ud->ddev.dbg_summary_show = udma_dbg_summary_show;
#endif

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		ud->ddev.device_alloc_chan_resources =
					udma_alloc_chan_resources;
		break;
	case DMA_TYPE_BCDMA:
		ud->ddev.device_alloc_chan_resources =
					bcdma_alloc_chan_resources;
		ud->ddev.device_router_config = bcdma_router_config;
		break;
	case DMA_TYPE_PKTDMA:
		ud->ddev.device_alloc_chan_resources =
					pktdma_alloc_chan_resources;
		break;
	default:
		return -EINVAL;
	}
	ud->ddev.device_free_chan_resources = udma_free_chan_resources;

	ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
	ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
	ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
				       DESC_METADATA_ENGINE;
	if (ud->match_data->enable_memcpy_support &&
	    !(ud->match_data->type == DMA_TYPE_BCDMA && ud->bchan_cnt == 0)) {
		dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
		ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
		ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
	}

	ud->ddev.dev = dev;
	ud->dev = dev;
	ud->psil_base = ud->match_data->psil_base;

	INIT_LIST_HEAD(&ud->ddev.channels);
	INIT_LIST_HEAD(&ud->desc_to_purge);

	ch_count = setup_resources(ud);
	if (ch_count <= 0)
		return ch_count;

	spin_lock_init(&ud->lock);
	INIT_WORK(&ud->purge_work, udma_purge_desc_work);

	ud->desc_align = 64;
	if (ud->desc_align < dma_get_cache_alignment())
		ud->desc_align = dma_get_cache_alignment();

	ret = udma_setup_rx_flush(ud);
	if (ret)
		return ret;

	for (i = 0; i < ud->bchan_cnt; i++) {
		struct udma_bchan *bchan = &ud->bchans[i];

		bchan->id = i;
		bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000;
	}

	for (i = 0; i < ud->tchan_cnt; i++) {
		struct udma_tchan *tchan = &ud->tchans[i];

		tchan->id = i;
		tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
	}

	for (i = 0; i < ud->rchan_cnt; i++) {
		struct udma_rchan *rchan = &ud->rchans[i];

		rchan->id = i;
		rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
	}

	for (i = 0; i < ud->rflow_cnt; i++) {
		struct udma_rflow *rflow = &ud->rflows[i];

		rflow->id = i;
	}

	for (i = 0; i < ch_count; i++) {
		struct udma_chan *uc = &ud->channels[i];

		uc->ud = ud;
		uc->vc.desc_free = udma_desc_free;
		uc->id = i;
		uc->bchan = NULL;
		uc->tchan = NULL;
		uc->rchan = NULL;
		uc->config.remote_thread_id = -1;
		uc->config.mapped_channel_id = -1;
		uc->config.default_flow_id = -1;
		uc->config.dir = DMA_MEM_TO_MEM;
		uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
					  dev_name(dev), i);

		vchan_init(&uc->vc, &ud->ddev);
		/* Use custom vchan completion handling */
		tasklet_setup(&uc->vc.task, udma_vchan_complete);
		init_completion(&uc->teardown_completed);
		INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
	}

	/* Configure the copy_align to the maximum burst size the device supports */
	ud->ddev.copy_align = udma_get_copy_align(ud);

	ret = dma_async_device_register(&ud->ddev);
	if (ret) {
		dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
		return ret;
	}

	platform_set_drvdata(pdev, ud);

	ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
	if (ret) {
		dev_err(dev, "failed to register of_dma controller\n");
		dma_async_device_unregister(&ud->ddev);
	}

	return ret;
}
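
/*
 * System suspend/resume: rather than saving raw channel state, suspend
 * frees the resources of every channel that has a client (after backing up
 * its configuration) and resume re-allocates them with the restored
 * configuration. The late system-sleep callbacks run after normal device
 * suspend, so DMA client drivers have already stopped using their channels.
 */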
static int __maybe_unused udma_pm_suspend(struct device *dev)
{
	struct udma_dev *ud = dev_get_drvdata(dev);
	struct dma_device *dma_dev = &ud->ddev;
	struct dma_chan *chan;
	struct udma_chan *uc;

	list_for_each_entry(chan, &dma_dev->channels, device_node) {
		if (chan->client_count) {
			uc = to_udma_chan(chan);
			/* backup the channel configuration */
			memcpy(&uc->backup_config, &uc->config,
			       sizeof(struct udma_chan_config));
			dev_dbg(dev, "Suspending channel %s\n",
				dma_chan_name(chan));
			ud->ddev.device_free_chan_resources(chan);
		}
	}

	return 0;
}

static int __maybe_unused udma_pm_resume(struct device *dev)
{
	struct udma_dev *ud = dev_get_drvdata(dev);
	struct dma_device *dma_dev = &ud->ddev;
	struct dma_chan *chan;
	struct udma_chan *uc;
	int ret;

	list_for_each_entry(chan, &dma_dev->channels, device_node) {
		if (chan->client_count) {
			uc = to_udma_chan(chan);
			/* restore the channel configuration */
			memcpy(&uc->config, &uc->backup_config,
			       sizeof(struct udma_chan_config));
			dev_dbg(dev, "Resuming channel %s\n",
				dma_chan_name(chan));
			ret = ud->ddev.device_alloc_chan_resources(chan);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}

static const struct dev_pm_ops udma_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(udma_pm_suspend, udma_pm_resume)
};

static struct platform_driver udma_driver = {
	.driver = {
		.name	= "ti-udma",
		.of_match_table = udma_of_match,
		.suppress_bind_attrs = true,
		.pm = &udma_pm_ops,
	},
	.probe		= udma_probe,
};

module_platform_driver(udma_driver);
MODULE_DESCRIPTION("Texas Instruments UDMA support");
MODULE_LICENSE("GPL v2");

/* Private interfaces to UDMA */
#include "k3-udma-private.c"