// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 */

#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/soc/ti/ti_sci_inta_msi.h>
#include <linux/dma/ti-cppi5.h>

#include "../virt-dma.h"
#include "k3-psil-priv.h"

struct udma_static_tr {
	u8 elsize; /* RPSTR0 */
	u16 elcnt; /* RPSTR0 */
	u16 bstcnt; /* RPSTR1 */
};

#define K3_UDMA_MAX_RFLOWS		1024
#define K3_UDMA_DEFAULT_RING_SIZE	16

/* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
#define UDMA_RFLOW_SRCTAG_NONE		0
#define UDMA_RFLOW_SRCTAG_CFG_TAG	1
#define UDMA_RFLOW_SRCTAG_FLOW_ID	2
#define UDMA_RFLOW_SRCTAG_SRC_TAG	4

#define UDMA_RFLOW_DSTTAG_NONE		0
#define UDMA_RFLOW_DSTTAG_CFG_TAG	1
#define UDMA_RFLOW_DSTTAG_FLOW_ID	2
#define UDMA_RFLOW_DSTTAG_DST_TAG_LO	4
#define UDMA_RFLOW_DSTTAG_DST_TAG_HI	5

static const char * const mmr_names[] = { "gcfg", "rchanrt", "tchanrt" };

struct udma_tchan {
	struct k3_ring *t_ring; /* Transmit ring */
	struct k3_ring *tc_ring; /* Transmit Completion ring */
};

struct udma_rflow {
	struct k3_ring *fd_ring; /* Free Descriptor ring */
	struct k3_ring *r_ring; /* Receive ring */
};

#define UDMA_FLAG_PDMA_ACC32		BIT(0)
#define UDMA_FLAG_PDMA_BURST		BIT(1)

struct udma_match_data {
	bool enable_memcpy_support;
	u32 level_start_idx[];
};

struct udma_dev {
	struct dma_device ddev;

	void __iomem *mmrs[MMR_LAST];
	const struct udma_match_data *match_data;

	size_t desc_align; /* alignment to use for descriptors */

	struct udma_tisci_rm tisci_rm;

	struct k3_ringacc *ringacc;

	struct work_struct purge_work;
	struct list_head desc_to_purge;

	unsigned long *tchan_map;
	unsigned long *rchan_map;
	unsigned long *rflow_gp_map;
	unsigned long *rflow_gp_map_allocated;
	unsigned long *rflow_in_use;

	struct udma_tchan *tchans;
	struct udma_rchan *rchans;
	struct udma_rflow *rflows;

	struct udma_chan *channels;
};

struct udma_hwdesc {
	size_t cppi5_desc_size;
	void *cppi5_desc_vaddr;
	dma_addr_t cppi5_desc_paddr;

	/* TR descriptor internal pointers */
	struct cppi5_tr_resp_t *tr_resp_base;
};

struct udma_desc {
	struct virt_dma_desc vd;

	enum dma_transfer_direction dir;

	struct udma_static_tr static_tr;

	unsigned int desc_idx; /* Only used for cyclic in packet mode */

	void *metadata; /* pointer to provided metadata buffer (EPIP, PSdata) */

	unsigned int hwdesc_count;
	struct udma_hwdesc hwdesc[0];
};

enum udma_chan_state {
	UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */
	UDMA_CHAN_IS_ACTIVE, /* Normal operation */
	UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */
};

struct udma_tx_drain {
	struct delayed_work work;
	unsigned long jiffie;
};

struct udma_chan_config {
	bool pkt_mode; /* TR or packet */
	bool needs_epib; /* EPIB is needed for the communication or not */
	u32 psd_size; /* size of Protocol Specific Data */
	u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
	u32 hdesc_size; /* Size of a packet descriptor in packet mode */
	bool notdpkt; /* Suppress sending TDC packet */
	int remote_thread_id;

	enum psil_endpoint_type ep_type;

	enum udma_tp_level channel_tpl; /* Channel Throughput Level */

	enum dma_transfer_direction dir;
};

struct udma_chan {
	struct virt_dma_chan vc;
	struct dma_slave_config cfg;

	struct udma_desc *desc;
	struct udma_desc *terminated_desc;
	struct udma_static_tr static_tr;

	struct udma_tchan *tchan;
	struct udma_rchan *rchan;
	struct udma_rflow *rflow;

	enum udma_chan_state state;
	struct completion teardown_completed;

	struct udma_tx_drain tx_drain;

	u32 bcnt; /* number of bytes completed since the start of the channel */
	u32 in_ring_cnt; /* number of descriptors in flight */

	/* Channel configuration parameters */
	struct udma_chan_config config;

	/* dmapool for packet mode descriptors */
	struct dma_pool *hdesc_pool;
};

static inline struct udma_dev *to_udma_dev(struct dma_device *d)
{
	return container_of(d, struct udma_dev, ddev);
}

static inline struct udma_chan *to_udma_chan(struct dma_chan *c)
{
	return container_of(c, struct udma_chan, vc.chan);
}

static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct udma_desc, vd.tx);
}

/* Generic register access functions */
static inline u32 udma_read(void __iomem *base, int reg)
{
	return readl(base + reg);
}

static inline void udma_write(void __iomem *base, int reg, u32 val)
{
	writel(val, base + reg);
}

static inline void udma_update_bits(void __iomem *base, int reg,
				    u32 mask, u32 val)
{
	u32 tmp, orig;

	orig = readl(base + reg);
	tmp = orig & ~mask;
	tmp |= (val & mask);

	if (tmp != orig)
		writel(tmp, base + reg);
}

static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
{
	return udma_read(tchan->reg_rt, reg);
}

static inline void udma_tchanrt_write(struct udma_tchan *tchan, int reg,
				      u32 val)
{
	udma_write(tchan->reg_rt, reg, val);
}

static inline void udma_tchanrt_update_bits(struct udma_tchan *tchan, int reg,
					    u32 mask, u32 val)
{
	udma_update_bits(tchan->reg_rt, reg, mask, val);
}

static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
{
	return udma_read(rchan->reg_rt, reg);
}

static inline void udma_rchanrt_write(struct udma_rchan *rchan, int reg,
				      u32 val)
{
	udma_write(rchan->reg_rt, reg, val);
}

static inline void udma_rchanrt_update_bits(struct udma_rchan *rchan, int reg,
					    u32 mask, u32 val)
{
	udma_update_bits(rchan->reg_rt, reg, mask, val);
}

static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
	return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
					      tisci_rm->tisci_navss_dev_id,
					      src_thread, dst_thread);
}

static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
			     u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
	return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
						tisci_rm->tisci_navss_dev_id,
						src_thread, dst_thread);
}

static void udma_reset_uchan(struct udma_chan *uc)
{
	memset(&uc->config, 0, sizeof(uc->config));
	uc->config.remote_thread_id = -1;
	uc->state = UDMA_CHAN_IS_IDLE;
}

static void udma_dump_chan_stdata(struct udma_chan *uc)
{
	struct device *dev = uc->ud->dev;
	u32 offset;
	int i;

	if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) {
		dev_dbg(dev, "TCHAN State data:\n");
		for (i = 0; i < 32; i++) {
			offset = UDMA_TCHAN_RT_STDATA_REG + i * 4;
			dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i,
				udma_tchanrt_read(uc->tchan, offset));
		}
	}

	if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) {
		dev_dbg(dev, "RCHAN State data:\n");
		for (i = 0; i < 32; i++) {
			offset = UDMA_RCHAN_RT_STDATA_REG + i * 4;
			dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i,
				udma_rchanrt_read(uc->rchan, offset));
		}
	}
}

static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d,
						    int idx)
{
	return d->hwdesc[idx].cppi5_desc_paddr;
}

static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx)
{
	return d->hwdesc[idx].cppi5_desc_vaddr;
}

static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc,
						   dma_addr_t paddr)
{
	struct udma_desc *d = uc->terminated_desc;

	if (d) {
		dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
								   d->desc_idx);

		if (desc_paddr != paddr)
			d = NULL;
	}

	if (!d) {
		d = uc->desc;
		if (d) {
			dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
								d->desc_idx);

			if (desc_paddr != paddr)
				d = NULL;
		}
	}

	return d;
}

static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d)
{
	if (uc->use_dma_pool) {
		int i;

		for (i = 0; i < d->hwdesc_count; i++) {
			if (!d->hwdesc[i].cppi5_desc_vaddr)
				continue;

			dma_pool_free(uc->hdesc_pool,
				      d->hwdesc[i].cppi5_desc_vaddr,
				      d->hwdesc[i].cppi5_desc_paddr);

			d->hwdesc[i].cppi5_desc_vaddr = NULL;
		}
	} else if (d->hwdesc[0].cppi5_desc_vaddr) {
		struct udma_dev *ud = uc->ud;

		dma_free_coherent(ud->dev, d->hwdesc[0].cppi5_desc_size,
				  d->hwdesc[0].cppi5_desc_vaddr,
				  d->hwdesc[0].cppi5_desc_paddr);

		d->hwdesc[0].cppi5_desc_vaddr = NULL;
	}
}

static void udma_purge_desc_work(struct work_struct *work)
{
	struct udma_dev *ud = container_of(work, typeof(*ud), purge_work);
	struct virt_dma_desc *vd, *_vd;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&ud->lock, flags);
	list_splice_tail_init(&ud->desc_to_purge, &head);
	spin_unlock_irqrestore(&ud->lock, flags);

	list_for_each_entry_safe(vd, _vd, &head, node) {
		struct udma_chan *uc = to_udma_chan(vd->tx.chan);
		struct udma_desc *d = to_udma_desc(&vd->tx);

		udma_free_hwdesc(uc, d);
		list_del(&vd->node);
		kfree(d);
	}

	/* If more to purge, schedule the work again */
	if (!list_empty(&ud->desc_to_purge))
		schedule_work(&ud->purge_work);
}

static void udma_desc_free(struct virt_dma_desc *vd)
{
	struct udma_dev *ud = to_udma_dev(vd->tx.chan->device);
	struct udma_chan *uc = to_udma_chan(vd->tx.chan);
	struct udma_desc *d = to_udma_desc(&vd->tx);
	unsigned long flags;

	if (uc->terminated_desc == d)
		uc->terminated_desc = NULL;

	if (uc->use_dma_pool) {
		udma_free_hwdesc(uc, d);
		kfree(d);
		return;
	}

	spin_lock_irqsave(&ud->lock, flags);
	list_add_tail(&vd->node, &ud->desc_to_purge);
	spin_unlock_irqrestore(&ud->lock, flags);

	schedule_work(&ud->purge_work);
}

static bool udma_is_chan_running(struct udma_chan *uc)
{
	u32 trt_ctl = 0;
	u32 rrt_ctl = 0;

	if (uc->tchan)
		trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
	if (uc->rchan)
		rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);

	if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
		return true;

	return false;
}

static bool udma_is_chan_paused(struct udma_chan *uc)
{
	u32 val, pause_mask;

	switch (uc->desc->dir) {
	case DMA_DEV_TO_MEM:
		val = udma_rchanrt_read(uc->rchan,
					UDMA_RCHAN_RT_PEER_RT_EN_REG);
		pause_mask = UDMA_PEER_RT_EN_PAUSE;
		break;
	case DMA_MEM_TO_DEV:
		val = udma_tchanrt_read(uc->tchan,
					UDMA_TCHAN_RT_PEER_RT_EN_REG);
		pause_mask = UDMA_PEER_RT_EN_PAUSE;
		break;
	case DMA_MEM_TO_MEM:
		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		pause_mask = UDMA_CHAN_RT_CTL_PAUSE;
		break;
	default:
		return false;
	}

	if (val & pause_mask)
		return true;

	return false;
}

static void udma_sync_for_device(struct udma_chan *uc, int idx)
{
	struct udma_desc *d = uc->desc;

	if (uc->cyclic && uc->config.pkt_mode) {
		dma_sync_single_for_device(uc->ud->dev,
					   d->hwdesc[idx].cppi5_desc_paddr,
					   d->hwdesc[idx].cppi5_desc_size,
					   DMA_TO_DEVICE);
	} else {
		int i;

		for (i = 0; i < d->hwdesc_count; i++) {
			if (!d->hwdesc[i].cppi5_desc_vaddr)
				continue;

			dma_sync_single_for_device(uc->ud->dev,
						   d->hwdesc[i].cppi5_desc_paddr,
						   d->hwdesc[i].cppi5_desc_size,
						   DMA_TO_DEVICE);
		}
	}
}

static int udma_push_to_ring(struct udma_chan *uc, int idx)
{
	struct udma_desc *d = uc->desc;
	struct k3_ring *ring = NULL;
	int ret = -EINVAL;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		ring = uc->rflow->fd_ring;
		break;
	case DMA_MEM_TO_DEV:
	case DMA_MEM_TO_MEM:
		ring = uc->tchan->t_ring;
		break;
	default:
		break;
	}

	if (ring) {
		dma_addr_t desc_addr = udma_curr_cppi5_desc_paddr(d, idx);

		wmb(); /* Ensure that writes are not moved over this point */
		udma_sync_for_device(uc, idx);
		ret = k3_ringacc_ring_push(ring, &desc_addr);
	}

	return ret;
}

static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
{
	struct k3_ring *ring = NULL;
	int ret = -ENOENT;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		ring = uc->rflow->r_ring;
		break;
	case DMA_MEM_TO_DEV:
	case DMA_MEM_TO_MEM:
		ring = uc->tchan->tc_ring;
		break;
	default:
		break;
	}

	if (ring && k3_ringacc_ring_get_occ(ring)) {
		struct udma_desc *d = NULL;

		ret = k3_ringacc_ring_pop(ring, addr);
		if (ret)
			return ret;

		/* Teardown completion */
		if (cppi5_desc_is_tdcm(*addr))
			return ret;

		d = udma_udma_desc_from_paddr(uc, *addr);

		if (d)
			dma_sync_single_for_cpu(uc->ud->dev, *addr,
						d->hwdesc[0].cppi5_desc_size,
						DMA_FROM_DEVICE);
		rmb(); /* Ensure that reads are not moved before this point */
	}

	return ret;
}

static void udma_reset_rings(struct udma_chan *uc)
{
	struct k3_ring *ring1 = NULL;
	struct k3_ring *ring2 = NULL;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		ring1 = uc->rflow->fd_ring;
		ring2 = uc->rflow->r_ring;
		break;
	case DMA_MEM_TO_DEV:
	case DMA_MEM_TO_MEM:
		ring1 = uc->tchan->t_ring;
		ring2 = uc->tchan->tc_ring;
		break;
	default:
		break;
	}

	if (ring1)
		k3_ringacc_ring_reset_dma(ring1,
					  k3_ringacc_ring_get_occ(ring1));
	if (ring2)
		k3_ringacc_ring_reset(ring2);

	/* make sure we are not leaking memory by stalled descriptor */
	if (uc->terminated_desc) {
		udma_desc_free(&uc->terminated_desc->vd);
		uc->terminated_desc = NULL;
	}
}
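
/*
 * The channel realtime byte/packet counter registers decrement by the value
 * written to them, so reading a counter and writing the same value back
 * clears it. udma_reset_counters() uses this to zero all counters before a
 * channel is (re)started.
 */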
static void udma_reset_counters(struct udma_chan *uc)
{
	u32 val;

	if (uc->tchan) {
		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
	}

	if (uc->rchan) {
		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
	}
}

static int udma_reset_chan(struct udma_chan *uc, bool hard)
{
	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	/* Reset all counters */
	udma_reset_counters(uc);

	/* Hard reset: re-initialize the channel to reset */
	if (hard) {
		struct udma_chan_config ucc_backup;
		int ret;

		memcpy(&ucc_backup, &uc->config, sizeof(uc->config));
		uc->ud->ddev.device_free_chan_resources(&uc->vc.chan);

		/* restore the channel configuration */
		memcpy(&uc->config, &ucc_backup, sizeof(uc->config));
		ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan);
		if (ret)
			return ret;

		/*
		 * Setting forced teardown after forced reset helps recovering
		 * the rchan.
		 */
		if (uc->config.dir == DMA_DEV_TO_MEM)
			udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
					   UDMA_CHAN_RT_CTL_EN |
					   UDMA_CHAN_RT_CTL_TDOWN |
					   UDMA_CHAN_RT_CTL_FTDOWN);
	}
	uc->state = UDMA_CHAN_IS_IDLE;

	return 0;
}

static void udma_start_desc(struct udma_chan *uc)
{
	struct udma_chan_config *ucc = &uc->config;

	if (ucc->pkt_mode && (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
		int i;

		/* Push all descriptors to ring for packet mode cyclic or RX */
		for (i = 0; i < uc->desc->sglen; i++)
			udma_push_to_ring(uc, i);
	} else {
		udma_push_to_ring(uc, 0);
	}
}

static bool udma_chan_needs_reconfiguration(struct udma_chan *uc)
{
	/* Only PDMAs have staticTR */
	if (uc->config.ep_type == PSIL_EP_NATIVE)
		return false;

	/* Check if the staticTR configuration has changed for TX */
	if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr)))
		return true;

	return false;
}

static int udma_start(struct udma_chan *uc)
{
	struct virt_dma_desc *vd = vchan_next_desc(&uc->vc);

	if (!vd) {
		uc->desc = NULL;
		return -ENOENT;
	}

	list_del(&vd->node);

	uc->desc = to_udma_desc(&vd->tx);

	/* Channel is already running and does not need reconfiguration */
	if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) {
		udma_start_desc(uc);
		goto out;
	}

	/* Make sure that we clear the teardown bit, if it is set */
	udma_reset_chan(uc, false);

	/* Push descriptors before we start the channel */
	udma_start_desc(uc);

	switch (uc->desc->dir) {
	case DMA_DEV_TO_MEM:
		/* Config remote TR */
		if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
			u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
				  PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
			const struct udma_match_data *match_data =
							uc->ud->match_data;

			if (uc->config.enable_acc32)
				val |= PDMA_STATIC_TR_XY_ACC32;
			if (uc->config.enable_burst)
				val |= PDMA_STATIC_TR_XY_BURST;

			udma_rchanrt_write(uc->rchan,
				UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG, val);

			udma_rchanrt_write(uc->rchan,
				UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG,
				PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt,
						 match_data->statictr_z_mask));

			/* save the current staticTR configuration */
			memcpy(&uc->static_tr, &uc->desc->static_tr,
			       sizeof(uc->static_tr));
		}

		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		break;
	case DMA_MEM_TO_DEV:
		/* Config remote TR */
		if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
			u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
				  PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);

			if (uc->config.enable_acc32)
				val |= PDMA_STATIC_TR_XY_ACC32;
			if (uc->config.enable_burst)
				val |= PDMA_STATIC_TR_XY_BURST;

			udma_tchanrt_write(uc->tchan,
				UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG, val);

			/* save the current staticTR configuration */
			memcpy(&uc->static_tr, &uc->desc->static_tr,
			       sizeof(uc->static_tr));
		}

		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		break;
	default:
		return -EINVAL;
	}

	uc->state = UDMA_CHAN_IS_ACTIVE;
out:

	return 0;
}
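
/*
 * Stopping a channel is a teardown rather than an abrupt disable: for RX the
 * teardown is requested on the PSI-L peer, for TX the TDOWN bit is set on the
 * channel itself. Completion of the teardown is signalled later by a TDCM
 * descriptor arriving on the completion ring (see udma_ring_irq_handler()).
 */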
static int udma_stop(struct udma_chan *uc)
{
	enum udma_chan_state old_state = uc->state;

	uc->state = UDMA_CHAN_IS_TERMINATING;
	reinit_completion(&uc->teardown_completed);

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE |
				   UDMA_PEER_RT_EN_TEARDOWN);
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE |
				   UDMA_PEER_RT_EN_FLUSH);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN |
				   UDMA_CHAN_RT_CTL_TDOWN);
		break;
	case DMA_MEM_TO_MEM:
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN |
				   UDMA_CHAN_RT_CTL_TDOWN);
		break;
	default:
		uc->state = old_state;
		complete_all(&uc->teardown_completed);
		return -EINVAL;
	}

	return 0;
}

static void udma_cyclic_packet_elapsed(struct udma_chan *uc)
{
	struct udma_desc *d = uc->desc;
	struct cppi5_host_desc_t *h_desc;

	h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr;
	cppi5_hdesc_reset_to_original(h_desc);
	udma_push_to_ring(uc, d->desc_idx);
	d->desc_idx = (d->desc_idx + 1) % d->sglen;
}

static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d)
{
	struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr;

	memcpy(d->metadata, h_desc->epib, d->metadata_size);
}
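
/*
 * For TX towards a PDMA peripheral the descriptor coming back on the
 * completion ring only means that UDMA has pushed the data out on PSI-L; the
 * peripheral may still be draining it. The peer byte counter is compared with
 * the channel byte counter to decide whether the transfer really finished.
 */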
static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
{
	u32 peer_bcnt, bcnt;

	/* Only TX towards PDMA is affected */
	if (uc->config.ep_type == PSIL_EP_NATIVE ||
	    uc->config.dir != DMA_MEM_TO_DEV)
		return true;

	peer_bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
	bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);

	if (peer_bcnt < bcnt) {
		uc->tx_drain.residue = bcnt - peer_bcnt;
		uc->tx_drain.jiffie = jiffies;
		return false;
	}

	return true;
}

static void udma_check_tx_completion(struct work_struct *work)
{
	struct udma_chan *uc = container_of(work, typeof(*uc),
					    tx_drain.work.work);
	bool desc_done = true;
	u32 residue_diff;
	unsigned long jiffie_diff, delay;

	if (uc->desc) {
		residue_diff = uc->tx_drain.residue;
		jiffie_diff = uc->tx_drain.jiffie;
		desc_done = udma_is_desc_really_done(uc, uc->desc);
	}

	if (!desc_done) {
		jiffie_diff = uc->tx_drain.jiffie - jiffie_diff;
		residue_diff -= uc->tx_drain.residue;
		if (residue_diff) {
			/* Try to guess when we should check next time */
			residue_diff /= jiffie_diff;
			delay = uc->tx_drain.residue / residue_diff / 3;
			if (jiffies_to_msecs(delay) < 5)
				delay = 0;
		} else {
			/* No progress, check again in 1 second */
			delay = HZ;
		}

		schedule_delayed_work(&uc->tx_drain.work, delay);
	} else if (uc->desc) {
		struct udma_desc *d = uc->desc;

		uc->bcnt += d->residue;
		vchan_cookie_complete(&d->vd);
	}
}

static irqreturn_t udma_ring_irq_handler(int irq, void *data)
{
	struct udma_chan *uc = data;
	struct udma_desc *d;
	unsigned long flags;
	dma_addr_t paddr = 0;

	if (udma_pop_from_ring(uc, &paddr) || !paddr)
		return IRQ_HANDLED;

	spin_lock_irqsave(&uc->vc.lock, flags);

	/* Teardown completion message */
	if (cppi5_desc_is_tdcm(paddr)) {
		/* Compensate our internal pop/push counter */
		uc->in_ring_cnt++;

		complete_all(&uc->teardown_completed);

		if (uc->terminated_desc) {
			udma_desc_free(&uc->terminated_desc->vd);
			uc->terminated_desc = NULL;
		}

		goto out;
	}

	d = udma_udma_desc_from_paddr(uc, paddr);

	if (d) {
		dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
								   d->desc_idx);
		if (desc_paddr != paddr) {
			dev_err(uc->ud->dev, "not matching descriptors!\n");
			goto out;
		}

		if (uc->cyclic) {
			/* push the descriptor back to the ring */
			if (d == uc->desc) {
				udma_cyclic_packet_elapsed(uc);
				vchan_cyclic_callback(&d->vd);
			}
		} else {
			bool desc_done = false;

			if (d == uc->desc) {
				desc_done = udma_is_desc_really_done(uc, d);

				if (desc_done)
					uc->bcnt += d->residue;
				else
					schedule_delayed_work(&uc->tx_drain.work,
							      0);
			}

			if (desc_done)
				vchan_cookie_complete(&d->vd);
		}
	}
out:
	spin_unlock_irqrestore(&uc->vc.lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t udma_udma_irq_handler(int irq, void *data)
{
	struct udma_chan *uc = data;
	struct udma_desc *d;
	unsigned long flags;

	spin_lock_irqsave(&uc->vc.lock, flags);
	d = uc->desc;
	if (d) {
		d->tr_idx = (d->tr_idx + 1) % d->sglen;

		if (uc->cyclic) {
			vchan_cyclic_callback(&d->vd);
		} else {
			/* TODO: figure out the real amount of data */
			uc->bcnt += d->residue;
			vchan_cookie_complete(&d->vd);
		}
	}

	spin_unlock_irqrestore(&uc->vc.lock, flags);

	return IRQ_HANDLED;
}

/**
 * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
 * @from: Start the search from this flow id number
 * @cnt: Number of consecutive flow ids to allocate
 *
 * Allocate range of RX flow ids for future use, those flows can be requested
 * only using explicit flow id number. if @from is set to -1 it will try to find
 * first free range. if @from is positive value it will force allocation only
 * of the specified range of flows.
 *
 * Returns -ENOMEM if can't find free range.
 * -EEXIST if requested range is busy.
 * -EINVAL if wrong input values passed.
 * Returns flow id on success.
 */
static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
{
	int start, tmp_from;
	DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);

	tmp_from = from;
	if (tmp_from < 0)
		tmp_from = ud->rchan_cnt;
	/* default flows can't be allocated and accessible only by id */
	if (tmp_from < ud->rchan_cnt)
		return -EINVAL;

	if (tmp_from + cnt > ud->rflow_cnt)
		return -EINVAL;

	bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
		  ud->rflow_cnt);

	start = bitmap_find_next_zero_area(tmp,
					   ud->rflow_cnt,
					   tmp_from, cnt, 0);
	if (start >= ud->rflow_cnt)
		return -ENOMEM;

	if (from >= 0 && start != from)
		return -EEXIST;

	bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
	return start;
}

static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
{
	if (from < ud->rchan_cnt)
		return -EINVAL;
	if (from + cnt > ud->rflow_cnt)
		return -EINVAL;

	bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
	return 0;
}

static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
{
	/*
	 * Attempt to request rflow by ID can be made for any rflow
	 * if not in use with assumption that caller knows what's doing.
	 * TI-SCI FW will perform additional permission check anyway, it's
	 * safe
	 */

	if (id < 0 || id >= ud->rflow_cnt)
		return ERR_PTR(-ENOENT);

	if (test_bit(id, ud->rflow_in_use))
		return ERR_PTR(-ENOENT);

	/* GP rflow has to be allocated first */
	if (!test_bit(id, ud->rflow_gp_map) &&
	    !test_bit(id, ud->rflow_gp_map_allocated))
		return ERR_PTR(-EINVAL);

	dev_dbg(ud->dev, "get rflow%d\n", id);
	set_bit(id, ud->rflow_in_use);
	return &ud->rflows[id];
}

static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
{
	if (!test_bit(rflow->id, ud->rflow_in_use)) {
		dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
		return;
	}

	dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
	clear_bit(rflow->id, ud->rflow_in_use);
}
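
/*
 * UDMA_RESERVE_RESOURCE() expands to __udma_reserve_tchan() and
 * __udma_reserve_rchan(): either grab the explicitly requested channel id or
 * search the channel bitmap for a free one, starting at the first channel of
 * the requested throughput level.
 */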
#define UDMA_RESERVE_RESOURCE(res)					\
static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud,	\
					       enum udma_tp_level tpl,	\
					       int id)			\
{									\
	if (id >= 0) {							\
		if (test_bit(id, ud->res##_map)) {			\
			dev_err(ud->dev, "res##%d is in use\n", id);	\
			return ERR_PTR(-ENOENT);			\
		}							\
	} else {							\
		int start;						\
									\
		if (tpl >= ud->match_data->tpl_levels)			\
			tpl = ud->match_data->tpl_levels - 1;		\
									\
		start = ud->match_data->level_start_idx[tpl];		\
									\
		id = find_next_zero_bit(ud->res##_map, ud->res##_cnt,	\
					start);				\
		if (id == ud->res##_cnt) {				\
			return ERR_PTR(-ENOENT);			\
		}							\
	}								\
									\
	set_bit(id, ud->res##_map);					\
	return &ud->res##s[id];						\
}

UDMA_RESERVE_RESOURCE(tchan);
UDMA_RESERVE_RESOURCE(rchan);

static int udma_get_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return 0;
	}

	uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, -1);
	if (IS_ERR(uc->tchan))
		return PTR_ERR(uc->tchan);

	return 0;
}

static int udma_get_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return 0;
	}

	uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, -1);
	if (IS_ERR(uc->rchan))
		return PTR_ERR(uc->rchan);

	return 0;
}

static int udma_get_chan_pair(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	const struct udma_match_data *match_data = ud->match_data;
	int chan_id, end;

	if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
		dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
			 uc->id, uc->tchan->id);
		return 0;
	}

	if (uc->tchan) {
		dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return -EBUSY;
	} else if (uc->rchan) {
		dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return -EBUSY;
	}

	/* Can be optimized, but let's have it like this for now */
	end = min(ud->tchan_cnt, ud->rchan_cnt);
	/* Try to use the highest TPL channel pair for MEM_TO_MEM channels */
	chan_id = match_data->level_start_idx[match_data->tpl_levels - 1];
	for (; chan_id < end; chan_id++) {
		if (!test_bit(chan_id, ud->tchan_map) &&
		    !test_bit(chan_id, ud->rchan_map))
			break;
	}

	if (chan_id == end)
		return -ENOENT;

	set_bit(chan_id, ud->tchan_map);
	set_bit(chan_id, ud->rchan_map);
	uc->tchan = &ud->tchans[chan_id];
	uc->rchan = &ud->rchans[chan_id];

	return 0;
}

static int udma_get_rflow(struct udma_chan *uc, int flow_id)
{
	struct udma_dev *ud = uc->ud;

	if (!uc->rchan) {
		dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
		return -EINVAL;
	}

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
			uc->id, uc->rflow->id);
		return 0;
	}

	uc->rflow = __udma_get_rflow(ud, flow_id);
	if (IS_ERR(uc->rflow))
		return PTR_ERR(uc->rflow);

	return 0;
}

static void udma_put_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
			uc->rchan->id);
		clear_bit(uc->rchan->id, ud->rchan_map);
		uc->rchan = NULL;
	}
}

static void udma_put_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
			uc->tchan->id);
		clear_bit(uc->tchan->id, ud->tchan_map);
		uc->tchan = NULL;
	}
}

static void udma_put_rflow(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
			uc->rflow->id);
		__udma_put_rflow(ud, uc->rflow);
		uc->rflow = NULL;
	}
}

static void udma_free_tx_resources(struct udma_chan *uc)
{
	if (!uc->tchan)
		return;

	k3_ringacc_ring_free(uc->tchan->t_ring);
	k3_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->t_ring = NULL;
	uc->tchan->tc_ring = NULL;

	udma_put_tchan(uc);
}

static int udma_alloc_tx_resources(struct udma_chan *uc)
{
	struct k3_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	int ret;

	ret = udma_get_tchan(uc);
	if (ret)
		return ret;

	uc->tchan->t_ring = k3_ringacc_request_ring(ud->ringacc,
						    uc->tchan->id, 0);
	if (!uc->tchan->t_ring) {
		ret = -EBUSY;
		goto err_tx_ring;
	}

	uc->tchan->tc_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0);
	if (!uc->tchan->tc_ring) {
		ret = -EBUSY;
		goto err_txc_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
	ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;

	ret = k3_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
	ret |= k3_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);
	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->tc_ring = NULL;
err_txc_ring:
	k3_ringacc_ring_free(uc->tchan->t_ring);
	uc->tchan->t_ring = NULL;
err_tx_ring:
	udma_put_tchan(uc);

	return ret;
}

static void udma_free_rx_resources(struct udma_chan *uc)
{
	if (!uc->rchan)
		return;

	if (uc->rflow) {
		struct udma_rflow *rflow = uc->rflow;

		k3_ringacc_ring_free(rflow->fd_ring);
		k3_ringacc_ring_free(rflow->r_ring);
		rflow->fd_ring = NULL;
		rflow->r_ring = NULL;

		udma_put_rflow(uc);
	}

	udma_put_rchan(uc);
}

static int udma_alloc_rx_resources(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct k3_ring_cfg ring_cfg;
	struct udma_rflow *rflow;
	int fd_ring_id;
	int ret;

	ret = udma_get_rchan(uc);
	if (ret)
		return ret;

	/* For MEM_TO_MEM we don't need rflow or rings */
	if (uc->config.dir == DMA_MEM_TO_MEM)
		return 0;

	ret = udma_get_rflow(uc, uc->rchan->id);
	if (ret) {
		ret = -EBUSY;
		goto err_rflow;
	}

	rflow = uc->rflow;
	fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;
	rflow->fd_ring = k3_ringacc_request_ring(ud->ringacc, fd_ring_id, 0);
	if (!rflow->fd_ring) {
		ret = -EBUSY;
		goto err_rx_ring;
	}

	rflow->r_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0);
	if (!rflow->r_ring) {
		ret = -EBUSY;
		goto err_rxc_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));

	if (uc->config.pkt_mode)
		ring_cfg.size = SG_MAX_SEGMENTS;
	else
		ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;

	ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;

	ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
	ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
	ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_ringacc_ring_free(rflow->r_ring);
	rflow->r_ring = NULL;
err_rxc_ring:
	k3_ringacc_ring_free(rflow->fd_ring);
	rflow->fd_ring = NULL;
err_rx_ring:
	udma_put_rflow(uc);
err_rflow:
	udma_put_rchan(uc);

	return ret;
}

#define TISCI_TCHAN_VALID_PARAMS (				\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID)

#define TISCI_RCHAN_VALID_PARAMS (				\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID)

static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct udma_tchan *tchan = uc->tchan;
	struct udma_rchan *rchan = uc->rchan;
	int ret = 0;

	/* Non synchronized - mem to mem type of transfer */
	int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };

	req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
	req_tx.nav_id = tisci_rm->tisci_dev_id;
	req_tx.index = tchan->id;
	req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
	req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
	req_tx.txcq_qnum = tc_ring;

	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
	if (ret) {
		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
		return ret;
	}

	req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
	req_rx.nav_id = tisci_rm->tisci_dev_id;
	req_rx.index = rchan->id;
	req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
	req_rx.rxcq_qnum = tc_ring;
	req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;

	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
	if (ret)
		dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret);

	return ret;
}

static int udma_tisci_tx_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct udma_tchan *tchan = uc->tchan;
	int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
	u32 mode, fetch_size;
	int ret = 0;

	if (uc->config.pkt_mode) {
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
		fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
						   uc->config.psd_size, 0);
	} else {
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
		fetch_size = sizeof(struct cppi5_desc_hdr_t);
	}

	req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
	req_tx.nav_id = tisci_rm->tisci_dev_id;
	req_tx.index = tchan->id;
	req_tx.tx_chan_type = mode;
	req_tx.tx_supr_tdpkt = uc->config.notdpkt;
	req_tx.tx_fetch_size = fetch_size >> 2;
	req_tx.txcq_qnum = tc_ring;

	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
	if (ret)
		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);

	return ret;
}

static int udma_tisci_rx_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct udma_rchan *rchan = uc->rchan;
	int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring);
	int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring);
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
	u32 mode, fetch_size;
	int ret = 0;

	if (uc->config.pkt_mode) {
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
		fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
						   uc->config.psd_size, 0);
	} else {
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
		fetch_size = sizeof(struct cppi5_desc_hdr_t);
	}

	req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
	req_rx.nav_id = tisci_rm->tisci_dev_id;
	req_rx.index = rchan->id;
	req_rx.rx_fetch_size = fetch_size >> 2;
	req_rx.rxcq_qnum = rx_ring;
	req_rx.rx_chan_type = mode;

	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
	if (ret) {
		dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
		return ret;
	}

	flow_req.valid_params =
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;

	flow_req.nav_id = tisci_rm->tisci_dev_id;
	flow_req.flow_index = rchan->id;

	if (uc->config.needs_epib)
		flow_req.rx_einfo_present = 1;
	else
		flow_req.rx_einfo_present = 0;
	if (uc->config.psd_size)
		flow_req.rx_psinfo_present = 1;
	else
		flow_req.rx_psinfo_present = 0;
	flow_req.rx_error_handling = 1;
	flow_req.rx_dest_qnum = rx_ring;
	flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE;
	flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG;
	flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI;
	flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO;
	flow_req.rx_fdq0_sz0_qnum = fd_ring;
	flow_req.rx_fdq1_qnum = fd_ring;
	flow_req.rx_fdq2_qnum = fd_ring;
	flow_req.rx_fdq3_qnum = fd_ring;

	ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
	if (ret)
		dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);

	return ret;
}

static int udma_alloc_chan_resources(struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	struct udma_dev *ud = to_udma_dev(chan->device);
	const struct udma_match_data *match_data = ud->match_data;
	struct k3_ring *irq_ring;
	u32 irq_udma_idx;
	int ret;

	if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
		uc->use_dma_pool = true;
		/* in case of MEM_TO_MEM we have maximum of two TRs */
		if (uc->config.dir == DMA_MEM_TO_MEM) {
			uc->config.hdesc_size = cppi5_trdesc_calc_size(
					sizeof(struct cppi5_tr_type15_t), 2);
			uc->config.pkt_mode = false;
		}
	}

	if (uc->use_dma_pool) {
		uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
						 uc->config.hdesc_size,
						 ud->desc_align,
						 0);
		if (!uc->hdesc_pool) {
			dev_err(ud->ddev.dev,
				"Descriptor pool allocation failed\n");
			uc->use_dma_pool = false;
			return -ENOMEM;
		}
	}

	/*
	 * Make sure that the completion is in a known state:
	 * No teardown, the channel is idle
	 */
	reinit_completion(&uc->teardown_completed);
	complete_all(&uc->teardown_completed);
	uc->state = UDMA_CHAN_IS_IDLE;

	switch (uc->config.dir) {
	case DMA_MEM_TO_MEM:
		/* Non synchronized - mem to mem type of transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
			uc->id);

		ret = udma_get_chan_pair(uc);
		if (ret)
			return ret;

		ret = udma_alloc_tx_resources(uc);
		if (ret)
			return ret;

		ret = udma_alloc_rx_resources(uc);
		if (ret) {
			udma_free_tx_resources(uc);
			return ret;
		}

		uc->config.src_thread = ud->psil_base + uc->tchan->id;
		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
					K3_PSIL_DST_THREAD_ID_OFFSET;

		irq_ring = uc->tchan->tc_ring;
		irq_udma_idx = uc->tchan->id;

		ret = udma_tisci_m2m_channel_config(uc);
		break;
	case DMA_MEM_TO_DEV:
		/* Slave transfer synchronized - mem to dev (TX) transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
			uc->id);

		ret = udma_alloc_tx_resources(uc);
		if (ret) {
			uc->config.remote_thread_id = -1;
			return ret;
		}

		uc->config.src_thread = ud->psil_base + uc->tchan->id;
		uc->config.dst_thread = uc->config.remote_thread_id;
		uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;

		irq_ring = uc->tchan->tc_ring;
		irq_udma_idx = uc->tchan->id;

		ret = udma_tisci_tx_channel_config(uc);
		break;
	case DMA_DEV_TO_MEM:
		/* Slave transfer synchronized - dev to mem (RX) transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
			uc->id);

		ret = udma_alloc_rx_resources(uc);
		if (ret) {
			uc->config.remote_thread_id = -1;
			return ret;
		}

		uc->config.src_thread = uc->config.remote_thread_id;
		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
					K3_PSIL_DST_THREAD_ID_OFFSET;

		irq_ring = uc->rflow->r_ring;
		irq_udma_idx = match_data->rchan_oes_offset + uc->rchan->id;

		ret = udma_tisci_rx_channel_config(uc);
		break;
	default:
		/* Can not happen */
		dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
			__func__, uc->id, uc->config.dir);
		return -EINVAL;
	}

	/* check if the channel configuration was successful */
	if (ret)
		goto err_res_free;

	if (udma_is_chan_running(uc)) {
		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
		udma_stop(uc);
		if (udma_is_chan_running(uc)) {
			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
			goto err_res_free;
		}
	}

	/* PSI-L pairing */
	ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
	if (ret) {
		dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
			uc->config.src_thread, uc->config.dst_thread);
		goto err_res_free;
	}

	uc->psil_paired = true;

	uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring);
	if (uc->irq_num_ring <= 0) {
		dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
			k3_ringacc_get_ring_id(irq_ring));
		ret = -EINVAL;
		goto err_psi_free;
	}

	ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
			  IRQF_TRIGGER_HIGH, uc->name, uc);
	if (ret) {
		dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
		goto err_irq_free;
	}

	/* Event from UDMA (TR events) only needed for slave TR mode channels */
	if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) {
		uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev,
							    irq_udma_idx);
		if (uc->irq_num_udma <= 0) {
			dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
				irq_udma_idx);
			free_irq(uc->irq_num_ring, uc);
			ret = -EINVAL;
			goto err_irq_free;
		}

		ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
				  uc->name, uc);
		if (ret) {
			dev_err(ud->dev, "chan%d: UDMA irq request failed\n",
				uc->id);
			free_irq(uc->irq_num_ring, uc);
			goto err_irq_free;
		}
	} else {
		uc->irq_num_udma = 0;
	}

	udma_reset_rings(uc);

	INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
				  udma_check_tx_completion);
	return 0;

err_irq_free:
	uc->irq_num_ring = 0;
	uc->irq_num_udma = 0;
err_psi_free:
	navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
	uc->psil_paired = false;
err_res_free:
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);

	udma_reset_uchan(uc);

	if (uc->use_dma_pool) {
		dma_pool_destroy(uc->hdesc_pool);
		uc->use_dma_pool = false;
	}

	return ret;
}

static int udma_slave_config(struct dma_chan *chan,
			     struct dma_slave_config *cfg)
{
	struct udma_chan *uc = to_udma_chan(chan);

	memcpy(&uc->cfg, cfg, sizeof(uc->cfg));

	return 0;
}
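
/*
 * A TR mode transfer uses a single CPPI5 TR descriptor: the header is
 * followed by the array of TR records and then by the TR response array.
 * udma_alloc_tr_desc() lays this out either from the per-channel dmapool or
 * from a coherent allocation, and points the descriptor's return policy at
 * the channel's completion ring.
 */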
static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
					    size_t tr_size, int tr_count,
					    enum dma_transfer_direction dir)
{
	struct udma_hwdesc *hwdesc;
	struct cppi5_desc_hdr_t *tr_desc;
	struct udma_desc *d;
	u32 reload_count = 0;
	u32 ring_id;

	switch (tr_size) {
	case 16:
	case 32:
	case 64:
	case 128:
		break;
	default:
		dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
		return NULL;
	}

	/* We have only one descriptor containing multiple TRs */
	d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT);
	if (!d)
		return NULL;

	d->sglen = tr_count;

	d->hwdesc_count = 1;
	hwdesc = &d->hwdesc[0];

	/* Allocate memory for DMA ring descriptor */
	if (uc->use_dma_pool) {
		hwdesc->cppi5_desc_size = uc->config.hdesc_size;
		hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
						GFP_NOWAIT,
						&hwdesc->cppi5_desc_paddr);
	} else {
		hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size,
								 tr_count);
		hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
						uc->ud->desc_align);
		hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
						hwdesc->cppi5_desc_size,
						&hwdesc->cppi5_desc_paddr,
						GFP_NOWAIT);
	}

	if (!hwdesc->cppi5_desc_vaddr) {
		kfree(d);
		return NULL;
	}

	/* Start of the TR req records */
	hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
	/* Start address of the TR response array */
	hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count;

	tr_desc = hwdesc->cppi5_desc_vaddr;

	if (uc->cyclic)
		reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE;

	if (dir == DMA_DEV_TO_MEM)
		ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
	else
		ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);

	cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count);
	cppi5_desc_set_pktids(tr_desc, uc->id,
			      CPPI5_INFO1_DESC_FLOWID_DEFAULT);
	cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);

	return d;
}

static struct udma_desc *
udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
		      unsigned int sglen, enum dma_transfer_direction dir,
		      unsigned long tx_flags, void *context)
{
	enum dma_slave_buswidth dev_width;
	struct scatterlist *sgent;
	struct udma_desc *d;
	size_t tr_size;
	struct cppi5_tr_type1_t *tr_req = NULL;
	unsigned int i;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_width = uc->cfg.src_addr_width;
		burst = uc->cfg.src_maxburst;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_width = uc->cfg.dst_addr_width;
		burst = uc->cfg.dst_maxburst;
	} else {
		dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	if (!burst)
		burst = 1;

	/* Now allocate and setup the descriptor. */
	tr_size = sizeof(struct cppi5_tr_type1_t);
	d = udma_alloc_tr_desc(uc, tr_size, sglen, dir);
	if (!d)
		return NULL;

	tr_req = d->hwdesc[0].tr_req_base;
	for_each_sg(sgl, sgent, sglen, i) {
		d->residue += sg_dma_len(sgent);

		cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false,
			      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
		cppi5_tr_csf_set(&tr_req[i].flags, CPPI5_TR_CSF_SUPR_EVT);

		tr_req[i].addr = sg_dma_address(sgent);
		tr_req[i].icnt0 = burst * dev_width;
		tr_req[i].dim1 = burst * dev_width;
		tr_req[i].icnt1 = sg_dma_len(sgent) / tr_req[i].icnt0;
	}

	cppi5_tr_csf_set(&tr_req[i - 1].flags, CPPI5_TR_CSF_EOP);

	return d;
}
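
/*
 * PDMA (PSIL_EP_PDMA_XY) endpoints need a static TR on the peripheral side:
 * the element size is derived from the slave bus width, the element count
 * from the burst size, and the Z (burst) count tells the PDMA when to close
 * a packet so that a non-cyclic TR mode transfer does not stall.
 */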
static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
				   enum dma_slave_buswidth dev_width,
				   u16 elcnt)
{
	if (uc->config.ep_type != PSIL_EP_PDMA_XY)
		return 0;

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		d->static_tr.elsize = 0;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		d->static_tr.elsize = 1;
		break;
	case DMA_SLAVE_BUSWIDTH_3_BYTES:
		d->static_tr.elsize = 2;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		d->static_tr.elsize = 3;
		break;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		d->static_tr.elsize = 4;
		break;
	default: /* not reached */
		return -EINVAL;
	}

	d->static_tr.elcnt = elcnt;

	/*
	 * PDMA must close the packet when the channel is in packet mode.
	 * For TR mode when the channel is not cyclic we also need PDMA to close
	 * the packet otherwise the transfer will stall because PDMA holds on
	 * the data it has received from the peripheral.
	 */
	if (uc->config.pkt_mode || !uc->cyclic) {
		unsigned int div = dev_width * elcnt;

		if (uc->cyclic)
			d->static_tr.bstcnt = d->residue / d->sglen / div;
		else
			d->static_tr.bstcnt = d->residue / div;

		if (uc->config.dir == DMA_DEV_TO_MEM &&
		    d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
			return -EINVAL;
	} else {
		d->static_tr.bstcnt = 0;
	}

	return 0;
}

static struct udma_desc *
udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
		       unsigned int sglen, enum dma_transfer_direction dir,
		       unsigned long tx_flags, void *context)
{
	struct scatterlist *sgent;
	struct cppi5_host_desc_t *h_desc = NULL;
	struct udma_desc *d;
	u32 ring_id;
	unsigned int i;

	d = kzalloc(sizeof(*d) + sglen * sizeof(d->hwdesc[0]), GFP_NOWAIT);
	if (!d)
		return NULL;

	d->sglen = sglen;
	d->hwdesc_count = sglen;

	if (dir == DMA_DEV_TO_MEM)
		ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
	else
		ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);

	for_each_sg(sgl, sgent, sglen, i) {
		struct udma_hwdesc *hwdesc = &d->hwdesc[i];
		dma_addr_t sg_addr = sg_dma_address(sgent);
		struct cppi5_host_desc_t *desc;
		size_t sg_len = sg_dma_len(sgent);

		hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
						GFP_NOWAIT,
						&hwdesc->cppi5_desc_paddr);
		if (!hwdesc->cppi5_desc_vaddr) {
			dev_err(uc->ud->dev,
				"descriptor%d allocation failed\n", i);

			udma_free_hwdesc(uc, d);
			kfree(d);
			return NULL;
		}

		d->residue += sg_len;
		hwdesc->cppi5_desc_size = uc->config.hdesc_size;
		desc = hwdesc->cppi5_desc_vaddr;

		if (i == 0) {
			cppi5_hdesc_init(desc, 0, 0);
			/* Flow and Packet ID */
			cppi5_desc_set_pktids(&desc->hdr, uc->id,
					      CPPI5_INFO1_DESC_FLOWID_DEFAULT);
			cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id);
		} else {
			cppi5_hdesc_reset_hbdesc(desc);
			cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff);
		}

		/* attach the sg buffer to the descriptor */
		cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len);

		/* Attach link as host buffer descriptor */
		if (h_desc)
			cppi5_hdesc_link_hbdesc(h_desc,
						hwdesc->cppi5_desc_paddr);

		if (dir == DMA_MEM_TO_DEV)
			h_desc = desc;
	}

	if (d->residue >= SZ_4M) {
		dev_err(uc->ud->dev,
			"%s: Transfer size %u is over the supported 4M range\n",
			__func__, d->residue);
		udma_free_hwdesc(uc, d);
		kfree(d);
		return NULL;
	}

	h_desc = d->hwdesc[0].cppi5_desc_vaddr;
	cppi5_hdesc_set_pktlen(h_desc, d->residue);

	return d;
}
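
/*
 * Client drivers hand over (and read back) the CPPI5 EPIB and protocol
 * specific words through the dmaengine metadata ops below; the data lives in
 * the first host descriptor of the transfer.
 */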
static int udma_attach_metadata(struct dma_async_tx_descriptor *desc,
				void *data, size_t len)
{
	struct udma_desc *d = to_udma_desc(desc);
	struct udma_chan *uc = to_udma_chan(desc->chan);
	struct cppi5_host_desc_t *h_desc;
	u32 psd_size = len;
	u32 flags = 0;

	if (!uc->config.pkt_mode || !uc->config.metadata_size)
		return -ENOTSUPP;

	if (!data || len > uc->config.metadata_size)
		return -EINVAL;

	if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE)
		return -EINVAL;

	h_desc = d->hwdesc[0].cppi5_desc_vaddr;
	if (d->dir == DMA_MEM_TO_DEV)
		memcpy(h_desc->epib, data, len);

	if (uc->config.needs_epib)
		psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;

	d->metadata = data;
	d->metadata_size = len;
	if (uc->config.needs_epib)
		flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;

	cppi5_hdesc_update_flags(h_desc, flags);
	cppi5_hdesc_update_psdata_size(h_desc, psd_size);

	return 0;
}

static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
				   size_t *payload_len, size_t *max_len)
{
	struct udma_desc *d = to_udma_desc(desc);
	struct udma_chan *uc = to_udma_chan(desc->chan);
	struct cppi5_host_desc_t *h_desc;

	if (!uc->config.pkt_mode || !uc->config.metadata_size)
		return ERR_PTR(-ENOTSUPP);

	h_desc = d->hwdesc[0].cppi5_desc_vaddr;

	*max_len = uc->config.metadata_size;

	*payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ?
		       CPPI5_INFO0_HDESC_EPIB_SIZE : 0;
	*payload_len += cppi5_hdesc_get_psdata_size(h_desc);

	return h_desc->epib;
}

static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc,
				 size_t payload_len)
{
	struct udma_desc *d = to_udma_desc(desc);
	struct udma_chan *uc = to_udma_chan(desc->chan);
	struct cppi5_host_desc_t *h_desc;
	u32 psd_size = payload_len;
	u32 flags = 0;

	if (!uc->config.pkt_mode || !uc->config.metadata_size)
		return -ENOTSUPP;

	if (payload_len > uc->config.metadata_size)
		return -EINVAL;

	if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE)
		return -EINVAL;

	h_desc = d->hwdesc[0].cppi5_desc_vaddr;

	if (uc->config.needs_epib) {
		psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
		flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
	}

	cppi5_hdesc_update_flags(h_desc, flags);
	cppi5_hdesc_update_psdata_size(h_desc, psd_size);

	return 0;
}

static struct dma_descriptor_metadata_ops metadata_ops = {
	.attach		= udma_attach_metadata,
	.get_ptr	= udma_get_metadata_ptr,
	.set_len	= udma_set_metadata_len,
};

static struct dma_async_tx_descriptor *
udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		   unsigned int sglen, enum dma_transfer_direction dir,
		   unsigned long tx_flags, void *context)
{
	struct udma_chan *uc = to_udma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct udma_desc *d;
	u32 burst;

	if (dir != uc->config.dir) {
		dev_err(chan->device->dev,
			"%s: chan%d is for %s, not supporting %s\n",
			__func__, uc->id,
			dmaengine_get_direction_text(uc->config.dir),
			dmaengine_get_direction_text(dir));
		return NULL;
	}

	if (dir == DMA_DEV_TO_MEM) {
		dev_width = uc->cfg.src_addr_width;
		burst = uc->cfg.src_maxburst;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_width = uc->cfg.dst_addr_width;
		burst = uc->cfg.dst_maxburst;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	if (!burst)
		burst = 1;

	if (uc->config.pkt_mode)
		d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
					   context);
	else
		d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags,
					  context);

	if (!d)
		return NULL;

	d->dir = dir;

	/* static TR for remote PDMA */
	if (udma_configure_statictr(uc, d, dev_width, burst)) {
		dev_err(uc->ud->dev,
			"%s: StaticTR Z is limited to maximum 4095 (%u)\n",
			__func__, d->static_tr.bstcnt);

		udma_free_hwdesc(uc, d);
		kfree(d);
		return NULL;
	}

	if (uc->config.metadata_size)
		d->vd.tx.metadata_ops = &metadata_ops;

	return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
}

static struct udma_desc *
udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
			size_t buf_len, size_t period_len,
			enum dma_transfer_direction dir, unsigned long flags)
{
	enum dma_slave_buswidth dev_width;
	struct udma_desc *d;
	size_t tr_size;
	struct cppi5_tr_type1_t *tr_req;
	unsigned int i;
	unsigned int periods = buf_len / period_len;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_width = uc->cfg.src_addr_width;
		burst = uc->cfg.src_maxburst;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_width = uc->cfg.dst_addr_width;
		burst = uc->cfg.dst_maxburst;
	} else {
		dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	if (!burst)
		burst = 1;

	/* Now allocate and setup the descriptor. */
	tr_size = sizeof(struct cppi5_tr_type1_t);
	d = udma_alloc_tr_desc(uc, tr_size, periods, dir);
	if (!d)
		return NULL;

	tr_req = d->hwdesc[0].tr_req_base;
	for (i = 0; i < periods; i++) {
		cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false,
			      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);

		tr_req[i].addr = buf_addr + period_len * i;
		tr_req[i].icnt0 = dev_width;
		tr_req[i].icnt1 = period_len / dev_width;
		tr_req[i].dim1 = dev_width;

		if (!(flags & DMA_PREP_INTERRUPT))
			cppi5_tr_csf_set(&tr_req[i].flags,
					 CPPI5_TR_CSF_SUPR_EVT);
	}

	return d;
}

static struct udma_desc *
udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr,
			 size_t buf_len, size_t period_len,
			 enum dma_transfer_direction dir, unsigned long flags)
{
	struct udma_desc *d;
	u32 ring_id;
	int i;
	int periods = buf_len / period_len;

	if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1))
		return NULL;

	if (period_len >= SZ_4M)
		return NULL;

	d = kzalloc(sizeof(*d) + periods * sizeof(d->hwdesc[0]), GFP_NOWAIT);
	if (!d)
		return NULL;

	d->hwdesc_count = periods;

	/* TODO: re-check this... */
	if (dir == DMA_DEV_TO_MEM)
		ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
	else
		ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);

	for (i = 0; i < periods; i++) {
		struct udma_hwdesc *hwdesc = &d->hwdesc[i];
		dma_addr_t period_addr = buf_addr + (period_len * i);
		struct cppi5_host_desc_t *h_desc;

		hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
						GFP_NOWAIT,
						&hwdesc->cppi5_desc_paddr);
		if (!hwdesc->cppi5_desc_vaddr) {
			dev_err(uc->ud->dev,
				"descriptor%d allocation failed\n", i);

			udma_free_hwdesc(uc, d);
			kfree(d);
			return NULL;
		}

		hwdesc->cppi5_desc_size = uc->config.hdesc_size;
		h_desc = hwdesc->cppi5_desc_vaddr;

		cppi5_hdesc_init(h_desc, 0, 0);
		cppi5_hdesc_set_pktlen(h_desc, period_len);

		/* Flow and Packet ID */
		cppi5_desc_set_pktids(&h_desc->hdr, uc->id,
				      CPPI5_INFO1_DESC_FLOWID_DEFAULT);
		cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id);

		/* attach each period to a new descriptor */
		cppi5_hdesc_attach_buf(h_desc,
				       period_addr, period_len,
				       period_addr, period_len);
	}

	return d;
}
static struct dma_async_tx_descriptor *
udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		     size_t period_len, enum dma_transfer_direction dir,
		     unsigned long flags)
{
	struct udma_chan *uc = to_udma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct udma_desc *d;
	u32 burst;

	if (dir != uc->config.dir) {
		dev_err(chan->device->dev,
			"%s: chan%d is for %s, not supporting %s\n",
			__func__, uc->id,
			dmaengine_get_direction_text(uc->config.dir),
			dmaengine_get_direction_text(dir));
		return NULL;
	}

	if (dir == DMA_DEV_TO_MEM) {
		dev_width = uc->cfg.src_addr_width;
		burst = uc->cfg.src_maxburst;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_width = uc->cfg.dst_addr_width;
		burst = uc->cfg.dst_maxburst;
	} else {
		dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	if (uc->config.pkt_mode)
		d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len,
					     dir, flags);
	else
		d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len,
					    dir, flags);

	if (!d)
		return NULL;

	d->sglen = buf_len / period_len;

	d->dir = dir;
	d->residue = buf_len;

	/* static TR for remote PDMA */
	if (udma_configure_statictr(uc, d, dev_width, burst)) {
		dev_err(uc->ud->dev,
			"%s: StaticTR Z is limited to maximum 4095 (%u)\n",
			__func__, d->static_tr.bstcnt);

		udma_free_hwdesc(uc, d);
		kfree(d);
		return NULL;
	}

	if (uc->config.metadata_size)
		d->vd.tx.metadata_ops = &metadata_ops;

	return vchan_tx_prep(&uc->vc, &d->vd, flags);
}
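/*
 * Editor's note: a minimal sketch of how a client would exercise the cyclic
 * path above via the generic dmaengine API; the channel, buffer and callback
 * names are illustrative, not part of this driver:
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_DEV_TO_MEM,
 *		.src_addr = fifo_phys_addr,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 8,
 *	};
 *	struct dma_async_tx_descriptor *desc;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_dma, buf_len, period_len,
 *					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	if (desc) {
 *		desc->callback = period_elapsed_cb;
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 */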
static struct dma_async_tx_descriptor *
udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		     size_t len, unsigned long tx_flags)
{
	struct udma_chan *uc = to_udma_chan(chan);
	struct udma_desc *d;
	struct cppi5_tr_type15_t *tr_req;
	int num_tr;
	size_t tr_size = sizeof(struct cppi5_tr_type15_t);
	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;

	if (uc->config.dir != DMA_MEM_TO_MEM) {
		dev_err(chan->device->dev,
			"%s: chan%d is for %s, not supporting %s\n",
			__func__, uc->id,
			dmaengine_get_direction_text(uc->config.dir),
			dmaengine_get_direction_text(DMA_MEM_TO_MEM));
		return NULL;
	}

	if (len < SZ_64K) {
		num_tr = 1;
		tr0_cnt0 = len;
		tr0_cnt1 = 1;
	} else {
		unsigned long align_to = __ffs(src | dest);

		if (align_to > 3)
			align_to = 3;
		/*
		 * Keep simple: tr0: SZ_64K-alignment blocks,
		 * tr1: the remaining
		 */
		num_tr = 2;
		tr0_cnt0 = (SZ_64K - BIT(align_to));
		if (len / tr0_cnt0 >= SZ_64K) {
			dev_err(uc->ud->dev, "size %zu is not supported\n",
				len);
			return NULL;
		}

		tr0_cnt1 = len / tr0_cnt0;
		tr1_cnt0 = len % tr0_cnt0;
	}

	d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM);
	if (!d)
		return NULL;

	d->dir = DMA_MEM_TO_MEM;
	d->desc_idx = 0;
	d->tr_idx = 0;
	d->residue = len;

	tr_req = d->hwdesc[0].tr_req_base;

	cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
		      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
	cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);

	tr_req[0].addr = src;
	tr_req[0].icnt0 = tr0_cnt0;
	tr_req[0].icnt1 = tr0_cnt1;
	tr_req[0].icnt2 = 1;
	tr_req[0].icnt3 = 1;
	tr_req[0].dim1 = tr0_cnt0;

	tr_req[0].daddr = dest;
	tr_req[0].dicnt0 = tr0_cnt0;
	tr_req[0].dicnt1 = tr0_cnt1;
	tr_req[0].dicnt2 = 1;
	tr_req[0].dicnt3 = 1;
	tr_req[0].ddim1 = tr0_cnt0;

	if (num_tr == 2) {
		cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
			      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
		cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);

		tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
		tr_req[1].icnt0 = tr1_cnt0;
		tr_req[1].icnt1 = 1;
		tr_req[1].icnt2 = 1;
		tr_req[1].icnt3 = 1;

		tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
		tr_req[1].dicnt0 = tr1_cnt0;
		tr_req[1].dicnt1 = 1;
		tr_req[1].dicnt2 = 1;
		tr_req[1].dicnt3 = 1;
	}

	cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);

	if (uc->config.metadata_size)
		d->vd.tx.metadata_ops = &metadata_ops;

	return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
}
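/*
 * Editor's note: a worked example of the two-TR split above. For len = 1 MiB
 * with src and dest both 8-byte aligned, __ffs(src | dest) gives align_to = 3,
 * so tr0_cnt0 = SZ_64K - 8 = 65528. tr0 then moves
 * tr0_cnt1 = 1048576 / 65528 = 16 rows of 65528 bytes and tr1 moves the
 * remaining tr1_cnt0 = 1048576 % 65528 = 128 bytes, with only the last TR
 * raising the EOP event.
 */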
static void udma_issue_pending(struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&uc->vc.lock, flags);

	/* If we have something pending and no active descriptor, then */
	if (vchan_issue_pending(&uc->vc) && !uc->desc) {
		/*
		 * start a descriptor if the channel is NOT [marked as
		 * terminating _and_ it is still running (teardown has not
		 * completed yet)]
		 */
		if (!(uc->state == UDMA_CHAN_IS_TERMINATING &&
		      udma_is_chan_running(uc)))
			udma_start(uc);
	}

	spin_unlock_irqrestore(&uc->vc.lock, flags);
}
static enum dma_status udma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct udma_chan *uc = to_udma_chan(chan);
	enum dma_status ret;
	unsigned long flags;

	spin_lock_irqsave(&uc->vc.lock, flags);

	ret = dma_cookie_status(chan, cookie, txstate);

	if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc))
		ret = DMA_PAUSED;

	if (ret == DMA_COMPLETE || !txstate)
		goto out;

	if (uc->desc && uc->desc->vd.tx.cookie == cookie) {
		u32 peer_bcnt = 0;
		u32 bcnt = 0;
		u32 residue = uc->desc->residue;
		u32 delay = 0;

		if (uc->desc->dir == DMA_MEM_TO_DEV) {
			bcnt = udma_tchanrt_read(uc->tchan,
						 UDMA_TCHAN_RT_SBCNT_REG);

			if (uc->config.ep_type != PSIL_EP_NATIVE) {
				peer_bcnt = udma_tchanrt_read(uc->tchan,
						UDMA_TCHAN_RT_PEER_BCNT_REG);

				if (bcnt > peer_bcnt)
					delay = bcnt - peer_bcnt;
			}
		} else if (uc->desc->dir == DMA_DEV_TO_MEM) {
			bcnt = udma_rchanrt_read(uc->rchan,
						 UDMA_RCHAN_RT_BCNT_REG);

			if (uc->config.ep_type != PSIL_EP_NATIVE) {
				peer_bcnt = udma_rchanrt_read(uc->rchan,
						UDMA_RCHAN_RT_PEER_BCNT_REG);

				if (peer_bcnt > bcnt)
					delay = peer_bcnt - bcnt;
			}
		} else {
			bcnt = udma_tchanrt_read(uc->tchan,
						 UDMA_TCHAN_RT_BCNT_REG);
		}

		if (bcnt && !(bcnt % uc->desc->residue))
			residue = 0;
		else
			residue -= bcnt % uc->desc->residue;

		if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) {
			ret = DMA_COMPLETE;
			delay = 0;
		}

		dma_set_residue(txstate, residue);
		dma_set_in_flight_bytes(txstate, delay);
	} else {
		ret = DMA_COMPLETE;
	}

out:
	spin_unlock_irqrestore(&uc->vc.lock, flags);
	return ret;
}
static int udma_pause(struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);

	/* pause the channel */
	switch (uc->desc->dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_update_bits(uc->rchan,
					 UDMA_RCHAN_RT_PEER_RT_EN_REG,
					 UDMA_PEER_RT_EN_PAUSE,
					 UDMA_PEER_RT_EN_PAUSE);
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_update_bits(uc->tchan,
					 UDMA_TCHAN_RT_PEER_RT_EN_REG,
					 UDMA_PEER_RT_EN_PAUSE,
					 UDMA_PEER_RT_EN_PAUSE);
		break;
	case DMA_MEM_TO_MEM:
		udma_tchanrt_update_bits(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
					 UDMA_CHAN_RT_CTL_PAUSE,
					 UDMA_CHAN_RT_CTL_PAUSE);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int udma_resume(struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);

	/* resume the channel */
	switch (uc->desc->dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_update_bits(uc->rchan,
					 UDMA_RCHAN_RT_PEER_RT_EN_REG,
					 UDMA_PEER_RT_EN_PAUSE, 0);
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_update_bits(uc->tchan,
					 UDMA_TCHAN_RT_PEER_RT_EN_REG,
					 UDMA_PEER_RT_EN_PAUSE, 0);
		break;
	case DMA_MEM_TO_MEM:
		udma_tchanrt_update_bits(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
					 UDMA_CHAN_RT_CTL_PAUSE, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
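/*
 * Editor's note: the pause/resume callbacks above are reached through the
 * standard dmaengine wrappers; a minimal client-side sketch, with chan and
 * dev purely illustrative:
 *
 *	if (dmaengine_pause(chan))
 *		dev_warn(dev, "channel pause not supported\n");
 *	...
 *	dmaengine_resume(chan);
 */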
static int udma_terminate_all(struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&uc->vc.lock, flags);

	if (udma_is_chan_running(uc))
		udma_stop(uc);

	if (uc->desc) {
		uc->terminated_desc = uc->desc;
		uc->desc = NULL;
		uc->terminated_desc->terminated = true;
		cancel_delayed_work(&uc->tx_drain.work);
	}

	vchan_get_all_descriptors(&uc->vc, &head);
	spin_unlock_irqrestore(&uc->vc.lock, flags);
	vchan_dma_desc_free_list(&uc->vc, &head);

	return 0;
}
static void udma_synchronize(struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	unsigned long timeout = msecs_to_jiffies(1000);

	vchan_synchronize(&uc->vc);

	if (uc->state == UDMA_CHAN_IS_TERMINATING) {
		timeout = wait_for_completion_timeout(&uc->teardown_completed,
						      timeout);
		if (!timeout) {
			dev_warn(uc->ud->dev, "chan%d teardown timeout!\n",
				 uc->id);
			udma_dump_chan_stdata(uc);
			udma_reset_chan(uc, true);
		}
	}

	udma_reset_chan(uc, false);
	if (udma_is_chan_running(uc))
		dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);

	cancel_delayed_work_sync(&uc->tx_drain.work);
	udma_reset_rings(uc);
}
static void udma_desc_pre_callback(struct virt_dma_chan *vc,
				   struct virt_dma_desc *vd,
				   struct dmaengine_result *result)
{
	struct udma_chan *uc = to_udma_chan(&vc->chan);
	struct udma_desc *d;

	if (!vd)
		return;

	d = to_udma_desc(&vd->tx);

	if (d->metadata_size)
		udma_fetch_epib(uc, d);

	/* Provide residue information for the client */
	if (result) {
		void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);

		if (cppi5_desc_get_type(desc_vaddr) ==
		    CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
			result->residue = d->residue -
					  cppi5_hdesc_get_pktlen(desc_vaddr);
			if (result->residue)
				result->result = DMA_TRANS_ABORTED;
			else
				result->result = DMA_TRANS_NOERROR;
		} else {
			result->residue = 0;
			result->result = DMA_TRANS_NOERROR;
		}
	}
}
/*
 * This tasklet handles the completion of a DMA descriptor by
 * calling its callback and freeing it.
 */
static void udma_vchan_complete(unsigned long arg)
{
	struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
	struct virt_dma_desc *vd, *_vd;
	struct dmaengine_desc_callback cb;
	LIST_HEAD(head);

	spin_lock_irq(&vc->lock);
	list_splice_tail_init(&vc->desc_completed, &head);
	vd = vc->cyclic;
	if (vd) {
		vc->cyclic = NULL;
		dmaengine_desc_get_callback(&vd->tx, &cb);
	} else {
		memset(&cb, 0, sizeof(cb));
	}
	spin_unlock_irq(&vc->lock);

	udma_desc_pre_callback(vc, vd, NULL);
	dmaengine_desc_callback_invoke(&cb, NULL);

	list_for_each_entry_safe(vd, _vd, &head, node) {
		struct dmaengine_result result;

		dmaengine_desc_get_callback(&vd->tx, &cb);

		list_del(&vd->node);

		udma_desc_pre_callback(vc, vd, &result);
		dmaengine_desc_callback_invoke(&cb, &result);

		vchan_vdesc_fini(vd);
	}
}
static void udma_free_chan_resources(struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	struct udma_dev *ud = to_udma_dev(chan->device);

	udma_terminate_all(chan);
	if (uc->terminated_desc) {
		udma_reset_chan(uc, false);
		udma_reset_rings(uc);
	}

	cancel_delayed_work_sync(&uc->tx_drain.work);
	destroy_delayed_work_on_stack(&uc->tx_drain.work);

	if (uc->irq_num_ring > 0) {
		free_irq(uc->irq_num_ring, uc);

		uc->irq_num_ring = 0;
	}
	if (uc->irq_num_udma > 0) {
		free_irq(uc->irq_num_udma, uc);

		uc->irq_num_udma = 0;
	}

	/* Release PSI-L pairing */
	if (uc->psil_paired) {
		navss_psil_unpair(ud, uc->config.src_thread,
				  uc->config.dst_thread);
		uc->psil_paired = false;
	}

	vchan_free_chan_resources(&uc->vc);
	tasklet_kill(&uc->vc.task);

	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);
	udma_reset_uchan(uc);

	if (uc->use_dma_pool) {
		dma_pool_destroy(uc->hdesc_pool);
		uc->use_dma_pool = false;
	}
}
static struct platform_driver udma_driver;

static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
{
	struct udma_chan_config *ucc;
	struct psil_endpoint_config *ep_config;
	struct udma_chan *uc;
	struct udma_dev *ud;
	u32 *args;

	if (chan->device->dev->driver != &udma_driver.driver)
		return false;

	uc = to_udma_chan(chan);
	ucc = &uc->config;
	ud = uc->ud;
	args = param;

	ucc->remote_thread_id = args[0];

	if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
		ucc->dir = DMA_MEM_TO_DEV;
	else
		ucc->dir = DMA_DEV_TO_MEM;

	ep_config = psil_get_ep_config(ucc->remote_thread_id);
	if (IS_ERR(ep_config)) {
		dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
			ucc->remote_thread_id);
		ucc->dir = DMA_MEM_TO_MEM;
		ucc->remote_thread_id = -1;
		return false;
	}

	ucc->pkt_mode = ep_config->pkt_mode;
	ucc->channel_tpl = ep_config->channel_tpl;
	ucc->notdpkt = ep_config->notdpkt;
	ucc->ep_type = ep_config->ep_type;

	if (ucc->ep_type != PSIL_EP_NATIVE) {
		const struct udma_match_data *match_data = ud->match_data;

		if (match_data->flags & UDMA_FLAG_PDMA_ACC32)
			ucc->enable_acc32 = ep_config->pdma_acc32;
		if (match_data->flags & UDMA_FLAG_PDMA_BURST)
			ucc->enable_burst = ep_config->pdma_burst;
	}

	ucc->needs_epib = ep_config->needs_epib;
	ucc->psd_size = ep_config->psd_size;
	ucc->metadata_size =
		(ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) +
		ucc->psd_size;

	ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
				ucc->metadata_size, ud->desc_align);

	dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id,
		ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir));

	return true;
}
static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *ofdma)
{
	struct udma_dev *ud = ofdma->of_dma_data;
	dma_cap_mask_t mask = ud->ddev.cap_mask;
	struct dma_chan *chan;

	if (dma_spec->args_count != 1)
		return NULL;

	chan = __dma_request_channel(&mask, udma_dma_filter_fn,
				     &dma_spec->args[0], ofdma->of_node);
	if (!chan) {
		dev_err(ud->dev, "get channel fail in %s.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	return chan;
}
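/*
 * Editor's note: the single-cell xlate above maps directly onto the consumer
 * side of the binding; a hedged devicetree sketch, with the controller label,
 * peripheral node and thread IDs purely illustrative:
 *
 *	&some_peripheral {
 *		dmas = <&main_udmap 0xc400>, <&main_udmap 0x4400>;
 *		dma-names = "tx", "rx";
 *	};
 *
 * The one cell is the remote PSI-L thread ID; an ID with
 * K3_PSIL_DST_THREAD_ID_OFFSET set is treated as a destination thread,
 * i.e. a MEM_TO_DEV channel (see udma_dma_filter_fn above).
 */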
static struct udma_match_data am654_main_data = {
	.psil_base = 0x1000,
	.enable_memcpy_support = true,
	.statictr_z_mask = GENMASK(11, 0),
	.rchan_oes_offset = 0x2000,
	.level_start_idx = {
		[0] = 8, /* Normal channels */
		[1] = 0, /* High Throughput channels */
	},
};

static struct udma_match_data am654_mcu_data = {
	.psil_base = 0x6000,
	.enable_memcpy_support = true, /* TEST: DMA domains */
	.statictr_z_mask = GENMASK(11, 0),
	.rchan_oes_offset = 0x2000,
	.level_start_idx = {
		[0] = 2, /* Normal channels */
		[1] = 0, /* High Throughput channels */
	},
};

static struct udma_match_data j721e_main_data = {
	.psil_base = 0x1000,
	.enable_memcpy_support = true,
	.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
	.statictr_z_mask = GENMASK(23, 0),
	.rchan_oes_offset = 0x400,
	.level_start_idx = {
		[0] = 16, /* Normal channels */
		[1] = 4, /* High Throughput channels */
		[2] = 0, /* Ultra High Throughput channels */
	},
};

static struct udma_match_data j721e_mcu_data = {
	.psil_base = 0x6000,
	.enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
	.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
	.statictr_z_mask = GENMASK(23, 0),
	.rchan_oes_offset = 0x400,
	.level_start_idx = {
		[0] = 2, /* Normal channels */
		[1] = 0, /* High Throughput channels */
	},
};
static const struct of_device_id udma_of_match[] = {
	{
		.compatible = "ti,am654-navss-main-udmap",
		.data = &am654_main_data,
	},
	{
		.compatible = "ti,am654-navss-mcu-udmap",
		.data = &am654_mcu_data,
	},
	{
		.compatible = "ti,j721e-navss-main-udmap",
		.data = &j721e_main_data,
	},
	{
		.compatible = "ti,j721e-navss-mcu-udmap",
		.data = &j721e_mcu_data,
	},
	{ /* Sentinel */ },
};
static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
{
	struct resource *res;
	int i;

	for (i = 0; i < MMR_LAST; i++) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   mmr_names[i]);
		ud->mmrs[i] = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(ud->mmrs[i]))
			return PTR_ERR(ud->mmrs[i]);
	}

	return 0;
}
static int udma_setup_resources(struct udma_dev *ud)
{
	struct device *dev = ud->dev;
	int ch_count, ret, i, j;
	u32 cap2, cap3;
	struct ti_sci_resource_desc *rm_desc;
	struct ti_sci_resource *rm_res, irq_res;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	static const char * const range_names[] = { "ti,sci-rm-range-tchan",
						    "ti,sci-rm-range-rchan",
						    "ti,sci-rm-range-rflow" };

	cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);

	ud->rflow_cnt = cap3 & 0x3fff;
	ud->tchan_cnt = cap2 & 0x1ff;
	ud->echan_cnt = (cap2 >> 9) & 0x1ff;
	ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
	ch_count = ud->tchan_cnt + ud->rchan_cnt;

	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
					      sizeof(unsigned long),
					      GFP_KERNEL);
	ud->rflow_gp_map_allocated = devm_kcalloc(dev,
						  BITS_TO_LONGS(ud->rflow_cnt),
						  sizeof(unsigned long),
						  GFP_KERNEL);
	ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
					sizeof(unsigned long),
					GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);

	if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map ||
	    !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans ||
	    !ud->rflows || !ud->rflow_in_use)
		return -ENOMEM;

	/*
	 * RX flows with the same Ids as RX channels are reserved to be used
	 * as default flows if remote HW can't generate flow_ids. Those
	 * RX flows can be requested only explicitly by id.
	 */
	bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt);

	/* by default no GP rflows are assigned to Linux */
	bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);

	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++)
		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);

	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
	} else {
		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->tchan_map, rm_desc->start,
				     rm_desc->num);
			dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
				rm_desc->start, rm_desc->num);
		}
	}
	irq_res.sets = rm_res->sets;

	/* rchan and matching default flow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
	} else {
		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rchan_map, rm_desc->start,
				     rm_desc->num);
			dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
				rm_desc->start, rm_desc->num);
		}
	}

	irq_res.sets += rm_res->sets;
	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	for (i = 0; i < rm_res->sets; i++) {
		irq_res.desc[i].start = rm_res->desc[i].start;
		irq_res.desc[i].num = rm_res->desc[i].num;
	}
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	for (j = 0; j < rm_res->sets; j++, i++) {
		irq_res.desc[i].start = rm_res->desc[j].start +
					ud->match_data->rchan_oes_offset;
		irq_res.desc[i].num = rm_res->desc[j].num;
	}
	ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
	kfree(irq_res.desc);
	if (ret) {
		dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
		return ret;
	}

	/* GP rflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		/* all gp flows are assigned exclusively to Linux */
		bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
			     ud->rflow_cnt - ud->rchan_cnt);
	} else {
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rflow_gp_map, rm_desc->start,
				     rm_desc->num);
			dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n",
				rm_desc->start, rm_desc->num);
		}
	}

	ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
	ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
	if (!ch_count)
		return -ENODEV;

	ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
				    GFP_KERNEL);
	if (!ud->channels)
		return -ENOMEM;

	dev_info(dev, "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
		 ch_count,
		 ud->tchan_cnt - bitmap_weight(ud->tchan_map, ud->tchan_cnt),
		 ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt),
		 ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
					       ud->rflow_cnt));

	return ch_count;
}
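/*
 * Editor's note: a worked example of the GCFG capability decode above, using
 * hypothetical register values. With cap2 = 0x02580078 and cap3 = 0x0000012c:
 * tchan_cnt = cap2 & 0x1ff = 120, echan_cnt = (cap2 >> 9) & 0x1ff = 0,
 * rchan_cnt = (cap2 >> 18) & 0x1ff = 150 and rflow_cnt = cap3 & 0x3fff = 300,
 * so ch_count starts at 120 + 150 = 270 before the TISCI ranges carve out the
 * channels actually owned by Linux.
 */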
#define TI_UDMAC_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
static int udma_probe(struct platform_device *pdev)
{
	struct device_node *navss_node = pdev->dev.parent->of_node;
	struct device *dev = &pdev->dev;
	struct udma_dev *ud;
	const struct of_device_id *match;
	int i, ret;
	int ch_count;

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (ret)
		dev_err(dev, "failed to set dma mask stuff\n");

	ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
	if (!ud)
		return -ENOMEM;

	ret = udma_get_mmrs(pdev, ud);
	if (ret)
		return ret;

	ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
	if (IS_ERR(ud->tisci_rm.tisci))
		return PTR_ERR(ud->tisci_rm.tisci);

	ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id",
				   &ud->tisci_rm.tisci_dev_id);
	if (ret) {
		dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
		return ret;
	}
	pdev->id = ud->tisci_rm.tisci_dev_id;

	ret = of_property_read_u32(navss_node, "ti,sci-dev-id",
				   &ud->tisci_rm.tisci_navss_dev_id);
	if (ret) {
		dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret);
		return ret;
	}

	ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
	ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;

	ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
	if (IS_ERR(ud->ringacc))
		return PTR_ERR(ud->ringacc);

	dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
					    DOMAIN_BUS_TI_SCI_INTA_MSI);
	if (!dev->msi_domain) {
		dev_err(dev, "Failed to get MSI domain\n");
		return -EPROBE_DEFER;
	}

	match = of_match_node(udma_of_match, dev->of_node);
	if (!match) {
		dev_err(dev, "No compatible match found\n");
		return -ENODEV;
	}
	ud->match_data = match->data;

	dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);

	ud->ddev.device_alloc_chan_resources = udma_alloc_chan_resources;
	ud->ddev.device_config = udma_slave_config;
	ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
	ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
	ud->ddev.device_issue_pending = udma_issue_pending;
	ud->ddev.device_tx_status = udma_tx_status;
	ud->ddev.device_pause = udma_pause;
	ud->ddev.device_resume = udma_resume;
	ud->ddev.device_terminate_all = udma_terminate_all;
	ud->ddev.device_synchronize = udma_synchronize;

	ud->ddev.device_free_chan_resources = udma_free_chan_resources;
	ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
	ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
	ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	ud->ddev.copy_align = DMAENGINE_ALIGN_8_BYTES;
	ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
				       DESC_METADATA_ENGINE;
	if (ud->match_data->enable_memcpy_support) {
		dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
		ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
		ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
	}

	ud->ddev.dev = dev;
	ud->dev = dev;
	ud->psil_base = ud->match_data->psil_base;

	INIT_LIST_HEAD(&ud->ddev.channels);
	INIT_LIST_HEAD(&ud->desc_to_purge);

	ch_count = udma_setup_resources(ud);
	if (ch_count <= 0)
		return ch_count;

	spin_lock_init(&ud->lock);
	INIT_WORK(&ud->purge_work, udma_purge_desc_work);

	ud->desc_align = 64;
	if (ud->desc_align < dma_get_cache_alignment())
		ud->desc_align = dma_get_cache_alignment();

	for (i = 0; i < ud->tchan_cnt; i++) {
		struct udma_tchan *tchan = &ud->tchans[i];

		tchan->id = i;
		tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
	}

	for (i = 0; i < ud->rchan_cnt; i++) {
		struct udma_rchan *rchan = &ud->rchans[i];

		rchan->id = i;
		rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
	}

	for (i = 0; i < ud->rflow_cnt; i++) {
		struct udma_rflow *rflow = &ud->rflows[i];

		rflow->id = i;
	}

	for (i = 0; i < ch_count; i++) {
		struct udma_chan *uc = &ud->channels[i];

		uc->ud = ud;
		uc->vc.desc_free = udma_desc_free;
		uc->id = i;
		uc->tchan = NULL;
		uc->rchan = NULL;
		uc->config.remote_thread_id = -1;
		uc->config.dir = DMA_MEM_TO_MEM;
		uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
					  dev_name(dev), i);

		vchan_init(&uc->vc, &ud->ddev);
		/* Use custom vchan completion handling */
		tasklet_init(&uc->vc.task, udma_vchan_complete,
			     (unsigned long)&uc->vc);
		init_completion(&uc->teardown_completed);
	}

	ret = dma_async_device_register(&ud->ddev);
	if (ret) {
		dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
		return ret;
	}

	platform_set_drvdata(pdev, ud);

	ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
	if (ret) {
		dev_err(dev, "failed to register of_dma controller\n");
		dma_async_device_unregister(&ud->ddev);
	}

	return ret;
}
static struct platform_driver udma_driver = {
	.driver = {
		.name			= "ti-udma",
		.of_match_table		= udma_of_match,
		.suppress_bind_attrs	= true,
	},
	.probe = udma_probe,
};

builtin_platform_driver(udma_driver);

/* Private interfaces to UDMA */
#include "k3-udma-private.c"