// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/sys_soc.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/soc/ti/ti_sci_inta_msi.h>
#include <linux/dma/k3-event-router.h>
#include <linux/dma/ti-cppi5.h>

#include "../virt-dma.h"
#include "k3-psil-priv.h"
struct udma_static_tr {
	u8 elsize; /* RPSTR0 */
	u16 elcnt; /* RPSTR0 */
	u16 bstcnt; /* RPSTR1 */
};

#define K3_UDMA_MAX_RFLOWS		1024
#define K3_UDMA_DEFAULT_RING_SIZE	16

/* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
#define UDMA_RFLOW_SRCTAG_NONE		0
#define UDMA_RFLOW_SRCTAG_CFG_TAG	1
#define UDMA_RFLOW_SRCTAG_FLOW_ID	2
#define UDMA_RFLOW_SRCTAG_SRC_TAG	4

#define UDMA_RFLOW_DSTTAG_NONE		0
#define UDMA_RFLOW_DSTTAG_CFG_TAG	1
#define UDMA_RFLOW_DSTTAG_FLOW_ID	2
#define UDMA_RFLOW_DSTTAG_DST_TAG_LO	4
#define UDMA_RFLOW_DSTTAG_DST_TAG_HI	5

static const char * const mmr_names[] = {
	[MMR_BCHANRT] = "bchanrt",
	[MMR_RCHANRT] = "rchanrt",
	[MMR_TCHANRT] = "tchanrt",
};

struct udma_tchan {
	struct k3_ring *t_ring; /* Transmit ring */
	struct k3_ring *tc_ring; /* Transmit Completion ring */
	int tflow_id; /* applicable only for PKTDMA */
};

#define udma_bchan udma_tchan

struct udma_rflow {
	struct k3_ring *fd_ring; /* Free Descriptor ring */
	struct k3_ring *r_ring; /* Receive ring */
};

struct udma_oes_offsets {
	/* K3 UDMA Output Event Offset */
	u32 udma_rchan;

	/* BCDMA Output Event Offsets */
	u32 bcdma_bchan_data;
	u32 bcdma_bchan_ring;
	u32 bcdma_tchan_data;
	u32 bcdma_tchan_ring;
	u32 bcdma_rchan_data;
	u32 bcdma_rchan_ring;

	/* PKTDMA Output Event Offsets */
	u32 pktdma_tchan_flow;
	u32 pktdma_rchan_flow;
};

#define UDMA_FLAG_PDMA_ACC32	BIT(0)
#define UDMA_FLAG_PDMA_BURST	BIT(1)
#define UDMA_FLAG_TDTYPE	BIT(2)

struct udma_match_data {
	enum k3_dma_type type;
	bool enable_memcpy_support;
	u32 flags;
	u32 statictr_z_mask;
};

struct udma_soc_data {
	struct udma_oes_offsets oes;
	u32 bcdma_trigger_event_offset;
};

struct udma_hwdesc {
	size_t cppi5_desc_size;
	void *cppi5_desc_vaddr;
	dma_addr_t cppi5_desc_paddr;

	/* TR descriptor internal pointers */
	struct cppi5_tr_resp_t *tr_resp_base;
};

struct udma_rx_flush {
	struct udma_hwdesc hwdescs[2];
	dma_addr_t buffer_paddr;
};

struct udma_dev {
	struct dma_device ddev;

	void __iomem *mmrs[MMR_LAST];
	const struct udma_match_data *match_data;
	const struct udma_soc_data *soc_data;

	struct udma_tpl bchan_tpl;
	struct udma_tpl tchan_tpl;
	struct udma_tpl rchan_tpl;

	size_t desc_align; /* alignment to use for descriptors */

	struct udma_tisci_rm tisci_rm;

	struct k3_ringacc *ringacc;

	struct work_struct purge_work;
	struct list_head desc_to_purge;

	struct udma_rx_flush rx_flush;

	unsigned long *bchan_map;
	unsigned long *tchan_map;
	unsigned long *rchan_map;
	unsigned long *rflow_gp_map;
	unsigned long *rflow_gp_map_allocated;
	unsigned long *rflow_in_use;
	unsigned long *tflow_map;

	struct udma_bchan *bchans;
	struct udma_tchan *tchans;
	struct udma_rchan *rchans;
	struct udma_rflow *rflows;

	struct udma_chan *channels;
};

struct udma_desc {
	struct virt_dma_desc vd;

	enum dma_transfer_direction dir;

	struct udma_static_tr static_tr;

	unsigned int desc_idx; /* Only used for cyclic in packet mode */

	void *metadata; /* pointer to provided metadata buffer (EPIP, PSdata) */

	unsigned int hwdesc_count;
	struct udma_hwdesc hwdesc[];
};

enum udma_chan_state {
	UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */
	UDMA_CHAN_IS_ACTIVE, /* Normal operation */
	UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */
};

struct udma_tx_drain {
	struct delayed_work work;
	ktime_t tstamp;
	u32 residue;
};

struct udma_chan_config {
	bool pkt_mode; /* TR or packet */
	bool needs_epib; /* EPIB is needed for the communication or not */
	u32 psd_size; /* size of Protocol Specific Data */
	u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
	u32 hdesc_size; /* Size of a packet descriptor in packet mode */
	bool notdpkt; /* Suppress sending TDC packet */
	int remote_thread_id;

	enum psil_endpoint_type ep_type;

	enum udma_tp_level channel_tpl; /* Channel Throughput Level */

	/* PKTDMA mapped channel */
	int mapped_channel_id;
	/* PKTDMA default tflow or rflow for mapped channel */
	int default_flow_id;

	enum dma_transfer_direction dir;
};

struct udma_chan {
	struct virt_dma_chan vc;
	struct dma_slave_config cfg;

	struct device *dma_dev;
	struct udma_desc *desc;
	struct udma_desc *terminated_desc;
	struct udma_static_tr static_tr;

	struct udma_bchan *bchan;
	struct udma_tchan *tchan;
	struct udma_rchan *rchan;
	struct udma_rflow *rflow;

	enum udma_chan_state state;
	struct completion teardown_completed;

	struct udma_tx_drain tx_drain;

	u32 bcnt; /* number of bytes completed since the start of the channel */

	/* Channel configuration parameters */
	struct udma_chan_config config;

	/* dmapool for packet mode descriptors */
	struct dma_pool *hdesc_pool;
};
static inline struct udma_dev *to_udma_dev(struct dma_device *d)
{
	return container_of(d, struct udma_dev, ddev);
}

static inline struct udma_chan *to_udma_chan(struct dma_chan *c)
{
	return container_of(c, struct udma_chan, vc.chan);
}

static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct udma_desc, vd.tx);
}
/* Generic register access functions */
static inline u32 udma_read(void __iomem *base, int reg)
{
	return readl(base + reg);
}

static inline void udma_write(void __iomem *base, int reg, u32 val)
{
	writel(val, base + reg);
}

static inline void udma_update_bits(void __iomem *base, int reg,
				    u32 mask, u32 val)
{
	u32 tmp, orig;

	orig = readl(base + reg);
	tmp = orig & ~mask;
	tmp |= (val & mask);

	if (tmp != orig)
		writel(tmp, base + reg);
}

static inline u32 udma_tchanrt_read(struct udma_chan *uc, int reg)
{
	return udma_read(uc->tchan->reg_rt, reg);
}

static inline void udma_tchanrt_write(struct udma_chan *uc, int reg, u32 val)
{
	udma_write(uc->tchan->reg_rt, reg, val);
}

static inline void udma_tchanrt_update_bits(struct udma_chan *uc, int reg,
					    u32 mask, u32 val)
{
	udma_update_bits(uc->tchan->reg_rt, reg, mask, val);
}

static inline u32 udma_rchanrt_read(struct udma_chan *uc, int reg)
{
	return udma_read(uc->rchan->reg_rt, reg);
}

static inline void udma_rchanrt_write(struct udma_chan *uc, int reg, u32 val)
{
	udma_write(uc->rchan->reg_rt, reg, val);
}

static inline void udma_rchanrt_update_bits(struct udma_chan *uc, int reg,
					    u32 mask, u32 val)
{
	udma_update_bits(uc->rchan->reg_rt, reg, mask, val);
}
static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;

	return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
					      tisci_rm->tisci_navss_dev_id,
					      src_thread, dst_thread);
}

static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
			     u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;

	return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
						tisci_rm->tisci_navss_dev_id,
						src_thread, dst_thread);
}

static void k3_configure_chan_coherency(struct dma_chan *chan, u32 asel)
{
	struct device *chan_dev = &chan->dev->device;

	if (asel == 0) {
		/* No special handling for the channel */
		chan->dev->chan_dma_dev = false;

		chan_dev->dma_coherent = false;
		chan_dev->dma_parms = NULL;
	} else if (asel == 14 || asel == 15) {
		chan->dev->chan_dma_dev = true;

		chan_dev->dma_coherent = true;
		dma_coerce_mask_and_coherent(chan_dev, DMA_BIT_MASK(48));
		chan_dev->dma_parms = chan_dev->parent->dma_parms;
	} else {
		dev_warn(chan->device->dev, "Invalid ASEL value: %u\n", asel);

		chan_dev->dma_coherent = false;
		chan_dev->dma_parms = NULL;
	}
}
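
/*
 * For context (assumption, not stated in this file): ASEL values 14 and 15
 * are taken here to be the settings that route the channel's traffic over
 * an IO-coherent path on K3 SoCs, which is why only those two values mark
 * the channel device as dma_coherent and widen its DMA mask to 48 bits,
 * while ASEL 0 keeps the default non-coherent setup and any other value is
 * rejected with a warning.
 */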
static void udma_reset_uchan(struct udma_chan *uc)
{
	memset(&uc->config, 0, sizeof(uc->config));
	uc->config.remote_thread_id = -1;
	uc->config.mapped_channel_id = -1;
	uc->config.default_flow_id = -1;
	uc->state = UDMA_CHAN_IS_IDLE;
}

static void udma_dump_chan_stdata(struct udma_chan *uc)
{
	struct device *dev = uc->ud->dev;
	u32 offset;
	int i;

	if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) {
		dev_dbg(dev, "TCHAN State data:\n");
		for (i = 0; i < 32; i++) {
			offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
			dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i,
				udma_tchanrt_read(uc, offset));
		}
	}

	if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) {
		dev_dbg(dev, "RCHAN State data:\n");
		for (i = 0; i < 32; i++) {
			offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
			dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i,
				udma_rchanrt_read(uc, offset));
		}
	}
}

static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d,
						    int idx)
{
	return d->hwdesc[idx].cppi5_desc_paddr;
}

static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx)
{
	return d->hwdesc[idx].cppi5_desc_vaddr;
}
static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc,
						   dma_addr_t paddr)
{
	struct udma_desc *d = uc->terminated_desc;

	if (d) {
		dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
								   d->desc_idx);

		if (desc_paddr != paddr)
			d = NULL;
	}

	if (!d) {
		d = uc->desc;
		if (d) {
			dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
								d->desc_idx);

			if (desc_paddr != paddr)
				d = NULL;
		}
	}

	return d;
}

static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d)
{
	if (uc->use_dma_pool) {
		int i;

		for (i = 0; i < d->hwdesc_count; i++) {
			if (!d->hwdesc[i].cppi5_desc_vaddr)
				continue;

			dma_pool_free(uc->hdesc_pool,
				      d->hwdesc[i].cppi5_desc_vaddr,
				      d->hwdesc[i].cppi5_desc_paddr);

			d->hwdesc[i].cppi5_desc_vaddr = NULL;
		}
	} else if (d->hwdesc[0].cppi5_desc_vaddr) {
		dma_free_coherent(uc->dma_dev, d->hwdesc[0].cppi5_desc_size,
				  d->hwdesc[0].cppi5_desc_vaddr,
				  d->hwdesc[0].cppi5_desc_paddr);

		d->hwdesc[0].cppi5_desc_vaddr = NULL;
	}
}
static void udma_purge_desc_work(struct work_struct *work)
{
	struct udma_dev *ud = container_of(work, typeof(*ud), purge_work);
	struct virt_dma_desc *vd, *_vd;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&ud->lock, flags);
	list_splice_tail_init(&ud->desc_to_purge, &head);
	spin_unlock_irqrestore(&ud->lock, flags);

	list_for_each_entry_safe(vd, _vd, &head, node) {
		struct udma_chan *uc = to_udma_chan(vd->tx.chan);
		struct udma_desc *d = to_udma_desc(&vd->tx);

		udma_free_hwdesc(uc, d);
	}

	/* If more to purge, schedule the work again */
	if (!list_empty(&ud->desc_to_purge))
		schedule_work(&ud->purge_work);
}

static void udma_desc_free(struct virt_dma_desc *vd)
{
	struct udma_dev *ud = to_udma_dev(vd->tx.chan->device);
	struct udma_chan *uc = to_udma_chan(vd->tx.chan);
	struct udma_desc *d = to_udma_desc(&vd->tx);
	unsigned long flags;

	if (uc->terminated_desc == d)
		uc->terminated_desc = NULL;

	if (uc->use_dma_pool) {
		udma_free_hwdesc(uc, d);
		kfree(d);
		return;
	}

	spin_lock_irqsave(&ud->lock, flags);
	list_add_tail(&vd->node, &ud->desc_to_purge);
	spin_unlock_irqrestore(&ud->lock, flags);

	schedule_work(&ud->purge_work);
}
static bool udma_is_chan_running(struct udma_chan *uc)
{
	u32 trt_ctl = 0;
	u32 rrt_ctl = 0;

	if (uc->tchan)
		trt_ctl = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
	if (uc->rchan)
		rrt_ctl = udma_rchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);

	if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
		return true;

	return false;
}

static bool udma_is_chan_paused(struct udma_chan *uc)
{
	u32 val, pause_mask;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG);
		pause_mask = UDMA_PEER_RT_EN_PAUSE;
		break;
	case DMA_MEM_TO_DEV:
		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG);
		pause_mask = UDMA_PEER_RT_EN_PAUSE;
		break;
	case DMA_MEM_TO_MEM:
		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
		pause_mask = UDMA_CHAN_RT_CTL_PAUSE;
		break;
	default:
		return false;
	}

	if (val & pause_mask)
		return true;

	return false;
}
static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc)
{
	return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr;
}
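
/*
 * Note on the indexing above: rx_flush keeps two pre-built flush descriptors
 * in hwdescs[2] and uses uc->config.pkt_mode (a bool) directly as the array
 * index, so index 0 holds the TR-mode flush descriptor and index 1 the
 * packet-mode one.
 */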
static int udma_push_to_ring(struct udma_chan *uc, int idx)
{
	struct udma_desc *d = uc->desc;
	struct k3_ring *ring = NULL;
	dma_addr_t paddr;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		ring = uc->rflow->fd_ring;
		break;
	case DMA_MEM_TO_DEV:
	case DMA_MEM_TO_MEM:
		ring = uc->tchan->t_ring;
		break;
	default:
		return -EINVAL;
	}

	/* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */
	if (idx == -1) {
		paddr = udma_get_rx_flush_hwdesc_paddr(uc);
	} else {
		paddr = udma_curr_cppi5_desc_paddr(d, idx);

		wmb(); /* Ensure that writes are not moved over this point */
	}

	return k3_ringacc_ring_push(ring, &paddr);
}

static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr)
{
	if (uc->config.dir != DMA_DEV_TO_MEM)
		return false;

	if (addr == udma_get_rx_flush_hwdesc_paddr(uc))
		return true;

	return false;
}

static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
{
	struct k3_ring *ring = NULL;
	int ret;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		ring = uc->rflow->r_ring;
		break;
	case DMA_MEM_TO_DEV:
	case DMA_MEM_TO_MEM:
		ring = uc->tchan->tc_ring;
		break;
	default:
		return -ENOENT;
	}

	ret = k3_ringacc_ring_pop(ring, addr);
	if (ret)
		return ret;

	rmb(); /* Ensure that reads are not moved before this point */

	/* Teardown completion */
	if (cppi5_desc_is_tdcm(*addr))
		return 0;

	/* Check for flush descriptor */
	if (udma_desc_is_rx_flush(uc, *addr))
		return -ENOENT;

	return 0;
}
static void udma_reset_rings(struct udma_chan *uc)
{
	struct k3_ring *ring1 = NULL;
	struct k3_ring *ring2 = NULL;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		if (uc->rchan) {
			ring1 = uc->rflow->fd_ring;
			ring2 = uc->rflow->r_ring;
		}
		break;
	case DMA_MEM_TO_DEV:
	case DMA_MEM_TO_MEM:
		if (uc->tchan) {
			ring1 = uc->tchan->t_ring;
			ring2 = uc->tchan->tc_ring;
		}
		break;
	default:
		break;
	}

	if (ring1)
		k3_ringacc_ring_reset_dma(ring1,
					  k3_ringacc_ring_get_occ(ring1));
	if (ring2)
		k3_ringacc_ring_reset(ring2);

	/* make sure we are not leaking memory by stalled descriptor */
	if (uc->terminated_desc) {
		udma_desc_free(&uc->terminated_desc->vd);
		uc->terminated_desc = NULL;
	}
}

static void udma_reset_counters(struct udma_chan *uc)
{
	u32 val;

	if (uc->tchan) {
		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);

		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);

		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);

		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
	}

	if (uc->rchan) {
		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);

		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);

		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);

		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
	}
}
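
/*
 * The read-then-write-back pattern above relies on the UDMA real-time
 * counter registers decrementing by the value written to them (assumed
 * hardware behaviour, not spelled out in this file), so writing back the
 * value just read effectively zeroes each counter.
 */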
static int udma_reset_chan(struct udma_chan *uc, bool hard)
{
	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	/* Reset all counters */
	udma_reset_counters(uc);

	/* Hard reset: re-initialize the channel to reset */
	if (hard) {
		struct udma_chan_config ucc_backup;
		int ret;

		memcpy(&ucc_backup, &uc->config, sizeof(uc->config));
		uc->ud->ddev.device_free_chan_resources(&uc->vc.chan);

		/* restore the channel configuration */
		memcpy(&uc->config, &ucc_backup, sizeof(uc->config));
		ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan);
		if (ret)
			return ret;

		/*
		 * Setting forced teardown after forced reset helps recovering
		 * the rchan.
		 */
		if (uc->config.dir == DMA_DEV_TO_MEM)
			udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
					   UDMA_CHAN_RT_CTL_EN |
					   UDMA_CHAN_RT_CTL_TDOWN |
					   UDMA_CHAN_RT_CTL_FTDOWN);
	}
	uc->state = UDMA_CHAN_IS_IDLE;

	return 0;
}
static void udma_start_desc(struct udma_chan *uc)
{
	struct udma_chan_config *ucc = &uc->config;

	if (uc->ud->match_data->type == DMA_TYPE_UDMA && ucc->pkt_mode &&
	    (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
		int i;

		/*
		 * UDMA only: Push all descriptors to ring for packet mode
		 * cyclic or RX
		 * PKTDMA supports pre-linked descriptor and cyclic is not
		 * supported
		 */
		for (i = 0; i < uc->desc->sglen; i++)
			udma_push_to_ring(uc, i);
	} else {
		udma_push_to_ring(uc, 0);
	}
}

static bool udma_chan_needs_reconfiguration(struct udma_chan *uc)
{
	/* Only PDMAs have staticTR */
	if (uc->config.ep_type == PSIL_EP_NATIVE)
		return false;

	/* Check if the staticTR configuration has changed for TX */
	if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr)))
		return true;

	return false;
}
static int udma_start(struct udma_chan *uc)
{
	struct virt_dma_desc *vd = vchan_next_desc(&uc->vc);

	if (!vd) {
		uc->desc = NULL;
		return -ENOENT;
	}

	list_del(&vd->node);

	uc->desc = to_udma_desc(&vd->tx);

	/* Channel is already running and does not need reconfiguration */
	if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) {
		udma_start_desc(uc);
		goto out;
	}

	/* Make sure that we clear the teardown bit, if it is set */
	udma_reset_chan(uc, false);

	/* Push descriptors before we start the channel */
	udma_start_desc(uc);

	switch (uc->desc->dir) {
	case DMA_DEV_TO_MEM:
		/* Config remote TR */
		if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
			u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
				  PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
			const struct udma_match_data *match_data =
							uc->ud->match_data;

			if (uc->config.enable_acc32)
				val |= PDMA_STATIC_TR_XY_ACC32;
			if (uc->config.enable_burst)
				val |= PDMA_STATIC_TR_XY_BURST;

			udma_rchanrt_write(uc,
					   UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG,
					   val);

			udma_rchanrt_write(uc,
				UDMA_CHAN_RT_PEER_STATIC_TR_Z_REG,
				PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt,
						 match_data->statictr_z_mask));

			/* save the current staticTR configuration */
			memcpy(&uc->static_tr, &uc->desc->static_tr,
			       sizeof(uc->static_tr));
		}

		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		/* Enable remote */
		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		break;
	case DMA_MEM_TO_DEV:
		/* Config remote TR */
		if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
			u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
				  PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);

			if (uc->config.enable_acc32)
				val |= PDMA_STATIC_TR_XY_ACC32;
			if (uc->config.enable_burst)
				val |= PDMA_STATIC_TR_XY_BURST;

			udma_tchanrt_write(uc,
					   UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG,
					   val);

			/* save the current staticTR configuration */
			memcpy(&uc->static_tr, &uc->desc->static_tr,
			       sizeof(uc->static_tr));
		}

		/* Enable remote */
		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		break;
	default:
		return -EINVAL;
	}

	uc->state = UDMA_CHAN_IS_ACTIVE;
out:

	return 0;
}
static int udma_stop(struct udma_chan *uc)
{
	enum udma_chan_state old_state = uc->state;

	uc->state = UDMA_CHAN_IS_TERMINATING;
	reinit_completion(&uc->teardown_completed);

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		if (!uc->cyclic && !uc->desc)
			udma_push_to_ring(uc, -1);

		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE |
				   UDMA_PEER_RT_EN_TEARDOWN);
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE |
				   UDMA_PEER_RT_EN_FLUSH);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN |
				   UDMA_CHAN_RT_CTL_TDOWN);
		break;
	case DMA_MEM_TO_MEM:
		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN |
				   UDMA_CHAN_RT_CTL_TDOWN);
		break;
	default:
		uc->state = old_state;
		complete_all(&uc->teardown_completed);
		return -EINVAL;
	}

	return 0;
}
static void udma_cyclic_packet_elapsed(struct udma_chan *uc)
{
	struct udma_desc *d = uc->desc;
	struct cppi5_host_desc_t *h_desc;

	h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr;
	cppi5_hdesc_reset_to_original(h_desc);
	udma_push_to_ring(uc, d->desc_idx);
	d->desc_idx = (d->desc_idx + 1) % d->sglen;
}

static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d)
{
	struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr;

	memcpy(d->metadata, h_desc->epib, d->metadata_size);
}
static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
{
	u32 peer_bcnt, bcnt;

	/* Only TX towards PDMA is affected */
	if (uc->config.ep_type == PSIL_EP_NATIVE ||
	    uc->config.dir != DMA_MEM_TO_DEV)
		return true;

	peer_bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
	bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);

	/* Transfer is incomplete, store current residue and time stamp */
	if (peer_bcnt < bcnt) {
		uc->tx_drain.residue = bcnt - peer_bcnt;
		uc->tx_drain.tstamp = ktime_get();
		return false;
	}

	return true;
}
static void udma_check_tx_completion(struct work_struct *work)
{
	struct udma_chan *uc = container_of(work, typeof(*uc),
					    tx_drain.work.work);
	bool desc_done = true;
	u32 residue_diff;
	ktime_t time_diff;
	unsigned long delay;

	while (1) {
		if (uc->desc) {
			/* Get previous residue and time stamp */
			residue_diff = uc->tx_drain.residue;
			time_diff = uc->tx_drain.tstamp;
			/*
			 * Get current residue and time stamp or see if
			 * transfer is complete
			 */
			desc_done = udma_is_desc_really_done(uc, uc->desc);
		}

		if (!desc_done) {
			/*
			 * Find the time delta and residue delta w.r.t
			 * previous poll
			 */
			time_diff = ktime_sub(uc->tx_drain.tstamp,
					      time_diff) + 1;
			residue_diff -= uc->tx_drain.residue;
			if (residue_diff) {
				/*
				 * Try to guess when we should check
				 * next time by calculating rate at
				 * which data is being drained at the
				 * peer device
				 */
				delay = (time_diff / residue_diff) *
					uc->tx_drain.residue;
			} else {
				/* No progress, check again in 1 second */
				schedule_delayed_work(&uc->tx_drain.work, HZ);
				break;
			}

			usleep_range(ktime_to_us(delay),
				     ktime_to_us(delay) + 10);
			continue;
		}

		if (uc->desc) {
			struct udma_desc *d = uc->desc;

			uc->bcnt += d->residue;
			udma_start(uc);
			vchan_cookie_complete(&d->vd);
			break;
		}

		break;
	}
}
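
/*
 * Worked example for the delay estimate above (illustrative numbers only):
 * if the previous poll left tx_drain.residue at 1500 bytes and by this poll
 * 1000 bytes have drained (residue_diff = 1000) in time_diff = 2 ms, the
 * peer is draining ~0.5 MB/s, so with 500 bytes still pending the next
 * check is scheduled after roughly (2 ms / 1000) * 500 = 1 ms.
 */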
static irqreturn_t udma_ring_irq_handler(int irq, void *data)
{
	struct udma_chan *uc = data;
	struct udma_desc *d;
	dma_addr_t paddr = 0;

	if (udma_pop_from_ring(uc, &paddr) || !paddr)
		return IRQ_HANDLED;

	spin_lock(&uc->vc.lock);

	/* Teardown completion message */
	if (cppi5_desc_is_tdcm(paddr)) {
		complete_all(&uc->teardown_completed);

		if (uc->terminated_desc) {
			udma_desc_free(&uc->terminated_desc->vd);
			uc->terminated_desc = NULL;
		}

		if (!uc->desc)
			udma_start(uc);

		goto out;
	}

	d = udma_udma_desc_from_paddr(uc, paddr);

	if (d) {
		dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
								   d->desc_idx);
		if (desc_paddr != paddr) {
			dev_err(uc->ud->dev, "not matching descriptors!\n");
			goto out;
		}

		if (d == uc->desc) {
			/* active descriptor */
			if (uc->cyclic) {
				udma_cyclic_packet_elapsed(uc);
				vchan_cyclic_callback(&d->vd);
			} else {
				if (udma_is_desc_really_done(uc, d)) {
					uc->bcnt += d->residue;
					udma_start(uc);
					vchan_cookie_complete(&d->vd);
				} else {
					schedule_delayed_work(&uc->tx_drain.work,
							      0);
				}
			}
		} else {
			/*
			 * terminated descriptor, mark the descriptor as
			 * completed to update the channel's cookie marker
			 */
			dma_cookie_complete(&d->vd.tx);
		}
	}
out:
	spin_unlock(&uc->vc.lock);

	return IRQ_HANDLED;
}

static irqreturn_t udma_udma_irq_handler(int irq, void *data)
{
	struct udma_chan *uc = data;
	struct udma_desc *d;

	spin_lock(&uc->vc.lock);
	d = uc->desc;
	if (d) {
		d->tr_idx = (d->tr_idx + 1) % d->sglen;

		if (uc->cyclic) {
			vchan_cyclic_callback(&d->vd);
		} else {
			/* TODO: figure out the real amount of data */
			uc->bcnt += d->residue;
			udma_start(uc);
			vchan_cookie_complete(&d->vd);
		}
	}

	spin_unlock(&uc->vc.lock);

	return IRQ_HANDLED;
}
/**
 * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
 * @from: Start the search from this flow id number
 * @cnt: Number of consecutive flow ids to allocate
 *
 * Allocate range of RX flow ids for future use, those flows can be requested
 * only using explicit flow id number. If @from is set to -1 it will try to
 * find the first free range. If @from is a positive value it will force
 * allocation only of the specified range of flows.
 *
 * Returns -ENOMEM if it can't find a free range,
 * -EEXIST if the requested range is busy,
 * -EINVAL if wrong input values are passed.
 * Returns the flow id on success.
 */
static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
{
	int start, tmp_from;
	DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);

	tmp_from = from;
	if (tmp_from < 0)
		tmp_from = ud->rchan_cnt;
	/* default flows can't be allocated and accessible only by id */
	if (tmp_from < ud->rchan_cnt)
		return -EINVAL;

	if (tmp_from + cnt > ud->rflow_cnt)
		return -EINVAL;

	bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
		  ud->rflow_cnt);

	start = bitmap_find_next_zero_area(tmp,
					   ud->rflow_cnt,
					   tmp_from, cnt, 0);
	if (start >= ud->rflow_cnt)
		return -ENOMEM;

	if (from >= 0 && start != from)
		return -EEXIST;

	bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
	return start;
}

static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
{
	if (from < ud->rchan_cnt)
		return -EINVAL;
	if (from + cnt > ud->rflow_cnt)
		return -EINVAL;

	bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
	return 0;
}
static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
{
	/*
	 * Attempt to request rflow by ID can be made for any rflow
	 * if not in use, with the assumption that the caller knows what it
	 * is doing. TI-SCI FW will perform an additional permission check
	 * anyway, it's safe.
	 */

	if (id < 0 || id >= ud->rflow_cnt)
		return ERR_PTR(-ENOENT);

	if (test_bit(id, ud->rflow_in_use))
		return ERR_PTR(-ENOENT);

	if (ud->rflow_gp_map) {
		/* GP rflow has to be allocated first */
		if (!test_bit(id, ud->rflow_gp_map) &&
		    !test_bit(id, ud->rflow_gp_map_allocated))
			return ERR_PTR(-EINVAL);
	}

	dev_dbg(ud->dev, "get rflow%d\n", id);
	set_bit(id, ud->rflow_in_use);
	return &ud->rflows[id];
}

static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
{
	if (!test_bit(rflow->id, ud->rflow_in_use)) {
		dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
		return;
	}

	dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
	clear_bit(rflow->id, ud->rflow_in_use);
}
#define UDMA_RESERVE_RESOURCE(res)					\
static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud,	\
					       enum udma_tp_level tpl,	\
					       int id)			\
{									\
	if (id >= 0) {							\
		if (test_bit(id, ud->res##_map)) {			\
			dev_err(ud->dev, "res##%d is in use\n", id);	\
			return ERR_PTR(-ENOENT);			\
		}							\
	} else {							\
		int start;						\
									\
		if (tpl >= ud->res##_tpl.levels)			\
			tpl = ud->res##_tpl.levels - 1;			\
									\
		start = ud->res##_tpl.start_idx[tpl];			\
									\
		id = find_next_zero_bit(ud->res##_map, ud->res##_cnt,	\
					start);				\
		if (id == ud->res##_cnt) {				\
			return ERR_PTR(-ENOENT);			\
		}							\
	}								\
									\
	set_bit(id, ud->res##_map);					\
	return &ud->res##s[id];						\
}

UDMA_RESERVE_RESOURCE(bchan);
UDMA_RESERVE_RESOURCE(tchan);
UDMA_RESERVE_RESOURCE(rchan);
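
/*
 * For reference (expansion sketch, not present in the original source):
 * UDMA_RESERVE_RESOURCE(tchan) above generates a helper of the form
 *
 *	static struct udma_tchan *__udma_reserve_tchan(struct udma_dev *ud,
 *						       enum udma_tp_level tpl,
 *						       int id);
 *
 * which either claims the explicitly requested id or searches ud->tchan_map
 * starting from the start index of the requested throughput level. The
 * bchan and rchan variants are generated the same way and are used by the
 * get/put helpers below.
 */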
static int bcdma_get_bchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	enum udma_tp_level tpl;

	if (uc->bchan) {
		dev_dbg(ud->dev, "chan%d: already have bchan%d allocated\n",
			uc->id, uc->bchan->id);
		return 0;
	}

	/*
	 * Use normal channels for peripherals, and highest TPL channel for
	 * memcpy
	 */
	if (uc->config.tr_trigger_type)
		tpl = 0;
	else
		tpl = ud->bchan_tpl.levels - 1;

	uc->bchan = __udma_reserve_bchan(ud, tpl, -1);
	if (IS_ERR(uc->bchan))
		return PTR_ERR(uc->bchan);

	uc->tchan = uc->bchan;

	return 0;
}

static int udma_get_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return 0;
	}

	/*
	 * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
	 * For PKTDMA mapped channels it is configured to a channel which must
	 * be used to service the peripheral.
	 */
	uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl,
					 uc->config.mapped_channel_id);
	if (IS_ERR(uc->tchan))
		return PTR_ERR(uc->tchan);

	if (ud->tflow_cnt) {
		int tflow_id;

		/* Only PKTDMA have support for tx flows */
		if (uc->config.default_flow_id >= 0)
			tflow_id = uc->config.default_flow_id;
		else
			tflow_id = uc->tchan->id;

		if (test_bit(tflow_id, ud->tflow_map)) {
			dev_err(ud->dev, "tflow%d is in use\n", tflow_id);
			clear_bit(uc->tchan->id, ud->tchan_map);
			uc->tchan = NULL;
			return -ENOENT;
		}

		uc->tchan->tflow_id = tflow_id;
		set_bit(tflow_id, ud->tflow_map);
	} else {
		uc->tchan->tflow_id = -1;
	}

	return 0;
}

static int udma_get_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return 0;
	}

	/*
	 * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
	 * For PKTDMA mapped channels it is configured to a channel which must
	 * be used to service the peripheral.
	 */
	uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl,
					 uc->config.mapped_channel_id);

	return PTR_ERR_OR_ZERO(uc->rchan);
}

static int udma_get_chan_pair(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int chan_id, end;

	if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
		dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
			 uc->id, uc->tchan->id);
		return 0;
	}

	if (uc->tchan) {
		dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return -EBUSY;
	} else if (uc->rchan) {
		dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return -EBUSY;
	}

	/* Can be optimized, but let's have it like this for now */
	end = min(ud->tchan_cnt, ud->rchan_cnt);
	/*
	 * Try to use the highest TPL channel pair for MEM_TO_MEM channels
	 * Note: in UDMAP the channel TPL is symmetric between tchan and rchan
	 */
	chan_id = ud->tchan_tpl.start_idx[ud->tchan_tpl.levels - 1];
	for (; chan_id < end; chan_id++) {
		if (!test_bit(chan_id, ud->tchan_map) &&
		    !test_bit(chan_id, ud->rchan_map))
			break;
	}

	if (chan_id == end)
		return -ENOENT;

	set_bit(chan_id, ud->tchan_map);
	set_bit(chan_id, ud->rchan_map);
	uc->tchan = &ud->tchans[chan_id];
	uc->rchan = &ud->rchans[chan_id];

	/* UDMA does not use tx flows */
	uc->tchan->tflow_id = -1;

	return 0;
}

static int udma_get_rflow(struct udma_chan *uc, int flow_id)
{
	struct udma_dev *ud = uc->ud;

	if (!uc->rchan) {
		dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
		return -EINVAL;
	}

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
			uc->id, uc->rflow->id);
		return 0;
	}

	uc->rflow = __udma_get_rflow(ud, flow_id);

	return PTR_ERR_OR_ZERO(uc->rflow);
}
static void bcdma_put_bchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->bchan) {
		dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id,
			uc->bchan->id);
		clear_bit(uc->bchan->id, ud->bchan_map);
		uc->bchan = NULL;
		uc->tchan = NULL;
	}
}

static void udma_put_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
			uc->rchan->id);
		clear_bit(uc->rchan->id, ud->rchan_map);
		uc->rchan = NULL;
	}
}

static void udma_put_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
			uc->tchan->id);
		clear_bit(uc->tchan->id, ud->tchan_map);

		if (uc->tchan->tflow_id >= 0)
			clear_bit(uc->tchan->tflow_id, ud->tflow_map);

		uc->tchan = NULL;
	}
}

static void udma_put_rflow(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
			uc->rflow->id);
		__udma_put_rflow(ud, uc->rflow);
		uc->rflow = NULL;
	}
}

static void bcdma_free_bchan_resources(struct udma_chan *uc)
{
	if (!uc->bchan)
		return;

	k3_ringacc_ring_free(uc->bchan->tc_ring);
	k3_ringacc_ring_free(uc->bchan->t_ring);
	uc->bchan->tc_ring = NULL;
	uc->bchan->t_ring = NULL;
	k3_configure_chan_coherency(&uc->vc.chan, 0);

	bcdma_put_bchan(uc);
}
static int bcdma_alloc_bchan_resources(struct udma_chan *uc)
{
	struct k3_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	int ret;

	ret = bcdma_get_bchan(uc);
	if (ret)
		return ret;

	ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1,
					    &uc->bchan->t_ring,
					    &uc->bchan->tc_ring);
	if (ret) {
		ret = -EBUSY;
		goto err_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
	ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_RINGACC_RING_MODE_RING;

	k3_configure_chan_coherency(&uc->vc.chan, ud->asel);
	ring_cfg.asel = ud->asel;
	ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);

	ret = k3_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg);
	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_ringacc_ring_free(uc->bchan->tc_ring);
	uc->bchan->tc_ring = NULL;
	k3_ringacc_ring_free(uc->bchan->t_ring);
	uc->bchan->t_ring = NULL;
	k3_configure_chan_coherency(&uc->vc.chan, 0);
err_ring:
	bcdma_put_bchan(uc);

	return ret;
}

static void udma_free_tx_resources(struct udma_chan *uc)
{
	if (!uc->tchan)
		return;

	k3_ringacc_ring_free(uc->tchan->t_ring);
	k3_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->t_ring = NULL;
	uc->tchan->tc_ring = NULL;

	udma_put_tchan(uc);
}

static int udma_alloc_tx_resources(struct udma_chan *uc)
{
	struct k3_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	struct udma_tchan *tchan;
	int ring_idx, ret;

	ret = udma_get_tchan(uc);
	if (ret)
		return ret;

	tchan = uc->tchan;
	if (tchan->tflow_id >= 0)
		ring_idx = tchan->tflow_id;
	else
		ring_idx = ud->bchan_cnt + tchan->id;

	ret = k3_ringacc_request_rings_pair(ud->ringacc, ring_idx, -1,
					    &tchan->t_ring,
					    &tchan->tc_ring);
	if (ret) {
		ret = -EBUSY;
		goto err_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
	ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
	if (ud->match_data->type == DMA_TYPE_UDMA) {
		ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
	} else {
		ring_cfg.mode = K3_RINGACC_RING_MODE_RING;

		k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
		ring_cfg.asel = uc->config.asel;
		ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
	}

	ret = k3_ringacc_ring_cfg(tchan->t_ring, &ring_cfg);
	ret |= k3_ringacc_ring_cfg(tchan->tc_ring, &ring_cfg);

	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->tc_ring = NULL;
	k3_ringacc_ring_free(uc->tchan->t_ring);
	uc->tchan->t_ring = NULL;
err_ring:
	udma_put_tchan(uc);

	return ret;
}

static void udma_free_rx_resources(struct udma_chan *uc)
{
	if (!uc->rchan)
		return;

	if (uc->rflow) {
		struct udma_rflow *rflow = uc->rflow;

		k3_ringacc_ring_free(rflow->fd_ring);
		k3_ringacc_ring_free(rflow->r_ring);
		rflow->fd_ring = NULL;
		rflow->r_ring = NULL;

		udma_put_rflow(uc);
	}

	udma_put_rchan(uc);
}

static int udma_alloc_rx_resources(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct k3_ring_cfg ring_cfg;
	struct udma_rflow *rflow;
	int fd_ring_id;
	int ret;

	ret = udma_get_rchan(uc);
	if (ret)
		return ret;

	/* For MEM_TO_MEM we don't need rflow or rings */
	if (uc->config.dir == DMA_MEM_TO_MEM)
		return 0;

	if (uc->config.default_flow_id >= 0)
		ret = udma_get_rflow(uc, uc->config.default_flow_id);
	else
		ret = udma_get_rflow(uc, uc->rchan->id);

	if (ret) {
		ret = -EBUSY;
		goto err_rflow;
	}

	rflow = uc->rflow;
	if (ud->tflow_cnt)
		fd_ring_id = ud->tflow_cnt + rflow->id;
	else
		fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt +
			     uc->rchan->id;

	ret = k3_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
					    &rflow->fd_ring, &rflow->r_ring);
	if (ret) {
		ret = -EBUSY;
		goto err_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));

	ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
	if (ud->match_data->type == DMA_TYPE_UDMA) {
		if (uc->config.pkt_mode)
			ring_cfg.size = SG_MAX_SEGMENTS;
		else
			ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;

		ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
	} else {
		ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
		ring_cfg.mode = K3_RINGACC_RING_MODE_RING;

		k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
		ring_cfg.asel = uc->config.asel;
		ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
	}

	ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);

	ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
	ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);

	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_ringacc_ring_free(rflow->r_ring);
	rflow->r_ring = NULL;
	k3_ringacc_ring_free(rflow->fd_ring);
	rflow->fd_ring = NULL;
err_ring:
	udma_put_rflow(uc);
err_rflow:
	udma_put_rchan(uc);

	return ret;
}
#define TISCI_BCDMA_BCHAN_VALID_PARAMS (			\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID)

#define TISCI_BCDMA_TCHAN_VALID_PARAMS (			\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID)

#define TISCI_BCDMA_RCHAN_VALID_PARAMS (			\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID)

#define TISCI_UDMA_TCHAN_VALID_PARAMS (				\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)

#define TISCI_UDMA_RCHAN_VALID_PARAMS (				\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)

static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct udma_tchan *tchan = uc->tchan;
	struct udma_rchan *rchan = uc->rchan;
	int ret;

	/* Non synchronized - mem to mem type of transfer */
	int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };

	req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
	req_tx.nav_id = tisci_rm->tisci_dev_id;
	req_tx.index = tchan->id;
	req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
	req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
	req_tx.txcq_qnum = tc_ring;
	req_tx.tx_atype = ud->atype;

	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
	if (ret) {
		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
		return ret;
	}

	req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
	req_rx.nav_id = tisci_rm->tisci_dev_id;
	req_rx.index = rchan->id;
	req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
	req_rx.rxcq_qnum = tc_ring;
	req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
	req_rx.rx_atype = ud->atype;

	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
	if (ret)
		dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret);

	return ret;
}

static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
	struct udma_bchan *bchan = uc->bchan;
	int ret;

	req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS;
	req_tx.nav_id = tisci_rm->tisci_dev_id;
	req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN;
	req_tx.index = bchan->id;

	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
	if (ret)
		dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret);

	return ret;
}

static int udma_tisci_tx_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct udma_tchan *tchan = uc->tchan;
	int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
	u32 mode, fetch_size;
	int ret;

	if (uc->config.pkt_mode) {
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
		fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
						   uc->config.psd_size, 0);
	} else {
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
		fetch_size = sizeof(struct cppi5_desc_hdr_t);
	}

	req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
	req_tx.nav_id = tisci_rm->tisci_dev_id;
	req_tx.index = tchan->id;
	req_tx.tx_chan_type = mode;
	req_tx.tx_supr_tdpkt = uc->config.notdpkt;
	req_tx.tx_fetch_size = fetch_size >> 2;
	req_tx.txcq_qnum = tc_ring;
	req_tx.tx_atype = uc->config.atype;
	if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
	    ud->match_data->flags & UDMA_FLAG_TDTYPE) {
		/* wait for peer to complete the teardown for PDMAs */
		req_tx.valid_params |=
				TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
		req_tx.tx_tdtype = 1;
	}

	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
	if (ret)
		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);

	return ret;
}

static int bcdma_tisci_tx_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct udma_tchan *tchan = uc->tchan;
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
	int ret;

	req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS;
	req_tx.nav_id = tisci_rm->tisci_dev_id;
	req_tx.index = tchan->id;
	req_tx.tx_supr_tdpkt = uc->config.notdpkt;
	if (ud->match_data->flags & UDMA_FLAG_TDTYPE) {
		/* wait for peer to complete the teardown for PDMAs */
		req_tx.valid_params |=
				TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
		req_tx.tx_tdtype = 1;
	}

	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
	if (ret)
		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);

	return ret;
}

#define pktdma_tisci_tx_channel_config bcdma_tisci_tx_channel_config
static int udma_tisci_rx_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct udma_rchan *rchan = uc->rchan;
	int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring);
	int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring);
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
	u32 mode, fetch_size;
	int ret;

	if (uc->config.pkt_mode) {
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
		fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
						   uc->config.psd_size, 0);
	} else {
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
		fetch_size = sizeof(struct cppi5_desc_hdr_t);
	}

	req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
	req_rx.nav_id = tisci_rm->tisci_dev_id;
	req_rx.index = rchan->id;
	req_rx.rx_fetch_size = fetch_size >> 2;
	req_rx.rxcq_qnum = rx_ring;
	req_rx.rx_chan_type = mode;
	req_rx.rx_atype = uc->config.atype;

	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
	if (ret) {
		dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
		return ret;
	}

	flow_req.valid_params =
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;

	flow_req.nav_id = tisci_rm->tisci_dev_id;
	flow_req.flow_index = rchan->id;

	if (uc->config.needs_epib)
		flow_req.rx_einfo_present = 1;
	else
		flow_req.rx_einfo_present = 0;
	if (uc->config.psd_size)
		flow_req.rx_psinfo_present = 1;
	else
		flow_req.rx_psinfo_present = 0;
	flow_req.rx_error_handling = 1;
	flow_req.rx_dest_qnum = rx_ring;
	flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE;
	flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG;
	flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI;
	flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO;
	flow_req.rx_fdq0_sz0_qnum = fd_ring;
	flow_req.rx_fdq1_qnum = fd_ring;
	flow_req.rx_fdq2_qnum = fd_ring;
	flow_req.rx_fdq3_qnum = fd_ring;

	ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);

	if (ret)
		dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);

	return ret;
}

static int bcdma_tisci_rx_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct udma_rchan *rchan = uc->rchan;
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
	int ret;

	req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
	req_rx.nav_id = tisci_rm->tisci_dev_id;
	req_rx.index = rchan->id;

	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
	if (ret)
		dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);

	return ret;
}

static int pktdma_tisci_rx_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
	int ret;

	req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
	req_rx.nav_id = tisci_rm->tisci_dev_id;
	req_rx.index = uc->rchan->id;

	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
	if (ret) {
		dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret);
		return ret;
	}

	flow_req.valid_params =
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID;

	flow_req.nav_id = tisci_rm->tisci_dev_id;
	flow_req.flow_index = uc->rflow->id;

	if (uc->config.needs_epib)
		flow_req.rx_einfo_present = 1;
	else
		flow_req.rx_einfo_present = 0;
	if (uc->config.psd_size)
		flow_req.rx_psinfo_present = 1;
	else
		flow_req.rx_psinfo_present = 0;
	flow_req.rx_error_handling = 1;

	ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);

	if (ret)
		dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id,
			ret);

	return ret;
}
*chan
)
2092 struct udma_chan
*uc
= to_udma_chan(chan
);
2093 struct udma_dev
*ud
= to_udma_dev(chan
->device
);
2094 const struct udma_soc_data
*soc_data
= ud
->soc_data
;
2095 struct k3_ring
*irq_ring
;
2099 uc
->dma_dev
= ud
->dev
;
2101 if (uc
->config
.pkt_mode
|| uc
->config
.dir
== DMA_MEM_TO_MEM
) {
2102 uc
->use_dma_pool
= true;
2103 /* in case of MEM_TO_MEM we have maximum of two TRs */
2104 if (uc
->config
.dir
== DMA_MEM_TO_MEM
) {
2105 uc
->config
.hdesc_size
= cppi5_trdesc_calc_size(
2106 sizeof(struct cppi5_tr_type15_t
), 2);
2107 uc
->config
.pkt_mode
= false;
2111 if (uc
->use_dma_pool
) {
2112 uc
->hdesc_pool
= dma_pool_create(uc
->name
, ud
->ddev
.dev
,
2113 uc
->config
.hdesc_size
,
2116 if (!uc
->hdesc_pool
) {
2117 dev_err(ud
->ddev
.dev
,
2118 "Descriptor pool allocation failed\n");
2119 uc
->use_dma_pool
= false;
2126 * Make sure that the completion is in a known state:
2127 * No teardown, the channel is idle
2129 reinit_completion(&uc
->teardown_completed
);
2130 complete_all(&uc
->teardown_completed
);
2131 uc
->state
= UDMA_CHAN_IS_IDLE
;
2133 switch (uc
->config
.dir
) {
2134 case DMA_MEM_TO_MEM
:
2135 /* Non synchronized - mem to mem type of transfer */
2136 dev_dbg(uc
->ud
->dev
, "%s: chan%d as MEM-to-MEM\n", __func__
,
2139 ret
= udma_get_chan_pair(uc
);
2143 ret
= udma_alloc_tx_resources(uc
);
2149 ret
= udma_alloc_rx_resources(uc
);
2151 udma_free_tx_resources(uc
);
2155 uc
->config
.src_thread
= ud
->psil_base
+ uc
->tchan
->id
;
2156 uc
->config
.dst_thread
= (ud
->psil_base
+ uc
->rchan
->id
) |
2157 K3_PSIL_DST_THREAD_ID_OFFSET
;
2159 irq_ring
= uc
->tchan
->tc_ring
;
2160 irq_udma_idx
= uc
->tchan
->id
;
2162 ret
= udma_tisci_m2m_channel_config(uc
);
2164 case DMA_MEM_TO_DEV
:
2165 /* Slave transfer synchronized - mem to dev (TX) trasnfer */
2166 dev_dbg(uc
->ud
->dev
, "%s: chan%d as MEM-to-DEV\n", __func__
,
2169 ret
= udma_alloc_tx_resources(uc
);
2173 uc
->config
.src_thread
= ud
->psil_base
+ uc
->tchan
->id
;
2174 uc
->config
.dst_thread
= uc
->config
.remote_thread_id
;
2175 uc
->config
.dst_thread
|= K3_PSIL_DST_THREAD_ID_OFFSET
;
2177 irq_ring
= uc
->tchan
->tc_ring
;
2178 irq_udma_idx
= uc
->tchan
->id
;
2180 ret
= udma_tisci_tx_channel_config(uc
);
2182 case DMA_DEV_TO_MEM
:
2183 /* Slave transfer synchronized - dev to mem (RX) trasnfer */
2184 dev_dbg(uc
->ud
->dev
, "%s: chan%d as DEV-to-MEM\n", __func__
,
2187 ret
= udma_alloc_rx_resources(uc
);
2191 uc
->config
.src_thread
= uc
->config
.remote_thread_id
;
2192 uc
->config
.dst_thread
= (ud
->psil_base
+ uc
->rchan
->id
) |
2193 K3_PSIL_DST_THREAD_ID_OFFSET
;
2195 irq_ring
= uc
->rflow
->r_ring
;
2196 irq_udma_idx
= soc_data
->oes
.udma_rchan
+ uc
->rchan
->id
;
2198 ret
= udma_tisci_rx_channel_config(uc
);
2201 /* Can not happen */
2202 dev_err(uc
->ud
->dev
, "%s: chan%d invalid direction (%u)\n",
2203 __func__
, uc
->id
, uc
->config
.dir
);
2209 /* check if the channel configuration was successful */
2213 if (udma_is_chan_running(uc
)) {
2214 dev_warn(ud
->dev
, "chan%d: is running!\n", uc
->id
);
2215 udma_reset_chan(uc
, false);
2216 if (udma_is_chan_running(uc
)) {
2217 dev_err(ud
->dev
, "chan%d: won't stop!\n", uc
->id
);
2224 ret
= navss_psil_pair(ud
, uc
->config
.src_thread
, uc
->config
.dst_thread
);
2226 dev_err(ud
->dev
, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2227 uc
->config
.src_thread
, uc
->config
.dst_thread
);
2231 uc
->psil_paired
= true;
2233 uc
->irq_num_ring
= k3_ringacc_get_ring_irq_num(irq_ring
);
2234 if (uc
->irq_num_ring
<= 0) {
2235 dev_err(ud
->dev
, "Failed to get ring irq (index: %u)\n",
2236 k3_ringacc_get_ring_id(irq_ring
));
2241 ret
= request_irq(uc
->irq_num_ring
, udma_ring_irq_handler
,
2242 IRQF_TRIGGER_HIGH
, uc
->name
, uc
);
2244 dev_err(ud
->dev
, "chan%d: ring irq request failed\n", uc
->id
);
2248 /* Event from UDMA (TR events) only needed for slave TR mode channels */
2249 if (is_slave_direction(uc
->config
.dir
) && !uc
->config
.pkt_mode
) {
2250 uc
->irq_num_udma
= ti_sci_inta_msi_get_virq(ud
->dev
,
2252 if (uc
->irq_num_udma
<= 0) {
2253 dev_err(ud
->dev
, "Failed to get udma irq (index: %u)\n",
2255 free_irq(uc
->irq_num_ring
, uc
);
2260 ret
= request_irq(uc
->irq_num_udma
, udma_udma_irq_handler
, 0,
2263 dev_err(ud
->dev
, "chan%d: UDMA irq request failed\n",
2265 free_irq(uc
->irq_num_ring
, uc
);
2269 uc
->irq_num_udma
= 0;
2272 udma_reset_rings(uc
);
2277 uc
->irq_num_ring
= 0;
2278 uc
->irq_num_udma
= 0;
2280 navss_psil_unpair(ud
, uc
->config
.src_thread
, uc
->config
.dst_thread
);
2281 uc
->psil_paired
= false;
2283 udma_free_tx_resources(uc
);
2284 udma_free_rx_resources(uc
);
2286 udma_reset_uchan(uc
);
2288 if (uc
->use_dma_pool
) {
2289 dma_pool_destroy(uc
->hdesc_pool
);
2290 uc
->use_dma_pool
= false;
static int bcdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	struct udma_dev *ud = to_udma_dev(chan->device);
	const struct udma_oes_offsets *oes = &ud->soc_data->oes;
	u32 irq_udma_idx, irq_ring_idx;
	int ret;

	/* Only TR mode is supported */
	uc->config.pkt_mode = false;

	/*
	 * Make sure that the completion is in a known state:
	 * No teardown, the channel is idle
	 */
	reinit_completion(&uc->teardown_completed);
	complete_all(&uc->teardown_completed);
	uc->state = UDMA_CHAN_IS_IDLE;

	switch (uc->config.dir) {
	case DMA_MEM_TO_MEM:
		/* Non synchronized - mem to mem type of transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
			uc->id);

		ret = bcdma_alloc_bchan_resources(uc);
		if (ret)
			return ret;

		irq_ring_idx = uc->bchan->id + oes->bcdma_bchan_ring;
		irq_udma_idx = uc->bchan->id + oes->bcdma_bchan_data;

		ret = bcdma_tisci_m2m_channel_config(uc);
		break;
	case DMA_MEM_TO_DEV:
		/* Slave transfer synchronized - mem to dev (TX) transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
			uc->id);

		ret = udma_alloc_tx_resources(uc);
		if (ret) {
			uc->config.remote_thread_id = -1;
			return ret;
		}

		uc->config.src_thread = ud->psil_base + uc->tchan->id;
		uc->config.dst_thread = uc->config.remote_thread_id;
		uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;

		irq_ring_idx = uc->tchan->id + oes->bcdma_tchan_ring;
		irq_udma_idx = uc->tchan->id + oes->bcdma_tchan_data;

		ret = bcdma_tisci_tx_channel_config(uc);
		break;
	case DMA_DEV_TO_MEM:
		/* Slave transfer synchronized - dev to mem (RX) transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
			uc->id);

		ret = udma_alloc_rx_resources(uc);
		if (ret) {
			uc->config.remote_thread_id = -1;
			return ret;
		}

		uc->config.src_thread = uc->config.remote_thread_id;
		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
					K3_PSIL_DST_THREAD_ID_OFFSET;

		irq_ring_idx = uc->rchan->id + oes->bcdma_rchan_ring;
		irq_udma_idx = uc->rchan->id + oes->bcdma_rchan_data;

		ret = bcdma_tisci_rx_channel_config(uc);
		break;
	default:
		/* Can not happen */
		dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
			__func__, uc->id, uc->config.dir);
		return -EINVAL;
	}

	/* check if the channel configuration was successful */
	if (ret)
		goto err_res_free;

	if (udma_is_chan_running(uc)) {
		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
		udma_reset_chan(uc, false);
		if (udma_is_chan_running(uc)) {
			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
			ret = -EBUSY;
			goto err_res_free;
		}
	}

	uc->dma_dev = dmaengine_get_dma_device(chan);
	if (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type) {
		uc->config.hdesc_size = cppi5_trdesc_calc_size(
					sizeof(struct cppi5_tr_type15_t), 2);

		uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
						 uc->config.hdesc_size,
						 ud->desc_align,
						 0);
		if (!uc->hdesc_pool) {
			dev_err(ud->ddev.dev,
				"Descriptor pool allocation failed\n");
			uc->use_dma_pool = false;
			ret = -ENOMEM;
			goto err_res_free;
		}

		uc->use_dma_pool = true;
	} else if (uc->config.dir != DMA_MEM_TO_MEM) {
		/* PSI-L pairing */
		ret = navss_psil_pair(ud, uc->config.src_thread,
				      uc->config.dst_thread);
		if (ret) {
			dev_err(ud->dev,
				"PSI-L pairing failed: 0x%04x -> 0x%04x\n",
				uc->config.src_thread, uc->config.dst_thread);
			goto err_res_free;
		}

		uc->psil_paired = true;
	}

	uc->irq_num_ring = ti_sci_inta_msi_get_virq(ud->dev, irq_ring_idx);
	if (uc->irq_num_ring <= 0) {
		dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
			irq_ring_idx);
		ret = -EINVAL;
		goto err_psi_free;
	}

	ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
			  IRQF_TRIGGER_HIGH, uc->name, uc);
	if (ret) {
		dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
		goto err_irq_free;
	}

	/* Event from BCDMA (TR events) only needed for slave channels */
	if (is_slave_direction(uc->config.dir)) {
		uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev,
							    irq_udma_idx);
		if (uc->irq_num_udma <= 0) {
			dev_err(ud->dev, "Failed to get bcdma irq (index: %u)\n",
				irq_udma_idx);
			free_irq(uc->irq_num_ring, uc);
			ret = -EINVAL;
			goto err_irq_free;
		}

		ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
				  uc->name, uc);
		if (ret) {
			dev_err(ud->dev, "chan%d: BCDMA irq request failed\n",
				uc->id);
			free_irq(uc->irq_num_ring, uc);
			goto err_irq_free;
		}
	} else {
		uc->irq_num_udma = 0;
	}

	udma_reset_rings(uc);

	INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
				  udma_check_tx_completion);
	return 0;

err_irq_free:
	uc->irq_num_ring = 0;
	uc->irq_num_udma = 0;
err_psi_free:
	if (uc->psil_paired)
		navss_psil_unpair(ud, uc->config.src_thread,
				  uc->config.dst_thread);
	uc->psil_paired = false;
err_res_free:
	bcdma_free_bchan_resources(uc);
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);

	udma_reset_uchan(uc);

	if (uc->use_dma_pool) {
		dma_pool_destroy(uc->hdesc_pool);
		uc->use_dma_pool = false;
	}

	return ret;
}
= false;
static int bcdma_router_config(struct dma_chan *chan)
{
	struct k3_event_route_data *router_data = chan->route_data;
	struct udma_chan *uc = to_udma_chan(chan);
	u32 trigger_event;

	if (!uc->bchan)
		return -EINVAL;

	if (uc->config.tr_trigger_type != 1 && uc->config.tr_trigger_type != 2)
		return -EINVAL;

	trigger_event = uc->ud->soc_data->bcdma_trigger_event_offset;
	trigger_event += (uc->bchan->id * 2) + uc->config.tr_trigger_type - 1;

	return router_data->set_event(router_data->priv, trigger_event);
}
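/*
 * Illustration (not from the original source; the channel number is an
 * assumption): with bcdma_trigger_event_offset = 0xc400 (the AM64 value used
 * later in this file), bchan->id = 3 and tr_trigger_type = 2, the computation
 * above selects global event 0xc400 + (3 * 2) + 2 - 1 = 0xc407. Each bchan
 * therefore owns two consecutive trigger events, one per trigger type.
 */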
2508 static int pktdma_alloc_chan_resources(struct dma_chan
*chan
)
2510 struct udma_chan
*uc
= to_udma_chan(chan
);
2511 struct udma_dev
*ud
= to_udma_dev(chan
->device
);
2512 const struct udma_oes_offsets
*oes
= &ud
->soc_data
->oes
;
2517 * Make sure that the completion is in a known state:
2518 * No teardown, the channel is idle
2520 reinit_completion(&uc
->teardown_completed
);
2521 complete_all(&uc
->teardown_completed
);
2522 uc
->state
= UDMA_CHAN_IS_IDLE
;
2524 switch (uc
->config
.dir
) {
2525 case DMA_MEM_TO_DEV
:
2526 /* Slave transfer synchronized - mem to dev (TX) trasnfer */
2527 dev_dbg(uc
->ud
->dev
, "%s: chan%d as MEM-to-DEV\n", __func__
,
2530 ret
= udma_alloc_tx_resources(uc
);
2532 uc
->config
.remote_thread_id
= -1;
2536 uc
->config
.src_thread
= ud
->psil_base
+ uc
->tchan
->id
;
2537 uc
->config
.dst_thread
= uc
->config
.remote_thread_id
;
2538 uc
->config
.dst_thread
|= K3_PSIL_DST_THREAD_ID_OFFSET
;
2540 irq_ring_idx
= uc
->tchan
->tflow_id
+ oes
->pktdma_tchan_flow
;
2542 ret
= pktdma_tisci_tx_channel_config(uc
);
2544 case DMA_DEV_TO_MEM
:
2545 /* Slave transfer synchronized - dev to mem (RX) trasnfer */
2546 dev_dbg(uc
->ud
->dev
, "%s: chan%d as DEV-to-MEM\n", __func__
,
2549 ret
= udma_alloc_rx_resources(uc
);
2551 uc
->config
.remote_thread_id
= -1;
2555 uc
->config
.src_thread
= uc
->config
.remote_thread_id
;
2556 uc
->config
.dst_thread
= (ud
->psil_base
+ uc
->rchan
->id
) |
2557 K3_PSIL_DST_THREAD_ID_OFFSET
;
2559 irq_ring_idx
= uc
->rflow
->id
+ oes
->pktdma_rchan_flow
;
2561 ret
= pktdma_tisci_rx_channel_config(uc
);
2564 /* Can not happen */
2565 dev_err(uc
->ud
->dev
, "%s: chan%d invalid direction (%u)\n",
2566 __func__
, uc
->id
, uc
->config
.dir
);
2570 /* check if the channel configuration was successful */
2574 if (udma_is_chan_running(uc
)) {
2575 dev_warn(ud
->dev
, "chan%d: is running!\n", uc
->id
);
2576 udma_reset_chan(uc
, false);
2577 if (udma_is_chan_running(uc
)) {
2578 dev_err(ud
->dev
, "chan%d: won't stop!\n", uc
->id
);
2584 uc
->dma_dev
= dmaengine_get_dma_device(chan
);
2585 uc
->hdesc_pool
= dma_pool_create(uc
->name
, uc
->dma_dev
,
2586 uc
->config
.hdesc_size
, ud
->desc_align
,
2588 if (!uc
->hdesc_pool
) {
2589 dev_err(ud
->ddev
.dev
,
2590 "Descriptor pool allocation failed\n");
2591 uc
->use_dma_pool
= false;
2596 uc
->use_dma_pool
= true;
2599 ret
= navss_psil_pair(ud
, uc
->config
.src_thread
, uc
->config
.dst_thread
);
2601 dev_err(ud
->dev
, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2602 uc
->config
.src_thread
, uc
->config
.dst_thread
);
2606 uc
->psil_paired
= true;
2608 uc
->irq_num_ring
= ti_sci_inta_msi_get_virq(ud
->dev
, irq_ring_idx
);
2609 if (uc
->irq_num_ring
<= 0) {
2610 dev_err(ud
->dev
, "Failed to get ring irq (index: %u)\n",
2616 ret
= request_irq(uc
->irq_num_ring
, udma_ring_irq_handler
,
2617 IRQF_TRIGGER_HIGH
, uc
->name
, uc
);
2619 dev_err(ud
->dev
, "chan%d: ring irq request failed\n", uc
->id
);
2623 uc
->irq_num_udma
= 0;
2625 udma_reset_rings(uc
);
2627 INIT_DELAYED_WORK_ONSTACK(&uc
->tx_drain
.work
,
2628 udma_check_tx_completion
);
2632 "chan%d: tchan%d, tflow%d, Remote thread: 0x%04x\n",
2633 uc
->id
, uc
->tchan
->id
, uc
->tchan
->tflow_id
,
2634 uc
->config
.remote_thread_id
);
2637 "chan%d: rchan%d, rflow%d, Remote thread: 0x%04x\n",
2638 uc
->id
, uc
->rchan
->id
, uc
->rflow
->id
,
2639 uc
->config
.remote_thread_id
);
2643 uc
->irq_num_ring
= 0;
2645 navss_psil_unpair(ud
, uc
->config
.src_thread
, uc
->config
.dst_thread
);
2646 uc
->psil_paired
= false;
2648 udma_free_tx_resources(uc
);
2649 udma_free_rx_resources(uc
);
2651 udma_reset_uchan(uc
);
2653 dma_pool_destroy(uc
->hdesc_pool
);
2654 uc
->use_dma_pool
= false;
static int udma_slave_config(struct dma_chan *chan,
			     struct dma_slave_config *cfg)
{
	struct udma_chan *uc = to_udma_chan(chan);

	memcpy(&uc->cfg, cfg, sizeof(uc->cfg));

	return 0;
}
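/*
 * The configuration is only cached here; it is applied when a transfer is
 * prepared. A minimal client-side sketch (illustrative only; the channel name
 * "rx" and fifo_paddr are placeholders, not taken from this driver):
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_DEV_TO_MEM,
 *		.src_addr = fifo_paddr,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 8,
 *	};
 *
 *	chan = dma_request_chan(dev, "rx");
 *	dmaengine_slave_config(chan, &cfg);
 */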
static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
					    size_t tr_size, int tr_count,
					    enum dma_transfer_direction dir)
{
	struct udma_hwdesc *hwdesc;
	struct cppi5_desc_hdr_t *tr_desc;
	struct udma_desc *d;
	u32 reload_count = 0;
	u32 ring_id;

	switch (tr_size) {
	case 16:
	case 32:
	case 64:
	case 128:
		break;
	default:
		dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
		return NULL;
	}

	/* We have only one descriptor containing multiple TRs */
	d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT);
	if (!d)
		return NULL;

	d->sglen = tr_count;

	d->hwdesc_count = 1;
	hwdesc = &d->hwdesc[0];

	/* Allocate memory for DMA ring descriptor */
	if (uc->use_dma_pool) {
		hwdesc->cppi5_desc_size = uc->config.hdesc_size;
		hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
						GFP_NOWAIT,
						&hwdesc->cppi5_desc_paddr);
	} else {
		hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size,
								 tr_count);
		hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
						uc->ud->desc_align);
		hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
						hwdesc->cppi5_desc_size,
						&hwdesc->cppi5_desc_paddr,
						GFP_NOWAIT);
	}

	if (!hwdesc->cppi5_desc_vaddr) {
		kfree(d);
		return NULL;
	}

	/* Start of the TR req records */
	hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
	/* Start address of the TR response array */
	hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count;

	tr_desc = hwdesc->cppi5_desc_vaddr;

	if (uc->cyclic)
		reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE;

	if (dir == DMA_DEV_TO_MEM)
		ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
	else
		ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);

	cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count);
	cppi5_desc_set_pktids(tr_desc, uc->id,
			      CPPI5_INFO1_DESC_FLOWID_DEFAULT);
	cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);

	return d;
}
/**
 * udma_get_tr_counters - calculate TR counters for a given length
 * @len: Length of the transfer
 * @align_to: Preferred alignment
 * @tr0_cnt0: First TR icnt0
 * @tr0_cnt1: First TR icnt1
 * @tr1_cnt0: Second (if used) TR icnt0
 *
 * For len < SZ_64K only one TR is enough, tr1_cnt0 is not updated
 * For len >= SZ_64K two TRs are used in a simple way:
 * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1)
 * Second TR: the remaining length (tr1_cnt0)
 *
 * Returns the number of TRs the length needs (1 or 2)
 * or -EINVAL if the length can not be supported
 */
static int udma_get_tr_counters(size_t len, unsigned long align_to,
				u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0)
{
	if (len < SZ_64K) {
		*tr0_cnt0 = len;
		*tr0_cnt1 = 1;

		return 1;
	}

	if (align_to > 3)
		align_to = 3;

realign:
	*tr0_cnt0 = SZ_64K - BIT(align_to);
	if (len / *tr0_cnt0 >= SZ_64K) {
		if (align_to) {
			align_to--;
			goto realign;
		}
		return -EINVAL;
	}

	*tr0_cnt1 = len / *tr0_cnt0;
	*tr1_cnt0 = len % *tr0_cnt0;

	return 2;
}
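/*
 * Worked example (illustrative, not part of the original source): for
 * len = 3 MiB with an 8 byte aligned buffer (align_to = 3) the first TR gets
 * tr0_cnt0 = SZ_64K - BIT(3) = 65528 and tr0_cnt1 = 3145728 / 65528 = 48,
 * while the second TR carries the remainder tr1_cnt0 = 3145728 % 65528 = 384,
 * so the function returns 2.
 */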
2790 static struct udma_desc
*
2791 udma_prep_slave_sg_tr(struct udma_chan
*uc
, struct scatterlist
*sgl
,
2792 unsigned int sglen
, enum dma_transfer_direction dir
,
2793 unsigned long tx_flags
, void *context
)
2795 struct scatterlist
*sgent
;
2796 struct udma_desc
*d
;
2797 struct cppi5_tr_type1_t
*tr_req
= NULL
;
2798 u16 tr0_cnt0
, tr0_cnt1
, tr1_cnt0
;
2805 /* estimate the number of TRs we will need */
2806 for_each_sg(sgl
, sgent
, sglen
, i
) {
2807 if (sg_dma_len(sgent
) < SZ_64K
)
2813 /* Now allocate and setup the descriptor. */
2814 tr_size
= sizeof(struct cppi5_tr_type1_t
);
2815 d
= udma_alloc_tr_desc(uc
, tr_size
, num_tr
, dir
);
2821 if (uc
->ud
->match_data
->type
== DMA_TYPE_UDMA
)
2824 asel
= (u64
)uc
->config
.asel
<< K3_ADDRESS_ASEL_SHIFT
;
2826 tr_req
= d
->hwdesc
[0].tr_req_base
;
2827 for_each_sg(sgl
, sgent
, sglen
, i
) {
2828 dma_addr_t sg_addr
= sg_dma_address(sgent
);
2830 num_tr
= udma_get_tr_counters(sg_dma_len(sgent
), __ffs(sg_addr
),
2831 &tr0_cnt0
, &tr0_cnt1
, &tr1_cnt0
);
2833 dev_err(uc
->ud
->dev
, "size %u is not supported\n",
2835 udma_free_hwdesc(uc
, d
);
2840 cppi5_tr_init(&tr_req
[tr_idx
].flags
, CPPI5_TR_TYPE1
, false,
2841 false, CPPI5_TR_EVENT_SIZE_COMPLETION
, 0);
2842 cppi5_tr_csf_set(&tr_req
[tr_idx
].flags
, CPPI5_TR_CSF_SUPR_EVT
);
2845 tr_req
[tr_idx
].addr
= sg_addr
;
2846 tr_req
[tr_idx
].icnt0
= tr0_cnt0
;
2847 tr_req
[tr_idx
].icnt1
= tr0_cnt1
;
2848 tr_req
[tr_idx
].dim1
= tr0_cnt0
;
2852 cppi5_tr_init(&tr_req
[tr_idx
].flags
, CPPI5_TR_TYPE1
,
2854 CPPI5_TR_EVENT_SIZE_COMPLETION
, 0);
2855 cppi5_tr_csf_set(&tr_req
[tr_idx
].flags
,
2856 CPPI5_TR_CSF_SUPR_EVT
);
2858 tr_req
[tr_idx
].addr
= sg_addr
+ tr0_cnt1
* tr0_cnt0
;
2859 tr_req
[tr_idx
].icnt0
= tr1_cnt0
;
2860 tr_req
[tr_idx
].icnt1
= 1;
2861 tr_req
[tr_idx
].dim1
= tr1_cnt0
;
2865 d
->residue
+= sg_dma_len(sgent
);
2868 cppi5_tr_csf_set(&tr_req
[tr_idx
- 1].flags
,
2869 CPPI5_TR_CSF_SUPR_EVT
| CPPI5_TR_CSF_EOP
);
2874 static struct udma_desc
*
2875 udma_prep_slave_sg_triggered_tr(struct udma_chan
*uc
, struct scatterlist
*sgl
,
2877 enum dma_transfer_direction dir
,
2878 unsigned long tx_flags
, void *context
)
2880 struct scatterlist
*sgent
;
2881 struct cppi5_tr_type15_t
*tr_req
= NULL
;
2882 enum dma_slave_buswidth dev_width
;
2883 u16 tr_cnt0
, tr_cnt1
;
2884 dma_addr_t dev_addr
;
2885 struct udma_desc
*d
;
2887 size_t tr_size
, sg_len
;
2890 u32 burst
, trigger_size
, port_window
;
2893 if (dir
== DMA_DEV_TO_MEM
) {
2894 dev_addr
= uc
->cfg
.src_addr
;
2895 dev_width
= uc
->cfg
.src_addr_width
;
2896 burst
= uc
->cfg
.src_maxburst
;
2897 port_window
= uc
->cfg
.src_port_window_size
;
2898 } else if (dir
== DMA_MEM_TO_DEV
) {
2899 dev_addr
= uc
->cfg
.dst_addr
;
2900 dev_width
= uc
->cfg
.dst_addr_width
;
2901 burst
= uc
->cfg
.dst_maxburst
;
2902 port_window
= uc
->cfg
.dst_port_window_size
;
2904 dev_err(uc
->ud
->dev
, "%s: bad direction?\n", __func__
);
2912 if (port_window
!= burst
) {
2913 dev_err(uc
->ud
->dev
,
2914 "The burst must be equal to port_window\n");
2918 tr_cnt0
= dev_width
* port_window
;
2921 tr_cnt0
= dev_width
;
2924 trigger_size
= tr_cnt0
* tr_cnt1
;
2926 /* estimate the number of TRs we will need */
2927 for_each_sg(sgl
, sgent
, sglen
, i
) {
2928 sg_len
= sg_dma_len(sgent
);
2930 if (sg_len
% trigger_size
) {
2931 dev_err(uc
->ud
->dev
,
2932 "Not aligned SG entry (%zu for %u)\n", sg_len
,
2937 if (sg_len
/ trigger_size
< SZ_64K
)
2943 /* Now allocate and setup the descriptor. */
2944 tr_size
= sizeof(struct cppi5_tr_type15_t
);
2945 d
= udma_alloc_tr_desc(uc
, tr_size
, num_tr
, dir
);
2951 if (uc
->ud
->match_data
->type
== DMA_TYPE_UDMA
) {
2954 asel
= (u64
)uc
->config
.asel
<< K3_ADDRESS_ASEL_SHIFT
;
2958 tr_req
= d
->hwdesc
[0].tr_req_base
;
2959 for_each_sg(sgl
, sgent
, sglen
, i
) {
2960 u16 tr0_cnt2
, tr0_cnt3
, tr1_cnt2
;
2961 dma_addr_t sg_addr
= sg_dma_address(sgent
);
2963 sg_len
= sg_dma_len(sgent
);
2964 num_tr
= udma_get_tr_counters(sg_len
/ trigger_size
, 0,
2965 &tr0_cnt2
, &tr0_cnt3
, &tr1_cnt2
);
2967 dev_err(uc
->ud
->dev
, "size %zu is not supported\n",
2969 udma_free_hwdesc(uc
, d
);
2974 cppi5_tr_init(&tr_req
[tr_idx
].flags
, CPPI5_TR_TYPE15
, false,
2975 true, CPPI5_TR_EVENT_SIZE_COMPLETION
, 0);
2976 cppi5_tr_csf_set(&tr_req
[tr_idx
].flags
, CPPI5_TR_CSF_SUPR_EVT
);
2977 cppi5_tr_set_trigger(&tr_req
[tr_idx
].flags
,
2978 uc
->config
.tr_trigger_type
,
2979 CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC
, 0, 0);
2982 if (dir
== DMA_DEV_TO_MEM
) {
2983 tr_req
[tr_idx
].addr
= dev_addr
;
2984 tr_req
[tr_idx
].icnt0
= tr_cnt0
;
2985 tr_req
[tr_idx
].icnt1
= tr_cnt1
;
2986 tr_req
[tr_idx
].icnt2
= tr0_cnt2
;
2987 tr_req
[tr_idx
].icnt3
= tr0_cnt3
;
2988 tr_req
[tr_idx
].dim1
= (-1) * tr_cnt0
;
2990 tr_req
[tr_idx
].daddr
= sg_addr
;
2991 tr_req
[tr_idx
].dicnt0
= tr_cnt0
;
2992 tr_req
[tr_idx
].dicnt1
= tr_cnt1
;
2993 tr_req
[tr_idx
].dicnt2
= tr0_cnt2
;
2994 tr_req
[tr_idx
].dicnt3
= tr0_cnt3
;
2995 tr_req
[tr_idx
].ddim1
= tr_cnt0
;
2996 tr_req
[tr_idx
].ddim2
= trigger_size
;
2997 tr_req
[tr_idx
].ddim3
= trigger_size
* tr0_cnt2
;
2999 tr_req
[tr_idx
].addr
= sg_addr
;
3000 tr_req
[tr_idx
].icnt0
= tr_cnt0
;
3001 tr_req
[tr_idx
].icnt1
= tr_cnt1
;
3002 tr_req
[tr_idx
].icnt2
= tr0_cnt2
;
3003 tr_req
[tr_idx
].icnt3
= tr0_cnt3
;
3004 tr_req
[tr_idx
].dim1
= tr_cnt0
;
3005 tr_req
[tr_idx
].dim2
= trigger_size
;
3006 tr_req
[tr_idx
].dim3
= trigger_size
* tr0_cnt2
;
3008 tr_req
[tr_idx
].daddr
= dev_addr
;
3009 tr_req
[tr_idx
].dicnt0
= tr_cnt0
;
3010 tr_req
[tr_idx
].dicnt1
= tr_cnt1
;
3011 tr_req
[tr_idx
].dicnt2
= tr0_cnt2
;
3012 tr_req
[tr_idx
].dicnt3
= tr0_cnt3
;
3013 tr_req
[tr_idx
].ddim1
= (-1) * tr_cnt0
;
3019 cppi5_tr_init(&tr_req
[tr_idx
].flags
, CPPI5_TR_TYPE15
,
3021 CPPI5_TR_EVENT_SIZE_COMPLETION
, 0);
3022 cppi5_tr_csf_set(&tr_req
[tr_idx
].flags
,
3023 CPPI5_TR_CSF_SUPR_EVT
);
3024 cppi5_tr_set_trigger(&tr_req
[tr_idx
].flags
,
3025 uc
->config
.tr_trigger_type
,
3026 CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC
,
3029 sg_addr
+= trigger_size
* tr0_cnt2
* tr0_cnt3
;
3030 if (dir
== DMA_DEV_TO_MEM
) {
3031 tr_req
[tr_idx
].addr
= dev_addr
;
3032 tr_req
[tr_idx
].icnt0
= tr_cnt0
;
3033 tr_req
[tr_idx
].icnt1
= tr_cnt1
;
3034 tr_req
[tr_idx
].icnt2
= tr1_cnt2
;
3035 tr_req
[tr_idx
].icnt3
= 1;
3036 tr_req
[tr_idx
].dim1
= (-1) * tr_cnt0
;
3038 tr_req
[tr_idx
].daddr
= sg_addr
;
3039 tr_req
[tr_idx
].dicnt0
= tr_cnt0
;
3040 tr_req
[tr_idx
].dicnt1
= tr_cnt1
;
3041 tr_req
[tr_idx
].dicnt2
= tr1_cnt2
;
3042 tr_req
[tr_idx
].dicnt3
= 1;
3043 tr_req
[tr_idx
].ddim1
= tr_cnt0
;
3044 tr_req
[tr_idx
].ddim2
= trigger_size
;
3046 tr_req
[tr_idx
].addr
= sg_addr
;
3047 tr_req
[tr_idx
].icnt0
= tr_cnt0
;
3048 tr_req
[tr_idx
].icnt1
= tr_cnt1
;
3049 tr_req
[tr_idx
].icnt2
= tr1_cnt2
;
3050 tr_req
[tr_idx
].icnt3
= 1;
3051 tr_req
[tr_idx
].dim1
= tr_cnt0
;
3052 tr_req
[tr_idx
].dim2
= trigger_size
;
3054 tr_req
[tr_idx
].daddr
= dev_addr
;
3055 tr_req
[tr_idx
].dicnt0
= tr_cnt0
;
3056 tr_req
[tr_idx
].dicnt1
= tr_cnt1
;
3057 tr_req
[tr_idx
].dicnt2
= tr1_cnt2
;
3058 tr_req
[tr_idx
].dicnt3
= 1;
3059 tr_req
[tr_idx
].ddim1
= (-1) * tr_cnt0
;
3064 d
->residue
+= sg_len
;
3067 cppi5_tr_csf_set(&tr_req
[tr_idx
- 1].flags
,
3068 CPPI5_TR_CSF_SUPR_EVT
| CPPI5_TR_CSF_EOP
);
static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
				   enum dma_slave_buswidth dev_width,
				   u16 elcnt)
{
	if (uc->config.ep_type != PSIL_EP_PDMA_XY)
		return 0;

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		d->static_tr.elsize = 0;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		d->static_tr.elsize = 1;
		break;
	case DMA_SLAVE_BUSWIDTH_3_BYTES:
		d->static_tr.elsize = 2;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		d->static_tr.elsize = 3;
		break;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		d->static_tr.elsize = 4;
		break;
	default: /* not reached */
		return -EINVAL;
	}

	d->static_tr.elcnt = elcnt;

	/*
	 * PDMA must close the packet when the channel is in packet mode.
	 * For TR mode when the channel is not cyclic we also need PDMA to close
	 * the packet otherwise the transfer will stall because PDMA holds on
	 * the data it has received from the peripheral.
	 */
	if (uc->config.pkt_mode || !uc->cyclic) {
		unsigned int div = dev_width * elcnt;

		if (uc->cyclic)
			d->static_tr.bstcnt = d->residue / d->sglen / div;
		else
			d->static_tr.bstcnt = d->residue / div;

		if (uc->config.dir == DMA_DEV_TO_MEM &&
		    d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
			return -EINVAL;
	} else {
		d->static_tr.bstcnt = 0;
	}

	return 0;
}
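/*
 * Example of the resulting static TR (values are illustrative, not from the
 * original source): a non-cyclic DEV_TO_MEM transfer with d->residue = 4096,
 * a 4 byte wide peripheral (elsize = 3 per the switch above) and elcnt = 8
 * gives div = 4 * 8 = 32 and bstcnt = 4096 / 32 = 128, comfortably below the
 * 12/24-bit statictr_z_mask limits checked above.
 */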
3127 static struct udma_desc
*
3128 udma_prep_slave_sg_pkt(struct udma_chan
*uc
, struct scatterlist
*sgl
,
3129 unsigned int sglen
, enum dma_transfer_direction dir
,
3130 unsigned long tx_flags
, void *context
)
3132 struct scatterlist
*sgent
;
3133 struct cppi5_host_desc_t
*h_desc
= NULL
;
3134 struct udma_desc
*d
;
3139 d
= kzalloc(struct_size(d
, hwdesc
, sglen
), GFP_NOWAIT
);
3144 d
->hwdesc_count
= sglen
;
3146 if (dir
== DMA_DEV_TO_MEM
)
3147 ring_id
= k3_ringacc_get_ring_id(uc
->rflow
->r_ring
);
3149 ring_id
= k3_ringacc_get_ring_id(uc
->tchan
->tc_ring
);
3151 if (uc
->ud
->match_data
->type
== DMA_TYPE_UDMA
)
3154 asel
= (u64
)uc
->config
.asel
<< K3_ADDRESS_ASEL_SHIFT
;
3156 for_each_sg(sgl
, sgent
, sglen
, i
) {
3157 struct udma_hwdesc
*hwdesc
= &d
->hwdesc
[i
];
3158 dma_addr_t sg_addr
= sg_dma_address(sgent
);
3159 struct cppi5_host_desc_t
*desc
;
3160 size_t sg_len
= sg_dma_len(sgent
);
3162 hwdesc
->cppi5_desc_vaddr
= dma_pool_zalloc(uc
->hdesc_pool
,
3164 &hwdesc
->cppi5_desc_paddr
);
3165 if (!hwdesc
->cppi5_desc_vaddr
) {
3166 dev_err(uc
->ud
->dev
,
3167 "descriptor%d allocation failed\n", i
);
3169 udma_free_hwdesc(uc
, d
);
3174 d
->residue
+= sg_len
;
3175 hwdesc
->cppi5_desc_size
= uc
->config
.hdesc_size
;
3176 desc
= hwdesc
->cppi5_desc_vaddr
;
3179 cppi5_hdesc_init(desc
, 0, 0);
3180 /* Flow and Packed ID */
3181 cppi5_desc_set_pktids(&desc
->hdr
, uc
->id
,
3182 CPPI5_INFO1_DESC_FLOWID_DEFAULT
);
3183 cppi5_desc_set_retpolicy(&desc
->hdr
, 0, ring_id
);
3185 cppi5_hdesc_reset_hbdesc(desc
);
3186 cppi5_desc_set_retpolicy(&desc
->hdr
, 0, 0xffff);
3189 /* attach the sg buffer to the descriptor */
3191 cppi5_hdesc_attach_buf(desc
, sg_addr
, sg_len
, sg_addr
, sg_len
);
3193 /* Attach link as host buffer descriptor */
3195 cppi5_hdesc_link_hbdesc(h_desc
,
3196 hwdesc
->cppi5_desc_paddr
| asel
);
3198 if (uc
->ud
->match_data
->type
== DMA_TYPE_PKTDMA
||
3199 dir
== DMA_MEM_TO_DEV
)
3203 if (d
->residue
>= SZ_4M
) {
3204 dev_err(uc
->ud
->dev
,
3205 "%s: Transfer size %u is over the supported 4M range\n",
3206 __func__
, d
->residue
);
3207 udma_free_hwdesc(uc
, d
);
3212 h_desc
= d
->hwdesc
[0].cppi5_desc_vaddr
;
3213 cppi5_hdesc_set_pktlen(h_desc
, d
->residue
);
static int udma_attach_metadata(struct dma_async_tx_descriptor *desc,
				void *data, size_t len)
{
	struct udma_desc *d = to_udma_desc(desc);
	struct udma_chan *uc = to_udma_chan(desc->chan);
	struct cppi5_host_desc_t *h_desc;
	u32 psd_size = len;
	u32 flags = 0;

	if (!uc->config.pkt_mode || !uc->config.metadata_size)
		return -ENOTSUPP;

	if (!data || len > uc->config.metadata_size)
		return -EINVAL;

	if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE)
		return -EINVAL;

	h_desc = d->hwdesc[0].cppi5_desc_vaddr;
	if (d->dir == DMA_MEM_TO_DEV)
		memcpy(h_desc->epib, data, len);

	if (uc->config.needs_epib)
		psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;

	d->metadata = data;
	d->metadata_size = len;
	if (uc->config.needs_epib)
		flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;

	cppi5_hdesc_update_flags(h_desc, flags);
	cppi5_hdesc_update_psdata_size(h_desc, psd_size);

	return 0;
}
static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
				   size_t *payload_len, size_t *max_len)
{
	struct udma_desc *d = to_udma_desc(desc);
	struct udma_chan *uc = to_udma_chan(desc->chan);
	struct cppi5_host_desc_t *h_desc;

	if (!uc->config.pkt_mode || !uc->config.metadata_size)
		return ERR_PTR(-ENOTSUPP);

	h_desc = d->hwdesc[0].cppi5_desc_vaddr;

	*max_len = uc->config.metadata_size;

	*payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ?
		       CPPI5_INFO0_HDESC_EPIB_SIZE : 0;
	*payload_len += cppi5_hdesc_get_psdata_size(h_desc);

	return h_desc->epib;
}
static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc,
				 size_t payload_len)
{
	struct udma_desc *d = to_udma_desc(desc);
	struct udma_chan *uc = to_udma_chan(desc->chan);
	struct cppi5_host_desc_t *h_desc;
	u32 psd_size = payload_len;
	u32 flags = 0;

	if (!uc->config.pkt_mode || !uc->config.metadata_size)
		return -ENOTSUPP;

	if (payload_len > uc->config.metadata_size)
		return -EINVAL;

	if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE)
		return -EINVAL;

	h_desc = d->hwdesc[0].cppi5_desc_vaddr;

	if (uc->config.needs_epib) {
		psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
		flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
	}

	cppi5_hdesc_update_flags(h_desc, flags);
	cppi5_hdesc_update_psdata_size(h_desc, psd_size);

	return 0;
}

static struct dma_descriptor_metadata_ops metadata_ops = {
	.attach = udma_attach_metadata,
	.get_ptr = udma_get_metadata_ptr,
	.set_len = udma_set_metadata_len,
};
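/*
 * Clients reach the CPPI5 EPIB/PS words through the generic dmaengine
 * metadata API. A minimal sketch of the client-pointer flow (illustrative
 * only; "md" and its length are placeholders, not from this driver):
 *
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV, 0);
 *	dmaengine_desc_attach_metadata(desc, md, md_len);
 *	dmaengine_submit(desc);
 *
 * The get_ptr/set_len callbacks above serve the engine-pointer mode of the
 * same API instead.
 */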
static struct dma_async_tx_descriptor *
udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		   unsigned int sglen, enum dma_transfer_direction dir,
		   unsigned long tx_flags, void *context)
{
	struct udma_chan *uc = to_udma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct udma_desc *d;
	u32 burst;

	if (dir != uc->config.dir &&
	    (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type)) {
		dev_err(chan->device->dev,
			"%s: chan%d is for %s, not supporting %s\n",
			__func__, uc->id,
			dmaengine_get_direction_text(uc->config.dir),
			dmaengine_get_direction_text(dir));
		return NULL;
	}

	if (dir == DMA_DEV_TO_MEM) {
		dev_width = uc->cfg.src_addr_width;
		burst = uc->cfg.src_maxburst;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_width = uc->cfg.dst_addr_width;
		burst = uc->cfg.dst_maxburst;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	if (!burst)
		burst = 1;

	if (uc->config.pkt_mode)
		d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
					   context);
	else if (is_slave_direction(uc->config.dir))
		d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags,
					  context);
	else
		d = udma_prep_slave_sg_triggered_tr(uc, sgl, sglen, dir,
						    tx_flags, context);

	if (!d)
		return NULL;

	d->dir = dir;
	d->desc_idx = 0;
	d->tr_idx = 0;

	/* static TR for remote PDMA */
	if (udma_configure_statictr(uc, d, dev_width, burst)) {
		dev_err(uc->ud->dev,
			"%s: StaticTR Z is limited to maximum 4095 (%u)\n",
			__func__, d->static_tr.bstcnt);

		udma_free_hwdesc(uc, d);
		kfree(d);
		return NULL;
	}

	if (uc->config.metadata_size)
		d->vd.tx.metadata_ops = &metadata_ops;

	return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
}
3380 static struct udma_desc
*
3381 udma_prep_dma_cyclic_tr(struct udma_chan
*uc
, dma_addr_t buf_addr
,
3382 size_t buf_len
, size_t period_len
,
3383 enum dma_transfer_direction dir
, unsigned long flags
)
3385 struct udma_desc
*d
;
3386 size_t tr_size
, period_addr
;
3387 struct cppi5_tr_type1_t
*tr_req
;
3388 unsigned int periods
= buf_len
/ period_len
;
3389 u16 tr0_cnt0
, tr0_cnt1
, tr1_cnt0
;
3393 num_tr
= udma_get_tr_counters(period_len
, __ffs(buf_addr
), &tr0_cnt0
,
3394 &tr0_cnt1
, &tr1_cnt0
);
3396 dev_err(uc
->ud
->dev
, "size %zu is not supported\n",
3401 /* Now allocate and setup the descriptor. */
3402 tr_size
= sizeof(struct cppi5_tr_type1_t
);
3403 d
= udma_alloc_tr_desc(uc
, tr_size
, periods
* num_tr
, dir
);
3407 tr_req
= d
->hwdesc
[0].tr_req_base
;
3408 if (uc
->ud
->match_data
->type
== DMA_TYPE_UDMA
)
3409 period_addr
= buf_addr
;
3411 period_addr
= buf_addr
|
3412 ((u64
)uc
->config
.asel
<< K3_ADDRESS_ASEL_SHIFT
);
3414 for (i
= 0; i
< periods
; i
++) {
3415 int tr_idx
= i
* num_tr
;
3417 cppi5_tr_init(&tr_req
[tr_idx
].flags
, CPPI5_TR_TYPE1
, false,
3418 false, CPPI5_TR_EVENT_SIZE_COMPLETION
, 0);
3420 tr_req
[tr_idx
].addr
= period_addr
;
3421 tr_req
[tr_idx
].icnt0
= tr0_cnt0
;
3422 tr_req
[tr_idx
].icnt1
= tr0_cnt1
;
3423 tr_req
[tr_idx
].dim1
= tr0_cnt0
;
3426 cppi5_tr_csf_set(&tr_req
[tr_idx
].flags
,
3427 CPPI5_TR_CSF_SUPR_EVT
);
3430 cppi5_tr_init(&tr_req
[tr_idx
].flags
, CPPI5_TR_TYPE1
,
3432 CPPI5_TR_EVENT_SIZE_COMPLETION
, 0);
3434 tr_req
[tr_idx
].addr
= period_addr
+ tr0_cnt1
* tr0_cnt0
;
3435 tr_req
[tr_idx
].icnt0
= tr1_cnt0
;
3436 tr_req
[tr_idx
].icnt1
= 1;
3437 tr_req
[tr_idx
].dim1
= tr1_cnt0
;
3440 if (!(flags
& DMA_PREP_INTERRUPT
))
3441 cppi5_tr_csf_set(&tr_req
[tr_idx
].flags
,
3442 CPPI5_TR_CSF_SUPR_EVT
);
3444 period_addr
+= period_len
;
3450 static struct udma_desc
*
3451 udma_prep_dma_cyclic_pkt(struct udma_chan
*uc
, dma_addr_t buf_addr
,
3452 size_t buf_len
, size_t period_len
,
3453 enum dma_transfer_direction dir
, unsigned long flags
)
3455 struct udma_desc
*d
;
3458 int periods
= buf_len
/ period_len
;
3460 if (periods
> (K3_UDMA_DEFAULT_RING_SIZE
- 1))
3463 if (period_len
>= SZ_4M
)
3466 d
= kzalloc(struct_size(d
, hwdesc
, periods
), GFP_NOWAIT
);
3470 d
->hwdesc_count
= periods
;
3472 /* TODO: re-check this... */
3473 if (dir
== DMA_DEV_TO_MEM
)
3474 ring_id
= k3_ringacc_get_ring_id(uc
->rflow
->r_ring
);
3476 ring_id
= k3_ringacc_get_ring_id(uc
->tchan
->tc_ring
);
3478 if (uc
->ud
->match_data
->type
!= DMA_TYPE_UDMA
)
3479 buf_addr
|= (u64
)uc
->config
.asel
<< K3_ADDRESS_ASEL_SHIFT
;
3481 for (i
= 0; i
< periods
; i
++) {
3482 struct udma_hwdesc
*hwdesc
= &d
->hwdesc
[i
];
3483 dma_addr_t period_addr
= buf_addr
+ (period_len
* i
);
3484 struct cppi5_host_desc_t
*h_desc
;
3486 hwdesc
->cppi5_desc_vaddr
= dma_pool_zalloc(uc
->hdesc_pool
,
3488 &hwdesc
->cppi5_desc_paddr
);
3489 if (!hwdesc
->cppi5_desc_vaddr
) {
3490 dev_err(uc
->ud
->dev
,
3491 "descriptor%d allocation failed\n", i
);
3493 udma_free_hwdesc(uc
, d
);
3498 hwdesc
->cppi5_desc_size
= uc
->config
.hdesc_size
;
3499 h_desc
= hwdesc
->cppi5_desc_vaddr
;
3501 cppi5_hdesc_init(h_desc
, 0, 0);
3502 cppi5_hdesc_set_pktlen(h_desc
, period_len
);
3504 /* Flow and Packed ID */
3505 cppi5_desc_set_pktids(&h_desc
->hdr
, uc
->id
,
3506 CPPI5_INFO1_DESC_FLOWID_DEFAULT
);
3507 cppi5_desc_set_retpolicy(&h_desc
->hdr
, 0, ring_id
);
3509 /* attach each period to a new descriptor */
3510 cppi5_hdesc_attach_buf(h_desc
,
3511 period_addr
, period_len
,
3512 period_addr
, period_len
);
static struct dma_async_tx_descriptor *
udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		     size_t period_len, enum dma_transfer_direction dir,
		     unsigned long flags)
{
	struct udma_chan *uc = to_udma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct udma_desc *d;
	u32 burst;

	if (dir != uc->config.dir) {
		dev_err(chan->device->dev,
			"%s: chan%d is for %s, not supporting %s\n",
			__func__, uc->id,
			dmaengine_get_direction_text(uc->config.dir),
			dmaengine_get_direction_text(dir));
		return NULL;
	}

	uc->cyclic = true;

	if (dir == DMA_DEV_TO_MEM) {
		dev_width = uc->cfg.src_addr_width;
		burst = uc->cfg.src_maxburst;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_width = uc->cfg.dst_addr_width;
		burst = uc->cfg.dst_maxburst;
	} else {
		dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	if (!burst)
		burst = 1;

	if (uc->config.pkt_mode)
		d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len,
					     dir, flags);
	else
		d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len,
					    dir, flags);

	if (!d)
		return NULL;

	d->sglen = buf_len / period_len;

	d->dir = dir;
	d->residue = buf_len;

	/* static TR for remote PDMA */
	if (udma_configure_statictr(uc, d, dev_width, burst)) {
		dev_err(uc->ud->dev,
			"%s: StaticTR Z is limited to maximum 4095 (%u)\n",
			__func__, d->static_tr.bstcnt);

		udma_free_hwdesc(uc, d);
		kfree(d);
		return NULL;
	}

	if (uc->config.metadata_size)
		d->vd.tx.metadata_ops = &metadata_ops;

	return vchan_tx_prep(&uc->vc, &d->vd, flags);
}
static struct dma_async_tx_descriptor *
udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		     size_t len, unsigned long tx_flags)
{
	struct udma_chan *uc = to_udma_chan(chan);
	struct udma_desc *d;
	struct cppi5_tr_type15_t *tr_req;
	int num_tr;
	size_t tr_size = sizeof(struct cppi5_tr_type15_t);
	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;

	if (uc->config.dir != DMA_MEM_TO_MEM) {
		dev_err(chan->device->dev,
			"%s: chan%d is for %s, not supporting %s\n",
			__func__, uc->id,
			dmaengine_get_direction_text(uc->config.dir),
			dmaengine_get_direction_text(DMA_MEM_TO_MEM));
		return NULL;
	}

	num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0,
				      &tr0_cnt1, &tr1_cnt0);
	if (num_tr < 0) {
		dev_err(uc->ud->dev, "size %zu is not supported\n",
			len);
		return NULL;
	}

	d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM);
	if (!d)
		return NULL;

	d->dir = DMA_MEM_TO_MEM;
	d->desc_idx = 0;
	d->tr_idx = 0;
	d->residue = len;

	if (uc->ud->match_data->type != DMA_TYPE_UDMA) {
		src |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
		dest |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
	}

	tr_req = d->hwdesc[0].tr_req_base;

	cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
		      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
	cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);

	tr_req[0].addr = src;
	tr_req[0].icnt0 = tr0_cnt0;
	tr_req[0].icnt1 = tr0_cnt1;
	tr_req[0].icnt2 = 1;
	tr_req[0].icnt3 = 1;
	tr_req[0].dim1 = tr0_cnt0;

	tr_req[0].daddr = dest;
	tr_req[0].dicnt0 = tr0_cnt0;
	tr_req[0].dicnt1 = tr0_cnt1;
	tr_req[0].dicnt2 = 1;
	tr_req[0].dicnt3 = 1;
	tr_req[0].ddim1 = tr0_cnt0;

	if (num_tr == 2) {
		cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
			      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
		cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);

		tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
		tr_req[1].icnt0 = tr1_cnt0;
		tr_req[1].icnt1 = 1;
		tr_req[1].icnt2 = 1;
		tr_req[1].icnt3 = 1;

		tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
		tr_req[1].dicnt0 = tr1_cnt0;
		tr_req[1].dicnt1 = 1;
		tr_req[1].dicnt2 = 1;
		tr_req[1].dicnt3 = 1;
	}

	cppi5_tr_csf_set(&tr_req[num_tr - 1].flags,
			 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);

	if (uc->config.metadata_size)
		d->vd.tx.metadata_ops = &metadata_ops;

	return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
}
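/*
 * A MEM_TO_MEM copy prepared above is driven through the standard dmaengine
 * flow; a minimal client sketch (illustrative only, the addresses are
 * placeholders):
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_paddr, src_paddr, len,
 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */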
static void udma_issue_pending(struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&uc->vc.lock, flags);

	/* If we have something pending and no active descriptor, then */
	if (vchan_issue_pending(&uc->vc) && !uc->desc) {
		/*
		 * start a descriptor if the channel is NOT [marked as
		 * terminating _and_ it is still running (teardown has not
		 * completed yet)].
		 */
		if (!(uc->state == UDMA_CHAN_IS_TERMINATING &&
		      udma_is_chan_running(uc)))
			udma_start(uc);
	}

	spin_unlock_irqrestore(&uc->vc.lock, flags);
}
static enum dma_status udma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct udma_chan *uc = to_udma_chan(chan);
	enum dma_status ret;
	unsigned long flags;

	spin_lock_irqsave(&uc->vc.lock, flags);

	ret = dma_cookie_status(chan, cookie, txstate);

	if (!udma_is_chan_running(uc))
		ret = DMA_COMPLETE;

	if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc))
		ret = DMA_PAUSED;

	if (ret == DMA_COMPLETE || !txstate)
		goto out;

	if (uc->desc && uc->desc->vd.tx.cookie == cookie) {
		u32 peer_bcnt = 0;
		u32 bcnt = 0;
		u32 residue = uc->desc->residue;
		u32 delay = 0;

		if (uc->desc->dir == DMA_MEM_TO_DEV) {
			bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);

			if (uc->config.ep_type != PSIL_EP_NATIVE) {
				peer_bcnt = udma_tchanrt_read(uc,
						UDMA_CHAN_RT_PEER_BCNT_REG);

				if (bcnt > peer_bcnt)
					delay = bcnt - peer_bcnt;
			}
		} else if (uc->desc->dir == DMA_DEV_TO_MEM) {
			bcnt = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);

			if (uc->config.ep_type != PSIL_EP_NATIVE) {
				peer_bcnt = udma_rchanrt_read(uc,
						UDMA_CHAN_RT_PEER_BCNT_REG);

				if (peer_bcnt > bcnt)
					delay = peer_bcnt - bcnt;
			}
		} else {
			bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
		}

		bcnt -= uc->bcnt;
		if (bcnt && !(bcnt % uc->desc->residue))
			residue = 0;
		else
			residue -= bcnt % uc->desc->residue;

		if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) {
			ret = DMA_COMPLETE;
			delay = 0;
		}

		dma_set_residue(txstate, residue);
		dma_set_in_flight_bytes(txstate, delay);

	} else {
		ret = DMA_COMPLETE;
	}

out:
	spin_unlock_irqrestore(&uc->vc.lock, flags);
	return ret;
}
static int udma_pause(struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);

	/* pause the channel */
	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
					 UDMA_PEER_RT_EN_PAUSE,
					 UDMA_PEER_RT_EN_PAUSE);
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
					 UDMA_PEER_RT_EN_PAUSE,
					 UDMA_PEER_RT_EN_PAUSE);
		break;
	case DMA_MEM_TO_MEM:
		udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
					 UDMA_CHAN_RT_CTL_PAUSE,
					 UDMA_CHAN_RT_CTL_PAUSE);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int udma_resume(struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);

	/* resume the channel */
	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
					 UDMA_PEER_RT_EN_PAUSE, 0);
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
					 UDMA_PEER_RT_EN_PAUSE, 0);
		break;
	case DMA_MEM_TO_MEM:
		udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
					 UDMA_CHAN_RT_CTL_PAUSE, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int udma_terminate_all(struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&uc->vc.lock, flags);

	if (udma_is_chan_running(uc))
		udma_stop(uc);

	if (uc->desc) {
		uc->terminated_desc = uc->desc;
		uc->desc = NULL;
		uc->terminated_desc->terminated = true;
		cancel_delayed_work(&uc->tx_drain.work);
	}

	uc->paused = false;

	vchan_get_all_descriptors(&uc->vc, &head);
	spin_unlock_irqrestore(&uc->vc.lock, flags);
	vchan_dma_desc_free_list(&uc->vc, &head);

	return 0;
}
static void udma_synchronize(struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	unsigned long timeout = msecs_to_jiffies(1000);

	vchan_synchronize(&uc->vc);

	if (uc->state == UDMA_CHAN_IS_TERMINATING) {
		timeout = wait_for_completion_timeout(&uc->teardown_completed,
						      timeout);
		if (!timeout) {
			dev_warn(uc->ud->dev, "chan%d teardown timeout!\n",
				 uc->id);
			udma_dump_chan_stdata(uc);
			udma_reset_chan(uc, true);
		}
	}

	udma_reset_chan(uc, false);
	if (udma_is_chan_running(uc))
		dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);

	cancel_delayed_work_sync(&uc->tx_drain.work);
	udma_reset_rings(uc);
}
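/*
 * Clients are expected to pair termination with synchronization so that the
 * teardown above (and any in-flight completion callbacks) has finished before
 * buffers are reused. A minimal sketch (illustrative only):
 *
 *	dmaengine_terminate_async(chan);
 *	...
 *	dmaengine_terminate_sync(chan);	// terminate + udma_synchronize()
 */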
static void udma_desc_pre_callback(struct virt_dma_chan *vc,
				   struct virt_dma_desc *vd,
				   struct dmaengine_result *result)
{
	struct udma_chan *uc = to_udma_chan(&vc->chan);
	struct udma_desc *d;

	if (!vd)
		return;

	d = to_udma_desc(&vd->tx);

	if (d->metadata_size)
		udma_fetch_epib(uc, d);

	/* Provide residue information for the client */
	if (result) {
		void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);

		if (cppi5_desc_get_type(desc_vaddr) ==
		    CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
			result->residue = d->residue -
					  cppi5_hdesc_get_pktlen(desc_vaddr);
			if (result->residue)
				result->result = DMA_TRANS_ABORTED;
			else
				result->result = DMA_TRANS_NOERROR;
		} else {
			result->residue = 0;
			result->result = DMA_TRANS_NOERROR;
		}
	}
}
/*
 * This tasklet handles the completion of a DMA descriptor by
 * calling its callback and freeing it.
 */
static void udma_vchan_complete(struct tasklet_struct *t)
{
	struct virt_dma_chan *vc = from_tasklet(vc, t, task);
	struct virt_dma_desc *vd, *_vd;
	struct dmaengine_desc_callback cb;
	LIST_HEAD(head);

	spin_lock_irq(&vc->lock);
	list_splice_tail_init(&vc->desc_completed, &head);
	vd = vc->cyclic;
	if (vd) {
		vc->cyclic = NULL;
		dmaengine_desc_get_callback(&vd->tx, &cb);
	} else {
		memset(&cb, 0, sizeof(cb));
	}
	spin_unlock_irq(&vc->lock);

	udma_desc_pre_callback(vc, vd, NULL);
	dmaengine_desc_callback_invoke(&cb, NULL);

	list_for_each_entry_safe(vd, _vd, &head, node) {
		struct dmaengine_result result;

		dmaengine_desc_get_callback(&vd->tx, &cb);

		list_del(&vd->node);

		udma_desc_pre_callback(vc, vd, &result);
		dmaengine_desc_callback_invoke(&cb, &result);

		vchan_vdesc_fini(vd);
	}
}
static void udma_free_chan_resources(struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	struct udma_dev *ud = to_udma_dev(chan->device);

	udma_terminate_all(chan);
	if (uc->terminated_desc) {
		udma_reset_chan(uc, false);
		udma_reset_rings(uc);
	}

	cancel_delayed_work_sync(&uc->tx_drain.work);

	if (uc->irq_num_ring > 0) {
		free_irq(uc->irq_num_ring, uc);

		uc->irq_num_ring = 0;
	}
	if (uc->irq_num_udma > 0) {
		free_irq(uc->irq_num_udma, uc);

		uc->irq_num_udma = 0;
	}

	/* Release PSI-L pairing */
	if (uc->psil_paired) {
		navss_psil_unpair(ud, uc->config.src_thread,
				  uc->config.dst_thread);
		uc->psil_paired = false;
	}

	vchan_free_chan_resources(&uc->vc);
	tasklet_kill(&uc->vc.task);

	bcdma_free_bchan_resources(uc);
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);
	udma_reset_uchan(uc);

	if (uc->use_dma_pool) {
		dma_pool_destroy(uc->hdesc_pool);
		uc->use_dma_pool = false;
	}
}
static struct platform_driver udma_driver;
static struct platform_driver bcdma_driver;
static struct platform_driver pktdma_driver;

struct udma_filter_param {
	int remote_thread_id;
	u32 atype;
	u32 asel;
	u32 tr_trigger_type;
};
4006 static bool udma_dma_filter_fn(struct dma_chan
*chan
, void *param
)
4008 struct udma_chan_config
*ucc
;
4009 struct psil_endpoint_config
*ep_config
;
4010 struct udma_filter_param
*filter_param
;
4011 struct udma_chan
*uc
;
4012 struct udma_dev
*ud
;
4014 if (chan
->device
->dev
->driver
!= &udma_driver
.driver
&&
4015 chan
->device
->dev
->driver
!= &bcdma_driver
.driver
&&
4016 chan
->device
->dev
->driver
!= &pktdma_driver
.driver
)
4019 uc
= to_udma_chan(chan
);
4022 filter_param
= param
;
4024 if (filter_param
->atype
> 2) {
4025 dev_err(ud
->dev
, "Invalid channel atype: %u\n",
4026 filter_param
->atype
);
4030 if (filter_param
->asel
> 15) {
4031 dev_err(ud
->dev
, "Invalid channel asel: %u\n",
4032 filter_param
->asel
);
4036 ucc
->remote_thread_id
= filter_param
->remote_thread_id
;
4037 ucc
->atype
= filter_param
->atype
;
4038 ucc
->asel
= filter_param
->asel
;
4039 ucc
->tr_trigger_type
= filter_param
->tr_trigger_type
;
4041 if (ucc
->tr_trigger_type
) {
4042 ucc
->dir
= DMA_MEM_TO_MEM
;
4043 goto triggered_bchan
;
4044 } else if (ucc
->remote_thread_id
& K3_PSIL_DST_THREAD_ID_OFFSET
) {
4045 ucc
->dir
= DMA_MEM_TO_DEV
;
4047 ucc
->dir
= DMA_DEV_TO_MEM
;
4050 ep_config
= psil_get_ep_config(ucc
->remote_thread_id
);
4051 if (IS_ERR(ep_config
)) {
4052 dev_err(ud
->dev
, "No configuration for psi-l thread 0x%04x\n",
4053 ucc
->remote_thread_id
);
4054 ucc
->dir
= DMA_MEM_TO_MEM
;
4055 ucc
->remote_thread_id
= -1;
4061 if (ud
->match_data
->type
== DMA_TYPE_BCDMA
&&
4062 ep_config
->pkt_mode
) {
4064 "Only TR mode is supported (psi-l thread 0x%04x)\n",
4065 ucc
->remote_thread_id
);
4066 ucc
->dir
= DMA_MEM_TO_MEM
;
4067 ucc
->remote_thread_id
= -1;
4073 ucc
->pkt_mode
= ep_config
->pkt_mode
;
4074 ucc
->channel_tpl
= ep_config
->channel_tpl
;
4075 ucc
->notdpkt
= ep_config
->notdpkt
;
4076 ucc
->ep_type
= ep_config
->ep_type
;
4078 if (ud
->match_data
->type
== DMA_TYPE_PKTDMA
&&
4079 ep_config
->mapped_channel_id
>= 0) {
4080 ucc
->mapped_channel_id
= ep_config
->mapped_channel_id
;
4081 ucc
->default_flow_id
= ep_config
->default_flow_id
;
4083 ucc
->mapped_channel_id
= -1;
4084 ucc
->default_flow_id
= -1;
4087 if (ucc
->ep_type
!= PSIL_EP_NATIVE
) {
4088 const struct udma_match_data
*match_data
= ud
->match_data
;
4090 if (match_data
->flags
& UDMA_FLAG_PDMA_ACC32
)
4091 ucc
->enable_acc32
= ep_config
->pdma_acc32
;
4092 if (match_data
->flags
& UDMA_FLAG_PDMA_BURST
)
4093 ucc
->enable_burst
= ep_config
->pdma_burst
;
4096 ucc
->needs_epib
= ep_config
->needs_epib
;
4097 ucc
->psd_size
= ep_config
->psd_size
;
4098 ucc
->metadata_size
=
4099 (ucc
->needs_epib
? CPPI5_INFO0_HDESC_EPIB_SIZE
: 0) +
4103 ucc
->hdesc_size
= ALIGN(sizeof(struct cppi5_host_desc_t
) +
4104 ucc
->metadata_size
, ud
->desc_align
);
4106 dev_dbg(ud
->dev
, "chan%d: Remote thread: 0x%04x (%s)\n", uc
->id
,
4107 ucc
->remote_thread_id
, dmaengine_get_direction_text(ucc
->dir
));
4112 dev_dbg(ud
->dev
, "chan%d: triggered channel (type: %u)\n", uc
->id
,
4113 ucc
->tr_trigger_type
);
static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *ofdma)
{
	struct udma_dev *ud = ofdma->of_dma_data;
	dma_cap_mask_t mask = ud->ddev.cap_mask;
	struct udma_filter_param filter_param;
	struct dma_chan *chan;

	if (ud->match_data->type == DMA_TYPE_BCDMA) {
		if (dma_spec->args_count != 3)
			return NULL;

		filter_param.tr_trigger_type = dma_spec->args[0];
		filter_param.remote_thread_id = dma_spec->args[1];
		filter_param.asel = dma_spec->args[2];
		filter_param.atype = 0;
	} else {
		if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
			return NULL;

		filter_param.remote_thread_id = dma_spec->args[0];
		filter_param.tr_trigger_type = 0;
		if (dma_spec->args_count == 2) {
			if (ud->match_data->type == DMA_TYPE_UDMA) {
				filter_param.atype = dma_spec->args[1];
				filter_param.asel = 0;
			} else {
				filter_param.atype = 0;
				filter_param.asel = dma_spec->args[1];
			}
		} else {
			filter_param.atype = 0;
			filter_param.asel = 0;
		}
	}

	chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
				     ofdma->of_node);
	if (!chan) {
		dev_err(ud->dev, "get channel fail in %s.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	return chan;
}
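/*
 * Example consumer binding (illustrative only; the phandles, thread IDs and
 * channel names are placeholders, not taken from a real board file):
 *
 *	dmas = <&main_udmap 0xc400>, <&main_udmap 0x4400>;
 *	dma-names = "tx", "rx";
 *
 * For BCDMA the specifier carries three cells - trigger type, remote thread
 * ID and ASEL - matching the argument parsing above.
 */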
static struct udma_match_data am654_main_data = {
	.type = DMA_TYPE_UDMA,
	.psil_base = 0x1000,
	.enable_memcpy_support = true,
	.statictr_z_mask = GENMASK(11, 0),
};

static struct udma_match_data am654_mcu_data = {
	.type = DMA_TYPE_UDMA,
	.psil_base = 0x6000,
	.enable_memcpy_support = false,
	.statictr_z_mask = GENMASK(11, 0),
};

static struct udma_match_data j721e_main_data = {
	.type = DMA_TYPE_UDMA,
	.psil_base = 0x1000,
	.enable_memcpy_support = true,
	.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
	.statictr_z_mask = GENMASK(23, 0),
};

static struct udma_match_data j721e_mcu_data = {
	.type = DMA_TYPE_UDMA,
	.psil_base = 0x6000,
	.enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
	.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
	.statictr_z_mask = GENMASK(23, 0),
};

static struct udma_match_data am64_bcdma_data = {
	.type = DMA_TYPE_BCDMA,
	.psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */
	.enable_memcpy_support = true, /* Supported via bchan */
	.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
	.statictr_z_mask = GENMASK(23, 0),
};

static struct udma_match_data am64_pktdma_data = {
	.type = DMA_TYPE_PKTDMA,
	.psil_base = 0x1000,
	.enable_memcpy_support = false, /* PKTDMA does not support MEM_TO_MEM */
	.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
	.statictr_z_mask = GENMASK(23, 0),
};
static const struct of_device_id udma_of_match[] = {
	{
		.compatible = "ti,am654-navss-main-udmap",
		.data = &am654_main_data,
	},
	{
		.compatible = "ti,am654-navss-mcu-udmap",
		.data = &am654_mcu_data,
	},
	{
		.compatible = "ti,j721e-navss-main-udmap",
		.data = &j721e_main_data,
	},
	{
		.compatible = "ti,j721e-navss-mcu-udmap",
		.data = &j721e_mcu_data,
	},
	{ /* Sentinel */ },
};

static const struct of_device_id bcdma_of_match[] = {
	{
		.compatible = "ti,am64-dmss-bcdma",
		.data = &am64_bcdma_data,
	},
	{ /* Sentinel */ },
};

static const struct of_device_id pktdma_of_match[] = {
	{
		.compatible = "ti,am64-dmss-pktdma",
		.data = &am64_pktdma_data,
	},
	{ /* Sentinel */ },
};
static struct udma_soc_data am654_soc_data = {
	.oes = {
		.udma_rchan = 0x200,
	},
};

static struct udma_soc_data j721e_soc_data = {
	.oes = {
		.udma_rchan = 0x400,
	},
};

static struct udma_soc_data j7200_soc_data = {
	.oes = {
		.udma_rchan = 0x80,
	},
};

static struct udma_soc_data am64_soc_data = {
	.oes = {
		.bcdma_bchan_data = 0x2200,
		.bcdma_bchan_ring = 0x2400,
		.bcdma_tchan_data = 0x2800,
		.bcdma_tchan_ring = 0x2a00,
		.bcdma_rchan_data = 0x2e00,
		.bcdma_rchan_ring = 0x3000,
		.pktdma_tchan_flow = 0x1200,
		.pktdma_rchan_flow = 0x1600,
	},
	.bcdma_trigger_event_offset = 0xc400,
};

static const struct soc_device_attribute k3_soc_devices[] = {
	{ .family = "AM65X", .data = &am654_soc_data },
	{ .family = "J721E", .data = &j721e_soc_data },
	{ .family = "J7200", .data = &j7200_soc_data },
	{ .family = "AM64X", .data = &am64_soc_data },
	{ /* sentinel */ }
};
4285 static int udma_get_mmrs(struct platform_device
*pdev
, struct udma_dev
*ud
)
4287 u32 cap2
, cap3
, cap4
;
4290 ud
->mmrs
[MMR_GCFG
] = devm_platform_ioremap_resource_byname(pdev
, mmr_names
[MMR_GCFG
]);
4291 if (IS_ERR(ud
->mmrs
[MMR_GCFG
]))
4292 return PTR_ERR(ud
->mmrs
[MMR_GCFG
]);
4294 cap2
= udma_read(ud
->mmrs
[MMR_GCFG
], 0x28);
4295 cap3
= udma_read(ud
->mmrs
[MMR_GCFG
], 0x2c);
4297 switch (ud
->match_data
->type
) {
4299 ud
->rflow_cnt
= UDMA_CAP3_RFLOW_CNT(cap3
);
4300 ud
->tchan_cnt
= UDMA_CAP2_TCHAN_CNT(cap2
);
4301 ud
->echan_cnt
= UDMA_CAP2_ECHAN_CNT(cap2
);
4302 ud
->rchan_cnt
= UDMA_CAP2_RCHAN_CNT(cap2
);
4304 case DMA_TYPE_BCDMA
:
4305 ud
->bchan_cnt
= BCDMA_CAP2_BCHAN_CNT(cap2
);
4306 ud
->tchan_cnt
= BCDMA_CAP2_TCHAN_CNT(cap2
);
4307 ud
->rchan_cnt
= BCDMA_CAP2_RCHAN_CNT(cap2
);
4309 case DMA_TYPE_PKTDMA
:
4310 cap4
= udma_read(ud
->mmrs
[MMR_GCFG
], 0x30);
4311 ud
->tchan_cnt
= UDMA_CAP2_TCHAN_CNT(cap2
);
4312 ud
->rchan_cnt
= UDMA_CAP2_RCHAN_CNT(cap2
);
4313 ud
->rflow_cnt
= UDMA_CAP3_RFLOW_CNT(cap3
);
4314 ud
->tflow_cnt
= PKTDMA_CAP4_TFLOW_CNT(cap4
);
4320 for (i
= 1; i
< MMR_LAST
; i
++) {
4321 if (i
== MMR_BCHANRT
&& ud
->bchan_cnt
== 0)
4323 if (i
== MMR_TCHANRT
&& ud
->tchan_cnt
== 0)
4325 if (i
== MMR_RCHANRT
&& ud
->rchan_cnt
== 0)
4328 ud
->mmrs
[i
] = devm_platform_ioremap_resource_byname(pdev
, mmr_names
[i
]);
4329 if (IS_ERR(ud
->mmrs
[i
]))
4330 return PTR_ERR(ud
->mmrs
[i
]);
static void udma_mark_resource_ranges(struct udma_dev *ud, unsigned long *map,
				      struct ti_sci_resource_desc *rm_desc,
				      char *name)
{
	bitmap_clear(map, rm_desc->start, rm_desc->num);
	bitmap_clear(map, rm_desc->start_sec, rm_desc->num_sec);
	dev_dbg(ud->dev, "ti_sci resource range for %s: %d:%d | %d:%d\n", name,
		rm_desc->start, rm_desc->num, rm_desc->start_sec,
		rm_desc->num_sec);
}

static const char * const range_names[] = {
	[RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan",
	[RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan",
	[RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan",
	[RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow",
	[RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow",
};
4355 static int udma_setup_resources(struct udma_dev
*ud
)
4358 struct device
*dev
= ud
->dev
;
4359 struct ti_sci_resource
*rm_res
, irq_res
;
4360 struct udma_tisci_rm
*tisci_rm
= &ud
->tisci_rm
;
4363 /* Set up the throughput level start indexes */
4364 cap3
= udma_read(ud
->mmrs
[MMR_GCFG
], 0x2c);
4365 if (of_device_is_compatible(dev
->of_node
,
4366 "ti,am654-navss-main-udmap")) {
4367 ud
->tchan_tpl
.levels
= 2;
4368 ud
->tchan_tpl
.start_idx
[0] = 8;
4369 } else if (of_device_is_compatible(dev
->of_node
,
4370 "ti,am654-navss-mcu-udmap")) {
4371 ud
->tchan_tpl
.levels
= 2;
4372 ud
->tchan_tpl
.start_idx
[0] = 2;
4373 } else if (UDMA_CAP3_UCHAN_CNT(cap3
)) {
4374 ud
->tchan_tpl
.levels
= 3;
4375 ud
->tchan_tpl
.start_idx
[1] = UDMA_CAP3_UCHAN_CNT(cap3
);
4376 ud
->tchan_tpl
.start_idx
[0] = UDMA_CAP3_HCHAN_CNT(cap3
);
4377 } else if (UDMA_CAP3_HCHAN_CNT(cap3
)) {
4378 ud
->tchan_tpl
.levels
= 2;
4379 ud
->tchan_tpl
.start_idx
[0] = UDMA_CAP3_HCHAN_CNT(cap3
);
4381 ud
->tchan_tpl
.levels
= 1;
4384 ud
->rchan_tpl
.levels
= ud
->tchan_tpl
.levels
;
4385 ud
->rchan_tpl
.start_idx
[0] = ud
->tchan_tpl
.start_idx
[0];
4386 ud
->rchan_tpl
.start_idx
[1] = ud
->tchan_tpl
.start_idx
[1];
4388 ud
->tchan_map
= devm_kmalloc_array(dev
, BITS_TO_LONGS(ud
->tchan_cnt
),
4389 sizeof(unsigned long), GFP_KERNEL
);
4390 ud
->tchans
= devm_kcalloc(dev
, ud
->tchan_cnt
, sizeof(*ud
->tchans
),
4392 ud
->rchan_map
= devm_kmalloc_array(dev
, BITS_TO_LONGS(ud
->rchan_cnt
),
4393 sizeof(unsigned long), GFP_KERNEL
);
4394 ud
->rchans
= devm_kcalloc(dev
, ud
->rchan_cnt
, sizeof(*ud
->rchans
),
4396 ud
->rflow_gp_map
= devm_kmalloc_array(dev
, BITS_TO_LONGS(ud
->rflow_cnt
),
4397 sizeof(unsigned long),
4399 ud
->rflow_gp_map_allocated
= devm_kcalloc(dev
,
4400 BITS_TO_LONGS(ud
->rflow_cnt
),
4401 sizeof(unsigned long),
4403 ud
->rflow_in_use
= devm_kcalloc(dev
, BITS_TO_LONGS(ud
->rflow_cnt
),
4404 sizeof(unsigned long),
4406 ud
->rflows
= devm_kcalloc(dev
, ud
->rflow_cnt
, sizeof(*ud
->rflows
),
4409 if (!ud
->tchan_map
|| !ud
->rchan_map
|| !ud
->rflow_gp_map
||
4410 !ud
->rflow_gp_map_allocated
|| !ud
->tchans
|| !ud
->rchans
||
4411 !ud
->rflows
|| !ud
->rflow_in_use
)
4415 * RX flows with the same Ids as RX channels are reserved to be used
4416 * as default flows if remote HW can't generate flow_ids. Those
4417 * RX flows can be requested only explicitly by id.
4419 bitmap_set(ud
->rflow_gp_map_allocated
, 0, ud
->rchan_cnt
);
4421 /* by default no GP rflows are assigned to Linux */
4422 bitmap_set(ud
->rflow_gp_map
, 0, ud
->rflow_cnt
);
4424 /* Get resource ranges from tisci */
4425 for (i
= 0; i
< RM_RANGE_LAST
; i
++) {
4426 if (i
== RM_RANGE_BCHAN
|| i
== RM_RANGE_TFLOW
)
4429 tisci_rm
->rm_ranges
[i
] =
4430 devm_ti_sci_get_of_resource(tisci_rm
->tisci
, dev
,
4431 tisci_rm
->tisci_dev_id
,
4432 (char *)range_names
[i
]);
4436 rm_res
= tisci_rm
->rm_ranges
[RM_RANGE_TCHAN
];
4437 if (IS_ERR(rm_res
)) {
4438 bitmap_zero(ud
->tchan_map
, ud
->tchan_cnt
);
4440 bitmap_fill(ud
->tchan_map
, ud
->tchan_cnt
);
4441 for (i
= 0; i
< rm_res
->sets
; i
++)
4442 udma_mark_resource_ranges(ud
, ud
->tchan_map
,
4443 &rm_res
->desc
[i
], "tchan");
4445 irq_res
.sets
= rm_res
->sets
;
4447 /* rchan and matching default flow ranges */
4448 rm_res
= tisci_rm
->rm_ranges
[RM_RANGE_RCHAN
];
4449 if (IS_ERR(rm_res
)) {
4450 bitmap_zero(ud
->rchan_map
, ud
->rchan_cnt
);
4452 bitmap_fill(ud
->rchan_map
, ud
->rchan_cnt
);
4453 for (i
= 0; i
< rm_res
->sets
; i
++)
4454 udma_mark_resource_ranges(ud
, ud
->rchan_map
,
4455 &rm_res
->desc
[i
], "rchan");
4458 irq_res
.sets
+= rm_res
->sets
;
4459 irq_res
.desc
= kcalloc(irq_res
.sets
, sizeof(*irq_res
.desc
), GFP_KERNEL
);
4460 rm_res
= tisci_rm
->rm_ranges
[RM_RANGE_TCHAN
];
4461 for (i
= 0; i
< rm_res
->sets
; i
++) {
4462 irq_res
.desc
[i
].start
= rm_res
->desc
[i
].start
;
4463 irq_res
.desc
[i
].num
= rm_res
->desc
[i
].num
;
4464 irq_res
.desc
[i
].start_sec
= rm_res
->desc
[i
].start_sec
;
4465 irq_res
.desc
[i
].num_sec
= rm_res
->desc
[i
].num_sec
;
4467 rm_res
= tisci_rm
->rm_ranges
[RM_RANGE_RCHAN
];
4468 for (j
= 0; j
< rm_res
->sets
; j
++, i
++) {
4469 if (rm_res
->desc
[j
].num
) {
4470 irq_res
.desc
[i
].start
= rm_res
->desc
[j
].start
+
4471 ud
->soc_data
->oes
.udma_rchan
;
4472 irq_res
.desc
[i
].num
= rm_res
->desc
[j
].num
;
4474 if (rm_res
->desc
[j
].num_sec
) {
4475 irq_res
.desc
[i
].start_sec
= rm_res
->desc
[j
].start_sec
+
4476 ud
->soc_data
->oes
.udma_rchan
;
4477 irq_res
.desc
[i
].num_sec
= rm_res
->desc
[j
].num_sec
;
4480 ret
= ti_sci_inta_msi_domain_alloc_irqs(ud
->dev
, &irq_res
);
4481 kfree(irq_res
.desc
);
4483 dev_err(ud
->dev
, "Failed to allocate MSI interrupts\n");
4487 /* GP rflow ranges */
4488 rm_res
= tisci_rm
->rm_ranges
[RM_RANGE_RFLOW
];
4489 if (IS_ERR(rm_res
)) {
4490 /* all gp flows are assigned exclusively to Linux */
4491 bitmap_clear(ud
->rflow_gp_map
, ud
->rchan_cnt
,
4492 ud
->rflow_cnt
- ud
->rchan_cnt
);
4494 for (i
= 0; i
< rm_res
->sets
; i
++)
4495 udma_mark_resource_ranges(ud
, ud
->rflow_gp_map
,
4496 &rm_res
->desc
[i
], "gp-rflow");
4502 static int bcdma_setup_resources(struct udma_dev
*ud
)
4505 struct device
*dev
= ud
->dev
;
4506 struct ti_sci_resource
*rm_res
, irq_res
;
4507 struct udma_tisci_rm
*tisci_rm
= &ud
->tisci_rm
;
4508 const struct udma_oes_offsets
*oes
= &ud
->soc_data
->oes
;
4511 /* Set up the throughput level start indexes */
4512 cap
= udma_read(ud
->mmrs
[MMR_GCFG
], 0x2c);
4513 if (BCDMA_CAP3_UBCHAN_CNT(cap
)) {
4514 ud
->bchan_tpl
.levels
= 3;
4515 ud
->bchan_tpl
.start_idx
[1] = BCDMA_CAP3_UBCHAN_CNT(cap
);
4516 ud
->bchan_tpl
.start_idx
[0] = BCDMA_CAP3_HBCHAN_CNT(cap
);
4517 } else if (BCDMA_CAP3_HBCHAN_CNT(cap
)) {
4518 ud
->bchan_tpl
.levels
= 2;
4519 ud
->bchan_tpl
.start_idx
[0] = BCDMA_CAP3_HBCHAN_CNT(cap
);
4521 ud
->bchan_tpl
.levels
= 1;
4524 cap
= udma_read(ud
->mmrs
[MMR_GCFG
], 0x30);
4525 if (BCDMA_CAP4_URCHAN_CNT(cap
)) {
4526 ud
->rchan_tpl
.levels
= 3;
4527 ud
->rchan_tpl
.start_idx
[1] = BCDMA_CAP4_URCHAN_CNT(cap
);
4528 ud
->rchan_tpl
.start_idx
[0] = BCDMA_CAP4_HRCHAN_CNT(cap
);
	} else if (BCDMA_CAP4_HRCHAN_CNT(cap)) {
		ud->rchan_tpl.levels = 2;
		ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
	} else {
		ud->rchan_tpl.levels = 1;
	}

	if (BCDMA_CAP4_UTCHAN_CNT(cap)) {
		ud->tchan_tpl.levels = 3;
		ud->tchan_tpl.start_idx[1] = BCDMA_CAP4_UTCHAN_CNT(cap);
		ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
	} else if (BCDMA_CAP4_HTCHAN_CNT(cap)) {
		ud->tchan_tpl.levels = 2;
		ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
	} else {
		ud->tchan_tpl.levels = 1;
	}

	ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans),
				  GFP_KERNEL);
	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	/* BCDMA does not really have flows, but the driver expects them */
	ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rchan_cnt),
					sizeof(unsigned long),
					GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);

	if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map ||
	    !ud->rflow_in_use || !ud->bchans || !ud->tchans || !ud->rchans ||
	    !ud->rflows)
		return -ENOMEM;

	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++) {
		if (i == RM_RANGE_RFLOW || i == RM_RANGE_TFLOW)
			continue;
		if (i == RM_RANGE_BCHAN && ud->bchan_cnt == 0)
			continue;
		if (i == RM_RANGE_TCHAN && ud->tchan_cnt == 0)
			continue;
		if (i == RM_RANGE_RCHAN && ud->rchan_cnt == 0)
			continue;

		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);
	}

	irq_res.sets = 0;

	/* bchan ranges */
	if (ud->bchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
		if (IS_ERR(rm_res)) {
			bitmap_zero(ud->bchan_map, ud->bchan_cnt);
		} else {
			bitmap_fill(ud->bchan_map, ud->bchan_cnt);
			for (i = 0; i < rm_res->sets; i++)
				udma_mark_resource_ranges(ud, ud->bchan_map,
							  &rm_res->desc[i],
							  "bchan");
		}
		irq_res.sets += rm_res->sets;
	}

	/* tchan ranges */
	if (ud->tchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
		if (IS_ERR(rm_res)) {
			bitmap_zero(ud->tchan_map, ud->tchan_cnt);
		} else {
			bitmap_fill(ud->tchan_map, ud->tchan_cnt);
			for (i = 0; i < rm_res->sets; i++)
				udma_mark_resource_ranges(ud, ud->tchan_map,
							  &rm_res->desc[i],
							  "tchan");
		}
		irq_res.sets += rm_res->sets * 2;
	}

	/* rchan ranges */
	if (ud->rchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
		if (IS_ERR(rm_res)) {
			bitmap_zero(ud->rchan_map, ud->rchan_cnt);
		} else {
			bitmap_fill(ud->rchan_map, ud->rchan_cnt);
			for (i = 0; i < rm_res->sets; i++)
				udma_mark_resource_ranges(ud, ud->rchan_map,
							  &rm_res->desc[i],
							  "rchan");
		}
		irq_res.sets += rm_res->sets * 2;
	}

	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
	if (ud->bchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
		for (i = 0; i < rm_res->sets; i++) {
			irq_res.desc[i].start = rm_res->desc[i].start +
						oes->bcdma_bchan_ring;
			irq_res.desc[i].num = rm_res->desc[i].num;
		}
	}
	if (ud->tchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
		for (j = 0; j < rm_res->sets; j++, i += 2) {
			irq_res.desc[i].start = rm_res->desc[j].start +
						oes->bcdma_tchan_data;
			irq_res.desc[i].num = rm_res->desc[j].num;

			irq_res.desc[i + 1].start = rm_res->desc[j].start +
						oes->bcdma_tchan_ring;
			irq_res.desc[i + 1].num = rm_res->desc[j].num;
		}
	}
	if (ud->rchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
		for (j = 0; j < rm_res->sets; j++, i += 2) {
			irq_res.desc[i].start = rm_res->desc[j].start +
						oes->bcdma_rchan_data;
			irq_res.desc[i].num = rm_res->desc[j].num;

			irq_res.desc[i + 1].start = rm_res->desc[j].start +
						oes->bcdma_rchan_ring;
			irq_res.desc[i + 1].num = rm_res->desc[j].num;
		}
	}

	ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
	kfree(irq_res.desc);
	if (ret) {
		dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
		return ret;
	}

	return 0;
}

static int pktdma_setup_resources(struct udma_dev *ud)
{
	int ret, i, j;
	struct device *dev = ud->dev;
	struct ti_sci_resource *rm_res, irq_res;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct udma_oes_offsets *oes = &ud->soc_data->oes;
	u32 cap3;

	/* Set up the throughput level start indexes */
	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
	if (UDMA_CAP3_UCHAN_CNT(cap3)) {
		ud->tchan_tpl.levels = 3;
		ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
		ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
	} else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
		ud->tchan_tpl.levels = 2;
		ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
	} else {
		ud->tchan_tpl.levels = 1;
	}

	ud->rchan_tpl.levels = ud->tchan_tpl.levels;
	ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
	ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];

	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
					sizeof(unsigned long),
					GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);
	ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt),
					   sizeof(unsigned long), GFP_KERNEL);

	if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans ||
	    !ud->rchans || !ud->rflows || !ud->rflow_in_use)
		return -ENOMEM;

	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++) {
		if (i == RM_RANGE_BCHAN)
			continue;

		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);
	}

	/* tchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
	} else {
		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->tchan_map,
						  &rm_res->desc[i], "tchan");
	}

	/* rchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
	} else {
		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->rchan_map,
						  &rm_res->desc[i], "rchan");
	}

	/* rflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		/* all rflows are assigned exclusively to Linux */
		bitmap_zero(ud->rflow_in_use, ud->rflow_cnt);
	} else {
		bitmap_fill(ud->rflow_in_use, ud->rflow_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->rflow_in_use,
						  &rm_res->desc[i], "rflow");
	}
	irq_res.sets = rm_res->sets;

	/* tflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
	if (IS_ERR(rm_res)) {
		/* all tflows are assigned exclusively to Linux */
		bitmap_zero(ud->tflow_map, ud->tflow_cnt);
	} else {
		bitmap_fill(ud->tflow_map, ud->tflow_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->tflow_map,
						  &rm_res->desc[i], "tflow");
	}
	irq_res.sets += rm_res->sets;

	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
	for (i = 0; i < rm_res->sets; i++) {
		irq_res.desc[i].start = rm_res->desc[i].start +
					oes->pktdma_tchan_flow;
		irq_res.desc[i].num = rm_res->desc[i].num;
	}
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	for (j = 0; j < rm_res->sets; j++, i++) {
		irq_res.desc[i].start = rm_res->desc[j].start +
					oes->pktdma_rchan_flow;
		irq_res.desc[i].num = rm_res->desc[j].num;
	}
	ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
	kfree(irq_res.desc);
	if (ret) {
		dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
		return ret;
	}

	return 0;
}

static int setup_resources(struct udma_dev *ud)
{
	struct device *dev = ud->dev;
	int ch_count, ret;

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		ret = udma_setup_resources(ud);
		break;
	case DMA_TYPE_BCDMA:
		ret = bcdma_setup_resources(ud);
		break;
	case DMA_TYPE_PKTDMA:
		ret = pktdma_setup_resources(ud);
		break;
	default:
		return -EINVAL;
	}

	if (ret)
		return ret;

	ch_count  = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt;
	if (ud->bchan_cnt)
		ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt);
	ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
	ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
	if (!ch_count)
		return -ENODEV;

	ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
				    GFP_KERNEL);
	if (!ud->channels)
		return -ENOMEM;

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		dev_info(dev,
			 "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
			 ch_count,
			 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
						       ud->tchan_cnt),
			 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
						       ud->rchan_cnt),
			 ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
						       ud->rflow_cnt));
		break;
	case DMA_TYPE_BCDMA:
		dev_info(dev,
			 "Channels: %d (bchan: %u, tchan: %u, rchan: %u)\n",
			 ch_count,
			 ud->bchan_cnt - bitmap_weight(ud->bchan_map,
						       ud->bchan_cnt),
			 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
						       ud->tchan_cnt),
			 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
						       ud->rchan_cnt));
		break;
	case DMA_TYPE_PKTDMA:
		dev_info(dev,
			 "Channels: %d (tchan: %u, rchan: %u)\n",
			 ch_count,
			 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
						       ud->tchan_cnt),
			 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
						       ud->rchan_cnt));
		break;
	default:
		break;
	}

	return ch_count;
}

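/*
 * Added commentary (interpretation, not from the original sources): ch_count
 * starts from the total number of channels the hardware reports and is then
 * reduced by the channels that the TISCI resource ranges left marked in the
 * bitmaps as reserved for other hosts, so the value returned here is the
 * number of channels this Linux instance may actually populate with
 * udma_chan structures.
 */
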
static int udma_setup_rx_flush(struct udma_dev *ud)
{
	struct udma_rx_flush *rx_flush = &ud->rx_flush;
	struct cppi5_desc_hdr_t *tr_desc;
	struct cppi5_tr_type1_t *tr_req;
	struct cppi5_host_desc_t *desc;
	struct device *dev = ud->dev;
	struct udma_hwdesc *hwdesc;
	size_t tr_size;

	/* Allocate 1K buffer for discarded data on RX channel teardown */
	rx_flush->buffer_size = SZ_1K;
	rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size,
					      GFP_KERNEL);
	if (!rx_flush->buffer_vaddr)
		return -ENOMEM;

	rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr,
						rx_flush->buffer_size,
						DMA_TO_DEVICE);
	if (dma_mapping_error(dev, rx_flush->buffer_paddr))
		return -ENOMEM;

	/* Set up descriptor to be used for TR mode */
	hwdesc = &rx_flush->hwdescs[0];
	tr_size = sizeof(struct cppi5_tr_type1_t);
	hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1);
	hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
					ud->desc_align);

	hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
						GFP_KERNEL);
	if (!hwdesc->cppi5_desc_vaddr)
		return -ENOMEM;

	hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
						  hwdesc->cppi5_desc_size,
						  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
		return -ENOMEM;

	/* Start of the TR req records */
	hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
	/* Start address of the TR response array */
	hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size;

	tr_desc = hwdesc->cppi5_desc_vaddr;
	cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0);
	cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
	cppi5_desc_set_retpolicy(tr_desc, 0, 0);

	tr_req = hwdesc->tr_req_base;
	cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false,
		      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
	cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT);

	tr_req->addr = rx_flush->buffer_paddr;
	tr_req->icnt0 = rx_flush->buffer_size;
	tr_req->icnt1 = 1;

	dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
				   hwdesc->cppi5_desc_size, DMA_TO_DEVICE);

	/* Set up descriptor to be used for packet mode */
	hwdesc = &rx_flush->hwdescs[1];
	hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
					CPPI5_INFO0_HDESC_EPIB_SIZE +
					CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE,
					ud->desc_align);

	hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
						GFP_KERNEL);
	if (!hwdesc->cppi5_desc_vaddr)
		return -ENOMEM;

	hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
						  hwdesc->cppi5_desc_size,
						  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
		return -ENOMEM;

	desc = hwdesc->cppi5_desc_vaddr;
	cppi5_hdesc_init(desc, 0, 0);
	cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
	cppi5_desc_set_retpolicy(&desc->hdr, 0, 0);

	cppi5_hdesc_attach_buf(desc,
			       rx_flush->buffer_paddr, rx_flush->buffer_size,
			       rx_flush->buffer_paddr, rx_flush->buffer_size);

	dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
				   hwdesc->cppi5_desc_size, DMA_TO_DEVICE);

	return 0;
}

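/*
 * Added commentary (interpretation, not from the original sources): the two
 * hwdescs prepared above both point at the same 1K scratch buffer. When an
 * RX channel is torn down while data is still in flight, the driver can push
 * hwdescs[0] (TR mode) or hwdescs[1] (packet mode) to the channel's free
 * descriptor ring so the remaining data is drained into the scratch buffer
 * and discarded instead of stalling the teardown.
 */
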
#ifdef CONFIG_DEBUG_FS
static void udma_dbg_summary_show_chan(struct seq_file *s,
				       struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	struct udma_chan_config *ucc = &uc->config;

	seq_printf(s, " %-13s| %s", dma_chan_name(chan),
		   chan->dbg_client_name ?: "in-use");
	if (ucc->tr_trigger_type)
		seq_puts(s, " (triggered, ");
	else
		seq_printf(s, " (%s, ",
			   dmaengine_get_direction_text(uc->config.dir));

	switch (uc->config.dir) {
	case DMA_MEM_TO_MEM:
		if (uc->ud->match_data->type == DMA_TYPE_BCDMA) {
			seq_printf(s, "bchan%d)\n", uc->bchan->id);
			return;
		}

		seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id,
			   ucc->src_thread, ucc->dst_thread);
		break;
	case DMA_DEV_TO_MEM:
		seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id,
			   ucc->src_thread, ucc->dst_thread);
		if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
			seq_printf(s, "rflow%d, ", uc->rflow->id);
		break;
	case DMA_MEM_TO_DEV:
		seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id,
			   ucc->src_thread, ucc->dst_thread);
		if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
			seq_printf(s, "tflow%d, ", uc->tchan->tflow_id);
		break;
	default:
		seq_printf(s, ")\n");
		return;
	}

	if (ucc->ep_type == PSIL_EP_NATIVE) {
		seq_printf(s, "PSI-L Native");
		if (ucc->metadata_size) {
			seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : "");
			if (ucc->psd_size)
				seq_printf(s, " PSDsize:%u", ucc->psd_size);
			seq_printf(s, " ]");
		}
	} else {
		seq_printf(s, "PDMA");
		if (ucc->enable_acc32 || ucc->enable_burst)
			seq_printf(s, "[%s%s ]",
				   ucc->enable_acc32 ? " ACC32" : "",
				   ucc->enable_burst ? " BURST" : "");
	}

	seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode");
}

static void udma_dbg_summary_show(struct seq_file *s,
				  struct dma_device *dma_dev)
{
	struct dma_chan *chan;

	list_for_each_entry(chan, &dma_dev->channels, device_node) {
		if (chan->client_count)
			udma_dbg_summary_show_chan(s, chan);
	}
}
#endif /* CONFIG_DEBUG_FS */
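
/*
 * Added commentary: a per-channel summary line produced by the debugfs
 * callbacks above might look roughly like the following (illustrative only;
 * the channel name, thread IDs and channel numbers below are made-up values
 * and depend entirely on the runtime configuration):
 *
 *	dma0chan0     | in-use (DEV_TO_MEM, rchan4 [0x4a00 -> 0xc000],
 *	PSI-L Native, Packet mode)
 */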

#define TI_UDMAC_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

static int udma_probe(struct platform_device *pdev)
{
	struct device_node *navss_node = pdev->dev.parent->of_node;
	const struct soc_device_attribute *soc;
	struct device *dev = &pdev->dev;
	struct udma_dev *ud;
	const struct of_device_id *match;
	int i, ret;
	int ch_count;

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (ret)
		dev_err(dev, "failed to set dma mask stuff\n");

	ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
	if (!ud)
		return -ENOMEM;

	match = of_match_node(udma_of_match, dev->of_node);
	if (!match)
		match = of_match_node(bcdma_of_match, dev->of_node);
	if (!match) {
		match = of_match_node(pktdma_of_match, dev->of_node);
		if (!match) {
			dev_err(dev, "No compatible match found\n");
			return -ENODEV;
		}
	}
	ud->match_data = match->data;

	soc = soc_device_match(k3_soc_devices);
	if (!soc) {
		dev_err(dev, "No compatible SoC found\n");
		return -ENODEV;
	}
	ud->soc_data = soc->data;

	ret = udma_get_mmrs(pdev, ud);
	if (ret)
		return ret;

	ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
	if (IS_ERR(ud->tisci_rm.tisci))
		return PTR_ERR(ud->tisci_rm.tisci);

	ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id",
				   &ud->tisci_rm.tisci_dev_id);
	if (ret) {
		dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
		return ret;
	}
	pdev->id = ud->tisci_rm.tisci_dev_id;

	ret = of_property_read_u32(navss_node, "ti,sci-dev-id",
				   &ud->tisci_rm.tisci_navss_dev_id);
	if (ret) {
		dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret);
		return ret;
	}

	if (ud->match_data->type == DMA_TYPE_UDMA) {
		ret = of_property_read_u32(dev->of_node, "ti,udma-atype",
					   &ud->atype);
		if (!ret && ud->atype > 2) {
			dev_err(dev, "Invalid atype: %u\n", ud->atype);
			return -EINVAL;
		}
	} else {
		ret = of_property_read_u32(dev->of_node, "ti,asel",
					   &ud->asel);
		if (!ret && ud->asel > 15) {
			dev_err(dev, "Invalid asel: %u\n", ud->asel);
			return -EINVAL;
		}
	}

	ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
	ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;

	if (ud->match_data->type == DMA_TYPE_UDMA) {
		ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
	} else {
		struct k3_ringacc_init_data ring_init_data;

		ring_init_data.tisci = ud->tisci_rm.tisci;
		ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id;
		if (ud->match_data->type == DMA_TYPE_BCDMA) {
			ring_init_data.num_rings = ud->bchan_cnt +
						   ud->tchan_cnt +
						   ud->rchan_cnt;
		} else {
			ring_init_data.num_rings = ud->rflow_cnt +
						   ud->tflow_cnt;
		}

		ud->ringacc = k3_ringacc_dmarings_init(pdev, &ring_init_data);
	}

	if (IS_ERR(ud->ringacc))
		return PTR_ERR(ud->ringacc);

	dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
					    DOMAIN_BUS_TI_SCI_INTA_MSI);
	if (!dev->msi_domain) {
		dev_err(dev, "Failed to get MSI domain\n");
		return -EPROBE_DEFER;
	}

	dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
	/* cyclic operation is not supported via PKTDMA */
	if (ud->match_data->type != DMA_TYPE_PKTDMA) {
		dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
		ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
	}

	ud->ddev.device_config = udma_slave_config;
	ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
	ud->ddev.device_issue_pending = udma_issue_pending;
	ud->ddev.device_tx_status = udma_tx_status;
	ud->ddev.device_pause = udma_pause;
	ud->ddev.device_resume = udma_resume;
	ud->ddev.device_terminate_all = udma_terminate_all;
	ud->ddev.device_synchronize = udma_synchronize;
#ifdef CONFIG_DEBUG_FS
	ud->ddev.dbg_summary_show = udma_dbg_summary_show;
#endif

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		ud->ddev.device_alloc_chan_resources =
					udma_alloc_chan_resources;
		break;
	case DMA_TYPE_BCDMA:
		ud->ddev.device_alloc_chan_resources =
					bcdma_alloc_chan_resources;
		ud->ddev.device_router_config = bcdma_router_config;
		break;
	case DMA_TYPE_PKTDMA:
		ud->ddev.device_alloc_chan_resources =
					pktdma_alloc_chan_resources;
		break;
	default:
		return -EINVAL;
	}
	ud->ddev.device_free_chan_resources = udma_free_chan_resources;

	ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
	ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
	ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	ud->ddev.copy_align = DMAENGINE_ALIGN_8_BYTES;
	ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
				       DESC_METADATA_ENGINE;
	if (ud->match_data->enable_memcpy_support &&
	    !(ud->match_data->type == DMA_TYPE_BCDMA && ud->bchan_cnt == 0)) {
		dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
		ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
		ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
	}

	ud->ddev.dev = dev;
	ud->dev = dev;
	ud->psil_base = ud->match_data->psil_base;

	INIT_LIST_HEAD(&ud->ddev.channels);
	INIT_LIST_HEAD(&ud->desc_to_purge);

	ch_count = setup_resources(ud);
	if (ch_count <= 0)
		return ch_count;

	spin_lock_init(&ud->lock);
	INIT_WORK(&ud->purge_work, udma_purge_desc_work);

	ud->desc_align = 64;
	if (ud->desc_align < dma_get_cache_alignment())
		ud->desc_align = dma_get_cache_alignment();

	ret = udma_setup_rx_flush(ud);
	if (ret)
		return ret;

	for (i = 0; i < ud->bchan_cnt; i++) {
		struct udma_bchan *bchan = &ud->bchans[i];

		bchan->id = i;
		bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000;
	}

	for (i = 0; i < ud->tchan_cnt; i++) {
		struct udma_tchan *tchan = &ud->tchans[i];

		tchan->id = i;
		tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
	}

	for (i = 0; i < ud->rchan_cnt; i++) {
		struct udma_rchan *rchan = &ud->rchans[i];

		rchan->id = i;
		rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
	}

	for (i = 0; i < ud->rflow_cnt; i++) {
		struct udma_rflow *rflow = &ud->rflows[i];

		rflow->id = i;
	}

	for (i = 0; i < ch_count; i++) {
		struct udma_chan *uc = &ud->channels[i];

		uc->ud = ud;
		uc->vc.desc_free = udma_desc_free;
		uc->id = i;
		uc->bchan = NULL;
		uc->tchan = NULL;
		uc->rchan = NULL;
		uc->config.remote_thread_id = -1;
		uc->config.mapped_channel_id = -1;
		uc->config.default_flow_id = -1;
		uc->config.dir = DMA_MEM_TO_MEM;
		uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
					  dev_name(dev), i);

		vchan_init(&uc->vc, &ud->ddev);
		/* Use custom vchan completion handling */
		tasklet_setup(&uc->vc.task, udma_vchan_complete);
		init_completion(&uc->teardown_completed);
		INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
	}

	ret = dma_async_device_register(&ud->ddev);
	if (ret) {
		dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
		return ret;
	}

	platform_set_drvdata(pdev, ud);

	ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
	if (ret) {
		dev_err(dev, "failed to register of_dma controller\n");
		dma_async_device_unregister(&ud->ddev);
	}

	return ret;
}

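/*
 * Added commentary: a minimal sketch of how a peripheral driver typically
 * consumes a channel exposed by udma_probe() through the generic dmaengine
 * client API. This is illustrative only and not part of this driver; the
 * channel name "rx" and the fifo_phys_addr/buf_dma/len values are
 * hypothetical placeholders:
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_DEV_TO_MEM,
 *		.src_addr = fifo_phys_addr,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *	};
 *	struct dma_async_tx_descriptor *desc;
 *	struct dma_chan *chan;
 *
 *	chan = dma_request_chan(dev, "rx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_single(chan, buf_dma, len,
 *					   DMA_DEV_TO_MEM,
 *					   DMA_PREP_INTERRUPT);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */
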
static struct platform_driver udma_driver = {
	.driver = {
		.name	= "ti-udma",
		.of_match_table = udma_of_match,
		.suppress_bind_attrs = true,
	},
	.probe		= udma_probe,
};
builtin_platform_driver(udma_driver);

static struct platform_driver bcdma_driver = {
	.driver = {
		.name	= "ti-bcdma",
		.of_match_table = bcdma_of_match,
		.suppress_bind_attrs = true,
	},
	.probe		= udma_probe,
};
builtin_platform_driver(bcdma_driver);

static struct platform_driver pktdma_driver = {
	.driver = {
		.name	= "ti-pktdma",
		.of_match_table = pktdma_of_match,
		.suppress_bind_attrs = true,
	},
	.probe		= udma_probe,
};
builtin_platform_driver(pktdma_driver);

/* Private interfaces to UDMA */
#include "k3-udma-private.c"