// SPDX-License-Identifier: GPL-2.0
/*
 * K3 NAVSS DMA glue interface
 *
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 */

#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>

#include "k3-udma.h"
#include "k3-psil-priv.h"
struct k3_udma_glue_common {
	struct device *dev;
	struct device chan_dev;
	struct udma_dev *udmax;
	const struct udma_tisci_rm *tisci_rm;
	struct k3_ringacc *ringacc;
	u32 src_thread;
	u32 dst_thread;

	u32  hdesc_size;
	bool epib;
	u32  psdata_size;
	u32  swdata_size;
	u32  atype_asel;
	struct psil_endpoint_config *ep_config;
};

struct k3_udma_glue_tx_channel {
	struct k3_udma_glue_common common;

	struct udma_tchan *udma_tchanx;
	int udma_tchan_id;

	struct k3_ring *ringtx;
	struct k3_ring *ringtxcq;

	bool psil_paired;

	int virq;

	atomic_t free_pkts;
	bool tx_pause_on_err;
	bool tx_filt_einfo;
	bool tx_filt_pswords;
	bool tx_supr_tdpkt;

	int udma_tflow_id;
};

struct k3_udma_glue_rx_flow {
	struct udma_rflow *udma_rflow;
	int udma_rflow_id;
	struct k3_ring *ringrx;
	struct k3_ring *ringrxfdq;

	int virq;
};

struct k3_udma_glue_rx_channel {
	struct k3_udma_glue_common common;

	struct udma_rchan *udma_rchanx;
	int udma_rchan_id;
	bool remote;

	bool psil_paired;

	u32 flow_num;
	u32 flow_id_base; /* first flow_id used by this channel */
	struct k3_udma_glue_rx_flow *flows;
	u32 flows_ready;
};
static void k3_udma_chan_dev_release(struct device *dev)
{
	/* The struct containing the device is devm managed */
}

static struct class k3_udma_glue_devclass = {
	.name		= "k3_udma_glue_chan",
	.dev_release	= k3_udma_chan_dev_release,
};

#define K3_UDMAX_TDOWN_TIMEOUT_US 1000
static int of_k3_udma_glue_parse(struct device_node *udmax_np,
				 struct k3_udma_glue_common *common)
{
	common->udmax = of_xudma_dev_get(udmax_np, NULL);
	if (IS_ERR(common->udmax))
		return PTR_ERR(common->udmax);

	common->ringacc = xudma_get_ringacc(common->udmax);
	common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax);

	return 0;
}
static int of_k3_udma_glue_parse_chn_common(struct k3_udma_glue_common *common, u32 thread_id,
					    bool tx_chn)
{
	if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET))
		return -EINVAL;

	if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET))
		return -EINVAL;

	/* get psil endpoint config */
	common->ep_config = psil_get_ep_config(thread_id);
	if (IS_ERR(common->ep_config)) {
		dev_err(common->dev,
			"No configuration for psi-l thread 0x%04x\n",
			thread_id);
		return PTR_ERR(common->ep_config);
	}

	common->epib = common->ep_config->needs_epib;
	common->psdata_size = common->ep_config->psd_size;

	if (tx_chn)
		common->dst_thread = thread_id;
	else
		common->src_thread = thread_id;

	return 0;
}
static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
		const char *name, struct k3_udma_glue_common *common,
		bool tx_chn)
{
	struct of_phandle_args dma_spec;
	u32 thread_id;
	int ret = 0;
	int index;

	if (unlikely(!name))
		return -EINVAL;

	index = of_property_match_string(chn_np, "dma-names", name);
	if (index < 0)
		return index;

	if (of_parse_phandle_with_args(chn_np, "dmas", "#dma-cells", index,
				       &dma_spec))
		return -ENOENT;

	ret = of_k3_udma_glue_parse(dma_spec.np, common);
	if (ret)
		goto out_put_spec;

	thread_id = dma_spec.args[0];
	if (dma_spec.args_count == 2) {
		if (dma_spec.args[1] > 2 && !xudma_is_pktdma(common->udmax)) {
			dev_err(common->dev, "Invalid channel atype: %u\n",
				dma_spec.args[1]);
			ret = -EINVAL;
			goto out_put_spec;
		}
		if (dma_spec.args[1] > 15 && xudma_is_pktdma(common->udmax)) {
			dev_err(common->dev, "Invalid channel asel: %u\n",
				dma_spec.args[1]);
			ret = -EINVAL;
			goto out_put_spec;
		}

		common->atype_asel = dma_spec.args[1];
	}

	ret = of_k3_udma_glue_parse_chn_common(common, thread_id, tx_chn);

out_put_spec:
	of_node_put(dma_spec.np);
	return ret;
}
static int
of_k3_udma_glue_parse_chn_by_id(struct device_node *udmax_np, struct k3_udma_glue_common *common,
				bool tx_chn, u32 thread_id)
{
	int ret = 0;

	if (unlikely(!udmax_np))
		return -EINVAL;

	ret = of_k3_udma_glue_parse(udmax_np, common);
	if (ret)
		return ret;

	ret = of_k3_udma_glue_parse_chn_common(common, thread_id, tx_chn);
	return ret;
}
static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	struct device *dev = tx_chn->common.dev;

	dev_dbg(dev, "dump_tx_chn:\n"
		"udma_tchan_id: %d\n"
		"src_thread: %08x\n"
		"dst_thread: %08x\n",
		tx_chn->udma_tchan_id,
		tx_chn->common.src_thread,
		tx_chn->common.dst_thread);
}
static void k3_udma_glue_dump_tx_rt_chn(struct k3_udma_glue_tx_channel *chn,
					char *mark)
{
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "=== dump ===> %s\n", mark);
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
		xudma_tchanrt_read(chn->udma_tchanx,
				   UDMA_CHAN_RT_PEER_RT_EN_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_PCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_BCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_SBCNT_REG));
}
static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	const struct udma_tisci_rm *tisci_rm = tx_chn->common.tisci_rm;
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req;

	memset(&req, 0, sizeof(req));

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = tx_chn->udma_tchan_id;
	if (tx_chn->tx_pause_on_err)
		req.tx_pause_on_err = 1;
	if (tx_chn->tx_filt_einfo)
		req.tx_filt_einfo = 1;
	if (tx_chn->tx_filt_pswords)
		req.tx_filt_pswords = 1;
	req.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	if (tx_chn->tx_supr_tdpkt)
		req.tx_supr_tdpkt = 1;
	req.tx_fetch_size = tx_chn->common.hdesc_size >> 2;
	req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
	req.tx_atype = tx_chn->common.atype_asel;

	return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
}
static int
k3_udma_glue_request_tx_chn_common(struct device *dev,
				   struct k3_udma_glue_tx_channel *tx_chn,
				   struct k3_udma_glue_tx_channel_cfg *cfg)
{
	int ret;

	tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib,
							  tx_chn->common.psdata_size,
							  tx_chn->common.swdata_size);

	if (xudma_is_pktdma(tx_chn->common.udmax))
		tx_chn->udma_tchan_id = tx_chn->common.ep_config->mapped_channel_id;
	else
		tx_chn->udma_tchan_id = -1;

	/* request and cfg UDMAP TX channel */
	tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax,
					      tx_chn->udma_tchan_id);
	if (IS_ERR(tx_chn->udma_tchanx)) {
		ret = PTR_ERR(tx_chn->udma_tchanx);
		dev_err(dev, "UDMAX tchanx get err %d\n", ret);
		return ret;
	}
	tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);

	tx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
	tx_chn->common.chan_dev.parent = xudma_get_device(tx_chn->common.udmax);
	dev_set_name(&tx_chn->common.chan_dev, "tchan%d-0x%04x",
		     tx_chn->udma_tchan_id, tx_chn->common.dst_thread);
	ret = device_register(&tx_chn->common.chan_dev);
	if (ret) {
		dev_err(dev, "Channel Device registration failed %d\n", ret);
		put_device(&tx_chn->common.chan_dev);
		tx_chn->common.chan_dev.parent = NULL;
		return ret;
	}

	if (xudma_is_pktdma(tx_chn->common.udmax)) {
		/* prepare the channel device as coherent */
		tx_chn->common.chan_dev.dma_coherent = true;
		dma_coerce_mask_and_coherent(&tx_chn->common.chan_dev,
					     DMA_BIT_MASK(48));
	}

	atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size);

	if (xudma_is_pktdma(tx_chn->common.udmax))
		tx_chn->udma_tflow_id = tx_chn->common.ep_config->default_flow_id;
	else
		tx_chn->udma_tflow_id = tx_chn->udma_tchan_id;

	/* request and cfg rings */
	ret = k3_ringacc_request_rings_pair(tx_chn->common.ringacc,
					    tx_chn->udma_tflow_id, -1,
					    &tx_chn->ringtx,
					    &tx_chn->ringtxcq);
	if (ret) {
		dev_err(dev, "Failed to get TX/TXCQ rings %d\n", ret);
		return ret;
	}

	/* Set the dma_dev for the rings to be configured */
	cfg->tx_cfg.dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn);
	cfg->txcq_cfg.dma_dev = cfg->tx_cfg.dma_dev;

	/* Set the ASEL value for DMA rings of PKTDMA */
	if (xudma_is_pktdma(tx_chn->common.udmax)) {
		cfg->tx_cfg.asel = tx_chn->common.atype_asel;
		cfg->txcq_cfg.asel = tx_chn->common.atype_asel;
	}

	ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringtx %d\n", ret);
		return ret;
	}

	ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringtxcq %d\n", ret);
		return ret;
	}

	/* request and cfg psi-l */
	tx_chn->common.src_thread =
			xudma_dev_get_psil_base(tx_chn->common.udmax) +
			tx_chn->udma_tchan_id;

	ret = k3_udma_glue_cfg_tx_chn(tx_chn);
	if (ret) {
		dev_err(dev, "Failed to cfg tchan %d\n", ret);
		return ret;
	}

	k3_udma_glue_dump_tx_chn(tx_chn);

	return 0;
}
struct k3_udma_glue_tx_channel *
k3_udma_glue_request_tx_chn(struct device *dev, const char *name,
			    struct k3_udma_glue_tx_channel_cfg *cfg)
{
	struct k3_udma_glue_tx_channel *tx_chn;
	int ret;

	tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
	if (!tx_chn)
		return ERR_PTR(-ENOMEM);

	tx_chn->common.dev = dev;
	tx_chn->common.swdata_size = cfg->swdata_size;
	tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
	tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
	tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
	tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;

	/* parse of udmap channel */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&tx_chn->common, true);
	if (ret)
		goto err;

	ret = k3_udma_glue_request_tx_chn_common(dev, tx_chn, cfg);
	if (ret)
		goto err;

	return tx_chn;

err:
	k3_udma_glue_release_tx_chn(tx_chn);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn);
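
/*
 * Example use (an illustrative sketch, not code from this driver): a client
 * requests a TX channel named "tx0" from its "dma-names" property. The ring
 * sizes and swdata size below are assumptions chosen for the example.
 *
 *	struct k3_udma_glue_tx_channel_cfg cfg = { 0 };
 *	struct k3_udma_glue_tx_channel *tx_chn;
 *
 *	cfg.swdata_size = 16;
 *	cfg.tx_cfg.size = 128;
 *	cfg.tx_cfg.mode = K3_RINGACC_RING_MODE_RING;
 *	cfg.txcq_cfg.size = 128;
 *	cfg.txcq_cfg.mode = K3_RINGACC_RING_MODE_RING;
 *
 *	tx_chn = k3_udma_glue_request_tx_chn(dev, "tx0", &cfg);
 *	if (IS_ERR(tx_chn))
 *		return PTR_ERR(tx_chn);
 */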
struct k3_udma_glue_tx_channel *
k3_udma_glue_request_tx_chn_for_thread_id(struct device *dev,
					  struct k3_udma_glue_tx_channel_cfg *cfg,
					  struct device_node *udmax_np, u32 thread_id)
{
	struct k3_udma_glue_tx_channel *tx_chn;
	int ret;

	tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
	if (!tx_chn)
		return ERR_PTR(-ENOMEM);

	tx_chn->common.dev = dev;
	tx_chn->common.swdata_size = cfg->swdata_size;
	tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
	tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
	tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
	tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;

	ret = of_k3_udma_glue_parse_chn_by_id(udmax_np, &tx_chn->common, true, thread_id);
	if (ret)
		goto err;

	ret = k3_udma_glue_request_tx_chn_common(dev, tx_chn, cfg);
	if (ret)
		goto err;

	return tx_chn;

err:
	k3_udma_glue_release_tx_chn(tx_chn);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn_for_thread_id);
void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	if (tx_chn->psil_paired) {
		xudma_navss_psil_unpair(tx_chn->common.udmax,
					tx_chn->common.src_thread,
					tx_chn->common.dst_thread);
		tx_chn->psil_paired = false;
	}

	if (!IS_ERR_OR_NULL(tx_chn->udma_tchanx))
		xudma_tchan_put(tx_chn->common.udmax,
				tx_chn->udma_tchanx);

	if (tx_chn->ringtxcq)
		k3_ringacc_ring_free(tx_chn->ringtxcq);

	if (tx_chn->ringtx)
		k3_ringacc_ring_free(tx_chn->ringtx);

	if (tx_chn->common.chan_dev.parent) {
		device_unregister(&tx_chn->common.chan_dev);
		tx_chn->common.chan_dev.parent = NULL;
	}
}
EXPORT_SYMBOL_GPL(k3_udma_glue_release_tx_chn);
int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			     struct cppi5_host_desc_t *desc_tx,
			     dma_addr_t desc_dma)
{
	u32 ringtxcq_id;

	if (!atomic_add_unless(&tx_chn->free_pkts, -1, 0))
		return -ENOMEM;

	ringtxcq_id = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
	cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, ringtxcq_id);

	return k3_ringacc_ring_push(tx_chn->ringtx, &desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_push_tx_chn);

int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			    dma_addr_t *desc_dma)
{
	int ret;

	ret = k3_ringacc_ring_pop(tx_chn->ringtxcq, desc_dma);
	if (!ret)
		atomic_inc(&tx_chn->free_pkts);

	return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn);
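
/*
 * Typical descriptor lifecycle around the two helpers above (a sketch, not
 * code from this driver): the client pushes a prepared CPPI5 host descriptor
 * to the TX ring and later, e.g. from its completion handler, pops finished
 * descriptor addresses back from the TX completion ring. complete_one() is a
 * hypothetical driver helper.
 *
 *	ret = k3_udma_glue_push_tx_chn(tx_chn, desc_tx, desc_dma);
 *	...
 *	while (!k3_udma_glue_pop_tx_chn(tx_chn, &desc_dma))
 *		complete_one(desc_dma);
 *
 * push returns -ENOMEM once the free_pkts budget (sized to the TXCQ ring) is
 * used up, which keeps the completion ring from ever overflowing.
 */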
int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	int ret;

	ret = xudma_navss_psil_pair(tx_chn->common.udmax,
				    tx_chn->common.src_thread,
				    tx_chn->common.dst_thread);
	if (ret) {
		dev_err(tx_chn->common.dev, "PSI-L request err %d\n", ret);
		return ret;
	}

	tx_chn->psil_paired = true;

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
			    UDMA_PEER_RT_EN_ENABLE);

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
			    UDMA_CHAN_RT_CTL_EN);

	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn en");
	return 0;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_enable_tx_chn);

void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis1");

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG, 0);

	xudma_tchanrt_write(tx_chn->udma_tchanx,
			    UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis2");

	if (tx_chn->psil_paired) {
		xudma_navss_psil_unpair(tx_chn->common.udmax,
					tx_chn->common.src_thread,
					tx_chn->common.dst_thread);
		tx_chn->psil_paired = false;
	}
}
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_tx_chn);
void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			       bool sync)
{
	int i = 0;
	u32 val;

	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown1");

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
			    UDMA_CHAN_RT_CTL_EN | UDMA_CHAN_RT_CTL_TDOWN);

	val = xudma_tchanrt_read(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = xudma_tchanrt_read(tx_chn->udma_tchanx,
					 UDMA_CHAN_RT_CTL_REG);
		udelay(1);
		if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
			dev_err(tx_chn->common.dev, "TX tdown timeout\n");
			break;
		}
		i++;
	}

	val = xudma_tchanrt_read(tx_chn->udma_tchanx,
				 UDMA_CHAN_RT_PEER_RT_EN_REG);
	if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
		dev_err(tx_chn->common.dev, "TX tdown peer not stopped\n");
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_tx_chn);
void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			       void *data,
			       void (*cleanup)(void *data, dma_addr_t desc_dma))
{
	struct device *dev = tx_chn->common.dev;
	dma_addr_t desc_dma;
	int occ_tx, i, ret;

	/*
	 * TXQ reset needs to be done in a special way as it is input for udma
	 * and its state is cached by udma, so:
	 * 1) save TXQ occupancy
	 * 2) clean up TXQ and call the .cleanup() callback for each desc
	 * 3) reset TXQ in a special way
	 */
	occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx);
	dev_dbg(dev, "TX reset occ_tx %u\n", occ_tx);

	for (i = 0; i < occ_tx; i++) {
		ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma);
		if (ret) {
			if (ret != -ENODATA)
				dev_err(dev, "TX reset pop %d\n", ret);
			break;
		}
		cleanup(data, desc_dma);
	}

	/* reset TXCQ as it is not input for udma - expected to be empty */
	k3_ringacc_ring_reset(tx_chn->ringtxcq);
	k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_tx_chn);
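
/*
 * The cleanup callback receives each descriptor still sitting in the TX ring
 * so the caller can unmap and free it. A minimal sketch (my_chan and
 * my_tx_cleanup are hypothetical, not part of this API):
 *
 *	static void my_tx_cleanup(void *data, dma_addr_t desc_dma)
 *	{
 *		struct my_chan *c = data;
 *
 *		// look up the descriptor by its DMA address, dma_unmap the
 *		// buffer and return the descriptor to the driver's pool
 *	}
 *
 *	k3_udma_glue_reset_tx_chn(tx_chn, c, my_tx_cleanup);
 */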
u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn)
{
	return tx_chn->common.hdesc_size;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_hdesc_size);

u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn)
{
	return k3_ringacc_get_ring_id(tx_chn->ringtxcq);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_txcq_id);
int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn)
{
	if (xudma_is_pktdma(tx_chn->common.udmax)) {
		tx_chn->virq = xudma_pktdma_tflow_get_irq(tx_chn->common.udmax,
							  tx_chn->udma_tflow_id);
	} else {
		tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq);
	}

	if (!tx_chn->virq)
		return -ENXIO;

	return tx_chn->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq);
struct device *
k3_udma_glue_tx_get_dma_device(struct k3_udma_glue_tx_channel *tx_chn)
{
	if (xudma_is_pktdma(tx_chn->common.udmax) &&
	    (tx_chn->common.atype_asel == 14 || tx_chn->common.atype_asel == 15))
		return &tx_chn->common.chan_dev;

	return xudma_get_device(tx_chn->common.udmax);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_dma_device);

void k3_udma_glue_tx_dma_to_cppi5_addr(struct k3_udma_glue_tx_channel *tx_chn,
				       dma_addr_t *addr)
{
	if (!xudma_is_pktdma(tx_chn->common.udmax) ||
	    !tx_chn->common.atype_asel)
		return;

	*addr |= (u64)tx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_dma_to_cppi5_addr);

void k3_udma_glue_tx_cppi5_to_dma_addr(struct k3_udma_glue_tx_channel *tx_chn,
				       dma_addr_t *addr)
{
	if (!xudma_is_pktdma(tx_chn->common.udmax) ||
	    !tx_chn->common.atype_asel)
		return;

	*addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_cppi5_to_dma_addr);
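
/*
 * Worked example of the two conversions above, assuming
 * K3_ADDRESS_ASEL_SHIFT is 48 (its value in the UDMA private header) and
 * atype_asel is 15: a DMA address 0x0000000820000000 becomes
 * 0x000F000820000000 in the descriptor (the asel packed into the top bits),
 * and masking with GENMASK(47, 0) on the way back recovers the plain DMA
 * address.
 */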
static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req;
	int ret;

	memset(&req, 0, sizeof(req));

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;

	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = rx_chn->udma_rchan_id;
	req.rx_fetch_size = rx_chn->common.hdesc_size >> 2;
	/*
	 * TODO: we can't support rxcq_qnum/RCHAN[a]_RCQ cfg with current sysfw
	 * and udmax impl, so just configure it to invalid value.
	 * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx);
	 */
	req.rxcq_qnum = 0xFFFF;
	if (!xudma_is_pktdma(rx_chn->common.udmax) && rx_chn->flow_num &&
	    rx_chn->flow_id_base != rx_chn->udma_rchan_id) {
		/* Default flow + extra ones */
		req.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
				    TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
		req.flowid_start = rx_chn->flow_id_base;
		req.flowid_cnt = rx_chn->flow_num;
	}
	req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	req.rx_atype = rx_chn->common.atype_asel;

	ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
	if (ret)
		dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n",
			rx_chn->udma_rchan_id, ret);

	return ret;
}
static void k3_udma_glue_release_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
					 u32 flow_num)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	if (IS_ERR_OR_NULL(flow->udma_rflow))
		return;

	if (flow->ringrxfdq)
		k3_ringacc_ring_free(flow->ringrxfdq);

	if (flow->ringrx)
		k3_ringacc_ring_free(flow->ringrx);

	xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
	flow->udma_rflow = NULL;
	rx_chn->flows_ready--;
}
static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
				    u32 flow_idx,
				    struct k3_udma_glue_rx_flow_cfg *flow_cfg)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;
	struct ti_sci_msg_rm_udmap_flow_cfg req;
	int rx_ring_id;
	int rx_ringfdq_id;
	int ret = 0;

	flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax,
					   flow->udma_rflow_id);
	if (IS_ERR(flow->udma_rflow)) {
		ret = PTR_ERR(flow->udma_rflow);
		dev_err(dev, "UDMAX rflow get err %d\n", ret);
		return ret;
	}

	if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) {
		ret = -ENODEV;
		goto err_rflow_put;
	}

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		rx_ringfdq_id = flow->udma_rflow_id +
				xudma_get_rflow_ring_offset(rx_chn->common.udmax);
		rx_ring_id = 0;
	} else {
		rx_ring_id = flow_cfg->ring_rxq_id;
		rx_ringfdq_id = flow_cfg->ring_rxfdq0_id;
	}

	/* request and cfg rings */
	ret = k3_ringacc_request_rings_pair(rx_chn->common.ringacc,
					    rx_ringfdq_id, rx_ring_id,
					    &flow->ringrxfdq,
					    &flow->ringrx);
	if (ret) {
		dev_err(dev, "Failed to get RX/RXFDQ rings %d\n", ret);
		goto err_rflow_put;
	}

	/* Set the dma_dev for the rings to be configured */
	flow_cfg->rx_cfg.dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn);
	flow_cfg->rxfdq_cfg.dma_dev = flow_cfg->rx_cfg.dma_dev;

	/* Set the ASEL value for DMA rings of PKTDMA */
	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		flow_cfg->rx_cfg.asel = rx_chn->common.atype_asel;
		flow_cfg->rxfdq_cfg.asel = rx_chn->common.atype_asel;
	}

	ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringrx %d\n", ret);
		goto err_ringrxfdq_free;
	}

	ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringrxfdq %d\n", ret);
		goto err_ringrxfdq_free;
	}

	if (rx_chn->remote) {
		rx_ring_id = TI_SCI_RESOURCE_NULL;
		rx_ringfdq_id = TI_SCI_RESOURCE_NULL;
	} else {
		rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
		rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);
	}

	memset(&req, 0, sizeof(req));

	req.valid_params =
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;
	if (rx_chn->common.epib)
		req.rx_einfo_present = 1;
	if (rx_chn->common.psdata_size)
		req.rx_psinfo_present = 1;
	if (flow_cfg->rx_error_handling)
		req.rx_error_handling = 1;
	req.rx_desc_type = 0;
	req.rx_dest_qnum = rx_ring_id;
	req.rx_src_tag_hi_sel = 0;
	req.rx_src_tag_lo_sel = flow_cfg->src_tag_lo_sel;
	req.rx_dest_tag_hi_sel = 0;
	req.rx_dest_tag_lo_sel = 0;
	req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
	req.rx_fdq1_qnum = rx_ringfdq_id;
	req.rx_fdq2_qnum = rx_ringfdq_id;
	req.rx_fdq3_qnum = rx_ringfdq_id;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id,
			ret);
		goto err_ringrxfdq_free;
	}

	rx_chn->flows_ready++;
	dev_dbg(dev, "flow%d config done. ready:%d\n",
		flow->udma_rflow_id, rx_chn->flows_ready);

	return 0;

err_ringrxfdq_free:
	k3_ringacc_ring_free(flow->ringrxfdq);
	k3_ringacc_ring_free(flow->ringrx);

err_rflow_put:
	xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
	flow->udma_rflow = NULL;

	return ret;
}
static void k3_udma_glue_dump_rx_chn(struct k3_udma_glue_rx_channel *chn)
{
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "dump_rx_chn:\n"
		"udma_rchan_id: %d\n"
		"src_thread: %08x\n"
		"dst_thread: %08x\n"
		"epib: %d\n"
		"hdesc_size: %u\n"
		"psdata_size: %u\n"
		"swdata_size: %u\n"
		"flow_id_base: %d\n"
		"flow_num: %d\n",
		chn->udma_rchan_id,
		chn->common.src_thread,
		chn->common.dst_thread,
		chn->common.epib,
		chn->common.hdesc_size,
		chn->common.psdata_size,
		chn->common.swdata_size,
		chn->flow_id_base,
		chn->flow_num);
}
static void k3_udma_glue_dump_rx_rt_chn(struct k3_udma_glue_rx_channel *chn,
					char *mark)
{
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "=== dump ===> %s\n", mark);

	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
		xudma_rchanrt_read(chn->udma_rchanx,
				   UDMA_CHAN_RT_PEER_RT_EN_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_PCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_BCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_SBCNT_REG));
}
static int
k3_udma_glue_allocate_rx_flows(struct k3_udma_glue_rx_channel *rx_chn,
			       struct k3_udma_glue_rx_channel_cfg *cfg)
{
	int ret;

	/* default rflow / rchan pair is not used in these cases */
	if (cfg->flow_id_use_rxchan_id)
		return 0;

	/* not a GP rflows */
	if (rx_chn->flow_id_base != -1 &&
	    !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
		return 0;

	/* Allocate range of GP rflows */
	ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax,
					 rx_chn->flow_id_base,
					 rx_chn->flow_num);
	if (ret < 0) {
		dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n",
			rx_chn->flow_id_base, rx_chn->flow_num, ret);
		return ret;
	}

	rx_chn->flow_id_base = ret;

	return 0;
}
static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
				 struct k3_udma_glue_rx_channel_cfg *cfg)
{
	struct k3_udma_glue_rx_channel *rx_chn;
	struct psil_endpoint_config *ep_cfg;
	int ret, i;

	if (cfg->flow_id_num <= 0)
		return ERR_PTR(-EINVAL);

	if (cfg->flow_id_num != 1 &&
	    (cfg->def_flow_cfg || cfg->flow_id_use_rxchan_id))
		return ERR_PTR(-EINVAL);

	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
	if (!rx_chn)
		return ERR_PTR(-ENOMEM);

	rx_chn->common.dev = dev;
	rx_chn->common.swdata_size = cfg->swdata_size;
	rx_chn->remote = false;

	/* parse of udmap channel */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&rx_chn->common, false);
	if (ret)
		goto err;

	rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
							  rx_chn->common.psdata_size,
							  rx_chn->common.swdata_size);

	ep_cfg = rx_chn->common.ep_config;

	if (xudma_is_pktdma(rx_chn->common.udmax))
		rx_chn->udma_rchan_id = ep_cfg->mapped_channel_id;
	else
		rx_chn->udma_rchan_id = -1;

	/* request and cfg UDMAP RX channel */
	rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax,
					      rx_chn->udma_rchan_id);
	if (IS_ERR(rx_chn->udma_rchanx)) {
		ret = PTR_ERR(rx_chn->udma_rchanx);
		dev_err(dev, "UDMAX rchanx get err %d\n", ret);
		goto err;
	}
	rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx);

	rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
	rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
	dev_set_name(&rx_chn->common.chan_dev, "rchan%d-0x%04x",
		     rx_chn->udma_rchan_id, rx_chn->common.src_thread);
	ret = device_register(&rx_chn->common.chan_dev);
	if (ret) {
		dev_err(dev, "Channel Device registration failed %d\n", ret);
		put_device(&rx_chn->common.chan_dev);
		rx_chn->common.chan_dev.parent = NULL;
		goto err;
	}

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		/* prepare the channel device as coherent */
		rx_chn->common.chan_dev.dma_coherent = true;
		dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
					     DMA_BIT_MASK(48));
	}

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		int flow_start = cfg->flow_id_base;
		int flow_end;

		if (flow_start == -1)
			flow_start = ep_cfg->flow_start;

		flow_end = flow_start + cfg->flow_id_num - 1;
		if (flow_start < ep_cfg->flow_start ||
		    flow_end > (ep_cfg->flow_start + ep_cfg->flow_num - 1)) {
			dev_err(dev, "Invalid flow range requested\n");
			ret = -EINVAL;
			goto err;
		}
		rx_chn->flow_id_base = flow_start;
	} else {
		rx_chn->flow_id_base = cfg->flow_id_base;

		/* Use RX channel id as flow id: target dev can't generate flow_id */
		if (cfg->flow_id_use_rxchan_id)
			rx_chn->flow_id_base = rx_chn->udma_rchan_id;
	}

	rx_chn->flow_num = cfg->flow_id_num;

	rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
				     sizeof(*rx_chn->flows), GFP_KERNEL);
	if (!rx_chn->flows) {
		ret = -ENOMEM;
		goto err;
	}

	ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
	if (ret)
		goto err;

	for (i = 0; i < rx_chn->flow_num; i++)
		rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;

	/* request and cfg psi-l */
	rx_chn->common.dst_thread =
			xudma_dev_get_psil_base(rx_chn->common.udmax) +
			rx_chn->udma_rchan_id;

	ret = k3_udma_glue_cfg_rx_chn(rx_chn);
	if (ret) {
		dev_err(dev, "Failed to cfg rchan %d\n", ret);
		goto err;
	}

	/* init default RX flow only if flow_num = 1 */
	if (cfg->def_flow_cfg) {
		ret = k3_udma_glue_cfg_rx_flow(rx_chn, 0, cfg->def_flow_cfg);
		if (ret)
			goto err;
	}

	k3_udma_glue_dump_rx_chn(rx_chn);

	return rx_chn;

err:
	k3_udma_glue_release_rx_chn(rx_chn);
	return ERR_PTR(ret);
}
static int
k3_udma_glue_request_remote_rx_chn_common(struct k3_udma_glue_rx_channel *rx_chn,
					  struct k3_udma_glue_rx_channel_cfg *cfg,
					  struct device *dev)
{
	int ret, i;

	rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
							  rx_chn->common.psdata_size,
							  rx_chn->common.swdata_size);

	rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
				     sizeof(*rx_chn->flows), GFP_KERNEL);
	if (!rx_chn->flows)
		return -ENOMEM;

	rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
	rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
	dev_set_name(&rx_chn->common.chan_dev, "rchan_remote-0x%04x-0x%02x",
		     rx_chn->common.src_thread, rx_chn->flow_id_base);
	ret = device_register(&rx_chn->common.chan_dev);
	if (ret) {
		dev_err(dev, "Channel Device registration failed %d\n", ret);
		put_device(&rx_chn->common.chan_dev);
		rx_chn->common.chan_dev.parent = NULL;
		return ret;
	}

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		/* prepare the channel device as coherent */
		rx_chn->common.chan_dev.dma_coherent = true;
		dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
					     DMA_BIT_MASK(48));
	}

	ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
	if (ret)
		return ret;

	for (i = 0; i < rx_chn->flow_num; i++)
		rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;

	k3_udma_glue_dump_rx_chn(rx_chn);

	return 0;
}
static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
				   struct k3_udma_glue_rx_channel_cfg *cfg)
{
	struct k3_udma_glue_rx_channel *rx_chn;
	int ret;

	if (cfg->flow_id_num <= 0 ||
	    cfg->flow_id_use_rxchan_id ||
	    cfg->def_flow_cfg ||
	    cfg->flow_id_base < 0)
		return ERR_PTR(-EINVAL);

	/*
	 * Remote RX channel is under control of Remote CPU core, so
	 * Linux can only request and manipulate by dedicated RX flows
	 */
	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
	if (!rx_chn)
		return ERR_PTR(-ENOMEM);

	rx_chn->common.dev = dev;
	rx_chn->common.swdata_size = cfg->swdata_size;
	rx_chn->remote = true;
	rx_chn->udma_rchan_id = -1;
	rx_chn->flow_num = cfg->flow_id_num;
	rx_chn->flow_id_base = cfg->flow_id_base;
	rx_chn->psil_paired = false;

	/* parse of udmap channel */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&rx_chn->common, false);
	if (ret)
		goto err;

	ret = k3_udma_glue_request_remote_rx_chn_common(rx_chn, cfg, dev);
	if (ret)
		goto err;

	return rx_chn;

err:
	k3_udma_glue_release_rx_chn(rx_chn);
	return ERR_PTR(ret);
}
struct k3_udma_glue_rx_channel *
k3_udma_glue_request_remote_rx_chn_for_thread_id(struct device *dev,
						 struct k3_udma_glue_rx_channel_cfg *cfg,
						 struct device_node *udmax_np, u32 thread_id)
{
	struct k3_udma_glue_rx_channel *rx_chn;
	int ret;

	if (cfg->flow_id_num <= 0 ||
	    cfg->flow_id_use_rxchan_id ||
	    cfg->def_flow_cfg ||
	    cfg->flow_id_base < 0)
		return ERR_PTR(-EINVAL);

	/*
	 * Remote RX channel is under control of Remote CPU core, so
	 * Linux can only request and manipulate by dedicated RX flows
	 */
	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
	if (!rx_chn)
		return ERR_PTR(-ENOMEM);

	rx_chn->common.dev = dev;
	rx_chn->common.swdata_size = cfg->swdata_size;
	rx_chn->remote = true;
	rx_chn->udma_rchan_id = -1;
	rx_chn->flow_num = cfg->flow_id_num;
	rx_chn->flow_id_base = cfg->flow_id_base;
	rx_chn->psil_paired = false;

	ret = of_k3_udma_glue_parse_chn_by_id(udmax_np, &rx_chn->common, false, thread_id);
	if (ret)
		goto err;

	ret = k3_udma_glue_request_remote_rx_chn_common(rx_chn, cfg, dev);
	if (ret)
		goto err;

	return rx_chn;

err:
	k3_udma_glue_release_rx_chn(rx_chn);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_remote_rx_chn_for_thread_id);
struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn(struct device *dev, const char *name,
			    struct k3_udma_glue_rx_channel_cfg *cfg)
{
	if (cfg->remote)
		return k3_udma_glue_request_remote_rx_chn(dev, name, cfg);

	return k3_udma_glue_request_rx_chn_priv(dev, name, cfg);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_rx_chn);
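
/*
 * Example request of a single-flow RX channel (an illustrative sketch, not
 * code from this driver), assuming "rx0" is listed in the client's
 * "dma-names" property. The sizes below are example assumptions.
 *
 *	struct k3_udma_glue_rx_flow_cfg flow_cfg = { 0 };
 *	struct k3_udma_glue_rx_channel_cfg cfg = { 0 };
 *	struct k3_udma_glue_rx_channel *rx_chn;
 *
 *	cfg.swdata_size = 16;
 *	cfg.flow_id_base = -1;		// let the glue pick GP rflows
 *	cfg.flow_id_num = 1;
 *	flow_cfg.rx_cfg.size = 128;
 *	flow_cfg.rxfdq_cfg.size = 128;
 *	cfg.def_flow_cfg = &flow_cfg;
 *
 *	rx_chn = k3_udma_glue_request_rx_chn(dev, "rx0", &cfg);
 *	if (IS_ERR(rx_chn))
 *		return PTR_ERR(rx_chn);
 */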
void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	int i;

	if (IS_ERR_OR_NULL(rx_chn->common.udmax))
		return;

	if (rx_chn->psil_paired) {
		xudma_navss_psil_unpair(rx_chn->common.udmax,
					rx_chn->common.src_thread,
					rx_chn->common.dst_thread);
		rx_chn->psil_paired = false;
	}

	for (i = 0; i < rx_chn->flow_num; i++)
		k3_udma_glue_release_rx_flow(rx_chn, i);

	if (xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
		xudma_free_gp_rflow_range(rx_chn->common.udmax,
					  rx_chn->flow_id_base,
					  rx_chn->flow_num);

	if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx))
		xudma_rchan_put(rx_chn->common.udmax,
				rx_chn->udma_rchanx);

	if (rx_chn->common.chan_dev.parent) {
		device_unregister(&rx_chn->common.chan_dev);
		rx_chn->common.chan_dev.parent = NULL;
	}
}
EXPORT_SYMBOL_GPL(k3_udma_glue_release_rx_chn);
int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn,
			      u32 flow_idx,
			      struct k3_udma_glue_rx_flow_cfg *flow_cfg)
{
	if (flow_idx >= rx_chn->flow_num)
		return -EINVAL;

	return k3_udma_glue_cfg_rx_flow(rx_chn, flow_idx, flow_cfg);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_init);

u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
				    u32 flow_idx)
{
	struct k3_udma_glue_rx_flow *flow;

	if (flow_idx >= rx_chn->flow_num)
		return -EINVAL;

	flow = &rx_chn->flows[flow_idx];

	return k3_ringacc_get_ring_id(flow->ringrxfdq);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_get_fdq_id);

u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn)
{
	return rx_chn->flow_id_base;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_flow_id_base);
int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
				u32 flow_idx)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;
	struct ti_sci_msg_rm_udmap_flow_cfg req;
	int rx_ring_id;
	int rx_ringfdq_id;
	int ret = 0;

	if (!rx_chn->remote)
		return -EINVAL;

	rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
	rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);

	memset(&req, 0, sizeof(req));

	req.valid_params =
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;
	req.rx_dest_qnum = rx_ring_id;
	req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
	req.rx_fdq1_qnum = rx_ringfdq_id;
	req.rx_fdq2_qnum = rx_ringfdq_id;
	req.rx_fdq3_qnum = rx_ringfdq_id;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
	if (ret)
		dev_err(dev, "flow%d enable failed: %d\n", flow->udma_rflow_id,
			ret);

	return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_enable);
int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
				 u32 flow_idx)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;
	struct ti_sci_msg_rm_udmap_flow_cfg req;
	int ret = 0;

	if (!rx_chn->remote)
		return -EINVAL;

	memset(&req, 0, sizeof(req));
	req.valid_params =
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;
	req.rx_dest_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq0_sz0_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq1_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq2_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq3_qnum = TI_SCI_RESOURCE_NULL;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
	if (ret)
		dev_err(dev, "flow%d disable failed: %d\n", flow->udma_rflow_id,
			ret);

	return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable);
int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	int ret;

	if (rx_chn->remote)
		return -EINVAL;

	if (rx_chn->flows_ready < rx_chn->flow_num)
		return -EINVAL;

	ret = xudma_navss_psil_pair(rx_chn->common.udmax,
				    rx_chn->common.src_thread,
				    rx_chn->common.dst_thread);
	if (ret) {
		dev_err(rx_chn->common.dev, "PSI-L request err %d\n", ret);
		return ret;
	}

	rx_chn->psil_paired = true;

	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG,
			    UDMA_CHAN_RT_CTL_EN);

	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
			    UDMA_PEER_RT_EN_ENABLE);

	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt en");
	return 0;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_enable_rx_chn);
void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis1");

	xudma_rchanrt_write(rx_chn->udma_rchanx,
			    UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG, 0);

	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis2");

	if (rx_chn->psil_paired) {
		xudma_navss_psil_unpair(rx_chn->common.udmax,
					rx_chn->common.src_thread,
					rx_chn->common.dst_thread);
		rx_chn->psil_paired = false;
	}
}
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_rx_chn);
void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			       bool sync)
{
	int i = 0;
	u32 val;

	if (rx_chn->remote)
		return;

	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown1");

	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
			    UDMA_PEER_RT_EN_ENABLE | UDMA_PEER_RT_EN_TEARDOWN);

	val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = xudma_rchanrt_read(rx_chn->udma_rchanx,
					 UDMA_CHAN_RT_CTL_REG);
		udelay(1);
		if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
			dev_err(rx_chn->common.dev, "RX tdown timeout\n");
			break;
		}
		i++;
	}

	val = xudma_rchanrt_read(rx_chn->udma_rchanx,
				 UDMA_CHAN_RT_PEER_RT_EN_REG);
	if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
		dev_err(rx_chn->common.dev, "RX tdown peer not stopped\n");
	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_rx_chn);
void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			       u32 flow_num, void *data,
			       void (*cleanup)(void *data, dma_addr_t desc_dma), bool skip_fdq)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
	struct device *dev = rx_chn->common.dev;
	dma_addr_t desc_dma;
	int occ_rx, i, ret;

	/* reset RXCQ as it is not input for udma - expected to be empty */
	occ_rx = k3_ringacc_ring_get_occ(flow->ringrx);
	dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx);

	/* Skip RX FDQ in case one FDQ is used for the set of flows */
	if (skip_fdq)
		goto do_reset;

	/*
	 * RX FDQ reset needs to be done in a special way as it is input for
	 * udma and its state is cached by udma, so:
	 * 1) save RX FDQ occupancy
	 * 2) clean up RX FDQ and call the .cleanup() callback for each desc
	 * 3) reset RX FDQ in a special way
	 */
	occ_rx = k3_ringacc_ring_get_occ(flow->ringrxfdq);
	dev_dbg(dev, "RX reset flow %u occ_rx_fdq %u\n", flow_num, occ_rx);

	for (i = 0; i < occ_rx; i++) {
		ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma);
		if (ret) {
			if (ret != -ENODATA)
				dev_err(dev, "RX reset pop %d\n", ret);
			break;
		}
		cleanup(data, desc_dma);
	}

	k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx);

do_reset:
	k3_ringacc_ring_reset(flow->ringrx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_rx_chn);
int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			     u32 flow_num, struct cppi5_host_desc_t *desc_rx,
			     dma_addr_t desc_dma)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	return k3_ringacc_ring_push(flow->ringrxfdq, &desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_push_rx_chn);

int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			    u32 flow_num, dma_addr_t *desc_dma)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	return k3_ringacc_ring_pop(flow->ringrx, desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_pop_rx_chn);
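
/*
 * Typical RX descriptor cycle around the two helpers above (a sketch, not
 * code from this driver): the client pre-fills the free descriptor queue of
 * flow 0 with mapped buffers and, on completion, pops finished descriptors
 * from the RX ring. handle_one() is a hypothetical driver helper.
 *
 *	k3_udma_glue_push_rx_chn(rx_chn, 0, desc_rx, desc_dma); // refill FDQ
 *	...
 *	while (!k3_udma_glue_pop_rx_chn(rx_chn, 0, &desc_dma))
 *		handle_one(desc_dma);
 */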
int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
			    u32 flow_num)
{
	struct k3_udma_glue_rx_flow *flow;

	flow = &rx_chn->flows[flow_num];

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		flow->virq = xudma_pktdma_rflow_get_irq(rx_chn->common.udmax,
							flow->udma_rflow_id);
	} else {
		flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);
	}

	if (!flow->virq)
		return -ENXIO;

	return flow->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq);
struct device *
k3_udma_glue_rx_get_dma_device(struct k3_udma_glue_rx_channel *rx_chn)
{
	if (xudma_is_pktdma(rx_chn->common.udmax) &&
	    (rx_chn->common.atype_asel == 14 || rx_chn->common.atype_asel == 15))
		return &rx_chn->common.chan_dev;

	return xudma_get_device(rx_chn->common.udmax);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_dma_device);

void k3_udma_glue_rx_dma_to_cppi5_addr(struct k3_udma_glue_rx_channel *rx_chn,
				       dma_addr_t *addr)
{
	if (!xudma_is_pktdma(rx_chn->common.udmax) ||
	    !rx_chn->common.atype_asel)
		return;

	*addr |= (u64)rx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_dma_to_cppi5_addr);

void k3_udma_glue_rx_cppi5_to_dma_addr(struct k3_udma_glue_rx_channel *rx_chn,
				       dma_addr_t *addr)
{
	if (!xudma_is_pktdma(rx_chn->common.udmax) ||
	    !rx_chn->common.atype_asel)
		return;

	*addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_cppi5_to_dma_addr);
static int __init k3_udma_glue_class_init(void)
{
	return class_register(&k3_udma_glue_devclass);
}

module_init(k3_udma_glue_class_init);
MODULE_DESCRIPTION("TI K3 NAVSS DMA glue interface");
MODULE_LICENSE("GPL v2");