// SPDX-License-Identifier: GPL-2.0
/*
 * K3 NAVSS DMA glue interface
 *
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 */

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>

#include "k3-udma.h"
#include "k3-psil-priv.h"
struct k3_udma_glue_common {
	struct device *dev;
	struct udma_dev *udmax;
	const struct udma_tisci_rm *tisci_rm;
	struct k3_ringacc *ringacc;
	u32 src_thread;
	u32 dst_thread;

	u32 hdesc_size;
	bool epib;
	u32 psdata_size;
	u32 swdata_size;
};

struct k3_udma_glue_tx_channel {
	struct k3_udma_glue_common common;

	struct udma_tchan *udma_tchanx;
	int udma_tchan_id;

	struct k3_ring *ringtx;
	struct k3_ring *ringtxcq;

	bool psil_paired;

	int virq;

	atomic_t free_pkts;
	bool tx_pause_on_err;
	bool tx_filt_einfo;
	bool tx_filt_pswords;
	bool tx_supr_tdpkt;
};

struct k3_udma_glue_rx_flow {
	struct udma_rflow *udma_rflow;
	int udma_rflow_id;
	struct k3_ring *ringrx;
	struct k3_ring *ringrxfdq;

	int virq;
};

struct k3_udma_glue_rx_channel {
	struct k3_udma_glue_common common;

	struct udma_rchan *udma_rchanx;
	int udma_rchan_id;
	bool remote;

	bool psil_paired;

	u32 swdata_size;
	int flow_id_base;

	struct k3_udma_glue_rx_flow *flows;
	u32 flow_num;
	u32 flows_ready;
};

#define K3_UDMAX_TDOWN_TIMEOUT_US 1000
static int of_k3_udma_glue_parse(struct device_node *udmax_np,
				 struct k3_udma_glue_common *common)
{
	common->ringacc = of_k3_ringacc_get_by_phandle(udmax_np,
						       "ti,ringacc");
	if (IS_ERR(common->ringacc))
		return PTR_ERR(common->ringacc);

	common->udmax = of_xudma_dev_get(udmax_np, NULL);
	if (IS_ERR(common->udmax))
		return PTR_ERR(common->udmax);

	common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax);

	return 0;
}

static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
				     const char *name,
				     struct k3_udma_glue_common *common,
				     bool tx_chn)
{
	struct psil_endpoint_config *ep_config;
	struct of_phandle_args dma_spec;
	u32 thread_id;
	int ret = 0;
	int index;

	if (unlikely(!name))
		return -EINVAL;

	index = of_property_match_string(chn_np, "dma-names", name);
	if (index < 0)
		return index;

	if (of_parse_phandle_with_args(chn_np, "dmas", "#dma-cells", index,
				       &dma_spec))
		return -ENOENT;

	thread_id = dma_spec.args[0];

	if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
		ret = -EINVAL;
		goto out_put_spec;
	}

	if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
		ret = -EINVAL;
		goto out_put_spec;
	}

	/* get psil endpoint config */
	ep_config = psil_get_ep_config(thread_id);
	if (IS_ERR(ep_config)) {
		dev_err(common->dev,
			"No configuration for psi-l thread 0x%04x\n",
			thread_id);
		ret = PTR_ERR(ep_config);
		goto out_put_spec;
	}

	common->epib = ep_config->needs_epib;
	common->psdata_size = ep_config->psd_size;

	if (tx_chn)
		common->dst_thread = thread_id;
	else
		common->src_thread = thread_id;

	ret = of_k3_udma_glue_parse(dma_spec.np, common);

out_put_spec:
	of_node_put(dma_spec.np);
	return ret;
}
static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	struct device *dev = tx_chn->common.dev;

	dev_dbg(dev, "dump_tx_chn:\n"
		"udma_tchan_id: %d\n"
		"src_thread: %08x\n"
		"dst_thread: %08x\n",
		tx_chn->udma_tchan_id,
		tx_chn->common.src_thread,
		tx_chn->common.dst_thread);
}

static void k3_udma_glue_dump_tx_rt_chn(struct k3_udma_glue_tx_channel *chn,
					char *mark)
{
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "=== dump ===> %s\n", mark);
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_CTL_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_PEER_RT_EN_REG,
		xudma_tchanrt_read(chn->udma_tchanx,
				   UDMA_TCHAN_RT_PEER_RT_EN_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_PCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_PCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_BCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_BCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_SBCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_SBCNT_REG));
}
static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	const struct udma_tisci_rm *tisci_rm = tx_chn->common.tisci_rm;
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req;

	memset(&req, 0, sizeof(req));

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = tx_chn->udma_tchan_id;
	if (tx_chn->tx_pause_on_err)
		req.tx_pause_on_err = 1;
	if (tx_chn->tx_filt_einfo)
		req.tx_filt_einfo = 1;
	if (tx_chn->tx_filt_pswords)
		req.tx_filt_pswords = 1;
	req.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	if (tx_chn->tx_supr_tdpkt)
		req.tx_supr_tdpkt = 1;
	req.tx_fetch_size = tx_chn->common.hdesc_size >> 2;
	req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq);

	return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
}
struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
		const char *name, struct k3_udma_glue_tx_channel_cfg *cfg)
{
	struct k3_udma_glue_tx_channel *tx_chn;
	int ret;

	tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
	if (!tx_chn)
		return ERR_PTR(-ENOMEM);

	tx_chn->common.dev = dev;
	tx_chn->common.swdata_size = cfg->swdata_size;
	tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
	tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
	tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
	tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;

	/* parse of udmap channel */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&tx_chn->common, true);
	if (ret)
		goto err;

	tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib,
						tx_chn->common.psdata_size,
						tx_chn->common.swdata_size);

	/* request and cfg UDMAP TX channel */
	tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax, -1);
	if (IS_ERR(tx_chn->udma_tchanx)) {
		ret = PTR_ERR(tx_chn->udma_tchanx);
		dev_err(dev, "UDMAX tchanx get err %d\n", ret);
		goto err;
	}
	tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);

	atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size);

	/* request and cfg rings */
	tx_chn->ringtx = k3_ringacc_request_ring(tx_chn->common.ringacc,
						 tx_chn->udma_tchan_id, 0);
	if (!tx_chn->ringtx) {
		ret = -ENODEV;
		dev_err(dev, "Failed to get TX ring %u\n",
			tx_chn->udma_tchan_id);
		goto err;
	}

	tx_chn->ringtxcq = k3_ringacc_request_ring(tx_chn->common.ringacc,
						   -1, 0);
	if (!tx_chn->ringtxcq) {
		ret = -ENODEV;
		dev_err(dev, "Failed to get TXCQ ring\n");
		goto err;
	}

	ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringtx %d\n", ret);
		goto err;
	}

	ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringtxcq %d\n", ret);
		goto err;
	}

	/* request and cfg psi-l */
	tx_chn->common.src_thread =
			xudma_dev_get_psil_base(tx_chn->common.udmax) +
			tx_chn->udma_tchan_id;

	ret = k3_udma_glue_cfg_tx_chn(tx_chn);
	if (ret) {
		dev_err(dev, "Failed to cfg tchan %d\n", ret);
		goto err;
	}

	ret = xudma_navss_psil_pair(tx_chn->common.udmax,
				    tx_chn->common.src_thread,
				    tx_chn->common.dst_thread);
	if (ret) {
		dev_err(dev, "PSI-L request err %d\n", ret);
		goto err;
	}

	tx_chn->psil_paired = true;

	/* reset TX RT registers */
	k3_udma_glue_disable_tx_chn(tx_chn);

	k3_udma_glue_dump_tx_chn(tx_chn);

	return tx_chn;

err:
	k3_udma_glue_release_tx_chn(tx_chn);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn);
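/*
 * Usage sketch (illustrative only, not part of this driver): a client such
 * as a networking driver would typically fill a TX channel config and
 * request the channel by its "dma-names" entry. The channel name "tx0" and
 * the ring sizes below are hypothetical, and only the cfg fields used by
 * this file are shown.
 *
 *	struct k3_udma_glue_tx_channel_cfg cfg = { 0 };
 *	struct k3_udma_glue_tx_channel *tx_chn;
 *
 *	cfg.swdata_size = 16;
 *	cfg.tx_cfg.size = 128;		// TX ring elements
 *	cfg.txcq_cfg.size = 128;	// TX completion ring elements
 *
 *	tx_chn = k3_udma_glue_request_tx_chn(dev, "tx0", &cfg);
 *	if (IS_ERR(tx_chn))
 *		return PTR_ERR(tx_chn);
 *	k3_udma_glue_enable_tx_chn(tx_chn);
 */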
void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	if (tx_chn->psil_paired) {
		xudma_navss_psil_unpair(tx_chn->common.udmax,
					tx_chn->common.src_thread,
					tx_chn->common.dst_thread);
		tx_chn->psil_paired = false;
	}

	if (!IS_ERR_OR_NULL(tx_chn->udma_tchanx))
		xudma_tchan_put(tx_chn->common.udmax,
				tx_chn->udma_tchanx);

	if (tx_chn->ringtxcq)
		k3_ringacc_ring_free(tx_chn->ringtxcq);

	if (tx_chn->ringtx)
		k3_ringacc_ring_free(tx_chn->ringtx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_release_tx_chn);
int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			     struct cppi5_host_desc_t *desc_tx,
			     dma_addr_t desc_dma)
{
	u32 ringtxcq_id;

	if (!atomic_add_unless(&tx_chn->free_pkts, -1, 0))
		return -ENOMEM;

	ringtxcq_id = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
	cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, ringtxcq_id);

	return k3_ringacc_ring_push(tx_chn->ringtx, &desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_push_tx_chn);

int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			    dma_addr_t *desc_dma)
{
	int ret;

	ret = k3_ringacc_ring_pop(tx_chn->ringtxcq, desc_dma);
	if (!ret)
		atomic_inc(&tx_chn->free_pkts);

	return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn);
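/*
 * Usage sketch (illustrative only): a client pushes a prepared CPPI5 host
 * descriptor on the TX ring and, on the completion interrupt, pops the
 * descriptor DMA address back from the TX completion ring. desc_tx/desc_dma
 * stand for a descriptor the client has already built and mapped for DMA;
 * free_or_recycle_descriptor() is a hypothetical client helper.
 *
 *	ret = k3_udma_glue_push_tx_chn(tx_chn, desc_tx, desc_dma);
 *	...
 *	// in the completion handler (IRQ from k3_udma_glue_tx_get_irq())
 *	while (!k3_udma_glue_pop_tx_chn(tx_chn, &desc_dma))
 *		free_or_recycle_descriptor(desc_dma);
 */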
int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	u32 txrt_ctl;

	txrt_ctl = UDMA_PEER_RT_EN_ENABLE;
	xudma_tchanrt_write(tx_chn->udma_tchanx,
			    UDMA_TCHAN_RT_PEER_RT_EN_REG,
			    txrt_ctl);

	txrt_ctl = xudma_tchanrt_read(tx_chn->udma_tchanx,
				      UDMA_TCHAN_RT_CTL_REG);
	txrt_ctl |= UDMA_CHAN_RT_CTL_EN;
	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG,
			    txrt_ctl);

	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn en");
	return 0;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_enable_tx_chn);

void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis1");

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG, 0);

	xudma_tchanrt_write(tx_chn->udma_tchanx,
			    UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);

	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_tx_chn);
void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			       bool sync)
{
	int i = 0;
	u32 val;

	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown1");

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG,
			    UDMA_CHAN_RT_CTL_EN | UDMA_CHAN_RT_CTL_TDOWN);

	val = xudma_tchanrt_read(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = xudma_tchanrt_read(tx_chn->udma_tchanx,
					 UDMA_TCHAN_RT_CTL_REG);
		udelay(1);
		if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
			dev_err(tx_chn->common.dev, "TX tdown timeout\n");
			break;
		}
		i++;
	}

	val = xudma_tchanrt_read(tx_chn->udma_tchanx,
				 UDMA_TCHAN_RT_PEER_RT_EN_REG);
	if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
		dev_err(tx_chn->common.dev, "TX tdown peer not stopped\n");
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_tx_chn);
void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			       void *data,
			       void (*cleanup)(void *data, dma_addr_t desc_dma))
{
	dma_addr_t desc_dma;
	int occ_tx, i, ret;

	/* reset TXCQ as it is not input for udma - expected to be empty */
	if (tx_chn->ringtxcq)
		k3_ringacc_ring_reset(tx_chn->ringtxcq);

	/*
	 * TXQ reset needs to be done in a special way as it is input for udma
	 * and its state is cached by udma, so:
	 * 1) save TXQ occ
	 * 2) clean up TXQ and call the .cleanup() callback for each desc
	 * 3) reset TXQ in a special way
	 */
	occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx);
	dev_dbg(tx_chn->common.dev, "TX reset occ_tx %u\n", occ_tx);

	for (i = 0; i < occ_tx; i++) {
		ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma);
		if (ret) {
			dev_err(tx_chn->common.dev, "TX reset pop %d\n", ret);
			break;
		}
		cleanup(data, desc_dma);
	}

	k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_tx_chn);

u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn)
{
	return tx_chn->common.hdesc_size;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_hdesc_size);

u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn)
{
	return k3_ringacc_get_ring_id(tx_chn->ringtxcq);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_txcq_id);

int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn)
{
	tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq);

	return tx_chn->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq);
static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req;
	int ret;

	memset(&req, 0, sizeof(req));

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;

	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = rx_chn->udma_rchan_id;
	req.rx_fetch_size = rx_chn->common.hdesc_size >> 2;
	/*
	 * TODO: we can't support rxcq_qnum/RCHAN[a]_RCQ cfg with current sysfw
	 * and udmax impl, so just configure it to invalid value.
	 * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx);
	 */
	req.rxcq_qnum = 0xFFFF;
	if (rx_chn->flow_num && rx_chn->flow_id_base != rx_chn->udma_rchan_id) {
		/* Default flow + extra ones */
		req.flowid_start = rx_chn->flow_id_base;
		req.flowid_cnt = rx_chn->flow_num;
	}
	req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;

	ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
	if (ret)
		dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n",
			rx_chn->udma_rchan_id, ret);

	return ret;
}
static void k3_udma_glue_release_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
					 u32 flow_num)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	if (IS_ERR_OR_NULL(flow->udma_rflow))
		return;

	if (flow->ringrxfdq)
		k3_ringacc_ring_free(flow->ringrxfdq);

	if (flow->ringrx)
		k3_ringacc_ring_free(flow->ringrx);

	xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
	flow->udma_rflow = NULL;
	rx_chn->flows_ready--;
}
static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
				    u32 flow_idx,
				    struct k3_udma_glue_rx_flow_cfg *flow_cfg)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;
	struct ti_sci_msg_rm_udmap_flow_cfg req;
	int rx_ring_id;
	int rx_ringfdq_id;
	int ret = 0;

	flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax,
					   flow->udma_rflow_id);
	if (IS_ERR(flow->udma_rflow)) {
		ret = PTR_ERR(flow->udma_rflow);
		dev_err(dev, "UDMAX rflow get err %d\n", ret);
		goto err;
	}

	if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) {
		xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
		return -ENODEV;
	}

	/* request and cfg rings */
	flow->ringrx = k3_ringacc_request_ring(rx_chn->common.ringacc,
					       flow_cfg->ring_rxq_id, 0);
	if (!flow->ringrx) {
		ret = -ENODEV;
		dev_err(dev, "Failed to get RX ring\n");
		goto err;
	}

	flow->ringrxfdq = k3_ringacc_request_ring(rx_chn->common.ringacc,
						  flow_cfg->ring_rxfdq0_id, 0);
	if (!flow->ringrxfdq) {
		ret = -ENODEV;
		dev_err(dev, "Failed to get RXFDQ ring\n");
		goto err;
	}

	ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringrx %d\n", ret);
		goto err;
	}

	ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringrxfdq %d\n", ret);
		goto err;
	}

	if (rx_chn->remote) {
		rx_ring_id = TI_SCI_RESOURCE_NULL;
		rx_ringfdq_id = TI_SCI_RESOURCE_NULL;
	} else {
		rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
		rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);
	}

	memset(&req, 0, sizeof(req));

	req.valid_params =
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;
	if (rx_chn->common.epib)
		req.rx_einfo_present = 1;
	if (rx_chn->common.psdata_size)
		req.rx_psinfo_present = 1;
	if (flow_cfg->rx_error_handling)
		req.rx_error_handling = 1;
	req.rx_desc_type = 0;
	req.rx_dest_qnum = rx_ring_id;
	req.rx_src_tag_hi_sel = 0;
	req.rx_src_tag_lo_sel = flow_cfg->src_tag_lo_sel;
	req.rx_dest_tag_hi_sel = 0;
	req.rx_dest_tag_lo_sel = 0;
	req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
	req.rx_fdq1_qnum = rx_ringfdq_id;
	req.rx_fdq2_qnum = rx_ringfdq_id;
	req.rx_fdq3_qnum = rx_ringfdq_id;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id,
			ret);
		goto err;
	}

	rx_chn->flows_ready++;
	dev_dbg(dev, "flow%d config done. ready:%d\n",
		flow->udma_rflow_id, rx_chn->flows_ready);

	return 0;
err:
	k3_udma_glue_release_rx_flow(rx_chn, flow_idx);
	return ret;
}
static void k3_udma_glue_dump_rx_chn(struct k3_udma_glue_rx_channel *chn)
{
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "dump_rx_chn:\n"
		"udma_rchan_id: %d\n"
		"src_thread: %08x\n"
		"dst_thread: %08x\n"
		"epib: %d\n"
		"hdesc_size: %u\n"
		"psdata_size: %u\n"
		"swdata_size: %u\n"
		"flow_id_base: %d\n"
		"flow_num: %d\n",
		chn->udma_rchan_id,
		chn->common.src_thread,
		chn->common.dst_thread,
		chn->common.epib,
		chn->common.hdesc_size,
		chn->common.psdata_size,
		chn->common.swdata_size,
		chn->flow_id_base,
		chn->flow_num);
}

static void k3_udma_glue_dump_rx_rt_chn(struct k3_udma_glue_rx_channel *chn,
					char *mark)
{
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "=== dump ===> %s\n", mark);

	dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_CTL_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_PEER_RT_EN_REG,
		xudma_rchanrt_read(chn->udma_rchanx,
				   UDMA_RCHAN_RT_PEER_RT_EN_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_PCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_PCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_BCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_BCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_SBCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_SBCNT_REG));
}
static int
k3_udma_glue_allocate_rx_flows(struct k3_udma_glue_rx_channel *rx_chn,
			       struct k3_udma_glue_rx_channel_cfg *cfg)
{
	int ret;

	/* default rflow */
	if (cfg->flow_id_use_rxchan_id)
		return 0;

	/* not GP rflows */
	if (rx_chn->flow_id_base != -1 &&
	    !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
		return 0;

	/* Allocate range of GP rflows */
	ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax,
					 rx_chn->flow_id_base,
					 rx_chn->flow_num);
	if (ret < 0) {
		dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n",
			rx_chn->flow_id_base, rx_chn->flow_num, ret);
		return ret;
	}

	rx_chn->flow_id_base = ret;

	return 0;
}
static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
				 struct k3_udma_glue_rx_channel_cfg *cfg)
{
	struct k3_udma_glue_rx_channel *rx_chn;
	int ret, i;

	if (cfg->flow_id_num <= 0)
		return ERR_PTR(-EINVAL);

	if (cfg->flow_id_num != 1 &&
	    (cfg->def_flow_cfg || cfg->flow_id_use_rxchan_id))
		return ERR_PTR(-EINVAL);

	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
	if (!rx_chn)
		return ERR_PTR(-ENOMEM);

	rx_chn->common.dev = dev;
	rx_chn->common.swdata_size = cfg->swdata_size;
	rx_chn->remote = false;

	/* parse of udmap channel */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&rx_chn->common, false);
	if (ret)
		goto err;

	rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
						rx_chn->common.psdata_size,
						rx_chn->common.swdata_size);

	/* request and cfg UDMAP RX channel */
	rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax, -1);
	if (IS_ERR(rx_chn->udma_rchanx)) {
		ret = PTR_ERR(rx_chn->udma_rchanx);
		dev_err(dev, "UDMAX rchanx get err %d\n", ret);
		goto err;
	}
	rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx);

	rx_chn->flow_num = cfg->flow_id_num;
	rx_chn->flow_id_base = cfg->flow_id_base;

	/* Use RX channel id as flow id: target dev can't generate flow_id */
	if (cfg->flow_id_use_rxchan_id)
		rx_chn->flow_id_base = rx_chn->udma_rchan_id;

	rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
				     sizeof(*rx_chn->flows), GFP_KERNEL);
	if (!rx_chn->flows) {
		ret = -ENOMEM;
		goto err;
	}

	ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
	if (ret)
		goto err;

	for (i = 0; i < rx_chn->flow_num; i++)
		rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;

	/* request and cfg psi-l */
	rx_chn->common.dst_thread =
			xudma_dev_get_psil_base(rx_chn->common.udmax) +
			rx_chn->udma_rchan_id;

	ret = k3_udma_glue_cfg_rx_chn(rx_chn);
	if (ret) {
		dev_err(dev, "Failed to cfg rchan %d\n", ret);
		goto err;
	}

	/* init default RX flow only if flow_num = 1 */
	if (cfg->def_flow_cfg) {
		ret = k3_udma_glue_cfg_rx_flow(rx_chn, 0, cfg->def_flow_cfg);
		if (ret)
			goto err;
	}

	ret = xudma_navss_psil_pair(rx_chn->common.udmax,
				    rx_chn->common.src_thread,
				    rx_chn->common.dst_thread);
	if (ret) {
		dev_err(dev, "PSI-L request err %d\n", ret);
		goto err;
	}

	rx_chn->psil_paired = true;

	/* reset RX RT registers */
	k3_udma_glue_disable_rx_chn(rx_chn);

	k3_udma_glue_dump_rx_chn(rx_chn);

	return rx_chn;

err:
	k3_udma_glue_release_rx_chn(rx_chn);
	return ERR_PTR(ret);
}
static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
				   struct k3_udma_glue_rx_channel_cfg *cfg)
{
	struct k3_udma_glue_rx_channel *rx_chn;
	int ret, i;

	if (cfg->flow_id_num <= 0 ||
	    cfg->flow_id_use_rxchan_id ||
	    cfg->def_flow_cfg ||
	    cfg->flow_id_base < 0)
		return ERR_PTR(-EINVAL);

	/*
	 * A remote RX channel is under control of the remote CPU core, so
	 * Linux can only request it and manipulate it through its dedicated
	 * RX flows.
	 */
	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
	if (!rx_chn)
		return ERR_PTR(-ENOMEM);

	rx_chn->common.dev = dev;
	rx_chn->common.swdata_size = cfg->swdata_size;
	rx_chn->remote = true;
	rx_chn->udma_rchan_id = -1;
	rx_chn->flow_num = cfg->flow_id_num;
	rx_chn->flow_id_base = cfg->flow_id_base;
	rx_chn->psil_paired = false;

	/* parse of udmap channel */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&rx_chn->common, false);
	if (ret)
		goto err;

	rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
						rx_chn->common.psdata_size,
						rx_chn->common.swdata_size);

	rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
				     sizeof(*rx_chn->flows), GFP_KERNEL);
	if (!rx_chn->flows) {
		ret = -ENOMEM;
		goto err;
	}

	ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
	if (ret)
		goto err;

	for (i = 0; i < rx_chn->flow_num; i++)
		rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;

	k3_udma_glue_dump_rx_chn(rx_chn);

	return rx_chn;

err:
	k3_udma_glue_release_rx_chn(rx_chn);
	return ERR_PTR(ret);
}
struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn(struct device *dev, const char *name,
			    struct k3_udma_glue_rx_channel_cfg *cfg)
{
	if (cfg->remote)
		return k3_udma_glue_request_remote_rx_chn(dev, name, cfg);
	else
		return k3_udma_glue_request_rx_chn_priv(dev, name, cfg);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_rx_chn);
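/*
 * Usage sketch (illustrative only, not part of this driver): for the common
 * single-flow, non-remote case a client fills the channel config together
 * with a default flow config and requests the channel by its "dma-names"
 * entry. The name "rx0" and the ring sizes are hypothetical, and only the
 * cfg fields used by this file are shown.
 *
 *	struct k3_udma_glue_rx_channel_cfg cfg = { 0 };
 *	struct k3_udma_glue_rx_flow_cfg flow_cfg = { 0 };
 *	struct k3_udma_glue_rx_channel *rx_chn;
 *
 *	cfg.swdata_size = 16;
 *	cfg.flow_id_num = 1;
 *	cfg.flow_id_base = -1;		// let the glue pick GP rflows
 *	flow_cfg.rx_cfg.size = 128;	// RX ring elements
 *	flow_cfg.rxfdq_cfg.size = 128;	// RX free descriptor ring elements
 *	flow_cfg.ring_rxq_id = -1;	// any free ring
 *	flow_cfg.ring_rxfdq0_id = -1;	// any free ring
 *	cfg.def_flow_cfg = &flow_cfg;
 *
 *	rx_chn = k3_udma_glue_request_rx_chn(dev, "rx0", &cfg);
 *	if (IS_ERR(rx_chn))
 *		return PTR_ERR(rx_chn);
 *	k3_udma_glue_enable_rx_chn(rx_chn);
 */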
void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	int i;

	if (IS_ERR_OR_NULL(rx_chn->common.udmax))
		return;

	if (rx_chn->psil_paired) {
		xudma_navss_psil_unpair(rx_chn->common.udmax,
					rx_chn->common.src_thread,
					rx_chn->common.dst_thread);
		rx_chn->psil_paired = false;
	}

	for (i = 0; i < rx_chn->flow_num; i++)
		k3_udma_glue_release_rx_flow(rx_chn, i);

	if (xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
		xudma_free_gp_rflow_range(rx_chn->common.udmax,
					  rx_chn->flow_id_base,
					  rx_chn->flow_num);

	if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx))
		xudma_rchan_put(rx_chn->common.udmax,
				rx_chn->udma_rchanx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_release_rx_chn);
int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn,
			      u32 flow_idx,
			      struct k3_udma_glue_rx_flow_cfg *flow_cfg)
{
	if (flow_idx >= rx_chn->flow_num)
		return -EINVAL;

	return k3_udma_glue_cfg_rx_flow(rx_chn, flow_idx, flow_cfg);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_init);

u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
				    u32 flow_idx)
{
	struct k3_udma_glue_rx_flow *flow;

	if (flow_idx >= rx_chn->flow_num)
		return -EINVAL;

	flow = &rx_chn->flows[flow_idx];

	return k3_ringacc_get_ring_id(flow->ringrxfdq);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_get_fdq_id);

u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn)
{
	return rx_chn->flow_id_base;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_flow_id_base);
int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
				u32 flow_idx)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;
	struct ti_sci_msg_rm_udmap_flow_cfg req;
	int rx_ring_id;
	int rx_ringfdq_id;
	int ret = 0;

	if (!rx_chn->remote)
		return -EINVAL;

	rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
	rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);

	memset(&req, 0, sizeof(req));

	req.valid_params =
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;
	req.rx_dest_qnum = rx_ring_id;
	req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
	req.rx_fdq1_qnum = rx_ringfdq_id;
	req.rx_fdq2_qnum = rx_ringfdq_id;
	req.rx_fdq3_qnum = rx_ringfdq_id;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(dev, "flow%d enable failed: %d\n", flow->udma_rflow_id,
			ret);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_enable);

int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
				 u32 flow_idx)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;
	struct ti_sci_msg_rm_udmap_flow_cfg req;
	int ret = 0;

	if (!rx_chn->remote)
		return -EINVAL;

	memset(&req, 0, sizeof(req));
	req.valid_params =
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;
	req.rx_dest_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq0_sz0_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq1_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq2_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq3_qnum = TI_SCI_RESOURCE_NULL;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(dev, "flow%d disable failed: %d\n", flow->udma_rflow_id,
			ret);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable);
int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	u32 rxrt_ctl;

	if (rx_chn->remote)
		return -EINVAL;

	if (rx_chn->flows_ready < rx_chn->flow_num)
		return -EINVAL;

	rxrt_ctl = xudma_rchanrt_read(rx_chn->udma_rchanx,
				      UDMA_RCHAN_RT_CTL_REG);
	rxrt_ctl |= UDMA_CHAN_RT_CTL_EN;
	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG,
			    rxrt_ctl);

	xudma_rchanrt_write(rx_chn->udma_rchanx,
			    UDMA_RCHAN_RT_PEER_RT_EN_REG,
			    UDMA_PEER_RT_EN_ENABLE);

	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt en");
	return 0;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_enable_rx_chn);

void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis1");

	xudma_rchanrt_write(rx_chn->udma_rchanx,
			    UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG, 0);

	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_rx_chn);
void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			       bool sync)
{
	int i = 0;
	u32 val;

	if (rx_chn->remote)
		return;

	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown1");

	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_RCHAN_RT_PEER_RT_EN_REG,
			    UDMA_PEER_RT_EN_ENABLE | UDMA_PEER_RT_EN_TEARDOWN);

	val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = xudma_rchanrt_read(rx_chn->udma_rchanx,
					 UDMA_RCHAN_RT_CTL_REG);
		udelay(1);
		if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
			dev_err(rx_chn->common.dev, "RX tdown timeout\n");
			break;
		}
		i++;
	}

	val = xudma_rchanrt_read(rx_chn->udma_rchanx,
				 UDMA_RCHAN_RT_PEER_RT_EN_REG);
	if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
		dev_err(rx_chn->common.dev, "RX tdown peer not stopped\n");
	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_rx_chn);
void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			       u32 flow_num, void *data,
			       void (*cleanup)(void *data, dma_addr_t desc_dma),
			       bool skip_fdq)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
	struct device *dev = rx_chn->common.dev;
	dma_addr_t desc_dma;
	int occ_rx, i, ret;

	/* reset RXCQ as it is not input for udma - expected to be empty */
	occ_rx = k3_ringacc_ring_get_occ(flow->ringrx);
	dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx);
	if (flow->ringrx)
		k3_ringacc_ring_reset(flow->ringrx);

	/* Skip RX FDQ in case one FDQ is used for the set of flows */
	if (skip_fdq)
		return;

	/*
	 * RX FDQ reset needs to be done in a special way as it is input for
	 * udma and its state is cached by udma, so:
	 * 1) save RX FDQ occ
	 * 2) clean up RX FDQ and call the .cleanup() callback for each desc
	 * 3) reset RX FDQ in a special way
	 */
	occ_rx = k3_ringacc_ring_get_occ(flow->ringrxfdq);
	dev_dbg(dev, "RX reset flow %u occ_rx_fdq %u\n", flow_num, occ_rx);

	for (i = 0; i < occ_rx; i++) {
		ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma);
		if (ret) {
			dev_err(dev, "RX reset pop %d\n", ret);
			break;
		}
		cleanup(data, desc_dma);
	}

	k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_rx_chn);
int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			     u32 flow_num, struct cppi5_host_desc_t *desc_rx,
			     dma_addr_t desc_dma)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	return k3_ringacc_ring_push(flow->ringrxfdq, &desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_push_rx_chn);

int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			    u32 flow_num, dma_addr_t *desc_dma)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	return k3_ringacc_ring_pop(flow->ringrx, desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_pop_rx_chn);
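/*
 * Usage sketch (illustrative only): the client pre-fills the free descriptor
 * queue of a flow with mapped RX buffers and, on the flow interrupt, pops
 * completed descriptors from the RX ring and pushes refilled ones back.
 * alloc_and_map_rx_desc() and process_rx_desc() are hypothetical client
 * helpers; ring_size matches the rxfdq ring size the client configured.
 *
 *	// initial refill of flow 0
 *	for (i = 0; i < ring_size; i++) {
 *		desc_rx = alloc_and_map_rx_desc(dev, &desc_dma);
 *		k3_udma_glue_push_rx_chn(rx_chn, 0, desc_rx, desc_dma);
 *	}
 *
 *	// in the completion handler (IRQ from k3_udma_glue_rx_get_irq())
 *	while (!k3_udma_glue_pop_rx_chn(rx_chn, 0, &desc_dma))
 *		process_rx_desc(desc_dma);
 */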
int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
			    u32 flow_num)
{
	struct k3_udma_glue_rx_flow *flow;

	flow = &rx_chn->flows[flow_num];

	flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);

	return flow->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq);