// drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
// SPDX-License-Identifier: GPL-2.0
// Copyright 2019 NXP

#include <linux/init.h>
#include <linux/module.h>
#include <linux/dmapool.h>
#include <linux/of_irq.h>
#include <linux/iommu.h>
#include <linux/sys_soc.h>
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>

#include "../virt-dma.h"
#include "dpdmai.h"
#include "dpaa2-qdma.h"
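
/*
 * Overview of the code below: each DMA channel owns DMA pools for frame
 * descriptors (FD), frame list entries (FL) and source/destination
 * descriptors (SDD). dpaa2_qdma_prep_memcpy() builds a long-format FD whose
 * frame list points at the SDD pair and at the source/destination buffers;
 * dpaa2_qdma_issue_pending() enqueues the FD on the DPDMAI tx frame queue,
 * and dpaa2_qdma_fqdan_cb() completes cookies as responses arrive on the
 * rx frame queue.
 */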
static bool smmu_disable = true;
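
/*
 * Forward declaration: dpaa2_qdma_free_chan_resources() below uses this
 * helper, which is defined further down in this file.
 */
static void dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
				   struct list_head *head);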

static struct dpaa2_qdma_chan *to_dpaa2_qdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct dpaa2_qdma_chan, vchan.chan);
}

static struct dpaa2_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
{
	return container_of(vd, struct dpaa2_qdma_comp, vdesc);
}

static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
	struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
	struct device *dev = &dpaa2_qdma->priv->dpdmai_dev->dev;

	dpaa2_chan->fd_pool = dma_pool_create("fd_pool", dev,
					      sizeof(struct dpaa2_fd),
					      sizeof(struct dpaa2_fd), 0);
	if (!dpaa2_chan->fd_pool)
		goto err;

	dpaa2_chan->fl_pool = dma_pool_create("fl_pool", dev,
					      sizeof(struct dpaa2_fl_entry),
					      sizeof(struct dpaa2_fl_entry), 0);
	if (!dpaa2_chan->fl_pool)
		goto err_fd;

	dpaa2_chan->sdd_pool =
		dma_pool_create("sdd_pool", dev,
				sizeof(struct dpaa2_qdma_sd_d),
				sizeof(struct dpaa2_qdma_sd_d), 0);
	if (!dpaa2_chan->sdd_pool)
		goto err_fl;

	return dpaa2_qdma->desc_allocated++;
err_fl:
	dma_pool_destroy(dpaa2_chan->fl_pool);
err_fd:
	dma_pool_destroy(dpaa2_chan->fd_pool);
err:
	return -ENOMEM;
}

static void dpaa2_qdma_free_chan_resources(struct dma_chan *chan)
{
	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
	struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
	unsigned long flags;

	LIST_HEAD(head);

	spin_lock_irqsave(&dpaa2_chan->vchan.lock, flags);
	vchan_get_all_descriptors(&dpaa2_chan->vchan, &head);
	spin_unlock_irqrestore(&dpaa2_chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&dpaa2_chan->vchan, &head);

	dpaa2_dpdmai_free_comp(dpaa2_chan, &dpaa2_chan->comp_used);
	dpaa2_dpdmai_free_comp(dpaa2_chan, &dpaa2_chan->comp_free);

	dma_pool_destroy(dpaa2_chan->fd_pool);
	dma_pool_destroy(dpaa2_chan->fl_pool);
	dma_pool_destroy(dpaa2_chan->sdd_pool);
	dpaa2_qdma->desc_allocated--;
}

/*
 * Request a command descriptor for enqueue.
 */
static struct dpaa2_qdma_comp *
dpaa2_qdma_request_desc(struct dpaa2_qdma_chan *dpaa2_chan)
{
	struct dpaa2_qdma_priv *qdma_priv = dpaa2_chan->qdma->priv;
	struct device *dev = &qdma_priv->dpdmai_dev->dev;
	struct dpaa2_qdma_comp *comp_temp = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
	if (list_empty(&dpaa2_chan->comp_free)) {
		spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
		comp_temp = kzalloc(sizeof(*comp_temp), GFP_NOWAIT);
		if (!comp_temp)
			goto err;
		comp_temp->fd_virt_addr =
			dma_pool_alloc(dpaa2_chan->fd_pool, GFP_NOWAIT,
				       &comp_temp->fd_bus_addr);
		if (!comp_temp->fd_virt_addr)
			goto err_comp;

		comp_temp->fl_virt_addr =
			dma_pool_alloc(dpaa2_chan->fl_pool, GFP_NOWAIT,
				       &comp_temp->fl_bus_addr);
		if (!comp_temp->fl_virt_addr)
			goto err_fd_virt;

		comp_temp->desc_virt_addr =
			dma_pool_alloc(dpaa2_chan->sdd_pool, GFP_NOWAIT,
				       &comp_temp->desc_bus_addr);
		if (!comp_temp->desc_virt_addr)
			goto err_fl_virt;

		comp_temp->qchan = dpaa2_chan;
		return comp_temp;
	}

	comp_temp = list_first_entry(&dpaa2_chan->comp_free,
				     struct dpaa2_qdma_comp, list);
	list_del(&comp_temp->list);
	spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);

	comp_temp->qchan = dpaa2_chan;

	return comp_temp;

err_fl_virt:
	dma_pool_free(dpaa2_chan->fl_pool,
		      comp_temp->fl_virt_addr,
		      comp_temp->fl_bus_addr);
err_fd_virt:
	dma_pool_free(dpaa2_chan->fd_pool,
		      comp_temp->fd_virt_addr,
		      comp_temp->fd_bus_addr);
err_comp:
	kfree(comp_temp);
err:
	dev_err(dev, "Failed to request descriptor\n");
	return NULL;
}
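
/*
 * Fill in the frame descriptor: point it at the frame list, optionally set
 * BMT to bypass SMMU translation, and store the format plus QDMA_SER_CTX in
 * the frame context (FRC) field.
 */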
static void
dpaa2_qdma_populate_fd(u32 format, struct dpaa2_qdma_comp *dpaa2_comp)
{
	struct dpaa2_fd *fd;

	fd = dpaa2_comp->fd_virt_addr;
	memset(fd, 0, sizeof(struct dpaa2_fd));

	/* fd populated */
	dpaa2_fd_set_addr(fd, dpaa2_comp->fl_bus_addr);

	/*
	 * Bypass memory translation, frame list format, short length disable;
	 * we need to disable BMT if fsl-mc uses IOVA addresses.
	 */
	if (smmu_disable)
		dpaa2_fd_set_bpid(fd, QMAN_FD_BMT_ENABLE);
	dpaa2_fd_set_format(fd, QMAN_FD_FMT_ENABLE | QMAN_FD_SL_DISABLE);

	dpaa2_fd_set_frc(fd, format | QDMA_SER_CTX);
}

/* first frame list for descriptor buffer */
static void
dpaa2_qdma_populate_first_framel(struct dpaa2_fl_entry *f_list,
				 struct dpaa2_qdma_comp *dpaa2_comp,
				 bool wrt_changed)
{
	struct dpaa2_qdma_sd_d *sdd;

	sdd = dpaa2_comp->desc_virt_addr;
	memset(sdd, 0, 2 * (sizeof(*sdd)));

	/* source descriptor CMD */
	sdd->cmd = cpu_to_le32(QDMA_SD_CMD_RDTTYPE_COHERENT);
	sdd++;

	/* dest descriptor CMD */
	if (wrt_changed)
		sdd->cmd = cpu_to_le32(LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT);
	else
		sdd->cmd = cpu_to_le32(QDMA_DD_CMD_WRTTYPE_COHERENT);

	memset(f_list, 0, sizeof(struct dpaa2_fl_entry));

	/* first frame list to source descriptor */
	dpaa2_fl_set_addr(f_list, dpaa2_comp->desc_bus_addr);
	dpaa2_fl_set_len(f_list, 0x20);
	dpaa2_fl_set_format(f_list, QDMA_FL_FMT_SBF | QDMA_FL_SL_LONG);

	/* bypass memory translation */
	if (smmu_disable)
		f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
}

/* source and destination frame list */
static void
dpaa2_qdma_populate_frames(struct dpaa2_fl_entry *f_list,
			   dma_addr_t dst, dma_addr_t src,
			   size_t len, uint8_t fmt)
{
	/* source frame list to source buffer */
	memset(f_list, 0, sizeof(struct dpaa2_fl_entry));

	dpaa2_fl_set_addr(f_list, src);
	dpaa2_fl_set_len(f_list, len);

	/* single buffer frame or scatter gather frame */
	dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));

	/* bypass memory translation */
	if (smmu_disable)
		f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);

	f_list++;

	/* destination frame list to destination buffer */
	memset(f_list, 0, sizeof(struct dpaa2_fl_entry));

	dpaa2_fl_set_addr(f_list, dst);
	dpaa2_fl_set_len(f_list, len);
	/* single buffer frame or scatter gather frame */
	dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));
	dpaa2_fl_set_final(f_list, QDMA_FL_F);
	/* bypass memory translation */
	if (smmu_disable)
		f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
}

static struct dma_async_tx_descriptor
*dpaa2_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
			dma_addr_t src, size_t len, ulong flags)
{
	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
	struct dpaa2_qdma_engine *dpaa2_qdma;
	struct dpaa2_qdma_comp *dpaa2_comp;
	struct dpaa2_fl_entry *f_list;
	bool wrt_changed;

	dpaa2_qdma = dpaa2_chan->qdma;
	dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
	if (!dpaa2_comp)
		return NULL;

	wrt_changed = (bool)dpaa2_qdma->qdma_wrtype_fixup;

	/* populate frame descriptor */
	dpaa2_qdma_populate_fd(QDMA_FD_LONG_FORMAT, dpaa2_comp);

	f_list = dpaa2_comp->fl_virt_addr;

	/* first frame list for descriptor buffer (long format) */
	dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp, wrt_changed);

	f_list++;

	dpaa2_qdma_populate_frames(f_list, dst, src, len, QDMA_FL_FMT_SBF);

	return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
}
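
/*
 * Push the next pending descriptor to hardware: move it from the vchan list
 * to comp_used and enqueue its FD on the channel's tx frame queue; if the
 * enqueue fails, the completion buffer is returned to the free list.
 */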
static void dpaa2_qdma_issue_pending(struct dma_chan *chan)
{
	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
	struct dpaa2_qdma_comp *dpaa2_comp;
	struct virt_dma_desc *vdesc;
	struct dpaa2_fd *fd;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
	spin_lock(&dpaa2_chan->vchan.lock);
	if (vchan_issue_pending(&dpaa2_chan->vchan)) {
		vdesc = vchan_next_desc(&dpaa2_chan->vchan);
		if (!vdesc)
			goto err_enqueue;
		dpaa2_comp = to_fsl_qdma_comp(vdesc);

		fd = dpaa2_comp->fd_virt_addr;

		list_del(&vdesc->node);
		list_add_tail(&dpaa2_comp->list, &dpaa2_chan->comp_used);

		err = dpaa2_io_service_enqueue_fq(NULL, dpaa2_chan->fqid, fd);
		if (err) {
			list_del(&dpaa2_comp->list);
			list_add_tail(&dpaa2_comp->list,
				      &dpaa2_chan->comp_free);
		}
	}
err_enqueue:
	spin_unlock(&dpaa2_chan->vchan.lock);
	spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
}
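
/*
 * One-time DPDMAI setup: open the object, check the API version, and read
 * the rx/tx frame-queue IDs for each priority into per-priority state.
 */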
static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
{
	struct dpaa2_qdma_priv_per_prio *ppriv;
	struct device *dev = &ls_dev->dev;
	struct dpaa2_qdma_priv *priv;
	u8 prio_def = DPDMAI_PRIO_NUM;
	int err = -EINVAL;
	int i;

	priv = dev_get_drvdata(dev);

	priv->dev = dev;
	priv->dpqdma_id = ls_dev->obj_desc.id;

	/* Get the handle for the DPDMAI this interface is associated with */
	err = dpdmai_open(priv->mc_io, 0, priv->dpqdma_id, &ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpdmai_open() failed\n");
		return err;
	}

	dev_dbg(dev, "Opened dpdmai object successfully\n");

	err = dpdmai_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
				    &priv->dpdmai_attr);
	if (err) {
		dev_err(dev, "dpdmai_get_attributes() failed\n");
		goto exit;
	}

	if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
		dev_err(dev, "DPDMAI major version mismatch\n"
			"Found %u.%u, supported version is %u.%u\n",
			priv->dpdmai_attr.version.major,
			priv->dpdmai_attr.version.minor,
			DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
		goto exit;
	}

	if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
		dev_err(dev, "DPDMAI minor version mismatch\n"
			"Found %u.%u, supported version is %u.%u\n",
			priv->dpdmai_attr.version.major,
			priv->dpdmai_attr.version.minor,
			DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
		goto exit;
	}

	priv->num_pairs = min(priv->dpdmai_attr.num_of_priorities, prio_def);
	ppriv = kcalloc(priv->num_pairs, sizeof(*ppriv), GFP_KERNEL);
	if (!ppriv) {
		err = -ENOMEM;
		goto exit;
	}
	priv->ppriv = ppriv;

	for (i = 0; i < priv->num_pairs; i++) {
		err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
					  i, &priv->rx_queue_attr[i]);
		if (err) {
			dev_err(dev, "dpdmai_get_rx_queue() failed\n");
			goto exit;
		}
		ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;

		err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle,
					  i, &priv->tx_fqid[i]);
		if (err) {
			dev_err(dev, "dpdmai_get_tx_queue() failed\n");
			goto exit;
		}
		ppriv->req_fqid = priv->tx_fqid[i];
		ppriv->prio = i;
		ppriv->priv = priv;
		ppriv++;
	}

	return 0;
exit:
	dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
	return err;
}
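
/*
 * FQ data-availability notification callback (FQDAN): pull dequeued frames
 * from the store, match each response FD against the in-flight descriptors
 * on comp_used by frame address, complete the matching cookie, then re-arm
 * the notification.
 */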
static void dpaa2_qdma_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
{
	struct dpaa2_qdma_priv_per_prio *ppriv = container_of(ctx,
			struct dpaa2_qdma_priv_per_prio, nctx);
	struct dpaa2_qdma_comp *dpaa2_comp, *_comp_tmp;
	struct dpaa2_qdma_priv *priv = ppriv->priv;
	u32 n_chans = priv->dpaa2_qdma->n_chans;
	struct dpaa2_qdma_chan *qchan;
	const struct dpaa2_fd *fd_eq;
	const struct dpaa2_fd *fd;
	struct dpaa2_dq *dq;
	int is_last = 0;
	int found;
	u8 status;
	int err;
	int i;

	do {
		err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
					       ppriv->store);
	} while (err);

	while (!is_last) {
		do {
			dq = dpaa2_io_store_next(ppriv->store, &is_last);
		} while (!is_last && !dq);
		if (!dq) {
			dev_err(priv->dev, "FQID returned no valid frames!\n");
			continue;
		}

		/* obtain FD and process the error */
		fd = dpaa2_dq_fd(dq);

		status = dpaa2_fd_get_ctrl(fd) & 0xff;
		if (status)
			dev_err(priv->dev, "FD error occurred\n");
		found = 0;
		for (i = 0; i < n_chans; i++) {
			qchan = &priv->dpaa2_qdma->chans[i];
			spin_lock(&qchan->queue_lock);
			if (list_empty(&qchan->comp_used)) {
				spin_unlock(&qchan->queue_lock);
				continue;
			}
			list_for_each_entry_safe(dpaa2_comp, _comp_tmp,
						 &qchan->comp_used, list) {
				fd_eq = dpaa2_comp->fd_virt_addr;

				if (le64_to_cpu(fd_eq->simple.addr) ==
				    le64_to_cpu(fd->simple.addr)) {
					spin_lock(&qchan->vchan.lock);
					vchan_cookie_complete(&dpaa2_comp->vdesc);
					spin_unlock(&qchan->vchan.lock);
					found = 1;
					break;
				}
			}
			spin_unlock(&qchan->queue_lock);
			if (found)
				break;
		}
	}

	dpaa2_io_service_rearm(NULL, ctx);
}

static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
{
	struct dpaa2_qdma_priv_per_prio *ppriv;
	struct device *dev = priv->dev;
	int err = -EINVAL;
	int i, num;

	num = priv->num_pairs;
	ppriv = priv->ppriv;
	for (i = 0; i < num; i++) {
		ppriv->nctx.is_cdan = 0;
		ppriv->nctx.desired_cpu = DPAA2_IO_ANY_CPU;
		ppriv->nctx.id = ppriv->rsp_fqid;
		ppriv->nctx.cb = dpaa2_qdma_fqdan_cb;
		err = dpaa2_io_service_register(NULL, &ppriv->nctx, dev);
		if (err) {
			dev_err(dev, "Notification register failed\n");
			goto err_service;
		}

		ppriv->store =
			dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE, dev);
		if (!ppriv->store) {
			dev_err(dev, "dpaa2_io_store_create() failed\n");
			goto err_store;
		}

		ppriv++;
	}
	return 0;

err_store:
	dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
err_service:
	ppriv--;
	while (ppriv >= priv->ppriv) {
		dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
		dpaa2_io_store_destroy(ppriv->store);
		ppriv--;
	}
	return err;
}

static void dpaa2_dpmai_store_free(struct dpaa2_qdma_priv *priv)
{
	struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
	int i;

	for (i = 0; i < priv->num_pairs; i++) {
		dpaa2_io_store_destroy(ppriv->store);
		ppriv++;
	}
}

static void dpaa2_dpdmai_dpio_free(struct dpaa2_qdma_priv *priv)
{
	struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
	struct device *dev = priv->dev;
	int i;

	for (i = 0; i < priv->num_pairs; i++) {
		dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
		ppriv++;
	}
}

static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv)
{
	struct dpdmai_rx_queue_cfg rx_queue_cfg;
	struct dpaa2_qdma_priv_per_prio *ppriv;
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev;
	int i, num;
	int err;

	ls_dev = to_fsl_mc_device(dev);
	num = priv->num_pairs;
	ppriv = priv->ppriv;
	for (i = 0; i < num; i++) {
		rx_queue_cfg.options = DPDMAI_QUEUE_OPT_USER_CTX |
				       DPDMAI_QUEUE_OPT_DEST;
		rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
		rx_queue_cfg.dest_cfg.dest_type = DPDMAI_DEST_DPIO;
		rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
		rx_queue_cfg.dest_cfg.priority = ppriv->prio;
		err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
					  rx_queue_cfg.dest_cfg.priority,
					  &rx_queue_cfg);
		if (err) {
			dev_err(dev, "dpdmai_set_rx_queue() failed\n");
			return err;
		}

		ppriv++;
	}

	return 0;
}

static int __cold dpaa2_dpdmai_dpio_unbind(struct dpaa2_qdma_priv *priv)
{
	struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev;
	int err = 0;
	int i;

	ls_dev = to_fsl_mc_device(dev);

	for (i = 0; i < priv->num_pairs; i++) {
		ppriv->nctx.qman64 = 0;
		ppriv->nctx.dpio_id = 0;
		ppriv++;
	}

	err = dpdmai_reset(priv->mc_io, 0, ls_dev->mc_handle);
	if (err)
		dev_err(dev, "dpdmai_reset() failed\n");

	return err;
}

static void dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
				   struct list_head *head)
{
	struct dpaa2_qdma_comp *comp_tmp, *_comp_tmp;
	unsigned long flags;

	list_for_each_entry_safe(comp_tmp, _comp_tmp,
				 head, list) {
		spin_lock_irqsave(&qchan->queue_lock, flags);
		list_del(&comp_tmp->list);
		spin_unlock_irqrestore(&qchan->queue_lock, flags);
		dma_pool_free(qchan->fd_pool,
			      comp_tmp->fd_virt_addr,
			      comp_tmp->fd_bus_addr);
		dma_pool_free(qchan->fl_pool,
			      comp_tmp->fl_virt_addr,
			      comp_tmp->fl_bus_addr);
		dma_pool_free(qchan->sdd_pool,
			      comp_tmp->desc_virt_addr,
			      comp_tmp->desc_bus_addr);
		kfree(comp_tmp);
	}
}

static void dpaa2_dpdmai_free_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
{
	struct dpaa2_qdma_chan *qchan;
	int num, i;

	num = dpaa2_qdma->n_chans;
	for (i = 0; i < num; i++) {
		qchan = &dpaa2_qdma->chans[i];
		dpaa2_dpdmai_free_comp(qchan, &qchan->comp_used);
		dpaa2_dpdmai_free_comp(qchan, &qchan->comp_free);
		dma_pool_destroy(qchan->fd_pool);
		dma_pool_destroy(qchan->fl_pool);
		dma_pool_destroy(qchan->sdd_pool);
	}
}

static void dpaa2_qdma_free_desc(struct virt_dma_desc *vdesc)
{
	struct dpaa2_qdma_comp *dpaa2_comp;
	struct dpaa2_qdma_chan *qchan;
	unsigned long flags;

	dpaa2_comp = to_fsl_qdma_comp(vdesc);
	qchan = dpaa2_comp->qchan;
	spin_lock_irqsave(&qchan->queue_lock, flags);
	list_del(&dpaa2_comp->list);
	list_add_tail(&dpaa2_comp->list, &qchan->comp_free);
	spin_unlock_irqrestore(&qchan->queue_lock, flags);
}
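
/*
 * Register the virtual channels with the dmaengine core; the tx FQIDs are
 * shared round-robin across channels (i % num priority pairs).
 */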
static int dpaa2_dpdmai_init_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
{
	struct dpaa2_qdma_priv *priv = dpaa2_qdma->priv;
	struct dpaa2_qdma_chan *dpaa2_chan;
	int num = priv->num_pairs;
	int i;

	INIT_LIST_HEAD(&dpaa2_qdma->dma_dev.channels);
	for (i = 0; i < dpaa2_qdma->n_chans; i++) {
		dpaa2_chan = &dpaa2_qdma->chans[i];
		dpaa2_chan->qdma = dpaa2_qdma;
		dpaa2_chan->fqid = priv->tx_fqid[i % num];
		dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc;
		vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev);
		spin_lock_init(&dpaa2_chan->queue_lock);
		INIT_LIST_HEAD(&dpaa2_chan->comp_used);
		INIT_LIST_HEAD(&dpaa2_chan->comp_free);
	}
	return 0;
}
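
/*
 * Probe sequence: allocate an MC portal, set up the DPDMAI object and the
 * DPIO services, bind the rx queues to the DPIO notifications, enable the
 * DPDMAI, then register the dmaengine device.
 */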
static int dpaa2_qdma_probe(struct fsl_mc_device *dpdmai_dev)
{
	struct device *dev = &dpdmai_dev->dev;
	struct dpaa2_qdma_engine *dpaa2_qdma;
	struct dpaa2_qdma_priv *priv;
	int err;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	dev_set_drvdata(dev, priv);
	priv->dpdmai_dev = dpdmai_dev;

	priv->iommu_domain = iommu_get_domain_for_dev(dev);
	if (priv->iommu_domain)
		smmu_disable = false;

	/* obtain an MC portal */
	err = fsl_mc_portal_allocate(dpdmai_dev, 0, &priv->mc_io);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "MC portal allocation failed\n");
		goto err_mcportal;
	}

	/* DPDMAI initialization */
	err = dpaa2_qdma_setup(dpdmai_dev);
	if (err) {
		dev_err(dev, "dpaa2_qdma_setup() failed\n");
		goto err_dpdmai_setup;
	}

	/* DPIO */
	err = dpaa2_qdma_dpio_setup(priv);
	if (err) {
		dev_err(dev, "dpaa2_qdma_dpio_setup() failed\n");
		goto err_dpio_setup;
	}

	/* DPDMAI binding to DPIO */
	err = dpaa2_dpdmai_bind(priv);
	if (err) {
		dev_err(dev, "dpaa2_dpdmai_bind() failed\n");
		goto err_bind;
	}

	/* DPDMAI enable */
	err = dpdmai_enable(priv->mc_io, 0, dpdmai_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpdmai_enable() failed\n");
		goto err_enable;
	}

	dpaa2_qdma = kzalloc(sizeof(*dpaa2_qdma), GFP_KERNEL);
	if (!dpaa2_qdma) {
		err = -ENOMEM;
		goto err_eng;
	}

	priv->dpaa2_qdma = dpaa2_qdma;
	dpaa2_qdma->priv = priv;

	dpaa2_qdma->desc_allocated = 0;
	dpaa2_qdma->n_chans = NUM_CH;

	dpaa2_dpdmai_init_channels(dpaa2_qdma);

	if (soc_device_match(soc_fixup_tuning))
		dpaa2_qdma->qdma_wrtype_fixup = true;
	else
		dpaa2_qdma->qdma_wrtype_fixup = false;

	dma_cap_set(DMA_PRIVATE, dpaa2_qdma->dma_dev.cap_mask);
	dma_cap_set(DMA_SLAVE, dpaa2_qdma->dma_dev.cap_mask);
	dma_cap_set(DMA_MEMCPY, dpaa2_qdma->dma_dev.cap_mask);

	dpaa2_qdma->dma_dev.dev = dev;
	dpaa2_qdma->dma_dev.device_alloc_chan_resources =
		dpaa2_qdma_alloc_chan_resources;
	dpaa2_qdma->dma_dev.device_free_chan_resources =
		dpaa2_qdma_free_chan_resources;
	dpaa2_qdma->dma_dev.device_tx_status = dma_cookie_status;
	dpaa2_qdma->dma_dev.device_prep_dma_memcpy = dpaa2_qdma_prep_memcpy;
	dpaa2_qdma->dma_dev.device_issue_pending = dpaa2_qdma_issue_pending;

	err = dma_async_device_register(&dpaa2_qdma->dma_dev);
	if (err) {
		dev_err(dev, "Can't register NXP QDMA engine.\n");
		goto err_dpaa2_qdma;
	}

	return 0;

err_dpaa2_qdma:
	kfree(dpaa2_qdma);
err_eng:
	dpdmai_disable(priv->mc_io, 0, dpdmai_dev->mc_handle);
err_enable:
	dpaa2_dpdmai_dpio_unbind(priv);
err_bind:
	dpaa2_dpmai_store_free(priv);
	dpaa2_dpdmai_dpio_free(priv);
err_dpio_setup:
	kfree(priv->ppriv);
	dpdmai_close(priv->mc_io, 0, dpdmai_dev->mc_handle);
err_dpdmai_setup:
	fsl_mc_portal_free(priv->mc_io);
err_mcportal:
	kfree(priv);
	dev_set_drvdata(dev, NULL);
	return err;
}

static int dpaa2_qdma_remove(struct fsl_mc_device *ls_dev)
{
	struct dpaa2_qdma_engine *dpaa2_qdma;
	struct dpaa2_qdma_priv *priv;
	struct device *dev;

	dev = &ls_dev->dev;
	priv = dev_get_drvdata(dev);
	dpaa2_qdma = priv->dpaa2_qdma;

	dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
	dpaa2_dpdmai_dpio_unbind(priv);
	dpaa2_dpmai_store_free(priv);
	dpaa2_dpdmai_dpio_free(priv);
	dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
	fsl_mc_portal_free(priv->mc_io);
	dev_set_drvdata(dev, NULL);
	dpaa2_dpdmai_free_channels(dpaa2_qdma);

	dma_async_device_unregister(&dpaa2_qdma->dma_dev);
	kfree(priv);
	kfree(dpaa2_qdma);

	return 0;
}

static void dpaa2_qdma_shutdown(struct fsl_mc_device *ls_dev)
{
	struct dpaa2_qdma_priv *priv;
	struct device *dev;

	dev = &ls_dev->dev;
	priv = dev_get_drvdata(dev);

	dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
	dpaa2_dpdmai_dpio_unbind(priv);
	dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
	dpdmai_destroy(priv->mc_io, 0, ls_dev->mc_handle);
}

static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpdmai",
	},
	{ .vendor = 0x0 }
};

static struct fsl_mc_driver dpaa2_qdma_driver = {
	.driver		= {
		.name	= "dpaa2-qdma",
		.owner	= THIS_MODULE,
	},
	.probe		= dpaa2_qdma_probe,
	.remove		= dpaa2_qdma_remove,
	.shutdown	= dpaa2_qdma_shutdown,
	.match_id_table	= dpaa2_qdma_id_table
};

static int __init dpaa2_qdma_driver_init(void)
{
	return fsl_mc_driver_register(&(dpaa2_qdma_driver));
}
late_initcall(dpaa2_qdma_driver_init);

static void __exit fsl_qdma_exit(void)
{
	fsl_mc_driver_unregister(&(dpaa2_qdma_driver));
}
module_exit(fsl_qdma_exit);

MODULE_ALIAS("platform:fsl-dpaa2-qdma");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("NXP Layerscape DPAA2 qDMA engine driver");