// SPDX-License-Identifier: GPL-2.0

#include <linux/init.h>
#include <linux/module.h>
#include <linux/dmapool.h>
#include <linux/of_irq.h>
#include <linux/iommu.h>
#include <linux/sys_soc.h>
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>

#include "../virt-dma.h"
#include "dpdmai.h"
#include "dpaa2-qdma.h"

static bool smmu_disable = true;

static struct dpaa2_qdma_chan *to_dpaa2_qdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct dpaa2_qdma_chan, vchan.chan);
}

static struct dpaa2_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
{
	return container_of(vd, struct dpaa2_qdma_comp, vdesc);
}

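/*
 * Per-channel resource allocation: create the DMA pools that back the
 * frame descriptors, frame-list entries and source/destination
 * descriptors used to build qDMA jobs on this channel.
 */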
static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
	struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
	struct device *dev = &dpaa2_qdma->priv->dpdmai_dev->dev;

	dpaa2_chan->fd_pool = dma_pool_create("fd_pool", dev,
					      sizeof(struct dpaa2_fd),
					      sizeof(struct dpaa2_fd), 0);
	if (!dpaa2_chan->fd_pool)
		goto err;

	dpaa2_chan->fl_pool =
		dma_pool_create("fl_pool", dev,
				sizeof(struct dpaa2_fl_entry) * 3,
				sizeof(struct dpaa2_fl_entry), 0);
	if (!dpaa2_chan->fl_pool)
		goto err_fd;

	dpaa2_chan->sdd_pool =
		dma_pool_create("sdd_pool", dev,
				sizeof(struct dpaa2_qdma_sd_d) * 2,
				sizeof(struct dpaa2_qdma_sd_d), 0);
	if (!dpaa2_chan->sdd_pool)
		goto err_fl;

	return dpaa2_qdma->desc_allocated++;

err_fl:
	dma_pool_destroy(dpaa2_chan->fl_pool);
err_fd:
	dma_pool_destroy(dpaa2_chan->fd_pool);
err:
	return -ENOMEM;
}

static void dpaa2_qdma_free_chan_resources(struct dma_chan *chan)
{
	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
	struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&dpaa2_chan->vchan.lock, flags);
	vchan_get_all_descriptors(&dpaa2_chan->vchan, &head);
	spin_unlock_irqrestore(&dpaa2_chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&dpaa2_chan->vchan, &head);

	dpaa2_dpdmai_free_comp(dpaa2_chan, &dpaa2_chan->comp_used);
	dpaa2_dpdmai_free_comp(dpaa2_chan, &dpaa2_chan->comp_free);

	dma_pool_destroy(dpaa2_chan->fd_pool);
	dma_pool_destroy(dpaa2_chan->fl_pool);
	dma_pool_destroy(dpaa2_chan->sdd_pool);
	dpaa2_qdma->desc_allocated--;
}

/*
 * Request a command descriptor for enqueue.
 */
static struct dpaa2_qdma_comp *
dpaa2_qdma_request_desc(struct dpaa2_qdma_chan *dpaa2_chan)
{
	struct dpaa2_qdma_priv *qdma_priv = dpaa2_chan->qdma->priv;
	struct device *dev = &qdma_priv->dpdmai_dev->dev;
	struct dpaa2_qdma_comp *comp_temp = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
	if (list_empty(&dpaa2_chan->comp_free)) {
		spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
		comp_temp = kzalloc(sizeof(*comp_temp), GFP_NOWAIT);
		if (!comp_temp)
			goto err;
		comp_temp->fd_virt_addr =
			dma_pool_alloc(dpaa2_chan->fd_pool, GFP_NOWAIT,
				       &comp_temp->fd_bus_addr);
		if (!comp_temp->fd_virt_addr)
			goto err_comp;

		comp_temp->fl_virt_addr =
			dma_pool_alloc(dpaa2_chan->fl_pool, GFP_NOWAIT,
				       &comp_temp->fl_bus_addr);
		if (!comp_temp->fl_virt_addr)
			goto err_fd_virt;

		comp_temp->desc_virt_addr =
			dma_pool_alloc(dpaa2_chan->sdd_pool, GFP_NOWAIT,
				       &comp_temp->desc_bus_addr);
		if (!comp_temp->desc_virt_addr)
			goto err_fl_virt;

		comp_temp->qchan = dpaa2_chan;
		return comp_temp;
	}

	comp_temp = list_first_entry(&dpaa2_chan->comp_free,
				     struct dpaa2_qdma_comp, list);
	list_del(&comp_temp->list);
	spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);

	comp_temp->qchan = dpaa2_chan;

	return comp_temp;

err_fl_virt:
	dma_pool_free(dpaa2_chan->fl_pool,
		      comp_temp->fl_virt_addr,
		      comp_temp->fl_bus_addr);
err_fd_virt:
	dma_pool_free(dpaa2_chan->fd_pool,
		      comp_temp->fd_virt_addr,
		      comp_temp->fd_bus_addr);
err_comp:
	kfree(comp_temp);
err:
	dev_err(dev, "Failed to request descriptor\n");
	return NULL;
}

static void
dpaa2_qdma_populate_fd(u32 format, struct dpaa2_qdma_comp *dpaa2_comp)
{
	struct dpaa2_fd *fd;

	fd = dpaa2_comp->fd_virt_addr;
	memset(fd, 0, sizeof(struct dpaa2_fd));

	dpaa2_fd_set_addr(fd, dpaa2_comp->fl_bus_addr);

	/*
	 * Bypass memory translation, frame list format, short length disable;
	 * BMT must be disabled if fsl-mc uses an iova address.
	 */
	if (smmu_disable)
		dpaa2_fd_set_bpid(fd, QMAN_FD_BMT_ENABLE);
	dpaa2_fd_set_format(fd, QMAN_FD_FMT_ENABLE | QMAN_FD_SL_DISABLE);

	dpaa2_fd_set_frc(fd, format | QDMA_SER_CTX);
}

/* first frame list for descriptor buffer */
static void
dpaa2_qdma_populate_first_framel(struct dpaa2_fl_entry *f_list,
				 struct dpaa2_qdma_comp *dpaa2_comp,
				 bool wrt_changed)
{
	struct dpaa2_qdma_sd_d *sdd;

	sdd = dpaa2_comp->desc_virt_addr;
	memset(sdd, 0, 2 * (sizeof(*sdd)));

	/* source descriptor CMD */
	sdd->cmd = cpu_to_le32(QDMA_SD_CMD_RDTTYPE_COHERENT);
	sdd++;

	/* dest descriptor CMD */
	if (wrt_changed)
		sdd->cmd = cpu_to_le32(LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT);
	else
		sdd->cmd = cpu_to_le32(QDMA_DD_CMD_WRTTYPE_COHERENT);

	memset(f_list, 0, sizeof(struct dpaa2_fl_entry));

	/* first frame list to source descriptor */
	dpaa2_fl_set_addr(f_list, dpaa2_comp->desc_bus_addr);
	dpaa2_fl_set_len(f_list, 0x20);
	dpaa2_fl_set_format(f_list, QDMA_FL_FMT_SBF | QDMA_FL_SL_LONG);

	/* bypass memory translation */
	if (smmu_disable)
		f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
}

/* source and destination frame list */
static void
dpaa2_qdma_populate_frames(struct dpaa2_fl_entry *f_list,
			   dma_addr_t dst, dma_addr_t src,
			   size_t len, uint8_t fmt)
{
	/* source frame list to source buffer */
	memset(f_list, 0, sizeof(struct dpaa2_fl_entry));

	dpaa2_fl_set_addr(f_list, src);
	dpaa2_fl_set_len(f_list, len);

	/* single buffer frame or scatter gather frame */
	dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));

	/* bypass memory translation */
	if (smmu_disable)
		f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);

	f_list++;

	/* destination frame list to destination buffer */
	memset(f_list, 0, sizeof(struct dpaa2_fl_entry));

	dpaa2_fl_set_addr(f_list, dst);
	dpaa2_fl_set_len(f_list, len);
	dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));
	/* single buffer frame or scatter gather frame */
	dpaa2_fl_set_final(f_list, QDMA_FL_F);
	/* bypass memory translation */
	if (smmu_disable)
		f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
}

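/*
 * Build a single memcpy transaction: one long-format frame descriptor
 * pointing at a three-entry frame list (descriptor buffer, source
 * buffer, destination buffer).
 */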
static struct dma_async_tx_descriptor
*dpaa2_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
			dma_addr_t src, size_t len, ulong flags)
{
	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
	struct dpaa2_qdma_engine *dpaa2_qdma;
	struct dpaa2_qdma_comp *dpaa2_comp;
	struct dpaa2_fl_entry *f_list;
	bool wrt_changed;

	dpaa2_qdma = dpaa2_chan->qdma;
	dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
	if (!dpaa2_comp)
		return NULL;

	wrt_changed = (bool)dpaa2_qdma->qdma_wrtype_fixup;

	/* populate Frame descriptor */
	dpaa2_qdma_populate_fd(QDMA_FD_LONG_FORMAT, dpaa2_comp);

	f_list = dpaa2_comp->fl_virt_addr;

	/* first frame list for descriptor buffer (long format) */
	dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp, wrt_changed);

	f_list++;

	dpaa2_qdma_populate_frames(f_list, dst, src, len, QDMA_FL_FMT_SBF);

	return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
}

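/*
 * Take the next pending virt-dma descriptor and enqueue its frame
 * descriptor to the channel's transmit frame queue; on enqueue failure
 * the completion is returned to the free list.
 */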
static void dpaa2_qdma_issue_pending(struct dma_chan *chan)
{
	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
	struct dpaa2_qdma_comp *dpaa2_comp;
	struct virt_dma_desc *vdesc;
	struct dpaa2_fd *fd;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
	spin_lock(&dpaa2_chan->vchan.lock);
	if (vchan_issue_pending(&dpaa2_chan->vchan)) {
		vdesc = vchan_next_desc(&dpaa2_chan->vchan);
		if (!vdesc)
			goto err_enqueue;
		dpaa2_comp = to_fsl_qdma_comp(vdesc);

		fd = dpaa2_comp->fd_virt_addr;

		list_del(&vdesc->node);
		list_add_tail(&dpaa2_comp->list, &dpaa2_chan->comp_used);

		err = dpaa2_io_service_enqueue_fq(NULL, dpaa2_chan->fqid, fd);
		if (err) {
			list_move_tail(&dpaa2_comp->list,
				       &dpaa2_chan->comp_free);
		}
	}
err_enqueue:
	spin_unlock(&dpaa2_chan->vchan.lock);
	spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
}

static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
{
	struct dpaa2_qdma_priv_per_prio *ppriv;
	struct device *dev = &ls_dev->dev;
	struct dpaa2_qdma_priv *priv;
	u8 prio_def = DPDMAI_PRIO_NUM;
	int err = -EINVAL;
	int i;

	priv = dev_get_drvdata(dev);

	priv->dev = dev;
	priv->dpqdma_id = ls_dev->obj_desc.id;

	/* Get the handle for the DPDMAI this interface is associated with */
	err = dpdmai_open(priv->mc_io, 0, priv->dpqdma_id, &ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpdmai_open() failed\n");
		return err;
	}

	dev_dbg(dev, "Opened dpdmai object successfully\n");

	err = dpdmai_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
				    &priv->dpdmai_attr);
	if (err) {
		dev_err(dev, "dpdmai_get_attributes() failed\n");
		goto exit;
	}

	if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
		err = -EINVAL;
		dev_err(dev, "DPDMAI major version mismatch\n"
			"Found %u.%u, supported version is %u.%u\n",
			priv->dpdmai_attr.version.major,
			priv->dpdmai_attr.version.minor,
			DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
		goto exit;
	}

	if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
		err = -EINVAL;
		dev_err(dev, "DPDMAI minor version mismatch\n"
			"Found %u.%u, supported version is %u.%u\n",
			priv->dpdmai_attr.version.major,
			priv->dpdmai_attr.version.minor,
			DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
		goto exit;
	}

	priv->num_pairs = min(priv->dpdmai_attr.num_of_priorities, prio_def);
	ppriv = kcalloc(priv->num_pairs, sizeof(*ppriv), GFP_KERNEL);
	if (!ppriv) {
		err = -ENOMEM;
		goto exit;
	}
	priv->ppriv = ppriv;

	for (i = 0; i < priv->num_pairs; i++) {
		err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
					  i, 0, &priv->rx_queue_attr[i]);
		if (err) {
			dev_err(dev, "dpdmai_get_rx_queue() failed\n");
			goto exit;
		}
		ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;

		err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle,
					  i, 0, &priv->tx_queue_attr[i]);
		if (err) {
			dev_err(dev, "dpdmai_get_tx_queue() failed\n");
			goto exit;
		}
		ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
		ppriv->prio = DPAA2_QDMA_DEFAULT_PRIORITY;
		ppriv->priv = priv;
		ppriv++;
	}

	return 0;

exit:
	dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
	return err;
}

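/*
 * FQDAN notification callback: pull completed frames from the response
 * frame queue, match each returned FD against the in-flight completions
 * of every channel and complete the corresponding virt-dma cookie.
 */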
static void dpaa2_qdma_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
{
	struct dpaa2_qdma_priv_per_prio *ppriv = container_of(ctx,
			struct dpaa2_qdma_priv_per_prio, nctx);
	struct dpaa2_qdma_comp *dpaa2_comp, *_comp_tmp;
	struct dpaa2_qdma_priv *priv = ppriv->priv;
	u32 n_chans = priv->dpaa2_qdma->n_chans;
	struct dpaa2_qdma_chan *qchan;
	const struct dpaa2_fd *fd_eq;
	const struct dpaa2_fd *fd;
	struct dpaa2_dq *dq;
	int is_last = 0;
	int found;
	u8 status;
	int err;
	int i;

	do {
		err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
					       ppriv->store);
	} while (err);

	while (!is_last) {
		do {
			dq = dpaa2_io_store_next(ppriv->store, &is_last);
		} while (!is_last && !dq);
		if (!dq) {
			dev_err(priv->dev, "FQID returned no valid frames!\n");
			continue;
		}

		/* obtain FD and process the error */
		fd = dpaa2_dq_fd(dq);

		status = dpaa2_fd_get_ctrl(fd) & 0xff;
		if (status)
			dev_err(priv->dev, "FD error occurred\n");

		found = 0;
		for (i = 0; i < n_chans; i++) {
			qchan = &priv->dpaa2_qdma->chans[i];
			spin_lock(&qchan->queue_lock);
			if (list_empty(&qchan->comp_used)) {
				spin_unlock(&qchan->queue_lock);
				continue;
			}
			list_for_each_entry_safe(dpaa2_comp, _comp_tmp,
						 &qchan->comp_used, list) {
				fd_eq = dpaa2_comp->fd_virt_addr;

				if (le64_to_cpu(fd_eq->simple.addr) ==
				    le64_to_cpu(fd->simple.addr)) {
					spin_lock(&qchan->vchan.lock);
					vchan_cookie_complete(&dpaa2_comp->vdesc);
					spin_unlock(&qchan->vchan.lock);
					found = 1;
					break;
				}
			}
			spin_unlock(&qchan->queue_lock);
			if (found)
				break;
		}
	}

	dpaa2_io_service_rearm(NULL, ctx);
}

static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
{
	struct dpaa2_qdma_priv_per_prio *ppriv;
	struct device *dev = priv->dev;
	int err = -EINVAL;
	int i, num;

	num = priv->num_pairs;
	ppriv = priv->ppriv;
	for (i = 0; i < num; i++) {
		ppriv->nctx.is_cdan = 0;
		ppriv->nctx.desired_cpu = DPAA2_IO_ANY_CPU;
		ppriv->nctx.id = ppriv->rsp_fqid;
		ppriv->nctx.cb = dpaa2_qdma_fqdan_cb;
		err = dpaa2_io_service_register(NULL, &ppriv->nctx, dev);
		if (err) {
			dev_err(dev, "Notification register failed\n");
			goto err_service;
		}

		ppriv->store =
			dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE, dev);
		if (!ppriv->store) {
			err = -ENOMEM;
			dev_err(dev, "dpaa2_io_store_create() failed\n");
			goto err_store;
		}

		ppriv++;
	}
	return 0;

err_store:
	dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
err_service:
	ppriv--;
	while (ppriv >= priv->ppriv) {
		dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
		dpaa2_io_store_destroy(ppriv->store);
		ppriv--;
	}
	return err;
}

static void dpaa2_dpmai_store_free(struct dpaa2_qdma_priv *priv)
{
	struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
	int i;

	for (i = 0; i < priv->num_pairs; i++) {
		dpaa2_io_store_destroy(ppriv->store);
		ppriv++;
	}
}

static void dpaa2_dpdmai_dpio_free(struct dpaa2_qdma_priv *priv)
{
	struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
	struct device *dev = priv->dev;
	int i;

	for (i = 0; i < priv->num_pairs; i++) {
		dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
		ppriv++;
	}
}

static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv)
{
	struct dpdmai_rx_queue_cfg rx_queue_cfg;
	struct dpaa2_qdma_priv_per_prio *ppriv;
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev;
	int i, num;
	int err;

	ls_dev = to_fsl_mc_device(dev);
	num = priv->num_pairs;
	ppriv = priv->ppriv;
	for (i = 0; i < num; i++) {
		rx_queue_cfg.options = DPDMAI_QUEUE_OPT_USER_CTX |
				       DPDMAI_QUEUE_OPT_DEST;
		rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
		rx_queue_cfg.dest_cfg.dest_type = DPDMAI_DEST_DPIO;
		rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
		rx_queue_cfg.dest_cfg.priority = ppriv->prio;
		err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
					  rx_queue_cfg.dest_cfg.priority, 0,
					  &rx_queue_cfg);
		if (err) {
			dev_err(dev, "dpdmai_set_rx_queue() failed\n");
			return err;
		}

		ppriv++;
	}

	return 0;
}

static int __cold dpaa2_dpdmai_dpio_unbind(struct dpaa2_qdma_priv *priv)
{
	struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev;
	int err = 0;
	int i;

	ls_dev = to_fsl_mc_device(dev);

	for (i = 0; i < priv->num_pairs; i++) {
		ppriv->nctx.qman64 = 0;
		ppriv->nctx.dpio_id = 0;
		ppriv++;
	}

	err = dpdmai_reset(priv->mc_io, 0, ls_dev->mc_handle);
	if (err)
		dev_err(dev, "dpdmai_reset() failed\n");

	return err;
}

static void dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
				   struct list_head *head)
{
	struct dpaa2_qdma_comp *comp_tmp, *_comp_tmp;
	unsigned long flags;

	list_for_each_entry_safe(comp_tmp, _comp_tmp,
				 head, list) {
		spin_lock_irqsave(&qchan->queue_lock, flags);
		list_del(&comp_tmp->list);
		spin_unlock_irqrestore(&qchan->queue_lock, flags);
		dma_pool_free(qchan->fd_pool,
			      comp_tmp->fd_virt_addr,
			      comp_tmp->fd_bus_addr);
		dma_pool_free(qchan->fl_pool,
			      comp_tmp->fl_virt_addr,
			      comp_tmp->fl_bus_addr);
		dma_pool_free(qchan->sdd_pool,
			      comp_tmp->desc_virt_addr,
			      comp_tmp->desc_bus_addr);
		kfree(comp_tmp);
	}
}

static void dpaa2_dpdmai_free_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
{
	struct dpaa2_qdma_chan *qchan;
	int num, i;

	num = dpaa2_qdma->n_chans;
	for (i = 0; i < num; i++) {
		qchan = &dpaa2_qdma->chans[i];
		dpaa2_dpdmai_free_comp(qchan, &qchan->comp_used);
		dpaa2_dpdmai_free_comp(qchan, &qchan->comp_free);
		dma_pool_destroy(qchan->fd_pool);
		dma_pool_destroy(qchan->fl_pool);
		dma_pool_destroy(qchan->sdd_pool);
	}
}

static void dpaa2_qdma_free_desc(struct virt_dma_desc *vdesc)
{
	struct dpaa2_qdma_comp *dpaa2_comp;
	struct dpaa2_qdma_chan *qchan;
	unsigned long flags;

	dpaa2_comp = to_fsl_qdma_comp(vdesc);
	qchan = dpaa2_comp->qchan;
	spin_lock_irqsave(&qchan->queue_lock, flags);
	list_move_tail(&dpaa2_comp->list, &qchan->comp_free);
	spin_unlock_irqrestore(&qchan->queue_lock, flags);
}

static int dpaa2_dpdmai_init_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
{
	struct dpaa2_qdma_priv *priv = dpaa2_qdma->priv;
	struct dpaa2_qdma_chan *dpaa2_chan;
	int num = priv->num_pairs;
	int i;

	INIT_LIST_HEAD(&dpaa2_qdma->dma_dev.channels);
	for (i = 0; i < dpaa2_qdma->n_chans; i++) {
		dpaa2_chan = &dpaa2_qdma->chans[i];
		dpaa2_chan->qdma = dpaa2_qdma;
		dpaa2_chan->fqid = priv->tx_queue_attr[i % num].fqid;
		dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc;
		vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev);
		spin_lock_init(&dpaa2_chan->queue_lock);
		INIT_LIST_HEAD(&dpaa2_chan->comp_used);
		INIT_LIST_HEAD(&dpaa2_chan->comp_free);
	}
	return 0;
}

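/*
 * Probe sequence: open and configure the DPDMAI object, set up DPIO
 * notification contexts and stores, bind the rx queues, enable the
 * DPDMAI and register the dmaengine device.
 */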
static int dpaa2_qdma_probe(struct fsl_mc_device *dpdmai_dev)
{
	struct device *dev = &dpdmai_dev->dev;
	struct dpaa2_qdma_engine *dpaa2_qdma;
	struct dpaa2_qdma_priv *priv;
	int err;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	dev_set_drvdata(dev, priv);
	priv->dpdmai_dev = dpdmai_dev;

	priv->iommu_domain = iommu_get_domain_for_dev(dev);
	if (priv->iommu_domain)
		smmu_disable = false;

	/* obtain a MC portal */
	err = fsl_mc_portal_allocate(dpdmai_dev, 0, &priv->mc_io);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "MC portal allocation failed\n");
		goto err_mcportal;
	}

	/* DPDMAI initialization */
	err = dpaa2_qdma_setup(dpdmai_dev);
	if (err) {
		dev_err(dev, "dpaa2_dpdmai_setup() failed\n");
		goto err_dpdmai_setup;
	}

	/* DPIO */
	err = dpaa2_qdma_dpio_setup(priv);
	if (err) {
		dev_err(dev, "dpaa2_dpdmai_dpio_setup() failed\n");
		goto err_dpio_setup;
	}

	/* DPDMAI binding to DPIO */
	err = dpaa2_dpdmai_bind(priv);
	if (err) {
		dev_err(dev, "dpaa2_dpdmai_bind() failed\n");
		goto err_bind;
	}

	/* DPDMAI enable */
	err = dpdmai_enable(priv->mc_io, 0, dpdmai_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpdmai_enable() failed\n");
		goto err_enable;
	}

	dpaa2_qdma = kzalloc(sizeof(*dpaa2_qdma), GFP_KERNEL);
	if (!dpaa2_qdma) {
		err = -ENOMEM;
		goto err_eng;
	}

	priv->dpaa2_qdma = dpaa2_qdma;
	dpaa2_qdma->priv = priv;

	dpaa2_qdma->desc_allocated = 0;
	dpaa2_qdma->n_chans = NUM_CH;

	dpaa2_dpdmai_init_channels(dpaa2_qdma);

	if (soc_device_match(soc_fixup_tuning))
		dpaa2_qdma->qdma_wrtype_fixup = true;
	else
		dpaa2_qdma->qdma_wrtype_fixup = false;

	dma_cap_set(DMA_PRIVATE, dpaa2_qdma->dma_dev.cap_mask);
	dma_cap_set(DMA_SLAVE, dpaa2_qdma->dma_dev.cap_mask);
	dma_cap_set(DMA_MEMCPY, dpaa2_qdma->dma_dev.cap_mask);

	dpaa2_qdma->dma_dev.dev = dev;
	dpaa2_qdma->dma_dev.device_alloc_chan_resources =
		dpaa2_qdma_alloc_chan_resources;
	dpaa2_qdma->dma_dev.device_free_chan_resources =
		dpaa2_qdma_free_chan_resources;
	dpaa2_qdma->dma_dev.device_tx_status = dma_cookie_status;
	dpaa2_qdma->dma_dev.device_prep_dma_memcpy = dpaa2_qdma_prep_memcpy;
	dpaa2_qdma->dma_dev.device_issue_pending = dpaa2_qdma_issue_pending;

	err = dma_async_device_register(&dpaa2_qdma->dma_dev);
	if (err) {
		dev_err(dev, "Can't register NXP QDMA engine.\n");
		goto err_dpaa2_qdma;
	}

	return 0;

err_dpaa2_qdma:
	kfree(dpaa2_qdma);
err_eng:
	dpdmai_disable(priv->mc_io, 0, dpdmai_dev->mc_handle);
err_enable:
	dpaa2_dpdmai_dpio_unbind(priv);
err_bind:
	dpaa2_dpmai_store_free(priv);
	dpaa2_dpdmai_dpio_free(priv);
err_dpio_setup:
	kfree(priv->ppriv);
	dpdmai_close(priv->mc_io, 0, dpdmai_dev->mc_handle);
err_dpdmai_setup:
	fsl_mc_portal_free(priv->mc_io);
err_mcportal:
	kfree(priv);
	dev_set_drvdata(dev, NULL);
	return err;
}

static void dpaa2_qdma_remove(struct fsl_mc_device *ls_dev)
{
	struct dpaa2_qdma_engine *dpaa2_qdma;
	struct dpaa2_qdma_priv *priv;
	struct device *dev;

	dev = &ls_dev->dev;
	priv = dev_get_drvdata(dev);
	dpaa2_qdma = priv->dpaa2_qdma;

	dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
	dpaa2_dpdmai_dpio_unbind(priv);
	dpaa2_dpmai_store_free(priv);
	dpaa2_dpdmai_dpio_free(priv);
	dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
	fsl_mc_portal_free(priv->mc_io);
	dev_set_drvdata(dev, NULL);
	dpaa2_dpdmai_free_channels(dpaa2_qdma);

	dma_async_device_unregister(&dpaa2_qdma->dma_dev);
	kfree(priv);
	kfree(dpaa2_qdma);
}

static void dpaa2_qdma_shutdown(struct fsl_mc_device *ls_dev)
{
	struct dpaa2_qdma_priv *priv;
	struct device *dev;

	dev = &ls_dev->dev;
	priv = dev_get_drvdata(dev);

	dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
	dpaa2_dpdmai_dpio_unbind(priv);
	dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
	dpdmai_destroy(priv->mc_io, 0, priv->dpqdma_id, ls_dev->mc_handle);
}

static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpdmai",
	},
	{ .vendor = 0x0 }
};

static struct fsl_mc_driver dpaa2_qdma_driver = {
	.driver		= {
		.name	= "dpaa2-qdma",
	},
	.probe		= dpaa2_qdma_probe,
	.remove		= dpaa2_qdma_remove,
	.shutdown	= dpaa2_qdma_shutdown,
	.match_id_table	= dpaa2_qdma_id_table
};

static int __init dpaa2_qdma_driver_init(void)
{
	return fsl_mc_driver_register(&(dpaa2_qdma_driver));
}
late_initcall(dpaa2_qdma_driver_init);

static void __exit fsl_qdma_exit(void)
{
	fsl_mc_driver_unregister(&(dpaa2_qdma_driver));
}
module_exit(fsl_qdma_exit);

MODULE_ALIAS("platform:fsl-dpaa2-qdma");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("NXP Layerscape DPAA2 qDMA engine driver");