// SPDX-License-Identifier: GPL-2.0
// Copyright 2019 NXP

#include <linux/init.h>
#include <linux/module.h>
#include <linux/dmapool.h>
#include <linux/of_irq.h>
#include <linux/iommu.h>
#include <linux/sys_soc.h>
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>

#include "../virt-dma.h"
#include "dpdmai.h"
#include "dpaa2-qdma.h"
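/*
 * smmu_disable tracks whether the device operates without an IOMMU
 * domain: probe() clears it when iommu_get_domain_for_dev() returns a
 * domain, and the BMT (bypass memory translation) bits in the frame
 * descriptors below are only set while it remains true.
 */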
static bool smmu_disable = true;
static struct dpaa2_qdma_chan *to_dpaa2_qdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct dpaa2_qdma_chan, vchan.chan);
}
static struct dpaa2_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
{
	return container_of(vd, struct dpaa2_qdma_comp, vdesc);
}
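/*
 * Each channel keeps three DMA pools, created when channel resources are
 * allocated: one for hardware frame descriptors (FD), one for the frame
 * lists they point at, and one for the source/destination descriptor
 * pairs.
 */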
static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
	struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
	struct device *dev = &dpaa2_qdma->priv->dpdmai_dev->dev;

	dpaa2_chan->fd_pool = dma_pool_create("fd_pool", dev,
					      sizeof(struct dpaa2_fd),
					      sizeof(struct dpaa2_fd), 0);
	if (!dpaa2_chan->fd_pool)
		goto err;

	dpaa2_chan->fl_pool = dma_pool_create("fl_pool", dev,
					      sizeof(struct dpaa2_fl_entry),
					      sizeof(struct dpaa2_fl_entry), 0);
	if (!dpaa2_chan->fl_pool)
		goto err_fd;

	dpaa2_chan->sdd_pool =
		dma_pool_create("sdd_pool", dev,
				sizeof(struct dpaa2_qdma_sd_d),
				sizeof(struct dpaa2_qdma_sd_d), 0);
	if (!dpaa2_chan->sdd_pool)
		goto err_fl;

	return dpaa2_qdma->desc_allocated++;

err_fl:
	dma_pool_destroy(dpaa2_chan->fl_pool);
err_fd:
	dma_pool_destroy(dpaa2_chan->fd_pool);
err:
	return -ENOMEM;
}
static void dpaa2_qdma_free_chan_resources(struct dma_chan *chan)
{
	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
	struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&dpaa2_chan->vchan.lock, flags);
	vchan_get_all_descriptors(&dpaa2_chan->vchan, &head);
	spin_unlock_irqrestore(&dpaa2_chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&dpaa2_chan->vchan, &head);

	dpaa2_dpdmai_free_comp(dpaa2_chan, &dpaa2_chan->comp_used);
	dpaa2_dpdmai_free_comp(dpaa2_chan, &dpaa2_chan->comp_free);

	dma_pool_destroy(dpaa2_chan->fd_pool);
	dma_pool_destroy(dpaa2_chan->fl_pool);
	dma_pool_destroy(dpaa2_chan->sdd_pool);
	dpaa2_qdma->desc_allocated--;
}
/*
 * Request a command descriptor for enqueue: reuse an entry from the
 * channel's free list when one is available, otherwise allocate a fresh
 * FD/FL/SDD triple from the channel's DMA pools.
 */
static struct dpaa2_qdma_comp *
dpaa2_qdma_request_desc(struct dpaa2_qdma_chan *dpaa2_chan)
{
	struct dpaa2_qdma_priv *qdma_priv = dpaa2_chan->qdma->priv;
	struct device *dev = &qdma_priv->dpdmai_dev->dev;
	struct dpaa2_qdma_comp *comp_temp = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
	if (list_empty(&dpaa2_chan->comp_free)) {
		spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
		comp_temp = kzalloc(sizeof(*comp_temp), GFP_NOWAIT);
		if (!comp_temp)
			goto err;
		comp_temp->fd_virt_addr =
			dma_pool_alloc(dpaa2_chan->fd_pool, GFP_NOWAIT,
				       &comp_temp->fd_bus_addr);
		if (!comp_temp->fd_virt_addr)
			goto err_comp;

		comp_temp->fl_virt_addr =
			dma_pool_alloc(dpaa2_chan->fl_pool, GFP_NOWAIT,
				       &comp_temp->fl_bus_addr);
		if (!comp_temp->fl_virt_addr)
			goto err_fd_virt;

		comp_temp->desc_virt_addr =
			dma_pool_alloc(dpaa2_chan->sdd_pool, GFP_NOWAIT,
				       &comp_temp->desc_bus_addr);
		if (!comp_temp->desc_virt_addr)
			goto err_fl_virt;

		comp_temp->qchan = dpaa2_chan;
		return comp_temp;
	}

	comp_temp = list_first_entry(&dpaa2_chan->comp_free,
				     struct dpaa2_qdma_comp, list);
	list_del(&comp_temp->list);
	spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);

	comp_temp->qchan = dpaa2_chan;

	return comp_temp;

err_fl_virt:
	dma_pool_free(dpaa2_chan->fl_pool,
		      comp_temp->fl_virt_addr,
		      comp_temp->fl_bus_addr);
err_fd_virt:
	dma_pool_free(dpaa2_chan->fd_pool,
		      comp_temp->fd_virt_addr,
		      comp_temp->fd_bus_addr);
err_comp:
	kfree(comp_temp);
err:
	dev_err(dev, "Failed to request descriptor\n");
	return NULL;
}
static void
dpaa2_qdma_populate_fd(u32 format, struct dpaa2_qdma_comp *dpaa2_comp)
{
	struct dpaa2_fd *fd;

	fd = dpaa2_comp->fd_virt_addr;
	memset(fd, 0, sizeof(struct dpaa2_fd));

	/* fd populated */
	dpaa2_fd_set_addr(fd, dpaa2_comp->fl_bus_addr);

	/*
	 * Bypass memory translation, frame list format, short length
	 * disable; BMT must stay disabled when fsl-mc uses IOVA addresses.
	 */
	if (smmu_disable)
		dpaa2_fd_set_bpid(fd, QMAN_FD_BMT_ENABLE);
	dpaa2_fd_set_format(fd, QMAN_FD_FMT_ENABLE | QMAN_FD_SL_DISABLE);

	dpaa2_fd_set_frc(fd, format | QDMA_SER_CTX);
}
/* first frame list for descriptor buffer */
static void
dpaa2_qdma_populate_first_framel(struct dpaa2_fl_entry *f_list,
				 struct dpaa2_qdma_comp *dpaa2_comp,
				 bool wrt_changed)
{
	struct dpaa2_qdma_sd_d *sdd;

	sdd = dpaa2_comp->desc_virt_addr;
	memset(sdd, 0, 2 * (sizeof(*sdd)));

	/* source descriptor CMD */
	sdd->cmd = cpu_to_le32(QDMA_SD_CMD_RDTTYPE_COHERENT);
	sdd++;

	/* dest descriptor CMD */
	if (wrt_changed)
		sdd->cmd = cpu_to_le32(LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT);
	else
		sdd->cmd = cpu_to_le32(QDMA_DD_CMD_WRTTYPE_COHERENT);

	memset(f_list, 0, sizeof(struct dpaa2_fl_entry));

	/* first frame list to source descriptor */
	dpaa2_fl_set_addr(f_list, dpaa2_comp->desc_bus_addr);
	dpaa2_fl_set_len(f_list, 0x20);
	dpaa2_fl_set_format(f_list, QDMA_FL_FMT_SBF | QDMA_FL_SL_LONG);

	/* bypass memory translation */
	if (smmu_disable)
		f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
}
/* source and destination frame list */
static void
dpaa2_qdma_populate_frames(struct dpaa2_fl_entry *f_list,
			   dma_addr_t dst, dma_addr_t src,
			   size_t len, uint8_t fmt)
{
	/* source frame list to source buffer */
	memset(f_list, 0, sizeof(struct dpaa2_fl_entry));

	dpaa2_fl_set_addr(f_list, src);
	dpaa2_fl_set_len(f_list, len);

	/* single buffer frame or scatter gather frame */
	dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));

	/* bypass memory translation */
	if (smmu_disable)
		f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);

	f_list++;

	/* destination frame list to destination buffer */
	memset(f_list, 0, sizeof(struct dpaa2_fl_entry));

	dpaa2_fl_set_addr(f_list, dst);
	dpaa2_fl_set_len(f_list, len);
	dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));
	/* single buffer frame or scatter gather frame */
	dpaa2_fl_set_final(f_list, QDMA_FL_F);
	/* bypass memory translation */
	if (smmu_disable)
		f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
}
static struct dma_async_tx_descriptor
*dpaa2_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
			dma_addr_t src, size_t len, ulong flags)
{
	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
	struct dpaa2_qdma_engine *dpaa2_qdma;
	struct dpaa2_qdma_comp *dpaa2_comp;
	struct dpaa2_fl_entry *f_list;
	bool wrt_changed;

	dpaa2_qdma = dpaa2_chan->qdma;
	dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
	if (!dpaa2_comp)
		return NULL;

	wrt_changed = (bool)dpaa2_qdma->qdma_wrtype_fixup;

	/* populate Frame descriptor */
	dpaa2_qdma_populate_fd(QDMA_FD_LONG_FORMAT, dpaa2_comp);

	f_list = dpaa2_comp->fl_virt_addr;

	/* first frame list for descriptor buffer (long format) */
	dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp, wrt_changed);

	f_list++;

	dpaa2_qdma_populate_frames(f_list, dst, src, len, QDMA_FL_FMT_SBF);

	return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
}
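/*
 * Client-side usage sketch (hypothetical, not part of this driver): a
 * consumer reaches the prep callback above through the generic dmaengine
 * API, roughly:
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, flags);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */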
static void dpaa2_qdma_issue_pending(struct dma_chan *chan)
{
	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
	struct dpaa2_qdma_comp *dpaa2_comp;
	struct virt_dma_desc *vdesc;
	struct dpaa2_fd *fd;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
	spin_lock(&dpaa2_chan->vchan.lock);
	if (vchan_issue_pending(&dpaa2_chan->vchan)) {
		vdesc = vchan_next_desc(&dpaa2_chan->vchan);
		if (!vdesc)
			goto err_enqueue;
		dpaa2_comp = to_fsl_qdma_comp(vdesc);

		fd = dpaa2_comp->fd_virt_addr;

		list_del(&vdesc->node);
		list_add_tail(&dpaa2_comp->list, &dpaa2_chan->comp_used);

		err = dpaa2_io_service_enqueue_fq(NULL, dpaa2_chan->fqid, fd);
		if (err) {
			list_del(&dpaa2_comp->list);
			list_add_tail(&dpaa2_comp->list,
				      &dpaa2_chan->comp_free);
		}
	}
err_enqueue:
	spin_unlock(&dpaa2_chan->vchan.lock);
	spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
}
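/*
 * One-time DPDMAI object setup through the fsl-mc management complex:
 * open the object, sanity-check the DPDMAI API version, then cache the
 * rx/tx frame queue IDs for each priority pair.
 */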
static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
{
	struct dpaa2_qdma_priv_per_prio *ppriv;
	struct device *dev = &ls_dev->dev;
	struct dpaa2_qdma_priv *priv;
	u8 prio_def = DPDMAI_PRIO_NUM;
	int err = -EINVAL;
	int i;

	priv = dev_get_drvdata(dev);

	priv->dev = dev;
	priv->dpqdma_id = ls_dev->obj_desc.id;

	/* Get the handle for the DPDMAI this interface is associated with */
	err = dpdmai_open(priv->mc_io, 0, priv->dpqdma_id, &ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpdmai_open() failed\n");
		return err;
	}

	dev_dbg(dev, "Opened dpdmai object successfully\n");

	err = dpdmai_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
				    &priv->dpdmai_attr);
	if (err) {
		dev_err(dev, "dpdmai_get_attributes() failed\n");
		goto exit;
	}

	if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
		err = -EINVAL;
		dev_err(dev, "DPDMAI major version mismatch\n"
			"Found %u.%u, supported version is %u.%u\n",
			priv->dpdmai_attr.version.major,
			priv->dpdmai_attr.version.minor,
			DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
		goto exit;
	}

	if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
		err = -EINVAL;
		dev_err(dev, "DPDMAI minor version mismatch\n"
			"Found %u.%u, supported version is %u.%u\n",
			priv->dpdmai_attr.version.major,
			priv->dpdmai_attr.version.minor,
			DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
		goto exit;
	}

	priv->num_pairs = min(priv->dpdmai_attr.num_of_priorities, prio_def);
	ppriv = kcalloc(priv->num_pairs, sizeof(*ppriv), GFP_KERNEL);
	if (!ppriv) {
		err = -ENOMEM;
		goto exit;
	}
	priv->ppriv = ppriv;

	for (i = 0; i < priv->num_pairs; i++) {
		err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
					  i, &priv->rx_queue_attr[i]);
		if (err) {
			dev_err(dev, "dpdmai_get_rx_queue() failed\n");
			goto exit;
		}
		ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;

		err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle,
					  i, &priv->tx_fqid[i]);
		if (err) {
			dev_err(dev, "dpdmai_get_tx_queue() failed\n");
			goto exit;
		}
		ppriv->req_fqid = priv->tx_fqid[i];
		ppriv->prio = i;
		ppriv->priv = priv;
		ppriv++;
	}

	return 0;

exit:
	dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
	return err;
}
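/*
 * FQDAN notification callback: runs when the response frame queue for a
 * priority becomes non-empty. It pulls dequeue results into the dpaa2_io
 * store, matches each returned FD against the channels' in-flight
 * (comp_used) descriptors by frame-list address, and completes the
 * matching virt-dma cookie before re-arming the notification.
 */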
static void dpaa2_qdma_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
{
	struct dpaa2_qdma_priv_per_prio *ppriv = container_of(ctx,
			struct dpaa2_qdma_priv_per_prio, nctx);
	struct dpaa2_qdma_comp *dpaa2_comp, *_comp_tmp;
	struct dpaa2_qdma_priv *priv = ppriv->priv;
	u32 n_chans = priv->dpaa2_qdma->n_chans;
	struct dpaa2_qdma_chan *qchan;
	const struct dpaa2_fd *fd_eq;
	const struct dpaa2_fd *fd;
	struct dpaa2_dq *dq;
	int is_last = 0;
	int found;
	u8 status;
	int err;
	int i;

	do {
		err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
					       ppriv->store);
	} while (err);

	while (!is_last) {
		do {
			dq = dpaa2_io_store_next(ppriv->store, &is_last);
		} while (!is_last && !dq);
		if (!dq) {
			dev_err(priv->dev, "FQID returned no valid frames!\n");
			continue;
		}

		/* obtain FD and process the error */
		fd = dpaa2_dq_fd(dq);

		status = dpaa2_fd_get_ctrl(fd) & 0xff;
		if (status)
			dev_err(priv->dev, "FD error occurred\n");
		found = 0;
		for (i = 0; i < n_chans; i++) {
			qchan = &priv->dpaa2_qdma->chans[i];
			spin_lock(&qchan->queue_lock);
			if (list_empty(&qchan->comp_used)) {
				spin_unlock(&qchan->queue_lock);
				continue;
			}
			list_for_each_entry_safe(dpaa2_comp, _comp_tmp,
						 &qchan->comp_used, list) {
				fd_eq = dpaa2_comp->fd_virt_addr;

				if (le64_to_cpu(fd_eq->simple.addr) ==
				    le64_to_cpu(fd->simple.addr)) {
					spin_lock(&qchan->vchan.lock);
					vchan_cookie_complete(&dpaa2_comp->vdesc);
					spin_unlock(&qchan->vchan.lock);
					found = 1;
					break;
				}
			}
			spin_unlock(&qchan->queue_lock);
			if (found)
				break;
		}
	}

	dpaa2_io_service_rearm(NULL, ctx);
}
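/*
 * Register one FQDAN notification context and one dequeue store per
 * priority pair; on failure, deregister and destroy whatever was set up
 * so far.
 */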
static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
{
	struct dpaa2_qdma_priv_per_prio *ppriv;
	struct device *dev = priv->dev;
	int err = -EINVAL;
	int i, num;

	num = priv->num_pairs;
	ppriv = priv->ppriv;
	for (i = 0; i < num; i++) {
		ppriv->nctx.is_cdan = 0;
		ppriv->nctx.desired_cpu = DPAA2_IO_ANY_CPU;
		ppriv->nctx.id = ppriv->rsp_fqid;
		ppriv->nctx.cb = dpaa2_qdma_fqdan_cb;
		err = dpaa2_io_service_register(NULL, &ppriv->nctx, dev);
		if (err) {
			dev_err(dev, "Notification register failed\n");
			goto err_service;
		}

		ppriv->store =
			dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE, dev);
		if (!ppriv->store) {
			err = -ENOMEM;
			dev_err(dev, "dpaa2_io_store_create() failed\n");
			goto err_store;
		}

		ppriv++;
	}
	return 0;

err_store:
	dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
err_service:
	ppriv--;
	while (ppriv >= priv->ppriv) {
		dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
		dpaa2_io_store_destroy(ppriv->store);
		ppriv--;
	}
	return err;
}
static void dpaa2_dpmai_store_free(struct dpaa2_qdma_priv *priv)
{
	struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
	int i;

	for (i = 0; i < priv->num_pairs; i++) {
		dpaa2_io_store_destroy(ppriv->store);
		ppriv++;
	}
}
static void dpaa2_dpdmai_dpio_free(struct dpaa2_qdma_priv *priv)
{
	struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
	struct device *dev = priv->dev;
	int i;

	for (i = 0; i < priv->num_pairs; i++) {
		dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
		ppriv++;
	}
}
static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv)
{
	struct dpdmai_rx_queue_cfg rx_queue_cfg;
	struct dpaa2_qdma_priv_per_prio *ppriv;
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev;
	int i, num;
	int err = 0;

	ls_dev = to_fsl_mc_device(dev);
	num = priv->num_pairs;
	ppriv = priv->ppriv;
	for (i = 0; i < num; i++) {
		rx_queue_cfg.options = DPDMAI_QUEUE_OPT_USER_CTX |
				       DPDMAI_QUEUE_OPT_DEST;
		rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
		rx_queue_cfg.dest_cfg.dest_type = DPDMAI_DEST_DPIO;
		rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
		rx_queue_cfg.dest_cfg.priority = ppriv->prio;
		err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
					  rx_queue_cfg.dest_cfg.priority,
					  &rx_queue_cfg);
		if (err) {
			dev_err(dev, "dpdmai_set_rx_queue() failed\n");
			return err;
		}

		ppriv++;
	}

	return 0;
}
static int __cold dpaa2_dpdmai_dpio_unbind(struct dpaa2_qdma_priv *priv)
{
	struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev;
	int err = 0;
	int i;

	ls_dev = to_fsl_mc_device(dev);

	for (i = 0; i < priv->num_pairs; i++) {
		ppriv->nctx.qman64 = 0;
		ppriv->nctx.dpio_id = 0;
		ppriv++;
	}

	err = dpdmai_reset(priv->mc_io, 0, ls_dev->mc_handle);
	if (err)
		dev_err(dev, "dpdmai_reset() failed\n");

	return err;
}
static void dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
				   struct list_head *head)
{
	struct dpaa2_qdma_comp *comp_tmp, *_comp_tmp;
	unsigned long flags;

	list_for_each_entry_safe(comp_tmp, _comp_tmp,
				 head, list) {
		spin_lock_irqsave(&qchan->queue_lock, flags);
		list_del(&comp_tmp->list);
		spin_unlock_irqrestore(&qchan->queue_lock, flags);
		dma_pool_free(qchan->fd_pool,
			      comp_tmp->fd_virt_addr,
			      comp_tmp->fd_bus_addr);
		dma_pool_free(qchan->fl_pool,
			      comp_tmp->fl_virt_addr,
			      comp_tmp->fl_bus_addr);
		dma_pool_free(qchan->sdd_pool,
			      comp_tmp->desc_virt_addr,
			      comp_tmp->desc_bus_addr);
		kfree(comp_tmp);
	}
}
static void dpaa2_dpdmai_free_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
{
	struct dpaa2_qdma_chan *qchan;
	int num, i;

	num = dpaa2_qdma->n_chans;
	for (i = 0; i < num; i++) {
		qchan = &dpaa2_qdma->chans[i];
		dpaa2_dpdmai_free_comp(qchan, &qchan->comp_used);
		dpaa2_dpdmai_free_comp(qchan, &qchan->comp_free);
		dma_pool_destroy(qchan->fd_pool);
		dma_pool_destroy(qchan->fl_pool);
		dma_pool_destroy(qchan->sdd_pool);
	}
}
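/*
 * virt-dma descriptor "free" callback: completed descriptors are moved
 * back to the channel's comp_free list for reuse rather than being
 * released to the DMA pools immediately.
 */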
static void dpaa2_qdma_free_desc(struct virt_dma_desc *vdesc)
{
	struct dpaa2_qdma_comp *dpaa2_comp;
	struct dpaa2_qdma_chan *qchan;
	unsigned long flags;

	dpaa2_comp = to_fsl_qdma_comp(vdesc);
	qchan = dpaa2_comp->qchan;
	spin_lock_irqsave(&qchan->queue_lock, flags);
	list_del(&dpaa2_comp->list);
	list_add_tail(&dpaa2_comp->list, &qchan->comp_free);
	spin_unlock_irqrestore(&qchan->queue_lock, flags);
}
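/*
 * The NUM_CH virtual channels are multiplexed onto the available tx
 * frame queues round-robin (i % num), so several vchans may share one
 * hardware queue pair.
 */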
static int dpaa2_dpdmai_init_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
{
	struct dpaa2_qdma_priv *priv = dpaa2_qdma->priv;
	struct dpaa2_qdma_chan *dpaa2_chan;
	int num = priv->num_pairs;
	int i;

	INIT_LIST_HEAD(&dpaa2_qdma->dma_dev.channels);
	for (i = 0; i < dpaa2_qdma->n_chans; i++) {
		dpaa2_chan = &dpaa2_qdma->chans[i];
		dpaa2_chan->qdma = dpaa2_qdma;
		dpaa2_chan->fqid = priv->tx_fqid[i % num];
		dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc;
		vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev);
		spin_lock_init(&dpaa2_chan->queue_lock);
		INIT_LIST_HEAD(&dpaa2_chan->comp_used);
		INIT_LIST_HEAD(&dpaa2_chan->comp_free);
	}
	return 0;
}
*dpdmai_dev
)
654 struct device
*dev
= &dpdmai_dev
->dev
;
655 struct dpaa2_qdma_engine
*dpaa2_qdma
;
656 struct dpaa2_qdma_priv
*priv
;
659 priv
= kzalloc(sizeof(*priv
), GFP_KERNEL
);
662 dev_set_drvdata(dev
, priv
);
663 priv
->dpdmai_dev
= dpdmai_dev
;
665 priv
->iommu_domain
= iommu_get_domain_for_dev(dev
);
666 if (priv
->iommu_domain
)
667 smmu_disable
= false;
669 /* obtain a MC portal */
670 err
= fsl_mc_portal_allocate(dpdmai_dev
, 0, &priv
->mc_io
);
675 dev_err(dev
, "MC portal allocation failed\n");
679 /* DPDMAI initialization */
680 err
= dpaa2_qdma_setup(dpdmai_dev
);
682 dev_err(dev
, "dpaa2_dpdmai_setup() failed\n");
683 goto err_dpdmai_setup
;
687 err
= dpaa2_qdma_dpio_setup(priv
);
689 dev_err(dev
, "dpaa2_dpdmai_dpio_setup() failed\n");
693 /* DPDMAI binding to DPIO */
694 err
= dpaa2_dpdmai_bind(priv
);
696 dev_err(dev
, "dpaa2_dpdmai_bind() failed\n");
701 err
= dpdmai_enable(priv
->mc_io
, 0, dpdmai_dev
->mc_handle
);
703 dev_err(dev
, "dpdmai_enable() faile\n");
707 dpaa2_qdma
= kzalloc(sizeof(*dpaa2_qdma
), GFP_KERNEL
);
713 priv
->dpaa2_qdma
= dpaa2_qdma
;
714 dpaa2_qdma
->priv
= priv
;
716 dpaa2_qdma
->desc_allocated
= 0;
717 dpaa2_qdma
->n_chans
= NUM_CH
;
719 dpaa2_dpdmai_init_channels(dpaa2_qdma
);
721 if (soc_device_match(soc_fixup_tuning
))
722 dpaa2_qdma
->qdma_wrtype_fixup
= true;
724 dpaa2_qdma
->qdma_wrtype_fixup
= false;
726 dma_cap_set(DMA_PRIVATE
, dpaa2_qdma
->dma_dev
.cap_mask
);
727 dma_cap_set(DMA_SLAVE
, dpaa2_qdma
->dma_dev
.cap_mask
);
728 dma_cap_set(DMA_MEMCPY
, dpaa2_qdma
->dma_dev
.cap_mask
);
730 dpaa2_qdma
->dma_dev
.dev
= dev
;
731 dpaa2_qdma
->dma_dev
.device_alloc_chan_resources
=
732 dpaa2_qdma_alloc_chan_resources
;
733 dpaa2_qdma
->dma_dev
.device_free_chan_resources
=
734 dpaa2_qdma_free_chan_resources
;
735 dpaa2_qdma
->dma_dev
.device_tx_status
= dma_cookie_status
;
736 dpaa2_qdma
->dma_dev
.device_prep_dma_memcpy
= dpaa2_qdma_prep_memcpy
;
737 dpaa2_qdma
->dma_dev
.device_issue_pending
= dpaa2_qdma_issue_pending
;
739 err
= dma_async_device_register(&dpaa2_qdma
->dma_dev
);
741 dev_err(dev
, "Can't register NXP QDMA engine.\n");
750 dpdmai_disable(priv
->mc_io
, 0, dpdmai_dev
->mc_handle
);
752 dpaa2_dpdmai_dpio_unbind(priv
);
754 dpaa2_dpmai_store_free(priv
);
755 dpaa2_dpdmai_dpio_free(priv
);
758 dpdmai_close(priv
->mc_io
, 0, dpdmai_dev
->mc_handle
);
760 fsl_mc_portal_free(priv
->mc_io
);
763 dev_set_drvdata(dev
, NULL
);
static int dpaa2_qdma_remove(struct fsl_mc_device *ls_dev)
{
	struct dpaa2_qdma_engine *dpaa2_qdma;
	struct dpaa2_qdma_priv *priv;
	struct device *dev;

	dev = &ls_dev->dev;
	priv = dev_get_drvdata(dev);
	dpaa2_qdma = priv->dpaa2_qdma;

	dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
	dpaa2_dpdmai_dpio_unbind(priv);
	dpaa2_dpmai_store_free(priv);
	dpaa2_dpdmai_dpio_free(priv);
	dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
	fsl_mc_portal_free(priv->mc_io);
	dev_set_drvdata(dev, NULL);
	dpaa2_dpdmai_free_channels(dpaa2_qdma);

	dma_async_device_unregister(&dpaa2_qdma->dma_dev);
	kfree(priv);
	kfree(dpaa2_qdma);

	return 0;
}
static void dpaa2_qdma_shutdown(struct fsl_mc_device *ls_dev)
{
	struct dpaa2_qdma_priv *priv;
	struct device *dev;

	dev = &ls_dev->dev;
	priv = dev_get_drvdata(dev);

	dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
	dpaa2_dpdmai_dpio_unbind(priv);
	dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
	dpdmai_destroy(priv->mc_io, 0, ls_dev->mc_handle);
}
static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpdmai",
	},
	{ .vendor = 0x0 }
};
static struct fsl_mc_driver dpaa2_qdma_driver = {
	.driver = {
		.name = "dpaa2-qdma",
		.owner = THIS_MODULE,
	},
	.probe = dpaa2_qdma_probe,
	.remove = dpaa2_qdma_remove,
	.shutdown = dpaa2_qdma_shutdown,
	.match_id_table = dpaa2_qdma_id_table,
};
static int __init dpaa2_qdma_driver_init(void)
{
	return fsl_mc_driver_register(&(dpaa2_qdma_driver));
}
late_initcall(dpaa2_qdma_driver_init);
static void __exit fsl_qdma_exit(void)
{
	fsl_mc_driver_unregister(&(dpaa2_qdma_driver));
}
module_exit(fsl_qdma_exit);
MODULE_ALIAS("platform:fsl-dpaa2-qdma");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("NXP Layerscape DPAA2 qDMA engine driver");