// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2016,2019 Advanced Micro Devices, Inc.
 *
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/ccp.h>

#include "ccp-dev.h"
#include "../../dma/dmaengine.h"

#define CCP_DMA_WIDTH(_mask)            \
({                                      \
        u64 mask = _mask + 1;           \
        (mask == 0) ? 64 : fls64(mask); \
})

/* The CCP as a DMA provider can be configured for public or private
 * channels. Default is specified in the vdata for the device (PCI ID).
 * This module parameter will override for all channels on all devices:
 *   dma_chan_attr = 0x2 to force all channels public
 *                 = 0x1 to force all channels private
 *                 = 0x0 to defer to the vdata setting
 *                 = any other value: warning, revert to 0x0
 */
static unsigned int dma_chan_attr = CCP_DMA_DFLT;
module_param(dma_chan_attr, uint, 0444);
MODULE_PARM_DESC(dma_chan_attr, "Set DMA channel visibility: 0 (default) = device defaults, 1 = make private, 2 = make public");

static unsigned int dmaengine = 1;
module_param(dmaengine, uint, 0444);
MODULE_PARM_DESC(dmaengine, "Register services with the DMA subsystem (any non-zero value, default: 1)");

static unsigned int ccp_get_dma_chan_attr(struct ccp_device *ccp)
{
        switch (dma_chan_attr) {
        case CCP_DMA_DFLT:
                return ccp->vdata->dma_chan_attr;

        case CCP_DMA_PRIV:
                return DMA_PRIVATE;

        case CCP_DMA_PUB:
                return 0;

        default:
                dev_info_once(ccp->dev, "Invalid value for dma_chan_attr: %d\n",
                              dma_chan_attr);
                return ccp->vdata->dma_chan_attr;
        }
}

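/*
 * Teardown helpers: each one walks a list, unlinks the entries and returns
 * them to the corresponding kmem cache.
 */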
static void ccp_free_cmd_resources(struct ccp_device *ccp,
                                   struct list_head *list)
{
        struct ccp_dma_cmd *cmd, *ctmp;

        list_for_each_entry_safe(cmd, ctmp, list, entry) {
                list_del(&cmd->entry);
                kmem_cache_free(ccp->dma_cmd_cache, cmd);
        }
}

static void ccp_free_desc_resources(struct ccp_device *ccp,
                                    struct list_head *list)
{
        struct ccp_dma_desc *desc, *dtmp;

        list_for_each_entry_safe(desc, dtmp, list, entry) {
                ccp_free_cmd_resources(ccp, &desc->active);
                ccp_free_cmd_resources(ccp, &desc->pending);

                list_del(&desc->entry);
                kmem_cache_free(ccp->dma_desc_cache, desc);
        }
}

static void ccp_free_chan_resources(struct dma_chan *dma_chan)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        unsigned long flags;

        dev_dbg(chan->ccp->dev, "%s - chan=%p\n", __func__, chan);

        spin_lock_irqsave(&chan->lock, flags);

        ccp_free_desc_resources(chan->ccp, &chan->complete);
        ccp_free_desc_resources(chan->ccp, &chan->active);
        ccp_free_desc_resources(chan->ccp, &chan->pending);
        ccp_free_desc_resources(chan->ccp, &chan->created);

        spin_unlock_irqrestore(&chan->lock, flags);
}

static void ccp_cleanup_desc_resources(struct ccp_device *ccp,
                                       struct list_head *list)
{
        struct ccp_dma_desc *desc, *dtmp;

        list_for_each_entry_safe_reverse(desc, dtmp, list, entry) {
                if (!async_tx_test_ack(&desc->tx_desc))
                        continue;

                dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

                ccp_free_cmd_resources(ccp, &desc->active);
                ccp_free_cmd_resources(ccp, &desc->pending);

                list_del(&desc->entry);
                kmem_cache_free(ccp->dma_desc_cache, desc);
        }
}

static void ccp_do_cleanup(unsigned long data)
{
        struct ccp_dma_chan *chan = (struct ccp_dma_chan *)data;
        unsigned long flags;

        dev_dbg(chan->ccp->dev, "%s - chan=%s\n", __func__,
                dma_chan_name(&chan->dma_chan));

        spin_lock_irqsave(&chan->lock, flags);

        ccp_cleanup_desc_resources(chan->ccp, &chan->complete);

        spin_unlock_irqrestore(&chan->lock, flags);
}

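/*
 * Submit the first pending command of a descriptor to the CCP.  A return of
 * -EINPROGRESS or -EBUSY from ccp_enqueue_cmd() still counts as success:
 * the command was accepted and will be processed (or backlogged) later.
 */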
static int ccp_issue_next_cmd(struct ccp_dma_desc *desc)
{
        struct ccp_dma_cmd *cmd;
        int ret;

        cmd = list_first_entry(&desc->pending, struct ccp_dma_cmd, entry);
        list_move(&cmd->entry, &desc->active);

        dev_dbg(desc->ccp->dev, "%s - tx %d, cmd=%p\n", __func__,
                desc->tx_desc.cookie, cmd);

        ret = ccp_enqueue_cmd(&cmd->ccp_cmd);
        if (!ret || (ret == -EINPROGRESS) || (ret == -EBUSY))
                return 0;

        dev_dbg(desc->ccp->dev, "%s - error: ret=%d, tx %d, cmd=%p\n", __func__,
                ret, desc->tx_desc.cookie, cmd);

        return ret;
}

static void ccp_free_active_cmd(struct ccp_dma_desc *desc)
{
        struct ccp_dma_cmd *cmd;

        cmd = list_first_entry_or_null(&desc->active, struct ccp_dma_cmd,
                                       entry);
        if (!cmd)
                return;

        dev_dbg(desc->ccp->dev, "%s - freeing tx %d cmd=%p\n",
                __func__, desc->tx_desc.cookie, cmd);

        list_del(&cmd->entry);
        kmem_cache_free(desc->ccp->dma_cmd_cache, cmd);
}

static struct ccp_dma_desc *__ccp_next_dma_desc(struct ccp_dma_chan *chan,
                                                struct ccp_dma_desc *desc)
{
        /* Move current DMA descriptor to the complete list */
        if (desc)
                list_move(&desc->entry, &chan->complete);

        /* Get the next DMA descriptor on the active list */
        desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
                                        entry);

        return desc;
}

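/*
 * Descriptors move through the per-channel lists in the order
 * created -> pending -> active -> complete.  ccp_handle_active_desc()
 * retires the current descriptor onto the complete list and returns the
 * next one that still has commands to issue (or NULL if none remain).
 */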
static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
                                                   struct ccp_dma_desc *desc)
{
        struct dma_async_tx_descriptor *tx_desc;
        unsigned long flags;

        /* Loop over descriptors until one is found with commands */
        do {
                if (desc) {
                        /* Remove the DMA command from the list and free it */
                        ccp_free_active_cmd(desc);

                        if (!list_empty(&desc->pending)) {
                                /* No errors, keep going */
                                if (desc->status != DMA_ERROR)
                                        return desc;

                                /* Error, free remaining commands and move on */
                                ccp_free_cmd_resources(desc->ccp,
                                                       &desc->pending);
                        }

                        tx_desc = &desc->tx_desc;
                } else {
                        tx_desc = NULL;
                }

                spin_lock_irqsave(&chan->lock, flags);

                if (desc) {
                        if (desc->status != DMA_ERROR)
                                desc->status = DMA_COMPLETE;

                        dev_dbg(desc->ccp->dev,
                                "%s - tx %d complete, status=%u\n", __func__,
                                desc->tx_desc.cookie, desc->status);

                        dma_cookie_complete(tx_desc);
                        dma_descriptor_unmap(tx_desc);
                }

                desc = __ccp_next_dma_desc(chan, desc);

                spin_unlock_irqrestore(&chan->lock, flags);

                if (tx_desc) {
                        dmaengine_desc_get_callback_invoke(tx_desc, NULL);

                        dma_run_dependencies(tx_desc);
                }
        } while (desc);

        return NULL;
}

static struct ccp_dma_desc *__ccp_pending_to_active(struct ccp_dma_chan *chan)
{
        struct ccp_dma_desc *desc;

        if (list_empty(&chan->pending))
                return NULL;

        desc = list_empty(&chan->active)
                ? list_first_entry(&chan->pending, struct ccp_dma_desc, entry)
                : NULL;

        list_splice_tail_init(&chan->pending, &chan->active);

        return desc;
}

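/*
 * Per-command completion callback invoked by the CCP core.  It retires
 * finished descriptors and keeps issuing commands until the channel runs
 * dry, is paused, or a submission error marks the descriptor DMA_ERROR.
 */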
static void ccp_cmd_callback(void *data, int err)
{
        struct ccp_dma_desc *desc = data;
        struct ccp_dma_chan *chan;
        int ret;

        if (err == -EINPROGRESS)
                return;

        chan = container_of(desc->tx_desc.chan, struct ccp_dma_chan,
                            dma_chan);

        dev_dbg(chan->ccp->dev, "%s - tx %d callback, err=%d\n",
                __func__, desc->tx_desc.cookie, err);

        if (err)
                desc->status = DMA_ERROR;

        while (true) {
                /* Check for DMA descriptor completion */
                desc = ccp_handle_active_desc(chan, desc);

                /* Don't submit cmd if no descriptor or DMA is paused */
                if (!desc || (chan->status == DMA_PAUSED))
                        break;

                ret = ccp_issue_next_cmd(desc);
                if (!ret)
                        break;

                desc->status = DMA_ERROR;
        }

        tasklet_schedule(&chan->cleanup_tasklet);
}

static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
{
        struct ccp_dma_desc *desc = container_of(tx_desc, struct ccp_dma_desc,
                                                 tx_desc);
        struct ccp_dma_chan *chan;
        dma_cookie_t cookie;
        unsigned long flags;

        chan = container_of(tx_desc->chan, struct ccp_dma_chan, dma_chan);

        spin_lock_irqsave(&chan->lock, flags);

        cookie = dma_cookie_assign(tx_desc);
        list_move_tail(&desc->entry, &chan->pending);

        spin_unlock_irqrestore(&chan->lock, flags);

        dev_dbg(chan->ccp->dev, "%s - added tx descriptor %d to pending list\n",
                __func__, cookie);

        return cookie;
}

static struct ccp_dma_cmd *ccp_alloc_dma_cmd(struct ccp_dma_chan *chan)
{
        struct ccp_dma_cmd *cmd;

        cmd = kmem_cache_alloc(chan->ccp->dma_cmd_cache, GFP_NOWAIT);
        if (cmd)
                memset(cmd, 0, sizeof(*cmd));

        return cmd;
}

static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan,
                                               unsigned long flags)
{
        struct ccp_dma_desc *desc;

        desc = kmem_cache_zalloc(chan->ccp->dma_desc_cache, GFP_NOWAIT);
        if (!desc)
                return NULL;

        dma_async_tx_descriptor_init(&desc->tx_desc, &chan->dma_chan);
        desc->tx_desc.flags = flags;
        desc->tx_desc.tx_submit = ccp_tx_submit;
        desc->ccp = chan->ccp;
        INIT_LIST_HEAD(&desc->entry);
        INIT_LIST_HEAD(&desc->pending);
        INIT_LIST_HEAD(&desc->active);
        desc->status = DMA_IN_PROGRESS;

        return desc;
}

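/*
 * Build a DMA descriptor from the source/destination scatterlists: each
 * overlapping chunk of min(src, dst) length becomes one no-map passthrough
 * command queued on the descriptor's pending list.
 */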
static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
                                            struct scatterlist *dst_sg,
                                            unsigned int dst_nents,
                                            struct scatterlist *src_sg,
                                            unsigned int src_nents,
                                            unsigned long flags)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        struct ccp_device *ccp = chan->ccp;
        struct ccp_dma_desc *desc;
        struct ccp_dma_cmd *cmd;
        struct ccp_cmd *ccp_cmd;
        struct ccp_passthru_nomap_engine *ccp_pt;
        unsigned int src_offset, src_len;
        unsigned int dst_offset, dst_len;
        unsigned int len;
        unsigned long sflags;
        size_t total_len;

        if (!dst_sg || !src_sg)
                return NULL;

        if (!dst_nents || !src_nents)
                return NULL;

        desc = ccp_alloc_dma_desc(chan, flags);
        if (!desc)
                return NULL;

        total_len = 0;

        src_len = sg_dma_len(src_sg);
        src_offset = 0;

        dst_len = sg_dma_len(dst_sg);
        dst_offset = 0;

        while (true) {
                if (!src_len) {
                        src_nents--;
                        if (!src_nents)
                                break;

                        src_sg = sg_next(src_sg);
                        if (!src_sg)
                                break;

                        src_len = sg_dma_len(src_sg);
                        src_offset = 0;
                }

                if (!dst_len) {
                        dst_nents--;
                        if (!dst_nents)
                                break;

                        dst_sg = sg_next(dst_sg);
                        if (!dst_sg)
                                break;

                        dst_len = sg_dma_len(dst_sg);
                        dst_offset = 0;
                }

                len = min(dst_len, src_len);

                cmd = ccp_alloc_dma_cmd(chan);
                if (!cmd)
                        goto err;

                ccp_cmd = &cmd->ccp_cmd;
                ccp_cmd->ccp = chan->ccp;
                ccp_pt = &ccp_cmd->u.passthru_nomap;
                ccp_cmd->flags = CCP_CMD_MAY_BACKLOG;
                ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP;
                ccp_cmd->engine = CCP_ENGINE_PASSTHRU;
                ccp_pt->bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
                ccp_pt->byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
                ccp_pt->src_dma = sg_dma_address(src_sg) + src_offset;
                ccp_pt->dst_dma = sg_dma_address(dst_sg) + dst_offset;
                ccp_pt->src_len = len;
                ccp_pt->final = 1;
                ccp_cmd->callback = ccp_cmd_callback;
                ccp_cmd->data = desc;

                list_add_tail(&cmd->entry, &desc->pending);

                dev_dbg(ccp->dev,
                        "%s - cmd=%p, src=%pad, dst=%pad, len=%llu\n", __func__,
                        cmd, &ccp_pt->src_dma,
                        &ccp_pt->dst_dma, ccp_pt->src_len);

                total_len += len;

                src_len -= len;
                src_offset += len;

                dst_len -= len;
                dst_offset += len;
        }

        desc->len = total_len;

        if (list_empty(&desc->pending))
                goto err;

        dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

        spin_lock_irqsave(&chan->lock, sflags);

        list_add_tail(&desc->entry, &chan->created);

        spin_unlock_irqrestore(&chan->lock, sflags);

        return desc;

err:
        ccp_free_cmd_resources(ccp, &desc->pending);
        kmem_cache_free(ccp->dma_desc_cache, desc);

        return NULL;
}

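/*
 * dmaengine callbacks: memcpy preparation wraps the dst/src addresses in
 * single-entry scatterlists and reuses ccp_create_desc() above.
 */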
static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy(
        struct dma_chan *dma_chan, dma_addr_t dst, dma_addr_t src, size_t len,
        unsigned long flags)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        struct ccp_dma_desc *desc;
        struct scatterlist dst_sg, src_sg;

        dev_dbg(chan->ccp->dev,
                "%s - src=%pad, dst=%pad, len=%zu, flags=%#lx\n",
                __func__, &src, &dst, len, flags);

        sg_init_table(&dst_sg, 1);
        sg_dma_address(&dst_sg) = dst;
        sg_dma_len(&dst_sg) = len;

        sg_init_table(&src_sg, 1);
        sg_dma_address(&src_sg) = src;
        sg_dma_len(&src_sg) = len;

        desc = ccp_create_desc(dma_chan, &dst_sg, 1, &src_sg, 1, flags);
        if (!desc)
                return NULL;

        return &desc->tx_desc;
}

static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt(
        struct dma_chan *dma_chan, unsigned long flags)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        struct ccp_dma_desc *desc;

        desc = ccp_alloc_dma_desc(chan, flags);
        if (!desc)
                return NULL;

        return &desc->tx_desc;
}

static void ccp_issue_pending(struct dma_chan *dma_chan)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        struct ccp_dma_desc *desc;
        unsigned long flags;

        dev_dbg(chan->ccp->dev, "%s\n", __func__);

        spin_lock_irqsave(&chan->lock, flags);

        desc = __ccp_pending_to_active(chan);

        spin_unlock_irqrestore(&chan->lock, flags);

        /* If there was nothing active, start processing */
        if (desc)
                ccp_cmd_callback(desc, 0);
}

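/*
 * Report transaction status: the generic cookie state is refined with the
 * per-descriptor status kept on the complete list, and a paused channel
 * always reports DMA_PAUSED.
 */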
static enum dma_status ccp_tx_status(struct dma_chan *dma_chan,
                                     dma_cookie_t cookie,
                                     struct dma_tx_state *state)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        struct ccp_dma_desc *desc;
        enum dma_status ret;
        unsigned long flags;

        if (chan->status == DMA_PAUSED) {
                ret = DMA_PAUSED;
                goto out;
        }

        ret = dma_cookie_status(dma_chan, cookie, state);
        if (ret == DMA_COMPLETE) {
                spin_lock_irqsave(&chan->lock, flags);

                /* Get status from complete chain, if still there */
                list_for_each_entry(desc, &chan->complete, entry) {
                        if (desc->tx_desc.cookie != cookie)
                                continue;

                        ret = desc->status;
                        break;
                }

                spin_unlock_irqrestore(&chan->lock, flags);
        }

out:
        dev_dbg(chan->ccp->dev, "%s - %u\n", __func__, ret);

        return ret;
}

static int ccp_pause(struct dma_chan *dma_chan)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);

        chan->status = DMA_PAUSED;

        /*TODO: Wait for active DMA to complete before returning? */

        return 0;
}

static int ccp_resume(struct dma_chan *dma_chan)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        struct ccp_dma_desc *desc;
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);

        desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
                                        entry);

        spin_unlock_irqrestore(&chan->lock, flags);

        /* Indicate the channel is running again */
        chan->status = DMA_IN_PROGRESS;

        /* If there was something active, re-start */
        if (desc)
                ccp_cmd_callback(desc, 0);

        return 0;
}

static int ccp_terminate_all(struct dma_chan *dma_chan)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        unsigned long flags;

        dev_dbg(chan->ccp->dev, "%s\n", __func__);

        /*TODO: Wait for active DMA to complete before continuing */

        spin_lock_irqsave(&chan->lock, flags);

        /*TODO: Purge the complete list? */
        ccp_free_desc_resources(chan->ccp, &chan->active);
        ccp_free_desc_resources(chan->ccp, &chan->pending);
        ccp_free_desc_resources(chan->ccp, &chan->created);

        spin_unlock_irqrestore(&chan->lock, flags);

        return 0;
}

static void ccp_dma_release(struct ccp_device *ccp)
{
        struct ccp_dma_chan *chan;
        struct dma_chan *dma_chan;
        unsigned int i;

        for (i = 0; i < ccp->cmd_q_count; i++) {
                chan = ccp->ccp_dma_chan + i;
                dma_chan = &chan->dma_chan;

                tasklet_kill(&chan->cleanup_tasklet);
                list_del_rcu(&dma_chan->device_node);
        }
}

static void ccp_dma_release_channels(struct ccp_device *ccp)
{
        struct ccp_dma_chan *chan;
        struct dma_chan *dma_chan;
        unsigned int i;

        for (i = 0; i < ccp->cmd_q_count; i++) {
                chan = ccp->ccp_dma_chan + i;
                dma_chan = &chan->dma_chan;

                if (dma_chan->client_count)
                        dma_release_channel(dma_chan);
        }
}

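/*
 * Register one DMA channel per CCP command queue with the dmaengine core.
 * Command and descriptor allocations come from dedicated kmem caches named
 * after the device.
 */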
int ccp_dmaengine_register(struct ccp_device *ccp)
{
        struct ccp_dma_chan *chan;
        struct dma_device *dma_dev = &ccp->dma_dev;
        struct dma_chan *dma_chan;
        char *dma_cmd_cache_name;
        char *dma_desc_cache_name;
        unsigned int i;
        int ret;

        if (!dmaengine)
                return 0;

        ccp->ccp_dma_chan = devm_kcalloc(ccp->dev, ccp->cmd_q_count,
                                         sizeof(*(ccp->ccp_dma_chan)),
                                         GFP_KERNEL);
        if (!ccp->ccp_dma_chan)
                return -ENOMEM;

        dma_cmd_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
                                            "%s-dmaengine-cmd-cache",
                                            ccp->name);
        if (!dma_cmd_cache_name)
                return -ENOMEM;

        ccp->dma_cmd_cache = kmem_cache_create(dma_cmd_cache_name,
                                               sizeof(struct ccp_dma_cmd),
                                               sizeof(void *),
                                               SLAB_HWCACHE_ALIGN, NULL);
        if (!ccp->dma_cmd_cache)
                return -ENOMEM;

        dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
                                             "%s-dmaengine-desc-cache",
                                             ccp->name);
        if (!dma_desc_cache_name) {
                ret = -ENOMEM;
                goto err_cache;
        }

        ccp->dma_desc_cache = kmem_cache_create(dma_desc_cache_name,
                                                sizeof(struct ccp_dma_desc),
                                                sizeof(void *),
                                                SLAB_HWCACHE_ALIGN, NULL);
        if (!ccp->dma_desc_cache) {
                ret = -ENOMEM;
                goto err_cache;
        }

        dma_dev->dev = ccp->dev;
        dma_dev->src_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
        dma_dev->dst_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
        dma_dev->directions = DMA_MEM_TO_MEM;
        dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
        dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
        dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

        /* The DMA channels for this device can be set to public or private,
         * and overridden by the module parameter dma_chan_attr.
         * Default: according to the value in vdata (dma_chan_attr=0)
         * dma_chan_attr=0x1: all channels private (override vdata)
         * dma_chan_attr=0x2: all channels public (override vdata)
         */
        if (ccp_get_dma_chan_attr(ccp) == DMA_PRIVATE)
                dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

        INIT_LIST_HEAD(&dma_dev->channels);
        for (i = 0; i < ccp->cmd_q_count; i++) {
                chan = ccp->ccp_dma_chan + i;
                dma_chan = &chan->dma_chan;

                chan->ccp = ccp;

                spin_lock_init(&chan->lock);
                INIT_LIST_HEAD(&chan->created);
                INIT_LIST_HEAD(&chan->pending);
                INIT_LIST_HEAD(&chan->active);
                INIT_LIST_HEAD(&chan->complete);

                tasklet_init(&chan->cleanup_tasklet, ccp_do_cleanup,
                             (unsigned long)chan);

                dma_chan->device = dma_dev;
                dma_cookie_init(dma_chan);

                list_add_tail(&dma_chan->device_node, &dma_dev->channels);
        }

        dma_dev->device_free_chan_resources = ccp_free_chan_resources;
        dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;
        dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;
        dma_dev->device_issue_pending = ccp_issue_pending;
        dma_dev->device_tx_status = ccp_tx_status;
        dma_dev->device_pause = ccp_pause;
        dma_dev->device_resume = ccp_resume;
        dma_dev->device_terminate_all = ccp_terminate_all;

        ret = dma_async_device_register(dma_dev);
        if (ret)
                goto err_reg;

        return 0;

err_reg:
        ccp_dma_release(ccp);
        kmem_cache_destroy(ccp->dma_desc_cache);

err_cache:
        kmem_cache_destroy(ccp->dma_cmd_cache);

        return ret;
}

void ccp_dmaengine_unregister(struct ccp_device *ccp)
{
        struct dma_device *dma_dev = &ccp->dma_dev;

        if (!dmaengine)
                return;

        ccp_dma_release_channels(ccp);
        dma_async_device_unregister(dma_dev);
        ccp_dma_release(ccp);

        kmem_cache_destroy(ccp->dma_desc_cache);
        kmem_cache_destroy(ccp->dma_cmd_cache);
}