/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/ccp.h>

#include "ccp-dev.h"
#include "../../dma/dmaengine.h"

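/*
 * Derive an address-width value from a DMA address mask for the dmaengine
 * src/dst_addr_widths fields: if mask + 1 wraps to zero the mask covers the
 * full 64 bits, otherwise fls64() of the incremented mask is used.
 */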
#define CCP_DMA_WIDTH(_mask)            \
({                                      \
        u64 mask = _mask + 1;           \
        (mask == 0) ? 64 : fls64(mask); \
})

static void ccp_free_cmd_resources(struct ccp_device *ccp,
                                   struct list_head *list)
{
        struct ccp_dma_cmd *cmd, *ctmp;

        list_for_each_entry_safe(cmd, ctmp, list, entry) {
                list_del(&cmd->entry);
                kmem_cache_free(ccp->dma_cmd_cache, cmd);
        }
}

static void ccp_free_desc_resources(struct ccp_device *ccp,
                                    struct list_head *list)
{
        struct ccp_dma_desc *desc, *dtmp;

        list_for_each_entry_safe(desc, dtmp, list, entry) {
                ccp_free_cmd_resources(ccp, &desc->active);
                ccp_free_cmd_resources(ccp, &desc->pending);

                list_del(&desc->entry);
                kmem_cache_free(ccp->dma_desc_cache, desc);
        }
}

static void ccp_free_chan_resources(struct dma_chan *dma_chan)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        unsigned long flags;

        dev_dbg(chan->ccp->dev, "%s - chan=%p\n", __func__, chan);

        spin_lock_irqsave(&chan->lock, flags);

        ccp_free_desc_resources(chan->ccp, &chan->complete);
        ccp_free_desc_resources(chan->ccp, &chan->active);
        ccp_free_desc_resources(chan->ccp, &chan->pending);

        spin_unlock_irqrestore(&chan->lock, flags);
}

static void ccp_cleanup_desc_resources(struct ccp_device *ccp,
                                       struct list_head *list)
{
        struct ccp_dma_desc *desc, *dtmp;

        list_for_each_entry_safe_reverse(desc, dtmp, list, entry) {
                if (!async_tx_test_ack(&desc->tx_desc))
                        continue;

                dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

                ccp_free_cmd_resources(ccp, &desc->active);
                ccp_free_cmd_resources(ccp, &desc->pending);

                list_del(&desc->entry);
                kmem_cache_free(ccp->dma_desc_cache, desc);
        }
}

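/*
 * Cleanup tasklet handler: under the channel lock, free completed
 * descriptors that the client has acknowledged (async_tx_test_ack()),
 * deferred out of the command-callback path.
 */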
static void ccp_do_cleanup(unsigned long data)
{
        struct ccp_dma_chan *chan = (struct ccp_dma_chan *)data;
        unsigned long flags;

        dev_dbg(chan->ccp->dev, "%s - chan=%s\n", __func__,
                dma_chan_name(&chan->dma_chan));

        spin_lock_irqsave(&chan->lock, flags);

        ccp_cleanup_desc_resources(chan->ccp, &chan->complete);

        spin_unlock_irqrestore(&chan->lock, flags);
}

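/*
 * Move the first pending command of a descriptor onto its active list and
 * hand it to the CCP request queue; -EINPROGRESS and -EBUSY from
 * ccp_enqueue_cmd() count as success since the command was accepted
 * (possibly backlogged).
 */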
static int ccp_issue_next_cmd(struct ccp_dma_desc *desc)
{
        struct ccp_dma_cmd *cmd;
        int ret;

        cmd = list_first_entry(&desc->pending, struct ccp_dma_cmd, entry);
        list_move(&cmd->entry, &desc->active);

        dev_dbg(desc->ccp->dev, "%s - tx %d, cmd=%p\n", __func__,
                desc->tx_desc.cookie, cmd);

        ret = ccp_enqueue_cmd(&cmd->ccp_cmd);
        if (!ret || (ret == -EINPROGRESS) || (ret == -EBUSY))
                return 0;

        dev_dbg(desc->ccp->dev, "%s - error: ret=%d, tx %d, cmd=%p\n", __func__,
                ret, desc->tx_desc.cookie, cmd);

        return ret;
}

static void ccp_free_active_cmd(struct ccp_dma_desc *desc)
{
        struct ccp_dma_cmd *cmd;

        cmd = list_first_entry_or_null(&desc->active, struct ccp_dma_cmd,
                                       entry);
        if (!cmd)
                return;

        dev_dbg(desc->ccp->dev, "%s - freeing tx %d cmd=%p\n",
                __func__, desc->tx_desc.cookie, cmd);

        list_del(&cmd->entry);
        kmem_cache_free(desc->ccp->dma_cmd_cache, cmd);
}

static struct ccp_dma_desc *__ccp_next_dma_desc(struct ccp_dma_chan *chan,
                                                struct ccp_dma_desc *desc)
{
        /* Move current DMA descriptor to the complete list */
        if (desc)
                list_move(&desc->entry, &chan->complete);

        /* Get the next DMA descriptor on the active list */
        desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
                                        entry);

        return desc;
}

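/*
 * Retire the just-finished command of the current descriptor.  If the
 * descriptor still has pending commands (and no error), return it so the
 * next command can be issued; otherwise mark it complete, run its dmaengine
 * callback and dependencies, and advance to the next active descriptor.
 */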
static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
                                                   struct ccp_dma_desc *desc)
{
        struct dma_async_tx_descriptor *tx_desc;
        unsigned long flags;

        /* Loop over descriptors until one is found with commands */
        do {
                if (desc) {
                        /* Remove the DMA command from the list and free it */
                        ccp_free_active_cmd(desc);

                        if (!list_empty(&desc->pending)) {
                                /* No errors, keep going */
                                if (desc->status != DMA_ERROR)
                                        return desc;

                                /* Error, free remaining commands and move on */
                                ccp_free_cmd_resources(desc->ccp,
                                                       &desc->pending);
                        }

                        tx_desc = &desc->tx_desc;
                } else {
                        tx_desc = NULL;
                }

                spin_lock_irqsave(&chan->lock, flags);

                if (desc) {
                        if (desc->status != DMA_ERROR)
                                desc->status = DMA_COMPLETE;

                        dev_dbg(desc->ccp->dev,
                                "%s - tx %d complete, status=%u\n", __func__,
                                desc->tx_desc.cookie, desc->status);

                        dma_cookie_complete(tx_desc);
                }

                desc = __ccp_next_dma_desc(chan, desc);

                spin_unlock_irqrestore(&chan->lock, flags);

                if (tx_desc) {
                        if (tx_desc->callback &&
                            (tx_desc->flags & DMA_PREP_INTERRUPT))
                                tx_desc->callback(tx_desc->callback_param);

                        dma_run_dependencies(tx_desc);
                }
        } while (desc);

        return NULL;
}

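/*
 * Splice the channel's pending descriptors onto its active list.  Returns
 * the first newly-activated descriptor if the active list was previously
 * empty (processing needs to be kicked off), otherwise NULL.
 */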
static struct ccp_dma_desc *__ccp_pending_to_active(struct ccp_dma_chan *chan)
{
        struct ccp_dma_desc *desc;

        if (list_empty(&chan->pending))
                return NULL;

        desc = list_empty(&chan->active)
                ? list_first_entry(&chan->pending, struct ccp_dma_desc, entry)
                : NULL;

        list_splice_tail_init(&chan->pending, &chan->active);

        return desc;
}

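/*
 * Per-command completion callback invoked by the CCP core.  A non-zero
 * error marks the descriptor DMA_ERROR; the loop then retires finished
 * descriptors and keeps issuing commands until one is successfully queued,
 * nothing is left to do, or the channel is paused.
 */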
static void ccp_cmd_callback(void *data, int err)
{
        struct ccp_dma_desc *desc = data;
        struct ccp_dma_chan *chan;
        int ret;

        if (err == -EINPROGRESS)
                return;

        chan = container_of(desc->tx_desc.chan, struct ccp_dma_chan,
                            dma_chan);

        dev_dbg(chan->ccp->dev, "%s - tx %d callback, err=%d\n",
                __func__, desc->tx_desc.cookie, err);

        if (err)
                desc->status = DMA_ERROR;

        while (true) {
                /* Check for DMA descriptor completion */
                desc = ccp_handle_active_desc(chan, desc);

                /* Don't submit cmd if no descriptor or DMA is paused */
                if (!desc || (chan->status == DMA_PAUSED))
                        break;

                ret = ccp_issue_next_cmd(desc);
                if (!ret)
                        break;

                desc->status = DMA_ERROR;
        }

        tasklet_schedule(&chan->cleanup_tasklet);
}

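/*
 * dmaengine tx_submit hook: assign a cookie and queue the descriptor on the
 * channel's pending list.  Nothing is issued to the hardware until
 * ccp_issue_pending() is called.
 */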
static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
{
        struct ccp_dma_desc *desc = container_of(tx_desc, struct ccp_dma_desc,
                                                 tx_desc);
        struct ccp_dma_chan *chan;
        dma_cookie_t cookie;
        unsigned long flags;

        chan = container_of(tx_desc->chan, struct ccp_dma_chan, dma_chan);

        spin_lock_irqsave(&chan->lock, flags);

        cookie = dma_cookie_assign(tx_desc);
        list_add_tail(&desc->entry, &chan->pending);

        spin_unlock_irqrestore(&chan->lock, flags);

        dev_dbg(chan->ccp->dev, "%s - added tx descriptor %d to pending list\n",
                __func__, cookie);

        return cookie;
}

static struct ccp_dma_cmd *ccp_alloc_dma_cmd(struct ccp_dma_chan *chan)
{
        struct ccp_dma_cmd *cmd;

        cmd = kmem_cache_alloc(chan->ccp->dma_cmd_cache, GFP_NOWAIT);
        if (cmd)
                memset(cmd, 0, sizeof(*cmd));

        return cmd;
}

static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan,
                                               unsigned long flags)
{
        struct ccp_dma_desc *desc;

        desc = kmem_cache_zalloc(chan->ccp->dma_desc_cache, GFP_NOWAIT);
        if (!desc)
                return NULL;

        dma_async_tx_descriptor_init(&desc->tx_desc, &chan->dma_chan);
        desc->tx_desc.flags = flags;
        desc->tx_desc.tx_submit = ccp_tx_submit;
        desc->ccp = chan->ccp;
        INIT_LIST_HEAD(&desc->pending);
        INIT_LIST_HEAD(&desc->active);
        desc->status = DMA_IN_PROGRESS;

        return desc;
}

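/*
 * Build a DMA descriptor for a copy described by source and destination
 * scatterlists: walk both lists and, for each overlapping chunk, allocate a
 * CCP passthrough (no-op transform) command that copies min(src, dst) bytes,
 * chaining the commands on the descriptor's pending list.
 */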
static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
                                            struct scatterlist *dst_sg,
                                            unsigned int dst_nents,
                                            struct scatterlist *src_sg,
                                            unsigned int src_nents,
                                            unsigned long flags)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        struct ccp_device *ccp = chan->ccp;
        struct ccp_dma_desc *desc;
        struct ccp_dma_cmd *cmd;
        struct ccp_cmd *ccp_cmd;
        struct ccp_passthru_nomap_engine *ccp_pt;
        unsigned int src_offset, src_len;
        unsigned int dst_offset, dst_len;
        unsigned int len;
        unsigned long sflags;
        size_t total_len;

        if (!dst_sg || !src_sg)
                return NULL;

        if (!dst_nents || !src_nents)
                return NULL;

        desc = ccp_alloc_dma_desc(chan, flags);
        if (!desc)
                return NULL;

        total_len = 0;

        src_len = sg_dma_len(src_sg);
        src_offset = 0;

        dst_len = sg_dma_len(dst_sg);
        dst_offset = 0;

        while (true) {
                if (!src_len) {
                        src_nents--;
                        if (!src_nents)
                                break;

                        src_sg = sg_next(src_sg);
                        if (!src_sg)
                                break;

                        src_len = sg_dma_len(src_sg);
                        src_offset = 0;
                }

                if (!dst_len) {
                        dst_nents--;
                        if (!dst_nents)
                                break;

                        dst_sg = sg_next(dst_sg);
                        if (!dst_sg)
                                break;

                        dst_len = sg_dma_len(dst_sg);
                        dst_offset = 0;
                }

                len = min(dst_len, src_len);

                cmd = ccp_alloc_dma_cmd(chan);
                if (!cmd)
                        goto err;

                ccp_cmd = &cmd->ccp_cmd;
                ccp_pt = &ccp_cmd->u.passthru_nomap;
                ccp_cmd->flags = CCP_CMD_MAY_BACKLOG;
                ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP;
                ccp_cmd->engine = CCP_ENGINE_PASSTHRU;
                ccp_pt->bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
                ccp_pt->byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
                ccp_pt->src_dma = sg_dma_address(src_sg) + src_offset;
                ccp_pt->dst_dma = sg_dma_address(dst_sg) + dst_offset;
                ccp_pt->src_len = len;
                ccp_pt->final = 1;
                ccp_cmd->callback = ccp_cmd_callback;
                ccp_cmd->data = desc;

                list_add_tail(&cmd->entry, &desc->pending);

                dev_dbg(ccp->dev,
                        "%s - cmd=%p, src=%pad, dst=%pad, len=%llu\n", __func__,
                        cmd, &ccp_pt->src_dma,
                        &ccp_pt->dst_dma, ccp_pt->src_len);

                total_len += len;

                src_len -= len;
                src_offset += len;

                dst_len -= len;
                dst_offset += len;
        }

        desc->len = total_len;

        if (list_empty(&desc->pending))
                goto err;

        dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

        spin_lock_irqsave(&chan->lock, sflags);

        list_add_tail(&desc->entry, &chan->pending);

        spin_unlock_irqrestore(&chan->lock, sflags);

        return desc;

err:
        ccp_free_cmd_resources(ccp, &desc->pending);
        kmem_cache_free(ccp->dma_desc_cache, desc);

        return NULL;
}

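/*
 * dmaengine memcpy prep: wrap the single src/dst address pair in one-entry
 * scatterlists and reuse ccp_create_desc() to build the descriptor.
 */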
static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy(
        struct dma_chan *dma_chan, dma_addr_t dst, dma_addr_t src, size_t len,
        unsigned long flags)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        struct ccp_dma_desc *desc;
        struct scatterlist dst_sg, src_sg;

        dev_dbg(chan->ccp->dev,
                "%s - src=%pad, dst=%pad, len=%zu, flags=%#lx\n",
                __func__, &src, &dst, len, flags);

        sg_init_table(&dst_sg, 1);
        sg_dma_address(&dst_sg) = dst;
        sg_dma_len(&dst_sg) = len;

        sg_init_table(&src_sg, 1);
        sg_dma_address(&src_sg) = src;
        sg_dma_len(&src_sg) = len;

        desc = ccp_create_desc(dma_chan, &dst_sg, 1, &src_sg, 1, flags);
        if (!desc)
                return NULL;

        return &desc->tx_desc;
}

static struct dma_async_tx_descriptor *ccp_prep_dma_sg(
        struct dma_chan *dma_chan, struct scatterlist *dst_sg,
        unsigned int dst_nents, struct scatterlist *src_sg,
        unsigned int src_nents, unsigned long flags)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        struct ccp_dma_desc *desc;

        dev_dbg(chan->ccp->dev,
                "%s - src=%p, src_nents=%u dst=%p, dst_nents=%u, flags=%#lx\n",
                __func__, src_sg, src_nents, dst_sg, dst_nents, flags);

        desc = ccp_create_desc(dma_chan, dst_sg, dst_nents, src_sg, src_nents,
                               flags);
        if (!desc)
                return NULL;

        return &desc->tx_desc;
}

static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt(
        struct dma_chan *dma_chan, unsigned long flags)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        struct ccp_dma_desc *desc;

        desc = ccp_alloc_dma_desc(chan, flags);
        if (!desc)
                return NULL;

        return &desc->tx_desc;
}

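/*
 * Move everything queued by tx_submit onto the active list and, if the
 * channel was previously idle, kick off processing by running the command
 * callback with a zero status.
 */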
static void ccp_issue_pending(struct dma_chan *dma_chan)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        struct ccp_dma_desc *desc;
        unsigned long flags;

        dev_dbg(chan->ccp->dev, "%s\n", __func__);

        spin_lock_irqsave(&chan->lock, flags);

        desc = __ccp_pending_to_active(chan);

        spin_unlock_irqrestore(&chan->lock, flags);

        /* If there was nothing active, start processing */
        if (desc)
                ccp_cmd_callback(desc, 0);
}

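/*
 * Report transaction status: a paused channel reports DMA_PAUSED; otherwise
 * the cookie status is refined by checking the complete list, where a
 * descriptor that finished in error still carries DMA_ERROR.
 */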
static enum dma_status ccp_tx_status(struct dma_chan *dma_chan,
                                     dma_cookie_t cookie,
                                     struct dma_tx_state *state)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        struct ccp_dma_desc *desc;
        enum dma_status ret;
        unsigned long flags;

        if (chan->status == DMA_PAUSED) {
                ret = DMA_PAUSED;
                goto out;
        }

        ret = dma_cookie_status(dma_chan, cookie, state);
        if (ret == DMA_COMPLETE) {
                spin_lock_irqsave(&chan->lock, flags);

                /* Get status from complete chain, if still there */
                list_for_each_entry(desc, &chan->complete, entry) {
                        if (desc->tx_desc.cookie != cookie)
                                continue;

                        ret = desc->status;
                        break;
                }

                spin_unlock_irqrestore(&chan->lock, flags);
        }

out:
        dev_dbg(chan->ccp->dev, "%s - %u\n", __func__, ret);

        return ret;
}

static int ccp_pause(struct dma_chan *dma_chan)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);

        chan->status = DMA_PAUSED;

        /*TODO: Wait for active DMA to complete before returning? */

        return 0;
}

static int ccp_resume(struct dma_chan *dma_chan)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        struct ccp_dma_desc *desc;
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);

        desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
                                        entry);

        spin_unlock_irqrestore(&chan->lock, flags);

        /* Indicate the channel is running again */
        chan->status = DMA_IN_PROGRESS;

        /* If there was something active, re-start */
        if (desc)
                ccp_cmd_callback(desc, 0);

        return 0;
}

static int ccp_terminate_all(struct dma_chan *dma_chan)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        unsigned long flags;

        dev_dbg(chan->ccp->dev, "%s\n", __func__);

        /*TODO: Wait for active DMA to complete before continuing */

        spin_lock_irqsave(&chan->lock, flags);

        /*TODO: Purge the complete list? */
        ccp_free_desc_resources(chan->ccp, &chan->active);
        ccp_free_desc_resources(chan->ccp, &chan->pending);

        spin_unlock_irqrestore(&chan->lock, flags);

        return 0;
}

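/*
 * Register the CCP with the dmaengine framework: allocate per-command-queue
 * channels and the command/descriptor slab caches, advertise MEMCPY, SG and
 * INTERRUPT capabilities, wire up the device_* callbacks, and register the
 * dma_device.
 */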
int ccp_dmaengine_register(struct ccp_device *ccp)
{
        struct ccp_dma_chan *chan;
        struct dma_device *dma_dev = &ccp->dma_dev;
        struct dma_chan *dma_chan;
        char *dma_cmd_cache_name;
        char *dma_desc_cache_name;
        unsigned int i;
        int ret;

        ccp->ccp_dma_chan = devm_kcalloc(ccp->dev, ccp->cmd_q_count,
                                         sizeof(*(ccp->ccp_dma_chan)),
                                         GFP_KERNEL);
        if (!ccp->ccp_dma_chan)
                return -ENOMEM;

        dma_cmd_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
                                            "%s-dmaengine-cmd-cache",
                                            ccp->name);
        if (!dma_cmd_cache_name)
                return -ENOMEM;

        ccp->dma_cmd_cache = kmem_cache_create(dma_cmd_cache_name,
                                               sizeof(struct ccp_dma_cmd),
                                               sizeof(void *),
                                               SLAB_HWCACHE_ALIGN, NULL);
        if (!ccp->dma_cmd_cache)
                return -ENOMEM;

        dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
                                             "%s-dmaengine-desc-cache",
                                             ccp->name);
        if (!dma_desc_cache_name) {
                ret = -ENOMEM;
                goto err_cache;
        }

        ccp->dma_desc_cache = kmem_cache_create(dma_desc_cache_name,
                                                sizeof(struct ccp_dma_desc),
                                                sizeof(void *),
                                                SLAB_HWCACHE_ALIGN, NULL);
        if (!ccp->dma_desc_cache) {
                ret = -ENOMEM;
                goto err_cache;
        }

        dma_dev->dev = ccp->dev;
        dma_dev->src_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
        dma_dev->dst_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
        dma_dev->directions = DMA_MEM_TO_MEM;
        dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
        dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
        dma_cap_set(DMA_SG, dma_dev->cap_mask);
        dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

        INIT_LIST_HEAD(&dma_dev->channels);
        for (i = 0; i < ccp->cmd_q_count; i++) {
                chan = ccp->ccp_dma_chan + i;
                dma_chan = &chan->dma_chan;

                chan->ccp = ccp;

                spin_lock_init(&chan->lock);
                INIT_LIST_HEAD(&chan->pending);
                INIT_LIST_HEAD(&chan->active);
                INIT_LIST_HEAD(&chan->complete);

                tasklet_init(&chan->cleanup_tasklet, ccp_do_cleanup,
                             (unsigned long)chan);

                dma_chan->device = dma_dev;
                dma_cookie_init(dma_chan);

                list_add_tail(&dma_chan->device_node, &dma_dev->channels);
        }

        dma_dev->device_free_chan_resources = ccp_free_chan_resources;
        dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;
        dma_dev->device_prep_dma_sg = ccp_prep_dma_sg;
        dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;
        dma_dev->device_issue_pending = ccp_issue_pending;
        dma_dev->device_tx_status = ccp_tx_status;
        dma_dev->device_pause = ccp_pause;
        dma_dev->device_resume = ccp_resume;
        dma_dev->device_terminate_all = ccp_terminate_all;

        ret = dma_async_device_register(dma_dev);
        if (ret)
                goto err_reg;

        return 0;

err_reg:
        kmem_cache_destroy(ccp->dma_desc_cache);

err_cache:
        kmem_cache_destroy(ccp->dma_cmd_cache);

        return ret;
}

void ccp_dmaengine_unregister(struct ccp_device *ccp)
{
        struct dma_device *dma_dev = &ccp->dma_dev;

        dma_async_device_unregister(dma_dev);

        kmem_cache_destroy(ccp->dma_desc_cache);
        kmem_cache_destroy(ccp->dma_cmd_cache);
}