/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/ccp.h>

#include "ccp-dev.h"
#include "../../dma/dmaengine.h"
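
/*
 * Translate the device DMA mask into the value reported in the dma_device
 * src/dst_addr_widths fields below: a full 64-bit mask wraps to zero when
 * incremented and is reported as 64; anything smaller is reported as
 * fls64() of the incremented mask.
 */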

#define CCP_DMA_WIDTH(_mask)		\
({					\
	u64 mask = _mask + 1;		\
	(mask == 0) ? 64 : fls64(mask);	\
})
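
/*
 * Descriptor bookkeeping, as implemented below: each DMA transaction is a
 * ccp_dma_desc carrying a list of ccp_dma_cmd passthrough commands.  A
 * descriptor sits on the channel's "created" list after preparation, moves
 * to "pending" when the transaction is submitted, to "active" when
 * issue_pending splices the pending list over, and to "complete" once all
 * of its commands have finished, where the cleanup tasklet frees it after
 * the client has acknowledged the transaction.
 */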

static void ccp_free_cmd_resources(struct ccp_device *ccp,
				   struct list_head *list)
{
	struct ccp_dma_cmd *cmd, *ctmp;

	list_for_each_entry_safe(cmd, ctmp, list, entry) {
		list_del(&cmd->entry);
		kmem_cache_free(ccp->dma_cmd_cache, cmd);
	}
}

static void ccp_free_desc_resources(struct ccp_device *ccp,
				    struct list_head *list)
{
	struct ccp_dma_desc *desc, *dtmp;

	list_for_each_entry_safe(desc, dtmp, list, entry) {
		ccp_free_cmd_resources(ccp, &desc->active);
		ccp_free_cmd_resources(ccp, &desc->pending);

		list_del(&desc->entry);
		kmem_cache_free(ccp->dma_desc_cache, desc);
	}
}

static void ccp_free_chan_resources(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s - chan=%p\n", __func__, chan);

	spin_lock_irqsave(&chan->lock, flags);

	ccp_free_desc_resources(chan->ccp, &chan->complete);
	ccp_free_desc_resources(chan->ccp, &chan->active);
	ccp_free_desc_resources(chan->ccp, &chan->pending);
	ccp_free_desc_resources(chan->ccp, &chan->created);

	spin_unlock_irqrestore(&chan->lock, flags);
}

static void ccp_cleanup_desc_resources(struct ccp_device *ccp,
				       struct list_head *list)
{
	struct ccp_dma_desc *desc, *dtmp;

	list_for_each_entry_safe_reverse(desc, dtmp, list, entry) {
		if (!async_tx_test_ack(&desc->tx_desc))
			continue;

		dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

		ccp_free_cmd_resources(ccp, &desc->active);
		ccp_free_cmd_resources(ccp, &desc->pending);

		list_del(&desc->entry);
		kmem_cache_free(ccp->dma_desc_cache, desc);
	}
}

static void ccp_do_cleanup(unsigned long data)
{
	struct ccp_dma_chan *chan = (struct ccp_dma_chan *)data;
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s - chan=%s\n", __func__,
		dma_chan_name(&chan->dma_chan));

	spin_lock_irqsave(&chan->lock, flags);

	ccp_cleanup_desc_resources(chan->ccp, &chan->complete);

	spin_unlock_irqrestore(&chan->lock, flags);
}

static int ccp_issue_next_cmd(struct ccp_dma_desc *desc)
{
	struct ccp_dma_cmd *cmd;
	int ret;

	cmd = list_first_entry(&desc->pending, struct ccp_dma_cmd, entry);
	list_move(&cmd->entry, &desc->active);

	dev_dbg(desc->ccp->dev, "%s - tx %d, cmd=%p\n", __func__,
		desc->tx_desc.cookie, cmd);

	ret = ccp_enqueue_cmd(&cmd->ccp_cmd);
	if (!ret || (ret == -EINPROGRESS) || (ret == -EBUSY))
		return 0;

	dev_dbg(desc->ccp->dev, "%s - error: ret=%d, tx %d, cmd=%p\n", __func__,
		ret, desc->tx_desc.cookie, cmd);

	return ret;
}

static void ccp_free_active_cmd(struct ccp_dma_desc *desc)
{
	struct ccp_dma_cmd *cmd;

	cmd = list_first_entry_or_null(&desc->active, struct ccp_dma_cmd,
				       entry);
	if (!cmd)
		return;

	dev_dbg(desc->ccp->dev, "%s - freeing tx %d cmd=%p\n",
		__func__, desc->tx_desc.cookie, cmd);

	list_del(&cmd->entry);
	kmem_cache_free(desc->ccp->dma_cmd_cache, cmd);
}

static struct ccp_dma_desc *__ccp_next_dma_desc(struct ccp_dma_chan *chan,
						struct ccp_dma_desc *desc)
{
	/* Move current DMA descriptor to the complete list */
	if (desc)
		list_move(&desc->entry, &chan->complete);

	/* Get the next DMA descriptor on the active list */
	desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
					entry);

	return desc;
}

static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
						   struct ccp_dma_desc *desc)
{
	struct dma_async_tx_descriptor *tx_desc;
	unsigned long flags;

	/* Loop over descriptors until one is found with commands */
	do {
		if (desc) {
			/* Remove the DMA command from the list and free it */
			ccp_free_active_cmd(desc);

			if (!list_empty(&desc->pending)) {
				/* No errors, keep going */
				if (desc->status != DMA_ERROR)
					return desc;

				/* Error, free remaining commands and move on */
				ccp_free_cmd_resources(desc->ccp,
						       &desc->pending);
			}

			tx_desc = &desc->tx_desc;
		} else {
			tx_desc = NULL;
		}

		spin_lock_irqsave(&chan->lock, flags);

		if (desc) {
			if (desc->status != DMA_ERROR)
				desc->status = DMA_COMPLETE;

			dev_dbg(desc->ccp->dev,
				"%s - tx %d complete, status=%u\n", __func__,
				desc->tx_desc.cookie, desc->status);

			dma_cookie_complete(tx_desc);
		}

		desc = __ccp_next_dma_desc(chan, desc);

		spin_unlock_irqrestore(&chan->lock, flags);

		if (tx_desc) {
			if (tx_desc->callback &&
			    (tx_desc->flags & DMA_PREP_INTERRUPT))
				tx_desc->callback(tx_desc->callback_param);

			dma_run_dependencies(tx_desc);
		}
	} while (desc);

	return NULL;
}

static struct ccp_dma_desc *__ccp_pending_to_active(struct ccp_dma_chan *chan)
{
	struct ccp_dma_desc *desc;

	if (list_empty(&chan->pending))
		return NULL;

	desc = list_empty(&chan->active)
		? list_first_entry(&chan->pending, struct ccp_dma_desc, entry)
		: NULL;

	list_splice_tail_init(&chan->pending, &chan->active);

	return desc;
}
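
/*
 * Completion callback attached to every ccp_cmd.  Each time the CCP finishes
 * (or fails) one command, this walks the descriptor chain through
 * ccp_handle_active_desc() and, unless the channel is paused, issues the
 * next pending command, so a transaction is driven one command at a time
 * from callback context.
 */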

static void ccp_cmd_callback(void *data, int err)
{
	struct ccp_dma_desc *desc = data;
	struct ccp_dma_chan *chan;
	int ret;

	if (err == -EINPROGRESS)
		return;

	chan = container_of(desc->tx_desc.chan, struct ccp_dma_chan,
			    dma_chan);

	dev_dbg(chan->ccp->dev, "%s - tx %d callback, err=%d\n",
		__func__, desc->tx_desc.cookie, err);

	if (err)
		desc->status = DMA_ERROR;

	while (true) {
		/* Check for DMA descriptor completion */
		desc = ccp_handle_active_desc(chan, desc);

		/* Don't submit cmd if no descriptor or DMA is paused */
		if (!desc || (chan->status == DMA_PAUSED))
			break;

		ret = ccp_issue_next_cmd(desc);
		if (!ret)
			break;

		desc->status = DMA_ERROR;
	}

	tasklet_schedule(&chan->cleanup_tasklet);
}
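
/* Assign a cookie and move the descriptor from "created" to "pending". */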

static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
{
	struct ccp_dma_desc *desc = container_of(tx_desc, struct ccp_dma_desc,
						 tx_desc);
	struct ccp_dma_chan *chan;
	dma_cookie_t cookie;
	unsigned long flags;

	chan = container_of(tx_desc->chan, struct ccp_dma_chan, dma_chan);

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx_desc);
	list_del(&desc->entry);
	list_add_tail(&desc->entry, &chan->pending);

	spin_unlock_irqrestore(&chan->lock, flags);

	dev_dbg(chan->ccp->dev, "%s - added tx descriptor %d to pending list\n",
		__func__, cookie);

	return cookie;
}

static struct ccp_dma_cmd *ccp_alloc_dma_cmd(struct ccp_dma_chan *chan)
{
	struct ccp_dma_cmd *cmd;

	cmd = kmem_cache_alloc(chan->ccp->dma_cmd_cache, GFP_NOWAIT);
	if (cmd)
		memset(cmd, 0, sizeof(*cmd));

	return cmd;
}

static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan,
					       unsigned long flags)
{
	struct ccp_dma_desc *desc;

	desc = kmem_cache_zalloc(chan->ccp->dma_desc_cache, GFP_NOWAIT);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->tx_desc, &chan->dma_chan);
	desc->tx_desc.flags = flags;
	desc->tx_desc.tx_submit = ccp_tx_submit;
	desc->ccp = chan->ccp;
	INIT_LIST_HEAD(&desc->pending);
	INIT_LIST_HEAD(&desc->active);
	desc->status = DMA_IN_PROGRESS;

	return desc;
}
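
/*
 * Build a descriptor for a src/dst scatterlist pair.  The two lists are
 * walked in lockstep and each overlapping stretch of min(dst_len, src_len)
 * bytes becomes one no-map passthrough command, so a single transaction
 * may expand into many CCP commands.
 */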

static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
					    struct scatterlist *dst_sg,
					    unsigned int dst_nents,
					    struct scatterlist *src_sg,
					    unsigned int src_nents,
					    unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_device *ccp = chan->ccp;
	struct ccp_dma_desc *desc;
	struct ccp_dma_cmd *cmd;
	struct ccp_cmd *ccp_cmd;
	struct ccp_passthru_nomap_engine *ccp_pt;
	unsigned int src_offset, src_len;
	unsigned int dst_offset, dst_len;
	unsigned int len;
	unsigned long sflags;
	size_t total_len;

	if (!dst_sg || !src_sg)
		return NULL;

	if (!dst_nents || !src_nents)
		return NULL;

	desc = ccp_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	total_len = 0;

	src_len = sg_dma_len(src_sg);
	src_offset = 0;

	dst_len = sg_dma_len(dst_sg);
	dst_offset = 0;

	while (true) {
		if (!src_len) {
			src_nents--;
			if (!src_nents)
				break;

			src_sg = sg_next(src_sg);
			if (!src_sg)
				break;

			src_len = sg_dma_len(src_sg);
			src_offset = 0;
			continue;
		}

		if (!dst_len) {
			dst_nents--;
			if (!dst_nents)
				break;

			dst_sg = sg_next(dst_sg);
			if (!dst_sg)
				break;

			dst_len = sg_dma_len(dst_sg);
			dst_offset = 0;
			continue;
		}

		len = min(dst_len, src_len);

		cmd = ccp_alloc_dma_cmd(chan);
		if (!cmd)
			goto err;

		ccp_cmd = &cmd->ccp_cmd;
		ccp_cmd->ccp = chan->ccp;
		ccp_pt = &ccp_cmd->u.passthru_nomap;
		ccp_cmd->flags = CCP_CMD_MAY_BACKLOG;
		ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP;
		ccp_cmd->engine = CCP_ENGINE_PASSTHRU;
		ccp_pt->bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		ccp_pt->byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
		ccp_pt->src_dma = sg_dma_address(src_sg) + src_offset;
		ccp_pt->dst_dma = sg_dma_address(dst_sg) + dst_offset;
		ccp_pt->src_len = len;
		ccp_pt->final = 1;
		ccp_cmd->callback = ccp_cmd_callback;
		ccp_cmd->data = desc;

		list_add_tail(&cmd->entry, &desc->pending);

		dev_dbg(ccp->dev,
			"%s - cmd=%p, src=%pad, dst=%pad, len=%llu\n", __func__,
			cmd, &ccp_pt->src_dma,
			&ccp_pt->dst_dma, ccp_pt->src_len);

		total_len += len;

		src_len -= len;
		src_offset += len;

		dst_len -= len;
		dst_offset += len;
	}

	desc->len = total_len;

	if (list_empty(&desc->pending))
		goto err;

	dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

	spin_lock_irqsave(&chan->lock, sflags);

	list_add_tail(&desc->entry, &chan->created);

	spin_unlock_irqrestore(&chan->lock, sflags);

	return desc;

err:
	ccp_free_cmd_resources(ccp, &desc->pending);
	kmem_cache_free(ccp->dma_desc_cache, desc);

	return NULL;
}
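
/*
 * dmaengine memcpy preparation: wrap the plain src/dst DMA addresses in
 * single-entry scatterlists and hand them to ccp_create_desc().
 */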

static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy(
	struct dma_chan *dma_chan, dma_addr_t dst, dma_addr_t src, size_t len,
	unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	struct scatterlist dst_sg, src_sg;

	dev_dbg(chan->ccp->dev,
		"%s - src=%pad, dst=%pad, len=%zu, flags=%#lx\n",
		__func__, &src, &dst, len, flags);

	sg_init_table(&dst_sg, 1);
	sg_dma_address(&dst_sg) = dst;
	sg_dma_len(&dst_sg) = len;

	sg_init_table(&src_sg, 1);
	sg_dma_address(&src_sg) = src;
	sg_dma_len(&src_sg) = len;

	desc = ccp_create_desc(dma_chan, &dst_sg, 1, &src_sg, 1, flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

static struct dma_async_tx_descriptor *ccp_prep_dma_sg(
	struct dma_chan *dma_chan, struct scatterlist *dst_sg,
	unsigned int dst_nents, struct scatterlist *src_sg,
	unsigned int src_nents, unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;

	dev_dbg(chan->ccp->dev,
		"%s - src=%p, src_nents=%u dst=%p, dst_nents=%u, flags=%#lx\n",
		__func__, src_sg, src_nents, dst_sg, dst_nents, flags);

	desc = ccp_create_desc(dma_chan, dst_sg, dst_nents, src_sg, src_nents,
			       flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt(
	struct dma_chan *dma_chan, unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;

	desc = ccp_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

static void ccp_issue_pending(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s\n", __func__);

	spin_lock_irqsave(&chan->lock, flags);

	desc = __ccp_pending_to_active(chan);

	spin_unlock_irqrestore(&chan->lock, flags);

	/* If there was nothing active, start processing */
	if (desc)
		ccp_cmd_callback(desc, 0);
}

static enum dma_status ccp_tx_status(struct dma_chan *dma_chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *state)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	enum dma_status ret;
	unsigned long flags;

	if (chan->status == DMA_PAUSED) {
		ret = DMA_PAUSED;
		goto out;
	}

	ret = dma_cookie_status(dma_chan, cookie, state);
	if (ret == DMA_COMPLETE) {
		spin_lock_irqsave(&chan->lock, flags);

		/* Get status from complete chain, if still there */
		list_for_each_entry(desc, &chan->complete, entry) {
			if (desc->tx_desc.cookie != cookie)
				continue;

			ret = desc->status;
			break;
		}

		spin_unlock_irqrestore(&chan->lock, flags);
	}

out:
	dev_dbg(chan->ccp->dev, "%s - %u\n", __func__, ret);

	return ret;
}

static int ccp_pause(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);

	chan->status = DMA_PAUSED;

	/*TODO: Wait for active DMA to complete before returning? */

	return 0;
}

static int ccp_resume(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
					entry);

	spin_unlock_irqrestore(&chan->lock, flags);

	/* Indicate the channel is running again */
	chan->status = DMA_IN_PROGRESS;

	/* If there was something active, re-start */
	if (desc)
		ccp_cmd_callback(desc, 0);

	return 0;
}

static int ccp_terminate_all(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s\n", __func__);

	/*TODO: Wait for active DMA to complete before continuing */

	spin_lock_irqsave(&chan->lock, flags);

	/*TODO: Purge the complete list? */
	ccp_free_desc_resources(chan->ccp, &chan->active);
	ccp_free_desc_resources(chan->ccp, &chan->pending);
	ccp_free_desc_resources(chan->ccp, &chan->created);

	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}
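
/*
 * Create one dmaengine channel per CCP command queue and advertise the
 * MEMCPY, SG and INTERRUPT capabilities for them.
 */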

int ccp_dmaengine_register(struct ccp_device *ccp)
{
	struct ccp_dma_chan *chan;
	struct dma_device *dma_dev = &ccp->dma_dev;
	struct dma_chan *dma_chan;
	char *dma_cmd_cache_name;
	char *dma_desc_cache_name;
	unsigned int i;
	int ret;

	ccp->ccp_dma_chan = devm_kcalloc(ccp->dev, ccp->cmd_q_count,
					 sizeof(*(ccp->ccp_dma_chan)),
					 GFP_KERNEL);
	if (!ccp->ccp_dma_chan)
		return -ENOMEM;

	dma_cmd_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
					    "%s-dmaengine-cmd-cache",
					    ccp->name);
	if (!dma_cmd_cache_name)
		return -ENOMEM;

	ccp->dma_cmd_cache = kmem_cache_create(dma_cmd_cache_name,
					       sizeof(struct ccp_dma_cmd),
					       sizeof(void *),
					       SLAB_HWCACHE_ALIGN, NULL);
	if (!ccp->dma_cmd_cache)
		return -ENOMEM;

	dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
					     "%s-dmaengine-desc-cache",
					     ccp->name);
	if (!dma_desc_cache_name) {
		ret = -ENOMEM;
		goto err_cache;
	}

	ccp->dma_desc_cache = kmem_cache_create(dma_desc_cache_name,
						sizeof(struct ccp_dma_desc),
						sizeof(void *),
						SLAB_HWCACHE_ALIGN, NULL);
	if (!ccp->dma_desc_cache) {
		ret = -ENOMEM;
		goto err_cache;
	}

	dma_dev->dev = ccp->dev;
	dma_dev->src_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
	dma_dev->dst_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
	dma_dev->directions = DMA_MEM_TO_MEM;
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_SG, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		chan = ccp->ccp_dma_chan + i;
		dma_chan = &chan->dma_chan;

		chan->ccp = ccp;

		spin_lock_init(&chan->lock);
		INIT_LIST_HEAD(&chan->created);
		INIT_LIST_HEAD(&chan->pending);
		INIT_LIST_HEAD(&chan->active);
		INIT_LIST_HEAD(&chan->complete);

		tasklet_init(&chan->cleanup_tasklet, ccp_do_cleanup,
			     (unsigned long)chan);

		dma_chan->device = dma_dev;
		dma_cookie_init(dma_chan);

		list_add_tail(&dma_chan->device_node, &dma_dev->channels);
	}

	dma_dev->device_free_chan_resources = ccp_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;
	dma_dev->device_prep_dma_sg = ccp_prep_dma_sg;
	dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;
	dma_dev->device_issue_pending = ccp_issue_pending;
	dma_dev->device_tx_status = ccp_tx_status;
	dma_dev->device_pause = ccp_pause;
	dma_dev->device_resume = ccp_resume;
	dma_dev->device_terminate_all = ccp_terminate_all;

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_reg;

	return 0;

err_reg:
	kmem_cache_destroy(ccp->dma_desc_cache);

err_cache:
	kmem_cache_destroy(ccp->dma_cmd_cache);

	return ret;
}

void ccp_dmaengine_unregister(struct ccp_device *ccp)
{
	struct dma_device *dma_dev = &ccp->dma_dev;

	dma_async_device_unregister(dma_dev);

	kmem_cache_destroy(ccp->dma_desc_cache);
	kmem_cache_destroy(ccp->dma_cmd_cache);
}
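
/*
 * Usage note (an illustrative sketch only, not part of this driver): once
 * ccp_dmaengine_register() has run, the channels behave as ordinary
 * dmaengine channels, so a client could exercise the MEMCPY capability
 * roughly as below.  Error handling is trimmed, and "dst", "src" and "len"
 * are assumed to be DMA addresses and a length already set up by the
 * client.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	if (!chan)
 *		return -ENODEV;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	cookie = dmaengine_submit(tx);		   (reaches ccp_tx_submit)
 *	dma_async_issue_pending(chan);		   (reaches ccp_issue_pending)
 *	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
 *		... handle the error ...
 *	dma_release_channel(chan);
 */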