/* drivers/crypto/ccp/ccp-dmaengine.c */
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/ccp.h>

#include "ccp-dev.h"
#include "../../dma/dmaengine.h"

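/*
 * Overview of the dmaengine glue in this file: each channel keeps three
 * descriptor lists. tx_submit() queues a descriptor on 'pending';
 * device_issue_pending() splices 'pending' onto 'active'; as descriptors
 * finish, ccp_handle_active_desc() moves them to 'complete', where the
 * cleanup tasklet frees them once the client has acked them. Each
 * descriptor in turn carries its own 'pending'/'active' lists of CCP
 * commands, issued to the hardware one at a time.
 */
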
#define CCP_DMA_WIDTH(_mask)		\
({					\
	u64 mask = _mask + 1;		\
	(mask == 0) ? 64 : fls64(mask);	\
})

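/*
 * CCP_DMA_WIDTH() above converts the device's DMA address mask into an
 * address-width value: a full 64-bit mask (~0ULL) wraps to zero when
 * incremented, yielding 64; any narrower mask yields fls64(mask + 1),
 * the position of the highest set bit of the incremented mask.
 */
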
static void ccp_free_cmd_resources(struct ccp_device *ccp,
				   struct list_head *list)
{
	struct ccp_dma_cmd *cmd, *ctmp;

	list_for_each_entry_safe(cmd, ctmp, list, entry) {
		list_del(&cmd->entry);
		kmem_cache_free(ccp->dma_cmd_cache, cmd);
	}
}

static void ccp_free_desc_resources(struct ccp_device *ccp,
				    struct list_head *list)
{
	struct ccp_dma_desc *desc, *dtmp;

	list_for_each_entry_safe(desc, dtmp, list, entry) {
		ccp_free_cmd_resources(ccp, &desc->active);
		ccp_free_cmd_resources(ccp, &desc->pending);

		list_del(&desc->entry);
		kmem_cache_free(ccp->dma_desc_cache, desc);
	}
}

static void ccp_free_chan_resources(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s - chan=%p\n", __func__, chan);

	spin_lock_irqsave(&chan->lock, flags);

	ccp_free_desc_resources(chan->ccp, &chan->complete);
	ccp_free_desc_resources(chan->ccp, &chan->active);
	ccp_free_desc_resources(chan->ccp, &chan->pending);

	spin_unlock_irqrestore(&chan->lock, flags);
}

static void ccp_cleanup_desc_resources(struct ccp_device *ccp,
				       struct list_head *list)
{
	struct ccp_dma_desc *desc, *dtmp;

	list_for_each_entry_safe_reverse(desc, dtmp, list, entry) {
		if (!async_tx_test_ack(&desc->tx_desc))
			continue;

		dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

		ccp_free_cmd_resources(ccp, &desc->active);
		ccp_free_cmd_resources(ccp, &desc->pending);

		list_del(&desc->entry);
		kmem_cache_free(ccp->dma_desc_cache, desc);
	}
}

static void ccp_do_cleanup(unsigned long data)
{
	struct ccp_dma_chan *chan = (struct ccp_dma_chan *)data;
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s - chan=%s\n", __func__,
		dma_chan_name(&chan->dma_chan));

	spin_lock_irqsave(&chan->lock, flags);

	ccp_cleanup_desc_resources(chan->ccp, &chan->complete);

	spin_unlock_irqrestore(&chan->lock, flags);
}

static int ccp_issue_next_cmd(struct ccp_dma_desc *desc)
{
	struct ccp_dma_cmd *cmd;
	int ret;

	cmd = list_first_entry(&desc->pending, struct ccp_dma_cmd, entry);
	list_move(&cmd->entry, &desc->active);

	dev_dbg(desc->ccp->dev, "%s - tx %d, cmd=%p\n", __func__,
		desc->tx_desc.cookie, cmd);

	ret = ccp_enqueue_cmd(&cmd->ccp_cmd);
	if (!ret || (ret == -EINPROGRESS) || (ret == -EBUSY))
		return 0;

	dev_dbg(desc->ccp->dev, "%s - error: ret=%d, tx %d, cmd=%p\n", __func__,
		ret, desc->tx_desc.cookie, cmd);

	return ret;
}

static void ccp_free_active_cmd(struct ccp_dma_desc *desc)
{
	struct ccp_dma_cmd *cmd;

	cmd = list_first_entry_or_null(&desc->active, struct ccp_dma_cmd,
				       entry);
	if (!cmd)
		return;

	dev_dbg(desc->ccp->dev, "%s - freeing tx %d cmd=%p\n",
		__func__, desc->tx_desc.cookie, cmd);

	list_del(&cmd->entry);
	kmem_cache_free(desc->ccp->dma_cmd_cache, cmd);
}

static struct ccp_dma_desc *__ccp_next_dma_desc(struct ccp_dma_chan *chan,
						struct ccp_dma_desc *desc)
{
	/* Move current DMA descriptor to the complete list */
	if (desc)
		list_move(&desc->entry, &chan->complete);

	/* Get the next DMA descriptor on the active list */
	desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
					entry);

	return desc;
}

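/*
 * Retire the given descriptor and advance along the active list: free the
 * command that just finished; if the descriptor still has pending commands
 * and no error, hand it back so the next command can be issued. Otherwise
 * mark it complete (or leave DMA_ERROR), complete its cookie, invoke the
 * client callback when DMA_PREP_INTERRUPT was requested, run dependencies,
 * and move on to the next active descriptor until one with work remains.
 */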
static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
						   struct ccp_dma_desc *desc)
{
	struct dma_async_tx_descriptor *tx_desc;
	unsigned long flags;

	/* Loop over descriptors until one is found with commands */
	do {
		if (desc) {
			/* Remove the DMA command from the list and free it */
			ccp_free_active_cmd(desc);

			if (!list_empty(&desc->pending)) {
				/* No errors, keep going */
				if (desc->status != DMA_ERROR)
					return desc;

				/* Error, free remaining commands and move on */
				ccp_free_cmd_resources(desc->ccp,
						       &desc->pending);
			}

			tx_desc = &desc->tx_desc;
		} else {
			tx_desc = NULL;
		}

		spin_lock_irqsave(&chan->lock, flags);

		if (desc) {
			if (desc->status != DMA_ERROR)
				desc->status = DMA_COMPLETE;

			dev_dbg(desc->ccp->dev,
				"%s - tx %d complete, status=%u\n", __func__,
				desc->tx_desc.cookie, desc->status);

			dma_cookie_complete(tx_desc);
		}

		desc = __ccp_next_dma_desc(chan, desc);

		spin_unlock_irqrestore(&chan->lock, flags);

		if (tx_desc) {
			if (tx_desc->callback &&
			    (tx_desc->flags & DMA_PREP_INTERRUPT))
				tx_desc->callback(tx_desc->callback_param);

			dma_run_dependencies(tx_desc);
		}
	} while (desc);

	return NULL;
}

static struct ccp_dma_desc *__ccp_pending_to_active(struct ccp_dma_chan *chan)
{
	struct ccp_dma_desc *desc;

	if (list_empty(&chan->pending))
		return NULL;

	desc = list_empty(&chan->active)
		? list_first_entry(&chan->pending, struct ccp_dma_desc, entry)
		: NULL;

	list_splice_tail_init(&chan->pending, &chan->active);

	return desc;
}

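/*
 * Per-command completion callback handed to the CCP request queue; this is
 * what drives a channel forward. Once a command finishes (or fails), retire
 * descriptors via ccp_handle_active_desc() and keep issuing the next
 * pending command until the channel is paused, the active list drains, or
 * a submit succeeds; a failed submit marks the descriptor DMA_ERROR and the
 * loop retires it on the next pass. Freeing of acked, completed descriptors
 * is deferred to the cleanup tasklet.
 */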
static void ccp_cmd_callback(void *data, int err)
{
	struct ccp_dma_desc *desc = data;
	struct ccp_dma_chan *chan;
	int ret;

	if (err == -EINPROGRESS)
		return;

	chan = container_of(desc->tx_desc.chan, struct ccp_dma_chan,
			    dma_chan);

	dev_dbg(chan->ccp->dev, "%s - tx %d callback, err=%d\n",
		__func__, desc->tx_desc.cookie, err);

	if (err)
		desc->status = DMA_ERROR;

	while (true) {
		/* Check for DMA descriptor completion */
		desc = ccp_handle_active_desc(chan, desc);

		/* Don't submit cmd if no descriptor or DMA is paused */
		if (!desc || (chan->status == DMA_PAUSED))
			break;

		ret = ccp_issue_next_cmd(desc);
		if (!ret)
			break;

		desc->status = DMA_ERROR;
	}

	tasklet_schedule(&chan->cleanup_tasklet);
}

static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
{
	struct ccp_dma_desc *desc = container_of(tx_desc, struct ccp_dma_desc,
						 tx_desc);
	struct ccp_dma_chan *chan;
	dma_cookie_t cookie;
	unsigned long flags;

	chan = container_of(tx_desc->chan, struct ccp_dma_chan, dma_chan);

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx_desc);
	list_add_tail(&desc->entry, &chan->pending);

	spin_unlock_irqrestore(&chan->lock, flags);

	dev_dbg(chan->ccp->dev, "%s - added tx descriptor %d to pending list\n",
		__func__, cookie);

	return cookie;
}

static struct ccp_dma_cmd *ccp_alloc_dma_cmd(struct ccp_dma_chan *chan)
{
	struct ccp_dma_cmd *cmd;

	cmd = kmem_cache_alloc(chan->ccp->dma_cmd_cache, GFP_NOWAIT);
	if (cmd)
		memset(cmd, 0, sizeof(*cmd));

	return cmd;
}

static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan,
					       unsigned long flags)
{
	struct ccp_dma_desc *desc;

	desc = kmem_cache_zalloc(chan->ccp->dma_desc_cache, GFP_NOWAIT);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->tx_desc, &chan->dma_chan);
	desc->tx_desc.flags = flags;
	desc->tx_desc.tx_submit = ccp_tx_submit;
	desc->ccp = chan->ccp;
	INIT_LIST_HEAD(&desc->pending);
	INIT_LIST_HEAD(&desc->active);
	desc->status = DMA_IN_PROGRESS;

	return desc;
}

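/*
 * Build a descriptor from a source/destination scatterlist pair. The loop
 * below walks both lists in lock step and emits one CCP passthrough
 * (no-op bitwise/byte-swap) command per min(src_len, dst_len) chunk, so at
 * most src_nents + dst_nents commands are generated. The descriptor is
 * queued on the channel's pending list; nothing is issued until the client
 * calls dma_async_issue_pending().
 */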
static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
					    struct scatterlist *dst_sg,
					    unsigned int dst_nents,
					    struct scatterlist *src_sg,
					    unsigned int src_nents,
					    unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_device *ccp = chan->ccp;
	struct ccp_dma_desc *desc;
	struct ccp_dma_cmd *cmd;
	struct ccp_cmd *ccp_cmd;
	struct ccp_passthru_nomap_engine *ccp_pt;
	unsigned int src_offset, src_len;
	unsigned int dst_offset, dst_len;
	unsigned int len;
	unsigned long sflags;
	size_t total_len;

	if (!dst_sg || !src_sg)
		return NULL;

	if (!dst_nents || !src_nents)
		return NULL;

	desc = ccp_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	total_len = 0;

	src_len = sg_dma_len(src_sg);
	src_offset = 0;

	dst_len = sg_dma_len(dst_sg);
	dst_offset = 0;

	while (true) {
		if (!src_len) {
			src_nents--;
			if (!src_nents)
				break;

			src_sg = sg_next(src_sg);
			if (!src_sg)
				break;

			src_len = sg_dma_len(src_sg);
			src_offset = 0;
			continue;
		}

		if (!dst_len) {
			dst_nents--;
			if (!dst_nents)
				break;

			dst_sg = sg_next(dst_sg);
			if (!dst_sg)
				break;

			dst_len = sg_dma_len(dst_sg);
			dst_offset = 0;
			continue;
		}

		len = min(dst_len, src_len);

		cmd = ccp_alloc_dma_cmd(chan);
		if (!cmd)
			goto err;

		ccp_cmd = &cmd->ccp_cmd;
		ccp_pt = &ccp_cmd->u.passthru_nomap;
		ccp_cmd->flags = CCP_CMD_MAY_BACKLOG;
		ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP;
		ccp_cmd->engine = CCP_ENGINE_PASSTHRU;
		ccp_pt->bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		ccp_pt->byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
		ccp_pt->src_dma = sg_dma_address(src_sg) + src_offset;
		ccp_pt->dst_dma = sg_dma_address(dst_sg) + dst_offset;
		ccp_pt->src_len = len;
		ccp_pt->final = 1;
		ccp_cmd->callback = ccp_cmd_callback;
		ccp_cmd->data = desc;

		list_add_tail(&cmd->entry, &desc->pending);

		dev_dbg(ccp->dev,
			"%s - cmd=%p, src=%pad, dst=%pad, len=%llu\n", __func__,
			cmd, &ccp_pt->src_dma,
			&ccp_pt->dst_dma, ccp_pt->src_len);

		total_len += len;

		src_len -= len;
		src_offset += len;

		dst_len -= len;
		dst_offset += len;
	}

	desc->len = total_len;

	if (list_empty(&desc->pending))
		goto err;

	dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

	spin_lock_irqsave(&chan->lock, sflags);

	list_add_tail(&desc->entry, &chan->pending);

	spin_unlock_irqrestore(&chan->lock, sflags);

	return desc;

err:
	ccp_free_cmd_resources(ccp, &desc->pending);
	kmem_cache_free(ccp->dma_desc_cache, desc);

	return NULL;
}

static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy(
	struct dma_chan *dma_chan, dma_addr_t dst, dma_addr_t src, size_t len,
	unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	struct scatterlist dst_sg, src_sg;

	dev_dbg(chan->ccp->dev,
		"%s - src=%pad, dst=%pad, len=%zu, flags=%#lx\n",
		__func__, &src, &dst, len, flags);

	sg_init_table(&dst_sg, 1);
	sg_dma_address(&dst_sg) = dst;
	sg_dma_len(&dst_sg) = len;

	sg_init_table(&src_sg, 1);
	sg_dma_address(&src_sg) = src;
	sg_dma_len(&src_sg) = len;

	desc = ccp_create_desc(dma_chan, &dst_sg, 1, &src_sg, 1, flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

static struct dma_async_tx_descriptor *ccp_prep_dma_sg(
	struct dma_chan *dma_chan, struct scatterlist *dst_sg,
	unsigned int dst_nents, struct scatterlist *src_sg,
	unsigned int src_nents, unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;

	dev_dbg(chan->ccp->dev,
		"%s - src=%p, src_nents=%u dst=%p, dst_nents=%u, flags=%#lx\n",
		__func__, src_sg, src_nents, dst_sg, dst_nents, flags);

	desc = ccp_create_desc(dma_chan, dst_sg, dst_nents, src_sg, src_nents,
			       flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt(
	struct dma_chan *dma_chan, unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;

	desc = ccp_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

static void ccp_issue_pending(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s\n", __func__);

	spin_lock_irqsave(&chan->lock, flags);

	desc = __ccp_pending_to_active(chan);

	spin_unlock_irqrestore(&chan->lock, flags);

	/* If there was nothing active, start processing */
	if (desc)
		ccp_cmd_callback(desc, 0);
}

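/*
 * Cookie status is cross-checked against the channel's complete list so
 * that a transfer which finished with an error reports DMA_ERROR rather
 * than DMA_COMPLETE, for as long as its descriptor has not been freed by
 * the cleanup tasklet.
 */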
static enum dma_status ccp_tx_status(struct dma_chan *dma_chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *state)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	enum dma_status ret;
	unsigned long flags;

	if (chan->status == DMA_PAUSED) {
		ret = DMA_PAUSED;
		goto out;
	}

	ret = dma_cookie_status(dma_chan, cookie, state);
	if (ret == DMA_COMPLETE) {
		spin_lock_irqsave(&chan->lock, flags);

		/* Get status from complete chain, if still there */
		list_for_each_entry(desc, &chan->complete, entry) {
			if (desc->tx_desc.cookie != cookie)
				continue;

			ret = desc->status;
			break;
		}

		spin_unlock_irqrestore(&chan->lock, flags);
	}

out:
	dev_dbg(chan->ccp->dev, "%s - %u\n", __func__, ret);

	return ret;
}

static int ccp_pause(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);

	chan->status = DMA_PAUSED;

	/*TODO: Wait for active DMA to complete before returning? */

	return 0;
}

static int ccp_resume(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
					entry);

	spin_unlock_irqrestore(&chan->lock, flags);

	/* Indicate the channel is running again */
	chan->status = DMA_IN_PROGRESS;

	/* If there was something active, re-start */
	if (desc)
		ccp_cmd_callback(desc, 0);

	return 0;
}

static int ccp_terminate_all(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s\n", __func__);

	/*TODO: Wait for active DMA to complete before continuing */

	spin_lock_irqsave(&chan->lock, flags);

	/*TODO: Purge the complete list? */
	ccp_free_desc_resources(chan->ccp, &chan->active);
	ccp_free_desc_resources(chan->ccp, &chan->pending);

	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

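/*
 * Registration creates one DMA channel per CCP command queue, backed by
 * per-device slab caches for commands and descriptors, and advertises
 * MEMCPY, SG and INTERRUPT capabilities for memory-to-memory transfers.
 */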
int ccp_dmaengine_register(struct ccp_device *ccp)
{
	struct ccp_dma_chan *chan;
	struct dma_device *dma_dev = &ccp->dma_dev;
	struct dma_chan *dma_chan;
	char *dma_cmd_cache_name;
	char *dma_desc_cache_name;
	unsigned int i;
	int ret;

	ccp->ccp_dma_chan = devm_kcalloc(ccp->dev, ccp->cmd_q_count,
					 sizeof(*(ccp->ccp_dma_chan)),
					 GFP_KERNEL);
	if (!ccp->ccp_dma_chan)
		return -ENOMEM;

	dma_cmd_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
					    "%s-dmaengine-cmd-cache",
					    ccp->name);
	if (!dma_cmd_cache_name)
		return -ENOMEM;

	ccp->dma_cmd_cache = kmem_cache_create(dma_cmd_cache_name,
					       sizeof(struct ccp_dma_cmd),
					       sizeof(void *),
					       SLAB_HWCACHE_ALIGN, NULL);
	if (!ccp->dma_cmd_cache)
		return -ENOMEM;

	dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
					     "%s-dmaengine-desc-cache",
					     ccp->name);
	if (!dma_desc_cache_name) {
		ret = -ENOMEM;
		goto err_cache;
	}

	ccp->dma_desc_cache = kmem_cache_create(dma_desc_cache_name,
						sizeof(struct ccp_dma_desc),
						sizeof(void *),
						SLAB_HWCACHE_ALIGN, NULL);
	if (!ccp->dma_desc_cache) {
		ret = -ENOMEM;
		goto err_cache;
	}

	dma_dev->dev = ccp->dev;
	dma_dev->src_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
	dma_dev->dst_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
	dma_dev->directions = DMA_MEM_TO_MEM;
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_SG, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		chan = ccp->ccp_dma_chan + i;
		dma_chan = &chan->dma_chan;

		chan->ccp = ccp;

		spin_lock_init(&chan->lock);
		INIT_LIST_HEAD(&chan->pending);
		INIT_LIST_HEAD(&chan->active);
		INIT_LIST_HEAD(&chan->complete);

		tasklet_init(&chan->cleanup_tasklet, ccp_do_cleanup,
			     (unsigned long)chan);

		dma_chan->device = dma_dev;
		dma_cookie_init(dma_chan);

		list_add_tail(&dma_chan->device_node, &dma_dev->channels);
	}

	dma_dev->device_free_chan_resources = ccp_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;
	dma_dev->device_prep_dma_sg = ccp_prep_dma_sg;
	dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;
	dma_dev->device_issue_pending = ccp_issue_pending;
	dma_dev->device_tx_status = ccp_tx_status;
	dma_dev->device_pause = ccp_pause;
	dma_dev->device_resume = ccp_resume;
	dma_dev->device_terminate_all = ccp_terminate_all;

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_reg;

	return 0;

err_reg:
	kmem_cache_destroy(ccp->dma_desc_cache);

err_cache:
	kmem_cache_destroy(ccp->dma_cmd_cache);

	return ret;
}

void ccp_dmaengine_unregister(struct ccp_device *ccp)
{
	struct dma_device *dma_dev = &ccp->dma_dev;

	dma_async_device_unregister(dma_dev);

	kmem_cache_destroy(ccp->dma_desc_cache);
	kmem_cache_destroy(ccp->dma_cmd_cache);
}

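/*
 * Illustrative only: a minimal sketch of how a generic dmaengine client
 * could exercise the MEMCPY capability exported above. 'src', 'dst',
 * 'len' and 'my_done_callback' are hypothetical; the addresses are
 * assumed to be already DMA-mapped, and error handling is elided.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *				       DMA_PREP_INTERRUPT);
 *	tx->callback = my_done_callback;  - fires via ccp_handle_active_desc()
 *	dmaengine_submit(tx);             - lands in ccp_tx_submit()
 *	dma_async_issue_pending(chan);    - lands in ccp_issue_pending()
 *	...
 *	dma_release_channel(chan);
 */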