// SPDX-License-Identifier: GPL-2.0
/*
 * Support for ATMEL DES/TDES HW acceleration.
 *
 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
 * Author: Nicolas Royer <nicolas@eukrea.com>
 *
 * Some ideas are from omap-aes.c drivers.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
#include "atmel-tdes-regs.h"
#define ATMEL_TDES_PRIORITY	300

/* TDES flags */
/* Reserve bits [17:16], [13:12], [2:0] for the TDES Mode Register */
#define TDES_FLAGS_ENCRYPT	TDES_MR_CYPHER_ENC
#define TDES_FLAGS_OPMODE_MASK	(TDES_MR_OPMOD_MASK | TDES_MR_CFBS_MASK)
#define TDES_FLAGS_ECB		TDES_MR_OPMOD_ECB
#define TDES_FLAGS_CBC		TDES_MR_OPMOD_CBC
#define TDES_FLAGS_OFB		TDES_MR_OPMOD_OFB
#define TDES_FLAGS_CFB64	(TDES_MR_OPMOD_CFB | TDES_MR_CFBS_64b)
#define TDES_FLAGS_CFB32	(TDES_MR_OPMOD_CFB | TDES_MR_CFBS_32b)
#define TDES_FLAGS_CFB16	(TDES_MR_OPMOD_CFB | TDES_MR_CFBS_16b)
#define TDES_FLAGS_CFB8		(TDES_MR_OPMOD_CFB | TDES_MR_CFBS_8b)

#define TDES_FLAGS_MODE_MASK	(TDES_FLAGS_OPMODE_MASK | TDES_FLAGS_ENCRYPT)

#define TDES_FLAGS_INIT		BIT(3)
#define TDES_FLAGS_FAST		BIT(4)
#define TDES_FLAGS_BUSY		BIT(5)
#define TDES_FLAGS_DMA		BIT(6)

#define ATMEL_TDES_QUEUE_LENGTH	50

#define CFB8_BLOCK_SIZE		1
#define CFB16_BLOCK_SIZE	2
#define CFB32_BLOCK_SIZE	4
struct atmel_tdes_caps {
	bool			has_dma;
	bool			has_cfb_3keys;
};

struct atmel_tdes_dev;

struct atmel_tdes_ctx {
	struct atmel_tdes_dev	*dd;

	int			keylen;
	u32			key[DES3_EDE_KEY_SIZE / sizeof(u32)];

	u16			block_size;
};

struct atmel_tdes_reqctx {
	unsigned long		mode;
	u8			lastc[DES_BLOCK_SIZE];
};

struct atmel_tdes_dma {
	struct dma_chan		*chan;
	struct dma_slave_config	dma_conf;
};

struct atmel_tdes_dev {
	struct list_head	list;
	unsigned long		phys_base;
	void __iomem		*io_base;

	struct atmel_tdes_ctx	*ctx;
	struct device		*dev;
	struct clk		*iclk;
	int			irq;

	unsigned long		flags;

	spinlock_t		lock;
	struct crypto_queue	queue;

	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	struct skcipher_request	*req;
	size_t			total;

	struct scatterlist	*in_sg;
	unsigned int		nb_in_sg;
	size_t			in_offset;
	struct scatterlist	*out_sg;
	unsigned int		nb_out_sg;
	size_t			out_offset;

	size_t			buflen;
	size_t			dma_size;

	void			*buf_in;
	dma_addr_t		dma_addr_in;
	struct atmel_tdes_dma	dma_lch_in;

	void			*buf_out;
	dma_addr_t		dma_addr_out;
	struct atmel_tdes_dma	dma_lch_out;

	struct atmel_tdes_caps	caps;

	u32			hw_version;
};

struct atmel_tdes_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
};

static struct atmel_tdes_drv atmel_tdes = {
	.dev_list = LIST_HEAD_INIT(atmel_tdes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_tdes.lock),
};
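
/*
 * Copy up to @buflen bytes between a linear bounce buffer and the
 * scatterlist, advancing *sg and *offset as data is consumed.  @out selects
 * the direction passed to scatterwalk_map_and_copy().  Returns the number
 * of bytes actually copied.
 */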
static int atmel_tdes_sg_copy(struct scatterlist **sg, size_t *offset,
			      void *buf, size_t buflen, size_t total, int out)
{
	size_t count, off = 0;

	while (buflen && total) {
		count = min((*sg)->length - *offset, total);
		count = min(count, buflen);

		if (!count)
			return off;

		scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out);

		off += count;
		buflen -= count;
		*offset += count;
		total -= count;

		if (*offset == (*sg)->length) {
			*sg = sg_next(*sg);
			if (*sg)
				*offset = 0;
			else
				total = 0;
		}
	}

	return off;
}
static inline u32 atmel_tdes_read(struct atmel_tdes_dev *dd, u32 offset)
{
	return readl_relaxed(dd->io_base + offset);
}

static inline void atmel_tdes_write(struct atmel_tdes_dev *dd,
				    u32 offset, u32 value)
{
	writel_relaxed(value, dd->io_base + offset);
}

static void atmel_tdes_write_n(struct atmel_tdes_dev *dd, u32 offset,
			       const u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		atmel_tdes_write(dd, offset, *value);
}
static struct atmel_tdes_dev *atmel_tdes_find_dev(struct atmel_tdes_ctx *ctx)
{
	struct atmel_tdes_dev *tdes_dd = NULL;
	struct atmel_tdes_dev *tmp;

	spin_lock_bh(&atmel_tdes.lock);
	if (!ctx->dd) {
		list_for_each_entry(tmp, &atmel_tdes.dev_list, list) {
			tdes_dd = tmp;
			break;
		}
		ctx->dd = tdes_dd;
	} else {
		tdes_dd = ctx->dd;
	}
	spin_unlock_bh(&atmel_tdes.lock);

	return tdes_dd;
}
static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd)
{
	int err;

	err = clk_prepare_enable(dd->iclk);
	if (err)
		return err;

	if (!(dd->flags & TDES_FLAGS_INIT)) {
		atmel_tdes_write(dd, TDES_CR, TDES_CR_SWRST);
		dd->flags |= TDES_FLAGS_INIT;
	}

	return 0;
}
static inline unsigned int atmel_tdes_get_version(struct atmel_tdes_dev *dd)
{
	return atmel_tdes_read(dd, TDES_HW_VERSION) & 0x00000fff;
}
static int atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd)
{
	int err;

	err = atmel_tdes_hw_init(dd);
	if (err)
		return err;

	dd->hw_version = atmel_tdes_get_version(dd);

	dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);

	clk_disable_unprepare(dd->iclk);

	return 0;
}
static void atmel_tdes_dma_callback(void *data)
{
	struct atmel_tdes_dev *dd = data;

	/* dma_lch_out - completed */
	tasklet_schedule(&dd->done_task);
}
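
/*
 * Program the Mode Register for the current request: DES vs. TDES and the
 * key mode are derived from the key length (one, two or three DES keys),
 * while the operating mode comes from the request flags.  The MR write
 * must precede the key and IV register writes.
 */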
static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
{
	int err;
	u32 valmr = TDES_MR_SMOD_PDC;

	err = atmel_tdes_hw_init(dd);
	if (err)
		return err;

	if (!dd->caps.has_dma)
		atmel_tdes_write(dd, TDES_PTCR,
				 TDES_PTCR_TXTDIS | TDES_PTCR_RXTDIS);

	/* MR register must be set before IV registers */
	if (dd->ctx->keylen > (DES_KEY_SIZE << 1)) {
		valmr |= TDES_MR_KEYMOD_3KEY;
		valmr |= TDES_MR_TDESMOD_TDES;
	} else if (dd->ctx->keylen > DES_KEY_SIZE) {
		valmr |= TDES_MR_KEYMOD_2KEY;
		valmr |= TDES_MR_TDESMOD_TDES;
	} else {
		valmr |= TDES_MR_TDESMOD_DES;
	}

	valmr |= dd->flags & TDES_FLAGS_MODE_MASK;

	atmel_tdes_write(dd, TDES_MR, valmr);

	atmel_tdes_write_n(dd, TDES_KEY1W1R, dd->ctx->key,
			   dd->ctx->keylen >> 2);

	if (dd->req->iv && (valmr & TDES_MR_OPMOD_MASK) != TDES_MR_OPMOD_ECB)
		atmel_tdes_write_n(dd, TDES_IV1R, (void *)dd->req->iv, 2);

	return 0;
}
static int atmel_tdes_crypt_pdc_stop(struct atmel_tdes_dev *dd)
{
	int err = 0;
	size_t count;

	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS | TDES_PTCR_RXTDIS);

	if (dd->flags & TDES_FLAGS_FAST) {
		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
	} else {
		dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
					   dd->dma_size, DMA_FROM_DEVICE);

		/* copy data */
		count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
					   dd->buf_out, dd->buflen,
					   dd->dma_size, 1);
		if (count != dd->dma_size) {
			err = -EINVAL;
			pr_err("not all data converted: %zu\n", count);
		}
	}

	return err;
}
static int atmel_tdes_buff_init(struct atmel_tdes_dev *dd)
{
	int err = -ENOMEM;

	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
	dd->buflen = PAGE_SIZE;
	dd->buflen &= ~(DES_BLOCK_SIZE - 1);

	if (!dd->buf_in || !dd->buf_out) {
		dev_err(dd->dev, "unable to alloc pages.\n");
		goto err_alloc;
	}

	/* MAP here */
	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
					 dd->buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
		dev_err(dd->dev, "dma %zd bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_in;
	}

	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
					  dd->buflen, DMA_FROM_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
		dev_err(dd->dev, "dma %zd bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_out;
	}

	return 0;

err_map_out:
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
			 DMA_TO_DEVICE);
err_map_in:
err_alloc:
	free_page((unsigned long)dd->buf_out);
	free_page((unsigned long)dd->buf_in);
	pr_err("error: %d\n", err);
	return err;
}
static void atmel_tdes_buff_cleanup(struct atmel_tdes_dev *dd)
{
	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
			 DMA_FROM_DEVICE);
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
			 DMA_TO_DEVICE);
	free_page((unsigned long)dd->buf_out);
	free_page((unsigned long)dd->buf_in);
}
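
/*
 * PDC (Peripheral DMA Controller) transfer path: the transmit and receive
 * pointer/counter registers are programmed directly and the ENDRX interrupt
 * signals completion.  The counters are expressed in transfers, so the
 * length is scaled by the CFB data size.
 */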
static int atmel_tdes_crypt_pdc(struct atmel_tdes_dev *dd,
				dma_addr_t dma_addr_in,
				dma_addr_t dma_addr_out, int length)
{
	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req);
	int len32;

	dd->dma_size = length;

	if (!(dd->flags & TDES_FLAGS_FAST)) {
		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
					   DMA_TO_DEVICE);
	}

	switch (rctx->mode & TDES_FLAGS_OPMODE_MASK) {
	case TDES_FLAGS_CFB8:
		len32 = DIV_ROUND_UP(length, sizeof(u8));
		break;

	case TDES_FLAGS_CFB16:
		len32 = DIV_ROUND_UP(length, sizeof(u16));
		break;

	default:
		len32 = DIV_ROUND_UP(length, sizeof(u32));
		break;
	}

	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS | TDES_PTCR_RXTDIS);
	atmel_tdes_write(dd, TDES_TPR, dma_addr_in);
	atmel_tdes_write(dd, TDES_TCR, len32);
	atmel_tdes_write(dd, TDES_RPR, dma_addr_out);
	atmel_tdes_write(dd, TDES_RCR, len32);

	/* Enable Interrupt */
	atmel_tdes_write(dd, TDES_IER, TDES_INT_ENDRX);

	/* Start DMA transfer */
	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTEN | TDES_PTCR_RXTEN);

	return 0;
}
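
/*
 * dmaengine transfer path: one slave channel feeds the TDES input data
 * registers and one drains the output data registers.  The bus width
 * follows the CFB data size, and completion is reported through the
 * callback attached to the output channel.
 */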
static int atmel_tdes_crypt_dma(struct atmel_tdes_dev *dd,
				dma_addr_t dma_addr_in,
				dma_addr_t dma_addr_out, int length)
{
	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req);
	struct scatterlist sg[2];
	struct dma_async_tx_descriptor	*in_desc, *out_desc;
	enum dma_slave_buswidth addr_width;

	dd->dma_size = length;

	if (!(dd->flags & TDES_FLAGS_FAST)) {
		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
					   DMA_TO_DEVICE);
	}

	switch (rctx->mode & TDES_FLAGS_OPMODE_MASK) {
	case TDES_FLAGS_CFB8:
		addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;

	case TDES_FLAGS_CFB16:
		addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;

	default:
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	}

	dd->dma_lch_in.dma_conf.dst_addr_width = addr_width;
	dd->dma_lch_out.dma_conf.src_addr_width = addr_width;

	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
	dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);

	dd->flags |= TDES_FLAGS_DMA;

	sg_init_table(&sg[0], 1);
	sg_dma_address(&sg[0]) = dma_addr_in;
	sg_dma_len(&sg[0]) = length;

	sg_init_table(&sg[1], 1);
	sg_dma_address(&sg[1]) = dma_addr_out;
	sg_dma_len(&sg[1]) = length;

	in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
					  1, DMA_MEM_TO_DEV,
					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!in_desc)
		return -EINVAL;

	out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
					   1, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!out_desc)
		return -EINVAL;

	out_desc->callback = atmel_tdes_dma_callback;
	out_desc->callback_param = dd;

	dmaengine_submit(out_desc);
	dma_async_issue_pending(dd->dma_lch_out.chan);

	dmaengine_submit(in_desc);
	dma_async_issue_pending(dd->dma_lch_in.chan);

	return 0;
}
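
/*
 * Pick the transfer strategy for the current request: if both scatterlists
 * are suitably aligned they are mapped directly ("fast" path), otherwise
 * the data is staged through the pre-mapped bounce buffers.  The transfer
 * is then started through the dmaengine or the PDC back-end.
 */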
static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
{
	int err, fast = 0, in, out;
	size_t count;
	dma_addr_t addr_in, addr_out;

	if ((!dd->in_offset) && (!dd->out_offset)) {
		/* check for alignment */
		in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
			IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
		out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
			IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
		fast = in && out;

		if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
			fast = 0;
	}

	if (fast) {
		count = min_t(size_t, dd->total, sg_dma_len(dd->in_sg));
		count = min_t(size_t, count, sg_dma_len(dd->out_sg));

		err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			return -EINVAL;
		}

		err = dma_map_sg(dd->dev, dd->out_sg, 1,
				 DMA_FROM_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			dma_unmap_sg(dd->dev, dd->in_sg, 1,
				     DMA_TO_DEVICE);
			return -EINVAL;
		}

		addr_in = sg_dma_address(dd->in_sg);
		addr_out = sg_dma_address(dd->out_sg);

		dd->flags |= TDES_FLAGS_FAST;
	} else {
		/* use cache buffers */
		count = atmel_tdes_sg_copy(&dd->in_sg, &dd->in_offset,
					   dd->buf_in, dd->buflen,
					   dd->total, 0);

		addr_in = dd->dma_addr_in;
		addr_out = dd->dma_addr_out;

		dd->flags &= ~TDES_FLAGS_FAST;
	}

	dd->total -= count;

	if (dd->caps.has_dma)
		err = atmel_tdes_crypt_dma(dd, addr_in, addr_out, count);
	else
		err = atmel_tdes_crypt_pdc(dd, addr_in, addr_out, count);

	if (err && (dd->flags & TDES_FLAGS_FAST)) {
		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
	}

	return err;
}
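
/*
 * The skcipher API expects req->iv to be updated on completion so that a
 * follow-up request can continue the chain.  This helper copies the last
 * ciphertext block into req->iv; for in-place decryption that block was
 * saved in rctx->lastc before the hardware overwrote it.
 */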
static void
atmel_tdes_set_iv_as_last_ciphertext_block(struct atmel_tdes_dev *dd)
{
	struct skcipher_request *req = dd->req;
	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);

	if (req->cryptlen < ivsize)
		return;

	if (rctx->mode & TDES_FLAGS_ENCRYPT) {
		scatterwalk_map_and_copy(req->iv, req->dst,
					 req->cryptlen - ivsize, ivsize, 0);
	} else {
		if (req->src == req->dst)
			memcpy(req->iv, rctx->lastc, ivsize);
		else
			scatterwalk_map_and_copy(req->iv, req->src,
						 req->cryptlen - ivsize,
						 ivsize, 0);
	}
}
static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
{
	struct skcipher_request *req = dd->req;
	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);

	clk_disable_unprepare(dd->iclk);

	dd->flags &= ~TDES_FLAGS_BUSY;

	if (!err && (rctx->mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB)
		atmel_tdes_set_iv_as_last_ciphertext_block(dd);

	req->base.complete(&req->base, err);
}
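
/*
 * Enqueue a request (or NULL to just pump the queue) and, if the engine is
 * idle, dequeue the next one and start processing it.  Backlogged requests
 * are notified with -EINPROGRESS before the new request is taken over.
 */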
static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
				   struct skcipher_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct atmel_tdes_ctx *ctx;
	struct atmel_tdes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = crypto_enqueue_request(&dd->queue, &req->base);
	if (dd->flags & TDES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= TDES_FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = skcipher_request_cast(async_req);

	/* assign new request to device */
	dd->req = req;
	dd->total = req->cryptlen;
	dd->in_offset = 0;
	dd->in_sg = req->src;
	dd->out_offset = 0;
	dd->out_sg = req->dst;

	rctx = skcipher_request_ctx(req);
	ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	rctx->mode &= TDES_FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~TDES_FLAGS_MODE_MASK) | rctx->mode;
	dd->ctx = ctx;

	err = atmel_tdes_write_ctrl(dd);
	if (!err)
		err = atmel_tdes_crypt_start(dd);
	if (err) {
		/* des_task will not finish it, so do it here */
		atmel_tdes_finish_req(dd, err);
		tasklet_schedule(&dd->queue_task);
	}

	return ret;
}
static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
{
	int err = -EINVAL;
	size_t count;

	if (dd->flags & TDES_FLAGS_DMA) {
		err = 0;
		if (dd->flags & TDES_FLAGS_FAST) {
			dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		} else {
			dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
						   dd->dma_size,
						   DMA_FROM_DEVICE);

			/* copy data */
			count = atmel_tdes_sg_copy(&dd->out_sg,
						   &dd->out_offset,
						   dd->buf_out, dd->buflen,
						   dd->dma_size, 1);
			if (count != dd->dma_size) {
				err = -EINVAL;
				pr_err("not all data converted: %zu\n",
				       count);
			}
		}
	}

	return err;
}
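
/*
 * Common entry point for all encrypt/decrypt wrappers: validate that the
 * request length is a whole number of blocks for the selected mode, record
 * the mode in the request context, save the last ciphertext block when an
 * in-place decryption would destroy it, and hand the request to the queue.
 */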
static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);

	switch (mode & TDES_FLAGS_OPMODE_MASK) {
	case TDES_FLAGS_CFB8:
		if (!IS_ALIGNED(req->cryptlen, CFB8_BLOCK_SIZE)) {
			pr_err("request size is not exact amount of CFB8 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB8_BLOCK_SIZE;
		break;

	case TDES_FLAGS_CFB16:
		if (!IS_ALIGNED(req->cryptlen, CFB16_BLOCK_SIZE)) {
			pr_err("request size is not exact amount of CFB16 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB16_BLOCK_SIZE;
		break;

	case TDES_FLAGS_CFB32:
		if (!IS_ALIGNED(req->cryptlen, CFB32_BLOCK_SIZE)) {
			pr_err("request size is not exact amount of CFB32 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB32_BLOCK_SIZE;
		break;

	default:
		if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
			pr_err("request size is not exact amount of DES blocks\n");
			return -EINVAL;
		}
		ctx->block_size = DES_BLOCK_SIZE;
		break;
	}

	rctx->mode = mode;

	if ((mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB &&
	    !(mode & TDES_FLAGS_ENCRYPT) && req->src == req->dst) {
		unsigned int ivsize = crypto_skcipher_ivsize(skcipher);

		if (req->cryptlen >= ivsize)
			scatterwalk_map_and_copy(rctx->lastc, req->src,
						 req->cryptlen - ivsize,
						 ivsize, 0);
	}

	return atmel_tdes_handle_queue(ctx->dd, req);
}
static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd)
{
	int ret;

	/* Try to grab 2 DMA channels */
	dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx");
	if (IS_ERR(dd->dma_lch_in.chan)) {
		ret = PTR_ERR(dd->dma_lch_in.chan);
		goto err_dma_in;
	}

	dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
		TDES_IDATA1R;
	dd->dma_lch_in.dma_conf.src_maxburst = 1;
	dd->dma_lch_in.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.dst_maxburst = 1;
	dd->dma_lch_in.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.device_fc = false;

	dd->dma_lch_out.chan = dma_request_chan(dd->dev, "rx");
	if (IS_ERR(dd->dma_lch_out.chan)) {
		ret = PTR_ERR(dd->dma_lch_out.chan);
		goto err_dma_out;
	}

	dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
		TDES_ODATA1R;
	dd->dma_lch_out.dma_conf.src_maxburst = 1;
	dd->dma_lch_out.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_out.dma_conf.dst_maxburst = 1;
	dd->dma_lch_out.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_out.dma_conf.device_fc = false;

	return 0;

err_dma_out:
	dma_release_channel(dd->dma_lch_in.chan);
err_dma_in:
	dev_err(dd->dev, "no DMA channel available\n");
	return ret;
}
static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
{
	dma_release_channel(dd->dma_lch_in.chan);
	dma_release_channel(dd->dma_lch_out.chan);
}
static int atmel_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des_key(tfm, key);
	if (err)
		return err;

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int atmel_tdes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des3_key(tfm, key);
	if (err)
		return err;

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}
static int atmel_tdes_ecb_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_ECB | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_ecb_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_ECB);
}

static int atmel_tdes_cbc_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CBC | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_cbc_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CBC);
}

static int atmel_tdes_cfb_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB64 | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_cfb_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB64);
}

static int atmel_tdes_cfb8_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB8 | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_cfb8_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB8);
}

static int atmel_tdes_cfb16_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB16 | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_cfb16_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB16);
}

static int atmel_tdes_cfb32_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB32 | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_cfb32_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CFB32);
}

static int atmel_tdes_ofb_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_OFB | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_ofb_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_OFB);
}
static int atmel_tdes_init_tfm(struct crypto_skcipher *tfm)
{
	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct atmel_tdes_dev *dd;

	crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_tdes_reqctx));

	dd = atmel_tdes_find_dev(ctx);
	if (!dd)
		return -ENODEV;

	return 0;
}
static void atmel_tdes_skcipher_alg_init(struct skcipher_alg *alg)
{
	alg->base.cra_priority = ATMEL_TDES_PRIORITY;
	alg->base.cra_flags = CRYPTO_ALG_ASYNC;
	alg->base.cra_ctxsize = sizeof(struct atmel_tdes_ctx);
	alg->base.cra_module = THIS_MODULE;

	alg->init = atmel_tdes_init_tfm;
}
static struct skcipher_alg tdes_algs[] = {
{
	.base.cra_name		= "ecb(des)",
	.base.cra_driver_name	= "atmel-ecb-des",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_ecb_encrypt,
	.decrypt		= atmel_tdes_ecb_decrypt,
},
{
	.base.cra_name		= "cbc(des)",
	.base.cra_driver_name	= "atmel-cbc-des",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_cbc_encrypt,
	.decrypt		= atmel_tdes_cbc_decrypt,
},
{
	.base.cra_name		= "cfb(des)",
	.base.cra_driver_name	= "atmel-cfb-des",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_cfb_encrypt,
	.decrypt		= atmel_tdes_cfb_decrypt,
},
{
	.base.cra_name		= "cfb8(des)",
	.base.cra_driver_name	= "atmel-cfb8-des",
	.base.cra_blocksize	= CFB8_BLOCK_SIZE,
	.base.cra_alignmask	= 0,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_cfb8_encrypt,
	.decrypt		= atmel_tdes_cfb8_decrypt,
},
{
	.base.cra_name		= "cfb16(des)",
	.base.cra_driver_name	= "atmel-cfb16-des",
	.base.cra_blocksize	= CFB16_BLOCK_SIZE,
	.base.cra_alignmask	= 0x1,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_cfb16_encrypt,
	.decrypt		= atmel_tdes_cfb16_decrypt,
},
{
	.base.cra_name		= "cfb32(des)",
	.base.cra_driver_name	= "atmel-cfb32-des",
	.base.cra_blocksize	= CFB32_BLOCK_SIZE,
	.base.cra_alignmask	= 0x3,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_cfb32_encrypt,
	.decrypt		= atmel_tdes_cfb32_decrypt,
},
{
	.base.cra_name		= "ofb(des)",
	.base.cra_driver_name	= "atmel-ofb-des",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_ofb_encrypt,
	.decrypt		= atmel_tdes_ofb_decrypt,
},
{
	.base.cra_name		= "ecb(des3_ede)",
	.base.cra_driver_name	= "atmel-ecb-tdes",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES3_EDE_KEY_SIZE,
	.max_keysize		= DES3_EDE_KEY_SIZE,
	.setkey			= atmel_tdes_setkey,
	.encrypt		= atmel_tdes_ecb_encrypt,
	.decrypt		= atmel_tdes_ecb_decrypt,
},
{
	.base.cra_name		= "cbc(des3_ede)",
	.base.cra_driver_name	= "atmel-cbc-tdes",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES3_EDE_KEY_SIZE,
	.max_keysize		= DES3_EDE_KEY_SIZE,
	.setkey			= atmel_tdes_setkey,
	.encrypt		= atmel_tdes_cbc_encrypt,
	.decrypt		= atmel_tdes_cbc_decrypt,
	.ivsize			= DES_BLOCK_SIZE,
},
{
	.base.cra_name		= "ofb(des3_ede)",
	.base.cra_driver_name	= "atmel-ofb-tdes",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES3_EDE_KEY_SIZE,
	.max_keysize		= DES3_EDE_KEY_SIZE,
	.setkey			= atmel_tdes_setkey,
	.encrypt		= atmel_tdes_ofb_encrypt,
	.decrypt		= atmel_tdes_ofb_decrypt,
	.ivsize			= DES_BLOCK_SIZE,
},
};
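
/*
 * Example (not part of this driver): once registered, these algorithms are
 * reachable through the generic skcipher API, so a hypothetical in-kernel
 * caller could do
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("cbc(des3_ede)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_skcipher_setkey(tfm, key, DES3_EDE_KEY_SIZE);
 *	...
 *	crypto_free_skcipher(tfm);
 *
 * and the crypto core will pick "atmel-cbc-tdes" whenever this driver
 * offers the highest-priority implementation of that algorithm.
 */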
static void atmel_tdes_queue_task(unsigned long data)
{
	struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *)data;

	atmel_tdes_handle_queue(dd, NULL);
}
static void atmel_tdes_done_task(unsigned long data)
{
	struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *)data;
	int err;

	if (!(dd->flags & TDES_FLAGS_DMA))
		err = atmel_tdes_crypt_pdc_stop(dd);
	else
		err = atmel_tdes_crypt_dma_stop(dd);

	if (dd->total && !err) {
		if (dd->flags & TDES_FLAGS_FAST) {
			dd->in_sg = sg_next(dd->in_sg);
			dd->out_sg = sg_next(dd->out_sg);
			if (!dd->in_sg || !dd->out_sg)
				err = -EINVAL;
		}
		if (!err)
			err = atmel_tdes_crypt_start(dd);
		if (!err)
			return; /* DMA started. Not finishing. */
	}

	atmel_tdes_finish_req(dd, err);
	atmel_tdes_handle_queue(dd, NULL);
}
static irqreturn_t atmel_tdes_irq(int irq, void *dev_id)
{
	struct atmel_tdes_dev *tdes_dd = dev_id;
	u32 reg;

	reg = atmel_tdes_read(tdes_dd, TDES_ISR);
	if (reg & atmel_tdes_read(tdes_dd, TDES_IMR)) {
		atmel_tdes_write(tdes_dd, TDES_IDR, reg);
		if (TDES_FLAGS_BUSY & tdes_dd->flags)
			tasklet_schedule(&tdes_dd->done_task);
		else
			dev_warn(tdes_dd->dev, "TDES interrupt when no active requests.\n");
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
static void atmel_tdes_unregister_algs(struct atmel_tdes_dev *dd)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tdes_algs); i++)
		crypto_unregister_skcipher(&tdes_algs[i]);
}

static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
{
	int err, i, j;

	for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) {
		atmel_tdes_skcipher_alg_init(&tdes_algs[i]);

		err = crypto_register_skcipher(&tdes_algs[i]);
		if (err)
			goto err_tdes_algs;
	}

	return 0;

err_tdes_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_skcipher(&tdes_algs[j]);

	return err;
}
static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd)
{
	dd->caps.has_dma = 0;
	dd->caps.has_cfb_3keys = 0;

	/* keep only major version number */
	switch (dd->hw_version & 0xf00) {
	case 0x700:
		dd->caps.has_dma = 1;
		dd->caps.has_cfb_3keys = 1;
		break;
	case 0x600:
		break;
	default:
		dev_warn(dd->dev,
			 "Unmanaged tdes version, set minimum capabilities\n");
		break;
	}
}
#if defined(CONFIG_OF)
static const struct of_device_id atmel_tdes_dt_ids[] = {
	{ .compatible = "atmel,at91sam9g46-tdes" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_tdes_dt_ids);
#endif
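
/*
 * Probe: map the MMIO region, request the IRQ and peripheral clock, read
 * the hardware version to derive capabilities, set up the bounce buffers
 * and (when available) the DMA channels, then register the skcipher
 * algorithms.  The error path unwinds in the reverse order.
 */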
static int atmel_tdes_probe(struct platform_device *pdev)
{
	struct atmel_tdes_dev *tdes_dd;
	struct device *dev = &pdev->dev;
	struct resource *tdes_res;
	int err;

	tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL);
	if (!tdes_dd)
		return -ENOMEM;

	tdes_dd->dev = dev;

	platform_set_drvdata(pdev, tdes_dd);

	INIT_LIST_HEAD(&tdes_dd->list);
	spin_lock_init(&tdes_dd->lock);

	tasklet_init(&tdes_dd->done_task, atmel_tdes_done_task,
		     (unsigned long)tdes_dd);
	tasklet_init(&tdes_dd->queue_task, atmel_tdes_queue_task,
		     (unsigned long)tdes_dd);

	crypto_init_queue(&tdes_dd->queue, ATMEL_TDES_QUEUE_LENGTH);

	/* Get the base address */
	tdes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!tdes_res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto err_tasklet_kill;
	}
	tdes_dd->phys_base = tdes_res->start;

	/* Get the IRQ */
	tdes_dd->irq = platform_get_irq(pdev, 0);
	if (tdes_dd->irq < 0) {
		err = tdes_dd->irq;
		goto err_tasklet_kill;
	}

	err = devm_request_irq(&pdev->dev, tdes_dd->irq, atmel_tdes_irq,
			       IRQF_SHARED, "atmel-tdes", tdes_dd);
	if (err) {
		dev_err(dev, "unable to request tdes irq.\n");
		goto err_tasklet_kill;
	}

	/* Initializing the clock */
	tdes_dd->iclk = devm_clk_get(&pdev->dev, "tdes_clk");
	if (IS_ERR(tdes_dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(tdes_dd->iclk);
		goto err_tasklet_kill;
	}

	tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
	if (IS_ERR(tdes_dd->io_base)) {
		dev_err(dev, "can't ioremap\n");
		err = PTR_ERR(tdes_dd->io_base);
		goto err_tasklet_kill;
	}

	err = atmel_tdes_hw_version_init(tdes_dd);
	if (err)
		goto err_tasklet_kill;

	atmel_tdes_get_cap(tdes_dd);

	err = atmel_tdes_buff_init(tdes_dd);
	if (err)
		goto err_tasklet_kill;

	if (tdes_dd->caps.has_dma) {
		err = atmel_tdes_dma_init(tdes_dd);
		if (err)
			goto err_buff_cleanup;

		dev_info(dev, "using %s, %s for DMA transfers\n",
			 dma_chan_name(tdes_dd->dma_lch_in.chan),
			 dma_chan_name(tdes_dd->dma_lch_out.chan));
	}

	spin_lock(&atmel_tdes.lock);
	list_add_tail(&tdes_dd->list, &atmel_tdes.dev_list);
	spin_unlock(&atmel_tdes.lock);

	err = atmel_tdes_register_algs(tdes_dd);
	if (err)
		goto err_algs;

	dev_info(dev, "Atmel DES/TDES\n");

	return 0;

err_algs:
	spin_lock(&atmel_tdes.lock);
	list_del(&tdes_dd->list);
	spin_unlock(&atmel_tdes.lock);
	if (tdes_dd->caps.has_dma)
		atmel_tdes_dma_cleanup(tdes_dd);
err_buff_cleanup:
	atmel_tdes_buff_cleanup(tdes_dd);
err_tasklet_kill:
	tasklet_kill(&tdes_dd->done_task);
	tasklet_kill(&tdes_dd->queue_task);

	return err;
}
static int atmel_tdes_remove(struct platform_device *pdev)
{
	struct atmel_tdes_dev *tdes_dd;

	tdes_dd = platform_get_drvdata(pdev);
	if (!tdes_dd)
		return -ENODEV;

	spin_lock(&atmel_tdes.lock);
	list_del(&tdes_dd->list);
	spin_unlock(&atmel_tdes.lock);

	atmel_tdes_unregister_algs(tdes_dd);

	tasklet_kill(&tdes_dd->done_task);
	tasklet_kill(&tdes_dd->queue_task);

	if (tdes_dd->caps.has_dma)
		atmel_tdes_dma_cleanup(tdes_dd);

	atmel_tdes_buff_cleanup(tdes_dd);

	return 0;
}
static struct platform_driver atmel_tdes_driver = {
	.probe		= atmel_tdes_probe,
	.remove		= atmel_tdes_remove,
	.driver		= {
		.name	= "atmel_tdes",
		.of_match_table = of_match_ptr(atmel_tdes_dt_ids),
	},
};

module_platform_driver(atmel_tdes_driver);
MODULE_DESCRIPTION("Atmel DES/TDES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");