// SPDX-License-Identifier: GPL-2.0
/*
 * Support for ATMEL DES/TDES HW acceleration.
 *
 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
 * Author: Nicolas Royer <nicolas@eukrea.com>
 *
 * Some ideas are from omap-aes.c drivers.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/mod_devicetable.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
#include "atmel-tdes-regs.h"

#define ATMEL_TDES_PRIORITY	300

/* Reserve bits [17:16], [13:12], [2:0] for the TDES Mode Register */
#define TDES_FLAGS_ENCRYPT	TDES_MR_CYPHER_ENC
#define TDES_FLAGS_OPMODE_MASK	(TDES_MR_OPMOD_MASK | TDES_MR_CFBS_MASK)
#define TDES_FLAGS_ECB		TDES_MR_OPMOD_ECB
#define TDES_FLAGS_CBC		TDES_MR_OPMOD_CBC

#define TDES_FLAGS_MODE_MASK	(TDES_FLAGS_OPMODE_MASK | TDES_FLAGS_ENCRYPT)

#define TDES_FLAGS_INIT		BIT(3)
#define TDES_FLAGS_FAST		BIT(4)
#define TDES_FLAGS_BUSY		BIT(5)
#define TDES_FLAGS_DMA		BIT(6)

#define ATMEL_TDES_QUEUE_LENGTH	50

struct atmel_tdes_caps {
	bool	has_dma;
};

struct atmel_tdes_dev;

struct atmel_tdes_ctx {
	struct atmel_tdes_dev	*dd;

	int			keylen;
	u32			key[DES3_EDE_KEY_SIZE / sizeof(u32)];

	u16			block_size;
};

struct atmel_tdes_reqctx {
	unsigned long	mode;
	u8		lastc[DES_BLOCK_SIZE];
};

struct atmel_tdes_dma {
	struct dma_chan			*chan;
	struct dma_slave_config		dma_conf;
};

struct atmel_tdes_dev {
	struct list_head	list;
	unsigned long		phys_base;
	void __iomem		*io_base;

	struct atmel_tdes_ctx	*ctx;
	struct device		*dev;
	struct clk		*iclk;
	int			irq;

	unsigned long		flags;

	spinlock_t		lock;
	struct crypto_queue	queue;

	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	struct skcipher_request	*req;
	size_t			total;

	struct scatterlist	*in_sg;
	unsigned int		nb_in_sg;
	size_t			in_offset;
	struct scatterlist	*out_sg;
	unsigned int		nb_out_sg;
	size_t			out_offset;

	size_t			buflen;
	size_t			dma_size;

	void			*buf_in;
	dma_addr_t		dma_addr_in;
	struct atmel_tdes_dma	dma_lch_in;

	void			*buf_out;
	dma_addr_t		dma_addr_out;
	struct atmel_tdes_dma	dma_lch_out;

	struct atmel_tdes_caps	caps;

	u32			hw_version;
};

struct atmel_tdes_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
};

static struct atmel_tdes_drv atmel_tdes = {
	.dev_list = LIST_HEAD_INIT(atmel_tdes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_tdes.lock),
};

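/*
 * Copy up to @buflen bytes between the scatterlist pointed to by @sg and the
 * linear buffer @buf (@out selects the direction), advancing *sg and *offset
 * as data is consumed. Used by the bounce-buffer (non-"fast") path below.
 */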
static int atmel_tdes_sg_copy(struct scatterlist **sg, size_t *offset,
			      void *buf, size_t buflen, size_t total, int out)
{
	size_t count, off = 0;

	while (buflen && total) {
		count = min((*sg)->length - *offset, total);
		count = min(count, buflen);

		scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out);

		off += count;
		buflen -= count;
		*offset += count;
		total -= count;

		if (*offset == (*sg)->length) {
			*sg = sg_next(*sg);
			if (*sg)
				*offset = 0;
			else
				total = 0;
		}
	}

	return off;
}

static inline u32 atmel_tdes_read(struct atmel_tdes_dev *dd, u32 offset)
{
	return readl_relaxed(dd->io_base + offset);
}

static inline void atmel_tdes_write(struct atmel_tdes_dev *dd,
					u32 offset, u32 value)
{
	writel_relaxed(value, dd->io_base + offset);
}

static void atmel_tdes_write_n(struct atmel_tdes_dev *dd, u32 offset,
			       const u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		atmel_tdes_write(dd, offset, *value);
}

static struct atmel_tdes_dev *atmel_tdes_dev_alloc(void)
{
	struct atmel_tdes_dev *tdes_dd;

	spin_lock_bh(&atmel_tdes.lock);
	/* One TDES IP per SoC. */
	tdes_dd = list_first_entry_or_null(&atmel_tdes.dev_list,
					   struct atmel_tdes_dev, list);
	spin_unlock_bh(&atmel_tdes.lock);

	return tdes_dd;
}

static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd)
{
	int err;

	err = clk_prepare_enable(dd->iclk);
	if (err)
		return err;

	if (!(dd->flags & TDES_FLAGS_INIT)) {
		atmel_tdes_write(dd, TDES_CR, TDES_CR_SWRST);
		dd->flags |= TDES_FLAGS_INIT;
	}

	return 0;
}

static inline unsigned int atmel_tdes_get_version(struct atmel_tdes_dev *dd)
{
	return atmel_tdes_read(dd, TDES_HW_VERSION) & 0x00000fff;
}

static int atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd)
{
	int err;

	err = atmel_tdes_hw_init(dd);
	if (err)
		return err;

	dd->hw_version = atmel_tdes_get_version(dd);

	dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);

	clk_disable_unprepare(dd->iclk);

	return 0;
}

static void atmel_tdes_dma_callback(void *data)
{
	struct atmel_tdes_dev *dd = data;

	/* dma_lch_out - completed */
	tasklet_schedule(&dd->done_task);
}

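/*
 * Program the TDES block for the current request: select PDC start mode,
 * pick DES vs. TDES and 2-key vs. 3-key operation from the key length,
 * merge in the opmode/encrypt flags, then load the key and (for non-ECB
 * modes) the IV. The Mode Register must be written before the IV registers.
 */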
static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
{
	int err;
	u32 valmr = TDES_MR_SMOD_PDC;

	err = atmel_tdes_hw_init(dd);
	if (err)
		return err;

	if (!dd->caps.has_dma)
		atmel_tdes_write(dd, TDES_PTCR,
			TDES_PTCR_TXTDIS | TDES_PTCR_RXTDIS);

	/* MR register must be set before IV registers */
	if (dd->ctx->keylen > (DES_KEY_SIZE << 1)) {
		valmr |= TDES_MR_KEYMOD_3KEY;
		valmr |= TDES_MR_TDESMOD_TDES;
	} else if (dd->ctx->keylen > DES_KEY_SIZE) {
		valmr |= TDES_MR_KEYMOD_2KEY;
		valmr |= TDES_MR_TDESMOD_TDES;
	} else {
		valmr |= TDES_MR_TDESMOD_DES;
	}

	valmr |= dd->flags & TDES_FLAGS_MODE_MASK;

	atmel_tdes_write(dd, TDES_MR, valmr);

	atmel_tdes_write_n(dd, TDES_KEY1W1R, dd->ctx->key,
			   dd->ctx->keylen >> 2);

	if (dd->req->iv && (valmr & TDES_MR_OPMOD_MASK) != TDES_MR_OPMOD_ECB)
		atmel_tdes_write_n(dd, TDES_IV1R, (void *)dd->req->iv, 2);

	return 0;
}

static int atmel_tdes_crypt_pdc_stop(struct atmel_tdes_dev *dd)
{
	int err = 0;
	size_t count;

	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);

	if (dd->flags & TDES_FLAGS_FAST) {
		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
	} else {
		dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
					   dd->dma_size, DMA_FROM_DEVICE);

		/* copy data */
		count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
					   dd->buf_out, dd->buflen,
					   dd->dma_size, 1);
		if (count != dd->dma_size) {
			err = -EINVAL;
			dev_dbg(dd->dev, "not all data converted: %zu\n", count);
		}
	}

	return err;
}

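/*
 * Allocate and DMA-map one page each for the input and output bounce buffers
 * used when the request scatterlists are not suitable for direct DMA.
 * buflen is rounded down to a whole number of DES blocks.
 */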
static int atmel_tdes_buff_init(struct atmel_tdes_dev *dd)
{
	int err = -ENOMEM;

	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
	dd->buflen = PAGE_SIZE;
	dd->buflen &= ~(DES_BLOCK_SIZE - 1);

	if (!dd->buf_in || !dd->buf_out) {
		dev_dbg(dd->dev, "unable to alloc pages.\n");
		goto err_alloc;
	}

	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
					 dd->buflen, DMA_TO_DEVICE);
	err = dma_mapping_error(dd->dev, dd->dma_addr_in);
	if (err) {
		dev_dbg(dd->dev, "dma %zd bytes error\n", dd->buflen);
		goto err_map_in;
	}

	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
					  dd->buflen, DMA_FROM_DEVICE);
	err = dma_mapping_error(dd->dev, dd->dma_addr_out);
	if (err) {
		dev_dbg(dd->dev, "dma %zd bytes error\n", dd->buflen);
		goto err_map_out;
	}

	return 0;

err_map_out:
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
			 DMA_TO_DEVICE);
err_map_in:
err_alloc:
	free_page((unsigned long)dd->buf_out);
	free_page((unsigned long)dd->buf_in);

	return err;
}

static void atmel_tdes_buff_cleanup(struct atmel_tdes_dev *dd)
{
	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
			 DMA_FROM_DEVICE);
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
			 DMA_TO_DEVICE);
	free_page((unsigned long)dd->buf_out);
	free_page((unsigned long)dd->buf_in);
}

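/*
 * Start a transfer through the Peripheral DMA Controller (PDC): program the
 * transmit/receive pointer and counter registers, enable the ENDRX interrupt
 * and kick off the transfer.
 */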
static int atmel_tdes_crypt_pdc(struct atmel_tdes_dev *dd,
				dma_addr_t dma_addr_in,
				dma_addr_t dma_addr_out, int length)
{
	int len32;

	dd->dma_size = length;

	if (!(dd->flags & TDES_FLAGS_FAST)) {
		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
					   DMA_TO_DEVICE);
	}

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
	atmel_tdes_write(dd, TDES_TPR, dma_addr_in);
	atmel_tdes_write(dd, TDES_TCR, len32);
	atmel_tdes_write(dd, TDES_RPR, dma_addr_out);
	atmel_tdes_write(dd, TDES_RCR, len32);

	/* Enable Interrupt */
	atmel_tdes_write(dd, TDES_IER, TDES_INT_ENDRX);

	/* Start DMA transfer */
	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTEN | TDES_PTCR_RXTEN);

	return 0;
}

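/*
 * Start a transfer through the generic dmaengine API (used when the IP has a
 * DMA interface instead of the PDC): one slave channel feeds the input data
 * register, the other drains the output register, and completion is
 * signalled from the "out" descriptor callback.
 */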
static int atmel_tdes_crypt_dma(struct atmel_tdes_dev *dd,
				dma_addr_t dma_addr_in,
				dma_addr_t dma_addr_out, int length)
{
	struct scatterlist sg[2];
	struct dma_async_tx_descriptor	*in_desc, *out_desc;
	enum dma_slave_buswidth addr_width;

	dd->dma_size = length;

	if (!(dd->flags & TDES_FLAGS_FAST)) {
		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
					   DMA_TO_DEVICE);
	}

	addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	dd->dma_lch_in.dma_conf.dst_addr_width = addr_width;
	dd->dma_lch_out.dma_conf.src_addr_width = addr_width;

	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
	dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);

	dd->flags |= TDES_FLAGS_DMA;

	sg_init_table(&sg[0], 1);
	sg_dma_address(&sg[0]) = dma_addr_in;
	sg_dma_len(&sg[0]) = length;

	sg_init_table(&sg[1], 1);
	sg_dma_address(&sg[1]) = dma_addr_out;
	sg_dma_len(&sg[1]) = length;

	in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
					  1, DMA_MEM_TO_DEV,
					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!in_desc)
		return -EINVAL;

	out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
					   1, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!out_desc)
		return -EINVAL;

	out_desc->callback = atmel_tdes_dma_callback;
	out_desc->callback_param = dd;

	dmaengine_submit(out_desc);
	dma_async_issue_pending(dd->dma_lch_out.chan);

	dmaengine_submit(in_desc);
	dma_async_issue_pending(dd->dma_lch_in.chan);

	return 0;
}

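/*
 * Pick the data path for the next chunk: if both scatterlists are word
 * aligned, block-sized and of equal DMA length, map them directly
 * ("fast" path); otherwise copy through the pre-mapped bounce buffers.
 * The chunk is then handed to either the dmaengine or the PDC backend.
 */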
static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
{
	int err, fast = 0, in, out;
	size_t count;
	dma_addr_t addr_in, addr_out;

	if ((!dd->in_offset) && (!dd->out_offset)) {
		/* check for alignment */
		in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
			IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
		out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
			IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
		fast = in && out;

		if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
			fast = 0;
	}

	if (fast) {
		count = min_t(size_t, dd->total, sg_dma_len(dd->in_sg));
		count = min_t(size_t, count, sg_dma_len(dd->out_sg));

		err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		if (!err) {
			dev_dbg(dd->dev, "dma_map_sg() error\n");
			return -EINVAL;
		}

		err = dma_map_sg(dd->dev, dd->out_sg, 1,
				 DMA_FROM_DEVICE);
		if (!err) {
			dev_dbg(dd->dev, "dma_map_sg() error\n");
			dma_unmap_sg(dd->dev, dd->in_sg, 1,
				     DMA_TO_DEVICE);
			return -EINVAL;
		}

		addr_in = sg_dma_address(dd->in_sg);
		addr_out = sg_dma_address(dd->out_sg);

		dd->flags |= TDES_FLAGS_FAST;

	} else {
		/* use cache buffers */
		count = atmel_tdes_sg_copy(&dd->in_sg, &dd->in_offset,
					   dd->buf_in, dd->buflen, dd->total, 0);

		addr_in = dd->dma_addr_in;
		addr_out = dd->dma_addr_out;

		dd->flags &= ~TDES_FLAGS_FAST;
	}

	dd->total -= count;

	if (dd->caps.has_dma)
		err = atmel_tdes_crypt_dma(dd, addr_in, addr_out, count);
	else
		err = atmel_tdes_crypt_pdc(dd, addr_in, addr_out, count);

	if (err && (dd->flags & TDES_FLAGS_FAST)) {
		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
	}

	return err;
}

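/*
 * The skcipher API expects req->iv to hold the last ciphertext block after a
 * CBC operation so that requests can be chained; take it from the destination
 * buffer on encryption, or from the copy saved in the request context before
 * a (possibly in-place) decryption overwrote the source.
 */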
static void
atmel_tdes_set_iv_as_last_ciphertext_block(struct atmel_tdes_dev *dd)
{
	struct skcipher_request *req = dd->req;
	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);

	if (req->cryptlen < ivsize)
		return;

	if (rctx->mode & TDES_FLAGS_ENCRYPT)
		scatterwalk_map_and_copy(req->iv, req->dst,
					 req->cryptlen - ivsize, ivsize, 0);
	else
		memcpy(req->iv, rctx->lastc, ivsize);
}

static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
{
	struct skcipher_request *req = dd->req;
	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);

	clk_disable_unprepare(dd->iclk);

	dd->flags &= ~TDES_FLAGS_BUSY;

	if (!err && (rctx->mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB)
		atmel_tdes_set_iv_as_last_ciphertext_block(dd);

	skcipher_request_complete(req, err);
}

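/*
 * Enqueue a new request (if any) and, unless the device is already busy,
 * dequeue the next one, mark the device busy and start processing it.
 * Called from the crypt entry points and from the queue tasklet.
 */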
static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
				   struct skcipher_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct atmel_tdes_ctx *ctx;
	struct atmel_tdes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = crypto_enqueue_request(&dd->queue, &req->base);
	if (dd->flags & TDES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= TDES_FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		crypto_request_complete(backlog, -EINPROGRESS);

	req = skcipher_request_cast(async_req);

	/* assign new request to device */
	dd->req = req;
	dd->total = req->cryptlen;
	dd->in_offset = 0;
	dd->in_sg = req->src;
	dd->out_offset = 0;
	dd->out_sg = req->dst;

	rctx = skcipher_request_ctx(req);
	ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	rctx->mode &= TDES_FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~TDES_FLAGS_MODE_MASK) | rctx->mode;
	dd->ctx = ctx;

	err = atmel_tdes_write_ctrl(dd);
	if (!err)
		err = atmel_tdes_crypt_start(dd);
	if (err) {
		/* des_task will not finish it, so do it here */
		atmel_tdes_finish_req(dd, err);
		tasklet_schedule(&dd->queue_task);
	}

	return ret;
}

static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
{
	int err = 0;
	size_t count;

	if (dd->flags & TDES_FLAGS_DMA) {
		if (dd->flags & TDES_FLAGS_FAST) {
			dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		} else {
			dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
						   dd->dma_size, DMA_FROM_DEVICE);

			/* copy data */
			count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
						   dd->buf_out, dd->buflen,
						   dd->dma_size, 1);
			if (count != dd->dma_size) {
				err = -EINVAL;
				dev_dbg(dd->dev, "not all data converted: %zu\n", count);
			}
		}
	}

	return err;
}

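/*
 * Common entry point for the ECB/CBC encrypt and decrypt operations. The
 * request length must be a multiple of the DES block size. For non-ECB
 * decryption the last ciphertext block is saved first, so the IV can be
 * restored once the (possibly in-place) operation has completed.
 */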
static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
	struct device *dev = ctx->dd->dev;

	if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
		dev_dbg(dev, "request size is not exact amount of DES blocks\n");
		return -EINVAL;
	}
	ctx->block_size = DES_BLOCK_SIZE;

	rctx->mode = mode;

	if ((mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB &&
	    !(mode & TDES_FLAGS_ENCRYPT)) {
		unsigned int ivsize = crypto_skcipher_ivsize(skcipher);

		if (req->cryptlen >= ivsize)
			scatterwalk_map_and_copy(rctx->lastc, req->src,
						 req->cryptlen - ivsize,
						 ivsize, 0);
	}

	return atmel_tdes_handle_queue(ctx->dd, req);
}

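/*
 * Request the "tx" (memory to TDES) and "rx" (TDES to memory) slave channels
 * and preset the parts of their configuration that do not depend on the
 * request.
 */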
static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd)
{
	int ret;

	/* Try to grab 2 DMA channels */
	dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx");
	if (IS_ERR(dd->dma_lch_in.chan)) {
		ret = PTR_ERR(dd->dma_lch_in.chan);
		goto err_dma_in;
	}

	dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
		TDES_IDATA1R;
	dd->dma_lch_in.dma_conf.src_maxburst = 1;
	dd->dma_lch_in.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.dst_maxburst = 1;
	dd->dma_lch_in.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.device_fc = false;

	dd->dma_lch_out.chan = dma_request_chan(dd->dev, "rx");
	if (IS_ERR(dd->dma_lch_out.chan)) {
		ret = PTR_ERR(dd->dma_lch_out.chan);
		goto err_dma_out;
	}

	dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
		TDES_ODATA1R;
	dd->dma_lch_out.dma_conf.src_maxburst = 1;
	dd->dma_lch_out.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_out.dma_conf.dst_maxburst = 1;
	dd->dma_lch_out.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_out.dma_conf.device_fc = false;

	return 0;

err_dma_out:
	dma_release_channel(dd->dma_lch_in.chan);
err_dma_in:
	dev_err(dd->dev, "no DMA channel available\n");
	return ret;
}

static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
{
	dma_release_channel(dd->dma_lch_in.chan);
	dma_release_channel(dd->dma_lch_out.chan);
}

static int atmel_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des_key(tfm, key);
	if (err)
		return err;

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int atmel_tdes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des3_key(tfm, key);
	if (err)
		return err;

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int atmel_tdes_ecb_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_ECB | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_ecb_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_ECB);
}

static int atmel_tdes_cbc_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CBC | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_cbc_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CBC);
}

static int atmel_tdes_init_tfm(struct crypto_skcipher *tfm)
{
	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->dd = atmel_tdes_dev_alloc();
	if (!ctx->dd)
		return -ENODEV;

	crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_tdes_reqctx));

	return 0;
}

static void atmel_tdes_skcipher_alg_init(struct skcipher_alg *alg)
{
	alg->base.cra_priority = ATMEL_TDES_PRIORITY;
	alg->base.cra_flags = CRYPTO_ALG_ASYNC;
	alg->base.cra_ctxsize = sizeof(struct atmel_tdes_ctx);
	alg->base.cra_module = THIS_MODULE;

	alg->init = atmel_tdes_init_tfm;
}

static struct skcipher_alg tdes_algs[] = {
{
	.base.cra_name		= "ecb(des)",
	.base.cra_driver_name	= "atmel-ecb-des",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_ecb_encrypt,
	.decrypt		= atmel_tdes_ecb_decrypt,
},
{
	.base.cra_name		= "cbc(des)",
	.base.cra_driver_name	= "atmel-cbc-des",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_cbc_encrypt,
	.decrypt		= atmel_tdes_cbc_decrypt,
},
{
	.base.cra_name		= "ecb(des3_ede)",
	.base.cra_driver_name	= "atmel-ecb-tdes",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES3_EDE_KEY_SIZE,
	.max_keysize		= DES3_EDE_KEY_SIZE,
	.setkey			= atmel_tdes_setkey,
	.encrypt		= atmel_tdes_ecb_encrypt,
	.decrypt		= atmel_tdes_ecb_decrypt,
},
{
	.base.cra_name		= "cbc(des3_ede)",
	.base.cra_driver_name	= "atmel-cbc-tdes",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES3_EDE_KEY_SIZE,
	.max_keysize		= DES3_EDE_KEY_SIZE,
	.setkey			= atmel_tdes_setkey,
	.encrypt		= atmel_tdes_cbc_encrypt,
	.decrypt		= atmel_tdes_cbc_decrypt,
	.ivsize			= DES_BLOCK_SIZE,
},
};

static void atmel_tdes_queue_task(unsigned long data)
{
	struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *)data;

	atmel_tdes_handle_queue(dd, NULL);
}

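/*
 * Completion tasklet: unmap or copy back the finished chunk, start the next
 * chunk if the request is not fully processed yet, and otherwise complete
 * the request and look for new work on the queue.
 */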
static void atmel_tdes_done_task(unsigned long data)
{
	struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *) data;
	int err;

	if (!(dd->flags & TDES_FLAGS_DMA))
		err = atmel_tdes_crypt_pdc_stop(dd);
	else
		err = atmel_tdes_crypt_dma_stop(dd);

	if (dd->total && !err) {
		if (dd->flags & TDES_FLAGS_FAST) {
			dd->in_sg = sg_next(dd->in_sg);
			dd->out_sg = sg_next(dd->out_sg);
			if (!dd->in_sg || !dd->out_sg)
				err = -EINVAL;
		}
		if (!err)
			err = atmel_tdes_crypt_start(dd);
		if (!err)
			return; /* DMA started. Not finishing. */
	}

	atmel_tdes_finish_req(dd, err);
	atmel_tdes_handle_queue(dd, NULL);
}

static irqreturn_t atmel_tdes_irq(int irq, void *dev_id)
{
	struct atmel_tdes_dev *tdes_dd = dev_id;
	u32 reg;

	reg = atmel_tdes_read(tdes_dd, TDES_ISR);
	if (reg & atmel_tdes_read(tdes_dd, TDES_IMR)) {
		atmel_tdes_write(tdes_dd, TDES_IDR, reg);
		if (TDES_FLAGS_BUSY & tdes_dd->flags)
			tasklet_schedule(&tdes_dd->done_task);
		else
			dev_warn(tdes_dd->dev, "TDES interrupt when no active requests.\n");
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static void atmel_tdes_unregister_algs(struct atmel_tdes_dev *dd)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tdes_algs); i++)
		crypto_unregister_skcipher(&tdes_algs[i]);
}

static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
{
	int err, i, j;

	for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) {
		atmel_tdes_skcipher_alg_init(&tdes_algs[i]);

		err = crypto_register_skcipher(&tdes_algs[i]);
		if (err)
			goto err_tdes_algs;
	}

	return 0;

err_tdes_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_skcipher(&tdes_algs[j]);

	return err;
}

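/*
 * Derive the device capabilities from the major number of the hardware
 * version read at probe time; DMA support depends on the IP revision.
 */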
static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd)
{
	dd->caps.has_dma = 0;

	/* keep only major version number */
	switch (dd->hw_version & 0xf00) {
	case 0x800:
	case 0x700:
		dd->caps.has_dma = 1;
		break;
	case 0x600:
		break;
	default:
		dev_warn(dd->dev,
			 "Unmanaged tdes version, set minimum capabilities\n");
		break;
	}
}

static const struct of_device_id atmel_tdes_dt_ids[] = {
	{ .compatible = "atmel,at91sam9g46-tdes" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_tdes_dt_ids);

static int atmel_tdes_probe(struct platform_device *pdev)
{
	struct atmel_tdes_dev *tdes_dd;
	struct device *dev = &pdev->dev;
	struct resource *tdes_res;
	int err;

	tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL);
	if (!tdes_dd)
		return -ENOMEM;

	tdes_dd->dev = dev;

	platform_set_drvdata(pdev, tdes_dd);

	INIT_LIST_HEAD(&tdes_dd->list);
	spin_lock_init(&tdes_dd->lock);

	tasklet_init(&tdes_dd->done_task, atmel_tdes_done_task,
		     (unsigned long)tdes_dd);
	tasklet_init(&tdes_dd->queue_task, atmel_tdes_queue_task,
		     (unsigned long)tdes_dd);

	crypto_init_queue(&tdes_dd->queue, ATMEL_TDES_QUEUE_LENGTH);

	tdes_dd->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &tdes_res);
	if (IS_ERR(tdes_dd->io_base)) {
		err = PTR_ERR(tdes_dd->io_base);
		goto err_tasklet_kill;
	}
	tdes_dd->phys_base = tdes_res->start;

	tdes_dd->irq = platform_get_irq(pdev, 0);
	if (tdes_dd->irq < 0) {
		err = tdes_dd->irq;
		goto err_tasklet_kill;
	}

	err = devm_request_irq(&pdev->dev, tdes_dd->irq, atmel_tdes_irq,
			       IRQF_SHARED, "atmel-tdes", tdes_dd);
	if (err) {
		dev_err(dev, "unable to request tdes irq.\n");
		goto err_tasklet_kill;
	}

	/* Initializing the clock */
	tdes_dd->iclk = devm_clk_get(&pdev->dev, "tdes_clk");
	if (IS_ERR(tdes_dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(tdes_dd->iclk);
		goto err_tasklet_kill;
	}

	err = atmel_tdes_hw_version_init(tdes_dd);
	if (err)
		goto err_tasklet_kill;

	atmel_tdes_get_cap(tdes_dd);

	err = atmel_tdes_buff_init(tdes_dd);
	if (err)
		goto err_tasklet_kill;

	if (tdes_dd->caps.has_dma) {
		err = atmel_tdes_dma_init(tdes_dd);
		if (err)
			goto err_buff_cleanup;

		dev_info(dev, "using %s, %s for DMA transfers\n",
			 dma_chan_name(tdes_dd->dma_lch_in.chan),
			 dma_chan_name(tdes_dd->dma_lch_out.chan));
	}

	spin_lock(&atmel_tdes.lock);
	list_add_tail(&tdes_dd->list, &atmel_tdes.dev_list);
	spin_unlock(&atmel_tdes.lock);

	err = atmel_tdes_register_algs(tdes_dd);
	if (err)
		goto err_algs;

	dev_info(dev, "Atmel DES/TDES\n");

	return 0;

err_algs:
	spin_lock(&atmel_tdes.lock);
	list_del(&tdes_dd->list);
	spin_unlock(&atmel_tdes.lock);
	if (tdes_dd->caps.has_dma)
		atmel_tdes_dma_cleanup(tdes_dd);
err_buff_cleanup:
	atmel_tdes_buff_cleanup(tdes_dd);
err_tasklet_kill:
	tasklet_kill(&tdes_dd->done_task);
	tasklet_kill(&tdes_dd->queue_task);

	return err;
}

static void atmel_tdes_remove(struct platform_device *pdev)
{
	struct atmel_tdes_dev *tdes_dd = platform_get_drvdata(pdev);

	spin_lock(&atmel_tdes.lock);
	list_del(&tdes_dd->list);
	spin_unlock(&atmel_tdes.lock);

	atmel_tdes_unregister_algs(tdes_dd);

	tasklet_kill(&tdes_dd->done_task);
	tasklet_kill(&tdes_dd->queue_task);

	if (tdes_dd->caps.has_dma)
		atmel_tdes_dma_cleanup(tdes_dd);

	atmel_tdes_buff_cleanup(tdes_dd);
}

static struct platform_driver atmel_tdes_driver = {
	.probe		= atmel_tdes_probe,
	.remove		= atmel_tdes_remove,
	.driver		= {
		.name	= "atmel_tdes",
		.of_match_table = atmel_tdes_dt_ids,
	},
};

module_platform_driver(atmel_tdes_driver);

MODULE_DESCRIPTION("Atmel DES/TDES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");