/*
 * Support for ATMEL SHA1/SHA256 HW acceleration.
 *
 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
 * Author: Nicolas Royer <nicolas@eukrea.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from omap-sham.c drivers.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>

#include <linux/device.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <linux/platform_data/crypto-atmel.h>
#include "atmel-sha-regs.h"
/* SHA flags */
#define SHA_FLAGS_BUSY			BIT(0)
#define SHA_FLAGS_FINAL			BIT(1)
#define SHA_FLAGS_DMA_ACTIVE		BIT(2)
#define SHA_FLAGS_OUTPUT_READY		BIT(3)
#define SHA_FLAGS_INIT			BIT(4)
#define SHA_FLAGS_CPU			BIT(5)
#define SHA_FLAGS_DMA_READY		BIT(6)

#define SHA_FLAGS_FINUP			BIT(16)
#define SHA_FLAGS_SG			BIT(17)
#define SHA_FLAGS_ALGO_MASK		GENMASK(22, 18)
#define SHA_FLAGS_SHA1			BIT(18)
#define SHA_FLAGS_SHA224		BIT(19)
#define SHA_FLAGS_SHA256		BIT(20)
#define SHA_FLAGS_SHA384		BIT(21)
#define SHA_FLAGS_SHA512		BIT(22)
#define SHA_FLAGS_ERROR			BIT(23)
#define SHA_FLAGS_PAD			BIT(24)
#define SHA_FLAGS_RESTORE		BIT(25)

#define SHA_OP_UPDATE	1
#define SHA_OP_FINAL	2

#define SHA_BUFFER_LEN		(PAGE_SIZE / 16)

#define ATMEL_SHA_DMA_THRESHOLD		56
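/*
 * Note: requests shorter than ATMEL_SHA_DMA_THRESHOLD bytes are fed to the
 * IP by the CPU through SHA_REG_DIN instead of setting up a DMA transfer;
 * see atmel_sha_update() and atmel_sha_final_req() below.
 */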
struct atmel_sha_caps {
	bool	has_dma;
	bool	has_dualbuff;
	bool	has_sha224;
	bool	has_sha_384_512;
	bool	has_uihv;
};

struct atmel_sha_dev;

/*
 * .statesize = sizeof(struct atmel_sha_reqctx) must be <= PAGE_SIZE / 8 as
 * tested by the ahash_prepare_alg() function.
 */
struct atmel_sha_reqctx {
	struct atmel_sha_dev	*dd;
	unsigned long		flags;
	unsigned long		op;

	u8	digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32));
	u64	digcnt[2];
	size_t	bufcnt;
	size_t	buflen;
	dma_addr_t	dma_addr;

	/* walk state */
	struct scatterlist	*sg;
	unsigned int	offset;	/* offset in current sg */
	unsigned int	total;	/* total request */

	size_t	block_size;

	u8 buffer[SHA_BUFFER_LEN + SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
};
struct atmel_sha_ctx {
	struct atmel_sha_dev	*dd;
};

#define ATMEL_SHA_QUEUE_LENGTH	50
struct atmel_sha_dma {
	struct dma_chan			*chan;
	struct dma_slave_config		dma_conf;
};
struct atmel_sha_dev {
	struct list_head	list;
	unsigned long		phys_base;
	struct device		*dev;
	struct clk		*iclk;
	int			irq;
	void __iomem		*io_base;

	spinlock_t		lock;
	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	unsigned long		flags;
	struct crypto_queue	queue;
	struct ahash_request	*req;

	struct atmel_sha_dma	dma_lch_in;

	struct atmel_sha_caps	caps;

	u32	hw_version;
};
struct atmel_sha_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
};

static struct atmel_sha_drv atmel_sha = {
	.dev_list = LIST_HEAD_INIT(atmel_sha.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_sha.lock),
};
static inline u32 atmel_sha_read(struct atmel_sha_dev *dd, u32 offset)
{
	return readl_relaxed(dd->io_base + offset);
}

static inline void atmel_sha_write(struct atmel_sha_dev *dd,
					u32 offset, u32 value)
{
	writel_relaxed(value, dd->io_base + offset);
}
static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
{
	size_t count;

	while ((ctx->bufcnt < ctx->buflen) && ctx->total) {
		count = min(ctx->sg->length - ctx->offset, ctx->total);
		count = min(count, ctx->buflen - ctx->bufcnt);

		if (count <= 0) {
			/*
			 * Check if count <= 0 because the buffer is full or
			 * because the sg length is 0. In the latter case,
			 * check if there is another sg in the list, a 0 length
			 * sg doesn't necessarily mean the end of the sg list.
			 */
			if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
				ctx->sg = sg_next(ctx->sg);
				continue;
			} else {
				break;
			}
		}

		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
			ctx->offset, count, 0);

		ctx->bufcnt += count;
		ctx->offset += count;
		ctx->total -= count;

		if (ctx->offset == ctx->sg->length) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
			else
				ctx->total = 0;
		}
	}

	return 0;
}
/*
 * The purpose of this padding is to ensure that the padded message is a
 * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
 * The bit "1" is appended at the end of the message followed by
 * "padlen-1" zero bits. Then a 64-bit block (SHA1/SHA224/SHA256) or a
 * 128-bit block (SHA384/SHA512) holding the message length in bits is
 * appended.
 *
 * For SHA1/SHA224/SHA256, padlen is calculated as follows:
 *  - if message length < 56 bytes then padlen = 56 - message length
 *  - else padlen = 64 + 56 - message length
 *
 * For SHA384/SHA512, padlen is calculated as follows:
 *  - if message length < 112 bytes then padlen = 112 - message length
 *  - else padlen = 128 + 112 - message length
 */
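/*
 * Illustrative example: hashing a 20-byte message with SHA1 gives
 * index = 20, so padlen = 56 - 20 = 36; the padded message is then
 * 20 + 36 + 8 = 64 bytes, i.e. exactly one 512-bit block.
 */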
static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length)
{
	unsigned int index, padlen;
	u64 size[2];
	u64 bits[2];

	size[0] = ctx->digcnt[0];
	size[1] = ctx->digcnt[1];

	size[0] += ctx->bufcnt;
	if (size[0] < ctx->bufcnt)
		size[1]++;

	size[0] += length;
	if (size[0] < length)
		size[1]++;

	bits[1] = cpu_to_be64(size[0] << 3);
	bits[0] = cpu_to_be64(size[1] << 3 | size[0] >> 61);

	if (ctx->flags & (SHA_FLAGS_SHA384 | SHA_FLAGS_SHA512)) {
		index = ctx->bufcnt & 0x7f;
		padlen = (index < 112) ? (112 - index) : ((128+112) - index);
		*(ctx->buffer + ctx->bufcnt) = 0x80;
		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
		memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
		ctx->bufcnt += padlen + 16;
		ctx->flags |= SHA_FLAGS_PAD;
	} else {
		index = ctx->bufcnt & 0x3f;
		padlen = (index < 56) ? (56 - index) : ((64+56) - index);
		*(ctx->buffer + ctx->bufcnt) = 0x80;
		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
		memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
		ctx->bufcnt += padlen + 8;
		ctx->flags |= SHA_FLAGS_PAD;
	}
}
static int atmel_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm);
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_dev *dd = NULL;
	struct atmel_sha_dev *tmp;

	spin_lock_bh(&atmel_sha.lock);
	if (!tctx->dd) {
		list_for_each_entry(tmp, &atmel_sha.dev_list, list) {
			dd = tmp;
			break;
		}
		tctx->dd = dd;
	} else {
		dd = tctx->dd;
	}

	spin_unlock_bh(&atmel_sha.lock);

	ctx->dd = dd;

	ctx->flags = 0;

	dev_dbg(dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA1;
		ctx->block_size = SHA1_BLOCK_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA224;
		ctx->block_size = SHA224_BLOCK_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA256;
		ctx->block_size = SHA256_BLOCK_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA384;
		ctx->block_size = SHA384_BLOCK_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA512;
		ctx->block_size = SHA512_BLOCK_SIZE;
		break;
	default:
		return -EINVAL;
	}

	ctx->bufcnt = 0;
	ctx->digcnt[0] = 0;
	ctx->digcnt[1] = 0;
	ctx->buflen = SHA_BUFFER_LEN;

	return 0;
}
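/*
 * Program the Mode Register before a transfer: select PDC or auto mode,
 * pick the algorithm from ctx->flags and either start a fresh hash
 * (SHA_CR_FIRST) or, when the UIHV feature is available, reload the
 * intermediate digest saved at the end of the previous update.
 */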
static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 valmr = SHA_MR_MODE_AUTO;
	unsigned int i, hashsize = 0;

	if (dma) {
		if (!dd->caps.has_dma)
			atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE);
		valmr = SHA_MR_MODE_PDC;
		if (dd->caps.has_dualbuff)
			valmr |= SHA_MR_DUALBUFF;
	} else {
		atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
	}

	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
	case SHA_FLAGS_SHA1:
		valmr |= SHA_MR_ALGO_SHA1;
		hashsize = SHA1_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA224:
		valmr |= SHA_MR_ALGO_SHA224;
		hashsize = SHA256_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA256:
		valmr |= SHA_MR_ALGO_SHA256;
		hashsize = SHA256_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA384:
		valmr |= SHA_MR_ALGO_SHA384;
		hashsize = SHA512_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA512:
		valmr |= SHA_MR_ALGO_SHA512;
		hashsize = SHA512_DIGEST_SIZE;
		break;

	default:
		break;
	}

	/* Setting CR_FIRST only for the first iteration */
	if (!(ctx->digcnt[0] || ctx->digcnt[1])) {
		atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
	} else if (dd->caps.has_uihv && (ctx->flags & SHA_FLAGS_RESTORE)) {
		const u32 *hash = (const u32 *)ctx->digest;

		/*
		 * Restore the hardware context: update the User Initial
		 * Hash Value (UIHV) with the value saved when the latest
		 * 'update' operation completed on this very same crypto
		 * request.
		 */
		ctx->flags &= ~SHA_FLAGS_RESTORE;
		atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
		for (i = 0; i < hashsize / sizeof(u32); ++i)
			atmel_sha_write(dd, SHA_REG_DIN(i), hash[i]);
		atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
		valmr |= SHA_MR_UIHV;
	}
	/*
	 * WARNING: If the UIHV feature is not available, the hardware CANNOT
	 * process concurrent requests: the internal registers used to store
	 * the hash/digest are still set to the partial digest output values
	 * computed during the latest round.
	 */

	atmel_sha_write(dd, SHA_MR, valmr);
}
static int atmel_sha_xmit_cpu(struct atmel_sha_dev *dd, const u8 *buf,
			      size_t length, int final)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	int count, len32;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(dd->dev, "xmit_cpu: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
		ctx->digcnt[1], ctx->digcnt[0], length, final);

	atmel_sha_write_ctrl(dd, 0);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt[0] += length;
	if (ctx->digcnt[0] < length)
		ctx->digcnt[1]++;

	if (final)
		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	dd->flags |= SHA_FLAGS_CPU;

	for (count = 0; count < len32; count++)
		atmel_sha_write(dd, SHA_REG_DIN(count), buffer[count]);

	return -EINPROGRESS;
}
static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	int len32;

	dev_dbg(dd->dev, "xmit_pdc: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
		ctx->digcnt[1], ctx->digcnt[0], length1, final);

	len32 = DIV_ROUND_UP(length1, sizeof(u32));
	atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS);
	atmel_sha_write(dd, SHA_TPR, dma_addr1);
	atmel_sha_write(dd, SHA_TCR, len32);

	len32 = DIV_ROUND_UP(length2, sizeof(u32));
	atmel_sha_write(dd, SHA_TNPR, dma_addr2);
	atmel_sha_write(dd, SHA_TNCR, len32);

	atmel_sha_write_ctrl(dd, 1);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt[0] += length1;
	if (ctx->digcnt[0] < length1)
		ctx->digcnt[1]++;

	if (final)
		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */

	dd->flags |= SHA_FLAGS_DMA_ACTIVE;

	/* Start DMA transfer */
	atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTEN);

	return -EINPROGRESS;
}
static void atmel_sha_dma_callback(void *data)
{
	struct atmel_sha_dev *dd = data;

	/* dma_lch_in - completed - wait DATRDY */
	atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
}
static int atmel_sha_xmit_dma(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	struct dma_async_tx_descriptor	*in_desc;
	struct scatterlist sg[2];

	dev_dbg(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
		ctx->digcnt[1], ctx->digcnt[0], length1, final);

	dd->dma_lch_in.dma_conf.src_maxburst = 16;
	dd->dma_lch_in.dma_conf.dst_maxburst = 16;

	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);

	if (length2) {
		sg_init_table(sg, 2);
		sg_dma_address(&sg[0]) = dma_addr1;
		sg_dma_len(&sg[0]) = length1;
		sg_dma_address(&sg[1]) = dma_addr2;
		sg_dma_len(&sg[1]) = length2;
		in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 2,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	} else {
		sg_init_table(sg, 1);
		sg_dma_address(&sg[0]) = dma_addr1;
		sg_dma_len(&sg[0]) = length1;
		in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 1,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}
	if (!in_desc)
		return -EINVAL;

	in_desc->callback = atmel_sha_dma_callback;
	in_desc->callback_param = dd;

	atmel_sha_write_ctrl(dd, 1);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt[0] += length1;
	if (ctx->digcnt[0] < length1)
		ctx->digcnt[1]++;

	if (final)
		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */

	dd->flags |= SHA_FLAGS_DMA_ACTIVE;

	/* Start DMA transfer */
	dmaengine_submit(in_desc);
	dma_async_issue_pending(dd->dma_lch_in.chan);

	return -EINPROGRESS;
}
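/*
 * Pick the transmit path: use the DMA engine channel when one was acquired,
 * otherwise fall back to the SHA block's built-in PDC.
 */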
static int atmel_sha_xmit_start(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
{
	if (dd->caps.has_dma)
		return atmel_sha_xmit_dma(dd, dma_addr1, length1,
				dma_addr2, length2, final);
	else
		return atmel_sha_xmit_pdc(dd, dma_addr1, length1,
				dma_addr2, length2, final);
}
static int atmel_sha_update_cpu(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	int bufcnt;

	atmel_sha_append_sg(ctx);
	atmel_sha_fill_padding(ctx, 0);
	bufcnt = ctx->bufcnt;
	ctx->bufcnt = 0;

	return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
}
static int atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd,
					struct atmel_sha_reqctx *ctx,
					size_t length, int final)
{
	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
				ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
		dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen +
				ctx->block_size);
		return -EINVAL;
	}

	ctx->flags &= ~SHA_FLAGS_SG;

	/* next call does not fail... so no unmap in the case of error */
	return atmel_sha_xmit_start(dd, ctx->dma_addr, length, 0, 0, final);
}
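/*
 * atmel_sha_update_dma_slow() funnels data through the driver buffer with
 * atmel_sha_append_sg(); the "fast" variant below hands scatterlist entries
 * directly to the DMA engine when they are word aligned and block-size
 * multiples, and falls back to this slow path otherwise.
 */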
static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int final;
	size_t count;

	atmel_sha_append_sg(ctx);

	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;

	dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: 0x%llx 0x%llx, final: %d\n",
		ctx->bufcnt, ctx->digcnt[1], ctx->digcnt[0], final);

	if (final)
		atmel_sha_fill_padding(ctx, 0);

	if (final || (ctx->bufcnt == ctx->buflen)) {
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		return atmel_sha_xmit_dma_map(dd, ctx, count, final);
	}

	return 0;
}
static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int length, final, tail;
	struct scatterlist *sg;
	unsigned int count;

	if (!ctx->total)
		return 0;

	if (ctx->bufcnt || ctx->offset)
		return atmel_sha_update_dma_slow(dd);

	dev_dbg(dd->dev, "fast: digcnt: 0x%llx 0x%llx, bufcnt: %u, total: %u\n",
		ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt, ctx->total);

	sg = ctx->sg;

	if (!IS_ALIGNED(sg->offset, sizeof(u32)))
		return atmel_sha_update_dma_slow(dd);

	if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->block_size))
		/* size is not ctx->block_size aligned */
		return atmel_sha_update_dma_slow(dd);

	length = min(ctx->total, sg->length);

	if (sg_is_last(sg)) {
		if (!(ctx->flags & SHA_FLAGS_FINUP)) {
			/* not last sg must be ctx->block_size aligned */
			tail = length & (ctx->block_size - 1);
			length -= tail;
		}
	}

	ctx->total -= length;
	ctx->offset = length; /* offset where to start slow */

	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;

	if (final) {
		tail = length & (ctx->block_size - 1);
		length -= tail;
		ctx->total += tail;
		ctx->offset = length; /* offset where to start slow */

		sg = ctx->sg;
		atmel_sha_append_sg(ctx);

		atmel_sha_fill_padding(ctx, length);

		ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
			ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
		if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
			dev_err(dd->dev, "dma %u bytes error\n",
				ctx->buflen + ctx->block_size);
			return -EINVAL;
		}

		if (length == 0) {
			ctx->flags &= ~SHA_FLAGS_SG;
			count = ctx->bufcnt;
			ctx->bufcnt = 0;
			return atmel_sha_xmit_start(dd, ctx->dma_addr, count, 0,
					0, final);
		} else {
			ctx->sg = sg;
			if (!dma_map_sg(dd->dev, ctx->sg, 1,
					DMA_TO_DEVICE)) {
				dev_err(dd->dev, "dma_map_sg error\n");
				return -EINVAL;
			}

			ctx->flags |= SHA_FLAGS_SG;

			count = ctx->bufcnt;
			ctx->bufcnt = 0;
			return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg),
					length, ctx->dma_addr, count, final);
		}
	}

	if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
		dev_err(dd->dev, "dma_map_sg error\n");
		return -EINVAL;
	}

	ctx->flags |= SHA_FLAGS_SG;

	/* next call does not fail... so no unmap in the case of error */
	return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), length, 0,
					0, final);
}
static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);

	if (ctx->flags & SHA_FLAGS_SG) {
		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
		if (ctx->sg->length == ctx->offset) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
		}
		if (ctx->flags & SHA_FLAGS_PAD) {
			dma_unmap_single(dd->dev, ctx->dma_addr,
				ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
		}
	} else {
		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen +
						ctx->block_size, DMA_TO_DEVICE);
	}

	return 0;
}
static int atmel_sha_update_req(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err;

	dev_dbg(dd->dev, "update_req: total: %u, digcnt: 0x%llx 0x%llx\n",
		ctx->total, ctx->digcnt[1], ctx->digcnt[0]);

	if (ctx->flags & SHA_FLAGS_CPU)
		err = atmel_sha_update_cpu(dd);
	else
		err = atmel_sha_update_dma_start(dd);

	/* wait for dma completion before can take more data */
	dev_dbg(dd->dev, "update: err: %d, digcnt: 0x%llx 0x%llx\n",
		err, ctx->digcnt[1], ctx->digcnt[0]);

	return err;
}
static int atmel_sha_final_req(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err = 0;
	int count;

	if (ctx->bufcnt >= ATMEL_SHA_DMA_THRESHOLD) {
		atmel_sha_fill_padding(ctx, 0);
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		err = atmel_sha_xmit_dma_map(dd, ctx, count, 1);
	} else {
		/* faster to handle last block with cpu */
		atmel_sha_fill_padding(ctx, 0);
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1);
	}

	dev_dbg(dd->dev, "final_req: err: %d\n", err);

	return err;
}
static void atmel_sha_copy_hash(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)ctx->digest;
	unsigned int i, hashsize;

	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
	case SHA_FLAGS_SHA1:
		hashsize = SHA1_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA224:
	case SHA_FLAGS_SHA256:
		hashsize = SHA256_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA384:
	case SHA_FLAGS_SHA512:
		hashsize = SHA512_DIGEST_SIZE;
		break;

	default:
		/* Should not happen... */
		return;
	}

	for (i = 0; i < hashsize / sizeof(u32); ++i)
		hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
	ctx->flags |= SHA_FLAGS_RESTORE;
}
static void atmel_sha_copy_ready_hash(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	if (!req->result)
		return;

	if (ctx->flags & SHA_FLAGS_SHA1)
		memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
	else if (ctx->flags & SHA_FLAGS_SHA224)
		memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE);
	else if (ctx->flags & SHA_FLAGS_SHA256)
		memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
	else if (ctx->flags & SHA_FLAGS_SHA384)
		memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE);
	else
		memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE);
}
static int atmel_sha_finish(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_dev *dd = ctx->dd;
	int err = 0;

	if (ctx->digcnt[0] || ctx->digcnt[1])
		atmel_sha_copy_ready_hash(req);

	dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %d\n", ctx->digcnt[1],
		ctx->digcnt[0], ctx->bufcnt);

	return err;
}
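/*
 * Called when the hardware is done with a request: save the intermediate
 * digest, copy the final hash out if this was the last operation, release
 * the device and complete the request, then kick the queue tasklet so a
 * pending request can be started.
 */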
static void atmel_sha_finish_req(struct ahash_request *req, int err)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_dev *dd = ctx->dd;

	if (!err) {
		atmel_sha_copy_hash(req);
		if (SHA_FLAGS_FINAL & dd->flags)
			err = atmel_sha_finish(req);
	} else {
		ctx->flags |= SHA_FLAGS_ERROR;
	}

	/* atomic operation is not needed here */
	dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
			SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY);

	clk_disable(dd->iclk);

	if (req->base.complete)
		req->base.complete(&req->base, err);

	/* handle new request */
	tasklet_schedule(&dd->queue_task);
}
static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
{
	int err;

	err = clk_enable(dd->iclk);
	if (err)
		return err;

	if (!(SHA_FLAGS_INIT & dd->flags)) {
		atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST);
		dd->flags |= SHA_FLAGS_INIT;
	}

	return 0;
}
static inline unsigned int atmel_sha_get_version(struct atmel_sha_dev *dd)
{
	return atmel_sha_read(dd, SHA_HW_VERSION) & 0x00000fff;
}
static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
{
	atmel_sha_hw_init(dd);

	dd->hw_version = atmel_sha_get_version(dd);

	dev_info(dd->dev,
			"version: 0x%x\n", dd->hw_version);

	clk_disable(dd->iclk);
}
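/*
 * Central dispatcher: enqueue the new request (if any) and, when the engine
 * is idle, dequeue the next one, mark the device busy and start its
 * update/final step. Requests that fail or complete synchronously are
 * finished here rather than in the done tasklet.
 */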
static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
				  struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct atmel_sha_reqctx *ctx;
	unsigned long flags;
	int err = 0, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->queue, req);

	if (SHA_FLAGS_BUSY & dd->flags) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= SHA_FLAGS_BUSY;

	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	dd->req = req;
	ctx = ahash_request_ctx(req);

	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
						ctx->op, req->nbytes);

	err = atmel_sha_hw_init(dd);
	if (err)
		goto err1;

	if (ctx->op == SHA_OP_UPDATE) {
		err = atmel_sha_update_req(dd);
		if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP))
			/* no final() after finup() */
			err = atmel_sha_final_req(dd);
	} else if (ctx->op == SHA_OP_FINAL) {
		err = atmel_sha_final_req(dd);
	}

err1:
	if (err != -EINPROGRESS)
		/* done_task will not finish it, so do it here */
		atmel_sha_finish_req(req, err);

	dev_dbg(dd->dev, "exit, err: %d\n", err);

	return ret;
}
static int atmel_sha_enqueue(struct ahash_request *req, unsigned int op)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct atmel_sha_dev *dd = tctx->dd;

	ctx->op = op;

	return atmel_sha_handle_queue(dd, req);
}
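/*
 * Note that small updates are only buffered: data that fits in ctx->buffer
 * is accumulated and the call returns immediately, while a finup() shorter
 * than ATMEL_SHA_DMA_THRESHOLD is flagged for the CPU path instead of DMA.
 */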
static int atmel_sha_update(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	if (!req->nbytes)
		return 0;

	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->offset = 0;

	if (ctx->flags & SHA_FLAGS_FINUP) {
		if (ctx->bufcnt + ctx->total < ATMEL_SHA_DMA_THRESHOLD)
			/* faster to use CPU for short transfers */
			ctx->flags |= SHA_FLAGS_CPU;
	} else if (ctx->bufcnt + ctx->total < ctx->buflen) {
		atmel_sha_append_sg(ctx);
		return 0;
	}
	return atmel_sha_enqueue(req, SHA_OP_UPDATE);
}
static int atmel_sha_final(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	ctx->flags |= SHA_FLAGS_FINUP;

	if (ctx->flags & SHA_FLAGS_ERROR)
		return 0; /* uncompleted hash is not needed */

	if (ctx->flags & SHA_FLAGS_PAD)
		/* copy ready hash (+ finalize hmac) */
		return atmel_sha_finish(req);

	return atmel_sha_enqueue(req, SHA_OP_FINAL);
}
static int atmel_sha_finup(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->flags |= SHA_FLAGS_FINUP;

	err1 = atmel_sha_update(req);
	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;

	/*
	 * final() has to be always called to cleanup resources
	 * even if update() failed, except EINPROGRESS
	 */
	err2 = atmel_sha_final(req);

	return err1 ?: err2;
}

static int atmel_sha_digest(struct ahash_request *req)
{
	return atmel_sha_init(req) ?: atmel_sha_finup(req);
}
*req
, void *out
)
1023 const struct atmel_sha_reqctx
*ctx
= ahash_request_ctx(req
);
1025 memcpy(out
, ctx
, sizeof(*ctx
));
1029 static int atmel_sha_import(struct ahash_request
*req
, const void *in
)
1031 struct atmel_sha_reqctx
*ctx
= ahash_request_ctx(req
);
1033 memcpy(ctx
, in
, sizeof(*ctx
));
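/*
 * export()/import() copy the whole request context, which is why .statesize
 * is sizeof(struct atmel_sha_reqctx) in the algorithm definitions below and
 * why that size must stay within the limit checked by ahash_prepare_alg().
 */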
static int atmel_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct atmel_sha_reqctx));

	return 0;
}
static struct ahash_alg sha_1_256_algs[] = {
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA1_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_reqctx),
		.base	= {
			.cra_name		= "sha1",
			.cra_driver_name	= "atmel-sha1",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA256_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_reqctx),
		.base	= {
			.cra_name		= "sha256",
			.cra_driver_name	= "atmel-sha256",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
};
static struct ahash_alg sha_224_alg = {
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA224_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_reqctx),
		.base	= {
			.cra_name		= "sha224",
			.cra_driver_name	= "atmel-sha224",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA224_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
};
static struct ahash_alg sha_384_512_algs[] = {
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA384_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_reqctx),
		.base	= {
			.cra_name		= "sha384",
			.cra_driver_name	= "atmel-sha384",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA384_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0x3,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA512_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_reqctx),
		.base	= {
			.cra_name		= "sha512",
			.cra_driver_name	= "atmel-sha512",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA512_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0x3,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
};
static void atmel_sha_queue_task(unsigned long data)
{
	struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;

	atmel_sha_handle_queue(dd, NULL);
}
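/*
 * Bottom half of the interrupt handler: on the CPU path the request is
 * finished directly; on the DMA path the current transfer is stopped first
 * and, if data remains, another DMA round is started before the request is
 * completed.
 */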
static void atmel_sha_done_task(unsigned long data)
{
	struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
	int err = 0;

	if (SHA_FLAGS_CPU & dd->flags) {
		if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
			dd->flags &= ~SHA_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (SHA_FLAGS_DMA_READY & dd->flags) {
		if (SHA_FLAGS_DMA_ACTIVE & dd->flags) {
			dd->flags &= ~SHA_FLAGS_DMA_ACTIVE;
			atmel_sha_update_dma_stop(dd);
		}
		if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
			/* hash or semi-hash ready */
			dd->flags &= ~(SHA_FLAGS_DMA_READY |
						SHA_FLAGS_OUTPUT_READY);
			err = atmel_sha_update_dma_start(dd);
			if (err != -EINPROGRESS)
				goto finish;
		}
	}
	return;

finish:
	/* finish current request */
	atmel_sha_finish_req(dd->req, err);
}
static irqreturn_t atmel_sha_irq(int irq, void *dev_id)
{
	struct atmel_sha_dev *sha_dd = dev_id;
	u32 reg;

	reg = atmel_sha_read(sha_dd, SHA_ISR);
	if (reg & atmel_sha_read(sha_dd, SHA_IMR)) {
		atmel_sha_write(sha_dd, SHA_IDR, reg);
		if (SHA_FLAGS_BUSY & sha_dd->flags) {
			sha_dd->flags |= SHA_FLAGS_OUTPUT_READY;
			if (!(SHA_FLAGS_CPU & sha_dd->flags))
				sha_dd->flags |= SHA_FLAGS_DMA_READY;
			tasklet_schedule(&sha_dd->done_task);
		} else {
			dev_warn(sha_dd->dev, "SHA interrupt when no active requests.\n");
		}
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++)
		crypto_unregister_ahash(&sha_1_256_algs[i]);

	if (dd->caps.has_sha224)
		crypto_unregister_ahash(&sha_224_alg);

	if (dd->caps.has_sha_384_512) {
		for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++)
			crypto_unregister_ahash(&sha_384_512_algs[i]);
	}
}
static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
{
	int err, i, j;

	for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) {
		err = crypto_register_ahash(&sha_1_256_algs[i]);
		if (err)
			goto err_sha_1_256_algs;
	}

	if (dd->caps.has_sha224) {
		err = crypto_register_ahash(&sha_224_alg);
		if (err)
			goto err_sha_224_algs;
	}

	if (dd->caps.has_sha_384_512) {
		for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) {
			err = crypto_register_ahash(&sha_384_512_algs[i]);
			if (err)
				goto err_sha_384_512_algs;
		}
	}

	return 0;

err_sha_384_512_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(&sha_384_512_algs[j]);
	crypto_unregister_ahash(&sha_224_alg);
err_sha_224_algs:
	i = ARRAY_SIZE(sha_1_256_algs);
err_sha_1_256_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(&sha_1_256_algs[j]);

	return err;
}
static bool atmel_sha_filter(struct dma_chan *chan, void *slave)
{
	struct at_dma_slave *sl = slave;

	if (sl && sl->dma_dev == chan->device->dev) {
		chan->private = sl;
		return true;
	} else {
		return false;
	}
}
static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
				struct crypto_platform_data *pdata)
{
	int err = -ENOMEM;
	dma_cap_mask_t mask_in;

	/* Try to grab DMA channel */
	dma_cap_zero(mask_in);
	dma_cap_set(DMA_SLAVE, mask_in);

	dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask_in,
			atmel_sha_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
	if (!dd->dma_lch_in.chan) {
		dev_warn(dd->dev, "no DMA channel available\n");
		return err;
	}

	dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
	dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
		SHA_REG_DIN(0);
	dd->dma_lch_in.dma_conf.src_maxburst = 1;
	dd->dma_lch_in.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.dst_maxburst = 1;
	dd->dma_lch_in.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.device_fc = false;

	return 0;
}
static void atmel_sha_dma_cleanup(struct atmel_sha_dev *dd)
{
	dma_release_channel(dd->dma_lch_in.chan);
}
static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
{
	dd->caps.has_dma = 0;
	dd->caps.has_dualbuff = 0;
	dd->caps.has_sha224 = 0;
	dd->caps.has_sha_384_512 = 0;
	dd->caps.has_uihv = 0;

	/* keep only major version number */
	switch (dd->hw_version & 0xff0) {
	case 0x510:
		dd->caps.has_dma = 1;
		dd->caps.has_dualbuff = 1;
		dd->caps.has_sha224 = 1;
		dd->caps.has_sha_384_512 = 1;
		dd->caps.has_uihv = 1;
		break;
	case 0x420:
		dd->caps.has_dma = 1;
		dd->caps.has_dualbuff = 1;
		dd->caps.has_sha224 = 1;
		dd->caps.has_sha_384_512 = 1;
		dd->caps.has_uihv = 1;
		break;
	case 0x410:
		dd->caps.has_dma = 1;
		dd->caps.has_dualbuff = 1;
		dd->caps.has_sha224 = 1;
		dd->caps.has_sha_384_512 = 1;
		break;
	case 0x400:
		dd->caps.has_dma = 1;
		dd->caps.has_dualbuff = 1;
		dd->caps.has_sha224 = 1;
		break;
	default:
		dev_warn(dd->dev,
				"Unmanaged sha version, set minimum capabilities\n");
		break;
	}
}
#if defined(CONFIG_OF)
static const struct of_device_id atmel_sha_dt_ids[] = {
	{ .compatible = "atmel,at91sam9g46-sha" },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, atmel_sha_dt_ids);
static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct crypto_platform_data *pdata;

	if (!np) {
		dev_err(&pdev->dev, "device node not found\n");
		return ERR_PTR(-EINVAL);
	}

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(&pdev->dev, "could not allocate memory for pdata\n");
		return ERR_PTR(-ENOMEM);
	}

	pdata->dma_slave = devm_kzalloc(&pdev->dev,
					sizeof(*(pdata->dma_slave)),
					GFP_KERNEL);
	if (!pdata->dma_slave) {
		dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
		return ERR_PTR(-ENOMEM);
	}

	return pdata;
}
#else /* CONFIG_OF */
static inline struct crypto_platform_data *atmel_sha_of_init(struct platform_device *dev)
{
	return ERR_PTR(-EINVAL);
}
#endif
static int atmel_sha_probe(struct platform_device *pdev)
{
	struct atmel_sha_dev *sha_dd;
	struct crypto_platform_data *pdata;
	struct device *dev = &pdev->dev;
	struct resource *sha_res;
	int err;

	sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL);
	if (sha_dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto sha_dd_err;
	}

	sha_dd->dev = dev;

	platform_set_drvdata(pdev, sha_dd);

	INIT_LIST_HEAD(&sha_dd->list);
	spin_lock_init(&sha_dd->lock);

	tasklet_init(&sha_dd->done_task, atmel_sha_done_task,
					(unsigned long)sha_dd);
	tasklet_init(&sha_dd->queue_task, atmel_sha_queue_task,
					(unsigned long)sha_dd);

	crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);

	/* Get the base address */
	sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!sha_res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	sha_dd->phys_base = sha_res->start;

	/* Get the IRQ */
	sha_dd->irq = platform_get_irq(pdev,  0);
	if (sha_dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = sha_dd->irq;
		goto res_err;
	}

	err = devm_request_irq(&pdev->dev, sha_dd->irq, atmel_sha_irq,
			       IRQF_SHARED, "atmel-sha", sha_dd);
	if (err) {
		dev_err(dev, "unable to request sha irq.\n");
		goto res_err;
	}

	/* Initializing the clock */
	sha_dd->iclk = devm_clk_get(&pdev->dev, "sha_clk");
	if (IS_ERR(sha_dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(sha_dd->iclk);
		goto res_err;
	}

	sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
	if (IS_ERR(sha_dd->io_base)) {
		dev_err(dev, "can't ioremap\n");
		err = PTR_ERR(sha_dd->io_base);
		goto res_err;
	}

	err = clk_prepare(sha_dd->iclk);
	if (err)
		goto res_err;

	atmel_sha_hw_version_init(sha_dd);

	atmel_sha_get_cap(sha_dd);

	if (sha_dd->caps.has_dma) {
		pdata = pdev->dev.platform_data;
		if (!pdata) {
			pdata = atmel_sha_of_init(pdev);
			if (IS_ERR(pdata)) {
				dev_err(&pdev->dev, "platform data not available\n");
				err = PTR_ERR(pdata);
				goto iclk_unprepare;
			}
		}
		if (!pdata->dma_slave) {
			err = -ENXIO;
			goto iclk_unprepare;
		}
		err = atmel_sha_dma_init(sha_dd, pdata);
		if (err)
			goto err_sha_dma;

		dev_info(dev, "using %s for DMA transfers\n",
				dma_chan_name(sha_dd->dma_lch_in.chan));
	}

	spin_lock(&atmel_sha.lock);
	list_add_tail(&sha_dd->list, &atmel_sha.dev_list);
	spin_unlock(&atmel_sha.lock);

	err = atmel_sha_register_algs(sha_dd);
	if (err)
		goto err_algs;

	dev_info(dev, "Atmel SHA1/SHA256%s%s\n",
			sha_dd->caps.has_sha224 ? "/SHA224" : "",
			sha_dd->caps.has_sha_384_512 ? "/SHA384/SHA512" : "");

	return 0;

err_algs:
	spin_lock(&atmel_sha.lock);
	list_del(&sha_dd->list);
	spin_unlock(&atmel_sha.lock);
	if (sha_dd->caps.has_dma)
		atmel_sha_dma_cleanup(sha_dd);
err_sha_dma:
iclk_unprepare:
	clk_unprepare(sha_dd->iclk);
res_err:
	tasklet_kill(&sha_dd->queue_task);
	tasklet_kill(&sha_dd->done_task);
sha_dd_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}
static int atmel_sha_remove(struct platform_device *pdev)
{
	struct atmel_sha_dev *sha_dd;

	sha_dd = platform_get_drvdata(pdev);
	if (!sha_dd)
		return -ENODEV;

	spin_lock(&atmel_sha.lock);
	list_del(&sha_dd->list);
	spin_unlock(&atmel_sha.lock);

	atmel_sha_unregister_algs(sha_dd);

	tasklet_kill(&sha_dd->queue_task);
	tasklet_kill(&sha_dd->done_task);

	if (sha_dd->caps.has_dma)
		atmel_sha_dma_cleanup(sha_dd);

	clk_unprepare(sha_dd->iclk);

	return 0;
}
static struct platform_driver atmel_sha_driver = {
	.probe		= atmel_sha_probe,
	.remove		= atmel_sha_remove,
	.driver		= {
		.name	= "atmel_sha",
		.of_match_table	= of_match_ptr(atmel_sha_dt_ids),
	},
};

module_platform_driver(atmel_sha_driver);

MODULE_DESCRIPTION("Atmel SHA (1/256/224/384/512) hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");