/*
 * Support for OMAP SHA1/MD5 HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from old omap-sha1-md5.c driver.
 */
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/err.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>

#include <plat/cpu.h>	/* cpu_class_is_omap2(), omap_type() */
#include <plat/dma.h>	/* omap_request_dma() and omap_set_dma_*() helpers */
#include <mach/irqs.h>
#define SHA_REG_DIGEST(x)		(0x00 + ((x) * 0x04))
#define SHA_REG_DIN(x)			(0x1C + ((x) * 0x04))

#define SHA1_MD5_BLOCK_SIZE		SHA1_BLOCK_SIZE
#define MD5_DIGEST_SIZE			16

#define SHA_REG_DIGCNT			0x14

#define SHA_REG_CTRL			0x18
#define SHA_REG_CTRL_LENGTH		(0xFFFFFFFF << 5)
#define SHA_REG_CTRL_CLOSE_HASH		(1 << 4)
#define SHA_REG_CTRL_ALGO_CONST		(1 << 3)
#define SHA_REG_CTRL_ALGO		(1 << 2)
#define SHA_REG_CTRL_INPUT_READY	(1 << 1)
#define SHA_REG_CTRL_OUTPUT_READY	(1 << 0)

#define SHA_REG_REV			0x5C
#define SHA_REG_REV_MAJOR		0xF0
#define SHA_REG_REV_MINOR		0x0F

#define SHA_REG_MASK			0x60
#define SHA_REG_MASK_DMA_EN		(1 << 3)
#define SHA_REG_MASK_IT_EN		(1 << 2)
#define SHA_REG_MASK_SOFTRESET		(1 << 1)
#define SHA_REG_AUTOIDLE		(1 << 0)

#define SHA_REG_SYSSTATUS		0x64
#define SHA_REG_SYSSTATUS_RESETDONE	(1 << 0)

#define DEFAULT_TIMEOUT_INTERVAL	HZ
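/*
 * Register polls in omap_sham_wait() give up after DEFAULT_TIMEOUT_INTERVAL
 * jiffies, i.e. roughly one second.
 */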
/* mostly device flags */
#define FLAGS_DMA_ACTIVE	2
#define FLAGS_OUTPUT_READY	3
#define FLAGS_DMA_READY		6
#define FLAGS_FINUP		16
#define FLAGS_ERROR		20
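/*
 * The FLAGS_* values are bit numbers: the low bits track device-wide state
 * in dd->flags, while bits 16 and up (FINUP, ERROR, ...) describe the
 * current request in ctx->flags; both words are manipulated with BIT(),
 * test_bit() and set_bit().
 */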
#define OMAP_ALIGN_MASK		(sizeof(u32)-1)
#define OMAP_ALIGNED		__attribute__((aligned(sizeof(u32))))

#define BUFLEN			PAGE_SIZE
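/*
 * A one-page bounce buffer is appended to every request context: see the
 * buffer[0] flexible array member below and the request size set up in
 * omap_sham_cra_init_alg().
 */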
struct omap_sham_reqctx {
	struct omap_sham_dev	*dd;
	unsigned long		flags;
	unsigned long		op;

	u8			digest[SHA1_DIGEST_SIZE] OMAP_ALIGNED;
	size_t			digcnt;
	size_t			bufcnt;
	size_t			buflen;
	dma_addr_t		dma_addr;

	/* walk state */
	struct scatterlist	*sg;
	unsigned int		offset;	/* offset in current sg */
	unsigned int		total;	/* total request */

	u8			buffer[0] OMAP_ALIGNED;
};
struct omap_sham_hmac_ctx {
	struct crypto_shash	*shash;
	u8			ipad[SHA1_MD5_BLOCK_SIZE];
	u8			opad[SHA1_MD5_BLOCK_SIZE];
};
struct omap_sham_ctx {
	struct omap_sham_dev	*dd;

	unsigned long		flags;

	/* fallback stuff */
	struct crypto_shash	*fallback;

	struct omap_sham_hmac_ctx base[0];
};
#define OMAP_SHAM_QUEUE_LENGTH	1
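/*
 * The engine processes a single request at a time (FLAGS_BUSY), so a
 * one-entry backlog queue per device is sufficient.
 */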
struct omap_sham_dev {
	struct list_head	list;
	unsigned long		phys_base;
	struct device		*dev;
	void __iomem		*io_base;
	int			irq;
	struct clk		*iclk;
	int			dma;
	int			dma_lch;
	struct tasklet_struct	done_task;

	unsigned long		flags;
	struct crypto_queue	queue;
	struct ahash_request	*req;

	spinlock_t		lock;
};
struct omap_sham_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
};
static struct omap_sham_drv sham = {
	.dev_list = LIST_HEAD_INIT(sham.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(sham.lock),
};
static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_base + offset);
}
static inline void omap_sham_write(struct omap_sham_dev *dd,
					u32 offset, u32 value)
{
	__raw_writel(value, dd->io_base + offset);
}
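/*
 * Read-modify-write helper: the bits selected by 'mask' are cleared and
 * the corresponding bits from 'value' are OR-ed in, leaving the rest of
 * the register untouched.
 */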
static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
					u32 value, u32 mask)
{
	u32 val;

	val = omap_sham_read(dd, address);
	val &= ~mask;
	val |= value;
	omap_sham_write(dd, address, val);
}
static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
{
	unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;

	while (!(omap_sham_read(dd, offset) & bit)) {
		if (time_is_before_jiffies(timeout))
			return -ETIMEDOUT;
	}

	return 0;
}
static void omap_sham_copy_hash(struct ahash_request *req, int out)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)ctx->digest;
	int i;

	/* MD5 is almost unused. So copy sha1 size to reduce code */
	for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
		if (out)
			hash[i] = omap_sham_read(ctx->dd,
						 SHA_REG_DIGEST(i));
		else
			omap_sham_write(ctx->dd,
					SHA_REG_DIGEST(i), hash[i]);
	}
}
static void omap_sham_copy_ready_hash(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	u32 *in = (u32 *)ctx->digest;
	u32 *hash = (u32 *)req->result;
	int i;

	if (likely(ctx->flags & BIT(FLAGS_SHA1))) {
		/* SHA1 results are in big endian */
		for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
			hash[i] = be32_to_cpu(in[i]);
	} else {
		/* MD5 results are in little endian */
		for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++)
			hash[i] = le32_to_cpu(in[i]);
	}
}
static int omap_sham_hw_init(struct omap_sham_dev *dd)
{
	clk_enable(dd->iclk);

	if (!test_bit(FLAGS_INIT, &dd->flags)) {
		omap_sham_write_mask(dd, SHA_REG_MASK,
			SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);

		if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
				   SHA_REG_SYSSTATUS_RESETDONE))
			return -ETIMEDOUT;

		set_bit(FLAGS_INIT, &dd->flags);
	}

	return 0;
}
static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
				 int final, int dma)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 val = length << 5, mask;

	if (likely(ctx->digcnt))
		omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt);

	omap_sham_write_mask(dd, SHA_REG_MASK,
		SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
		SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
	/*
	 * Setting ALGO_CONST only for the first iteration
	 * and CLOSE_HASH only for the last one.
	 */
	if (ctx->flags & BIT(FLAGS_SHA1))
		val |= SHA_REG_CTRL_ALGO;
	if (!ctx->digcnt)
		val |= SHA_REG_CTRL_ALGO_CONST;
	if (final)
		val |= SHA_REG_CTRL_CLOSE_HASH;

	mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
	       SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;

	omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
}
static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
			      size_t length, int final)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int count, len32;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
						ctx->digcnt, length, final);

	omap_sham_write_ctrl(dd, length, final, 0);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt += length;

	if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY))
		return -ETIMEDOUT;

	if (final)
		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */

	set_bit(FLAGS_CPU, &dd->flags);

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	for (count = 0; count < len32; count++)
		omap_sham_write(dd, SHA_REG_DIN(count), buffer[count]);

	return -EINPROGRESS;
}
static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
			      size_t length, int final)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int len32;

	dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
						ctx->digcnt, length, final);

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32,
			1, OMAP_DMA_SYNC_PACKET, dd->dma,
				OMAP_DMA_DST_SYNC_PREFETCH);

	omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC,
				dma_addr, 0, 0);

	omap_sham_write_ctrl(dd, length, final, 1);

	ctx->digcnt += length;

	if (final)
		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */

	set_bit(FLAGS_DMA_ACTIVE, &dd->flags);

	omap_start_dma(dd->dma_lch);

	return -EINPROGRESS;
}
static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx,
				const u8 *data, size_t length)
{
	size_t count = min(length, ctx->buflen - ctx->bufcnt);

	count = min(count, ctx->total);

	memcpy(ctx->buffer + ctx->bufcnt, data, count);
	ctx->bufcnt += count;

	return count;
}
static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx)
{
	size_t count;

	while (ctx->sg) {
		count = omap_sham_append_buffer(ctx,
				sg_virt(ctx->sg) + ctx->offset,
				ctx->sg->length - ctx->offset);
		if (!count)
			break;
		ctx->offset += count;
		ctx->total -= count;
		if (ctx->offset == ctx->sg->length) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
			else
				ctx->total = 0;
		}
	}

	return 0;
}
static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
					struct omap_sham_reqctx *ctx,
					size_t length, int final)
{
	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
				       DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
		dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
		return -EINVAL;
	}

	ctx->flags &= ~BIT(FLAGS_SG);

	/* next call does not fail... so no unmap in the case of error */
	return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final);
}
static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int final;
	size_t count;

	omap_sham_append_sg(ctx);

	final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;

	dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
					ctx->bufcnt, ctx->digcnt, final);

	if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		return omap_sham_xmit_dma_map(dd, ctx, count, final);
	}

	return 0;
}
/* Start address alignment */
#define SG_AA(sg)	(IS_ALIGNED(sg->offset, sizeof(u32)))
/* SHA1 block size alignment */
#define SG_SA(sg)	(IS_ALIGNED(sg->length, SHA1_MD5_BLOCK_SIZE))
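/*
 * The DMA fast path feeds scatterlist entries straight to the engine; it
 * needs word-aligned buffers (SG_AA) and, for all but the last entry,
 * block-aligned lengths (SG_SA).  Anything else falls back to the bounce
 * buffer in omap_sham_update_dma_slow().
 */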
static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int length, final, tail;
	struct scatterlist *sg;

	if (ctx->bufcnt || ctx->offset)
		return omap_sham_update_dma_slow(dd);

	dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
			ctx->digcnt, ctx->bufcnt, ctx->total);

	sg = ctx->sg;

	if (!SG_AA(sg))
		return omap_sham_update_dma_slow(dd);

	if (!sg_is_last(sg) && !SG_SA(sg))
		/* size is not SHA1_BLOCK_SIZE aligned */
		return omap_sham_update_dma_slow(dd);

	length = min(ctx->total, sg->length);

	if (sg_is_last(sg)) {
		if (!(ctx->flags & BIT(FLAGS_FINUP))) {
			/* not last sg must be SHA1_MD5_BLOCK_SIZE aligned */
			tail = length & (SHA1_MD5_BLOCK_SIZE - 1);
			/* without finup() we need one block to close hash */
			if (!tail)
				tail = SHA1_MD5_BLOCK_SIZE;
			length -= tail;
		}
	}

	if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
		dev_err(dd->dev, "dma_map_sg error\n");
		return -EINVAL;
	}

	ctx->flags |= BIT(FLAGS_SG);

	ctx->total -= length;
	ctx->offset = length; /* offset where to start slow */

	final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;

	/* next call does not fail... so no unmap in the case of error */
	return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final);
}
static int omap_sham_update_cpu(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int bufcnt;

	omap_sham_append_sg(ctx);
	bufcnt = ctx->bufcnt;
	ctx->bufcnt = 0;

	return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
}
static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);

	omap_stop_dma(dd->dma_lch);

	if (ctx->flags & BIT(FLAGS_SG)) {
		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
		if (ctx->sg->length == ctx->offset) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
		}
	} else {
		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
				 DMA_TO_DEVICE);
	}

	return 0;
}
static int omap_sham_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = NULL, *tmp;

	spin_lock_bh(&sham.lock);
	if (!tctx->dd) {
		list_for_each_entry(tmp, &sham.dev_list, list) {
			dd = tmp;
			break;
		}
		tctx->dd = dd;
	} else {
		dd = tctx->dd;
	}
	spin_unlock_bh(&sham.lock);

	ctx->dd = dd;

	ctx->flags = 0;

	dev_dbg(dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
		ctx->flags |= BIT(FLAGS_SHA1);

	ctx->bufcnt = 0;
	ctx->digcnt = 0;
	ctx->buflen = BUFLEN;

	if (tctx->flags & BIT(FLAGS_HMAC)) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;

		memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE);
		ctx->bufcnt = SHA1_MD5_BLOCK_SIZE;
		ctx->flags |= BIT(FLAGS_HMAC);
	}

	return 0;
}
static int omap_sham_update_req(struct omap_sham_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err;

	dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
		ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0);

	if (ctx->flags & BIT(FLAGS_CPU))
		err = omap_sham_update_cpu(dd);
	else
		err = omap_sham_update_dma_start(dd);

	/* wait for dma completion before can take more data */
	dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);

	return err;
}
static int omap_sham_final_req(struct omap_sham_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err = 0, use_dma = 1;

	if (ctx->bufcnt <= 64)
		/* faster to handle last block with cpu */
		use_dma = 0;

	if (use_dma)
		err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1);
	else
		err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);

	ctx->bufcnt = 0;

	dev_dbg(dd->dev, "final_req: err: %d\n", err);

	return err;
}
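/*
 * HMAC finalization: the hardware has produced the inner hash over
 * (ipad || message); the outer hash over (opad || inner digest) is
 * computed below with the software shash, as described in RFC 2104.
 */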
static int omap_sham_finish_hmac(struct ahash_request *req)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_hmac_ctx *bctx = tctx->base;
	int bs = crypto_shash_blocksize(bctx->shash);
	int ds = crypto_shash_digestsize(bctx->shash);
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(bctx->shash)];
	} desc;

	desc.shash.tfm = bctx->shash;
	desc.shash.flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */

	return crypto_shash_init(&desc.shash) ?:
	       crypto_shash_update(&desc.shash, bctx->opad, bs) ?:
	       crypto_shash_finup(&desc.shash, req->result, ds, req->result);
}
static int omap_sham_finish(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	int err = 0;

	if (ctx->digcnt) {
		omap_sham_copy_ready_hash(req);
		if (ctx->flags & BIT(FLAGS_HMAC))
			err = omap_sham_finish_hmac(req);
	}

	dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);

	return err;
}
static void omap_sham_finish_req(struct ahash_request *req, int err)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;

	if (!err) {
		omap_sham_copy_hash(req, 1);
		if (test_bit(FLAGS_FINAL, &dd->flags))
			err = omap_sham_finish(req);
	} else {
		ctx->flags |= BIT(FLAGS_ERROR);
	}

	/* atomic operation is not needed here */
	dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
			BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));
	clk_disable(dd->iclk);

	if (req->base.complete)
		req->base.complete(&req->base, err);

	/* handle new request */
	tasklet_schedule(&dd->done_task);
}
static int omap_sham_handle_queue(struct omap_sham_dev *dd,
				  struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct omap_sham_reqctx *ctx;
	unsigned long flags;
	int err = 0, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->queue, req);
	if (test_bit(FLAGS_BUSY, &dd->flags)) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		set_bit(FLAGS_BUSY, &dd->flags);
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	dd->req = req;
	ctx = ahash_request_ctx(req);

	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
						ctx->op, req->nbytes);

	err = omap_sham_hw_init(dd);
	if (err)
		goto err1;

	omap_set_dma_dest_params(dd->dma_lch, 0,
			OMAP_DMA_AMODE_CONSTANT,
			dd->phys_base + SHA_REG_DIN(0), 0, 16);

	omap_set_dma_dest_burst_mode(dd->dma_lch,
			OMAP_DMA_DATA_BURST_16);

	omap_set_dma_src_burst_mode(dd->dma_lch,
			OMAP_DMA_DATA_BURST_4);

	if (ctx->digcnt)
		/* request has changed - restore hash */
		omap_sham_copy_hash(req, 0);

	if (ctx->op == OP_UPDATE) {
		err = omap_sham_update_req(dd);
		if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP)))
			/* no final() after finup() */
			err = omap_sham_final_req(dd);
	} else if (ctx->op == OP_FINAL) {
		err = omap_sham_final_req(dd);
	}
err1:
	if (err != -EINPROGRESS)
		/* done_task will not finish it, so do it here */
		omap_sham_finish_req(req, err);

	dev_dbg(dd->dev, "exit, err: %d\n", err);

	return ret;
}
static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_dev *dd = tctx->dd;

	ctx->op = op;

	return omap_sham_handle_queue(dd, req);
}
*req
)
748 struct omap_sham_reqctx
*ctx
= ahash_request_ctx(req
);
753 ctx
->total
= req
->nbytes
;
757 if (ctx
->flags
& BIT(FLAGS_FINUP
)) {
758 if ((ctx
->digcnt
+ ctx
->bufcnt
+ ctx
->total
) < 9) {
760 * OMAP HW accel works only with buffers >= 9
761 * will switch to bypass in final()
762 * final has the same request and data
764 omap_sham_append_sg(ctx
);
766 } else if (ctx
->bufcnt
+ ctx
->total
<= SHA1_MD5_BLOCK_SIZE
) {
768 * faster to use CPU for short transfers
770 ctx
->flags
|= BIT(FLAGS_CPU
);
772 } else if (ctx
->bufcnt
+ ctx
->total
< ctx
->buflen
) {
773 omap_sham_append_sg(ctx
);
777 return omap_sham_enqueue(req
, OP_UPDATE
);
static int omap_sham_shash_digest(struct crypto_shash *shash, u32 flags,
				  const u8 *data, unsigned int len, u8 *out)
{
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(shash)];
	} desc;

	desc.shash.tfm = shash;
	desc.shash.flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_shash_digest(&desc.shash, data, len, out);
}
static int omap_sham_final_shash(struct ahash_request *req)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

	return omap_sham_shash_digest(tctx->fallback, req->base.flags,
				      ctx->buffer, ctx->bufcnt, req->result);
}
static int omap_sham_final(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

	ctx->flags |= BIT(FLAGS_FINUP);

	if (ctx->flags & BIT(FLAGS_ERROR))
		return 0; /* uncompleted hash is not needed */

	/* OMAP HW accel works only with buffers >= 9 */
	/* HMAC is always >= 9 because ipad == block size */
	if ((ctx->digcnt + ctx->bufcnt) < 9)
		return omap_sham_final_shash(req);
	else if (ctx->bufcnt)
		return omap_sham_enqueue(req, OP_FINAL);

	/* copy ready hash (+ finalize hmac) */
	return omap_sham_finish(req);
}
static int omap_sham_finup(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->flags |= BIT(FLAGS_FINUP);

	err1 = omap_sham_update(req);
	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;
	/*
	 * final() has to be always called to cleanup resources
	 * even if update() failed, except EINPROGRESS
	 */
	err2 = omap_sham_final(req);

	return err1 ?: err2;
}
static int omap_sham_digest(struct ahash_request *req)
{
	return omap_sham_init(req) ?: omap_sham_finup(req);
}
static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct omap_sham_hmac_ctx *bctx = tctx->base;
	int bs = crypto_shash_blocksize(bctx->shash);
	int ds = crypto_shash_digestsize(bctx->shash);
	int err, i;

	err = crypto_shash_setkey(tctx->fallback, key, keylen);
	if (err)
		return err;

	if (keylen > bs) {
		err = omap_sham_shash_digest(bctx->shash,
				crypto_shash_get_flags(bctx->shash),
				key, keylen, bctx->ipad);
		if (err)
			return err;
		keylen = ds;
	} else {
		memcpy(bctx->ipad, key, keylen);
	}

	memset(bctx->ipad + keylen, 0, bs - keylen);
	memcpy(bctx->opad, bctx->ipad, bs);

	/* XOR the zero-padded key into the ipad/opad patterns (RFC 2104) */
	for (i = 0; i < bs; i++) {
		bctx->ipad[i] ^= 0x36;
		bctx->opad[i] ^= 0x5c;
	}

	return 0;
}
static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
	const char *alg_name = crypto_tfm_alg_name(tfm);

	/* Allocate a fallback and abort if it failed. */
	tctx->fallback = crypto_alloc_shash(alg_name, 0,
					    CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tctx->fallback)) {
		pr_err("omap-sham: fallback driver '%s' "
				"could not be loaded.\n", alg_name);
		return PTR_ERR(tctx->fallback);
	}

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct omap_sham_reqctx) + BUFLEN);

	if (alg_base) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		tctx->flags |= BIT(FLAGS_HMAC);
		bctx->shash = crypto_alloc_shash(alg_base, 0,
						CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(bctx->shash)) {
			pr_err("omap-sham: base driver '%s' "
					"could not be loaded.\n", alg_base);
			crypto_free_shash(tctx->fallback);
			return PTR_ERR(bctx->shash);
		}
	}

	return 0;
}

static int omap_sham_cra_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, NULL);
}

static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha1");
}

static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "md5");
}

static void omap_sham_cra_exit(struct crypto_tfm *tfm)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(tctx->fallback);
	tctx->fallback = NULL;

	if (tctx->flags & BIT(FLAGS_HMAC)) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		crypto_free_shash(bctx->shash);
	}
}
static struct ahash_alg algs[] = {
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha1",
		.cra_driver_name	= "omap-sha1",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= MD5_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "md5",
		.cra_driver_name	= "omap-md5",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha1)",
		.cra_driver_name	= "omap-hmac-sha1",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha1_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= MD5_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(md5)",
		.cra_driver_name	= "omap-hmac-md5",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_md5_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
};
)
1044 struct omap_sham_dev
*dd
= (struct omap_sham_dev
*)data
;
1047 if (!test_bit(FLAGS_BUSY
, &dd
->flags
)) {
1048 omap_sham_handle_queue(dd
, NULL
);
1052 if (test_bit(FLAGS_CPU
, &dd
->flags
)) {
1053 if (test_and_clear_bit(FLAGS_OUTPUT_READY
, &dd
->flags
))
1055 } else if (test_bit(FLAGS_DMA_READY
, &dd
->flags
)) {
1056 if (test_and_clear_bit(FLAGS_DMA_ACTIVE
, &dd
->flags
)) {
1057 omap_sham_update_dma_stop(dd
);
1063 if (test_and_clear_bit(FLAGS_OUTPUT_READY
, &dd
->flags
)) {
1064 /* hash or semi-hash ready */
1065 clear_bit(FLAGS_DMA_READY
, &dd
->flags
);
1066 err
= omap_sham_update_dma_start(dd
);
1067 if (err
!= -EINPROGRESS
)
1075 dev_dbg(dd
->dev
, "update done: err: %d\n", err
);
1076 /* finish curent request */
1077 omap_sham_finish_req(dd
->req
, err
);
static irqreturn_t omap_sham_irq(int irq, void *dev_id)
{
	struct omap_sham_dev *dd = dev_id;

	if (unlikely(test_bit(FLAGS_FINAL, &dd->flags)))
		/* final -> allow device to go to power-saving mode */
		omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);

	omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
				 SHA_REG_CTRL_OUTPUT_READY);
	omap_sham_read(dd, SHA_REG_CTRL);

	if (!test_bit(FLAGS_BUSY, &dd->flags)) {
		dev_warn(dd->dev, "Interrupt when no active requests.\n");
	} else {
		set_bit(FLAGS_OUTPUT_READY, &dd->flags);
		tasklet_schedule(&dd->done_task);
	}

	return IRQ_HANDLED;
}
static void omap_sham_dma_callback(int lch, u16 ch_status, void *data)
{
	struct omap_sham_dev *dd = data;

	if (ch_status != OMAP_DMA_BLOCK_IRQ) {
		pr_err("omap-sham DMA error status: 0x%hx\n", ch_status);
		clear_bit(FLAGS_INIT, &dd->flags);/* request to re-initialize */
	}

	set_bit(FLAGS_DMA_READY, &dd->flags);
	tasklet_schedule(&dd->done_task);
}
static int omap_sham_dma_init(struct omap_sham_dev *dd)
{
	int err;

	dd->dma_lch = -1;

	err = omap_request_dma(dd->dma, dev_name(dd->dev),
			omap_sham_dma_callback, dd, &dd->dma_lch);
	if (err)
		dev_err(dd->dev, "Unable to request DMA channel\n");

	return err;
}
static void omap_sham_dma_cleanup(struct omap_sham_dev *dd)
{
	if (dd->dma_lch >= 0) {
		omap_free_dma(dd->dma_lch);
		dd->dma_lch = -1;
	}
}
static int __devinit omap_sham_probe(struct platform_device *pdev)
{
	struct omap_sham_dev *dd;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int err, i, j;

	dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto data_err;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	INIT_LIST_HEAD(&dd->list);
	spin_lock_init(&dd->lock);
	tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
	crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);

	dd->irq = -1;

	/* Get the base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	dd->phys_base = res->start;

	/* Get the DMA */
	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!res) {
		dev_err(dev, "no DMA resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	dd->dma = res->start;

	/* Get the IRQ */
	dd->irq = platform_get_irq(pdev, 0);
	if (dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = dd->irq;
		goto res_err;
	}

	err = request_irq(dd->irq, omap_sham_irq,
			IRQF_TRIGGER_LOW, dev_name(dev), dd);
	if (err) {
		dev_err(dev, "unable to request irq.\n");
		goto res_err;
	}

	err = omap_sham_dma_init(dd);
	if (err)
		goto dma_err;

	/* Initializing the clock */
	dd->iclk = clk_get(dev, "ick");
	if (IS_ERR(dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(dd->iclk);
		goto clk_err;
	}

	dd->io_base = ioremap(dd->phys_base, SZ_4K);
	if (!dd->io_base) {
		dev_err(dev, "can't ioremap\n");
		err = -ENOMEM;
		goto io_err;
	}

	clk_enable(dd->iclk);
	dev_info(dev, "hw accel on OMAP rev %u.%u\n",
		(omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MAJOR) >> 4,
		omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MINOR);
	clk_disable(dd->iclk);

	spin_lock(&sham.lock);
	list_add_tail(&dd->list, &sham.dev_list);
	spin_unlock(&sham.lock);

	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		err = crypto_register_ahash(&algs[i]);
		if (err)
			goto err_algs;
	}

	return 0;

err_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(&algs[j]);
	iounmap(dd->io_base);
io_err:
	clk_put(dd->iclk);
clk_err:
	omap_sham_dma_cleanup(dd);
dma_err:
	if (dd->irq >= 0)
		free_irq(dd->irq, dd);
res_err:
	kfree(dd);
	dd = NULL;
data_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}
static int __devexit omap_sham_remove(struct platform_device *pdev)
{
	struct omap_sham_dev *dd;
	int i;

	dd = platform_get_drvdata(pdev);
	if (!dd)
		return -ENODEV;
	spin_lock(&sham.lock);
	list_del(&dd->list);
	spin_unlock(&sham.lock);
	for (i = 0; i < ARRAY_SIZE(algs); i++)
		crypto_unregister_ahash(&algs[i]);
	tasklet_kill(&dd->done_task);
	iounmap(dd->io_base);
	clk_put(dd->iclk);
	omap_sham_dma_cleanup(dd);
	if (dd->irq >= 0)
		free_irq(dd->irq, dd);
	kfree(dd);
	dd = NULL;

	return 0;
}
static struct platform_driver omap_sham_driver = {
	.probe	= omap_sham_probe,
	.remove	= omap_sham_remove,
	.driver	= {
		.name	= "omap-sham",
		.owner	= THIS_MODULE,
	},
};
static int __init omap_sham_mod_init(void)
{
	pr_info("loading %s driver\n", "omap-sham");

	if (!cpu_class_is_omap2() ||
		(omap_type() != OMAP2_DEVICE_TYPE_SEC &&
			omap_type() != OMAP2_DEVICE_TYPE_EMU)) {
		pr_err("Unsupported cpu\n");
		return -ENODEV;
	}

	return platform_driver_register(&omap_sham_driver);
}
static void __exit omap_sham_mod_exit(void)
{
	platform_driver_unregister(&omap_sham_driver);
}
module_init(omap_sham_mod_init);
module_exit(omap_sham_mod_exit);

MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");