/*
 * Support for OMAP SHA1/MD5 HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 * Copyright (c) 2011 Texas Instruments Incorporated
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from old omap-sha1-md5.c driver.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/err.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/hmac.h>
#include <crypto/internal/hash.h>
#define MD5_DIGEST_SIZE			16

#define SHA_REG_IDIGEST(dd, x)		((dd)->pdata->idigest_ofs + ((x)*0x04))
#define SHA_REG_DIN(dd, x)		((dd)->pdata->din_ofs + ((x) * 0x04))
#define SHA_REG_DIGCNT(dd)		((dd)->pdata->digcnt_ofs)

#define SHA_REG_ODIGEST(dd, x)		((dd)->pdata->odigest_ofs + (x * 0x04))

#define SHA_REG_CTRL			0x18
#define SHA_REG_CTRL_LENGTH		(0xFFFFFFFF << 5)
#define SHA_REG_CTRL_CLOSE_HASH		(1 << 4)
#define SHA_REG_CTRL_ALGO_CONST		(1 << 3)
#define SHA_REG_CTRL_ALGO		(1 << 2)
#define SHA_REG_CTRL_INPUT_READY	(1 << 1)
#define SHA_REG_CTRL_OUTPUT_READY	(1 << 0)

#define SHA_REG_REV(dd)			((dd)->pdata->rev_ofs)

#define SHA_REG_MASK(dd)		((dd)->pdata->mask_ofs)
#define SHA_REG_MASK_DMA_EN		(1 << 3)
#define SHA_REG_MASK_IT_EN		(1 << 2)
#define SHA_REG_MASK_SOFTRESET		(1 << 1)
#define SHA_REG_AUTOIDLE		(1 << 0)

#define SHA_REG_SYSSTATUS(dd)		((dd)->pdata->sysstatus_ofs)
#define SHA_REG_SYSSTATUS_RESETDONE	(1 << 0)

#define SHA_REG_MODE(dd)		((dd)->pdata->mode_ofs)
#define SHA_REG_MODE_HMAC_OUTER_HASH	(1 << 7)
#define SHA_REG_MODE_HMAC_KEY_PROC	(1 << 5)
#define SHA_REG_MODE_CLOSE_HASH		(1 << 4)
#define SHA_REG_MODE_ALGO_CONSTANT	(1 << 3)

#define SHA_REG_MODE_ALGO_MASK		(7 << 0)
#define SHA_REG_MODE_ALGO_MD5_128	(0 << 1)
#define SHA_REG_MODE_ALGO_SHA1_160	(1 << 1)
#define SHA_REG_MODE_ALGO_SHA2_224	(2 << 1)
#define SHA_REG_MODE_ALGO_SHA2_256	(3 << 1)
#define SHA_REG_MODE_ALGO_SHA2_384	(1 << 0)
#define SHA_REG_MODE_ALGO_SHA2_512	(3 << 0)

#define SHA_REG_LENGTH(dd)		((dd)->pdata->length_ofs)

#define SHA_REG_IRQSTATUS		0x118
#define SHA_REG_IRQSTATUS_CTX_RDY	(1 << 3)
#define SHA_REG_IRQSTATUS_PARTHASH_RDY	(1 << 2)
#define SHA_REG_IRQSTATUS_INPUT_RDY	(1 << 1)
#define SHA_REG_IRQSTATUS_OUTPUT_RDY	(1 << 0)

#define SHA_REG_IRQENA			0x11C
#define SHA_REG_IRQENA_CTX_RDY		(1 << 3)
#define SHA_REG_IRQENA_PARTHASH_RDY	(1 << 2)
#define SHA_REG_IRQENA_INPUT_RDY	(1 << 1)
#define SHA_REG_IRQENA_OUTPUT_RDY	(1 << 0)

#define DEFAULT_TIMEOUT_INTERVAL	HZ

#define DEFAULT_AUTOSUSPEND_DELAY	1000

/* mostly device flags */
#define FLAGS_FINAL		1
#define FLAGS_DMA_ACTIVE	2
#define FLAGS_OUTPUT_READY	3
#define FLAGS_DMA_READY		6
#define FLAGS_AUTO_XOR		7
#define FLAGS_BE32_SHA1		8
#define FLAGS_SGS_COPIED	9
#define FLAGS_SGS_ALLOCED	10

#define FLAGS_FINUP		16

#define FLAGS_MODE_SHIFT	18
#define FLAGS_MODE_MASK		(SHA_REG_MODE_ALGO_MASK << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_MD5		(SHA_REG_MODE_ALGO_MD5_128 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA1		(SHA_REG_MODE_ALGO_SHA1_160 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA224	(SHA_REG_MODE_ALGO_SHA2_224 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA256	(SHA_REG_MODE_ALGO_SHA2_256 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA384	(SHA_REG_MODE_ALGO_SHA2_384 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA512	(SHA_REG_MODE_ALGO_SHA2_512 << FLAGS_MODE_SHIFT)

#define FLAGS_HMAC		21
#define FLAGS_ERROR		22

#define OMAP_ALIGN_MASK		(sizeof(u32)-1)
#define OMAP_ALIGNED		__attribute__((aligned(sizeof(u32))))

#define BUFLEN			SHA512_BLOCK_SIZE
#define OMAP_SHA_DMA_THRESHOLD	256
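
/*
 * Illustrative sketch, not part of the original driver: the FLAGS_MODE_*
 * values above are simply the SHA_REG_MODE algorithm encoding shifted up by
 * FLAGS_MODE_SHIFT, so the MODE field can be recovered from the request
 * flags with a mask and a shift, as omap_sham_write_ctrl_omap4() does below.
 */
static inline u32 omap_sham_flags_to_algo(unsigned long flags)
{
	return (flags & FLAGS_MODE_MASK) >> FLAGS_MODE_SHIFT;
}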
struct omap_sham_dev;

struct omap_sham_reqctx {
	struct omap_sham_dev	*dd;
	unsigned long		flags;
	unsigned long		op;

	u8			digest[SHA512_DIGEST_SIZE] OMAP_ALIGNED;
	size_t			digcnt;
	size_t			bufcnt;
	size_t			buflen;

	/* walk state */
	struct scatterlist	*sg;
	struct scatterlist	sgl[2];
	int			offset;	/* offset in current sg */
	int			sg_len;
	unsigned int		total;	/* total request */

	u8			buffer[0] OMAP_ALIGNED;
};

struct omap_sham_hmac_ctx {
	struct crypto_shash	*shash;
	u8			ipad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
	u8			opad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
};

struct omap_sham_ctx {
	struct omap_sham_dev	*dd;

	unsigned long		flags;

	/* fallback stuff */
	struct crypto_shash	*fallback;

	struct omap_sham_hmac_ctx base[0];
};
#define OMAP_SHAM_QUEUE_LENGTH	10

struct omap_sham_algs_info {
	struct ahash_alg	*algs_list;
	unsigned int		size;
	unsigned int		registered;
};

struct omap_sham_pdata {
	struct omap_sham_algs_info	*algs_info;
	unsigned int	algs_info_size;
	unsigned long	flags;
	int		digest_size;

	void		(*copy_hash)(struct ahash_request *req, int out);
	void		(*write_ctrl)(struct omap_sham_dev *dd, size_t length,
				      int final, int dma);
	void		(*trigger)(struct omap_sham_dev *dd, size_t length);
	int		(*poll_irq)(struct omap_sham_dev *dd);
	irqreturn_t	(*intr_hdlr)(int irq, void *dev_id);

	u32		odigest_ofs;
	u32		idigest_ofs;
	u32		din_ofs;
	u32		digcnt_ofs;
	u32		rev_ofs;
	u32		mask_ofs;
	u32		sysstatus_ofs;
	u32		mode_ofs;
	u32		length_ofs;

	u32		major_mask;
	u32		major_shift;
	u32		minor_mask;
	u32		minor_shift;
};
struct omap_sham_dev {
	struct list_head	list;
	unsigned long		phys_base;
	struct device		*dev;
	void __iomem		*io_base;
	int			irq;
	spinlock_t		lock;
	struct dma_chan		*dma_lch;
	struct tasklet_struct	done_task;
	u8			polling_mode;
	u8			xmit_buf[BUFLEN] OMAP_ALIGNED;

	unsigned long		flags;
	int			fallback_sz;
	struct crypto_queue	queue;
	struct ahash_request	*req;

	const struct omap_sham_pdata	*pdata;
};

struct omap_sham_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
};

static struct omap_sham_drv sham = {
	.dev_list = LIST_HEAD_INIT(sham.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(sham.lock),
};
static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_base + offset);
}

static inline void omap_sham_write(struct omap_sham_dev *dd,
					u32 offset, u32 value)
{
	__raw_writel(value, dd->io_base + offset);
}

static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
					u32 value, u32 mask)
{
	u32 val;

	val = omap_sham_read(dd, address);
	val &= ~mask;
	val |= value;
	omap_sham_write(dd, address, val);
}

static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
{
	unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;

	while (!(omap_sham_read(dd, offset) & bit)) {
		if (time_is_before_jiffies(timeout))
			return -ETIMEDOUT;
	}

	return 0;
}
static void omap_sham_copy_hash_omap2(struct ahash_request *req, int out)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	u32 *hash = (u32 *)ctx->digest;
	int i;

	for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
		if (out)
			hash[i] = omap_sham_read(dd, SHA_REG_IDIGEST(dd, i));
		else
			omap_sham_write(dd, SHA_REG_IDIGEST(dd, i), hash[i]);
	}
}

static void omap_sham_copy_hash_omap4(struct ahash_request *req, int out)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	int i;

	if (ctx->flags & BIT(FLAGS_HMAC)) {
		struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
		struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		u32 *opad = (u32 *)bctx->opad;

		for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
			if (out)
				opad[i] = omap_sham_read(dd,
						SHA_REG_ODIGEST(dd, i));
			else
				omap_sham_write(dd, SHA_REG_ODIGEST(dd, i),
						opad[i]);
		}
	}

	omap_sham_copy_hash_omap2(req, out);
}

static void omap_sham_copy_ready_hash(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	u32 *in = (u32 *)ctx->digest;
	u32 *hash = (u32 *)req->result;
	int i, d, big_endian = 0;

	switch (ctx->flags & FLAGS_MODE_MASK) {
	case FLAGS_MODE_MD5:
		d = MD5_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA1:
		/* OMAP2 SHA1 is big endian */
		if (test_bit(FLAGS_BE32_SHA1, &ctx->dd->flags))
			big_endian = 1;
		d = SHA1_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA224:
		d = SHA224_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA256:
		d = SHA256_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA384:
		d = SHA384_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA512:
		d = SHA512_DIGEST_SIZE / sizeof(u32);
		break;
	default:
		d = 0;
	}

	if (big_endian)
		for (i = 0; i < d; i++)
			hash[i] = be32_to_cpu(in[i]);
	else
		for (i = 0; i < d; i++)
			hash[i] = le32_to_cpu(in[i]);
}
static int omap_sham_hw_init(struct omap_sham_dev *dd)
{
	int err;

	err = pm_runtime_get_sync(dd->dev);
	if (err < 0) {
		dev_err(dd->dev, "failed to get sync: %d\n", err);
		return err;
	}

	if (!test_bit(FLAGS_INIT, &dd->flags)) {
		set_bit(FLAGS_INIT, &dd->flags);
	}

	return 0;
}
static void omap_sham_write_ctrl_omap2(struct omap_sham_dev *dd, size_t length,
				 int final, int dma)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 val = length << 5, mask;

	if (likely(ctx->digcnt))
		omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);

	omap_sham_write_mask(dd, SHA_REG_MASK(dd),
		SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
		SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
	/*
	 * Setting ALGO_CONST only for the first iteration
	 * and CLOSE_HASH only for the last one.
	 */
	if ((ctx->flags & FLAGS_MODE_MASK) == FLAGS_MODE_SHA1)
		val |= SHA_REG_CTRL_ALGO;
	if (!ctx->digcnt)
		val |= SHA_REG_CTRL_ALGO_CONST;
	if (final)
		val |= SHA_REG_CTRL_CLOSE_HASH;

	mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
			SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;

	omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
}
static void omap_sham_trigger_omap2(struct omap_sham_dev *dd, size_t length)
{
}

static int omap_sham_poll_irq_omap2(struct omap_sham_dev *dd)
{
	return omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY);
}
static int get_block_size(struct omap_sham_reqctx *ctx)
{
	int d;

	switch (ctx->flags & FLAGS_MODE_MASK) {
	case FLAGS_MODE_MD5:
	case FLAGS_MODE_SHA1:
		d = SHA1_BLOCK_SIZE;
		break;
	case FLAGS_MODE_SHA224:
	case FLAGS_MODE_SHA256:
		d = SHA256_BLOCK_SIZE;
		break;
	case FLAGS_MODE_SHA384:
	case FLAGS_MODE_SHA512:
		d = SHA512_BLOCK_SIZE;
		break;
	default:
		d = 0;
	}

	return d;
}
static void omap_sham_write_n(struct omap_sham_dev *dd, u32 offset,
				    u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		omap_sham_write(dd, offset, *value);
}
static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
				 int final, int dma)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 val, mask;

	/*
	 * Setting ALGO_CONST only for the first iteration and
	 * CLOSE_HASH only for the last one. Note that flags mode bits
	 * correspond to algorithm encoding in mode register.
	 */
	val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT);
	if (!ctx->digcnt) {
		struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
		struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		int bs, nr_dr;

		val |= SHA_REG_MODE_ALGO_CONSTANT;

		if (ctx->flags & BIT(FLAGS_HMAC)) {
			bs = get_block_size(ctx);
			nr_dr = bs / (2 * sizeof(u32));
			val |= SHA_REG_MODE_HMAC_KEY_PROC;
			omap_sham_write_n(dd, SHA_REG_ODIGEST(dd, 0),
					  (u32 *)bctx->ipad, nr_dr);
			omap_sham_write_n(dd, SHA_REG_IDIGEST(dd, 0),
					  (u32 *)bctx->ipad + nr_dr, nr_dr);
			ctx->digcnt += bs;
		}
	}

	if (final) {
		val |= SHA_REG_MODE_CLOSE_HASH;

		if (ctx->flags & BIT(FLAGS_HMAC))
			val |= SHA_REG_MODE_HMAC_OUTER_HASH;
	}

	mask = SHA_REG_MODE_ALGO_CONSTANT | SHA_REG_MODE_CLOSE_HASH |
	       SHA_REG_MODE_ALGO_MASK | SHA_REG_MODE_HMAC_OUTER_HASH |
	       SHA_REG_MODE_HMAC_KEY_PROC;

	dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags);
	omap_sham_write_mask(dd, SHA_REG_MODE(dd), val, mask);
	omap_sham_write(dd, SHA_REG_IRQENA, SHA_REG_IRQENA_OUTPUT_RDY);
	omap_sham_write_mask(dd, SHA_REG_MASK(dd),
			     SHA_REG_MASK_IT_EN |
				     (dma ? SHA_REG_MASK_DMA_EN : 0),
			     SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
}
static void omap_sham_trigger_omap4(struct omap_sham_dev *dd, size_t length)
{
	omap_sham_write(dd, SHA_REG_LENGTH(dd), length);
}

static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd)
{
	return omap_sham_wait(dd, SHA_REG_IRQSTATUS,
			      SHA_REG_IRQSTATUS_INPUT_RDY);
}
static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, size_t length,
			      int final)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int count, len32, bs32, offset = 0;
	const u32 *buffer;
	int mlen;
	struct sg_mapping_iter mi;

	dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
						ctx->digcnt, length, final);

	dd->pdata->write_ctrl(dd, length, final, 0);
	dd->pdata->trigger(dd, length);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt += length;
	ctx->total -= length;

	if (final)
		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */

	set_bit(FLAGS_CPU, &dd->flags);

	len32 = DIV_ROUND_UP(length, sizeof(u32));
	bs32 = get_block_size(ctx) / sizeof(u32);

	sg_miter_start(&mi, ctx->sg, ctx->sg_len,
			SG_MITER_FROM_SG | SG_MITER_ATOMIC);

	mlen = 0;

	while (length) {
		if (dd->pdata->poll_irq(dd))
			return -ETIMEDOUT;

		for (count = 0; count < min(len32, bs32); count++, offset++) {
			if (!mlen) {
				sg_miter_next(&mi);
				mlen = mi.length;
				if (!mlen) {
					pr_err("sg miter failure.\n");
					return -EINVAL;
				}
				offset = 0;
				buffer = mi.addr;
			}
			omap_sham_write(dd, SHA_REG_DIN(dd, count),
					buffer[offset]);
			mlen -= 4;
		}
		len32 -= min(len32, bs32);
	}

	sg_miter_stop(&mi);

	return -EINPROGRESS;
}
static void omap_sham_dma_callback(void *param)
{
	struct omap_sham_dev *dd = param;

	set_bit(FLAGS_DMA_READY, &dd->flags);
	tasklet_schedule(&dd->done_task);
}
static int omap_sham_xmit_dma(struct omap_sham_dev *dd, size_t length,
			      int final)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	struct dma_async_tx_descriptor *tx;
	struct dma_slave_config cfg;
	int ret;

	dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
						ctx->digcnt, length, final);

	if (!dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE)) {
		dev_err(dd->dev, "dma_map_sg error\n");
		return -EINVAL;
	}

	memset(&cfg, 0, sizeof(cfg));

	cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0);
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_maxburst = get_block_size(ctx) / DMA_SLAVE_BUSWIDTH_4_BYTES;

	ret = dmaengine_slave_config(dd->dma_lch, &cfg);
	if (ret) {
		pr_err("omap-sham: can't configure dmaengine slave: %d\n", ret);
		return ret;
	}

	tx = dmaengine_prep_slave_sg(dd->dma_lch, ctx->sg, ctx->sg_len,
				     DMA_MEM_TO_DEV,
				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		dev_err(dd->dev, "prep_slave_sg failed\n");
		return -EINVAL;
	}

	tx->callback = omap_sham_dma_callback;
	tx->callback_param = dd;

	dd->pdata->write_ctrl(dd, length, final, 1);

	ctx->digcnt += length;
	ctx->total -= length;

	if (final)
		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */

	set_bit(FLAGS_DMA_ACTIVE, &dd->flags);

	dmaengine_submit(tx);
	dma_async_issue_pending(dd->dma_lch);

	dd->pdata->trigger(dd, length);

	return -EINPROGRESS;
}
static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx,
				   struct scatterlist *sg, int bs, int new_len)
{
	int n = sg_nents(sg);
	struct scatterlist *tmp;
	int offset = ctx->offset;

	if (ctx->bufcnt)
		n++;

	ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
	if (!ctx->sg)
		return -ENOMEM;

	sg_init_table(ctx->sg, n);

	tmp = ctx->sg;

	ctx->sg_len = 0;

	if (ctx->bufcnt) {
		sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
		tmp = sg_next(tmp);
		ctx->sg_len++;
	}

	while (sg && new_len) {
		int len = sg->length - offset;

		if (offset) {
			offset -= sg->length;
			if (offset < 0)
				offset = 0;
		}

		if (new_len < len)
			len = new_len;

		if (len > 0) {
			new_len -= len;
			sg_set_page(tmp, sg_page(sg), len, sg->offset);
			tmp = sg_next(tmp);
			ctx->sg_len++;
		}

		sg = sg_next(sg);
	}

	set_bit(FLAGS_SGS_ALLOCED, &ctx->dd->flags);

	ctx->bufcnt = 0;

	return 0;
}
static int omap_sham_copy_sgs(struct omap_sham_reqctx *ctx,
			      struct scatterlist *sg, int bs, int new_len)
{
	int pages;
	void *buf;
	int len;

	len = new_len + ctx->bufcnt;

	pages = get_order(ctx->total);

	buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
	if (!buf) {
		pr_err("Couldn't allocate pages for unaligned cases.\n");
		return -ENOMEM;
	}

	if (ctx->bufcnt)
		memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);

	scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->offset,
				 ctx->total - ctx->bufcnt, 0);
	sg_init_table(ctx->sgl, 1);
	sg_set_buf(ctx->sgl, buf, len);
	ctx->sg = ctx->sgl;
	set_bit(FLAGS_SGS_COPIED, &ctx->dd->flags);

	ctx->sg_len = 1;

	return 0;
}
static int omap_sham_align_sgs(struct scatterlist *sg,
			       int nbytes, int bs, bool final,
			       struct omap_sham_reqctx *rctx)
{
	int n = 0;
	bool aligned = true;
	bool list_ok = true;
	struct scatterlist *sg_tmp = sg;
	int new_len;
	int offset = rctx->offset;

	if (!sg || !sg->length || !nbytes)
		return 0;

	new_len = nbytes;

	if (offset)
		list_ok = false;

	if (final)
		new_len = DIV_ROUND_UP(new_len, bs) * bs;
	else
		new_len = (new_len - 1) / bs * bs;

	if (nbytes != new_len)
		list_ok = false;

	while (nbytes > 0 && sg_tmp) {
		n++;

#ifdef CONFIG_ZONE_DMA
		if (page_zonenum(sg_page(sg_tmp)) != ZONE_DMA) {
			aligned = false;
			break;
		}
#endif

		if (offset < sg_tmp->length) {
			if (!IS_ALIGNED(offset + sg_tmp->offset, 4)) {
				aligned = false;
				break;
			}

			if (!IS_ALIGNED(sg_tmp->length - offset, bs)) {
				aligned = false;
				break;
			}
		}

		if (offset) {
			offset -= sg_tmp->length;
			if (offset < 0) {
				nbytes += offset;
				offset = 0;
			}
		} else {
			nbytes -= sg_tmp->length;
		}

		sg_tmp = sg_next(sg_tmp);
	}

	if (!aligned)
		return omap_sham_copy_sgs(rctx, sg, bs, new_len);
	else if (!list_ok)
		return omap_sham_copy_sg_lists(rctx, sg, bs, new_len);

	rctx->sg_len = n;
	rctx->sg = sg;

	return 0;
}
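
/*
 * Illustrative only (helper not used by the driver): the per-segment
 * requirements checked in omap_sham_align_sgs() above boil down to a
 * word-aligned start and a block-multiple length, roughly:
 */
static inline bool omap_sham_sg_segment_ok(struct scatterlist *sg,
					   int offset, int bs)
{
	return IS_ALIGNED(offset + sg->offset, 4) &&
	       IS_ALIGNED(sg->length - offset, bs);
}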
static int omap_sham_prepare_request(struct ahash_request *req, bool update)
{
	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
	int bs;
	int ret;
	int nbytes;
	bool final = rctx->flags & BIT(FLAGS_FINUP);
	int xmit_len, hash_later;

	bs = get_block_size(rctx);

	if (update)
		nbytes = req->nbytes;
	else
		nbytes = 0;

	rctx->total = nbytes + rctx->bufcnt;
	if (!rctx->total)
		return 0;

	if (nbytes && (!IS_ALIGNED(rctx->bufcnt, bs))) {
		int len = bs - rctx->bufcnt % bs;

		if (len > nbytes)
			len = nbytes;
		scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, req->src,
					 0, len, 0);
		rctx->bufcnt += len;
		nbytes -= len;
		rctx->offset = len;
	}

	if (rctx->bufcnt)
		memcpy(rctx->dd->xmit_buf, rctx->buffer, rctx->bufcnt);

	ret = omap_sham_align_sgs(req->src, nbytes, bs, final, rctx);
	if (ret)
		return ret;

	xmit_len = rctx->total;

	if (!IS_ALIGNED(xmit_len, bs)) {
		if (final)
			xmit_len = DIV_ROUND_UP(xmit_len, bs) * bs;
		else
			xmit_len = xmit_len / bs * bs;
	}

	hash_later = rctx->total - xmit_len;
	if (hash_later < 0)
		hash_later = 0;

	if (rctx->bufcnt && nbytes) {
		/* have data from previous operation and current */
		sg_init_table(rctx->sgl, 2);
		sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, rctx->bufcnt);

		sg_chain(rctx->sgl, 2, req->src);

		rctx->sg = rctx->sgl;

		rctx->sg_len++;
	} else if (rctx->bufcnt) {
		/* have buffered data only */
		sg_init_table(rctx->sgl, 1);
		sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, xmit_len);

		rctx->sg = rctx->sgl;

		rctx->sg_len = 1;
	}

	if (hash_later) {
		int offset = 0;

		if (hash_later > req->nbytes) {
			memcpy(rctx->buffer, rctx->buffer + xmit_len,
			       hash_later - req->nbytes);
			offset = hash_later - req->nbytes;
		}

		if (req->nbytes)
			scatterwalk_map_and_copy(rctx->buffer + offset,
						 req->src,
						 offset + req->nbytes -
						 hash_later, hash_later, 0);

		rctx->bufcnt = hash_later;
	} else {
		rctx->bufcnt = 0;
	}

	if (!final)
		rctx->total = xmit_len;

	return 0;
}
static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);

	dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);

	clear_bit(FLAGS_DMA_ACTIVE, &dd->flags);

	return 0;
}
static int omap_sham_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = NULL, *tmp;
	int bs = 0;

	spin_lock_bh(&sham.lock);
	if (!tctx->dd) {
		list_for_each_entry(tmp, &sham.dev_list, list) {
			dd = tmp;
			break;
		}
		tctx->dd = dd;
	} else {
		dd = tctx->dd;
	}
	spin_unlock_bh(&sham.lock);

	ctx->dd = dd;

	ctx->flags = 0;

	dev_dbg(dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	switch (crypto_ahash_digestsize(tfm)) {
	case MD5_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_MD5;
		bs = SHA1_BLOCK_SIZE;
		break;
	case SHA1_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA1;
		bs = SHA1_BLOCK_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA224;
		bs = SHA224_BLOCK_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA256;
		bs = SHA256_BLOCK_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA384;
		bs = SHA384_BLOCK_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA512;
		bs = SHA512_BLOCK_SIZE;
		break;
	}

	ctx->bufcnt = 0;
	ctx->digcnt = 0;
	ctx->total = 0;
	ctx->offset = 0;
	ctx->buflen = BUFLEN;

	if (tctx->flags & BIT(FLAGS_HMAC)) {
		if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
			struct omap_sham_hmac_ctx *bctx = tctx->base;

			memcpy(ctx->buffer, bctx->ipad, bs);
			ctx->bufcnt = bs;
		}

		ctx->flags |= BIT(FLAGS_HMAC);
	}

	return 0;
}
static int omap_sham_update_req(struct omap_sham_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err;
	bool final = ctx->flags & BIT(FLAGS_FINUP);

	dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
		ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0);

	if (ctx->total < get_block_size(ctx) ||
	    ctx->total < dd->fallback_sz)
		ctx->flags |= BIT(FLAGS_CPU);

	if (ctx->flags & BIT(FLAGS_CPU))
		err = omap_sham_xmit_cpu(dd, ctx->total, final);
	else
		err = omap_sham_xmit_dma(dd, ctx->total, final);

	/* wait for dma completion before can take more data */
	dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);

	return err;
}
static int omap_sham_final_req(struct omap_sham_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err = 0, use_dma = 1;

	if ((ctx->total <= get_block_size(ctx)) || dd->polling_mode)
		/*
		 * faster to handle last block with cpu or
		 * use cpu when dma is not present.
		 */
		use_dma = 0;

	if (use_dma)
		err = omap_sham_xmit_dma(dd, ctx->total, 1);
	else
		err = omap_sham_xmit_cpu(dd, ctx->total, 1);

	dev_dbg(dd->dev, "final_req: err: %d\n", err);

	return err;
}
static int omap_sham_finish_hmac(struct ahash_request *req)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_hmac_ctx *bctx = tctx->base;
	int bs = crypto_shash_blocksize(bctx->shash);
	int ds = crypto_shash_digestsize(bctx->shash);
	SHASH_DESC_ON_STACK(shash, bctx->shash);

	shash->tfm = bctx->shash;
	shash->flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */

	return crypto_shash_init(shash) ?:
	       crypto_shash_update(shash, bctx->opad, bs) ?:
	       crypto_shash_finup(shash, req->result, ds, req->result);
}
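
/*
 * Note (added for clarity): the chain above computes the outer HMAC hash,
 * H(opad_block || inner_digest), entirely in software through the shash
 * fallback. It is only needed on hardware that cannot do the opad XOR and
 * outer hash itself (no FLAGS_AUTO_XOR), as checked in omap_sham_finish()
 * below.
 */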
static int omap_sham_finish(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	int err = 0;

	if (ctx->digcnt) {
		omap_sham_copy_ready_hash(req);
		if ((ctx->flags & BIT(FLAGS_HMAC)) &&
		    !test_bit(FLAGS_AUTO_XOR, &dd->flags))
			err = omap_sham_finish_hmac(req);
	}

	dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);

	return err;
}
static void omap_sham_finish_req(struct ahash_request *req, int err)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;

	if (test_bit(FLAGS_SGS_COPIED, &dd->flags))
		free_pages((unsigned long)sg_virt(ctx->sg),
			   get_order(ctx->sg->length + ctx->bufcnt));

	if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags))
		kfree(ctx->sg);

	ctx->sg = NULL;

	dd->flags &= ~(BIT(FLAGS_SGS_ALLOCED) | BIT(FLAGS_SGS_COPIED));

	if (!err) {
		dd->pdata->copy_hash(req, 1);
		if (test_bit(FLAGS_FINAL, &dd->flags))
			err = omap_sham_finish(req);
	} else {
		ctx->flags |= BIT(FLAGS_ERROR);
	}

	/* atomic operation is not needed here */
	dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
			BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));

	pm_runtime_mark_last_busy(dd->dev);
	pm_runtime_put_autosuspend(dd->dev);

	if (req->base.complete)
		req->base.complete(&req->base, err);
}
static int omap_sham_handle_queue(struct omap_sham_dev *dd,
				  struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct omap_sham_reqctx *ctx;
	unsigned long flags;
	int err = 0, ret = 0;

retry:
	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->queue, req);
	if (test_bit(FLAGS_BUSY, &dd->flags)) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		set_bit(FLAGS_BUSY, &dd->flags);
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	dd->req = req;
	ctx = ahash_request_ctx(req);

	err = omap_sham_prepare_request(req, ctx->op == OP_UPDATE);
	if (err || !ctx->total)
		goto err1;

	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
		ctx->op, req->nbytes);

	err = omap_sham_hw_init(dd);
	if (err)
		goto err1;

	if (ctx->digcnt)
		/* request has changed - restore hash */
		dd->pdata->copy_hash(req, 0);

	if (ctx->op == OP_UPDATE) {
		err = omap_sham_update_req(dd);
		if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP)))
			/* no final() after finup() */
			err = omap_sham_final_req(dd);
	} else if (ctx->op == OP_FINAL) {
		err = omap_sham_final_req(dd);
	}
err1:
	dev_dbg(dd->dev, "exit, err: %d\n", err);

	if (err != -EINPROGRESS) {
		/* done_task will not finish it, so do it here */
		omap_sham_finish_req(req, err);
		req = NULL;

		/*
		 * Execute next request immediately if there is anything
		 * in the queue.
		 */
		goto retry;
	}

	return ret;
}
static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_dev *dd = tctx->dd;

	ctx->op = op;

	return omap_sham_handle_queue(dd, req);
}
static int omap_sham_update(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;

	if (!req->nbytes)
		return 0;

	if (ctx->bufcnt + req->nbytes <= ctx->buflen) {
		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
					 0, req->nbytes, 0);
		ctx->bufcnt += req->nbytes;
		return 0;
	}

	if (dd->polling_mode)
		ctx->flags |= BIT(FLAGS_CPU);

	return omap_sham_enqueue(req, OP_UPDATE);
}
static int omap_sham_shash_digest(struct crypto_shash *tfm, u32 flags,
				  const u8 *data, unsigned int len, u8 *out)
{
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;
	shash->flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_shash_digest(shash, data, len, out);
}
static int omap_sham_final_shash(struct ahash_request *req)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int offset = 0;

	/*
	 * If we are running HMAC on limited hardware support, skip
	 * the ipad in the beginning of the buffer if we are going for
	 * software fallback algorithm.
	 */
	if (test_bit(FLAGS_HMAC, &ctx->flags) &&
	    !test_bit(FLAGS_AUTO_XOR, &ctx->dd->flags))
		offset = get_block_size(ctx);

	return omap_sham_shash_digest(tctx->fallback, req->base.flags,
				      ctx->buffer + offset,
				      ctx->bufcnt - offset, req->result);
}
static int omap_sham_final(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

	ctx->flags |= BIT(FLAGS_FINUP);

	if (ctx->flags & BIT(FLAGS_ERROR))
		return 0; /* uncompleted hash is not needed */

	/*
	 * OMAP HW accel works only with buffers >= 9.
	 * HMAC is always >= 9 because ipad == block size.
	 * If buffersize is less than fallback_sz, we use fallback
	 * SW encoding, as using DMA + HW in this case doesn't provide
	 * any benefit.
	 */
	if (!ctx->digcnt && ctx->bufcnt < ctx->dd->fallback_sz)
		return omap_sham_final_shash(req);
	else if (ctx->bufcnt)
		return omap_sham_enqueue(req, OP_FINAL);

	/* copy ready hash (+ finalize hmac) */
	return omap_sham_finish(req);
}
static int omap_sham_finup(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->flags |= BIT(FLAGS_FINUP);

	err1 = omap_sham_update(req);
	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;
	/*
	 * final() has to be always called to cleanup resources
	 * even if update() failed, except EINPROGRESS
	 */
	err2 = omap_sham_final(req);

	return err1 ?: err2;
}
static int omap_sham_digest(struct ahash_request *req)
{
	return omap_sham_init(req) ?: omap_sham_finup(req);
}
static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct omap_sham_hmac_ctx *bctx = tctx->base;
	int bs = crypto_shash_blocksize(bctx->shash);
	int ds = crypto_shash_digestsize(bctx->shash);
	struct omap_sham_dev *dd = NULL, *tmp;
	int err, i;

	spin_lock_bh(&sham.lock);
	if (!tctx->dd) {
		list_for_each_entry(tmp, &sham.dev_list, list) {
			dd = tmp;
			break;
		}
		tctx->dd = dd;
	} else {
		dd = tctx->dd;
	}
	spin_unlock_bh(&sham.lock);

	err = crypto_shash_setkey(tctx->fallback, key, keylen);
	if (err)
		return err;

	if (keylen > bs) {
		err = omap_sham_shash_digest(bctx->shash,
				crypto_shash_get_flags(bctx->shash),
				key, keylen, bctx->ipad);
		if (err)
			return err;
		keylen = ds;
	} else {
		memcpy(bctx->ipad, key, keylen);
	}

	memset(bctx->ipad + keylen, 0, bs - keylen);

	if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
		memcpy(bctx->opad, bctx->ipad, bs);

		for (i = 0; i < bs; i++) {
			bctx->ipad[i] ^= HMAC_IPAD_VALUE;
			bctx->opad[i] ^= HMAC_OPAD_VALUE;
		}
	}

	return err;
}
*tfm
, const char *alg_base
)
1356 struct omap_sham_ctx
*tctx
= crypto_tfm_ctx(tfm
);
1357 const char *alg_name
= crypto_tfm_alg_name(tfm
);
1359 /* Allocate a fallback and abort if it failed. */
1360 tctx
->fallback
= crypto_alloc_shash(alg_name
, 0,
1361 CRYPTO_ALG_NEED_FALLBACK
);
1362 if (IS_ERR(tctx
->fallback
)) {
1363 pr_err("omap-sham: fallback driver '%s' "
1364 "could not be loaded.\n", alg_name
);
1365 return PTR_ERR(tctx
->fallback
);
1368 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm
),
1369 sizeof(struct omap_sham_reqctx
) + BUFLEN
);
1372 struct omap_sham_hmac_ctx
*bctx
= tctx
->base
;
1373 tctx
->flags
|= BIT(FLAGS_HMAC
);
1374 bctx
->shash
= crypto_alloc_shash(alg_base
, 0,
1375 CRYPTO_ALG_NEED_FALLBACK
);
1376 if (IS_ERR(bctx
->shash
)) {
1377 pr_err("omap-sham: base driver '%s' "
1378 "could not be loaded.\n", alg_base
);
1379 crypto_free_shash(tctx
->fallback
);
1380 return PTR_ERR(bctx
->shash
);
1388 static int omap_sham_cra_init(struct crypto_tfm
*tfm
)
1390 return omap_sham_cra_init_alg(tfm
, NULL
);
1393 static int omap_sham_cra_sha1_init(struct crypto_tfm
*tfm
)
1395 return omap_sham_cra_init_alg(tfm
, "sha1");
1398 static int omap_sham_cra_sha224_init(struct crypto_tfm
*tfm
)
1400 return omap_sham_cra_init_alg(tfm
, "sha224");
1403 static int omap_sham_cra_sha256_init(struct crypto_tfm
*tfm
)
1405 return omap_sham_cra_init_alg(tfm
, "sha256");
1408 static int omap_sham_cra_md5_init(struct crypto_tfm
*tfm
)
1410 return omap_sham_cra_init_alg(tfm
, "md5");
1413 static int omap_sham_cra_sha384_init(struct crypto_tfm
*tfm
)
1415 return omap_sham_cra_init_alg(tfm
, "sha384");
1418 static int omap_sham_cra_sha512_init(struct crypto_tfm
*tfm
)
1420 return omap_sham_cra_init_alg(tfm
, "sha512");
1423 static void omap_sham_cra_exit(struct crypto_tfm
*tfm
)
1425 struct omap_sham_ctx
*tctx
= crypto_tfm_ctx(tfm
);
1427 crypto_free_shash(tctx
->fallback
);
1428 tctx
->fallback
= NULL
;
1430 if (tctx
->flags
& BIT(FLAGS_HMAC
)) {
1431 struct omap_sham_hmac_ctx
*bctx
= tctx
->base
;
1432 crypto_free_shash(bctx
->shash
);
static int omap_sham_export(struct ahash_request *req, void *out)
{
	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);

	memcpy(out, rctx, sizeof(*rctx) + rctx->bufcnt);

	return 0;
}

static int omap_sham_import(struct ahash_request *req, const void *in)
{
	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
	const struct omap_sham_reqctx *ctx_in = in;

	memcpy(rctx, in, sizeof(*rctx) + ctx_in->bufcnt);

	return 0;
}
static struct ahash_alg algs_sha1_md5[] = {
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha1",
		.cra_driver_name	= "omap-sha1",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= MD5_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "md5",
		.cra_driver_name	= "omap-md5",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha1)",
		.cra_driver_name	= "omap-hmac-sha1",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					  sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha1_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= MD5_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(md5)",
		.cra_driver_name	= "omap-hmac-md5",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					  sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_md5_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
};
/* OMAP4 has some algs in addition to what OMAP2 has */
static struct ahash_alg algs_sha224_sha256[] = {
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA224_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha224",
		.cra_driver_name	= "omap-sha224",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA224_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA256_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha256",
		.cra_driver_name	= "omap-sha256",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA224_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha224)",
		.cra_driver_name	= "omap-hmac-sha224",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA224_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					  sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha224_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA256_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha256)",
		.cra_driver_name	= "omap-hmac-sha256",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					  sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha256_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
};
static struct ahash_alg algs_sha384_sha512[] = {
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA384_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha384",
		.cra_driver_name	= "omap-sha384",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA384_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA512_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha512",
		.cra_driver_name	= "omap-sha512",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA512_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA384_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha384)",
		.cra_driver_name	= "omap-hmac-sha384",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA384_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					  sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha384_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA512_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha512)",
		.cra_driver_name	= "omap-hmac-sha512",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA512_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					  sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha512_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
};
static void omap_sham_done_task(unsigned long data)
{
	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
	int err = 0;

	if (!test_bit(FLAGS_BUSY, &dd->flags)) {
		omap_sham_handle_queue(dd, NULL);
		return;
	}

	if (test_bit(FLAGS_CPU, &dd->flags)) {
		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
			goto finish;
	} else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
		if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
			omap_sham_update_dma_stop(dd);
		}
		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
			/* hash or semi-hash ready */
			clear_bit(FLAGS_DMA_READY, &dd->flags);
			goto finish;
		}
	}

	return;

finish:
	dev_dbg(dd->dev, "update done: err: %d\n", err);
	/* finish current request */
	omap_sham_finish_req(dd->req, err);

	/* If we are not busy, process next req */
	if (!test_bit(FLAGS_BUSY, &dd->flags))
		omap_sham_handle_queue(dd, NULL);
}
static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd)
{
	if (!test_bit(FLAGS_BUSY, &dd->flags)) {
		dev_warn(dd->dev, "Interrupt when no active requests.\n");
	} else {
		set_bit(FLAGS_OUTPUT_READY, &dd->flags);
		tasklet_schedule(&dd->done_task);
	}

	return IRQ_HANDLED;
}
static irqreturn_t omap_sham_irq_omap2(int irq, void *dev_id)
{
	struct omap_sham_dev *dd = dev_id;

	if (unlikely(test_bit(FLAGS_FINAL, &dd->flags)))
		/* final -> allow device to go to power-saving mode */
		omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);

	omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
				 SHA_REG_CTRL_OUTPUT_READY);
	omap_sham_read(dd, SHA_REG_CTRL);

	return omap_sham_irq_common(dd);
}
static irqreturn_t omap_sham_irq_omap4(int irq, void *dev_id)
{
	struct omap_sham_dev *dd = dev_id;

	omap_sham_write_mask(dd, SHA_REG_MASK(dd), 0, SHA_REG_MASK_IT_EN);

	return omap_sham_irq_common(dd);
}
static struct omap_sham_algs_info omap_sham_algs_info_omap2[] = {
	{
		.algs_list	= algs_sha1_md5,
		.size		= ARRAY_SIZE(algs_sha1_md5),
	},
};

static const struct omap_sham_pdata omap_sham_pdata_omap2 = {
	.algs_info	= omap_sham_algs_info_omap2,
	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap2),
	.flags		= BIT(FLAGS_BE32_SHA1),
	.digest_size	= SHA1_DIGEST_SIZE,
	.copy_hash	= omap_sham_copy_hash_omap2,
	.write_ctrl	= omap_sham_write_ctrl_omap2,
	.trigger	= omap_sham_trigger_omap2,
	.poll_irq	= omap_sham_poll_irq_omap2,
	.intr_hdlr	= omap_sham_irq_omap2,
	.idigest_ofs	= 0x00,
	.sysstatus_ofs	= 0x64,
};
static struct omap_sham_algs_info omap_sham_algs_info_omap4[] = {
	{
		.algs_list	= algs_sha1_md5,
		.size		= ARRAY_SIZE(algs_sha1_md5),
	},
	{
		.algs_list	= algs_sha224_sha256,
		.size		= ARRAY_SIZE(algs_sha224_sha256),
	},
};

static const struct omap_sham_pdata omap_sham_pdata_omap4 = {
	.algs_info	= omap_sham_algs_info_omap4,
	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap4),
	.flags		= BIT(FLAGS_AUTO_XOR),
	.digest_size	= SHA256_DIGEST_SIZE,
	.copy_hash	= omap_sham_copy_hash_omap4,
	.write_ctrl	= omap_sham_write_ctrl_omap4,
	.trigger	= omap_sham_trigger_omap4,
	.poll_irq	= omap_sham_poll_irq_omap4,
	.intr_hdlr	= omap_sham_irq_omap4,
	.idigest_ofs	= 0x020,
	.digcnt_ofs	= 0x040,
	.sysstatus_ofs	= 0x114,
	.major_mask	= 0x0700,
	.minor_mask	= 0x003f,
};
[] = {
1877 .algs_list
= algs_sha1_md5
,
1878 .size
= ARRAY_SIZE(algs_sha1_md5
),
1881 .algs_list
= algs_sha224_sha256
,
1882 .size
= ARRAY_SIZE(algs_sha224_sha256
),
1885 .algs_list
= algs_sha384_sha512
,
1886 .size
= ARRAY_SIZE(algs_sha384_sha512
),
1890 static const struct omap_sham_pdata omap_sham_pdata_omap5
= {
1891 .algs_info
= omap_sham_algs_info_omap5
,
1892 .algs_info_size
= ARRAY_SIZE(omap_sham_algs_info_omap5
),
1893 .flags
= BIT(FLAGS_AUTO_XOR
),
1894 .digest_size
= SHA512_DIGEST_SIZE
,
1895 .copy_hash
= omap_sham_copy_hash_omap4
,
1896 .write_ctrl
= omap_sham_write_ctrl_omap4
,
1897 .trigger
= omap_sham_trigger_omap4
,
1898 .poll_irq
= omap_sham_poll_irq_omap4
,
1899 .intr_hdlr
= omap_sham_irq_omap4
,
1900 .idigest_ofs
= 0x240,
1901 .odigest_ofs
= 0x200,
1903 .digcnt_ofs
= 0x280,
1906 .sysstatus_ofs
= 0x114,
1908 .length_ofs
= 0x288,
1909 .major_mask
= 0x0700,
1911 .minor_mask
= 0x003f,
#ifdef CONFIG_OF
static const struct of_device_id omap_sham_of_match[] = {
	{
		.compatible	= "ti,omap2-sham",
		.data		= &omap_sham_pdata_omap2,
	},
	{
		.compatible	= "ti,omap3-sham",
		.data		= &omap_sham_pdata_omap2,
	},
	{
		.compatible	= "ti,omap4-sham",
		.data		= &omap_sham_pdata_omap4,
	},
	{
		.compatible	= "ti,omap5-sham",
		.data		= &omap_sham_pdata_omap5,
	},
	{},
};
MODULE_DEVICE_TABLE(of, omap_sham_of_match);
static int omap_sham_get_res_of(struct omap_sham_dev *dd,
		struct device *dev, struct resource *res)
{
	struct device_node *node = dev->of_node;
	int err = 0;

	dd->pdata = of_device_get_match_data(dev);
	if (!dd->pdata) {
		dev_err(dev, "no compatible OF match\n");
		err = -EINVAL;
		goto err;
	}

	err = of_address_to_resource(node, 0, res);
	if (err < 0) {
		dev_err(dev, "can't translate OF node address\n");
		err = -EINVAL;
		goto err;
	}

	dd->irq = irq_of_parse_and_map(node, 0);
	if (!dd->irq) {
		dev_err(dev, "can't translate OF irq value\n");
		err = -EINVAL;
		goto err;
	}

err:
	return err;
}
#else
static const struct of_device_id omap_sham_of_match[] = {
	{},
};

static int omap_sham_get_res_of(struct omap_sham_dev *dd,
		struct device *dev, struct resource *res)
{
	return -EINVAL;
}
#endif
static int omap_sham_get_res_pdev(struct omap_sham_dev *dd,
		struct platform_device *pdev, struct resource *res)
{
	struct device *dev = &pdev->dev;
	struct resource *r;
	int err = 0;

	/* Get the base address */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto err;
	}
	memcpy(res, r, sizeof(*res));

	/* Get the IRQ */
	dd->irq = platform_get_irq(pdev, 0);
	if (dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = dd->irq;
		goto err;
	}

	/* Only OMAP2/3 can be non-DT */
	dd->pdata = &omap_sham_pdata_omap2;

err:
	return err;
}
static ssize_t fallback_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct omap_sham_dev *dd = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", dd->fallback_sz);
}

static ssize_t fallback_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t size)
{
	struct omap_sham_dev *dd = dev_get_drvdata(dev);
	ssize_t status;
	long value;

	status = kstrtol(buf, 0, &value);
	if (status)
		return status;

	/* HW accelerator only works with buffers > 9 */
	if (value < 10) {
		dev_err(dev, "minimum fallback size 9\n");
		return -EINVAL;
	}

	dd->fallback_sz = value;

	return size;
}
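
/*
 * Note (added for clarity): fallback_sz is the same threshold that
 * omap_sham_final() and omap_sham_update_req() compare against. Requests
 * smaller than it are handled by the software fallback or the CPU path,
 * since DMA plus the accelerator gives no benefit at that size. It defaults
 * to OMAP_SHA_DMA_THRESHOLD and is tunable through the "fallback" sysfs
 * attribute defined here.
 */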
static ssize_t queue_len_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct omap_sham_dev *dd = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", dd->queue.max_qlen);
}

static ssize_t queue_len_store(struct device *dev,
			       struct device_attribute *attr, const char *buf,
			       size_t size)
{
	struct omap_sham_dev *dd = dev_get_drvdata(dev);
	ssize_t status;
	long value;
	unsigned long flags;

	status = kstrtol(buf, 0, &value);
	if (status)
		return status;

	/*
	 * Changing the queue size on the fly is safe. If the size becomes
	 * smaller than the current size, the queue will just not accept new
	 * entries until it has shrunk enough.
	 */
	spin_lock_irqsave(&dd->lock, flags);
	dd->queue.max_qlen = value;
	spin_unlock_irqrestore(&dd->lock, flags);

	return size;
}
static DEVICE_ATTR_RW(queue_len);
static DEVICE_ATTR_RW(fallback);

static struct attribute *omap_sham_attrs[] = {
	&dev_attr_queue_len.attr,
	&dev_attr_fallback.attr,
	NULL,
};

static struct attribute_group omap_sham_attr_group = {
	.attrs = omap_sham_attrs,
};
static int omap_sham_probe(struct platform_device *pdev)
{
	struct omap_sham_dev *dd;
	struct device *dev = &pdev->dev;
	struct resource res;
	dma_cap_mask_t mask;
	int err, i, j;
	u32 rev;

	dd = devm_kzalloc(dev, sizeof(struct omap_sham_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto data_err;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	INIT_LIST_HEAD(&dd->list);
	spin_lock_init(&dd->lock);
	tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
	crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);

	err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) :
			       omap_sham_get_res_pdev(dd, pdev, &res);
	if (err)
		goto data_err;

	dd->io_base = devm_ioremap_resource(dev, &res);
	if (IS_ERR(dd->io_base)) {
		err = PTR_ERR(dd->io_base);
		goto data_err;
	}
	dd->phys_base = res.start;

	err = devm_request_irq(dev, dd->irq, dd->pdata->intr_hdlr,
			       IRQF_TRIGGER_NONE, dev_name(dev), dd);
	if (err) {
		dev_err(dev, "unable to request irq %d, err = %d\n",
			dd->irq, err);
		goto data_err;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	dd->dma_lch = dma_request_chan(dev, "rx");
	if (IS_ERR(dd->dma_lch)) {
		err = PTR_ERR(dd->dma_lch);
		if (err == -EPROBE_DEFER)
			goto data_err;

		dd->polling_mode = 1;
		dev_dbg(dev, "using polling mode instead of dma\n");
	}

	dd->flags |= dd->pdata->flags;

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);

	dd->fallback_sz = OMAP_SHA_DMA_THRESHOLD;

	pm_runtime_enable(dev);
	pm_runtime_irq_safe(dev);

	err = pm_runtime_get_sync(dev);
	if (err < 0) {
		dev_err(dev, "failed to get sync: %d\n", err);
		goto err_pm;
	}

	rev = omap_sham_read(dd, SHA_REG_REV(dd));
	pm_runtime_put_sync(&pdev->dev);

	dev_info(dev, "hw accel on OMAP rev %u.%u\n",
		(rev & dd->pdata->major_mask) >> dd->pdata->major_shift,
		(rev & dd->pdata->minor_mask) >> dd->pdata->minor_shift);

	spin_lock(&sham.lock);
	list_add_tail(&dd->list, &sham.dev_list);
	spin_unlock(&sham.lock);

	for (i = 0; i < dd->pdata->algs_info_size; i++) {
		for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
			struct ahash_alg *alg;

			alg = &dd->pdata->algs_info[i].algs_list[j];
			alg->export = omap_sham_export;
			alg->import = omap_sham_import;
			alg->halg.statesize = sizeof(struct omap_sham_reqctx) +
					      BUFLEN;
			err = crypto_register_ahash(alg);
			if (err)
				goto err_algs;

			dd->pdata->algs_info[i].registered++;
		}
	}

	err = sysfs_create_group(&dev->kobj, &omap_sham_attr_group);
	if (err) {
		dev_err(dev, "could not create sysfs device attrs\n");
		goto err_algs;
	}

	return 0;

err_algs:
	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
			crypto_unregister_ahash(
					&dd->pdata->algs_info[i].algs_list[j]);
err_pm:
	pm_runtime_disable(dev);
	if (!dd->polling_mode)
		dma_release_channel(dd->dma_lch);
data_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}
static int omap_sham_remove(struct platform_device *pdev)
{
	struct omap_sham_dev *dd;
	int i, j;

	dd = platform_get_drvdata(pdev);
	if (!dd)
		return -ENODEV;
	spin_lock(&sham.lock);
	list_del(&dd->list);
	spin_unlock(&sham.lock);
	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
			crypto_unregister_ahash(
					&dd->pdata->algs_info[i].algs_list[j]);
	tasklet_kill(&dd->done_task);
	pm_runtime_disable(&pdev->dev);

	if (!dd->polling_mode)
		dma_release_channel(dd->dma_lch);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int omap_sham_suspend(struct device *dev)
{
	pm_runtime_put_sync(dev);
	return 0;
}

static int omap_sham_resume(struct device *dev)
{
	int err = pm_runtime_get_sync(dev);

	if (err < 0) {
		dev_err(dev, "failed to get sync: %d\n", err);
		return err;
	}
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(omap_sham_pm_ops, omap_sham_suspend, omap_sham_resume);
static struct platform_driver omap_sham_driver = {
	.probe	= omap_sham_probe,
	.remove	= omap_sham_remove,
	.driver	= {
		.name	= "omap-sham",
		.pm	= &omap_sham_pm_ops,
		.of_match_table	= omap_sham_of_match,
	},
};

module_platform_driver(omap_sham_driver);

MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");
MODULE_ALIAS("platform:omap-sham");