drivers/crypto/omap-sham.c
1 /*
2 * Cryptographic API.
4 * Support for OMAP SHA1/MD5 HW acceleration.
6 * Copyright (c) 2010 Nokia Corporation
7 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
8 * Copyright (c) 2011 Texas Instruments Incorporated
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as published
12 * by the Free Software Foundation.
14 * Some ideas are from old omap-sha1-md5.c driver.
15 */
17 #define pr_fmt(fmt) "%s: " fmt, __func__
19 #include <linux/err.h>
20 #include <linux/device.h>
21 #include <linux/module.h>
22 #include <linux/init.h>
23 #include <linux/errno.h>
24 #include <linux/interrupt.h>
25 #include <linux/kernel.h>
26 #include <linux/irq.h>
27 #include <linux/io.h>
28 #include <linux/platform_device.h>
29 #include <linux/scatterlist.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/dmaengine.h>
32 #include <linux/pm_runtime.h>
33 #include <linux/of.h>
34 #include <linux/of_device.h>
35 #include <linux/of_address.h>
36 #include <linux/of_irq.h>
37 #include <linux/delay.h>
38 #include <linux/crypto.h>
39 #include <linux/cryptohash.h>
40 #include <crypto/scatterwalk.h>
41 #include <crypto/algapi.h>
42 #include <crypto/sha.h>
43 #include <crypto/hash.h>
44 #include <crypto/internal/hash.h>
46 #define MD5_DIGEST_SIZE 16
48 #define SHA_REG_IDIGEST(dd, x) ((dd)->pdata->idigest_ofs + ((x)*0x04))
49 #define SHA_REG_DIN(dd, x) ((dd)->pdata->din_ofs + ((x) * 0x04))
50 #define SHA_REG_DIGCNT(dd) ((dd)->pdata->digcnt_ofs)
52 #define SHA_REG_ODIGEST(dd, x) ((dd)->pdata->odigest_ofs + (x * 0x04))
54 #define SHA_REG_CTRL 0x18
55 #define SHA_REG_CTRL_LENGTH (0xFFFFFFFF << 5)
56 #define SHA_REG_CTRL_CLOSE_HASH (1 << 4)
57 #define SHA_REG_CTRL_ALGO_CONST (1 << 3)
58 #define SHA_REG_CTRL_ALGO (1 << 2)
59 #define SHA_REG_CTRL_INPUT_READY (1 << 1)
60 #define SHA_REG_CTRL_OUTPUT_READY (1 << 0)
62 #define SHA_REG_REV(dd) ((dd)->pdata->rev_ofs)
64 #define SHA_REG_MASK(dd) ((dd)->pdata->mask_ofs)
65 #define SHA_REG_MASK_DMA_EN (1 << 3)
66 #define SHA_REG_MASK_IT_EN (1 << 2)
67 #define SHA_REG_MASK_SOFTRESET (1 << 1)
68 #define SHA_REG_AUTOIDLE (1 << 0)
70 #define SHA_REG_SYSSTATUS(dd) ((dd)->pdata->sysstatus_ofs)
71 #define SHA_REG_SYSSTATUS_RESETDONE (1 << 0)
73 #define SHA_REG_MODE(dd) ((dd)->pdata->mode_ofs)
74 #define SHA_REG_MODE_HMAC_OUTER_HASH (1 << 7)
75 #define SHA_REG_MODE_HMAC_KEY_PROC (1 << 5)
76 #define SHA_REG_MODE_CLOSE_HASH (1 << 4)
77 #define SHA_REG_MODE_ALGO_CONSTANT (1 << 3)
79 #define SHA_REG_MODE_ALGO_MASK (7 << 0)
80 #define SHA_REG_MODE_ALGO_MD5_128 (0 << 1)
81 #define SHA_REG_MODE_ALGO_SHA1_160 (1 << 1)
82 #define SHA_REG_MODE_ALGO_SHA2_224 (2 << 1)
83 #define SHA_REG_MODE_ALGO_SHA2_256 (3 << 1)
84 #define SHA_REG_MODE_ALGO_SHA2_384 (1 << 0)
85 #define SHA_REG_MODE_ALGO_SHA2_512 (3 << 0)
87 #define SHA_REG_LENGTH(dd) ((dd)->pdata->length_ofs)
89 #define SHA_REG_IRQSTATUS 0x118
90 #define SHA_REG_IRQSTATUS_CTX_RDY (1 << 3)
91 #define SHA_REG_IRQSTATUS_PARTHASH_RDY (1 << 2)
92 #define SHA_REG_IRQSTATUS_INPUT_RDY (1 << 1)
93 #define SHA_REG_IRQSTATUS_OUTPUT_RDY (1 << 0)
95 #define SHA_REG_IRQENA 0x11C
96 #define SHA_REG_IRQENA_CTX_RDY (1 << 3)
97 #define SHA_REG_IRQENA_PARTHASH_RDY (1 << 2)
98 #define SHA_REG_IRQENA_INPUT_RDY (1 << 1)
99 #define SHA_REG_IRQENA_OUTPUT_RDY (1 << 0)
101 #define DEFAULT_TIMEOUT_INTERVAL HZ
103 #define DEFAULT_AUTOSUSPEND_DELAY 1000
105 /* mostly device flags */
106 #define FLAGS_BUSY 0
107 #define FLAGS_FINAL 1
108 #define FLAGS_DMA_ACTIVE 2
109 #define FLAGS_OUTPUT_READY 3
110 #define FLAGS_INIT 4
111 #define FLAGS_CPU 5
112 #define FLAGS_DMA_READY 6
113 #define FLAGS_AUTO_XOR 7
114 #define FLAGS_BE32_SHA1 8
115 #define FLAGS_SGS_COPIED 9
116 #define FLAGS_SGS_ALLOCED 10
117 /* context flags */
118 #define FLAGS_FINUP 16
120 #define FLAGS_MODE_SHIFT 18
121 #define FLAGS_MODE_MASK (SHA_REG_MODE_ALGO_MASK << FLAGS_MODE_SHIFT)
122 #define FLAGS_MODE_MD5 (SHA_REG_MODE_ALGO_MD5_128 << FLAGS_MODE_SHIFT)
123 #define FLAGS_MODE_SHA1 (SHA_REG_MODE_ALGO_SHA1_160 << FLAGS_MODE_SHIFT)
124 #define FLAGS_MODE_SHA224 (SHA_REG_MODE_ALGO_SHA2_224 << FLAGS_MODE_SHIFT)
125 #define FLAGS_MODE_SHA256 (SHA_REG_MODE_ALGO_SHA2_256 << FLAGS_MODE_SHIFT)
126 #define FLAGS_MODE_SHA384 (SHA_REG_MODE_ALGO_SHA2_384 << FLAGS_MODE_SHIFT)
127 #define FLAGS_MODE_SHA512 (SHA_REG_MODE_ALGO_SHA2_512 << FLAGS_MODE_SHIFT)
129 #define FLAGS_HMAC 21
130 #define FLAGS_ERROR 22
132 #define OP_UPDATE 1
133 #define OP_FINAL 2
135 #define OMAP_ALIGN_MASK (sizeof(u32)-1)
136 #define OMAP_ALIGNED __attribute__((aligned(sizeof(u32))))
138 #define BUFLEN SHA512_BLOCK_SIZE
139 #define OMAP_SHA_DMA_THRESHOLD 256
141 struct omap_sham_dev;
143 struct omap_sham_reqctx {
144 struct omap_sham_dev *dd;
145 unsigned long flags;
146 unsigned long op;
148 u8 digest[SHA512_DIGEST_SIZE] OMAP_ALIGNED;
149 size_t digcnt;
150 size_t bufcnt;
151 size_t buflen;
153 /* walk state */
154 struct scatterlist *sg;
155 struct scatterlist sgl[2];
156 int offset; /* offset in current sg */
157 int sg_len;
158 unsigned int total; /* total request */
160 u8 buffer[0] OMAP_ALIGNED;
163 struct omap_sham_hmac_ctx {
164 struct crypto_shash *shash;
165 u8 ipad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
166 u8 opad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
169 struct omap_sham_ctx {
170 struct omap_sham_dev *dd;
172 unsigned long flags;
174 /* fallback stuff */
175 struct crypto_shash *fallback;
177 struct omap_sham_hmac_ctx base[0];
180 #define OMAP_SHAM_QUEUE_LENGTH 10
182 struct omap_sham_algs_info {
183 struct ahash_alg *algs_list;
184 unsigned int size;
185 unsigned int registered;
188 struct omap_sham_pdata {
189 struct omap_sham_algs_info *algs_info;
190 unsigned int algs_info_size;
191 unsigned long flags;
192 int digest_size;
194 void (*copy_hash)(struct ahash_request *req, int out);
195 void (*write_ctrl)(struct omap_sham_dev *dd, size_t length,
196 int final, int dma);
197 void (*trigger)(struct omap_sham_dev *dd, size_t length);
198 int (*poll_irq)(struct omap_sham_dev *dd);
199 irqreturn_t (*intr_hdlr)(int irq, void *dev_id);
201 u32 odigest_ofs;
202 u32 idigest_ofs;
203 u32 din_ofs;
204 u32 digcnt_ofs;
205 u32 rev_ofs;
206 u32 mask_ofs;
207 u32 sysstatus_ofs;
208 u32 mode_ofs;
209 u32 length_ofs;
211 u32 major_mask;
212 u32 major_shift;
213 u32 minor_mask;
214 u32 minor_shift;
217 struct omap_sham_dev {
218 struct list_head list;
219 unsigned long phys_base;
220 struct device *dev;
221 void __iomem *io_base;
222 int irq;
223 spinlock_t lock;
224 int err;
225 struct dma_chan *dma_lch;
226 struct tasklet_struct done_task;
227 u8 polling_mode;
228 u8 xmit_buf[BUFLEN];
230 unsigned long flags;
231 struct crypto_queue queue;
232 struct ahash_request *req;
234 const struct omap_sham_pdata *pdata;
237 struct omap_sham_drv {
238 struct list_head dev_list;
239 spinlock_t lock;
240 unsigned long flags;
243 static struct omap_sham_drv sham = {
244 .dev_list = LIST_HEAD_INIT(sham.dev_list),
245 .lock = __SPIN_LOCK_UNLOCKED(sham.lock),
248 static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
250 return __raw_readl(dd->io_base + offset);
253 static inline void omap_sham_write(struct omap_sham_dev *dd,
254 u32 offset, u32 value)
256 __raw_writel(value, dd->io_base + offset);
259 static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
260 u32 value, u32 mask)
262 u32 val;
264 val = omap_sham_read(dd, address);
265 val &= ~mask;
266 val |= value;
267 omap_sham_write(dd, address, val);
270 static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
272 unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;
274 while (!(omap_sham_read(dd, offset) & bit)) {
275 if (time_is_before_jiffies(timeout))
276 return -ETIMEDOUT;
279 return 0;
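/*
 * Copy the running digest between ctx->digest and the IDIGEST registers:
 * out != 0 reads the intermediate hash back from the hardware, out == 0
 * restores it before resuming a request.
 */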
282 static void omap_sham_copy_hash_omap2(struct ahash_request *req, int out)
284 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
285 struct omap_sham_dev *dd = ctx->dd;
286 u32 *hash = (u32 *)ctx->digest;
287 int i;
289 for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
290 if (out)
291 hash[i] = omap_sham_read(dd, SHA_REG_IDIGEST(dd, i));
292 else
293 omap_sham_write(dd, SHA_REG_IDIGEST(dd, i), hash[i]);
297 static void omap_sham_copy_hash_omap4(struct ahash_request *req, int out)
299 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
300 struct omap_sham_dev *dd = ctx->dd;
301 int i;
303 if (ctx->flags & BIT(FLAGS_HMAC)) {
304 struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
305 struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
306 struct omap_sham_hmac_ctx *bctx = tctx->base;
307 u32 *opad = (u32 *)bctx->opad;
309 for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
310 if (out)
311 opad[i] = omap_sham_read(dd,
312 SHA_REG_ODIGEST(dd, i));
313 else
314 omap_sham_write(dd, SHA_REG_ODIGEST(dd, i),
315 opad[i]);
319 omap_sham_copy_hash_omap2(req, out);
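/*
 * Copy the finished digest from ctx->digest into req->result, fixing up
 * word endianness: OMAP2 produces big-endian SHA1 words, everything else
 * is little endian.
 */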
322 static void omap_sham_copy_ready_hash(struct ahash_request *req)
324 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
325 u32 *in = (u32 *)ctx->digest;
326 u32 *hash = (u32 *)req->result;
327 int i, d, big_endian = 0;
329 if (!hash)
330 return;
332 switch (ctx->flags & FLAGS_MODE_MASK) {
333 case FLAGS_MODE_MD5:
334 d = MD5_DIGEST_SIZE / sizeof(u32);
335 break;
336 case FLAGS_MODE_SHA1:
337 /* OMAP2 SHA1 is big endian */
338 if (test_bit(FLAGS_BE32_SHA1, &ctx->dd->flags))
339 big_endian = 1;
340 d = SHA1_DIGEST_SIZE / sizeof(u32);
341 break;
342 case FLAGS_MODE_SHA224:
343 d = SHA224_DIGEST_SIZE / sizeof(u32);
344 break;
345 case FLAGS_MODE_SHA256:
346 d = SHA256_DIGEST_SIZE / sizeof(u32);
347 break;
348 case FLAGS_MODE_SHA384:
349 d = SHA384_DIGEST_SIZE / sizeof(u32);
350 break;
351 case FLAGS_MODE_SHA512:
352 d = SHA512_DIGEST_SIZE / sizeof(u32);
353 break;
354 default:
355 d = 0;
358 if (big_endian)
359 for (i = 0; i < d; i++)
360 hash[i] = be32_to_cpu(in[i]);
361 else
362 for (i = 0; i < d; i++)
363 hash[i] = le32_to_cpu(in[i]);
366 static int omap_sham_hw_init(struct omap_sham_dev *dd)
368 int err;
370 err = pm_runtime_get_sync(dd->dev);
371 if (err < 0) {
372 dev_err(dd->dev, "failed to get sync: %d\n", err);
373 return err;
376 if (!test_bit(FLAGS_INIT, &dd->flags)) {
377 set_bit(FLAGS_INIT, &dd->flags);
378 dd->err = 0;
381 return 0;
384 static void omap_sham_write_ctrl_omap2(struct omap_sham_dev *dd, size_t length,
385 int final, int dma)
387 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
388 u32 val = length << 5, mask;
390 if (likely(ctx->digcnt))
391 omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);
393 omap_sham_write_mask(dd, SHA_REG_MASK(dd),
394 SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
395 SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
396 /*
397 * Setting ALGO_CONST only for the first iteration
398 * and CLOSE_HASH only for the last one.
399 */
400 if ((ctx->flags & FLAGS_MODE_MASK) == FLAGS_MODE_SHA1)
401 val |= SHA_REG_CTRL_ALGO;
402 if (!ctx->digcnt)
403 val |= SHA_REG_CTRL_ALGO_CONST;
404 if (final)
405 val |= SHA_REG_CTRL_CLOSE_HASH;
407 mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
408 SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;
410 omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
413 static void omap_sham_trigger_omap2(struct omap_sham_dev *dd, size_t length)
417 static int omap_sham_poll_irq_omap2(struct omap_sham_dev *dd)
419 return omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY);
422 static int get_block_size(struct omap_sham_reqctx *ctx)
424 int d;
426 switch (ctx->flags & FLAGS_MODE_MASK) {
427 case FLAGS_MODE_MD5:
428 case FLAGS_MODE_SHA1:
429 d = SHA1_BLOCK_SIZE;
430 break;
431 case FLAGS_MODE_SHA224:
432 case FLAGS_MODE_SHA256:
433 d = SHA256_BLOCK_SIZE;
434 break;
435 case FLAGS_MODE_SHA384:
436 case FLAGS_MODE_SHA512:
437 d = SHA512_BLOCK_SIZE;
438 break;
439 default:
440 d = 0;
443 return d;
446 static void omap_sham_write_n(struct omap_sham_dev *dd, u32 offset,
447 u32 *value, int count)
449 for (; count--; value++, offset += 4)
450 omap_sham_write(dd, offset, *value);
453 static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
454 int final, int dma)
456 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
457 u32 val, mask;
459 /*
460 * Setting ALGO_CONST only for the first iteration and
461 * CLOSE_HASH only for the last one. Note that flags mode bits
462 * correspond to algorithm encoding in mode register.
463 */
464 val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT);
465 if (!ctx->digcnt) {
466 struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
467 struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
468 struct omap_sham_hmac_ctx *bctx = tctx->base;
469 int bs, nr_dr;
471 val |= SHA_REG_MODE_ALGO_CONSTANT;
473 if (ctx->flags & BIT(FLAGS_HMAC)) {
474 bs = get_block_size(ctx);
475 nr_dr = bs / (2 * sizeof(u32));
476 val |= SHA_REG_MODE_HMAC_KEY_PROC;
477 omap_sham_write_n(dd, SHA_REG_ODIGEST(dd, 0),
478 (u32 *)bctx->ipad, nr_dr);
479 omap_sham_write_n(dd, SHA_REG_IDIGEST(dd, 0),
480 (u32 *)bctx->ipad + nr_dr, nr_dr);
481 ctx->digcnt += bs;
485 if (final) {
486 val |= SHA_REG_MODE_CLOSE_HASH;
488 if (ctx->flags & BIT(FLAGS_HMAC))
489 val |= SHA_REG_MODE_HMAC_OUTER_HASH;
492 mask = SHA_REG_MODE_ALGO_CONSTANT | SHA_REG_MODE_CLOSE_HASH |
493 SHA_REG_MODE_ALGO_MASK | SHA_REG_MODE_HMAC_OUTER_HASH |
494 SHA_REG_MODE_HMAC_KEY_PROC;
496 dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags);
497 omap_sham_write_mask(dd, SHA_REG_MODE(dd), val, mask);
498 omap_sham_write(dd, SHA_REG_IRQENA, SHA_REG_IRQENA_OUTPUT_RDY);
499 omap_sham_write_mask(dd, SHA_REG_MASK(dd),
500 SHA_REG_MASK_IT_EN |
501 (dma ? SHA_REG_MASK_DMA_EN : 0),
502 SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
505 static void omap_sham_trigger_omap4(struct omap_sham_dev *dd, size_t length)
507 omap_sham_write(dd, SHA_REG_LENGTH(dd), length);
510 static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd)
512 return omap_sham_wait(dd, SHA_REG_IRQSTATUS,
513 SHA_REG_IRQSTATUS_INPUT_RDY);
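/*
 * PIO path: program the control registers, then feed the scatterlist into
 * the DIN registers one block at a time, polling for INPUT_READY between
 * blocks.
 */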
516 static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, size_t length,
517 int final)
519 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
520 int count, len32, bs32, offset = 0;
521 const u32 *buffer;
522 int mlen;
523 struct sg_mapping_iter mi;
525 dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
526 ctx->digcnt, length, final);
528 dd->pdata->write_ctrl(dd, length, final, 0);
529 dd->pdata->trigger(dd, length);
531 /* should be non-zero before next lines to disable clocks later */
532 ctx->digcnt += length;
533 ctx->total -= length;
535 if (final)
536 set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
538 set_bit(FLAGS_CPU, &dd->flags);
540 len32 = DIV_ROUND_UP(length, sizeof(u32));
541 bs32 = get_block_size(ctx) / sizeof(u32);
543 sg_miter_start(&mi, ctx->sg, ctx->sg_len,
544 SG_MITER_FROM_SG | SG_MITER_ATOMIC);
546 mlen = 0;
548 while (len32) {
549 if (dd->pdata->poll_irq(dd))
550 return -ETIMEDOUT;
552 for (count = 0; count < min(len32, bs32); count++, offset++) {
553 if (!mlen) {
554 sg_miter_next(&mi);
555 mlen = mi.length;
556 if (!mlen) {
557 pr_err("sg miter failure.\n");
558 return -EINVAL;
560 offset = 0;
561 buffer = mi.addr;
563 omap_sham_write(dd, SHA_REG_DIN(dd, count),
564 buffer[offset]);
565 mlen -= 4;
567 len32 -= min(len32, bs32);
570 sg_miter_stop(&mi);
572 return -EINPROGRESS;
575 static void omap_sham_dma_callback(void *param)
577 struct omap_sham_dev *dd = param;
579 set_bit(FLAGS_DMA_READY, &dd->flags);
580 tasklet_schedule(&dd->done_task);
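/*
 * DMA path: map ctx->sg, configure the slave channel to write 32-bit words
 * into the DIN register, submit the descriptor and trigger the hash.
 * Completion is signalled through omap_sham_dma_callback().
 */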
583 static int omap_sham_xmit_dma(struct omap_sham_dev *dd, size_t length,
584 int final)
586 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
587 struct dma_async_tx_descriptor *tx;
588 struct dma_slave_config cfg;
589 int ret;
591 dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
592 ctx->digcnt, length, final);
594 if (!dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE)) {
595 dev_err(dd->dev, "dma_map_sg error\n");
596 return -EINVAL;
599 memset(&cfg, 0, sizeof(cfg));
601 cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0);
602 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
603 cfg.dst_maxburst = get_block_size(ctx) / DMA_SLAVE_BUSWIDTH_4_BYTES;
605 ret = dmaengine_slave_config(dd->dma_lch, &cfg);
606 if (ret) {
607 pr_err("omap-sham: can't configure dmaengine slave: %d\n", ret);
608 return ret;
611 tx = dmaengine_prep_slave_sg(dd->dma_lch, ctx->sg, ctx->sg_len,
612 DMA_MEM_TO_DEV,
613 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
615 if (!tx) {
616 dev_err(dd->dev, "prep_slave_sg failed\n");
617 return -EINVAL;
620 tx->callback = omap_sham_dma_callback;
621 tx->callback_param = dd;
623 dd->pdata->write_ctrl(dd, length, final, 1);
625 ctx->digcnt += length;
626 ctx->total -= length;
628 if (final)
629 set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
631 set_bit(FLAGS_DMA_ACTIVE, &dd->flags);
633 dmaengine_submit(tx);
634 dma_async_issue_pending(dd->dma_lch);
636 dd->pdata->trigger(dd, length);
638 return -EINPROGRESS;
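/*
 * Build a private scatterlist that prepends any bytes buffered in xmit_buf
 * and then covers new_len bytes of the source list starting at ctx->offset.
 * Used when the data itself is aligned but the caller's list cannot be
 * handed to the hardware as-is.
 */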
641 static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx,
642 struct scatterlist *sg, int bs, int new_len)
644 int n = sg_nents(sg);
645 struct scatterlist *tmp;
646 int offset = ctx->offset;
648 if (ctx->bufcnt)
649 n++;
651 ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
652 if (!ctx->sg)
653 return -ENOMEM;
655 sg_init_table(ctx->sg, n);
657 tmp = ctx->sg;
659 ctx->sg_len = 0;
661 if (ctx->bufcnt) {
662 sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
663 tmp = sg_next(tmp);
664 ctx->sg_len++;
667 while (sg && new_len) {
668 int len = sg->length - offset;
670 if (offset) {
671 offset -= sg->length;
672 if (offset < 0)
673 offset = 0;
676 if (new_len < len)
677 len = new_len;
679 if (len > 0) {
680 new_len -= len;
681 sg_set_page(tmp, sg_page(sg), len, sg->offset);
682 if (new_len <= 0)
683 sg_mark_end(tmp);
684 tmp = sg_next(tmp);
685 ctx->sg_len++;
688 sg = sg_next(sg);
691 set_bit(FLAGS_SGS_ALLOCED, &ctx->dd->flags);
693 ctx->bufcnt = 0;
695 return 0;
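/*
 * Fallback for unaligned input: flatten the buffered and source data into
 * freshly allocated pages and point ctx->sg at a single-entry list.
 */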
698 static int omap_sham_copy_sgs(struct omap_sham_reqctx *ctx,
699 struct scatterlist *sg, int bs, int new_len)
701 int pages;
702 void *buf;
703 int len;
705 len = new_len + ctx->bufcnt;
707 pages = get_order(ctx->total);
709 buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
710 if (!buf) {
711 pr_err("Couldn't allocate pages for unaligned cases.\n");
712 return -ENOMEM;
715 if (ctx->bufcnt)
716 memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);
718 scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->offset,
719 ctx->total - ctx->bufcnt, 0);
720 sg_init_table(ctx->sgl, 1);
721 sg_set_buf(ctx->sgl, buf, len);
722 ctx->sg = ctx->sgl;
723 set_bit(FLAGS_SGS_COPIED, &ctx->dd->flags);
724 ctx->sg_len = 1;
725 ctx->bufcnt = 0;
726 ctx->offset = 0;
728 return 0;
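/*
 * Check whether the source scatterlist can be fed to the hardware directly:
 * each usable segment must start 32-bit aligned and span a multiple of the
 * block size. Otherwise fall back to copying the data (unaligned) or
 * rebuilding the list (aligned, but the list itself is unusable).
 */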
731 static int omap_sham_align_sgs(struct scatterlist *sg,
732 int nbytes, int bs, bool final,
733 struct omap_sham_reqctx *rctx)
735 int n = 0;
736 bool aligned = true;
737 bool list_ok = true;
738 struct scatterlist *sg_tmp = sg;
739 int new_len;
740 int offset = rctx->offset;
742 if (!sg || !sg->length || !nbytes)
743 return 0;
745 new_len = nbytes;
747 if (offset)
748 list_ok = false;
750 if (final)
751 new_len = DIV_ROUND_UP(new_len, bs) * bs;
752 else
753 new_len = new_len / bs * bs;
755 while (nbytes > 0 && sg_tmp) {
756 n++;
758 if (offset < sg_tmp->length) {
759 if (!IS_ALIGNED(offset + sg_tmp->offset, 4)) {
760 aligned = false;
761 break;
764 if (!IS_ALIGNED(sg_tmp->length - offset, bs)) {
765 aligned = false;
766 break;
770 if (offset) {
771 offset -= sg_tmp->length;
772 if (offset < 0) {
773 nbytes += offset;
774 offset = 0;
776 } else {
777 nbytes -= sg_tmp->length;
780 sg_tmp = sg_next(sg_tmp);
782 if (nbytes < 0) {
783 list_ok = false;
784 break;
788 if (!aligned)
789 return omap_sham_copy_sgs(rctx, sg, bs, new_len);
790 else if (!list_ok)
791 return omap_sham_copy_sg_lists(rctx, sg, bs, new_len);
793 rctx->sg_len = n;
794 rctx->sg = sg;
796 return 0;
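/*
 * Split the request into a block-aligned part that is handed to the
 * hardware now and a remainder ("hash_later") that is kept in the context
 * buffer for the next update/final call.
 */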
799 static int omap_sham_prepare_request(struct ahash_request *req, bool update)
801 struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
802 int bs;
803 int ret;
804 int nbytes;
805 bool final = rctx->flags & BIT(FLAGS_FINUP);
806 int xmit_len, hash_later;
808 if (!req)
809 return 0;
811 bs = get_block_size(rctx);
813 if (update)
814 nbytes = req->nbytes;
815 else
816 nbytes = 0;
818 rctx->total = nbytes + rctx->bufcnt;
820 if (!rctx->total)
821 return 0;
823 if (nbytes && (!IS_ALIGNED(rctx->bufcnt, bs))) {
824 int len = bs - rctx->bufcnt % bs;
826 if (len > nbytes)
827 len = nbytes;
828 scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, req->src,
829 0, len, 0);
830 rctx->bufcnt += len;
831 nbytes -= len;
832 rctx->offset = len;
835 if (rctx->bufcnt)
836 memcpy(rctx->dd->xmit_buf, rctx->buffer, rctx->bufcnt);
838 ret = omap_sham_align_sgs(req->src, nbytes, bs, final, rctx);
839 if (ret)
840 return ret;
842 xmit_len = rctx->total;
844 if (!IS_ALIGNED(xmit_len, bs)) {
845 if (final)
846 xmit_len = DIV_ROUND_UP(xmit_len, bs) * bs;
847 else
848 xmit_len = xmit_len / bs * bs;
851 hash_later = rctx->total - xmit_len;
852 if (hash_later < 0)
853 hash_later = 0;
855 if (rctx->bufcnt && nbytes) {
856 /* have data from previous operation and current */
857 sg_init_table(rctx->sgl, 2);
858 sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, rctx->bufcnt);
860 sg_chain(rctx->sgl, 2, req->src);
862 rctx->sg = rctx->sgl;
864 rctx->sg_len++;
865 } else if (rctx->bufcnt) {
866 /* have buffered data only */
867 sg_init_table(rctx->sgl, 1);
868 sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, xmit_len);
870 rctx->sg = rctx->sgl;
872 rctx->sg_len = 1;
875 if (hash_later) {
876 if (req->nbytes) {
877 scatterwalk_map_and_copy(rctx->buffer, req->src,
878 req->nbytes - hash_later,
879 hash_later, 0);
880 } else {
881 memcpy(rctx->buffer, rctx->buffer + xmit_len,
882 hash_later);
884 rctx->bufcnt = hash_later;
885 } else {
886 rctx->bufcnt = 0;
889 if (!final)
890 rctx->total = xmit_len;
892 return 0;
895 static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
897 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
899 dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
901 clear_bit(FLAGS_DMA_ACTIVE, &dd->flags);
903 return 0;
906 static int omap_sham_init(struct ahash_request *req)
908 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
909 struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
910 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
911 struct omap_sham_dev *dd = NULL, *tmp;
912 int bs = 0;
914 spin_lock_bh(&sham.lock);
915 if (!tctx->dd) {
916 list_for_each_entry(tmp, &sham.dev_list, list) {
917 dd = tmp;
918 break;
920 tctx->dd = dd;
921 } else {
922 dd = tctx->dd;
924 spin_unlock_bh(&sham.lock);
926 ctx->dd = dd;
928 ctx->flags = 0;
930 dev_dbg(dd->dev, "init: digest size: %d\n",
931 crypto_ahash_digestsize(tfm));
933 switch (crypto_ahash_digestsize(tfm)) {
934 case MD5_DIGEST_SIZE:
935 ctx->flags |= FLAGS_MODE_MD5;
936 bs = SHA1_BLOCK_SIZE;
937 break;
938 case SHA1_DIGEST_SIZE:
939 ctx->flags |= FLAGS_MODE_SHA1;
940 bs = SHA1_BLOCK_SIZE;
941 break;
942 case SHA224_DIGEST_SIZE:
943 ctx->flags |= FLAGS_MODE_SHA224;
944 bs = SHA224_BLOCK_SIZE;
945 break;
946 case SHA256_DIGEST_SIZE:
947 ctx->flags |= FLAGS_MODE_SHA256;
948 bs = SHA256_BLOCK_SIZE;
949 break;
950 case SHA384_DIGEST_SIZE:
951 ctx->flags |= FLAGS_MODE_SHA384;
952 bs = SHA384_BLOCK_SIZE;
953 break;
954 case SHA512_DIGEST_SIZE:
955 ctx->flags |= FLAGS_MODE_SHA512;
956 bs = SHA512_BLOCK_SIZE;
957 break;
960 ctx->bufcnt = 0;
961 ctx->digcnt = 0;
962 ctx->total = 0;
963 ctx->offset = 0;
964 ctx->buflen = BUFLEN;
966 if (tctx->flags & BIT(FLAGS_HMAC)) {
967 if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
968 struct omap_sham_hmac_ctx *bctx = tctx->base;
970 memcpy(ctx->buffer, bctx->ipad, bs);
971 ctx->bufcnt = bs;
974 ctx->flags |= BIT(FLAGS_HMAC);
977 return 0;
981 static int omap_sham_update_req(struct omap_sham_dev *dd)
983 struct ahash_request *req = dd->req;
984 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
985 int err;
986 bool final = ctx->flags & BIT(FLAGS_FINUP);
988 dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
989 ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0);
991 if (ctx->total < get_block_size(ctx) ||
992 ctx->total < OMAP_SHA_DMA_THRESHOLD)
993 ctx->flags |= BIT(FLAGS_CPU);
995 if (ctx->flags & BIT(FLAGS_CPU))
996 err = omap_sham_xmit_cpu(dd, ctx->total, final);
997 else
998 err = omap_sham_xmit_dma(dd, ctx->total, final);
1000 /* wait for dma completion before we can take more data */
1001 dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);
1003 return err;
1006 static int omap_sham_final_req(struct omap_sham_dev *dd)
1008 struct ahash_request *req = dd->req;
1009 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1010 int err = 0, use_dma = 1;
1012 if ((ctx->total <= get_block_size(ctx)) || dd->polling_mode)
1013 /*
1014 * faster to handle last block with cpu or
1015 * use cpu when dma is not present.
1016 */
1017 use_dma = 0;
1019 if (use_dma)
1020 err = omap_sham_xmit_dma(dd, ctx->total, 1);
1021 else
1022 err = omap_sham_xmit_cpu(dd, ctx->total, 1);
1024 ctx->bufcnt = 0;
1026 dev_dbg(dd->dev, "final_req: err: %d\n", err);
1028 return err;
1031 static int omap_sham_finish_hmac(struct ahash_request *req)
1033 struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
1034 struct omap_sham_hmac_ctx *bctx = tctx->base;
1035 int bs = crypto_shash_blocksize(bctx->shash);
1036 int ds = crypto_shash_digestsize(bctx->shash);
1037 SHASH_DESC_ON_STACK(shash, bctx->shash);
1039 shash->tfm = bctx->shash;
1040 shash->flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */
1042 return crypto_shash_init(shash) ?:
1043 crypto_shash_update(shash, bctx->opad, bs) ?:
1044 crypto_shash_finup(shash, req->result, ds, req->result);
1047 static int omap_sham_finish(struct ahash_request *req)
1049 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1050 struct omap_sham_dev *dd = ctx->dd;
1051 int err = 0;
1053 if (ctx->digcnt) {
1054 omap_sham_copy_ready_hash(req);
1055 if ((ctx->flags & BIT(FLAGS_HMAC)) &&
1056 !test_bit(FLAGS_AUTO_XOR, &dd->flags))
1057 err = omap_sham_finish_hmac(req);
1060 dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
1062 return err;
1065 static void omap_sham_finish_req(struct ahash_request *req, int err)
1067 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1068 struct omap_sham_dev *dd = ctx->dd;
1070 if (test_bit(FLAGS_SGS_COPIED, &dd->flags))
1071 free_pages((unsigned long)sg_virt(ctx->sg),
1072 get_order(ctx->sg->length));
1074 if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags))
1075 kfree(ctx->sg);
1077 ctx->sg = NULL;
1079 dd->flags &= ~(BIT(FLAGS_SGS_ALLOCED) | BIT(FLAGS_SGS_COPIED));
1081 if (!err) {
1082 dd->pdata->copy_hash(req, 1);
1083 if (test_bit(FLAGS_FINAL, &dd->flags))
1084 err = omap_sham_finish(req);
1085 } else {
1086 ctx->flags |= BIT(FLAGS_ERROR);
1089 /* atomic operation is not needed here */
1090 dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
1091 BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));
1093 pm_runtime_mark_last_busy(dd->dev);
1094 pm_runtime_put_autosuspend(dd->dev);
1096 if (req->base.complete)
1097 req->base.complete(&req->base, err);
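/*
 * Enqueue the request (if any) and, when the hardware is idle, pull the
 * next request off the queue and start processing it. Completion is
 * reported through omap_sham_finish_req().
 */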
1100 static int omap_sham_handle_queue(struct omap_sham_dev *dd,
1101 struct ahash_request *req)
1103 struct crypto_async_request *async_req, *backlog;
1104 struct omap_sham_reqctx *ctx;
1105 unsigned long flags;
1106 int err = 0, ret = 0;
1108 retry:
1109 spin_lock_irqsave(&dd->lock, flags);
1110 if (req)
1111 ret = ahash_enqueue_request(&dd->queue, req);
1112 if (test_bit(FLAGS_BUSY, &dd->flags)) {
1113 spin_unlock_irqrestore(&dd->lock, flags);
1114 return ret;
1116 backlog = crypto_get_backlog(&dd->queue);
1117 async_req = crypto_dequeue_request(&dd->queue);
1118 if (async_req)
1119 set_bit(FLAGS_BUSY, &dd->flags);
1120 spin_unlock_irqrestore(&dd->lock, flags);
1122 if (!async_req)
1123 return ret;
1125 if (backlog)
1126 backlog->complete(backlog, -EINPROGRESS);
1128 req = ahash_request_cast(async_req);
1129 dd->req = req;
1130 ctx = ahash_request_ctx(req);
1132 err = omap_sham_prepare_request(req, ctx->op == OP_UPDATE);
1133 if (err)
1134 goto err1;
1136 dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
1137 ctx->op, req->nbytes);
1139 err = omap_sham_hw_init(dd);
1140 if (err)
1141 goto err1;
1143 if (ctx->digcnt)
1144 /* request has changed - restore hash */
1145 dd->pdata->copy_hash(req, 0);
1147 if (ctx->op == OP_UPDATE) {
1148 err = omap_sham_update_req(dd);
1149 if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP)))
1150 /* no final() after finup() */
1151 err = omap_sham_final_req(dd);
1152 } else if (ctx->op == OP_FINAL) {
1153 err = omap_sham_final_req(dd);
1155 err1:
1156 dev_dbg(dd->dev, "exit, err: %d\n", err);
1158 if (err != -EINPROGRESS) {
1159 /* done_task will not finish it, so do it here */
1160 omap_sham_finish_req(req, err);
1161 req = NULL;
1163 /*
1164 * Execute next request immediately if there is anything
1165 * in queue.
1166 */
1167 goto retry;
1170 return ret;
1173 static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
1175 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1176 struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
1177 struct omap_sham_dev *dd = tctx->dd;
1179 ctx->op = op;
1181 return omap_sham_handle_queue(dd, req);
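/*
 * Updates smaller than the context buffer are simply accumulated in
 * ctx->buffer; anything larger is queued for the hardware.
 */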
1184 static int omap_sham_update(struct ahash_request *req)
1186 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1187 struct omap_sham_dev *dd = ctx->dd;
1189 if (!req->nbytes)
1190 return 0;
1192 if (ctx->total + req->nbytes < ctx->buflen) {
1193 scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
1194 0, req->nbytes, 0);
1195 ctx->bufcnt += req->nbytes;
1196 ctx->total += req->nbytes;
1197 return 0;
1200 if (dd->polling_mode)
1201 ctx->flags |= BIT(FLAGS_CPU);
1203 return omap_sham_enqueue(req, OP_UPDATE);
1206 static int omap_sham_shash_digest(struct crypto_shash *tfm, u32 flags,
1207 const u8 *data, unsigned int len, u8 *out)
1209 SHASH_DESC_ON_STACK(shash, tfm);
1211 shash->tfm = tfm;
1212 shash->flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1214 return crypto_shash_digest(shash, data, len, out);
1217 static int omap_sham_final_shash(struct ahash_request *req)
1219 struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
1220 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1221 int offset = 0;
1223 /*
1224 * If we are running HMAC on limited hardware support, skip
1225 * the ipad in the beginning of the buffer if we are going for
1226 * software fallback algorithm.
1227 */
1228 if (test_bit(FLAGS_HMAC, &ctx->flags) &&
1229 !test_bit(FLAGS_AUTO_XOR, &ctx->dd->flags))
1230 offset = get_block_size(ctx);
1232 return omap_sham_shash_digest(tctx->fallback, req->base.flags,
1233 ctx->buffer + offset,
1234 ctx->bufcnt - offset, req->result);
1237 static int omap_sham_final(struct ahash_request *req)
1239 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1241 ctx->flags |= BIT(FLAGS_FINUP);
1243 if (ctx->flags & BIT(FLAGS_ERROR))
1244 return 0; /* uncompleted hash is not needed */
1246 /*
1247 * OMAP HW accel works only with buffers >= 9.
1248 * HMAC is always >= 9 because ipad == block size.
1249 * If buffersize is less than DMA_THRESHOLD, we use fallback
1250 * SW encoding, as using DMA + HW in this case doesn't provide
1251 * any benefit.
1252 */
1253 if (!ctx->digcnt && ctx->bufcnt < OMAP_SHA_DMA_THRESHOLD)
1254 return omap_sham_final_shash(req);
1255 else if (ctx->bufcnt)
1256 return omap_sham_enqueue(req, OP_FINAL);
1258 /* copy ready hash (+ finalize hmac) */
1259 return omap_sham_finish(req);
1262 static int omap_sham_finup(struct ahash_request *req)
1264 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1265 int err1, err2;
1267 ctx->flags |= BIT(FLAGS_FINUP);
1269 err1 = omap_sham_update(req);
1270 if (err1 == -EINPROGRESS || err1 == -EBUSY)
1271 return err1;
1272 /*
1273 * final() always has to be called to clean up resources,
1274 * even if update() failed, except when it returned -EINPROGRESS
1275 */
1276 err2 = omap_sham_final(req);
1278 return err1 ?: err2;
1281 static int omap_sham_digest(struct ahash_request *req)
1283 return omap_sham_init(req) ?: omap_sham_finup(req);
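/*
 * HMAC key handling: keys longer than the block size are hashed down to
 * the digest size first, shorter keys are zero-padded, and ipad/opad are
 * pre-XORed in software unless the hardware performs the key processing
 * itself (FLAGS_AUTO_XOR).
 */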
1286 static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
1287 unsigned int keylen)
1289 struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
1290 struct omap_sham_hmac_ctx *bctx = tctx->base;
1291 int bs = crypto_shash_blocksize(bctx->shash);
1292 int ds = crypto_shash_digestsize(bctx->shash);
1293 struct omap_sham_dev *dd = NULL, *tmp;
1294 int err, i;
1296 spin_lock_bh(&sham.lock);
1297 if (!tctx->dd) {
1298 list_for_each_entry(tmp, &sham.dev_list, list) {
1299 dd = tmp;
1300 break;
1302 tctx->dd = dd;
1303 } else {
1304 dd = tctx->dd;
1306 spin_unlock_bh(&sham.lock);
1308 err = crypto_shash_setkey(tctx->fallback, key, keylen);
1309 if (err)
1310 return err;
1312 if (keylen > bs) {
1313 err = omap_sham_shash_digest(bctx->shash,
1314 crypto_shash_get_flags(bctx->shash),
1315 key, keylen, bctx->ipad);
1316 if (err)
1317 return err;
1318 keylen = ds;
1319 } else {
1320 memcpy(bctx->ipad, key, keylen);
1323 memset(bctx->ipad + keylen, 0, bs - keylen);
1325 if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
1326 memcpy(bctx->opad, bctx->ipad, bs);
1328 for (i = 0; i < bs; i++) {
1329 bctx->ipad[i] ^= 0x36;
1330 bctx->opad[i] ^= 0x5c;
1334 return err;
1337 static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
1339 struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
1340 const char *alg_name = crypto_tfm_alg_name(tfm);
1342 /* Allocate a fallback and abort if it failed. */
1343 tctx->fallback = crypto_alloc_shash(alg_name, 0,
1344 CRYPTO_ALG_NEED_FALLBACK);
1345 if (IS_ERR(tctx->fallback)) {
1346 pr_err("omap-sham: fallback driver '%s' "
1347 "could not be loaded.\n", alg_name);
1348 return PTR_ERR(tctx->fallback);
1351 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1352 sizeof(struct omap_sham_reqctx) + BUFLEN);
1354 if (alg_base) {
1355 struct omap_sham_hmac_ctx *bctx = tctx->base;
1356 tctx->flags |= BIT(FLAGS_HMAC);
1357 bctx->shash = crypto_alloc_shash(alg_base, 0,
1358 CRYPTO_ALG_NEED_FALLBACK);
1359 if (IS_ERR(bctx->shash)) {
1360 pr_err("omap-sham: base driver '%s' "
1361 "could not be loaded.\n", alg_base);
1362 crypto_free_shash(tctx->fallback);
1363 return PTR_ERR(bctx->shash);
1368 return 0;
1371 static int omap_sham_cra_init(struct crypto_tfm *tfm)
1373 return omap_sham_cra_init_alg(tfm, NULL);
1376 static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
1378 return omap_sham_cra_init_alg(tfm, "sha1");
1381 static int omap_sham_cra_sha224_init(struct crypto_tfm *tfm)
1383 return omap_sham_cra_init_alg(tfm, "sha224");
1386 static int omap_sham_cra_sha256_init(struct crypto_tfm *tfm)
1388 return omap_sham_cra_init_alg(tfm, "sha256");
1391 static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
1393 return omap_sham_cra_init_alg(tfm, "md5");
1396 static int omap_sham_cra_sha384_init(struct crypto_tfm *tfm)
1398 return omap_sham_cra_init_alg(tfm, "sha384");
1401 static int omap_sham_cra_sha512_init(struct crypto_tfm *tfm)
1403 return omap_sham_cra_init_alg(tfm, "sha512");
1406 static void omap_sham_cra_exit(struct crypto_tfm *tfm)
1408 struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
1410 crypto_free_shash(tctx->fallback);
1411 tctx->fallback = NULL;
1413 if (tctx->flags & BIT(FLAGS_HMAC)) {
1414 struct omap_sham_hmac_ctx *bctx = tctx->base;
1415 crypto_free_shash(bctx->shash);
1419 static int omap_sham_export(struct ahash_request *req, void *out)
1421 struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
1423 memcpy(out, rctx, sizeof(*rctx) + rctx->bufcnt);
1425 return 0;
1428 static int omap_sham_import(struct ahash_request *req, const void *in)
1430 struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
1431 const struct omap_sham_reqctx *ctx_in = in;
1433 memcpy(rctx, in, sizeof(*rctx) + ctx_in->bufcnt);
1435 return 0;
1438 static struct ahash_alg algs_sha1_md5[] = {
1440 .init = omap_sham_init,
1441 .update = omap_sham_update,
1442 .final = omap_sham_final,
1443 .finup = omap_sham_finup,
1444 .digest = omap_sham_digest,
1445 .halg.digestsize = SHA1_DIGEST_SIZE,
1446 .halg.base = {
1447 .cra_name = "sha1",
1448 .cra_driver_name = "omap-sha1",
1449 .cra_priority = 400,
1450 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1451 CRYPTO_ALG_KERN_DRIVER_ONLY |
1452 CRYPTO_ALG_ASYNC |
1453 CRYPTO_ALG_NEED_FALLBACK,
1454 .cra_blocksize = SHA1_BLOCK_SIZE,
1455 .cra_ctxsize = sizeof(struct omap_sham_ctx),
1456 .cra_alignmask = OMAP_ALIGN_MASK,
1457 .cra_module = THIS_MODULE,
1458 .cra_init = omap_sham_cra_init,
1459 .cra_exit = omap_sham_cra_exit,
1463 .init = omap_sham_init,
1464 .update = omap_sham_update,
1465 .final = omap_sham_final,
1466 .finup = omap_sham_finup,
1467 .digest = omap_sham_digest,
1468 .halg.digestsize = MD5_DIGEST_SIZE,
1469 .halg.base = {
1470 .cra_name = "md5",
1471 .cra_driver_name = "omap-md5",
1472 .cra_priority = 400,
1473 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1474 CRYPTO_ALG_KERN_DRIVER_ONLY |
1475 CRYPTO_ALG_ASYNC |
1476 CRYPTO_ALG_NEED_FALLBACK,
1477 .cra_blocksize = SHA1_BLOCK_SIZE,
1478 .cra_ctxsize = sizeof(struct omap_sham_ctx),
1479 .cra_alignmask = OMAP_ALIGN_MASK,
1480 .cra_module = THIS_MODULE,
1481 .cra_init = omap_sham_cra_init,
1482 .cra_exit = omap_sham_cra_exit,
1486 .init = omap_sham_init,
1487 .update = omap_sham_update,
1488 .final = omap_sham_final,
1489 .finup = omap_sham_finup,
1490 .digest = omap_sham_digest,
1491 .setkey = omap_sham_setkey,
1492 .halg.digestsize = SHA1_DIGEST_SIZE,
1493 .halg.base = {
1494 .cra_name = "hmac(sha1)",
1495 .cra_driver_name = "omap-hmac-sha1",
1496 .cra_priority = 400,
1497 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1498 CRYPTO_ALG_KERN_DRIVER_ONLY |
1499 CRYPTO_ALG_ASYNC |
1500 CRYPTO_ALG_NEED_FALLBACK,
1501 .cra_blocksize = SHA1_BLOCK_SIZE,
1502 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
1503 sizeof(struct omap_sham_hmac_ctx),
1504 .cra_alignmask = OMAP_ALIGN_MASK,
1505 .cra_module = THIS_MODULE,
1506 .cra_init = omap_sham_cra_sha1_init,
1507 .cra_exit = omap_sham_cra_exit,
1511 .init = omap_sham_init,
1512 .update = omap_sham_update,
1513 .final = omap_sham_final,
1514 .finup = omap_sham_finup,
1515 .digest = omap_sham_digest,
1516 .setkey = omap_sham_setkey,
1517 .halg.digestsize = MD5_DIGEST_SIZE,
1518 .halg.base = {
1519 .cra_name = "hmac(md5)",
1520 .cra_driver_name = "omap-hmac-md5",
1521 .cra_priority = 400,
1522 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1523 CRYPTO_ALG_KERN_DRIVER_ONLY |
1524 CRYPTO_ALG_ASYNC |
1525 CRYPTO_ALG_NEED_FALLBACK,
1526 .cra_blocksize = SHA1_BLOCK_SIZE,
1527 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
1528 sizeof(struct omap_sham_hmac_ctx),
1529 .cra_alignmask = OMAP_ALIGN_MASK,
1530 .cra_module = THIS_MODULE,
1531 .cra_init = omap_sham_cra_md5_init,
1532 .cra_exit = omap_sham_cra_exit,
1537 /* OMAP4 has some algs in addition to what OMAP2 has */
1538 static struct ahash_alg algs_sha224_sha256[] = {
1540 .init = omap_sham_init,
1541 .update = omap_sham_update,
1542 .final = omap_sham_final,
1543 .finup = omap_sham_finup,
1544 .digest = omap_sham_digest,
1545 .halg.digestsize = SHA224_DIGEST_SIZE,
1546 .halg.base = {
1547 .cra_name = "sha224",
1548 .cra_driver_name = "omap-sha224",
1549 .cra_priority = 400,
1550 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1551 CRYPTO_ALG_ASYNC |
1552 CRYPTO_ALG_NEED_FALLBACK,
1553 .cra_blocksize = SHA224_BLOCK_SIZE,
1554 .cra_ctxsize = sizeof(struct omap_sham_ctx),
1555 .cra_alignmask = OMAP_ALIGN_MASK,
1556 .cra_module = THIS_MODULE,
1557 .cra_init = omap_sham_cra_init,
1558 .cra_exit = omap_sham_cra_exit,
1562 .init = omap_sham_init,
1563 .update = omap_sham_update,
1564 .final = omap_sham_final,
1565 .finup = omap_sham_finup,
1566 .digest = omap_sham_digest,
1567 .halg.digestsize = SHA256_DIGEST_SIZE,
1568 .halg.base = {
1569 .cra_name = "sha256",
1570 .cra_driver_name = "omap-sha256",
1571 .cra_priority = 400,
1572 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1573 CRYPTO_ALG_ASYNC |
1574 CRYPTO_ALG_NEED_FALLBACK,
1575 .cra_blocksize = SHA256_BLOCK_SIZE,
1576 .cra_ctxsize = sizeof(struct omap_sham_ctx),
1577 .cra_alignmask = OMAP_ALIGN_MASK,
1578 .cra_module = THIS_MODULE,
1579 .cra_init = omap_sham_cra_init,
1580 .cra_exit = omap_sham_cra_exit,
1584 .init = omap_sham_init,
1585 .update = omap_sham_update,
1586 .final = omap_sham_final,
1587 .finup = omap_sham_finup,
1588 .digest = omap_sham_digest,
1589 .setkey = omap_sham_setkey,
1590 .halg.digestsize = SHA224_DIGEST_SIZE,
1591 .halg.base = {
1592 .cra_name = "hmac(sha224)",
1593 .cra_driver_name = "omap-hmac-sha224",
1594 .cra_priority = 400,
1595 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1596 CRYPTO_ALG_ASYNC |
1597 CRYPTO_ALG_NEED_FALLBACK,
1598 .cra_blocksize = SHA224_BLOCK_SIZE,
1599 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
1600 sizeof(struct omap_sham_hmac_ctx),
1601 .cra_alignmask = OMAP_ALIGN_MASK,
1602 .cra_module = THIS_MODULE,
1603 .cra_init = omap_sham_cra_sha224_init,
1604 .cra_exit = omap_sham_cra_exit,
1608 .init = omap_sham_init,
1609 .update = omap_sham_update,
1610 .final = omap_sham_final,
1611 .finup = omap_sham_finup,
1612 .digest = omap_sham_digest,
1613 .setkey = omap_sham_setkey,
1614 .halg.digestsize = SHA256_DIGEST_SIZE,
1615 .halg.base = {
1616 .cra_name = "hmac(sha256)",
1617 .cra_driver_name = "omap-hmac-sha256",
1618 .cra_priority = 400,
1619 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1620 CRYPTO_ALG_ASYNC |
1621 CRYPTO_ALG_NEED_FALLBACK,
1622 .cra_blocksize = SHA256_BLOCK_SIZE,
1623 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
1624 sizeof(struct omap_sham_hmac_ctx),
1625 .cra_alignmask = OMAP_ALIGN_MASK,
1626 .cra_module = THIS_MODULE,
1627 .cra_init = omap_sham_cra_sha256_init,
1628 .cra_exit = omap_sham_cra_exit,
1633 static struct ahash_alg algs_sha384_sha512[] = {
1635 .init = omap_sham_init,
1636 .update = omap_sham_update,
1637 .final = omap_sham_final,
1638 .finup = omap_sham_finup,
1639 .digest = omap_sham_digest,
1640 .halg.digestsize = SHA384_DIGEST_SIZE,
1641 .halg.base = {
1642 .cra_name = "sha384",
1643 .cra_driver_name = "omap-sha384",
1644 .cra_priority = 400,
1645 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1646 CRYPTO_ALG_ASYNC |
1647 CRYPTO_ALG_NEED_FALLBACK,
1648 .cra_blocksize = SHA384_BLOCK_SIZE,
1649 .cra_ctxsize = sizeof(struct omap_sham_ctx),
1650 .cra_alignmask = OMAP_ALIGN_MASK,
1651 .cra_module = THIS_MODULE,
1652 .cra_init = omap_sham_cra_init,
1653 .cra_exit = omap_sham_cra_exit,
1657 .init = omap_sham_init,
1658 .update = omap_sham_update,
1659 .final = omap_sham_final,
1660 .finup = omap_sham_finup,
1661 .digest = omap_sham_digest,
1662 .halg.digestsize = SHA512_DIGEST_SIZE,
1663 .halg.base = {
1664 .cra_name = "sha512",
1665 .cra_driver_name = "omap-sha512",
1666 .cra_priority = 400,
1667 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1668 CRYPTO_ALG_ASYNC |
1669 CRYPTO_ALG_NEED_FALLBACK,
1670 .cra_blocksize = SHA512_BLOCK_SIZE,
1671 .cra_ctxsize = sizeof(struct omap_sham_ctx),
1672 .cra_alignmask = OMAP_ALIGN_MASK,
1673 .cra_module = THIS_MODULE,
1674 .cra_init = omap_sham_cra_init,
1675 .cra_exit = omap_sham_cra_exit,
1679 .init = omap_sham_init,
1680 .update = omap_sham_update,
1681 .final = omap_sham_final,
1682 .finup = omap_sham_finup,
1683 .digest = omap_sham_digest,
1684 .setkey = omap_sham_setkey,
1685 .halg.digestsize = SHA384_DIGEST_SIZE,
1686 .halg.base = {
1687 .cra_name = "hmac(sha384)",
1688 .cra_driver_name = "omap-hmac-sha384",
1689 .cra_priority = 400,
1690 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1691 CRYPTO_ALG_ASYNC |
1692 CRYPTO_ALG_NEED_FALLBACK,
1693 .cra_blocksize = SHA384_BLOCK_SIZE,
1694 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
1695 sizeof(struct omap_sham_hmac_ctx),
1696 .cra_alignmask = OMAP_ALIGN_MASK,
1697 .cra_module = THIS_MODULE,
1698 .cra_init = omap_sham_cra_sha384_init,
1699 .cra_exit = omap_sham_cra_exit,
1703 .init = omap_sham_init,
1704 .update = omap_sham_update,
1705 .final = omap_sham_final,
1706 .finup = omap_sham_finup,
1707 .digest = omap_sham_digest,
1708 .setkey = omap_sham_setkey,
1709 .halg.digestsize = SHA512_DIGEST_SIZE,
1710 .halg.base = {
1711 .cra_name = "hmac(sha512)",
1712 .cra_driver_name = "omap-hmac-sha512",
1713 .cra_priority = 400,
1714 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1715 CRYPTO_ALG_ASYNC |
1716 CRYPTO_ALG_NEED_FALLBACK,
1717 .cra_blocksize = SHA512_BLOCK_SIZE,
1718 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
1719 sizeof(struct omap_sham_hmac_ctx),
1720 .cra_alignmask = OMAP_ALIGN_MASK,
1721 .cra_module = THIS_MODULE,
1722 .cra_init = omap_sham_cra_sha512_init,
1723 .cra_exit = omap_sham_cra_exit,
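/*
 * Tasklet run after an interrupt or DMA completion: finish the current
 * request and, if the hardware is no longer busy, start the next queued
 * one.
 */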
1728 static void omap_sham_done_task(unsigned long data)
1730 struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
1731 int err = 0;
1733 if (!test_bit(FLAGS_BUSY, &dd->flags)) {
1734 omap_sham_handle_queue(dd, NULL);
1735 return;
1738 if (test_bit(FLAGS_CPU, &dd->flags)) {
1739 if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
1740 goto finish;
1741 } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
1742 if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
1743 omap_sham_update_dma_stop(dd);
1744 if (dd->err) {
1745 err = dd->err;
1746 goto finish;
1749 if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
1750 /* hash or semi-hash ready */
1751 clear_bit(FLAGS_DMA_READY, &dd->flags);
1752 goto finish;
1756 return;
1758 finish:
1759 dev_dbg(dd->dev, "update done: err: %d\n", err);
1760 /* finish current request */
1761 omap_sham_finish_req(dd->req, err);
1763 /* If we are not busy, process next req */
1764 if (!test_bit(FLAGS_BUSY, &dd->flags))
1765 omap_sham_handle_queue(dd, NULL);
1768 static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd)
1770 if (!test_bit(FLAGS_BUSY, &dd->flags)) {
1771 dev_warn(dd->dev, "Interrupt when no active requests.\n");
1772 } else {
1773 set_bit(FLAGS_OUTPUT_READY, &dd->flags);
1774 tasklet_schedule(&dd->done_task);
1777 return IRQ_HANDLED;
1780 static irqreturn_t omap_sham_irq_omap2(int irq, void *dev_id)
1782 struct omap_sham_dev *dd = dev_id;
1784 if (unlikely(test_bit(FLAGS_FINAL, &dd->flags)))
1785 /* final -> allow device to go to power-saving mode */
1786 omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);
1788 omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
1789 SHA_REG_CTRL_OUTPUT_READY);
1790 omap_sham_read(dd, SHA_REG_CTRL);
1792 return omap_sham_irq_common(dd);
1795 static irqreturn_t omap_sham_irq_omap4(int irq, void *dev_id)
1797 struct omap_sham_dev *dd = dev_id;
1799 omap_sham_write_mask(dd, SHA_REG_MASK(dd), 0, SHA_REG_MASK_IT_EN);
1801 return omap_sham_irq_common(dd);
1804 static struct omap_sham_algs_info omap_sham_algs_info_omap2[] = {
1806 .algs_list = algs_sha1_md5,
1807 .size = ARRAY_SIZE(algs_sha1_md5),
1811 static const struct omap_sham_pdata omap_sham_pdata_omap2 = {
1812 .algs_info = omap_sham_algs_info_omap2,
1813 .algs_info_size = ARRAY_SIZE(omap_sham_algs_info_omap2),
1814 .flags = BIT(FLAGS_BE32_SHA1),
1815 .digest_size = SHA1_DIGEST_SIZE,
1816 .copy_hash = omap_sham_copy_hash_omap2,
1817 .write_ctrl = omap_sham_write_ctrl_omap2,
1818 .trigger = omap_sham_trigger_omap2,
1819 .poll_irq = omap_sham_poll_irq_omap2,
1820 .intr_hdlr = omap_sham_irq_omap2,
1821 .idigest_ofs = 0x00,
1822 .din_ofs = 0x1c,
1823 .digcnt_ofs = 0x14,
1824 .rev_ofs = 0x5c,
1825 .mask_ofs = 0x60,
1826 .sysstatus_ofs = 0x64,
1827 .major_mask = 0xf0,
1828 .major_shift = 4,
1829 .minor_mask = 0x0f,
1830 .minor_shift = 0,
1833 #ifdef CONFIG_OF
1834 static struct omap_sham_algs_info omap_sham_algs_info_omap4[] = {
1836 .algs_list = algs_sha1_md5,
1837 .size = ARRAY_SIZE(algs_sha1_md5),
1840 .algs_list = algs_sha224_sha256,
1841 .size = ARRAY_SIZE(algs_sha224_sha256),
1845 static const struct omap_sham_pdata omap_sham_pdata_omap4 = {
1846 .algs_info = omap_sham_algs_info_omap4,
1847 .algs_info_size = ARRAY_SIZE(omap_sham_algs_info_omap4),
1848 .flags = BIT(FLAGS_AUTO_XOR),
1849 .digest_size = SHA256_DIGEST_SIZE,
1850 .copy_hash = omap_sham_copy_hash_omap4,
1851 .write_ctrl = omap_sham_write_ctrl_omap4,
1852 .trigger = omap_sham_trigger_omap4,
1853 .poll_irq = omap_sham_poll_irq_omap4,
1854 .intr_hdlr = omap_sham_irq_omap4,
1855 .idigest_ofs = 0x020,
1856 .odigest_ofs = 0x0,
1857 .din_ofs = 0x080,
1858 .digcnt_ofs = 0x040,
1859 .rev_ofs = 0x100,
1860 .mask_ofs = 0x110,
1861 .sysstatus_ofs = 0x114,
1862 .mode_ofs = 0x44,
1863 .length_ofs = 0x48,
1864 .major_mask = 0x0700,
1865 .major_shift = 8,
1866 .minor_mask = 0x003f,
1867 .minor_shift = 0,
1870 static struct omap_sham_algs_info omap_sham_algs_info_omap5[] = {
1872 .algs_list = algs_sha1_md5,
1873 .size = ARRAY_SIZE(algs_sha1_md5),
1876 .algs_list = algs_sha224_sha256,
1877 .size = ARRAY_SIZE(algs_sha224_sha256),
1880 .algs_list = algs_sha384_sha512,
1881 .size = ARRAY_SIZE(algs_sha384_sha512),
1885 static const struct omap_sham_pdata omap_sham_pdata_omap5 = {
1886 .algs_info = omap_sham_algs_info_omap5,
1887 .algs_info_size = ARRAY_SIZE(omap_sham_algs_info_omap5),
1888 .flags = BIT(FLAGS_AUTO_XOR),
1889 .digest_size = SHA512_DIGEST_SIZE,
1890 .copy_hash = omap_sham_copy_hash_omap4,
1891 .write_ctrl = omap_sham_write_ctrl_omap4,
1892 .trigger = omap_sham_trigger_omap4,
1893 .poll_irq = omap_sham_poll_irq_omap4,
1894 .intr_hdlr = omap_sham_irq_omap4,
1895 .idigest_ofs = 0x240,
1896 .odigest_ofs = 0x200,
1897 .din_ofs = 0x080,
1898 .digcnt_ofs = 0x280,
1899 .rev_ofs = 0x100,
1900 .mask_ofs = 0x110,
1901 .sysstatus_ofs = 0x114,
1902 .mode_ofs = 0x284,
1903 .length_ofs = 0x288,
1904 .major_mask = 0x0700,
1905 .major_shift = 8,
1906 .minor_mask = 0x003f,
1907 .minor_shift = 0,
1910 static const struct of_device_id omap_sham_of_match[] = {
1912 .compatible = "ti,omap2-sham",
1913 .data = &omap_sham_pdata_omap2,
1916 .compatible = "ti,omap3-sham",
1917 .data = &omap_sham_pdata_omap2,
1920 .compatible = "ti,omap4-sham",
1921 .data = &omap_sham_pdata_omap4,
1924 .compatible = "ti,omap5-sham",
1925 .data = &omap_sham_pdata_omap5,
1929 MODULE_DEVICE_TABLE(of, omap_sham_of_match);
1931 static int omap_sham_get_res_of(struct omap_sham_dev *dd,
1932 struct device *dev, struct resource *res)
1934 struct device_node *node = dev->of_node;
1935 const struct of_device_id *match;
1936 int err = 0;
1938 match = of_match_device(of_match_ptr(omap_sham_of_match), dev);
1939 if (!match) {
1940 dev_err(dev, "no compatible OF match\n");
1941 err = -EINVAL;
1942 goto err;
1945 err = of_address_to_resource(node, 0, res);
1946 if (err < 0) {
1947 dev_err(dev, "can't translate OF node address\n");
1948 err = -EINVAL;
1949 goto err;
1952 dd->irq = irq_of_parse_and_map(node, 0);
1953 if (!dd->irq) {
1954 dev_err(dev, "can't translate OF irq value\n");
1955 err = -EINVAL;
1956 goto err;
1959 dd->pdata = match->data;
1961 err:
1962 return err;
1964 #else
1965 static const struct of_device_id omap_sham_of_match[] = {
1969 static int omap_sham_get_res_of(struct omap_sham_dev *dd,
1970 struct device *dev, struct resource *res)
1972 return -EINVAL;
1974 #endif
1976 static int omap_sham_get_res_pdev(struct omap_sham_dev *dd,
1977 struct platform_device *pdev, struct resource *res)
1979 struct device *dev = &pdev->dev;
1980 struct resource *r;
1981 int err = 0;
1983 /* Get the base address */
1984 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1985 if (!r) {
1986 dev_err(dev, "no MEM resource info\n");
1987 err = -ENODEV;
1988 goto err;
1990 memcpy(res, r, sizeof(*res));
1992 /* Get the IRQ */
1993 dd->irq = platform_get_irq(pdev, 0);
1994 if (dd->irq < 0) {
1995 dev_err(dev, "no IRQ resource info\n");
1996 err = dd->irq;
1997 goto err;
2000 /* Only OMAP2/3 can be non-DT */
2001 dd->pdata = &omap_sham_pdata_omap2;
2003 err:
2004 return err;
2007 static int omap_sham_probe(struct platform_device *pdev)
2009 struct omap_sham_dev *dd;
2010 struct device *dev = &pdev->dev;
2011 struct resource res;
2012 dma_cap_mask_t mask;
2013 int err, i, j;
2014 u32 rev;
2016 dd = devm_kzalloc(dev, sizeof(struct omap_sham_dev), GFP_KERNEL);
2017 if (dd == NULL) {
2018 dev_err(dev, "unable to alloc data struct.\n");
2019 err = -ENOMEM;
2020 goto data_err;
2022 dd->dev = dev;
2023 platform_set_drvdata(pdev, dd);
2025 INIT_LIST_HEAD(&dd->list);
2026 spin_lock_init(&dd->lock);
2027 tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
2028 crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);
2030 err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) :
2031 omap_sham_get_res_pdev(dd, pdev, &res);
2032 if (err)
2033 goto data_err;
2035 dd->io_base = devm_ioremap_resource(dev, &res);
2036 if (IS_ERR(dd->io_base)) {
2037 err = PTR_ERR(dd->io_base);
2038 goto data_err;
2040 dd->phys_base = res.start;
2042 err = devm_request_irq(dev, dd->irq, dd->pdata->intr_hdlr,
2043 IRQF_TRIGGER_NONE, dev_name(dev), dd);
2044 if (err) {
2045 dev_err(dev, "unable to request irq %d, err = %d\n",
2046 dd->irq, err);
2047 goto data_err;
2050 dma_cap_zero(mask);
2051 dma_cap_set(DMA_SLAVE, mask);
2053 dd->dma_lch = dma_request_chan(dev, "rx");
2054 if (IS_ERR(dd->dma_lch)) {
2055 err = PTR_ERR(dd->dma_lch);
2056 if (err == -EPROBE_DEFER)
2057 goto data_err;
2059 dd->polling_mode = 1;
2060 dev_dbg(dev, "using polling mode instead of dma\n");
2063 dd->flags |= dd->pdata->flags;
2065 pm_runtime_use_autosuspend(dev);
2066 pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
2068 pm_runtime_enable(dev);
2069 pm_runtime_irq_safe(dev);
2071 err = pm_runtime_get_sync(dev);
2072 if (err < 0) {
2073 dev_err(dev, "failed to get sync: %d\n", err);
2074 goto err_pm;
2077 rev = omap_sham_read(dd, SHA_REG_REV(dd));
2078 pm_runtime_put_sync(&pdev->dev);
2080 dev_info(dev, "hw accel on OMAP rev %u.%u\n",
2081 (rev & dd->pdata->major_mask) >> dd->pdata->major_shift,
2082 (rev & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
2084 spin_lock(&sham.lock);
2085 list_add_tail(&dd->list, &sham.dev_list);
2086 spin_unlock(&sham.lock);
2088 for (i = 0; i < dd->pdata->algs_info_size; i++) {
2089 for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
2090 struct ahash_alg *alg;
2092 alg = &dd->pdata->algs_info[i].algs_list[j];
2093 alg->export = omap_sham_export;
2094 alg->import = omap_sham_import;
2095 alg->halg.statesize = sizeof(struct omap_sham_reqctx) +
2096 BUFLEN;
2097 err = crypto_register_ahash(alg);
2098 if (err)
2099 goto err_algs;
2101 dd->pdata->algs_info[i].registered++;
2105 return 0;
2107 err_algs:
2108 for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
2109 for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
2110 crypto_unregister_ahash(
2111 &dd->pdata->algs_info[i].algs_list[j]);
2112 err_pm:
2113 pm_runtime_disable(dev);
2114 if (!dd->polling_mode)
2115 dma_release_channel(dd->dma_lch);
2116 data_err:
2117 dev_err(dev, "initialization failed.\n");
2119 return err;
2122 static int omap_sham_remove(struct platform_device *pdev)
2124 struct omap_sham_dev *dd;
2125 int i, j;
2127 dd = platform_get_drvdata(pdev);
2128 if (!dd)
2129 return -ENODEV;
2130 spin_lock(&sham.lock);
2131 list_del(&dd->list);
2132 spin_unlock(&sham.lock);
2133 for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
2134 for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
2135 crypto_unregister_ahash(
2136 &dd->pdata->algs_info[i].algs_list[j]);
2137 tasklet_kill(&dd->done_task);
2138 pm_runtime_disable(&pdev->dev);
2140 if (!dd->polling_mode)
2141 dma_release_channel(dd->dma_lch);
2143 return 0;
2146 #ifdef CONFIG_PM_SLEEP
2147 static int omap_sham_suspend(struct device *dev)
2149 pm_runtime_put_sync(dev);
2150 return 0;
2153 static int omap_sham_resume(struct device *dev)
2155 int err = pm_runtime_get_sync(dev);
2156 if (err < 0) {
2157 dev_err(dev, "failed to get sync: %d\n", err);
2158 return err;
2160 return 0;
2162 #endif
2164 static SIMPLE_DEV_PM_OPS(omap_sham_pm_ops, omap_sham_suspend, omap_sham_resume);
2166 static struct platform_driver omap_sham_driver = {
2167 .probe = omap_sham_probe,
2168 .remove = omap_sham_remove,
2169 .driver = {
2170 .name = "omap-sham",
2171 .pm = &omap_sham_pm_ops,
2172 .of_match_table = omap_sham_of_match,
2176 module_platform_driver(omap_sham_driver);
2178 MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
2179 MODULE_LICENSE("GPL v2");
2180 MODULE_AUTHOR("Dmitry Kasatkin");
2181 MODULE_ALIAS("platform:omap-sham");