/*
 * This file is part of STM32 Crypto driver for Linux.
 *
 * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
 * Author(s): Lionel DEBIEVE <lionel.debieve@st.com> for STMicroelectronics.
 *
 * License terms: GPL V2.0.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include <crypto/engine.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>
#define HASH_CR				0x00
#define HASH_DIN			0x04
#define HASH_STR			0x08
#define HASH_IMR			0x20
#define HASH_SR				0x24
#define HASH_CSR(x)			(0x0F8 + ((x) * 0x04))
#define HASH_HREG(x)			(0x310 + ((x) * 0x04))
#define HASH_HWCFGR			0x3F0
#define HASH_VER			0x3F4
#define HASH_ID				0x3F8

/* Control Register */
#define HASH_CR_INIT			BIT(2)
#define HASH_CR_DMAE			BIT(3)
#define HASH_CR_DATATYPE_POS		4
#define HASH_CR_MODE			BIT(6)
#define HASH_CR_MDMAT			BIT(13)
#define HASH_CR_DMAA			BIT(14)
#define HASH_CR_LKEY			BIT(16)

#define HASH_CR_ALGO_SHA1		0x0
#define HASH_CR_ALGO_MD5		0x80
#define HASH_CR_ALGO_SHA224		0x40000
#define HASH_CR_ALGO_SHA256		0x40080

/* Interrupt */
#define HASH_DINIE			BIT(0)
#define HASH_DCIE			BIT(1)

/* Interrupt Mask */
#define HASH_MASK_CALC_COMPLETION	BIT(0)
#define HASH_MASK_DATA_INPUT		BIT(1)

/* Context swap register */
#define HASH_CSR_REGISTER_NUMBER	53

/* Status Flags */
#define HASH_SR_DATA_INPUT_READY	BIT(0)
#define HASH_SR_OUTPUT_READY		BIT(1)
#define HASH_SR_DMA_ACTIVE		BIT(2)
#define HASH_SR_BUSY			BIT(3)

/* STR Register */
#define HASH_STR_NBLW_MASK		GENMASK(4, 0)
#define HASH_STR_DCAL			BIT(8)

#define HASH_FLAGS_INIT			BIT(0)
#define HASH_FLAGS_OUTPUT_READY		BIT(1)
#define HASH_FLAGS_CPU			BIT(2)
#define HASH_FLAGS_DMA_READY		BIT(3)
#define HASH_FLAGS_DMA_ACTIVE		BIT(4)
#define HASH_FLAGS_HMAC_INIT		BIT(5)
#define HASH_FLAGS_HMAC_FINAL		BIT(6)
#define HASH_FLAGS_HMAC_KEY		BIT(7)

#define HASH_FLAGS_FINAL		BIT(15)
#define HASH_FLAGS_FINUP		BIT(16)
#define HASH_FLAGS_ALGO_MASK		GENMASK(21, 18)
#define HASH_FLAGS_MD5			BIT(18)
#define HASH_FLAGS_SHA1			BIT(19)
#define HASH_FLAGS_SHA224		BIT(20)
#define HASH_FLAGS_SHA256		BIT(21)
#define HASH_FLAGS_ERRORS		BIT(22)
#define HASH_FLAGS_HMAC			BIT(23)

#define HASH_OP_UPDATE			1
#define HASH_OP_FINAL			2

enum stm32_hash_data_format {
	HASH_DATA_32_BITS		= 0x0,
	HASH_DATA_16_BITS		= 0x1,
	HASH_DATA_8_BITS		= 0x2,
	HASH_DATA_1_BIT			= 0x3
};

#define HASH_BUFLEN			256
#define HASH_LONG_KEY			64
#define HASH_MAX_KEY_SIZE		(SHA256_BLOCK_SIZE * 8)
#define HASH_QUEUE_LENGTH		16
#define HASH_DMA_THRESHOLD		50

#define HASH_AUTOSUSPEND_DELAY		50
struct stm32_hash_ctx {
	struct crypto_engine_ctx enginectx;
	struct stm32_hash_dev	*hdev;
	unsigned long		flags;

	u8			key[HASH_MAX_KEY_SIZE];
	int			keylen;
};

struct stm32_hash_request_ctx {
	struct stm32_hash_dev	*hdev;
	unsigned long		flags;
	unsigned long		op;

	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
	size_t			digcnt;
	size_t			bufcnt;
	size_t			buflen;

	/* DMA */
	struct scatterlist	*sg;
	unsigned int		offset;
	unsigned int		total;
	struct scatterlist	sg_key;

	dma_addr_t		dma_addr;
	size_t			dma_ct;
	int			nents;

	u8			data_type;

	u8 buffer[HASH_BUFLEN] __aligned(sizeof(u32));

	/* Export Context */
	u32			*hw_context;
};

struct stm32_hash_algs_info {
	struct ahash_alg	*algs_list;
	size_t			size;
};

struct stm32_hash_pdata {
	struct stm32_hash_algs_info	*algs_info;
	size_t				algs_info_size;
};

struct stm32_hash_dev {
	struct list_head	list;
	struct device		*dev;
	struct clk		*clk;
	struct reset_control	*rst;
	void __iomem		*io_base;
	phys_addr_t		phys_base;
	u32			dma_mode;
	u32			dma_maxburst;

	struct ahash_request	*req;
	struct crypto_engine	*engine;

	int			err;
	unsigned long		flags;

	struct dma_chan		*dma_lch;
	struct completion	dma_completion;

	const struct stm32_hash_pdata	*pdata;
};

struct stm32_hash_drv {
	struct list_head	dev_list;
	spinlock_t		lock; /* List protection access */
};

static struct stm32_hash_drv stm32_hash = {
	.dev_list = LIST_HEAD_INIT(stm32_hash.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(stm32_hash.lock),
};
static void stm32_hash_dma_callback(void *param);

static inline u32 stm32_hash_read(struct stm32_hash_dev *hdev, u32 offset)
{
	return readl_relaxed(hdev->io_base + offset);
}

static inline void stm32_hash_write(struct stm32_hash_dev *hdev,
				    u32 offset, u32 value)
{
	writel_relaxed(value, hdev->io_base + offset);
}

static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev)
{
	u32 status;

	return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status,
					  !(status & HASH_SR_BUSY), 10, 10000);
}
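
/*
 * Program NBLW in HASH_STR: the number of valid bits in the last 32-bit
 * word written to HASH_DIN, i.e. 8 bits per byte beyond the last full
 * word (8 * (length % 4)).
 */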
static void stm32_hash_set_nblw(struct stm32_hash_dev *hdev, int length)
{
	u32 reg;

	reg = stm32_hash_read(hdev, HASH_STR);
	reg &= ~(HASH_STR_NBLW_MASK);
	reg |= (8U * ((length) % 4U));
	stm32_hash_write(hdev, HASH_STR, reg);
}
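
/*
 * Feed the HMAC key into HASH_DIN word by word, then set DCAL to start the
 * key processing. Returns -EINPROGRESS while the hardware digests the key,
 * or 0 if the key is empty.
 */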
static int stm32_hash_write_key(struct stm32_hash_dev *hdev)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 reg;
	int keylen = ctx->keylen;
	void *key = ctx->key;

	if (keylen) {
		stm32_hash_set_nblw(hdev, keylen);

		while (keylen > 0) {
			stm32_hash_write(hdev, HASH_DIN, *(u32 *)key);
			keylen -= 4;
			key += 4;
		}

		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);

		return -EINPROGRESS;
	}

	return 0;
}
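
/*
 * Program the control register for a new computation: algorithm, data type
 * and, for HMAC, mode and long-key selection. Also enables the digest
 * calculation completion interrupt and marks the device initialized.
 */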
static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	u32 reg = HASH_CR_INIT;

	if (!(hdev->flags & HASH_FLAGS_INIT)) {
		switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
		case HASH_FLAGS_MD5:
			reg |= HASH_CR_ALGO_MD5;
			break;
		case HASH_FLAGS_SHA1:
			reg |= HASH_CR_ALGO_SHA1;
			break;
		case HASH_FLAGS_SHA224:
			reg |= HASH_CR_ALGO_SHA224;
			break;
		case HASH_FLAGS_SHA256:
			reg |= HASH_CR_ALGO_SHA256;
			break;
		default:
			reg |= HASH_CR_ALGO_MD5;
		}

		reg |= (rctx->data_type << HASH_CR_DATATYPE_POS);

		if (rctx->flags & HASH_FLAGS_HMAC) {
			hdev->flags |= HASH_FLAGS_HMAC;
			reg |= HASH_CR_MODE;
			if (ctx->keylen > HASH_LONG_KEY)
				reg |= HASH_CR_LKEY;
		}

		stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);

		stm32_hash_write(hdev, HASH_CR, reg);

		hdev->flags |= HASH_FLAGS_INIT;

		dev_dbg(hdev->dev, "Write Control %x\n", reg);
	}
}
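
/*
 * Copy request data from the scatterlist into the internal linear buffer
 * until either the buffer is full or the request data is exhausted.
 */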
static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
{
	size_t count;

	while ((rctx->bufcnt < rctx->buflen) && rctx->total) {
		count = min(rctx->sg->length - rctx->offset, rctx->total);
		count = min(count, rctx->buflen - rctx->bufcnt);

		if (count <= 0) {
			if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
				rctx->sg = sg_next(rctx->sg);
				continue;
			} else {
				break;
			}
		}

		scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, rctx->sg,
					 rctx->offset, count, 0);

		rctx->bufcnt += count;
		rctx->offset += count;
		rctx->total -= count;

		if (rctx->offset == rctx->sg->length) {
			rctx->sg = sg_next(rctx->sg);
			if (rctx->sg)
				rctx->offset = 0;
			else
				rctx->total = 0;
		}
	}
}
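
/*
 * Push @length bytes into the FIFO using CPU (PIO) writes. For the final
 * block, NBLW is programmed and DCAL starts the digest computation; HMAC
 * requests additionally feed the key before the message and again after
 * the final block.
 */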
static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
			       const u8 *buf, size_t length, int final)
{
	unsigned int count, len32;
	const u32 *buffer = (const u32 *)buf;
	u32 reg;

	if (final)
		hdev->flags |= HASH_FLAGS_FINAL;

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	dev_dbg(hdev->dev, "%s: length: %zd, final: %x len32 %i\n",
		__func__, length, final, len32);

	hdev->flags |= HASH_FLAGS_CPU;

	stm32_hash_write_ctrl(hdev);

	if (stm32_hash_wait_busy(hdev))
		return -ETIMEDOUT;

	if ((hdev->flags & HASH_FLAGS_HMAC) &&
	    (hdev->flags & ~HASH_FLAGS_HMAC_KEY)) {
		hdev->flags |= HASH_FLAGS_HMAC_KEY;
		stm32_hash_write_key(hdev);
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
	}

	for (count = 0; count < len32; count++)
		stm32_hash_write(hdev, HASH_DIN, buffer[count]);

	if (final) {
		stm32_hash_set_nblw(hdev, length);
		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);
		if (hdev->flags & HASH_FLAGS_HMAC) {
			if (stm32_hash_wait_busy(hdev))
				return -ETIMEDOUT;
			stm32_hash_write_key(hdev);
		}
		return -EINPROGRESS;
	}

	return 0;
}
static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	int bufcnt, err = 0, final;

	dev_dbg(hdev->dev, "%s flags %lx\n", __func__, rctx->flags);

	final = (rctx->flags & HASH_FLAGS_FINUP);

	while ((rctx->total >= rctx->buflen) ||
	       (rctx->bufcnt + rctx->total >= rctx->buflen)) {
		stm32_hash_append_sg(rctx);
		bufcnt = rctx->bufcnt;
		rctx->bufcnt = 0;
		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, 0);
	}

	stm32_hash_append_sg(rctx);

	if (final) {
		bufcnt = rctx->bufcnt;
		rctx->bufcnt = 0;
		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt,
					  (rctx->flags & HASH_FLAGS_FINUP));
	}

	return err;
}
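
/*
 * Submit one scatterlist entry to the DMA engine and synchronously wait
 * for its completion (100 ms timeout). @mdma selects multiple-DMA-transfer
 * mode (MDMAT), used for all but the last chunk of a request.
 */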
static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
			       struct scatterlist *sg, int length, int mdma)
{
	struct dma_async_tx_descriptor *in_desc;
	dma_cookie_t cookie;
	u32 reg;
	int err;

	in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1,
					  DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT |
					  DMA_CTRL_ACK);
	if (!in_desc) {
		dev_err(hdev->dev, "dmaengine_prep_slave error\n");
		return -ENOMEM;
	}

	reinit_completion(&hdev->dma_completion);
	in_desc->callback = stm32_hash_dma_callback;
	in_desc->callback_param = hdev;

	hdev->flags |= HASH_FLAGS_FINAL;
	hdev->flags |= HASH_FLAGS_DMA_ACTIVE;

	reg = stm32_hash_read(hdev, HASH_CR);

	if (mdma)
		reg |= HASH_CR_MDMAT;
	else
		reg &= ~HASH_CR_MDMAT;

	reg |= HASH_CR_DMAE;

	stm32_hash_write(hdev, HASH_CR, reg);

	stm32_hash_set_nblw(hdev, length);

	cookie = dmaengine_submit(in_desc);
	err = dma_submit_error(cookie);
	if (err)
		return -ENOMEM;

	dma_async_issue_pending(hdev->dma_lch);

	if (!wait_for_completion_interruptible_timeout(&hdev->dma_completion,
						       msecs_to_jiffies(100)))
		err = -ETIMEDOUT;

	if (dma_async_is_tx_complete(hdev->dma_lch, cookie,
				     NULL, NULL) != DMA_COMPLETE)
		err = -ETIMEDOUT;

	if (err) {
		dev_err(hdev->dev, "DMA Error %i\n", err);
		dmaengine_terminate_all(hdev->dma_lch);
		return err;
	}

	return -EINPROGRESS;
}
static void stm32_hash_dma_callback(void *param)
{
	struct stm32_hash_dev *hdev = param;

	complete(&hdev->dma_completion);

	hdev->flags |= HASH_FLAGS_DMA_READY;
}
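
/*
 * Load the HMAC key on the DMA path: short keys (below HASH_DMA_THRESHOLD),
 * or any key when dma_mode == 1, are written by CPU through
 * stm32_hash_write_key(); longer keys are sent by DMA.
 */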
static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	int err;

	if (ctx->keylen < HASH_DMA_THRESHOLD || (hdev->dma_mode == 1)) {
		err = stm32_hash_write_key(hdev);
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
	} else {
		if (!(hdev->flags & HASH_FLAGS_HMAC_KEY))
			sg_init_one(&rctx->sg_key, ctx->key,
				    ALIGN(ctx->keylen, sizeof(u32)));

		rctx->dma_ct = dma_map_sg(hdev->dev, &rctx->sg_key, 1,
					  DMA_TO_DEVICE);
		if (rctx->dma_ct == 0) {
			dev_err(hdev->dev, "dma_map_sg error\n");
			return -ENOMEM;
		}

		err = stm32_hash_xmit_dma(hdev, &rctx->sg_key, ctx->keylen, 0);

		dma_unmap_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE);
	}

	return err;
}
static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
{
	struct dma_slave_config dma_conf;
	int err;

	memset(&dma_conf, 0, sizeof(dma_conf));

	dma_conf.direction = DMA_MEM_TO_DEV;
	dma_conf.dst_addr = hdev->phys_base + HASH_DIN;
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.src_maxburst = hdev->dma_maxburst;
	dma_conf.dst_maxburst = hdev->dma_maxburst;
	dma_conf.device_fc = false;

	hdev->dma_lch = dma_request_slave_channel(hdev->dev, "in");
	if (!hdev->dma_lch) {
		dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
		return -EBUSY;
	}

	err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
	if (err) {
		dma_release_channel(hdev->dma_lch);
		hdev->dma_lch = NULL;
		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
		return err;
	}

	init_completion(&hdev->dma_completion);

	return 0;
}
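
/*
 * DMA transmit path: walk the request scatterlist, aligning or trimming the
 * last entry as the hardware requires, and hand each entry to
 * stm32_hash_xmit_dma(). With dma_mode == 1 the unaligned tail bytes are
 * buffered, zero-padded and written by CPU once DMA has completed.
 */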
static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct scatterlist sg[1], *tsg;
	int err = 0, len = 0, reg, ncp = 0;
	unsigned int i;
	u32 *buffer = (void *)rctx->buffer;

	rctx->sg = hdev->req->src;
	rctx->total = hdev->req->nbytes;

	rctx->nents = sg_nents(rctx->sg);

	if (rctx->nents < 0)
		return -EINVAL;

	stm32_hash_write_ctrl(hdev);

	if (hdev->flags & HASH_FLAGS_HMAC) {
		err = stm32_hash_hmac_dma_send(hdev);
		if (err != -EINPROGRESS)
			return err;
	}

	for_each_sg(rctx->sg, tsg, rctx->nents, i) {
		len = sg->length;

		sg[0] = *tsg;
		if (sg_is_last(sg)) {
			if (hdev->dma_mode == 1) {
				len = (ALIGN(sg->length, 16) - 16);

				ncp = sg_pcopy_to_buffer(
					rctx->sg, rctx->nents,
					rctx->buffer, sg->length - len,
					rctx->total - sg->length + len);

				sg->length = len;
			} else {
				if (!(IS_ALIGNED(sg->length, sizeof(u32)))) {
					len = sg->length;
					sg->length = ALIGN(sg->length,
							   sizeof(u32));
				}
			}
		}

		rctx->dma_ct = dma_map_sg(hdev->dev, sg, 1,
					  DMA_TO_DEVICE);
		if (rctx->dma_ct == 0) {
			dev_err(hdev->dev, "dma_map_sg error\n");
			return -ENOMEM;
		}

		err = stm32_hash_xmit_dma(hdev, sg, len,
					  !sg_is_last(sg));

		dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);

		if (err == -ENOMEM)
			return err;
	}

	if (hdev->dma_mode == 1) {
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
		reg = stm32_hash_read(hdev, HASH_CR);
		reg &= ~HASH_CR_DMAE;
		reg |= HASH_CR_DMAA;
		stm32_hash_write(hdev, HASH_CR, reg);

		if (ncp) {
			memset(buffer + ncp, 0,
			       DIV_ROUND_UP(ncp, sizeof(u32)) - ncp);
			writesl(hdev->io_base + HASH_DIN, buffer,
				DIV_ROUND_UP(ncp, sizeof(u32)));
		}
		stm32_hash_set_nblw(hdev, ncp);
		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);
		err = -EINPROGRESS;
	}

	if (hdev->flags & HASH_FLAGS_HMAC) {
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
		err = stm32_hash_hmac_dma_send(hdev);
	}

	return err;
}
static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx)
{
	struct stm32_hash_dev *hdev = NULL, *tmp;

	spin_lock_bh(&stm32_hash.lock);
	if (!ctx->hdev) {
		list_for_each_entry(tmp, &stm32_hash.dev_list, list) {
			hdev = tmp;
			break;
		}
		ctx->hdev = hdev;
	} else {
		hdev = ctx->hdev;
	}

	spin_unlock_bh(&stm32_hash.lock);

	return hdev;
}
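
/*
 * Decide whether a request is eligible for the DMA path: it must exceed
 * HASH_DMA_THRESHOLD bytes and satisfy the 32-bit alignment constraints on
 * the source scatterlist.
 */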
static bool stm32_hash_dma_aligned_data(struct ahash_request *req)
{
	struct scatterlist *sg;
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	int i;

	if (req->nbytes <= HASH_DMA_THRESHOLD)
		return false;

	if (sg_nents(req->src) > 1) {
		if (hdev->dma_mode == 1)
			return false;
		for_each_sg(req->src, sg, sg_nents(req->src), i) {
			if ((!IS_ALIGNED(sg->length, sizeof(u32))) &&
			    (!sg_is_last(sg)))
				return false;
		}
	}

	if (req->src->offset % 4)
		return false;

	return true;
}
static int stm32_hash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);

	rctx->hdev = hdev;

	rctx->flags = HASH_FLAGS_CPU;

	rctx->digcnt = crypto_ahash_digestsize(tfm);
	switch (rctx->digcnt) {
	case MD5_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_MD5;
		break;
	case SHA1_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_SHA1;
		break;
	case SHA224_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_SHA224;
		break;
	case SHA256_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_SHA256;
		break;
	default:
		return -EINVAL;
	}

	rctx->bufcnt = 0;
	rctx->buflen = HASH_BUFLEN;
	rctx->total = 0;
	rctx->offset = 0;
	rctx->data_type = HASH_DATA_8_BITS;

	memset(rctx->buffer, 0, HASH_BUFLEN);

	if (ctx->flags & HASH_FLAGS_HMAC)
		rctx->flags |= HASH_FLAGS_HMAC;

	dev_dbg(hdev->dev, "%s Flags %lx\n", __func__, rctx->flags);

	return 0;
}
static int stm32_hash_update_req(struct stm32_hash_dev *hdev)
{
	return stm32_hash_update_cpu(hdev);
}

static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
{
	struct ahash_request *req = hdev->req;
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	int err;
	int buflen = rctx->bufcnt;

	rctx->bufcnt = 0;

	if (!(rctx->flags & HASH_FLAGS_CPU))
		err = stm32_hash_dma_send(hdev);
	else
		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, buflen, 1);

	return err;
}
static void stm32_hash_copy_hash(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)rctx->digest;
	unsigned int i, hashsize;

	switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
	case HASH_FLAGS_MD5:
		hashsize = MD5_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA1:
		hashsize = SHA1_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA224:
		hashsize = SHA224_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA256:
		hashsize = SHA256_DIGEST_SIZE;
		break;
	default:
		return;
	}

	for (i = 0; i < hashsize / sizeof(u32); i++)
		hash[i] = be32_to_cpu(stm32_hash_read(rctx->hdev,
						      HASH_HREG(i)));
}

static int stm32_hash_finish(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (!req->result)
		return -EINVAL;

	memcpy(req->result, rctx->digest, rctx->digcnt);

	return 0;
}
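
/*
 * Complete the current request: copy out the digest on success, clear the
 * per-request device flags and release the runtime PM reference before
 * notifying the crypto engine.
 */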
static void stm32_hash_finish_req(struct ahash_request *req, int err)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = rctx->hdev;

	if (!err && (HASH_FLAGS_FINAL & hdev->flags)) {
		stm32_hash_copy_hash(req);
		err = stm32_hash_finish(req);
		hdev->flags &= ~(HASH_FLAGS_FINAL | HASH_FLAGS_CPU |
				 HASH_FLAGS_INIT | HASH_FLAGS_DMA_READY |
				 HASH_FLAGS_OUTPUT_READY | HASH_FLAGS_HMAC |
				 HASH_FLAGS_HMAC_INIT | HASH_FLAGS_HMAC_FINAL |
				 HASH_FLAGS_HMAC_KEY);
	} else {
		rctx->flags |= HASH_FLAGS_ERRORS;
	}

	pm_runtime_mark_last_busy(hdev->dev);
	pm_runtime_put_autosuspend(hdev->dev);

	crypto_finalize_hash_request(hdev->engine, req, err);
}
static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
			      struct stm32_hash_request_ctx *rctx)
{
	pm_runtime_get_sync(hdev->dev);

	if (!(HASH_FLAGS_INIT & hdev->flags)) {
		stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
		stm32_hash_write(hdev, HASH_STR, 0);
		stm32_hash_write(hdev, HASH_DIN, 0);
		stm32_hash_write(hdev, HASH_IMR, 0);
		hdev->err = 0;
	}

	return 0;
}
static int stm32_hash_one_request(struct crypto_engine *engine, void *areq);
static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq);

static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev,
				   struct ahash_request *req)
{
	return crypto_transfer_hash_request_to_engine(hdev->engine, req);
}

static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = container_of(areq, struct ahash_request,
						 base);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	struct stm32_hash_request_ctx *rctx;

	if (!hdev)
		return -ENODEV;

	hdev->req = req;

	rctx = ahash_request_ctx(req);

	dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n",
		rctx->op, req->nbytes);

	return stm32_hash_hw_init(hdev, rctx);
}
static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = container_of(areq, struct ahash_request,
						 base);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	struct stm32_hash_request_ctx *rctx;
	int err = 0;

	if (!hdev)
		return -ENODEV;

	hdev->req = req;

	rctx = ahash_request_ctx(req);

	if (rctx->op == HASH_OP_UPDATE)
		err = stm32_hash_update_req(hdev);
	else if (rctx->op == HASH_OP_FINAL)
		err = stm32_hash_final_req(hdev);

	/* done task will not finish it, so do it here */
	if (err != -EINPROGRESS)
		stm32_hash_finish_req(req, err);

	return 0;
}
static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct stm32_hash_dev *hdev = ctx->hdev;

	rctx->op = op;

	return stm32_hash_handle_queue(hdev, req);
}
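
/*
 * ahash entry points. update() buffers small chunks and only queues work
 * to the crypto engine once the internal buffer would overflow;
 * final()/finup() flag the request as final and queue the closing
 * operation.
 */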
static int stm32_hash_update(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (!req->nbytes || !(rctx->flags & HASH_FLAGS_CPU))
		return 0;

	rctx->total = req->nbytes;
	rctx->sg = req->src;
	rctx->offset = 0;

	if ((rctx->bufcnt + rctx->total < rctx->buflen)) {
		stm32_hash_append_sg(rctx);
		return 0;
	}

	return stm32_hash_enqueue(req, HASH_OP_UPDATE);
}

static int stm32_hash_final(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);

	rctx->flags |= HASH_FLAGS_FINUP;

	return stm32_hash_enqueue(req, HASH_OP_FINAL);
}

static int stm32_hash_finup(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	int err1, err2;

	rctx->flags |= HASH_FLAGS_FINUP;

	if (hdev->dma_lch && stm32_hash_dma_aligned_data(req))
		rctx->flags &= ~HASH_FLAGS_CPU;

	err1 = stm32_hash_update(req);

	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;

	/*
	 * final() always has to be called to clean up resources even if
	 * update() failed, except when update() returned -EINPROGRESS.
	 */
	err2 = stm32_hash_final(req);

	return err1 ?: err2;
}

static int stm32_hash_digest(struct ahash_request *req)
{
	return stm32_hash_init(req) ?: stm32_hash_finup(req);
}
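
/*
 * export()/import() save and restore the hardware context (IMR, STR, CR
 * plus the HASH_CSR register bank) alongside the request context, so a
 * computation can be suspended and resumed.
 */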
static int stm32_hash_export(struct ahash_request *req, void *out)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	u32 *preg;
	unsigned int i;

	pm_runtime_get_sync(hdev->dev);

	while ((stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY))
		cpu_relax();

	rctx->hw_context = kmalloc_array(3 + HASH_CSR_REGISTER_NUMBER,
					 sizeof(u32),
					 GFP_KERNEL);
	if (!rctx->hw_context) {
		/* balance the pm_runtime_get_sync() above */
		pm_runtime_mark_last_busy(hdev->dev);
		pm_runtime_put_autosuspend(hdev->dev);
		return -ENOMEM;
	}

	preg = rctx->hw_context;

	*preg++ = stm32_hash_read(hdev, HASH_IMR);
	*preg++ = stm32_hash_read(hdev, HASH_STR);
	*preg++ = stm32_hash_read(hdev, HASH_CR);
	for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
		*preg++ = stm32_hash_read(hdev, HASH_CSR(i));

	pm_runtime_mark_last_busy(hdev->dev);
	pm_runtime_put_autosuspend(hdev->dev);

	memcpy(out, rctx, sizeof(*rctx));

	return 0;
}

static int stm32_hash_import(struct ahash_request *req, const void *in)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	const u32 *preg = in;
	u32 reg;
	unsigned int i;

	memcpy(rctx, in, sizeof(*rctx));

	preg = rctx->hw_context;

	pm_runtime_get_sync(hdev->dev);

	stm32_hash_write(hdev, HASH_IMR, *preg++);
	stm32_hash_write(hdev, HASH_STR, *preg++);
	stm32_hash_write(hdev, HASH_CR, *preg);
	reg = *preg++ | HASH_CR_INIT;
	stm32_hash_write(hdev, HASH_CR, reg);

	for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
		stm32_hash_write(hdev, HASH_CSR(i), *preg++);

	pm_runtime_mark_last_busy(hdev->dev);
	pm_runtime_put_autosuspend(hdev->dev);

	kfree(rctx->hw_context);

	return 0;
}
static int stm32_hash_setkey(struct crypto_ahash *tfm,
			     const u8 *key, unsigned int keylen)
{
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	if (keylen <= HASH_MAX_KEY_SIZE) {
		memcpy(ctx->key, key, keylen);
		ctx->keylen = keylen;
	} else {
		return -ENOMEM;
	}

	return 0;
}
static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
				    const char *algs_hmac_name)
{
	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct stm32_hash_request_ctx));

	ctx->keylen = 0;

	if (algs_hmac_name)
		ctx->flags |= HASH_FLAGS_HMAC;

	ctx->enginectx.op.do_one_request = stm32_hash_one_request;
	ctx->enginectx.op.prepare_request = stm32_hash_prepare_req;
	ctx->enginectx.op.unprepare_request = NULL;
	return 0;
}

static int stm32_hash_cra_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, NULL);
}

static int stm32_hash_cra_md5_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "md5");
}

static int stm32_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha1");
}

static int stm32_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha224");
}

static int stm32_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha256");
}
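
/*
 * Interrupt handling: the hard IRQ handler acknowledges the digest
 * calculation completion interrupt and wakes the threaded handler, which
 * finalizes the current request.
 */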
static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
{
	struct stm32_hash_dev *hdev = dev_id;

	if (HASH_FLAGS_CPU & hdev->flags) {
		if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (HASH_FLAGS_DMA_READY & hdev->flags) {
		if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) {
			hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE;
			goto finish;
		}
	}

	return IRQ_HANDLED;

finish:
	/* Finish current request */
	stm32_hash_finish_req(hdev->req, 0);

	return IRQ_HANDLED;
}

static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
{
	struct stm32_hash_dev *hdev = dev_id;
	u32 reg;

	reg = stm32_hash_read(hdev, HASH_SR);
	if (reg & HASH_SR_OUTPUT_READY) {
		reg &= ~HASH_SR_OUTPUT_READY;
		stm32_hash_write(hdev, HASH_SR, reg);
		hdev->flags |= HASH_FLAGS_OUTPUT_READY;
		/* Disable IT */
		stm32_hash_write(hdev, HASH_IMR, 0);
		return IRQ_WAKE_THREAD;
	}

	return IRQ_NONE;
}
static struct ahash_alg algs_md5_sha1[] = {
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "stm32-md5",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "stm32-hmac-md5",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_md5_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "stm32-sha1",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "stm32-hmac-sha1",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha1_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
};
static struct ahash_alg algs_sha224_sha256[] = {
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "stm32-sha224",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.setkey = stm32_hash_setkey,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "stm32-hmac-sha224",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha224_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "stm32-sha256",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "stm32-hmac-sha256",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha256_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
};
static int stm32_hash_register_algs(struct stm32_hash_dev *hdev)
{
	unsigned int i, j;
	int err;

	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
		for (j = 0; j < hdev->pdata->algs_info[i].size; j++) {
			err = crypto_register_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);
			if (err)
				goto err_algs;
		}
	}

	return 0;

err_algs:
	dev_err(hdev->dev, "Algo %d : %d failed\n", i, j);
	for (; i--; ) {
		for (; j--;)
			crypto_unregister_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);
	}

	return err;
}
static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev)
{
	unsigned int i, j;

	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
		for (j = 0; j < hdev->pdata->algs_info[i].size; j++)
			crypto_unregister_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);
	}

	return 0;
}
static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = {
	{
		.algs_list	= algs_md5_sha1,
		.size		= ARRAY_SIZE(algs_md5_sha1),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = {
	.algs_info	= stm32_hash_algs_info_stm32f4,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f4),
};

static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = {
	{
		.algs_list	= algs_md5_sha1,
		.size		= ARRAY_SIZE(algs_md5_sha1),
	},
	{
		.algs_list	= algs_sha224_sha256,
		.size		= ARRAY_SIZE(algs_sha224_sha256),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = {
	.algs_info	= stm32_hash_algs_info_stm32f7,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f7),
};

static const struct of_device_id stm32_hash_of_match[] = {
	{
		.compatible = "st,stm32f456-hash",
		.data = &stm32_hash_pdata_stm32f4,
	},
	{
		.compatible = "st,stm32f756-hash",
		.data = &stm32_hash_pdata_stm32f7,
	},
	{},
};

MODULE_DEVICE_TABLE(of, stm32_hash_of_match);
static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
				   struct device *dev)
{
	hdev->pdata = of_device_get_match_data(dev);
	if (!hdev->pdata) {
		dev_err(dev, "no compatible OF match\n");
		return -EINVAL;
	}

	if (of_property_read_u32(dev->of_node, "dma-maxburst",
				 &hdev->dma_maxburst)) {
		dev_info(dev, "dma-maxburst not specified, using 0\n");
		hdev->dma_maxburst = 0;
	}

	return 0;
}
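
/*
 * Probe: map the registers, request the IRQ, enable the clock and runtime
 * PM, optionally toggle the reset line and acquire a DMA channel, then
 * register the algorithms on the crypto engine.
 */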
static int stm32_hash_probe(struct platform_device *pdev)
{
	struct stm32_hash_dev *hdev;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret, irq;

	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hdev->io_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(hdev->io_base))
		return PTR_ERR(hdev->io_base);

	hdev->phys_base = res->start;

	ret = stm32_hash_get_of_match(hdev, dev);
	if (ret)
		return ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "Cannot get IRQ resource\n");
		return irq;
	}

	ret = devm_request_threaded_irq(dev, irq, stm32_hash_irq_handler,
					stm32_hash_irq_thread, IRQF_ONESHOT,
					dev_name(dev), hdev);
	if (ret) {
		dev_err(dev, "Cannot grab IRQ\n");
		return ret;
	}

	hdev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(hdev->clk)) {
		dev_err(dev, "failed to get clock for hash (%lu)\n",
			PTR_ERR(hdev->clk));
		return PTR_ERR(hdev->clk);
	}

	ret = clk_prepare_enable(hdev->clk);
	if (ret) {
		dev_err(dev, "failed to enable hash clock (%d)\n", ret);
		return ret;
	}

	pm_runtime_set_autosuspend_delay(dev, HASH_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);

	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	hdev->rst = devm_reset_control_get(&pdev->dev, NULL);
	if (!IS_ERR(hdev->rst)) {
		reset_control_assert(hdev->rst);
		udelay(2);
		reset_control_deassert(hdev->rst);
	}

	hdev->dev = dev;

	platform_set_drvdata(pdev, hdev);

	ret = stm32_hash_dma_init(hdev);
	if (ret)
		dev_dbg(dev, "DMA mode not available\n");

	spin_lock(&stm32_hash.lock);
	list_add_tail(&hdev->list, &stm32_hash.dev_list);
	spin_unlock(&stm32_hash.lock);

	/* Initialize crypto engine */
	hdev->engine = crypto_engine_alloc_init(dev, 1);
	if (!hdev->engine) {
		ret = -ENOMEM;
		goto err_engine;
	}

	ret = crypto_engine_start(hdev->engine);
	if (ret)
		goto err_engine_start;

	hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR);

	/* Register algos */
	ret = stm32_hash_register_algs(hdev);
	if (ret)
		goto err_algs;

	dev_info(dev, "Init HASH done HW ver %x DMA mode %u\n",
		 stm32_hash_read(hdev, HASH_VER), hdev->dma_mode);

	pm_runtime_put_sync(dev);

	return 0;

err_algs:
err_engine_start:
	crypto_engine_exit(hdev->engine);
err_engine:
	spin_lock(&stm32_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&stm32_hash.lock);

	if (hdev->dma_lch)
		dma_release_channel(hdev->dma_lch);

	pm_runtime_disable(dev);
	pm_runtime_put_noidle(dev);

	clk_disable_unprepare(hdev->clk);

	return ret;
}
static int stm32_hash_remove(struct platform_device *pdev)
{
	struct stm32_hash_dev *hdev;
	int ret;

	hdev = platform_get_drvdata(pdev);
	if (!hdev)
		return -ENODEV;

	ret = pm_runtime_get_sync(hdev->dev);
	if (ret < 0)
		return ret;

	stm32_hash_unregister_algs(hdev);

	crypto_engine_exit(hdev->engine);

	spin_lock(&stm32_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&stm32_hash.lock);

	if (hdev->dma_lch)
		dma_release_channel(hdev->dma_lch);

	pm_runtime_disable(hdev->dev);
	pm_runtime_put_noidle(hdev->dev);

	clk_disable_unprepare(hdev->clk);

	return 0;
}
#ifdef CONFIG_PM
static int stm32_hash_runtime_suspend(struct device *dev)
{
	struct stm32_hash_dev *hdev = dev_get_drvdata(dev);

	clk_disable_unprepare(hdev->clk);

	return 0;
}

static int stm32_hash_runtime_resume(struct device *dev)
{
	struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(hdev->clk);
	if (ret) {
		dev_err(hdev->dev, "Failed to prepare_enable clock\n");
		return ret;
	}

	return 0;
}
#endif

static const struct dev_pm_ops stm32_hash_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(stm32_hash_runtime_suspend,
			   stm32_hash_runtime_resume, NULL)
};
static struct platform_driver stm32_hash_driver = {
	.probe		= stm32_hash_probe,
	.remove		= stm32_hash_remove,
	.driver		= {
		.name	= "stm32-hash",
		.pm = &stm32_hash_pm_ops,
		.of_match_table	= stm32_hash_of_match,
	}
};

module_platform_driver(stm32_hash_driver);

MODULE_DESCRIPTION("STM32 SHA1/224/256 & MD5 (HMAC) hw accelerator driver");
MODULE_AUTHOR("Lionel Debieve <lionel.debieve@st.com>");
MODULE_LICENSE("GPL v2");