/*
 * This file is part of STM32 Crypto driver for Linux.
 *
 * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
 * Author(s): Lionel DEBIEVE <lionel.debieve@st.com> for STMicroelectronics.
 *
 * License terms: GPL V2.0.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

#include <crypto/engine.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>
#define HASH_CR				0x00
#define HASH_DIN			0x04
#define HASH_STR			0x08
#define HASH_IMR			0x20
#define HASH_SR				0x24
#define HASH_CSR(x)			(0x0F8 + ((x) * 0x04))
#define HASH_HREG(x)			(0x310 + ((x) * 0x04))
#define HASH_HWCFGR			0x3F0
#define HASH_VER			0x3F4
#define HASH_ID				0x3F8

/* Control Register */
#define HASH_CR_INIT			BIT(2)
#define HASH_CR_DMAE			BIT(3)
#define HASH_CR_DATATYPE_POS		4
#define HASH_CR_MODE			BIT(6)
#define HASH_CR_MDMAT			BIT(13)
#define HASH_CR_DMAA			BIT(14)
#define HASH_CR_LKEY			BIT(16)

#define HASH_CR_ALGO_SHA1		0x0
#define HASH_CR_ALGO_MD5		0x80
#define HASH_CR_ALGO_SHA224		0x40000
#define HASH_CR_ALGO_SHA256		0x40080

/* Interrupt */
#define HASH_DINIE			BIT(0)
#define HASH_DCIE			BIT(1)

/* Interrupt Mask */
#define HASH_MASK_CALC_COMPLETION	BIT(0)
#define HASH_MASK_DATA_INPUT		BIT(1)

/* Context swap register */
#define HASH_CSR_REGISTER_NUMBER	53

/* Status Flags */
#define HASH_SR_DATA_INPUT_READY	BIT(0)
#define HASH_SR_OUTPUT_READY		BIT(1)
#define HASH_SR_DMA_ACTIVE		BIT(2)
#define HASH_SR_BUSY			BIT(3)

/* STR Register */
#define HASH_STR_NBLW_MASK		GENMASK(4, 0)
#define HASH_STR_DCAL			BIT(8)

#define HASH_FLAGS_INIT			BIT(0)
#define HASH_FLAGS_OUTPUT_READY		BIT(1)
#define HASH_FLAGS_CPU			BIT(2)
#define HASH_FLAGS_DMA_READY		BIT(3)
#define HASH_FLAGS_DMA_ACTIVE		BIT(4)
#define HASH_FLAGS_HMAC_INIT		BIT(5)
#define HASH_FLAGS_HMAC_FINAL		BIT(6)
#define HASH_FLAGS_HMAC_KEY		BIT(7)

#define HASH_FLAGS_FINAL		BIT(15)
#define HASH_FLAGS_FINUP		BIT(16)
#define HASH_FLAGS_ALGO_MASK		GENMASK(21, 18)
#define HASH_FLAGS_MD5			BIT(18)
#define HASH_FLAGS_SHA1			BIT(19)
#define HASH_FLAGS_SHA224		BIT(20)
#define HASH_FLAGS_SHA256		BIT(21)
#define HASH_FLAGS_ERRORS		BIT(22)
#define HASH_FLAGS_HMAC			BIT(23)

#define HASH_OP_UPDATE			1
#define HASH_OP_FINAL			2

enum stm32_hash_data_format {
	HASH_DATA_32_BITS		= 0x0,
	HASH_DATA_16_BITS		= 0x1,
	HASH_DATA_8_BITS		= 0x2,
	HASH_DATA_1_BIT			= 0x3
};

#define HASH_BUFLEN			256
#define HASH_LONG_KEY			64
#define HASH_MAX_KEY_SIZE		(SHA256_BLOCK_SIZE * 8)
#define HASH_QUEUE_LENGTH		16
#define HASH_DMA_THRESHOLD		50
struct stm32_hash_ctx {
	struct stm32_hash_dev	*hdev;
	unsigned long		flags;

	u8			key[HASH_MAX_KEY_SIZE];
	int			keylen;
};

struct stm32_hash_request_ctx {
	struct stm32_hash_dev	*hdev;
	unsigned long		flags;
	unsigned long		op;

	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
	size_t			digcnt;
	size_t			bufcnt;
	size_t			buflen;

	/* DMA */
	struct scatterlist	*sg;
	unsigned int		offset;
	unsigned int		total;
	struct scatterlist	sg_key;

	dma_addr_t		dma_addr;
	size_t			dma_ct;
	int			nents;

	u8			data_type;

	u8 buffer[HASH_BUFLEN] __aligned(sizeof(u32));

	/* Export Context */
	u32			*hw_context;
};

struct stm32_hash_algs_info {
	struct ahash_alg	*algs_list;
	size_t			size;
};

struct stm32_hash_pdata {
	struct stm32_hash_algs_info	*algs_info;
	size_t				algs_info_size;
};

struct stm32_hash_dev {
	struct list_head	list;
	struct device		*dev;
	struct clk		*clk;
	struct reset_control	*rst;
	void __iomem		*io_base;
	phys_addr_t		phys_base;
	u32			dma_mode;
	u32			dma_maxburst;

	spinlock_t		lock; /* lock to protect queue */

	struct ahash_request	*req;
	struct crypto_engine	*engine;

	int			err;
	unsigned long		flags;

	struct dma_chan		*dma_lch;
	struct completion	dma_completion;

	const struct stm32_hash_pdata	*pdata;
};

struct stm32_hash_drv {
	struct list_head	dev_list;
	spinlock_t		lock; /* List protection access */
};

static struct stm32_hash_drv stm32_hash = {
	.dev_list = LIST_HEAD_INIT(stm32_hash.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(stm32_hash.lock),
};

static void stm32_hash_dma_callback(void *param);

static inline u32 stm32_hash_read(struct stm32_hash_dev *hdev, u32 offset)
{
	return readl_relaxed(hdev->io_base + offset);
}

static inline void stm32_hash_write(struct stm32_hash_dev *hdev,
				    u32 offset, u32 value)
{
	writel_relaxed(value, hdev->io_base + offset);
}

static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev)
{
	u32 status;

	return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status,
					  !(status & HASH_SR_BUSY), 10, 10000);
}

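/*
 * Program NBLW in the STR register: the number of valid *bits* in the last
 * 32-bit word written to DIN, i.e. 8 * (length % 4), with 0 meaning all 32
 * bits are valid. This lets the peripheral ignore the padding bytes of a
 * partially filled final word.
 */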
static void stm32_hash_set_nblw(struct stm32_hash_dev *hdev, int length)
{
	u32 reg;

	reg = stm32_hash_read(hdev, HASH_STR);
	reg &= ~(HASH_STR_NBLW_MASK);
	reg |= (8U * ((length) % 4U));
	stm32_hash_write(hdev, HASH_STR, reg);
}

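/*
 * Feed the HMAC key to the peripheral word by word over DIN (CPU mode) and
 * kick its processing with DCAL. Returns -EINPROGRESS while the hardware
 * digests the key, or 0 when there was no key to write.
 */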
static int stm32_hash_write_key(struct stm32_hash_dev *hdev)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 reg;
	int keylen = ctx->keylen;
	void *key = ctx->key;

	if (keylen) {
		stm32_hash_set_nblw(hdev, keylen);

		while (keylen > 0) {
			stm32_hash_write(hdev, HASH_DIN, *(u32 *)key);
			keylen -= 4;
			key += 4;
		}

		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);

		return -EINPROGRESS;
	}

	return 0;
}

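/*
 * Configure the control register for the current request: algorithm, data
 * swapping mode, HMAC mode and long-key selection. This is done only once
 * per request, guarded by HASH_FLAGS_INIT.
 */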
static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	u32 reg = HASH_CR_INIT;

	if (!(hdev->flags & HASH_FLAGS_INIT)) {
		switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
		case HASH_FLAGS_MD5:
			reg |= HASH_CR_ALGO_MD5;
			break;
		case HASH_FLAGS_SHA1:
			reg |= HASH_CR_ALGO_SHA1;
			break;
		case HASH_FLAGS_SHA224:
			reg |= HASH_CR_ALGO_SHA224;
			break;
		case HASH_FLAGS_SHA256:
			reg |= HASH_CR_ALGO_SHA256;
			break;
		default:
			reg |= HASH_CR_ALGO_MD5;
		}

		reg |= (rctx->data_type << HASH_CR_DATATYPE_POS);

		if (rctx->flags & HASH_FLAGS_HMAC) {
			hdev->flags |= HASH_FLAGS_HMAC;
			reg |= HASH_CR_MODE;
			if (ctx->keylen > HASH_LONG_KEY)
				reg |= HASH_CR_LKEY;
		}

		stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);

		stm32_hash_write(hdev, HASH_CR, reg);

		hdev->flags |= HASH_FLAGS_INIT;

		dev_dbg(hdev->dev, "Write Control %x\n", reg);
	}
}

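/*
 * Copy request data from the scatterlist into the internal linear buffer
 * until either the buffer is full or the request data is exhausted,
 * advancing rctx->sg/offset/total as it goes.
 */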
static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
{
	size_t count;

	while ((rctx->bufcnt < rctx->buflen) && rctx->total) {
		count = min(rctx->sg->length - rctx->offset, rctx->total);
		count = min(count, rctx->buflen - rctx->bufcnt);

		if (count <= 0) {
			if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
				rctx->sg = sg_next(rctx->sg);
				continue;
			} else {
				break;
			}
		}

		scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, rctx->sg,
					 rctx->offset, count, 0);

		rctx->bufcnt += count;
		rctx->offset += count;
		rctx->total -= count;

		if (rctx->offset == rctx->sg->length) {
			rctx->sg = sg_next(rctx->sg);
			if (rctx->sg)
				rctx->offset = 0;
			else
				rctx->total = 0;
		}
	}
}

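/*
 * Push one buffer of data into DIN from the CPU. For a final block this
 * also programs NBLW and starts the digest with DCAL; for HMAC the key is
 * written once before the data and once more at the end, for the outer
 * hash, as the peripheral's HMAC sequence expects.
 */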
static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
			       const u8 *buf, size_t length, int final)
{
	unsigned int count, len32;
	const u32 *buffer = (const u32 *)buf;
	u32 reg;

	if (final)
		hdev->flags |= HASH_FLAGS_FINAL;

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	dev_dbg(hdev->dev, "%s: length: %zd, final: %x len32 %u\n",
		__func__, length, final, len32);

	hdev->flags |= HASH_FLAGS_CPU;

	stm32_hash_write_ctrl(hdev);

	if (stm32_hash_wait_busy(hdev))
		return -ETIMEDOUT;

	/* Write the HMAC key only once, before the first data block */
	if ((hdev->flags & HASH_FLAGS_HMAC) &&
	    (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) {
		hdev->flags |= HASH_FLAGS_HMAC_KEY;
		stm32_hash_write_key(hdev);
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
	}

	for (count = 0; count < len32; count++)
		stm32_hash_write(hdev, HASH_DIN, buffer[count]);

	if (final) {
		stm32_hash_set_nblw(hdev, length);
		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);
		if (hdev->flags & HASH_FLAGS_HMAC) {
			if (stm32_hash_wait_busy(hdev))
				return -ETIMEDOUT;
			stm32_hash_write_key(hdev);
		}
		return -EINPROGRESS;
	}

	return 0;
}

static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	int bufcnt, err = 0, final;

	dev_dbg(hdev->dev, "%s flags %lx\n", __func__, rctx->flags);

	final = (rctx->flags & HASH_FLAGS_FINUP);

	while ((rctx->total >= rctx->buflen) ||
	       (rctx->bufcnt + rctx->total >= rctx->buflen)) {
		stm32_hash_append_sg(rctx);
		bufcnt = rctx->bufcnt;
		rctx->bufcnt = 0;
		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, 0);
	}

	stm32_hash_append_sg(rctx);

	if (final) {
		bufcnt = rctx->bufcnt;
		rctx->bufcnt = 0;
		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt,
					  (rctx->flags & HASH_FLAGS_FINUP));
	}

	return err;
}

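/*
 * Submit one scatterlist element to the DMA engine, targeting the DIN
 * register, and wait for its completion. MDMAT (multiple DMA transfers)
 * keeps the digest "open" so several DMA bursts can feed one hash; it is
 * cleared for the last chunk so the hardware can finalize.
 */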
static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
			       struct scatterlist *sg, int length, int mdma)
{
	struct dma_async_tx_descriptor *in_desc;
	dma_cookie_t cookie;
	u32 reg;
	int err;

	in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1,
					  DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT |
					  DMA_CTRL_ACK);
	if (!in_desc) {
		dev_err(hdev->dev, "dmaengine_prep_slave error\n");
		return -ENOMEM;
	}

	reinit_completion(&hdev->dma_completion);
	in_desc->callback = stm32_hash_dma_callback;
	in_desc->callback_param = hdev;

	hdev->flags |= HASH_FLAGS_FINAL;
	hdev->flags |= HASH_FLAGS_DMA_ACTIVE;

	reg = stm32_hash_read(hdev, HASH_CR);

	if (mdma)
		reg |= HASH_CR_MDMAT;
	else
		reg &= ~HASH_CR_MDMAT;

	reg |= HASH_CR_DMAE;

	stm32_hash_write(hdev, HASH_CR, reg);

	stm32_hash_set_nblw(hdev, length);

	cookie = dmaengine_submit(in_desc);
	err = dma_submit_error(cookie);
	if (err)
		return -ENOMEM;

	dma_async_issue_pending(hdev->dma_lch);

	if (!wait_for_completion_interruptible_timeout(&hdev->dma_completion,
						       msecs_to_jiffies(100)))
		err = -ETIMEDOUT;

	if (dma_async_is_tx_complete(hdev->dma_lch, cookie,
				     NULL, NULL) != DMA_COMPLETE)
		err = -ETIMEDOUT;

	if (err) {
		dev_err(hdev->dev, "DMA Error %i\n", err);
		dmaengine_terminate_all(hdev->dma_lch);
		return err;
	}

	return -EINPROGRESS;
}

static void stm32_hash_dma_callback(void *param)
{
	struct stm32_hash_dev *hdev = param;

	complete(&hdev->dma_completion);

	hdev->flags |= HASH_FLAGS_DMA_READY;
}

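/*
 * Write the HMAC key while operating in DMA mode. Short keys (or hardware
 * reporting dma_mode == 1) go through the CPU path; longer keys are mapped
 * and sent through the DMA channel like regular data.
 */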
static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	int err;

	if (ctx->keylen < HASH_DMA_THRESHOLD || (hdev->dma_mode == 1)) {
		err = stm32_hash_write_key(hdev);
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
	} else {
		if (!(hdev->flags & HASH_FLAGS_HMAC_KEY))
			sg_init_one(&rctx->sg_key, ctx->key,
				    ALIGN(ctx->keylen, sizeof(u32)));

		rctx->dma_ct = dma_map_sg(hdev->dev, &rctx->sg_key, 1,
					  DMA_TO_DEVICE);
		if (rctx->dma_ct == 0) {
			dev_err(hdev->dev, "dma_map_sg error\n");
			return -ENOMEM;
		}

		err = stm32_hash_xmit_dma(hdev, &rctx->sg_key, ctx->keylen, 0);

		dma_unmap_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE);
	}

	return err;
}

static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
{
	struct dma_slave_config dma_conf;
	int err;

	memset(&dma_conf, 0, sizeof(dma_conf));

	dma_conf.direction = DMA_MEM_TO_DEV;
	dma_conf.dst_addr = hdev->phys_base + HASH_DIN;
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.src_maxburst = hdev->dma_maxburst;
	dma_conf.dst_maxburst = hdev->dma_maxburst;
	dma_conf.device_fc = false;

	hdev->dma_lch = dma_request_slave_channel(hdev->dev, "in");
	if (!hdev->dma_lch) {
		dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
		return -EBUSY;
	}

	err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
	if (err) {
		dma_release_channel(hdev->dma_lch);
		hdev->dma_lch = NULL;
		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
		return err;
	}

	init_completion(&hdev->dma_completion);

	return 0;
}

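/*
 * Drive a whole request through DMA, one scatterlist element at a time.
 * On dma_mode == 1 hardware the DMA-fed portion must be a multiple of
 * 16 bytes, so the unaligned tail (ncp bytes) is copied aside and fed
 * through the CPU path after DMA has been switched off.
 */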
static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct scatterlist sg[1], *tsg;
	int err = 0, len = 0, reg, ncp = 0;
	unsigned int i;
	u32 *buffer = (void *)rctx->buffer;

	rctx->sg = hdev->req->src;
	rctx->total = hdev->req->nbytes;

	rctx->nents = sg_nents(rctx->sg);

	if (rctx->nents < 0)
		return -EINVAL;

	stm32_hash_write_ctrl(hdev);

	if (hdev->flags & HASH_FLAGS_HMAC) {
		err = stm32_hash_hmac_dma_send(hdev);
		if (err != -EINPROGRESS)
			return err;
	}

	for_each_sg(rctx->sg, tsg, rctx->nents, i) {
		/* Copy the element first so len reflects the current entry */
		sg[0] = *tsg;
		len = sg->length;

		if (sg_is_last(sg)) {
			if (hdev->dma_mode == 1) {
				len = (ALIGN(sg->length, 16) - 16);

				ncp = sg_pcopy_to_buffer(
					rctx->sg, rctx->nents,
					rctx->buffer, sg->length - len,
					rctx->total - sg->length + len);

				sg->length = len;
			} else {
				if (!(IS_ALIGNED(sg->length, sizeof(u32)))) {
					len = sg->length;
					sg->length = ALIGN(sg->length,
							   sizeof(u32));
				}
			}
		}

		rctx->dma_ct = dma_map_sg(hdev->dev, sg, 1,
					  DMA_TO_DEVICE);
		if (rctx->dma_ct == 0) {
			dev_err(hdev->dev, "dma_map_sg error\n");
			return -ENOMEM;
		}

		err = stm32_hash_xmit_dma(hdev, sg, len,
					  !sg_is_last(sg));

		dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);

		if (err == -ENOMEM)
			return err;
	}

	if (hdev->dma_mode == 1) {
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
		reg = stm32_hash_read(hdev, HASH_CR);
		reg &= ~HASH_CR_DMAE;
		reg |= HASH_CR_DMAA;
		stm32_hash_write(hdev, HASH_CR, reg);

		if (ncp) {
			/* Zero-pad the copied tail up to a full 32-bit word */
			memset(rctx->buffer + ncp, 0,
			       ALIGN(ncp, sizeof(u32)) - ncp);
			writesl(hdev->io_base + HASH_DIN, buffer,
				DIV_ROUND_UP(ncp, sizeof(u32)));
		}
		/* NBLW is derived from the byte count of the residue */
		stm32_hash_set_nblw(hdev, ncp);
		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);
		err = -EINPROGRESS;
	}

	if (hdev->flags & HASH_FLAGS_HMAC) {
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
		err = stm32_hash_hmac_dma_send(hdev);
	}

	return err;
}

static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx)
{
	struct stm32_hash_dev *hdev = NULL, *tmp;

	spin_lock_bh(&stm32_hash.lock);
	if (!ctx->hdev) {
		list_for_each_entry(tmp, &stm32_hash.dev_list, list) {
			hdev = tmp;
			break;
		}
		ctx->hdev = hdev;
	} else {
		hdev = ctx->hdev;
	}

	spin_unlock_bh(&stm32_hash.lock);

	return hdev;
}

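/*
 * Decide whether a request is worth (and safe) handing to DMA: big enough
 * to beat the setup cost, word-aligned, and not multi-element on hardware
 * that only supports a single DMA transfer (dma_mode == 1).
 */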
static bool stm32_hash_dma_aligned_data(struct ahash_request *req)
{
	struct scatterlist *sg;
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	int i;

	if (req->nbytes <= HASH_DMA_THRESHOLD)
		return false;

	if (sg_nents(req->src) > 1) {
		if (hdev->dma_mode == 1)
			return false;
		for_each_sg(req->src, sg, sg_nents(req->src), i) {
			if ((!IS_ALIGNED(sg->length, sizeof(u32))) &&
			    (!sg_is_last(sg)))
				return false;
		}
	}

	if (req->src->offset % 4)
		return false;

	return true;
}

static int stm32_hash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);

	rctx->hdev = hdev;

	rctx->flags = HASH_FLAGS_CPU;

	rctx->digcnt = crypto_ahash_digestsize(tfm);
	switch (rctx->digcnt) {
	case MD5_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_MD5;
		break;
	case SHA1_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_SHA1;
		break;
	case SHA224_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_SHA224;
		break;
	case SHA256_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_SHA256;
		break;
	default:
		return -EINVAL;
	}

	rctx->bufcnt = 0;
	rctx->buflen = HASH_BUFLEN;
	rctx->total = 0;
	rctx->offset = 0;
	rctx->data_type = HASH_DATA_8_BITS;

	memset(rctx->buffer, 0, HASH_BUFLEN);

	if (ctx->flags & HASH_FLAGS_HMAC)
		rctx->flags |= HASH_FLAGS_HMAC;

	dev_dbg(hdev->dev, "%s Flags %lx\n", __func__, rctx->flags);

	return 0;
}

static int stm32_hash_update_req(struct stm32_hash_dev *hdev)
{
	return stm32_hash_update_cpu(hdev);
}

static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
{
	struct ahash_request *req = hdev->req;
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	int err;

	if (!(rctx->flags & HASH_FLAGS_CPU))
		err = stm32_hash_dma_send(hdev);
	else
		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, rctx->bufcnt, 1);

	rctx->bufcnt = 0;

	return err;
}

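/*
 * Read the message digest out of the HR registers; each 32-bit word is
 * byte-swapped with be32_to_cpu() to land in the byte order the digest is
 * defined in.
 */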
static void stm32_hash_copy_hash(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)rctx->digest;
	unsigned int i, hashsize;

	switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
	case HASH_FLAGS_MD5:
		hashsize = MD5_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA1:
		hashsize = SHA1_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA224:
		hashsize = SHA224_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA256:
		hashsize = SHA256_DIGEST_SIZE;
		break;
	default:
		return;
	}

	for (i = 0; i < hashsize / sizeof(u32); i++)
		hash[i] = be32_to_cpu(stm32_hash_read(rctx->hdev,
						      HASH_HREG(i)));
}

static int stm32_hash_finish(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (!req->result)
		return -EINVAL;

	memcpy(req->result, rctx->digest, rctx->digcnt);

	return 0;
}

static void stm32_hash_finish_req(struct ahash_request *req, int err)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = rctx->hdev;

	if (!err && (HASH_FLAGS_FINAL & hdev->flags)) {
		stm32_hash_copy_hash(req);
		err = stm32_hash_finish(req);
		hdev->flags &= ~(HASH_FLAGS_FINAL | HASH_FLAGS_CPU |
				 HASH_FLAGS_INIT | HASH_FLAGS_DMA_READY |
				 HASH_FLAGS_OUTPUT_READY | HASH_FLAGS_HMAC |
				 HASH_FLAGS_HMAC_INIT | HASH_FLAGS_HMAC_FINAL |
				 HASH_FLAGS_HMAC_KEY);
	} else {
		rctx->flags |= HASH_FLAGS_ERRORS;
	}

	crypto_finalize_hash_request(hdev->engine, req, err);
}

static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
			      struct stm32_hash_request_ctx *rctx)
{
	if (!(HASH_FLAGS_INIT & hdev->flags)) {
		stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
		stm32_hash_write(hdev, HASH_STR, 0);
		stm32_hash_write(hdev, HASH_DIN, 0);
		stm32_hash_write(hdev, HASH_IMR, 0);
		hdev->err = 0;
	}

	return 0;
}

static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev,
				   struct ahash_request *req)
{
	return crypto_transfer_hash_request_to_engine(hdev->engine, req);
}

static int stm32_hash_prepare_req(struct crypto_engine *engine,
				  struct ahash_request *req)
{
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	struct stm32_hash_request_ctx *rctx;

	if (!hdev)
		return -ENODEV;

	hdev->req = req;

	rctx = ahash_request_ctx(req);

	dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n",
		rctx->op, req->nbytes);

	return stm32_hash_hw_init(hdev, rctx);
}

static int stm32_hash_one_request(struct crypto_engine *engine,
				  struct ahash_request *req)
{
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	struct stm32_hash_request_ctx *rctx;
	int err = 0;

	if (!hdev)
		return -ENODEV;

	hdev->req = req;

	rctx = ahash_request_ctx(req);

	if (rctx->op == HASH_OP_UPDATE)
		err = stm32_hash_update_req(hdev);
	else if (rctx->op == HASH_OP_FINAL)
		err = stm32_hash_final_req(hdev);

	/* The done task will not finish a synchronous result, so do it here */
	if (err != -EINPROGRESS)
		stm32_hash_finish_req(req, err);

	return 0;
}

static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct stm32_hash_dev *hdev = ctx->hdev;

	rctx->op = op;

	return stm32_hash_handle_queue(hdev, req);
}

static int stm32_hash_update(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (!req->nbytes || !(rctx->flags & HASH_FLAGS_CPU))
		return 0;

	rctx->total = req->nbytes;
	rctx->sg = req->src;
	rctx->offset = 0;

	if ((rctx->bufcnt + rctx->total < rctx->buflen)) {
		stm32_hash_append_sg(rctx);
		return 0;
	}

	return stm32_hash_enqueue(req, HASH_OP_UPDATE);
}

static int stm32_hash_final(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);

	rctx->flags |= HASH_FLAGS_FINUP;

	return stm32_hash_enqueue(req, HASH_OP_FINAL);
}

static int stm32_hash_finup(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	int err1, err2;

	rctx->flags |= HASH_FLAGS_FINUP;

	if (hdev->dma_lch && stm32_hash_dma_aligned_data(req))
		rctx->flags &= ~HASH_FLAGS_CPU;

	err1 = stm32_hash_update(req);

	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;

	/*
	 * final() always has to be called to clean up resources even if
	 * update() failed, except when it returned -EINPROGRESS.
	 */
	err2 = stm32_hash_final(req);

	return err1 ?: err2;
}

static int stm32_hash_digest(struct ahash_request *req)
{
	return stm32_hash_init(req) ?: stm32_hash_finup(req);
}

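/*
 * Save the hardware context (IMR, STR, CR plus the CSR context-swap
 * registers) so an in-progress hash can be suspended and later resumed by
 * stm32_hash_import(). The register snapshot travels inside the exported
 * request context.
 */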
static int stm32_hash_export(struct ahash_request *req, void *out)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	u32 *preg;
	unsigned int i;

	while (!(stm32_hash_read(hdev, HASH_SR) & HASH_SR_DATA_INPUT_READY))
		cpu_relax();

	rctx->hw_context = kmalloc(sizeof(u32) * (3 + HASH_CSR_REGISTER_NUMBER),
				   GFP_KERNEL);
	if (!rctx->hw_context)
		return -ENOMEM;

	preg = rctx->hw_context;

	*preg++ = stm32_hash_read(hdev, HASH_IMR);
	*preg++ = stm32_hash_read(hdev, HASH_STR);
	*preg++ = stm32_hash_read(hdev, HASH_CR);
	for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
		*preg++ = stm32_hash_read(hdev, HASH_CSR(i));

	memcpy(out, rctx, sizeof(*rctx));

	return 0;
}

static int stm32_hash_import(struct ahash_request *req, const void *in)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	const u32 *preg = in;
	u32 reg;
	unsigned int i;

	memcpy(rctx, in, sizeof(*rctx));

	preg = rctx->hw_context;

	stm32_hash_write(hdev, HASH_IMR, *preg++);
	stm32_hash_write(hdev, HASH_STR, *preg++);
	stm32_hash_write(hdev, HASH_CR, *preg);
	reg = *preg++ | HASH_CR_INIT;
	stm32_hash_write(hdev, HASH_CR, reg);

	for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
		stm32_hash_write(hdev, HASH_CSR(i), *preg++);

	kfree(rctx->hw_context);

	return 0;
}

static int stm32_hash_setkey(struct crypto_ahash *tfm,
			     const u8 *key, unsigned int keylen)
{
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	if (keylen <= HASH_MAX_KEY_SIZE) {
		memcpy(ctx->key, key, keylen);
		ctx->keylen = keylen;
	} else {
		return -ENOMEM;
	}

	return 0;
}

static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
				    const char *algs_hmac_name)
{
	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct stm32_hash_request_ctx));

	ctx->keylen = 0;

	if (algs_hmac_name)
		ctx->flags |= HASH_FLAGS_HMAC;

	return 0;
}

static int stm32_hash_cra_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, NULL);
}

static int stm32_hash_cra_md5_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "md5");
}

static int stm32_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha1");
}

static int stm32_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha224");
}

static int stm32_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha256");
}

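/*
 * Interrupt handling is split: the hard handler only acknowledges the
 * "digest ready" status and wakes the threaded handler, which finalizes
 * the request outside of hard-IRQ context.
 */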
static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
{
	struct stm32_hash_dev *hdev = dev_id;

	if (HASH_FLAGS_CPU & hdev->flags) {
		if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (HASH_FLAGS_DMA_READY & hdev->flags) {
		if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) {
			hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE;
			goto finish;
		}
	}

	return IRQ_HANDLED;

finish:
	/* Finish current request */
	stm32_hash_finish_req(hdev->req, 0);

	return IRQ_HANDLED;
}

static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
{
	struct stm32_hash_dev *hdev = dev_id;
	u32 reg;

	reg = stm32_hash_read(hdev, HASH_SR);
	if (reg & HASH_SR_OUTPUT_READY) {
		reg &= ~HASH_SR_OUTPUT_READY;
		stm32_hash_write(hdev, HASH_SR, reg);
		hdev->flags |= HASH_FLAGS_OUTPUT_READY;
		return IRQ_WAKE_THREAD;
	}

	return IRQ_NONE;
}

static struct ahash_alg algs_md5_sha1[] = {
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "stm32-md5",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "stm32-hmac-md5",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_md5_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "stm32-sha1",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "stm32-hmac-sha1",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha1_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
};

static struct ahash_alg algs_sha224_sha256[] = {
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "stm32-sha224",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.setkey = stm32_hash_setkey,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "stm32-hmac-sha224",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha224_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "stm32-sha256",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "stm32-hmac-sha256",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha256_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
};

static int stm32_hash_register_algs(struct stm32_hash_dev *hdev)
{
	unsigned int i, j;
	int err;

	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
		for (j = 0; j < hdev->pdata->algs_info[i].size; j++) {
			err = crypto_register_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);
			if (err)
				goto err_algs;
		}
	}

	return 0;
err_algs:
	dev_err(hdev->dev, "Algo %d : %d failed\n", i, j);
	/* Unregister the partially registered list first... */
	while (j--)
		crypto_unregister_ahash(
			&hdev->pdata->algs_info[i].algs_list[j]);
	/* ...then every fully registered earlier list */
	while (i--)
		for (j = hdev->pdata->algs_info[i].size; j--;)
			crypto_unregister_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);

	return err;
}

static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev)
{
	unsigned int i, j;

	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
		for (j = 0; j < hdev->pdata->algs_info[i].size; j++)
			crypto_unregister_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);
	}

	return 0;
}

static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = {
	{
		.algs_list	= algs_md5_sha1,
		.size		= ARRAY_SIZE(algs_md5_sha1),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = {
	.algs_info	= stm32_hash_algs_info_stm32f4,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f4),
};

static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = {
	{
		.algs_list	= algs_md5_sha1,
		.size		= ARRAY_SIZE(algs_md5_sha1),
	},
	{
		.algs_list	= algs_sha224_sha256,
		.size		= ARRAY_SIZE(algs_sha224_sha256),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = {
	.algs_info	= stm32_hash_algs_info_stm32f7,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f7),
};

static const struct of_device_id stm32_hash_of_match[] = {
	{
		.compatible = "st,stm32f456-hash",
		.data = &stm32_hash_pdata_stm32f4,
	},
	{
		.compatible = "st,stm32f756-hash",
		.data = &stm32_hash_pdata_stm32f7,
	},
	{},
};

MODULE_DEVICE_TABLE(of, stm32_hash_of_match);

static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
				   struct device *dev)
{
	int err;

	hdev->pdata = of_device_get_match_data(dev);
	if (!hdev->pdata) {
		dev_err(dev, "no compatible OF match\n");
		return -EINVAL;
	}

	err = of_property_read_u32(dev->of_node, "dma-maxburst",
				   &hdev->dma_maxburst);

	return err;
}

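/*
 * Probe sequence: map the registers, wire up the IRQ, enable the clock,
 * reset the block, optionally grab a DMA channel, then register the device
 * on the crypto engine and expose the supported algorithms.
 */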
static int stm32_hash_probe(struct platform_device *pdev)
{
	struct stm32_hash_dev *hdev;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret, irq;

	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hdev->io_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(hdev->io_base))
		return PTR_ERR(hdev->io_base);

	hdev->phys_base = res->start;

	ret = stm32_hash_get_of_match(hdev, dev);
	if (ret)
		return ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "Cannot get IRQ resource\n");
		return irq;
	}

	ret = devm_request_threaded_irq(dev, irq, stm32_hash_irq_handler,
					stm32_hash_irq_thread, IRQF_ONESHOT,
					dev_name(dev), hdev);
	if (ret) {
		dev_err(dev, "Cannot grab IRQ\n");
		return ret;
	}

	hdev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(hdev->clk)) {
		dev_err(dev, "failed to get clock for hash (%ld)\n",
			PTR_ERR(hdev->clk));
		return PTR_ERR(hdev->clk);
	}

	ret = clk_prepare_enable(hdev->clk);
	if (ret) {
		dev_err(dev, "failed to enable hash clock (%d)\n", ret);
		return ret;
	}

	hdev->rst = devm_reset_control_get(&pdev->dev, NULL);
	if (!IS_ERR(hdev->rst)) {
		reset_control_assert(hdev->rst);
		udelay(2);
		reset_control_deassert(hdev->rst);
	}

	hdev->dev = dev;

	platform_set_drvdata(pdev, hdev);

	ret = stm32_hash_dma_init(hdev);
	if (ret)
		dev_dbg(dev, "DMA mode not available\n");

	spin_lock(&stm32_hash.lock);
	list_add_tail(&hdev->list, &stm32_hash.dev_list);
	spin_unlock(&stm32_hash.lock);

	/* Initialize crypto engine */
	hdev->engine = crypto_engine_alloc_init(dev, 1);
	if (!hdev->engine) {
		ret = -ENOMEM;
		goto err_engine;
	}

	hdev->engine->prepare_hash_request = stm32_hash_prepare_req;
	hdev->engine->hash_one_request = stm32_hash_one_request;

	ret = crypto_engine_start(hdev->engine);
	if (ret)
		goto err_engine_start;

	hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR);

	/* Register algos */
	ret = stm32_hash_register_algs(hdev);
	if (ret)
		goto err_algs;

	dev_info(dev, "Init HASH done HW ver %x DMA mode %u\n",
		 stm32_hash_read(hdev, HASH_VER), hdev->dma_mode);

	return 0;

err_algs:
err_engine_start:
	crypto_engine_exit(hdev->engine);
err_engine:
	spin_lock(&stm32_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&stm32_hash.lock);

	if (hdev->dma_lch)
		dma_release_channel(hdev->dma_lch);

	clk_disable_unprepare(hdev->clk);

	return ret;
}

static int stm32_hash_remove(struct platform_device *pdev)
{
	struct stm32_hash_dev *hdev;

	hdev = platform_get_drvdata(pdev);
	if (!hdev)
		return -ENODEV;

	stm32_hash_unregister_algs(hdev);

	crypto_engine_exit(hdev->engine);

	spin_lock(&stm32_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&stm32_hash.lock);

	if (hdev->dma_lch)
		dma_release_channel(hdev->dma_lch);

	clk_disable_unprepare(hdev->clk);

	return 0;
}

static struct platform_driver stm32_hash_driver = {
	.probe		= stm32_hash_probe,
	.remove		= stm32_hash_remove,
	.driver		= {
		.name	= "stm32-hash",
		.of_match_table	= stm32_hash_of_match,
	}
};

module_platform_driver(stm32_hash_driver);

MODULE_DESCRIPTION("STM32 SHA1/224/256 & MD5 (HMAC) hw accelerator driver");
MODULE_AUTHOR("Lionel Debieve <lionel.debieve@st.com>");
MODULE_LICENSE("GPL v2");