// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 Imagination Technologies
 * Authors: Will Thomas, James Hartley
 *
 * Interface structure taken from omap-sham driver
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>

#define CR_RESET			0
#define CR_RESET_SET			1
#define CR_RESET_UNSET			0

#define CR_MESSAGE_LENGTH_H		0x4
#define CR_MESSAGE_LENGTH_L		0x8

#define CR_CONTROL			0xc
#define CR_CONTROL_BYTE_ORDER_3210	0
#define CR_CONTROL_BYTE_ORDER_0123	1
#define CR_CONTROL_BYTE_ORDER_2310	2
#define CR_CONTROL_BYTE_ORDER_1032	3
#define CR_CONTROL_BYTE_ORDER_SHIFT	8
#define CR_CONTROL_ALGO_MD5		0
#define CR_CONTROL_ALGO_SHA1		1
#define CR_CONTROL_ALGO_SHA224		2
#define CR_CONTROL_ALGO_SHA256		3

#define CR_INTSTAT			0x10
#define CR_INTENAB			0x14
#define CR_INTCLEAR			0x18
#define CR_INT_RESULTS_AVAILABLE	BIT(0)
#define CR_INT_NEW_RESULTS_SET		BIT(1)
#define CR_INT_RESULT_READ_ERR		BIT(2)
#define CR_INT_MESSAGE_WRITE_ERROR	BIT(3)
#define CR_INT_STATUS			BIT(8)

#define CR_RESULT_QUEUE			0x1c
#define CR_CORE_REV			0x50
#define CR_CORE_DES1			0x60
#define CR_CORE_DES2			0x70

#define DRIVER_FLAGS_BUSY		BIT(0)
#define DRIVER_FLAGS_FINAL		BIT(1)
#define DRIVER_FLAGS_DMA_ACTIVE		BIT(2)
#define DRIVER_FLAGS_OUTPUT_READY	BIT(3)
#define DRIVER_FLAGS_INIT		BIT(4)
#define DRIVER_FLAGS_CPU		BIT(5)
#define DRIVER_FLAGS_DMA_READY		BIT(6)
#define DRIVER_FLAGS_ERROR		BIT(7)
#define DRIVER_FLAGS_SG			BIT(8)
#define DRIVER_FLAGS_SHA1		BIT(18)
#define DRIVER_FLAGS_SHA224		BIT(19)
#define DRIVER_FLAGS_SHA256		BIT(20)
#define DRIVER_FLAGS_MD5		BIT(21)

#define IMG_HASH_QUEUE_LENGTH		20
#define IMG_HASH_DMA_BURST		4
#define IMG_HASH_DMA_THRESHOLD		64

#ifdef __LITTLE_ENDIAN
#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_3210
#else
#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_0123
#endif

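/*
 * Per-request state. The trailing zero-length buffer collects data that is
 * pushed through the CPU write port: either a whole small request, or the
 * sub-word remainder left over from a DMA transfer.
 */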
struct img_hash_request_ctx {
	struct img_hash_dev	*hdev;
	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
	unsigned long		flags;
	size_t			digsize;

	/* scatterlist walk state */
	struct scatterlist	*sgfirst;
	struct scatterlist	*sg;
	size_t			nents;
	size_t			offset;
	unsigned int		total;
	size_t			sent;

	unsigned long		op;

	size_t			bufcnt;
	int			dma_ct;
	struct ahash_request	fallback_req;

	/* Zero length buffer must remain last member of struct */
	u8 buffer[] __aligned(sizeof(u32));
};

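/*
 * Per-transform context. The software fallback handles init/update/final/
 * finup/import/export; only whole-request digest() calls are offloaded to
 * the hardware.
 */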
struct img_hash_ctx {
	struct img_hash_dev	*hdev;
	struct crypto_ahash	*fallback;
};

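/* Per-device state for one instance of the hash accelerator. */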
struct img_hash_dev {
	struct list_head	list;
	struct device		*dev;
	struct clk		*hash_clk;
	struct clk		*sys_clk;
	void __iomem		*io_base;

	phys_addr_t		bus_addr;
	void __iomem		*cpu_addr;

	spinlock_t		lock;
	int			err;
	struct tasklet_struct	done_task;
	struct tasklet_struct	dma_task;

	unsigned long		flags;
	struct crypto_queue	queue;
	struct ahash_request	*req;

	struct dma_chan		*dma_lch;
};

struct img_hash_drv {
	struct list_head dev_list;
	spinlock_t lock;
};

static struct img_hash_drv img_hash = {
	.dev_list = LIST_HEAD_INIT(img_hash.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(img_hash.lock),
};

static inline u32 img_hash_read(struct img_hash_dev *hdev, u32 offset)
{
	return readl_relaxed(hdev->io_base + offset);
}

static inline void img_hash_write(struct img_hash_dev *hdev,
				  u32 offset, u32 value)
{
	writel_relaxed(value, hdev->io_base + offset);
}

static inline u32 img_hash_read_result_queue(struct img_hash_dev *hdev)
{
	return be32_to_cpu(img_hash_read(hdev, CR_RESULT_QUEUE));
}

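/*
 * Program CR_CONTROL with the byte order and the algorithm selected for the
 * current request, then (in CPU mode) read the register back so the first
 * data write cannot be merged with the control write.
 */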
static void img_hash_start(struct img_hash_dev *hdev, bool dma)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
	u32 cr = IMG_HASH_BYTE_ORDER << CR_CONTROL_BYTE_ORDER_SHIFT;

	if (ctx->flags & DRIVER_FLAGS_MD5)
		cr |= CR_CONTROL_ALGO_MD5;
	else if (ctx->flags & DRIVER_FLAGS_SHA1)
		cr |= CR_CONTROL_ALGO_SHA1;
	else if (ctx->flags & DRIVER_FLAGS_SHA224)
		cr |= CR_CONTROL_ALGO_SHA224;
	else if (ctx->flags & DRIVER_FLAGS_SHA256)
		cr |= CR_CONTROL_ALGO_SHA256;
	dev_dbg(hdev->dev, "Starting hash process\n");
	img_hash_write(hdev, CR_CONTROL, cr);

	/*
	 * The hardware block requires two cycles between writing the control
	 * register and writing the first word of data in non-DMA mode. To
	 * ensure the first data write is not grouped in a burst with the
	 * control register write, a read is issued to 'flush' the bus.
	 */
	if (!dma)
		img_hash_read(hdev, CR_CONTROL);
}

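/*
 * Push @length bytes to the accelerator's write port with the CPU, one
 * 32-bit word at a time. Any excess bytes in the final word are ignored by
 * the hardware because the exact message length is programmed separately.
 */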
static int img_hash_xmit_cpu(struct img_hash_dev *hdev, const u8 *buf,
			     size_t length, int final)
{
	u32 count, len32;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(hdev->dev, "xmit_cpu: length: %zu bytes\n", length);

	if (final)
		hdev->flags |= DRIVER_FLAGS_FINAL;

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	for (count = 0; count < len32; count++)
		writel_relaxed(buffer[count], hdev->cpu_addr);

	return -EINPROGRESS;
}

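/*
 * DMA completion callback: flush any buffered sub-word remainder through
 * the CPU port, then reschedule the DMA tasklet if more scatterlist
 * entries remain.
 */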
static void img_hash_dma_callback(void *data)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)data;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	if (ctx->bufcnt) {
		img_hash_xmit_cpu(hdev, ctx->buffer, ctx->bufcnt, 0);
		ctx->bufcnt = 0;
	}
	if (ctx->sg)
		tasklet_schedule(&hdev->dma_task);
}

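/*
 * Map a single scatterlist entry and submit it to the slave DMA channel as
 * a mem-to-dev transfer targeting the accelerator's write port.
 */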
static int img_hash_xmit_dma(struct img_hash_dev *hdev, struct scatterlist *sg)
{
	struct dma_async_tx_descriptor *desc;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	ctx->dma_ct = dma_map_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
	if (ctx->dma_ct == 0) {
		dev_err(hdev->dev, "Invalid DMA sg\n");
		hdev->err = -EINVAL;
		return -EINVAL;
	}

	desc = dmaengine_prep_slave_sg(hdev->dma_lch,
				       sg,
				       ctx->dma_ct,
				       DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(hdev->dev, "Null DMA descriptor\n");
		hdev->err = -EINVAL;
		dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
		return -EINVAL;
	}
	desc->callback = img_hash_dma_callback;
	desc->callback_param = hdev;
	dmaengine_submit(desc);
	dma_async_issue_pending(hdev->dma_lch);

	return 0;
}

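/*
 * CPU-only path for requests below IMG_HASH_DMA_THRESHOLD: copy the whole
 * source scatterlist into the request buffer and feed it to the write port.
 */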
static int img_hash_write_via_cpu(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	ctx->bufcnt = sg_copy_to_buffer(hdev->req->src, sg_nents(ctx->sg),
					ctx->buffer, hdev->req->nbytes);

	ctx->total = hdev->req->nbytes;

	hdev->flags |= (DRIVER_FLAGS_CPU | DRIVER_FLAGS_FINAL);

	img_hash_start(hdev, false);

	return img_hash_xmit_cpu(hdev, ctx->buffer, ctx->total, 1);
}

static int img_hash_finish(struct ahash_request *req)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);

	memcpy(req->result, ctx->digest, ctx->digsize);

	return 0;
}

static void img_hash_copy_hash(struct ahash_request *req)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)ctx->digest;
	int i;

	for (i = (ctx->digsize / sizeof(u32)) - 1; i >= 0; i--)
		hash[i] = img_hash_read_result_queue(ctx->hdev);
}

static void img_hash_finish_req(struct ahash_request *req, int err)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	struct img_hash_dev *hdev = ctx->hdev;

	if (!err) {
		img_hash_copy_hash(req);
		if (DRIVER_FLAGS_FINAL & hdev->flags)
			err = img_hash_finish(req);
	} else {
		dev_warn(hdev->dev, "Hash failed with error %d\n", err);
		ctx->flags |= DRIVER_FLAGS_ERROR;
	}

	hdev->flags &= ~(DRIVER_FLAGS_DMA_READY | DRIVER_FLAGS_OUTPUT_READY |
			 DRIVER_FLAGS_CPU | DRIVER_FLAGS_BUSY | DRIVER_FLAGS_FINAL);

	if (req->base.complete)
		req->base.complete(&req->base, err);
}

static int img_hash_write_via_dma(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	img_hash_start(hdev, true);

	dev_dbg(hdev->dev, "xmit dma size: %d\n", ctx->total);

	if (!ctx->total)
		hdev->flags |= DRIVER_FLAGS_FINAL;
	else
		hdev->flags |= DRIVER_FLAGS_DMA_ACTIVE | DRIVER_FLAGS_FINAL;

	tasklet_schedule(&hdev->dma_task);

	return -EINPROGRESS;
}

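/*
 * Request the "tx" DMA channel and configure it for 4-byte writes to the
 * accelerator's bus address, bursting up to IMG_HASH_DMA_BURST words.
 */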
static int img_hash_dma_init(struct img_hash_dev *hdev)
{
	struct dma_slave_config dma_conf;
	int err;

	hdev->dma_lch = dma_request_chan(hdev->dev, "tx");
	if (IS_ERR(hdev->dma_lch)) {
		dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
		return PTR_ERR(hdev->dma_lch);
	}
	dma_conf.direction = DMA_MEM_TO_DEV;
	dma_conf.dst_addr = hdev->bus_addr;
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.dst_maxburst = IMG_HASH_DMA_BURST;
	dma_conf.device_fc = false;

	err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
	if (err) {
		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
		dma_release_channel(hdev->dma_lch);
		return err;
	}

	return 0;
}

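/*
 * DMA tasklet: walk the source scatterlist, sending the word-aligned part
 * of each entry by DMA and collecting the sub-word remainder in ctx->buffer
 * so that no padding bytes are ever hashed (see the comment below on the
 * missing data valid mask).
 */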
static void img_hash_dma_task(unsigned long d)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)d;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
	u8 *addr;
	size_t nbytes, bleft, wsend, len, tbc;
	struct scatterlist tsg;

	if (!hdev->req || !ctx->sg)
		return;

	addr = sg_virt(ctx->sg);
	nbytes = ctx->sg->length - ctx->offset;

	/*
	 * The hash accelerator does not support a data valid mask. This means
	 * that if each dma (i.e. per page) is not a multiple of 4 bytes, the
	 * padding bytes in the last word written by that dma would erroneously
	 * be included in the hash. To avoid this we round down the transfer,
	 * and add the excess to the start of the next dma. It does not matter
	 * that the final dma may not be a multiple of 4 bytes as the hashing
	 * block is programmed to accept the correct number of bytes.
	 */

	bleft = nbytes % 4;
	wsend = (nbytes / 4);

	if (wsend) {
		sg_init_one(&tsg, addr + ctx->offset, wsend * 4);
		if (img_hash_xmit_dma(hdev, &tsg)) {
			dev_err(hdev->dev, "DMA failed, falling back to CPU");
			ctx->flags |= DRIVER_FLAGS_CPU;
			hdev->err = 0;
			img_hash_xmit_cpu(hdev, addr + ctx->offset,
					  wsend * 4, 0);
			ctx->sent += wsend * 4;
			wsend = 0;
		} else {
			ctx->sent += wsend * 4;
		}
	}

	if (bleft) {
		ctx->bufcnt = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
						 ctx->buffer, bleft, ctx->sent);
		tbc = 0;
		ctx->sg = sg_next(ctx->sg);
		while (ctx->sg && (ctx->bufcnt < 4)) {
			len = ctx->sg->length;
			if (likely(len > (4 - ctx->bufcnt)))
				len = 4 - ctx->bufcnt;
			tbc = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
						 ctx->buffer + ctx->bufcnt, len,
						 ctx->sent + ctx->bufcnt);
			ctx->bufcnt += tbc;
			if (tbc >= ctx->sg->length) {
				ctx->sg = sg_next(ctx->sg);
				tbc = 0;
			}
		}

		ctx->sent += ctx->bufcnt;
		ctx->offset = tbc;

		if (!wsend)
			img_hash_dma_callback(hdev);
	} else {
		ctx->offset = 0;
		ctx->sg = sg_next(ctx->sg);
	}
}

static int img_hash_write_via_dma_stop(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	if (ctx->flags & DRIVER_FLAGS_SG)
		dma_unmap_sg(hdev->dev, ctx->sg, ctx->dma_ct, DMA_TO_DEVICE);

	return 0;
}

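/*
 * Pick the transfer path for the current request: DMA for anything at or
 * above IMG_HASH_DMA_THRESHOLD bytes, CPU programmed I/O otherwise.
 */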
static int img_hash_process_data(struct img_hash_dev *hdev)
{
	struct ahash_request *req = hdev->req;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	int err = 0;

	ctx->bufcnt = 0;

	if (req->nbytes >= IMG_HASH_DMA_THRESHOLD) {
		dev_dbg(hdev->dev, "process data request(%d bytes) using DMA\n",
			req->nbytes);
		err = img_hash_write_via_dma(hdev);
	} else {
		dev_dbg(hdev->dev, "process data request(%d bytes) using CPU\n",
			req->nbytes);
		err = img_hash_write_via_cpu(hdev);
	}

	return err;
}

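/*
 * Reset the accelerator, enable the new-results interrupt and program the
 * total message length in bits before any data is written.
 */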
static int img_hash_hw_init(struct img_hash_dev *hdev)
{
	unsigned long long nbits;
	u32 u, l;

	img_hash_write(hdev, CR_RESET, CR_RESET_SET);
	img_hash_write(hdev, CR_RESET, CR_RESET_UNSET);
	img_hash_write(hdev, CR_INTENAB, CR_INT_NEW_RESULTS_SET);

	nbits = (u64)hdev->req->nbytes << 3;
	u = nbits >> 32;
	l = nbits;
	img_hash_write(hdev, CR_MESSAGE_LENGTH_H, u);
	img_hash_write(hdev, CR_MESSAGE_LENGTH_L, l);

	if (!(DRIVER_FLAGS_INIT & hdev->flags)) {
		hdev->flags |= DRIVER_FLAGS_INIT;
		hdev->err = 0;
	}
	dev_dbg(hdev->dev, "hw initialized, nbits: %llx\n", nbits);
	return 0;
}

static int img_hash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

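/*
 * Queue a request on the device and, if the hardware is idle, dequeue the
 * next one, mark the device busy and start processing it. Returns the
 * crypto queue status for the caller.
 */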
static int img_hash_handle_queue(struct img_hash_dev *hdev,
				 struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct img_hash_request_ctx *ctx;
	unsigned long flags;
	int err = 0, res = 0;

	spin_lock_irqsave(&hdev->lock, flags);

	if (req)
		res = ahash_enqueue_request(&hdev->queue, req);

	if (DRIVER_FLAGS_BUSY & hdev->flags) {
		spin_unlock_irqrestore(&hdev->lock, flags);
		return res;
	}

	backlog = crypto_get_backlog(&hdev->queue);
	async_req = crypto_dequeue_request(&hdev->queue);

	if (async_req)
		hdev->flags |= DRIVER_FLAGS_BUSY;

	spin_unlock_irqrestore(&hdev->lock, flags);

	if (!async_req)
		return res;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	hdev->req = req;

	ctx = ahash_request_ctx(req);

	dev_info(hdev->dev, "processing req, op: %lu, bytes: %d\n",
		 ctx->op, req->nbytes);

	err = img_hash_hw_init(hdev);

	if (!err)
		err = img_hash_process_data(hdev);

	if (err != -EINPROGRESS) {
		/* done_task will not finish the request, so do it here */
		img_hash_finish_req(req, err);
	}

	return res;
}

static int img_hash_update(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

static int img_hash_final(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_final(&rctx->fallback_req);
}

static int img_hash_finup(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int img_hash_import(struct ahash_request *req, const void *in)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_import(&rctx->fallback_req, in);
}

static int img_hash_export(struct ahash_request *req, void *out)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_export(&rctx->fallback_req, out);
}

static int img_hash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *tctx = crypto_ahash_ctx(tfm);
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	struct img_hash_dev *hdev = NULL;
	struct img_hash_dev *tmp;
	int err;

	spin_lock(&img_hash.lock);
	if (!tctx->hdev) {
		list_for_each_entry(tmp, &img_hash.dev_list, list) {
			hdev = tmp;
			break;
		}
		tctx->hdev = hdev;
	} else {
		hdev = tctx->hdev;
	}
	spin_unlock(&img_hash.lock);

	ctx->hdev = hdev;
	ctx->flags = 0;
	ctx->digsize = crypto_ahash_digestsize(tfm);

	switch (ctx->digsize) {
	case SHA1_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA1;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA256;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA224;
		break;
	case MD5_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_MD5;
		break;
	default:
		return -EINVAL;
	}

	ctx->bufcnt = 0;
	ctx->offset = 0;
	ctx->sent = 0;
	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->sgfirst = req->src;
	ctx->nents = sg_nents(ctx->sg);

	err = img_hash_handle_queue(tctx->hdev, req);

	return err;
}

static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
{
	struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	ctx->fallback = crypto_alloc_ahash(alg_name, 0,
					   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback)) {
		pr_err("img_hash: Could not load fallback driver.\n");
		err = PTR_ERR(ctx->fallback);
		return err;
	}
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct img_hash_request_ctx) +
				 crypto_ahash_reqsize(ctx->fallback) +
				 IMG_HASH_DMA_THRESHOLD);

	return 0;
}

static int img_hash_cra_md5_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "md5-generic");
}

static int img_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "sha1-generic");
}

static int img_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "sha224-generic");
}

static int img_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "sha256-generic");
}

static void img_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct img_hash_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_ahash(tctx->fallback);
}

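/*
 * Interrupt handler: acknowledge whatever is pending in CR_INTSTAT and, on
 * a new-results interrupt for an active request, hand completion over to
 * the done tasklet.
 */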
static irqreturn_t img_irq_handler(int irq, void *dev_id)
{
	struct img_hash_dev *hdev = dev_id;
	u32 reg;

	reg = img_hash_read(hdev, CR_INTSTAT);
	img_hash_write(hdev, CR_INTCLEAR, reg);

	if (reg & CR_INT_NEW_RESULTS_SET) {
		dev_dbg(hdev->dev, "IRQ CR_INT_NEW_RESULTS_SET\n");
		if (DRIVER_FLAGS_BUSY & hdev->flags) {
			hdev->flags |= DRIVER_FLAGS_OUTPUT_READY;
			if (!(DRIVER_FLAGS_CPU & hdev->flags))
				hdev->flags |= DRIVER_FLAGS_DMA_READY;
			tasklet_schedule(&hdev->done_task);
		} else {
			dev_warn(hdev->dev,
				 "HASH interrupt when no active requests.\n");
		}
	} else if (reg & CR_INT_RESULTS_AVAILABLE) {
		dev_warn(hdev->dev,
			 "IRQ triggered before the hash had completed\n");
	} else if (reg & CR_INT_RESULT_READ_ERR) {
		dev_warn(hdev->dev,
			 "Attempt to read from an empty result queue\n");
	} else if (reg & CR_INT_MESSAGE_WRITE_ERROR) {
		dev_warn(hdev->dev,
			 "Data written before the hardware was configured\n");
	}
	return IRQ_HANDLED;
}

static struct ahash_alg img_algs[] = {
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct md5_state),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "img-md5",
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_md5_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct sha1_state),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "img-sha1",
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_sha1_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct sha256_state),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "img-sha224",
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_sha224_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct sha256_state),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "img-sha256",
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_sha256_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	}
};

static int img_register_algs(struct img_hash_dev *hdev)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < ARRAY_SIZE(img_algs); i++) {
		err = crypto_register_ahash(&img_algs[i]);
		if (err)
			goto err_reg;
	}
	return 0;

err_reg:
	for (; i--; )
		crypto_unregister_ahash(&img_algs[i]);

	return err;
}

static int img_unregister_algs(struct img_hash_dev *hdev)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(img_algs); i++)
		crypto_unregister_ahash(&img_algs[i]);
	return 0;
}

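/*
 * Completion tasklet: when the device is idle, pull the next request off
 * the queue; otherwise clear the ready flags for the CPU or DMA path, stop
 * any active DMA, and complete the current request.
 */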
static void img_hash_done_task(unsigned long data)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)data;
	int err = 0;

	if (hdev->err == -EINVAL) {
		err = hdev->err;
		goto finish;
	}

	if (!(DRIVER_FLAGS_BUSY & hdev->flags)) {
		img_hash_handle_queue(hdev, NULL);
		return;
	}

	if (DRIVER_FLAGS_CPU & hdev->flags) {
		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~DRIVER_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (DRIVER_FLAGS_DMA_READY & hdev->flags) {
		if (DRIVER_FLAGS_DMA_ACTIVE & hdev->flags) {
			hdev->flags &= ~DRIVER_FLAGS_DMA_ACTIVE;
			img_hash_write_via_dma_stop(hdev);
			if (hdev->err) {
				err = hdev->err;
				goto finish;
			}
		}
		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~(DRIVER_FLAGS_DMA_READY |
					 DRIVER_FLAGS_OUTPUT_READY);
			goto finish;
		}
	}
	return;

finish:
	img_hash_finish_req(hdev->req, err);
}

static const struct of_device_id img_hash_match[] = {
	{ .compatible = "img,hash-accelerator" },
	{}
};
MODULE_DEVICE_TABLE(of, img_hash_match);

static int img_hash_probe(struct platform_device *pdev)
{
	struct img_hash_dev *hdev;
	struct device *dev = &pdev->dev;
	struct resource *hash_res;
	int irq;
	int err;

	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	spin_lock_init(&hdev->lock);

	hdev->dev = dev;

	platform_set_drvdata(pdev, hdev);

	INIT_LIST_HEAD(&hdev->list);

	tasklet_init(&hdev->done_task, img_hash_done_task, (unsigned long)hdev);
	tasklet_init(&hdev->dma_task, img_hash_dma_task, (unsigned long)hdev);

	crypto_init_queue(&hdev->queue, IMG_HASH_QUEUE_LENGTH);

	/* Register bank */
	hdev->io_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(hdev->io_base)) {
		err = PTR_ERR(hdev->io_base);
		dev_err(dev, "can't ioremap, returned %d\n", err);
		goto res_err;
	}

	/* Write port (DMA or CPU) */
	hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	hdev->cpu_addr = devm_ioremap_resource(dev, hash_res);
	if (IS_ERR(hdev->cpu_addr)) {
		dev_err(dev, "can't ioremap write port\n");
		err = PTR_ERR(hdev->cpu_addr);
		goto res_err;
	}
	hdev->bus_addr = hash_res->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = irq;
		goto res_err;
	}

	err = devm_request_irq(dev, irq, img_irq_handler, 0,
			       dev_name(dev), hdev);
	if (err) {
		dev_err(dev, "unable to request irq\n");
		goto res_err;
	}
	dev_dbg(dev, "using IRQ channel %d\n", irq);

	hdev->hash_clk = devm_clk_get(&pdev->dev, "hash");
	if (IS_ERR(hdev->hash_clk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(hdev->hash_clk);
		goto res_err;
	}

	hdev->sys_clk = devm_clk_get(&pdev->dev, "sys");
	if (IS_ERR(hdev->sys_clk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(hdev->sys_clk);
		goto res_err;
	}

	err = clk_prepare_enable(hdev->hash_clk);
	if (err)
		goto res_err;

	err = clk_prepare_enable(hdev->sys_clk);
	if (err)
		goto clk_err;

	err = img_hash_dma_init(hdev);
	if (err)
		goto dma_err;

	dev_dbg(dev, "using %s for DMA transfers\n",
		dma_chan_name(hdev->dma_lch));

	spin_lock(&img_hash.lock);
	list_add_tail(&hdev->list, &img_hash.dev_list);
	spin_unlock(&img_hash.lock);

	err = img_register_algs(hdev);
	if (err)
		goto err_algs;
	dev_info(dev, "Img MD5/SHA1/SHA224/SHA256 Hardware accelerator initialized\n");

	return 0;

err_algs:
	spin_lock(&img_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&img_hash.lock);
	dma_release_channel(hdev->dma_lch);
dma_err:
	clk_disable_unprepare(hdev->sys_clk);
clk_err:
	clk_disable_unprepare(hdev->hash_clk);
res_err:
	tasklet_kill(&hdev->done_task);
	tasklet_kill(&hdev->dma_task);

	return err;
}

static int img_hash_remove(struct platform_device *pdev)
{
	struct img_hash_dev *hdev;

	hdev = platform_get_drvdata(pdev);
	spin_lock(&img_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&img_hash.lock);

	img_unregister_algs(hdev);

	tasklet_kill(&hdev->done_task);
	tasklet_kill(&hdev->dma_task);

	dma_release_channel(hdev->dma_lch);

	clk_disable_unprepare(hdev->hash_clk);
	clk_disable_unprepare(hdev->sys_clk);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int img_hash_suspend(struct device *dev)
{
	struct img_hash_dev *hdev = dev_get_drvdata(dev);

	clk_disable_unprepare(hdev->hash_clk);
	clk_disable_unprepare(hdev->sys_clk);

	return 0;
}

static int img_hash_resume(struct device *dev)
{
	struct img_hash_dev *hdev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(hdev->hash_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(hdev->sys_clk);
	if (ret) {
		clk_disable_unprepare(hdev->hash_clk);
		return ret;
	}

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops img_hash_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(img_hash_suspend, img_hash_resume)
};

static struct platform_driver img_hash_driver = {
	.probe		= img_hash_probe,
	.remove		= img_hash_remove,
	.driver		= {
		.name	= "img-hash-accelerator",
		.pm	= &img_hash_pm_ops,
		.of_match_table	= of_match_ptr(img_hash_match),
	}
};
module_platform_driver(img_hash_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Imgtec SHA1/224/256 & MD5 hw accelerator driver");
MODULE_AUTHOR("Will Thomas.");
MODULE_AUTHOR("James Hartley <james.hartley@imgtec.com>");