/*
 * Copyright (c) 2014 Imagination Technologies
 * Authors: Will Thomas, James Hartley
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Interface structure taken from omap-sham driver
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <crypto/sha.h>

#define CR_RESET			0
#define CR_RESET_SET			1
#define CR_RESET_UNSET			0

#define CR_MESSAGE_LENGTH_H		0x4
#define CR_MESSAGE_LENGTH_L		0x8

#define CR_CONTROL			0xc
#define CR_CONTROL_BYTE_ORDER_3210	0
#define CR_CONTROL_BYTE_ORDER_0123	1
#define CR_CONTROL_BYTE_ORDER_2310	2
#define CR_CONTROL_BYTE_ORDER_1032	3
#define CR_CONTROL_BYTE_ORDER_SHIFT	8
#define CR_CONTROL_ALGO_MD5		0
#define CR_CONTROL_ALGO_SHA1		1
#define CR_CONTROL_ALGO_SHA224		2
#define CR_CONTROL_ALGO_SHA256		3

#define CR_INTSTAT			0x10
#define CR_INTENAB			0x14
#define CR_INTCLEAR			0x18
#define CR_INT_RESULTS_AVAILABLE	BIT(0)
#define CR_INT_NEW_RESULTS_SET		BIT(1)
#define CR_INT_RESULT_READ_ERR		BIT(2)
#define CR_INT_MESSAGE_WRITE_ERROR	BIT(3)
#define CR_INT_STATUS			BIT(8)

#define CR_RESULT_QUEUE			0x1c

#define CR_CORE_REV			0x50
#define CR_CORE_DES1			0x60
#define CR_CORE_DES2			0x70

#define DRIVER_FLAGS_BUSY		BIT(0)
#define DRIVER_FLAGS_FINAL		BIT(1)
#define DRIVER_FLAGS_DMA_ACTIVE		BIT(2)
#define DRIVER_FLAGS_OUTPUT_READY	BIT(3)
#define DRIVER_FLAGS_INIT		BIT(4)
#define DRIVER_FLAGS_CPU		BIT(5)
#define DRIVER_FLAGS_DMA_READY		BIT(6)
#define DRIVER_FLAGS_ERROR		BIT(7)
#define DRIVER_FLAGS_SG			BIT(8)
#define DRIVER_FLAGS_SHA1		BIT(18)
#define DRIVER_FLAGS_SHA224		BIT(19)
#define DRIVER_FLAGS_SHA256		BIT(20)
#define DRIVER_FLAGS_MD5		BIT(21)

#define IMG_HASH_QUEUE_LENGTH		20
#define IMG_HASH_DMA_THRESHOLD		64

#ifdef __LITTLE_ENDIAN
#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_3210
#else
#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_0123
#endif

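/*
 * Per-request state: the digest read back from the hardware, scatterlist
 * walk position and a small bounce buffer used for bytes that cannot be
 * sent over DMA as whole 32-bit words.
 */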
struct img_hash_request_ctx {
	struct img_hash_dev	*hdev;
	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
	unsigned long		flags;
	size_t			digsize;

	size_t			dma_ct;

	/* sg root */
	struct scatterlist	*sgfirst;
	/* walk state */
	struct scatterlist	*sg;
	size_t			nents;
	size_t			offset;
	unsigned int		total;
	size_t			sent;

	unsigned long		op;

	size_t			bufcnt;
	u8 buffer[0] __aligned(sizeof(u32));
	struct ahash_request	fallback_req;
};

struct img_hash_ctx {
	struct img_hash_dev	*hdev;
	struct crypto_ahash	*fallback;
};

struct img_hash_dev {
	struct list_head	list;
	struct device		*dev;
	struct clk		*hash_clk;
	struct clk		*sys_clk;
	void __iomem		*io_base;

	phys_addr_t		bus_addr;
	void __iomem		*cpu_addr;

	spinlock_t		lock;
	int			err;
	struct tasklet_struct	done_task;
	struct tasklet_struct	dma_task;

	unsigned long		flags;
	struct crypto_queue	queue;
	struct ahash_request	*req;

	struct dma_chan		*dma_lch;
};

struct img_hash_drv {
	struct list_head dev_list;
	/* Protects dev_list */
	spinlock_t lock;
};

static struct img_hash_drv img_hash = {
	.dev_list = LIST_HEAD_INIT(img_hash.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(img_hash.lock),
};

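/* Relaxed MMIO accessors for the register bank and the big-endian result queue. */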
static inline u32 img_hash_read(struct img_hash_dev *hdev, u32 offset)
{
	return readl_relaxed(hdev->io_base + offset);
}

static inline void img_hash_write(struct img_hash_dev *hdev,
				  u32 offset, u32 value)
{
	writel_relaxed(value, hdev->io_base + offset);
}

static inline u32 img_hash_read_result_queue(struct img_hash_dev *hdev)
{
	return be32_to_cpu(img_hash_read(hdev, CR_RESULT_QUEUE));
}

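/*
 * Program the byte order and the selected algorithm into CR_CONTROL and
 * start the hash operation.
 */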
static void img_hash_start(struct img_hash_dev *hdev, bool dma)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
	u32 cr = IMG_HASH_BYTE_ORDER << CR_CONTROL_BYTE_ORDER_SHIFT;

	if (ctx->flags & DRIVER_FLAGS_MD5)
		cr |= CR_CONTROL_ALGO_MD5;
	else if (ctx->flags & DRIVER_FLAGS_SHA1)
		cr |= CR_CONTROL_ALGO_SHA1;
	else if (ctx->flags & DRIVER_FLAGS_SHA224)
		cr |= CR_CONTROL_ALGO_SHA224;
	else if (ctx->flags & DRIVER_FLAGS_SHA256)
		cr |= CR_CONTROL_ALGO_SHA256;

	dev_dbg(hdev->dev, "Starting hash process\n");
	img_hash_write(hdev, CR_CONTROL, cr);

	/*
	 * The hardware block requires two cycles between writing the control
	 * register and writing the first word of data in non DMA mode, to
	 * ensure the first data write is not grouped in burst with the control
	 * register write a read is issued to 'flush' the bus.
	 */
	if (!dma)
		img_hash_read(hdev, CR_CONTROL);
}

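/* PIO path: feed the data to the write port one 32-bit word at a time. */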
static int img_hash_xmit_cpu(struct img_hash_dev *hdev, const u8 *buf,
			     size_t length, int final)
{
	u32 count, len32;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(hdev->dev, "xmit_cpu: length: %zu bytes\n", length);

	if (final)
		hdev->flags |= DRIVER_FLAGS_FINAL;

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	for (count = 0; count < len32; count++)
		writel_relaxed(buffer[count], hdev->cpu_addr);

	return -EINPROGRESS;
}

static void img_hash_dma_callback(void *data)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)data;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	if (ctx->bufcnt) {
		img_hash_xmit_cpu(hdev, ctx->buffer, ctx->bufcnt, 0);
		ctx->bufcnt = 0;
	}
	if (ctx->sg)
		tasklet_schedule(&hdev->dma_task);
}

static int img_hash_xmit_dma(struct img_hash_dev *hdev, struct scatterlist *sg)
{
	struct dma_async_tx_descriptor *desc;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	ctx->dma_ct = dma_map_sg(hdev->dev, sg, 1, DMA_MEM_TO_DEV);
	if (ctx->dma_ct == 0) {
		dev_err(hdev->dev, "Invalid DMA sg\n");
		hdev->err = -EINVAL;
		return -EINVAL;
	}

	desc = dmaengine_prep_slave_sg(hdev->dma_lch,
				       sg,
				       ctx->dma_ct,
				       DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(hdev->dev, "Null DMA descriptor\n");
		hdev->err = -EINVAL;
		dma_unmap_sg(hdev->dev, sg, 1, DMA_MEM_TO_DEV);
		return -EINVAL;
	}
	desc->callback = img_hash_dma_callback;
	desc->callback_param = hdev;
	dmaengine_submit(desc);
	dma_async_issue_pending(hdev->dma_lch);

	return 0;
}

static int img_hash_write_via_cpu(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	ctx->bufcnt = sg_copy_to_buffer(hdev->req->src, sg_nents(ctx->sg),
					ctx->buffer, hdev->req->nbytes);

	ctx->total = hdev->req->nbytes;

	hdev->flags |= (DRIVER_FLAGS_CPU | DRIVER_FLAGS_FINAL);

	img_hash_start(hdev, false);

	return img_hash_xmit_cpu(hdev, ctx->buffer, ctx->total, 1);
}

static int img_hash_finish(struct ahash_request *req)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);

	if (!req->result)
		return -EINVAL;

	memcpy(req->result, ctx->digest, ctx->digsize);

	return 0;
}

static void img_hash_copy_hash(struct ahash_request *req)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)ctx->digest;
	int i;

	for (i = (ctx->digsize / sizeof(u32)) - 1; i >= 0; i--)
		hash[i] = img_hash_read_result_queue(ctx->hdev);
}

static void img_hash_finish_req(struct ahash_request *req, int err)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	struct img_hash_dev *hdev = ctx->hdev;

	if (!err) {
		img_hash_copy_hash(req);
		if (DRIVER_FLAGS_FINAL & hdev->flags)
			err = img_hash_finish(req);
	} else {
		dev_warn(hdev->dev, "Hash failed with error %d\n", err);
		ctx->flags |= DRIVER_FLAGS_ERROR;
	}

	hdev->flags &= ~(DRIVER_FLAGS_DMA_READY | DRIVER_FLAGS_OUTPUT_READY |
		DRIVER_FLAGS_CPU | DRIVER_FLAGS_BUSY | DRIVER_FLAGS_FINAL);

	if (req->base.complete)
		req->base.complete(&req->base, err);
}

static int img_hash_write_via_dma(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	img_hash_start(hdev, true);

	dev_dbg(hdev->dev, "xmit dma size: %d\n", ctx->total);

	if (!ctx->total)
		hdev->flags |= DRIVER_FLAGS_FINAL;
	else
		hdev->flags |= DRIVER_FLAGS_DMA_ACTIVE | DRIVER_FLAGS_FINAL;

	tasklet_schedule(&hdev->dma_task);

	return -EINPROGRESS;
}

static int img_hash_dma_init(struct img_hash_dev *hdev)
{
	struct dma_slave_config dma_conf;
	int err;

	hdev->dma_lch = dma_request_slave_channel(hdev->dev, "tx");
	if (!hdev->dma_lch) {
		dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
		return -EBUSY;
	}
	dma_conf.direction = DMA_MEM_TO_DEV;
	dma_conf.dst_addr = hdev->bus_addr;
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.dst_maxburst = 16;
	dma_conf.device_fc = false;

	err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
	if (err) {
		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
		dma_release_channel(hdev->dma_lch);
		return err;
	}

	return 0;
}

static void img_hash_dma_task(unsigned long d)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)d;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
	u8 *addr;
	size_t nbytes, bleft, wsend, len, tbc;
	struct scatterlist tsg;

	if (!ctx->sg)
		return;

	addr = sg_virt(ctx->sg);
	nbytes = ctx->sg->length - ctx->offset;

	/*
	 * The hash accelerator does not support a data valid mask. This means
	 * that if each dma (i.e. per page) is not a multiple of 4 bytes, the
	 * padding bytes in the last word written by that dma would erroneously
	 * be included in the hash. To avoid this we round down the transfer,
	 * and add the excess to the start of the next dma. It does not matter
	 * that the final dma may not be a multiple of 4 bytes as the hashing
	 * block is programmed to accept the correct number of bytes.
	 */

	bleft = nbytes % 4;
	wsend = (nbytes / 4);

	if (wsend) {
		sg_init_one(&tsg, addr + ctx->offset, wsend * 4);
		if (img_hash_xmit_dma(hdev, &tsg)) {
			dev_err(hdev->dev, "DMA failed, falling back to CPU");
			ctx->flags |= DRIVER_FLAGS_CPU;
			hdev->err = 0;
			img_hash_xmit_cpu(hdev, addr + ctx->offset,
					  wsend * 4, 0);
			ctx->sent += wsend * 4;
			wsend = 0;
		} else {
			ctx->sent += wsend * 4;
		}
	}

	if (bleft) {
		ctx->bufcnt = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
						 ctx->buffer, bleft, ctx->sent);
		tbc = 0;
		ctx->sg = sg_next(ctx->sg);
		while (ctx->sg && (ctx->bufcnt < 4)) {
			len = ctx->sg->length;
			if (likely(len > (4 - ctx->bufcnt)))
				len = 4 - ctx->bufcnt;
			tbc = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
						 ctx->buffer + ctx->bufcnt, len,
						 ctx->sent + ctx->bufcnt);
			ctx->bufcnt += tbc;
			if (tbc >= ctx->sg->length) {
				ctx->sg = sg_next(ctx->sg);
				tbc = 0;
			}
		}

		ctx->sent += ctx->bufcnt;
		ctx->offset = tbc;

		if (!wsend)
			img_hash_dma_callback(hdev);
	} else {
		ctx->offset = 0;
		ctx->sg = sg_next(ctx->sg);
	}
}

static int img_hash_write_via_dma_stop(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	if (ctx->flags & DRIVER_FLAGS_SG)
		dma_unmap_sg(hdev->dev, ctx->sg, ctx->dma_ct, DMA_TO_DEVICE);

	return 0;
}

static int img_hash_process_data(struct img_hash_dev *hdev)
{
	struct ahash_request *req = hdev->req;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	int err = 0;

	ctx->bufcnt = 0;

	if (req->nbytes >= IMG_HASH_DMA_THRESHOLD) {
		dev_dbg(hdev->dev, "process data request(%d bytes) using DMA\n",
			req->nbytes);
		err = img_hash_write_via_dma(hdev);
	} else {
		dev_dbg(hdev->dev, "process data request(%d bytes) using CPU\n",
			req->nbytes);
		err = img_hash_write_via_cpu(hdev);
	}

	return err;
}

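/*
 * Reset the block, enable the "new results" interrupt and program the total
 * message length (in bits) into the length registers.
 */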
static int img_hash_hw_init(struct img_hash_dev *hdev)
{
	unsigned long long nbits;
	u32 u, l;

	img_hash_write(hdev, CR_RESET, CR_RESET_SET);
	img_hash_write(hdev, CR_RESET, CR_RESET_UNSET);
	img_hash_write(hdev, CR_INTENAB, CR_INT_NEW_RESULTS_SET);

	nbits = (u64)hdev->req->nbytes << 3;
	u = nbits >> 32;
	l = nbits;
	img_hash_write(hdev, CR_MESSAGE_LENGTH_H, u);
	img_hash_write(hdev, CR_MESSAGE_LENGTH_L, l);

	if (!(DRIVER_FLAGS_INIT & hdev->flags)) {
		hdev->flags |= DRIVER_FLAGS_INIT;
		hdev->err = 0;
	}
	dev_dbg(hdev->dev, "hw initialized, nbits: %llx\n", nbits);
	return 0;
}

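/*
 * init/update/final/finup are delegated to the software fallback transform;
 * only digest() is handled by the hardware.
 */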
static int img_hash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

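/*
 * Enqueue a request and, if the hardware is idle, dequeue the next one,
 * initialise the block and start processing it.
 */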
static int img_hash_handle_queue(struct img_hash_dev *hdev,
				 struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct img_hash_request_ctx *ctx;
	unsigned long flags;
	int err = 0, res = 0;

	spin_lock_irqsave(&hdev->lock, flags);

	if (req)
		res = ahash_enqueue_request(&hdev->queue, req);

	if (DRIVER_FLAGS_BUSY & hdev->flags) {
		spin_unlock_irqrestore(&hdev->lock, flags);
		return res;
	}

	backlog = crypto_get_backlog(&hdev->queue);
	async_req = crypto_dequeue_request(&hdev->queue);
	if (async_req)
		hdev->flags |= DRIVER_FLAGS_BUSY;

	spin_unlock_irqrestore(&hdev->lock, flags);

	if (!async_req)
		return res;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	hdev->req = req;

	ctx = ahash_request_ctx(req);

	dev_info(hdev->dev, "processing req, op: %lu, bytes: %d\n",
		 ctx->op, req->nbytes);

	err = img_hash_hw_init(hdev);

	if (!err)
		err = img_hash_process_data(hdev);

	if (err != -EINPROGRESS) {
		/* done_task will not finish so do it here */
		img_hash_finish_req(req, err);
	}
	return res;
}

static int img_hash_update(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

static int img_hash_final(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_final(&rctx->fallback_req);
}

static int img_hash_finup(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_finup(&rctx->fallback_req);
}

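/*
 * Hardware digest: pick a device, set the algorithm flag from the digest
 * size, reset the scatterlist walk state and hand the request to the queue.
 */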
static int img_hash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *tctx = crypto_ahash_ctx(tfm);
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	struct img_hash_dev *hdev = NULL;
	struct img_hash_dev *tmp;
	int err;

	spin_lock(&img_hash.lock);
	if (!tctx->hdev) {
		list_for_each_entry(tmp, &img_hash.dev_list, list) {
			hdev = tmp;
			break;
		}
		tctx->hdev = hdev;
	} else {
		hdev = tctx->hdev;
	}
	spin_unlock(&img_hash.lock);

	ctx->hdev = hdev;
	ctx->flags = 0;
	ctx->digsize = crypto_ahash_digestsize(tfm);

	switch (ctx->digsize) {
	case SHA1_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA1;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA256;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA224;
		break;
	case MD5_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_MD5;
		break;
	default:
		return -EINVAL;
	}

	ctx->bufcnt = 0;
	ctx->offset = 0;
	ctx->sent = 0;
	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->sgfirst = req->src;
	ctx->nents = sg_nents(ctx->sg);

	err = img_hash_handle_queue(tctx->hdev, req);

	return err;
}

static int img_hash_cra_init(struct crypto_tfm *tfm)
{
	struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	const char *alg_name = crypto_tfm_alg_name(tfm);
	int err;

	ctx->fallback = crypto_alloc_ahash(alg_name, 0,
					   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback)) {
		pr_err("img_hash: Could not load fallback driver.\n");
		err = PTR_ERR(ctx->fallback);
		return err;
	}
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct img_hash_request_ctx) +
				 IMG_HASH_DMA_THRESHOLD);

	return 0;
}

static void img_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct img_hash_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_ahash(tctx->fallback);
}

static irqreturn_t img_irq_handler(int irq, void *dev_id)
{
	struct img_hash_dev *hdev = dev_id;
	u32 reg;

	reg = img_hash_read(hdev, CR_INTSTAT);
	img_hash_write(hdev, CR_INTCLEAR, reg);

	if (reg & CR_INT_NEW_RESULTS_SET) {
		dev_dbg(hdev->dev, "IRQ CR_INT_NEW_RESULTS_SET\n");
		if (DRIVER_FLAGS_BUSY & hdev->flags) {
			hdev->flags |= DRIVER_FLAGS_OUTPUT_READY;
			if (!(DRIVER_FLAGS_CPU & hdev->flags))
				hdev->flags |= DRIVER_FLAGS_DMA_READY;
			tasklet_schedule(&hdev->done_task);
		} else {
			dev_warn(hdev->dev,
				 "HASH interrupt when no active requests.\n");
		}
	} else if (reg & CR_INT_RESULTS_AVAILABLE) {
		dev_warn(hdev->dev,
			 "IRQ triggered before the hash had completed\n");
	} else if (reg & CR_INT_RESULT_READ_ERR) {
		dev_warn(hdev->dev,
			 "Attempt to read from an empty result queue\n");
	} else if (reg & CR_INT_MESSAGE_WRITE_ERROR) {
		dev_warn(hdev->dev,
			 "Data written before the hardware was configured\n");
	}
	return IRQ_HANDLED;
}

static struct ahash_alg img_algs[] = {
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "img-md5",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "img-sha1",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "img-sha224",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "img-sha256",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	}
};

static int img_register_algs(struct img_hash_dev *hdev)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(img_algs); i++) {
		err = crypto_register_ahash(&img_algs[i]);
		if (err)
			goto err_reg;
	}
	return 0;

err_reg:
	for (; i--; )
		crypto_unregister_ahash(&img_algs[i]);

	return err;
}

static int img_unregister_algs(struct img_hash_dev *hdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(img_algs); i++)
		crypto_unregister_ahash(&img_algs[i]);
	return 0;
}

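/*
 * Bottom half run after an interrupt or DMA completion: unmap any DMA
 * mapping, read back the digest and complete the request.
 */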
static void img_hash_done_task(unsigned long data)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)data;
	int err = 0;

	if (hdev->err == -EINVAL) {
		err = hdev->err;
		goto finish;
	}

	if (!(DRIVER_FLAGS_BUSY & hdev->flags)) {
		img_hash_handle_queue(hdev, NULL);
		return;
	}

	if (DRIVER_FLAGS_CPU & hdev->flags) {
		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~DRIVER_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (DRIVER_FLAGS_DMA_READY & hdev->flags) {
		if (DRIVER_FLAGS_DMA_ACTIVE & hdev->flags) {
			hdev->flags &= ~DRIVER_FLAGS_DMA_ACTIVE;
			img_hash_write_via_dma_stop(hdev);
			if (hdev->err) {
				err = hdev->err;
				goto finish;
			}
		}
		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~(DRIVER_FLAGS_DMA_READY |
					DRIVER_FLAGS_OUTPUT_READY);
			goto finish;
		}
	}
	return;

finish:
	img_hash_finish_req(hdev->req, err);
}

static const struct of_device_id img_hash_match[] = {
	{ .compatible = "img,hash-accelerator" },
	{}
};
MODULE_DEVICE_TABLE(of, img_hash_match);

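/*
 * Probe: map the register bank and write port, grab the IRQ, clocks and DMA
 * channel, then register the hash algorithms.
 */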
static int img_hash_probe(struct platform_device *pdev)
{
	struct img_hash_dev *hdev;
	struct device *dev = &pdev->dev;
	struct resource *hash_res;
	int irq;
	int err;

	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
	if (hdev == NULL)
		return -ENOMEM;

	spin_lock_init(&hdev->lock);

	hdev->dev = dev;

	platform_set_drvdata(pdev, hdev);

	INIT_LIST_HEAD(&hdev->list);

	tasklet_init(&hdev->done_task, img_hash_done_task, (unsigned long)hdev);
	tasklet_init(&hdev->dma_task, img_hash_dma_task, (unsigned long)hdev);

	crypto_init_queue(&hdev->queue, IMG_HASH_QUEUE_LENGTH);

	/* Register bank */
	hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	hdev->io_base = devm_ioremap_resource(dev, hash_res);
	if (IS_ERR(hdev->io_base)) {
		err = PTR_ERR(hdev->io_base);
		dev_err(dev, "can't ioremap, returned %d\n", err);
		goto res_err;
	}

	/* Write port (DMA or CPU) */
	hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	hdev->cpu_addr = devm_ioremap_resource(dev, hash_res);
	if (IS_ERR(hdev->cpu_addr)) {
		dev_err(dev, "can't ioremap write port\n");
		err = PTR_ERR(hdev->cpu_addr);
		goto res_err;
	}
	hdev->bus_addr = hash_res->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = irq;
		goto res_err;
	}

	err = devm_request_irq(dev, irq, img_irq_handler, 0,
			       dev_name(dev), hdev);
	if (err) {
		dev_err(dev, "unable to request irq\n");
		goto res_err;
	}
	dev_dbg(dev, "using IRQ channel %d\n", irq);

	hdev->hash_clk = devm_clk_get(&pdev->dev, "hash");
	if (IS_ERR(hdev->hash_clk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(hdev->hash_clk);
		goto res_err;
	}

	hdev->sys_clk = devm_clk_get(&pdev->dev, "sys");
	if (IS_ERR(hdev->sys_clk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(hdev->sys_clk);
		goto res_err;
	}

	err = clk_prepare_enable(hdev->hash_clk);
	if (err)
		goto res_err;

	err = clk_prepare_enable(hdev->sys_clk);
	if (err)
		goto clk_err;

	err = img_hash_dma_init(hdev);
	if (err)
		goto dma_err;

	dev_dbg(dev, "using %s for DMA transfers\n",
		dma_chan_name(hdev->dma_lch));

	spin_lock(&img_hash.lock);
	list_add_tail(&hdev->list, &img_hash.dev_list);
	spin_unlock(&img_hash.lock);

	err = img_register_algs(hdev);
	if (err)
		goto err_algs;
	dev_dbg(dev, "Img MD5/SHA1/SHA224/SHA256 Hardware accelerator initialized\n");

	return 0;

err_algs:
	spin_lock(&img_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&img_hash.lock);
	dma_release_channel(hdev->dma_lch);
dma_err:
	clk_disable_unprepare(hdev->sys_clk);
clk_err:
	clk_disable_unprepare(hdev->hash_clk);
res_err:
	tasklet_kill(&hdev->done_task);
	tasklet_kill(&hdev->dma_task);

	return err;
}

static int img_hash_remove(struct platform_device *pdev)
{
	struct img_hash_dev *hdev;

	hdev = platform_get_drvdata(pdev);
	spin_lock(&img_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&img_hash.lock);

	img_unregister_algs(hdev);

	tasklet_kill(&hdev->done_task);
	tasklet_kill(&hdev->dma_task);

	dma_release_channel(hdev->dma_lch);

	clk_disable_unprepare(hdev->hash_clk);
	clk_disable_unprepare(hdev->sys_clk);

	return 0;
}

static struct platform_driver img_hash_driver = {
	.probe		= img_hash_probe,
	.remove		= img_hash_remove,
	.driver		= {
		.name	= "img-hash-accelerator",
		.of_match_table	= of_match_ptr(img_hash_match),
	}
};
module_platform_driver(img_hash_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Imgtec SHA1/224/256 & MD5 hw accelerator driver");
MODULE_AUTHOR("Will Thomas.");
MODULE_AUTHOR("James Hartley <james.hartley@imgtec.com>");