// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 Imagination Technologies
 * Authors: Will Thomas, James Hartley
 *
 * Interface structure taken from omap-sham driver
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <crypto/sha.h>

#define CR_RESET			0
#define CR_RESET_SET			1
#define CR_RESET_UNSET			0

#define CR_MESSAGE_LENGTH_H		0x4
#define CR_MESSAGE_LENGTH_L		0x8

#define CR_CONTROL			0xc
#define CR_CONTROL_BYTE_ORDER_3210	0
#define CR_CONTROL_BYTE_ORDER_0123	1
#define CR_CONTROL_BYTE_ORDER_2310	2
#define CR_CONTROL_BYTE_ORDER_1032	3
#define CR_CONTROL_BYTE_ORDER_SHIFT	8
#define CR_CONTROL_ALGO_MD5		0
#define CR_CONTROL_ALGO_SHA1		1
#define CR_CONTROL_ALGO_SHA224		2
#define CR_CONTROL_ALGO_SHA256		3

#define CR_INTSTAT			0x10
#define CR_INTENAB			0x14
#define CR_INTCLEAR			0x18
#define CR_INT_RESULTS_AVAILABLE	BIT(0)
#define CR_INT_NEW_RESULTS_SET		BIT(1)
#define CR_INT_RESULT_READ_ERR		BIT(2)
#define CR_INT_MESSAGE_WRITE_ERROR	BIT(3)
#define CR_INT_STATUS			BIT(8)

#define CR_RESULT_QUEUE			0x1c
#define CR_CORE_REV			0x50
#define CR_CORE_DES1			0x60
#define CR_CORE_DES2			0x70

#define DRIVER_FLAGS_BUSY		BIT(0)
#define DRIVER_FLAGS_FINAL		BIT(1)
#define DRIVER_FLAGS_DMA_ACTIVE		BIT(2)
#define DRIVER_FLAGS_OUTPUT_READY	BIT(3)
#define DRIVER_FLAGS_INIT		BIT(4)
#define DRIVER_FLAGS_CPU		BIT(5)
#define DRIVER_FLAGS_DMA_READY		BIT(6)
#define DRIVER_FLAGS_ERROR		BIT(7)
#define DRIVER_FLAGS_SG			BIT(8)
#define DRIVER_FLAGS_SHA1		BIT(18)
#define DRIVER_FLAGS_SHA224		BIT(19)
#define DRIVER_FLAGS_SHA256		BIT(20)
#define DRIVER_FLAGS_MD5		BIT(21)

#define IMG_HASH_QUEUE_LENGTH		20
#define IMG_HASH_DMA_BURST		4
#define IMG_HASH_DMA_THRESHOLD		64

#ifdef __LITTLE_ENDIAN
#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_3210
#else
#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_0123
#endif

struct img_hash_request_ctx {
	struct img_hash_dev	*hdev;
	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
	unsigned long		flags;
	unsigned long		op;
	size_t			digsize;
	size_t			dma_ct;

	/* scatterlist walk state */
	struct scatterlist	*sgfirst;
	struct scatterlist	*sg;
	size_t			nents, offset, sent, bufcnt;
	unsigned int		total;

	struct ahash_request	fallback_req;

	/* Zero length buffer must remain last member of struct */
	u8 buffer[] __aligned(sizeof(u32));
};

struct img_hash_ctx {
	struct img_hash_dev	*hdev;
	struct crypto_ahash	*fallback;
};

struct img_hash_dev {
	struct list_head	list;
	struct device		*dev;
	struct clk		*hash_clk;
	struct clk		*sys_clk;
	void __iomem		*io_base;

	phys_addr_t		bus_addr;
	void __iomem		*cpu_addr;

	spinlock_t		lock;
	int			err;
	struct tasklet_struct	done_task;
	struct tasklet_struct	dma_task;

	unsigned long		flags;
	struct crypto_queue	queue;
	struct ahash_request	*req;

	struct dma_chan		*dma_lch;
};

struct img_hash_drv {
	struct list_head dev_list;
	spinlock_t lock;
};

static struct img_hash_drv img_hash = {
	.dev_list = LIST_HEAD_INIT(img_hash.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(img_hash.lock),
};

static inline u32 img_hash_read(struct img_hash_dev *hdev, u32 offset)
{
	return readl_relaxed(hdev->io_base + offset);
}

static inline void img_hash_write(struct img_hash_dev *hdev,
				  u32 offset, u32 value)
{
	writel_relaxed(value, hdev->io_base + offset);
}

static inline u32 img_hash_read_result_queue(struct img_hash_dev *hdev)
{
	return be32_to_cpu(img_hash_read(hdev, CR_RESULT_QUEUE));
}

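/*
 * Program the byte order and the selected algorithm into the control
 * register and start the engine for the current request.
 */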
static void img_hash_start(struct img_hash_dev *hdev, bool dma)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
	u32 cr = IMG_HASH_BYTE_ORDER << CR_CONTROL_BYTE_ORDER_SHIFT;

	if (ctx->flags & DRIVER_FLAGS_MD5)
		cr |= CR_CONTROL_ALGO_MD5;
	else if (ctx->flags & DRIVER_FLAGS_SHA1)
		cr |= CR_CONTROL_ALGO_SHA1;
	else if (ctx->flags & DRIVER_FLAGS_SHA224)
		cr |= CR_CONTROL_ALGO_SHA224;
	else if (ctx->flags & DRIVER_FLAGS_SHA256)
		cr |= CR_CONTROL_ALGO_SHA256;
	dev_dbg(hdev->dev, "Starting hash process\n");
	img_hash_write(hdev, CR_CONTROL, cr);

	/*
	 * The hardware block requires two cycles between writing the control
	 * register and writing the first word of data in non DMA mode, to
	 * ensure the first data write is not grouped in burst with the control
	 * register write a read is issued to 'flush' the bus.
	 */
	if (!dma)
		img_hash_read(hdev, CR_CONTROL);
}

static int img_hash_xmit_cpu(struct img_hash_dev *hdev, const u8 *buf,
			     size_t length, int final)
{
	u32 count, len32;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(hdev->dev, "xmit_cpu: length: %zu bytes\n", length);

	if (final)
		hdev->flags |= DRIVER_FLAGS_FINAL;

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	for (count = 0; count < len32; count++)
		writel_relaxed(buffer[count], hdev->cpu_addr);

	return -EINPROGRESS;
}

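/*
 * DMA completion callback: any sub-word remainder buffered by the dma_task
 * is pushed through the CPU write port, then the dma_task is rescheduled to
 * process the next scatterlist entry (if any).
 */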
static void img_hash_dma_callback(void *data)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)data;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	if (ctx->bufcnt) {
		img_hash_xmit_cpu(hdev, ctx->buffer, ctx->bufcnt, 0);
		ctx->bufcnt = 0;
	}
	if (ctx->sg)
		tasklet_schedule(&hdev->dma_task);
}

static int img_hash_xmit_dma(struct img_hash_dev *hdev, struct scatterlist *sg)
{
	struct dma_async_tx_descriptor *desc;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	ctx->dma_ct = dma_map_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
	if (ctx->dma_ct == 0) {
		dev_err(hdev->dev, "Invalid DMA sg\n");
		hdev->err = -EINVAL;
		return -EINVAL;
	}

	desc = dmaengine_prep_slave_sg(hdev->dma_lch,
				       sg,
				       ctx->dma_ct,
				       DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(hdev->dev, "Null DMA descriptor\n");
		hdev->err = -EINVAL;
		dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
		return -EINVAL;
	}
	desc->callback = img_hash_dma_callback;
	desc->callback_param = hdev;
	dmaengine_submit(desc);
	dma_async_issue_pending(hdev->dma_lch);

	return 0;
}

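/*
 * CPU path for small requests: the whole source scatterlist is copied into
 * the request bounce buffer and written to the engine word by word.
 */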
static int img_hash_write_via_cpu(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	ctx->bufcnt = sg_copy_to_buffer(hdev->req->src, sg_nents(ctx->sg),
					ctx->buffer, hdev->req->nbytes);

	ctx->total = hdev->req->nbytes;
	ctx->bufcnt = 0;

	hdev->flags |= (DRIVER_FLAGS_CPU | DRIVER_FLAGS_FINAL);

	img_hash_start(hdev, false);

	return img_hash_xmit_cpu(hdev, ctx->buffer, ctx->total, 1);
}

static int img_hash_finish(struct ahash_request *req)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);

	if (!req->result)
		return -EINVAL;

	memcpy(req->result, ctx->digest, ctx->digsize);

	return 0;
}

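/*
 * Drain the result queue into the request context digest; successive reads
 * of CR_RESULT_QUEUE are stored from the end of the digest towards the start.
 */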
static void img_hash_copy_hash(struct ahash_request *req)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)ctx->digest;
	int i;

	for (i = (ctx->digsize / sizeof(u32)) - 1; i >= 0; i--)
		hash[i] = img_hash_read_result_queue(ctx->hdev);
}

static void img_hash_finish_req(struct ahash_request *req, int err)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	struct img_hash_dev *hdev = ctx->hdev;

	if (!err) {
		img_hash_copy_hash(req);
		if (DRIVER_FLAGS_FINAL & hdev->flags)
			err = img_hash_finish(req);
	} else {
		dev_warn(hdev->dev, "Hash failed with error %d\n", err);
		ctx->flags |= DRIVER_FLAGS_ERROR;
	}

	hdev->flags &= ~(DRIVER_FLAGS_DMA_READY | DRIVER_FLAGS_OUTPUT_READY |
		DRIVER_FLAGS_CPU | DRIVER_FLAGS_BUSY | DRIVER_FLAGS_FINAL);

	if (req->base.complete)
		req->base.complete(&req->base, err);
}

static int img_hash_write_via_dma(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	img_hash_start(hdev, true);

	dev_dbg(hdev->dev, "xmit dma size: %d\n", ctx->total);

	if (!ctx->total)
		hdev->flags |= DRIVER_FLAGS_FINAL;
	else
		hdev->flags |= DRIVER_FLAGS_DMA_ACTIVE | DRIVER_FLAGS_FINAL;

	tasklet_schedule(&hdev->dma_task);

	return -EINPROGRESS;
}

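/*
 * Request the "tx" DMA channel and configure it for 4-byte writes to the
 * engine's bus address, using IMG_HASH_DMA_BURST as the maximum burst.
 */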
static int img_hash_dma_init(struct img_hash_dev *hdev)
{
	struct dma_slave_config dma_conf;
	int err;

	hdev->dma_lch = dma_request_chan(hdev->dev, "tx");
	if (IS_ERR(hdev->dma_lch)) {
		dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
		return PTR_ERR(hdev->dma_lch);
	}
	dma_conf.direction = DMA_MEM_TO_DEV;
	dma_conf.dst_addr = hdev->bus_addr;
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.dst_maxburst = IMG_HASH_DMA_BURST;
	dma_conf.device_fc = false;

	err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
	if (err) {
		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
		dma_release_channel(hdev->dma_lch);
		return err;
	}

	return 0;
}

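/*
 * Tasklet that walks the request scatterlist. Whole words are sent via DMA;
 * because the engine has no data valid mask, any trailing bytes of an entry
 * are buffered and carried over to the next transfer (see the comment in the
 * function body).
 */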
static void img_hash_dma_task(unsigned long d)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)d;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
	u8 *addr;
	size_t nbytes, bleft, wsend, len, tbc;
	struct scatterlist tsg;

	if (!hdev->req || !ctx->sg)
		return;

	addr = sg_virt(ctx->sg);
	nbytes = ctx->sg->length - ctx->offset;

	/*
	 * The hash accelerator does not support a data valid mask. This means
	 * that if each dma (i.e. per page) is not a multiple of 4 bytes, the
	 * padding bytes in the last word written by that dma would erroneously
	 * be included in the hash. To avoid this we round down the transfer,
	 * and add the excess to the start of the next dma. It does not matter
	 * that the final dma may not be a multiple of 4 bytes as the hashing
	 * block is programmed to accept the correct number of bytes.
	 */

	bleft = nbytes % 4;
	wsend = (nbytes / 4);

	if (wsend) {
		sg_init_one(&tsg, addr + ctx->offset, wsend * 4);
		if (img_hash_xmit_dma(hdev, &tsg)) {
			dev_err(hdev->dev, "DMA failed, falling back to CPU");
			ctx->flags |= DRIVER_FLAGS_CPU;
			hdev->err = 0;
			img_hash_xmit_cpu(hdev, addr + ctx->offset,
					  wsend * 4, 0);
			ctx->sent += wsend * 4;
			wsend = 0;
		} else {
			ctx->sent += wsend * 4;
		}
	}

	if (bleft) {
		ctx->bufcnt = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
						 ctx->buffer, bleft, ctx->sent);
		tbc = 0;
		ctx->sg = sg_next(ctx->sg);
		while (ctx->sg && (ctx->bufcnt < 4)) {
			len = ctx->sg->length;
			if (likely(len > (4 - ctx->bufcnt)))
				len = 4 - ctx->bufcnt;
			tbc = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
						 ctx->buffer + ctx->bufcnt, len,
						 ctx->sent + ctx->bufcnt);
			ctx->bufcnt += tbc;
			if (tbc >= ctx->sg->length) {
				ctx->sg = sg_next(ctx->sg);
				tbc = 0;
			}
		}

		ctx->sent += ctx->bufcnt;
		ctx->offset = tbc;

		if (!wsend)
			img_hash_dma_callback(hdev);
	} else {
		ctx->offset = 0;
		ctx->sg = sg_next(ctx->sg);
	}
}

static int img_hash_write_via_dma_stop(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	if (ctx->flags & DRIVER_FLAGS_SG)
		dma_unmap_sg(hdev->dev, ctx->sg, ctx->dma_ct, DMA_TO_DEVICE);

	return 0;
}

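/*
 * Pick the transfer method for the current request: DMA for requests of at
 * least IMG_HASH_DMA_THRESHOLD bytes, the CPU write port otherwise.
 */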
static int img_hash_process_data(struct img_hash_dev *hdev)
{
	struct ahash_request *req = hdev->req;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	int err = 0;

	ctx->bufcnt = 0;

	if (req->nbytes >= IMG_HASH_DMA_THRESHOLD) {
		dev_dbg(hdev->dev, "process data request(%d bytes) using DMA\n",
			req->nbytes);
		err = img_hash_write_via_dma(hdev);
	} else {
		dev_dbg(hdev->dev, "process data request(%d bytes) using CPU\n",
			req->nbytes);
		err = img_hash_write_via_cpu(hdev);
	}

	return err;
}

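/*
 * Reset the engine, enable the "new results" interrupt and program the total
 * message length (in bits) before any data is written.
 */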
static int img_hash_hw_init(struct img_hash_dev *hdev)
{
	unsigned long long nbits;
	u32 u, l;

	img_hash_write(hdev, CR_RESET, CR_RESET_SET);
	img_hash_write(hdev, CR_RESET, CR_RESET_UNSET);
	img_hash_write(hdev, CR_INTENAB, CR_INT_NEW_RESULTS_SET);

	nbits = (u64)hdev->req->nbytes << 3;
	u = nbits >> 32;
	l = nbits;
	img_hash_write(hdev, CR_MESSAGE_LENGTH_H, u);
	img_hash_write(hdev, CR_MESSAGE_LENGTH_L, l);

	if (!(DRIVER_FLAGS_INIT & hdev->flags)) {
		hdev->flags |= DRIVER_FLAGS_INIT;
		hdev->err = 0;
	}
	dev_dbg(hdev->dev, "hw initialized, nbits: %llx\n", nbits);
	return 0;
}

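/*
 * Everything except img_hash_digest() is delegated to the software fallback,
 * so partial updates and import/export never touch the accelerator.
 */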
static int img_hash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

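/*
 * Enqueue a request (if given) and, when the engine is idle, dequeue the
 * next one and start processing it. Returns the enqueue status to the caller.
 */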
static int img_hash_handle_queue(struct img_hash_dev *hdev,
				 struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct img_hash_request_ctx *ctx;
	unsigned long flags;
	int err = 0, res = 0;

	spin_lock_irqsave(&hdev->lock, flags);

	if (req)
		res = ahash_enqueue_request(&hdev->queue, req);

	if (DRIVER_FLAGS_BUSY & hdev->flags) {
		spin_unlock_irqrestore(&hdev->lock, flags);
		return res;
	}

	backlog = crypto_get_backlog(&hdev->queue);
	async_req = crypto_dequeue_request(&hdev->queue);
	if (async_req)
		hdev->flags |= DRIVER_FLAGS_BUSY;

	spin_unlock_irqrestore(&hdev->lock, flags);

	if (!async_req)
		return res;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	hdev->req = req;

	ctx = ahash_request_ctx(req);

	dev_info(hdev->dev, "processing req, op: %lu, bytes: %d\n",
		 ctx->op, req->nbytes);

	err = img_hash_hw_init(hdev);

	if (!err)
		err = img_hash_process_data(hdev);

	if (err != -EINPROGRESS) {
		/* done_task will not finish so do it here */
		img_hash_finish_req(req, err);
	}
	return res;
}

static int img_hash_update(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

static int img_hash_final(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_final(&rctx->fallback_req);
}

static int img_hash_finup(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int img_hash_import(struct ahash_request *req, const void *in)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_import(&rctx->fallback_req, in);
}

static int img_hash_export(struct ahash_request *req, void *out)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_export(&rctx->fallback_req, out);
}

static int img_hash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *tctx = crypto_ahash_ctx(tfm);
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	struct img_hash_dev *hdev = NULL;
	struct img_hash_dev *tmp;
	int err;

	spin_lock(&img_hash.lock);
	if (!tctx->hdev) {
		list_for_each_entry(tmp, &img_hash.dev_list, list) {
			hdev = tmp;
			break;
		}
		tctx->hdev = hdev;
	} else {
		hdev = tctx->hdev;
	}
	spin_unlock(&img_hash.lock);

	ctx->hdev = hdev;
	ctx->flags = 0;
	ctx->digsize = crypto_ahash_digestsize(tfm);

	switch (ctx->digsize) {
	case SHA1_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA1;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA256;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA224;
		break;
	case MD5_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_MD5;
		break;
	default:
		return -EINVAL;
	}

	ctx->bufcnt = 0;
	ctx->offset = 0;
	ctx->sent = 0;
	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->sgfirst = req->src;
	ctx->nents = sg_nents(ctx->sg);

	err = img_hash_handle_queue(tctx->hdev, req);

	return err;
}

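/*
 * Common tfm init: allocate the software fallback and size the request
 * context to hold the fallback request plus the bounce buffer used for
 * sub-threshold and unaligned data.
 */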
static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
{
	struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	int err = -ENOMEM;

	ctx->fallback = crypto_alloc_ahash(alg_name, 0,
					   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback)) {
		pr_err("img_hash: Could not load fallback driver.\n");
		err = PTR_ERR(ctx->fallback);
		goto err;
	}
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct img_hash_request_ctx) +
				 crypto_ahash_reqsize(ctx->fallback) +
				 IMG_HASH_DMA_THRESHOLD);

	return 0;

err:
	return err;
}

static int img_hash_cra_md5_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "md5-generic");
}

static int img_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "sha1-generic");
}

static int img_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "sha224-generic");
}

static int img_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "sha256-generic");
}

static void img_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct img_hash_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_ahash(tctx->fallback);
}

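/*
 * Interrupt handler: acknowledge the status bits, flag the result as ready
 * on a completed request and defer the rest of the work to the done tasklet.
 * The error-type interrupts are only reported.
 */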
static irqreturn_t img_irq_handler(int irq, void *dev_id)
{
	struct img_hash_dev *hdev = dev_id;
	u32 reg;

	reg = img_hash_read(hdev, CR_INTSTAT);
	img_hash_write(hdev, CR_INTCLEAR, reg);

	if (reg & CR_INT_NEW_RESULTS_SET) {
		dev_dbg(hdev->dev, "IRQ CR_INT_NEW_RESULTS_SET\n");
		if (DRIVER_FLAGS_BUSY & hdev->flags) {
			hdev->flags |= DRIVER_FLAGS_OUTPUT_READY;
			if (!(DRIVER_FLAGS_CPU & hdev->flags))
				hdev->flags |= DRIVER_FLAGS_DMA_READY;
			tasklet_schedule(&hdev->done_task);
		} else {
			dev_warn(hdev->dev,
				 "HASH interrupt when no active requests.\n");
		}
	} else if (reg & CR_INT_RESULTS_AVAILABLE) {
		dev_warn(hdev->dev,
			 "IRQ triggered before the hash had completed\n");
	} else if (reg & CR_INT_RESULT_READ_ERR) {
		dev_warn(hdev->dev,
			 "Attempt to read from an empty result queue\n");
	} else if (reg & CR_INT_MESSAGE_WRITE_ERROR) {
		dev_warn(hdev->dev,
			 "Data written before the hardware was configured\n");
	}
	return IRQ_HANDLED;
}

static struct ahash_alg img_algs[] = {
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct md5_state),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "img-md5",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_md5_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct sha1_state),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "img-sha1",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_sha1_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct sha256_state),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "img-sha224",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_sha224_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct sha256_state),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "img-sha256",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_sha256_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	}
};

static int img_register_algs(struct img_hash_dev *hdev)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(img_algs); i++) {
		err = crypto_register_ahash(&img_algs[i]);
		if (err)
			goto err_reg;
	}
	return 0;

err_reg:
	for (; i--; )
		crypto_unregister_ahash(&img_algs[i]);

	return err;
}

static int img_unregister_algs(struct img_hash_dev *hdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(img_algs); i++)
		crypto_unregister_ahash(&img_algs[i]);
	return 0;
}

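/*
 * Bottom half run after an interrupt (or a failed DMA): finish the current
 * request once its output is ready, or restart the queue when idle.
 */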
static void img_hash_done_task(unsigned long data)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)data;
	int err = 0;

	if (hdev->err == -EINVAL) {
		err = hdev->err;
		goto finish;
	}

	if (!(DRIVER_FLAGS_BUSY & hdev->flags)) {
		img_hash_handle_queue(hdev, NULL);
		return;
	}

	if (DRIVER_FLAGS_CPU & hdev->flags) {
		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~DRIVER_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (DRIVER_FLAGS_DMA_READY & hdev->flags) {
		if (DRIVER_FLAGS_DMA_ACTIVE & hdev->flags) {
			hdev->flags &= ~DRIVER_FLAGS_DMA_ACTIVE;
			img_hash_write_via_dma_stop(hdev);
			if (hdev->err) {
				err = hdev->err;
				goto finish;
			}
		}
		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~(DRIVER_FLAGS_DMA_READY |
					DRIVER_FLAGS_OUTPUT_READY);
			goto finish;
		}
	}
	return;

finish:
	img_hash_finish_req(hdev->req, err);
}

static const struct of_device_id img_hash_match[] = {
	{ .compatible = "img,hash-accelerator" },
	{}
};
MODULE_DEVICE_TABLE(of, img_hash_match);

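/*
 * Probe: map the register bank and the data write port, wire up the IRQ,
 * clocks and DMA channel, then register the MD5/SHA hash algorithms.
 */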
static int img_hash_probe(struct platform_device *pdev)
{
	struct img_hash_dev *hdev;
	struct device *dev = &pdev->dev;
	struct resource *hash_res;
	int irq;
	int err;

	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
	if (hdev == NULL)
		return -ENOMEM;

	spin_lock_init(&hdev->lock);

	hdev->dev = dev;

	platform_set_drvdata(pdev, hdev);

	INIT_LIST_HEAD(&hdev->list);

	tasklet_init(&hdev->done_task, img_hash_done_task, (unsigned long)hdev);
	tasklet_init(&hdev->dma_task, img_hash_dma_task, (unsigned long)hdev);

	crypto_init_queue(&hdev->queue, IMG_HASH_QUEUE_LENGTH);

	/* Register bank */
	hdev->io_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(hdev->io_base)) {
		err = PTR_ERR(hdev->io_base);
		dev_err(dev, "can't ioremap, returned %d\n", err);
		goto res_err;
	}

	/* Write port (DMA or CPU) */
	hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	hdev->cpu_addr = devm_ioremap_resource(dev, hash_res);
	if (IS_ERR(hdev->cpu_addr)) {
		dev_err(dev, "can't ioremap write port\n");
		err = PTR_ERR(hdev->cpu_addr);
		goto res_err;
	}
	hdev->bus_addr = hash_res->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = irq;
		goto res_err;
	}

	err = devm_request_irq(dev, irq, img_irq_handler, 0,
			       dev_name(dev), hdev);
	if (err) {
		dev_err(dev, "unable to request irq\n");
		goto res_err;
	}
	dev_dbg(dev, "using IRQ channel %d\n", irq);

	hdev->hash_clk = devm_clk_get(&pdev->dev, "hash");
	if (IS_ERR(hdev->hash_clk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(hdev->hash_clk);
		goto res_err;
	}

	hdev->sys_clk = devm_clk_get(&pdev->dev, "sys");
	if (IS_ERR(hdev->sys_clk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(hdev->sys_clk);
		goto res_err;
	}

	err = clk_prepare_enable(hdev->hash_clk);
	if (err)
		goto res_err;

	err = clk_prepare_enable(hdev->sys_clk);
	if (err)
		goto clk_err;

	err = img_hash_dma_init(hdev);
	if (err)
		goto dma_err;

	dev_dbg(dev, "using %s for DMA transfers\n",
		dma_chan_name(hdev->dma_lch));

	spin_lock(&img_hash.lock);
	list_add_tail(&hdev->list, &img_hash.dev_list);
	spin_unlock(&img_hash.lock);

	err = img_register_algs(hdev);
	if (err)
		goto err_algs;
	dev_info(dev, "Img MD5/SHA1/SHA224/SHA256 Hardware accelerator initialized\n");

	return 0;

err_algs:
	spin_lock(&img_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&img_hash.lock);
	dma_release_channel(hdev->dma_lch);
dma_err:
	clk_disable_unprepare(hdev->sys_clk);
clk_err:
	clk_disable_unprepare(hdev->hash_clk);
res_err:
	tasklet_kill(&hdev->done_task);
	tasklet_kill(&hdev->dma_task);

	return err;
}

static int img_hash_remove(struct platform_device *pdev)
{
	struct img_hash_dev *hdev;

	hdev = platform_get_drvdata(pdev);
	spin_lock(&img_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&img_hash.lock);

	img_unregister_algs(hdev);

	tasklet_kill(&hdev->done_task);
	tasklet_kill(&hdev->dma_task);

	dma_release_channel(hdev->dma_lch);

	clk_disable_unprepare(hdev->hash_clk);
	clk_disable_unprepare(hdev->sys_clk);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int img_hash_suspend(struct device *dev)
{
	struct img_hash_dev *hdev = dev_get_drvdata(dev);

	clk_disable_unprepare(hdev->hash_clk);
	clk_disable_unprepare(hdev->sys_clk);

	return 0;
}

static int img_hash_resume(struct device *dev)
{
	struct img_hash_dev *hdev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(hdev->hash_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(hdev->sys_clk);
	if (ret) {
		clk_disable_unprepare(hdev->hash_clk);
		return ret;
	}

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops img_hash_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(img_hash_suspend, img_hash_resume)
};

static struct platform_driver img_hash_driver = {
	.probe		= img_hash_probe,
	.remove		= img_hash_remove,
	.driver		= {
		.name	= "img-hash-accelerator",
		.pm	= &img_hash_pm_ops,
		.of_match_table	= of_match_ptr(img_hash_match),
	}
};
module_platform_driver(img_hash_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Imgtec SHA1/224/256 & MD5 hw accelerator driver");
MODULE_AUTHOR("Will Thomas.");
MODULE_AUTHOR("James Hartley <james.hartley@imgtec.com>");