// SPDX-License-Identifier: GPL-2.0-only
/*
 * Crypto acceleration support for Rockchip RK3288
 *
 * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
 *
 * Author: Zain Wang <zain.wang@rock-chips.com>
 *
 * Some ideas are from marvell/cesa.c and s5p-sss.c driver.
 */

#include <linux/unaligned.h>
#include <crypto/internal/hash.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include "rk3288_crypto.h"

/*
 * The IC cannot hash a zero-length message, so the precomputed hash of
 * the empty message is copied out directly when one is requested.
 */

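/*
 * The hash unit DMAs req->src directly, so every scatterlist entry must be
 * word aligned and a whole number of 32-bit words long; anything else is
 * handed to the software fallback.
 */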
static bool rk_ahash_need_fallback(struct ahash_request *req)
{
	struct scatterlist *sg;

	sg = req->src;
	while (sg) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
			return true;
		}
		if (sg->length % 4) {
			return true;
		}
		sg = sg_next(sg);
	}
	return false;
}

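/* Run the whole digest through the software fallback transform. */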
static int rk_ahash_digest_fb(struct ahash_request *areq)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct rk_ahash_ctx *tfmctx = crypto_ahash_ctx(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(tfm);
	struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash.base);

	algt->stat_fb++;

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;
	rctx->fallback_req.result = areq->result;

	return crypto_ahash_digest(&rctx->fallback_req);
}

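/*
 * Return the well-known hash of the empty message for the requested
 * digest size instead of touching the hardware.
 */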
static int zero_message_process(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int rk_digest_size = crypto_ahash_digestsize(tfm);

	switch (rk_digest_size) {
	case SHA1_DIGEST_SIZE:
		memcpy(req->result, sha1_zero_message_hash, rk_digest_size);
		break;
	case SHA256_DIGEST_SIZE:
		memcpy(req->result, sha256_zero_message_hash, rk_digest_size);
		break;
	case MD5_DIGEST_SIZE:
		memcpy(req->result, md5_zero_message_hash, rk_digest_size);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

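/*
 * Program the hash unit for one request: flush the hash FIFO, clear the
 * digest output registers, enable and acknowledge the hash DMA interrupts,
 * then set the hash mode, byte swapping and total message length.
 */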
static void rk_ahash_reg_init(struct ahash_request *req,
			      struct rk_crypto_info *dev)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	int reg_status;

	reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) |
		     RK_CRYPTO_HASH_FLUSH | _SBF(0xffff, 16);
	CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status);

	reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL);
	reg_status &= (~RK_CRYPTO_HASH_FLUSH);
	reg_status |= _SBF(0xffff, 16);
	CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status);

	memset_io(dev->reg + RK_CRYPTO_HASH_DOUT_0, 0, 32);

	CRYPTO_WRITE(dev, RK_CRYPTO_INTENA, RK_CRYPTO_HRDMA_ERR_ENA |
					    RK_CRYPTO_HRDMA_DONE_ENA);

	CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, RK_CRYPTO_HRDMA_ERR_INT |
					    RK_CRYPTO_HRDMA_DONE_INT);

	CRYPTO_WRITE(dev, RK_CRYPTO_HASH_CTRL, rctx->mode |
					       RK_CRYPTO_HASH_SWAP_DO);

	CRYPTO_WRITE(dev, RK_CRYPTO_CONF, RK_CRYPTO_BYTESWAP_HRFIFO |
					  RK_CRYPTO_BYTESWAP_BRFIFO |
					  RK_CRYPTO_BYTESWAP_BTFIFO);

	CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, req->nbytes);
}

static int rk_ahash_init(struct ahash_request *req)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

static int rk_ahash_update(struct ahash_request *req)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

static int rk_ahash_final(struct ahash_request *req)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_final(&rctx->fallback_req);
}

static int rk_ahash_finup(struct ahash_request *req)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int rk_ahash_import(struct ahash_request *req, const void *in)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_import(&rctx->fallback_req, in);
}

static int rk_ahash_export(struct ahash_request *req, void *out)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_export(&rctx->fallback_req, out);
}

static int rk_ahash_digest(struct ahash_request *req)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct rk_crypto_info *dev;
	struct crypto_engine *engine;

	if (rk_ahash_need_fallback(req))
		return rk_ahash_digest_fb(req);

	if (!req->nbytes)
		return zero_message_process(req);

	dev = get_rk_crypto();

	rctx->dev = dev;
	engine = dev->engine;

	return crypto_transfer_hash_request_to_engine(engine, req);
}

static void crypto_ahash_dma_start(struct rk_crypto_info *dev, struct scatterlist *sg)
{
	CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, sg_dma_address(sg));
	CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, sg_dma_len(sg) / 4);
	CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_HASH_START |
					  (RK_CRYPTO_HASH_START << 16));
}

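/* Map (and later unmap) the source scatterlist for device DMA. */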
static int rk_hash_prepare(struct crypto_engine *engine, void *breq)
{
	struct ahash_request *areq = container_of(breq, struct ahash_request, base);
	struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
	struct rk_crypto_info *rkc = rctx->dev;
	int ret;

	ret = dma_map_sg(rkc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
	if (ret <= 0)
		return -EINVAL;

	rctx->nrsg = ret;

	return 0;
}

static void rk_hash_unprepare(struct crypto_engine *engine, void *breq)
{
	struct ahash_request *areq = container_of(breq, struct ahash_request, base);
	struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
	struct rk_crypto_info *rkc = rctx->dev;

	dma_unmap_sg(rkc->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE);
}

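/*
 * Process one request on the engine: map the source, select the hash mode
 * from the digest size, program the registers, feed each scatterlist entry
 * through DMA, then poll for completion and read back the digest.
 */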
static int rk_hash_run(struct crypto_engine *engine, void *breq)
{
	struct ahash_request *areq = container_of(breq, struct ahash_request, base);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
	struct ahash_alg *alg = crypto_ahash_alg(tfm);
	struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash.base);
	struct scatterlist *sg = areq->src;
	struct rk_crypto_info *rkc = rctx->dev;
	int err;
	int i;
	u32 v;

	err = pm_runtime_resume_and_get(rkc->dev);
	if (err)
		return err;

	err = rk_hash_prepare(engine, breq);
	if (err)
		goto theend;

	rctx->mode = 0;

	algt->stat_req++;
	rkc->nreq++;

	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		rctx->mode = RK_CRYPTO_HASH_SHA1;
		break;
	case SHA256_DIGEST_SIZE:
		rctx->mode = RK_CRYPTO_HASH_SHA256;
		break;
	case MD5_DIGEST_SIZE:
		rctx->mode = RK_CRYPTO_HASH_MD5;
		break;
	default:
		err = -EINVAL;
		goto theend;
	}

	rk_ahash_reg_init(areq, rkc);

	while (sg) {
		reinit_completion(&rkc->complete);
		rkc->status = 0;
		crypto_ahash_dma_start(rkc, sg);
		wait_for_completion_interruptible_timeout(&rkc->complete,
							  msecs_to_jiffies(2000));
		if (!rkc->status) {
			dev_err(rkc->dev, "DMA timeout\n");
			err = -EFAULT;
			goto theend;
		}
		sg = sg_next(sg);
	}

	/*
	 * It takes some time to process the data after the last DMA
	 * transfer finishes.
	 *
	 * The waiting time depends on the length of that last chunk, so a
	 * fixed delay cannot be used here. Polling every 10us keeps the
	 * overhead low while still reacting quickly once the hash unit is
	 * done.
	 */
	readl_poll_timeout(rkc->reg + RK_CRYPTO_HASH_STS, v, v == 0, 10, 1000);

	for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) {
		v = readl(rkc->reg + RK_CRYPTO_HASH_DOUT_0 + i * 4);
		put_unaligned_le32(v, areq->result + i * 4);
	}

theend:
	pm_runtime_put_autosuspend(rkc->dev);

	rk_hash_unprepare(engine, breq);

	local_bh_disable();
	crypto_finalize_hash_request(engine, breq, err);
	local_bh_enable();

	return 0;
}

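/*
 * Allocate the software fallback ahash and size the request context so
 * that the fallback request can live at the end of ours.
 */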
static int rk_hash_init_tfm(struct crypto_ahash *tfm)
{
	struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);
	const char *alg_name = crypto_ahash_alg_name(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(tfm);
	struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash.base);

	tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0,
						CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tctx->fallback_tfm)) {
		dev_err(algt->dev->dev, "Could not load fallback driver.\n");
		return PTR_ERR(tctx->fallback_tfm);
	}

	crypto_ahash_set_reqsize(tfm,
				 sizeof(struct rk_ahash_rctx) +
				 crypto_ahash_reqsize(tctx->fallback_tfm));

	return 0;
}

static void rk_hash_exit_tfm(struct crypto_ahash *tfm)
{
	struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);

	crypto_free_ahash(tctx->fallback_tfm);
}

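/*
 * Algorithm templates registered by the core driver. Each one advertises an
 * async hash with a mandatory fallback and routes requests to rk_hash_run
 * through the crypto engine.
 */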
struct rk_crypto_tmp rk_ahash_sha1 = {
	.type = CRYPTO_ALG_TYPE_AHASH,
	.alg.hash.base = {
		.init = rk_ahash_init,
		.update = rk_ahash_update,
		.final = rk_ahash_final,
		.finup = rk_ahash_finup,
		.export = rk_ahash_export,
		.import = rk_ahash_import,
		.digest = rk_ahash_digest,
		.init_tfm = rk_hash_init_tfm,
		.exit_tfm = rk_hash_exit_tfm,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct sha1_state),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "rk-sha1",
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct rk_ahash_ctx),
				.cra_module = THIS_MODULE,
			}
		}
	},
	.alg.hash.op = {
		.do_one_request = rk_hash_run,
	},
};

struct rk_crypto_tmp rk_ahash_sha256 = {
	.type = CRYPTO_ALG_TYPE_AHASH,
	.alg.hash.base = {
		.init = rk_ahash_init,
		.update = rk_ahash_update,
		.final = rk_ahash_final,
		.finup = rk_ahash_finup,
		.export = rk_ahash_export,
		.import = rk_ahash_import,
		.digest = rk_ahash_digest,
		.init_tfm = rk_hash_init_tfm,
		.exit_tfm = rk_hash_exit_tfm,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct sha256_state),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "rk-sha256",
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct rk_ahash_ctx),
				.cra_module = THIS_MODULE,
			}
		}
	},
	.alg.hash.op = {
		.do_one_request = rk_hash_run,
	},
};

struct rk_crypto_tmp rk_ahash_md5 = {
	.type = CRYPTO_ALG_TYPE_AHASH,
	.alg.hash.base = {
		.init = rk_ahash_init,
		.update = rk_ahash_update,
		.final = rk_ahash_final,
		.finup = rk_ahash_finup,
		.export = rk_ahash_export,
		.import = rk_ahash_import,
		.digest = rk_ahash_digest,
		.init_tfm = rk_hash_init_tfm,
		.exit_tfm = rk_hash_exit_tfm,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct md5_state),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "rk-md5",
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct rk_ahash_ctx),
				.cra_module = THIS_MODULE,
			}
		}
	},
	.alg.hash.op = {
		.do_one_request = rk_hash_run,
	},
};