// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ce-hash.c - hardware cryptographic offloader for
 * Allwinner H3/A64/H5/H2+/H6/R40 SoC
 *
 * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com>
 *
 * This file adds support for MD5 and SHA1/SHA224/SHA256/SHA384/SHA512.
 *
 * The datasheet can be found in Documentation/arch/arm/sunxi.rst
 */
#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <linux/bottom_half.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "sun8i-ce.h"
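/*
 * The CE has no way to export or import a partial hash state, so all
 * init/update/final/finup/export/import requests are delegated to a
 * software fallback ahash; only one-shot digest() requests that meet
 * the hardware constraints run on the Crypto Engine itself.
 */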
int sun8i_ce_hash_init_tfm(struct crypto_ahash *tfm)
{
	struct sun8i_ce_hash_tfm_ctx *op = crypto_ahash_ctx(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(tfm);
	struct sun8i_ce_alg_template *algt;
	int err;

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
	op->ce = algt->ce;

	/* FALLBACK */
	op->fallback_tfm = crypto_alloc_ahash(crypto_ahash_alg_name(tfm), 0,
					      CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(algt->ce->dev, "Fallback driver could not be loaded\n");
		return PTR_ERR(op->fallback_tfm);
	}

	crypto_ahash_set_statesize(tfm,
				   crypto_ahash_statesize(op->fallback_tfm));
	crypto_ahash_set_reqsize(tfm,
				 sizeof(struct sun8i_ce_hash_reqctx) +
				 crypto_ahash_reqsize(op->fallback_tfm));

	memcpy(algt->fbname, crypto_ahash_driver_name(op->fallback_tfm),
	       CRYPTO_MAX_ALG_NAME);

	err = pm_runtime_get_sync(op->ce->dev);
	if (err < 0)
		goto error_pm;

	return 0;

error_pm:
	pm_runtime_put_noidle(op->ce->dev);
	crypto_free_ahash(op->fallback_tfm);
	return err;
}

void sun8i_ce_hash_exit_tfm(struct crypto_ahash *tfm)
{
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	crypto_free_ahash(tfmctx->fallback_tfm);
	pm_runtime_put_sync_suspend(tfmctx->ce->dev);
}

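/*
 * The request-level entry points below simply mirror the caller's
 * request onto the pre-allocated fallback request. Only the flag
 * relevant to the fallback (CRYPTO_TFM_REQ_MAY_SLEEP) is forwarded.
 */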
int sun8i_ce_hash_init(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	memset(rctx, 0, sizeof(struct sun8i_ce_hash_reqctx));

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

int sun8i_ce_hash_export(struct ahash_request *areq, void *out)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_export(&rctx->fallback_req, out);
}

int sun8i_ce_hash_import(struct ahash_request *areq, const void *in)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_import(&rctx->fallback_req, in);
}

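/*
 * final/update/finup also run on the fallback; in debug builds the
 * stat_fb counter records how many requests took this path.
 */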
int sun8i_ce_hash_final(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = areq->result;

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
		struct sun8i_ce_alg_template *algt __maybe_unused;
		struct ahash_alg *alg = crypto_ahash_alg(tfm);

		algt = container_of(alg, struct sun8i_ce_alg_template,
				    alg.hash.base);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
		algt->stat_fb++;
#endif
	}

	return crypto_ahash_final(&rctx->fallback_req);
}

int sun8i_ce_hash_update(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

int sun8i_ce_hash_finup(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;
	rctx->fallback_req.result = areq->result;

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
		struct sun8i_ce_alg_template *algt __maybe_unused;
		struct ahash_alg *alg = crypto_ahash_alg(tfm);

		algt = container_of(alg, struct sun8i_ce_alg_template,
				    alg.hash.base);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
		algt->stat_fb++;
#endif
	}

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int sun8i_ce_hash_digest_fb(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;
	rctx->fallback_req.result = areq->result;

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
		struct sun8i_ce_alg_template *algt __maybe_unused;
		struct ahash_alg *alg = crypto_ahash_alg(tfm);

		algt = container_of(alg, struct sun8i_ce_alg_template,
				    alg.hash.base);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
		algt->stat_fb++;
#endif
	}

	return crypto_ahash_digest(&rctx->fallback_req);
}

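/*
 * The hardware only handles full, well-aligned buffers:
 * - an empty message cannot be hashed by the CE;
 * - one task slot must stay free for the padding scatterlist,
 *   hence at most MAX_SG - 1 source entries;
 * - every source chunk must be a multiple of 4 bytes long and
 *   4-byte aligned, since task lengths are counted in 32-bit words.
 * Anything else is redirected to the software fallback.
 */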
static bool sun8i_ce_hash_need_fallback(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ce_alg_template *algt;
	struct scatterlist *sg;

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);

	if (areq->nbytes == 0) {
		algt->stat_fb_len0++;
		return true;
	}
	/* we need to reserve one SG for padding one */
	if (sg_nents_for_len(areq->src, areq->nbytes) > MAX_SG - 1) {
		algt->stat_fb_maxsg++;
		return true;
	}
	sg = areq->src;
	while (sg) {
		if (sg->length % 4) {
			algt->stat_fb_srclen++;
			return true;
		}
		if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
			algt->stat_fb_srcali++;
			return true;
		}
		sg = sg_next(sg);
	}

	return false;
}

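/*
 * digest() is the only entry point that can use the hardware: the
 * request is checked once more, then queued on a crypto engine flow
 * and completed asynchronously by sun8i_ce_hash_run().
 */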
int sun8i_ce_hash_digest(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct sun8i_ce_alg_template *algt;
	struct sun8i_ce_dev *ce;
	struct crypto_engine *engine;
	struct scatterlist *sg;
	int nr_sgs, e, i;

	if (sun8i_ce_hash_need_fallback(areq))
		return sun8i_ce_hash_digest_fb(areq);

	nr_sgs = sg_nents_for_len(areq->src, areq->nbytes);
	if (nr_sgs > MAX_SG - 1)
		return sun8i_ce_hash_digest_fb(areq);

	for_each_sg(areq->src, sg, nr_sgs, i) {
		if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
			return sun8i_ce_hash_digest_fb(areq);
	}

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
	ce = algt->ce;

	e = sun8i_ce_get_engine_number(ce);
	rctx->flow = e;
	engine = ce->chanlist[e].engine;

	return crypto_transfer_hash_request_to_engine(engine, areq);
}

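/*
 * hash_pad() emits standard Merkle-Damgard padding into @buf starting
 * at 32-bit word index @padi: a 0x80 byte, zero fill up to the length
 * field, then the message length in bits: 64-bit little-endian for
 * MD5, 64-bit big-endian for SHA-1/SHA-224/SHA-256 and 128-bit
 * big-endian for SHA-384/SHA-512. It returns the new word count, or 0
 * if @bufsize would overflow.
 */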
static u64 hash_pad(__le32 *buf, unsigned int bufsize, u64 padi, u64 byte_count, bool le, int bs)
{
	u64 fill, min_fill, j, k;
	__be64 *bebits;
	__le64 *lebits;

	j = padi;
	buf[j++] = cpu_to_le32(0x80);

	if (bs == 64) {
		fill = 64 - (byte_count % 64);
		min_fill = 2 * sizeof(u32) + sizeof(u32);
	} else {
		fill = 128 - (byte_count % 128);
		min_fill = 4 * sizeof(u32) + sizeof(u32);
	}

	if (fill < min_fill)
		fill += bs;

	k = j;
	j += (fill - min_fill) / sizeof(u32);
	if (j * 4 > bufsize) {
		pr_err("%s OVERFLOW %llu\n", __func__, j);
		return 0;
	}
	for (; k < j; k++)
		buf[k] = 0;

	if (le) {
		/* MD5 */
		lebits = (__le64 *)&buf[j];
		*lebits = cpu_to_le64(byte_count << 3);
		j += 2;
	} else if (bs == 64) {
		/* sha1 sha224 sha256 */
		bebits = (__be64 *)&buf[j];
		*bebits = cpu_to_be64(byte_count << 3);
		j += 2;
	} else {
		/* sha384 sha512 */
		bebits = (__be64 *)&buf[j];
		*bebits = cpu_to_be64(byte_count >> 61);
		j += 2;
		bebits = (__be64 *)&buf[j];
		*bebits = cpu_to_be64(byte_count << 3);
		j += 2;
	}
	if (j * 4 > bufsize) {
		pr_err("%s OVERFLOW %llu\n", __func__, j);
		return 0;
	}

	return j;
}

int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
{
	struct ahash_request *areq = container_of(breq, struct ahash_request, base);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct sun8i_ce_alg_template *algt;
	struct sun8i_ce_dev *ce;
	struct sun8i_ce_flow *chan;
	struct ce_task *cet;
	struct scatterlist *sg;
	int nr_sgs, flow, err;
	unsigned int len;
	u32 common;
	u64 byte_count;
	__le32 *bf;
	void *buf = NULL;
	int j, i, todo;
	void *result = NULL;
	u64 bs;
	int digestsize;
	dma_addr_t addr_res, addr_pad;
	int ns = sg_nents_for_len(areq->src, areq->nbytes);

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
	ce = algt->ce;

	bs = algt->alg.hash.base.halg.base.cra_blocksize;
	digestsize = algt->alg.hash.base.halg.digestsize;
	/*
	 * SHA-224 and SHA-384 are truncated forms: the engine writes the
	 * full SHA-256/SHA-512 state, so size the result buffer for the
	 * untruncated digest.
	 */
	if (digestsize == SHA224_DIGEST_SIZE)
		digestsize = SHA256_DIGEST_SIZE;
	if (digestsize == SHA384_DIGEST_SIZE)
		digestsize = SHA512_DIGEST_SIZE;

	/* the padding could be up to two blocks. */
	buf = kcalloc(2, bs, GFP_KERNEL | GFP_DMA);
	if (!buf) {
		err = -ENOMEM;
		goto theend;
	}
	bf = (__le32 *)buf;

	result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
	if (!result) {
		err = -ENOMEM;
		goto theend;
	}

	flow = rctx->flow;
	chan = &ce->chanlist[flow];

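	/*
	 * Each flow (channel) has its own task descriptor and completion,
	 * so requests dispatched to different flows can be in flight on
	 * the CE at the same time.
	 */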
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	algt->stat_req++;
#endif
	dev_dbg(ce->dev, "%s %s len=%d\n", __func__, crypto_tfm_alg_name(areq->base.tfm), areq->nbytes);

	cet = chan->tl;
	memset(cet, 0, sizeof(struct ce_task));

	cet->t_id = cpu_to_le32(flow);
	common = ce->variant->alg_hash[algt->ce_algo_id];
	common |= CE_COMM_INT;
	cet->t_common_ctl = cpu_to_le32(common);

	cet->t_sym_ctl = 0;
	cet->t_asym_ctl = 0;

	nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
	if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
		dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
		err = -EINVAL;
		goto theend;
	}

	len = areq->nbytes;
	for_each_sg(areq->src, sg, nr_sgs, i) {
		cet->t_src[i].addr = desc_addr_val_le32(ce, sg_dma_address(sg));
		todo = min(len, sg_dma_len(sg));
		cet->t_src[i].len = cpu_to_le32(todo / 4);
		len -= todo;
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %d\n", len);
		err = -EINVAL;
		goto theend;
	}

	addr_res = dma_map_single(ce->dev, result, digestsize, DMA_FROM_DEVICE);
	cet->t_dst[0].addr = desc_addr_val_le32(ce, addr_res);
	cet->t_dst[0].len = cpu_to_le32(digestsize / 4);
	if (dma_mapping_error(ce->dev, addr_res)) {
		dev_err(ce->dev, "DMA map dest\n");
		err = -EINVAL;
		goto theend;
	}

	byte_count = areq->nbytes;
	j = 0;

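	/*
	 * The CE does not pad the message itself: the final block(s) of
	 * MD5/SHA padding are generated on the CPU and chained in as the
	 * last source descriptor.
	 */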
	switch (algt->ce_algo_id) {
	case CE_ID_HASH_MD5:
		j = hash_pad(bf, 2 * bs, j, byte_count, true, bs);
		break;
	case CE_ID_HASH_SHA1:
	case CE_ID_HASH_SHA224:
	case CE_ID_HASH_SHA256:
		j = hash_pad(bf, 2 * bs, j, byte_count, false, bs);
		break;
	case CE_ID_HASH_SHA384:
	case CE_ID_HASH_SHA512:
		j = hash_pad(bf, 2 * bs, j, byte_count, false, bs);
		break;
	}
	if (!j) {
		err = -EINVAL;
		goto theend;
	}

	addr_pad = dma_map_single(ce->dev, buf, j * 4, DMA_TO_DEVICE);
	cet->t_src[i].addr = desc_addr_val_le32(ce, addr_pad);
	cet->t_src[i].len = cpu_to_le32(j);
	if (dma_mapping_error(ce->dev, addr_pad)) {
		dev_err(ce->dev, "DMA error on padding SG\n");
		err = -EINVAL;
		goto theend;
	}

	/* t_dlen is in bits on some variants, in 32-bit words on the others */
	if (ce->variant->hash_t_dlen_in_bits)
		cet->t_dlen = cpu_to_le32((areq->nbytes + j * 4) * 8);
	else
		cet->t_dlen = cpu_to_le32(areq->nbytes / 4 + j);

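	/*
	 * The timeout handed to the completion wait scales with the
	 * request size, so larger requests get proportionally more time.
	 */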
	chan->timeout = areq->nbytes;

	err = sun8i_ce_run_task(ce, flow, crypto_ahash_alg_name(tfm));

	dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);
	dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
	dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);

	memcpy(areq->result, result, algt->alg.hash.base.halg.digestsize);
theend:
	kfree(buf);
	kfree(result);
	local_bh_disable();
	crypto_finalize_hash_request(engine, breq, err);
	local_bh_enable();
	return 0;
}