// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <crypto/internal/hash.h>

#include "common.h"
#include "core.h"
#include "sha.h"

/* crypto hw padding constant for first operation */
#define SHA_PADDING		64
#define SHA_PADDING_MASK	(SHA_PADDING - 1)

static LIST_HEAD(ahash_algs);

static const u32 std_iv_sha1[SHA256_DIGEST_SIZE / sizeof(u32)] = {
	SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 0, 0, 0
};

static const u32 std_iv_sha256[SHA256_DIGEST_SIZE / sizeof(u32)] = {
	SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
	SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
};
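
/*
 * DMA completion callback. It tears down the finished transfer, unmaps
 * the source and result scatterlists, copies the intermediate digest and
 * byte counters out of the shared result dump buffer, restores the
 * request fields saved before the transfer, and reports completion
 * through qce->async_req_done().
 */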
static void qce_ahash_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	struct qce_result_dump *result = qce->dma.result_buf;
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	int error;
	u32 status;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error);

	dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
	dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);

	memcpy(rctx->digest, result->auth_iv, digestsize);
	if (req->result)
		memcpy(req->result, result->auth_iv, digestsize);

	rctx->byte_count[0] = cpu_to_be32(result->auth_byte_count[0]);
	rctx->byte_count[1] = cpu_to_be32(result->auth_byte_count[1]);

	error = qce_check_status(qce, &status);
	if (error < 0)
		dev_dbg(qce->dev, "ahash operation error (%x)\n", status);

	req->src = rctx->src_orig;
	req->nbytes = rctx->nbytes_orig;
	rctx->last_blk = false;
	rctx->first_blk = false;

	qce->async_req_done(tmpl->qce, error);
}
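
/*
 * Queue handler invoked by the qce core for each ahash request. It maps
 * req->src for DMA, maps the device result buffer, arms the DMA engine
 * with qce_dma_prep_sgs() (completion lands in qce_ahash_done() above)
 * and then starts the crypto block via qce_start(). On failure the
 * mappings are unwound in reverse order through the error labels.
 */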
static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_sha_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	unsigned long flags = rctx->flags;
	int ret;

	if (IS_SHA_HMAC(flags)) {
		rctx->authkey = ctx->authkey;
		rctx->authklen = QCE_SHA_HMAC_KEY_SIZE;
	} else if (IS_CMAC(flags)) {
		rctx->authkey = ctx->authkey;
		rctx->authklen = AES_KEYSIZE_128;
	}

	rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (rctx->src_nents < 0) {
		dev_err(qce->dev, "Invalid number of src SG.\n");
		return rctx->src_nents;
	}

	ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
	if (!ret)
		return -EIO;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	ret = dma_map_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
	if (!ret) {
		ret = -EIO;
		goto error_unmap_src;
	}

	ret = qce_dma_prep_sgs(&qce->dma, req->src, rctx->src_nents,
			       &rctx->result_sg, 1, qce_ahash_done, async_req);
	if (ret)
		goto error_unmap_dst;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type, 0, 0);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_dst:
	dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
error_unmap_src:
	dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
	return ret;
}
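
/*
 * Reset the per-request context and seed the digest with the standard
 * IV of the algorithm, so the first block programmed into the hardware
 * starts from the canonical SHA-1/SHA-256 initial state.
 */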
static int qce_ahash_init(struct ahash_request *req)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
	const u32 *std_iv = tmpl->std_iv;

	memset(rctx, 0, sizeof(*rctx));
	rctx->first_blk = true;
	rctx->last_blk = false;
	rctx->flags = tmpl->alg_flags;
	memcpy(rctx->digest, std_iv, sizeof(rctx->digest));

	return 0;
}
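
/*
 * Export the partial hash state (byte count, intermediate digest and the
 * buffered partial block) in the generic sha1_state/sha256_state layout,
 * so a request can be suspended and later resumed via qce_ahash_import().
 */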
static int qce_ahash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned long flags = rctx->flags;
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));

	if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
		struct sha1_state *out_state = out;

		out_state->count = rctx->count;
		qce_cpu_to_be32p_array((__be32 *)out_state->state,
				       rctx->digest, digestsize);
		memcpy(out_state->buffer, rctx->buf, blocksize);
	} else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
		struct sha256_state *out_state = out;

		out_state->count = rctx->count;
		qce_cpu_to_be32p_array((__be32 *)out_state->state,
				       rctx->digest, digestsize);
		memcpy(out_state->buf, rctx->buf, blocksize);
	} else {
		return -EINVAL;
	}

	return 0;
}
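
/*
 * Common import helper. byte_count[] holds the number of bytes already
 * consumed by the hardware, rounded down to a block boundary (the low
 * SHA_PADDING_MASK bits are cleared); the unconsumed remainder lives in
 * rctx->buf. For imports past the first block of an HMAC the count is
 * raised by SHA_PADDING (64 bytes), matching the hardware padding the
 * engine applies when the first block is processed.
 */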
static int qce_import_common(struct ahash_request *req, u64 in_count,
			     const u32 *state, const u8 *buffer, bool hmac)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;
	u64 count = in_count;

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
	rctx->count = in_count;
	memcpy(rctx->buf, buffer, blocksize);

	if (in_count <= blocksize) {
		rctx->first_blk = 1;
	} else {
		rctx->first_blk = 0;
		/*
		 * For HMAC, there is a hardware padding done when first block
		 * is set. Therefore the byte_count must be incremented by 64
		 * after the first block operation.
		 */
		if (hmac)
			count += SHA_PADDING;
	}

	rctx->byte_count[0] = (__force __be32)(count & ~SHA_PADDING_MASK);
	rctx->byte_count[1] = (__force __be32)(count >> 32);
	qce_cpu_to_be32p_array((__be32 *)rctx->digest, (const u8 *)state,
			       digestsize);
	rctx->buflen = (unsigned int)(in_count & (blocksize - 1));

	return 0;
}

static int qce_ahash_import(struct ahash_request *req, const void *in)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned long flags = rctx->flags;
	bool hmac = IS_SHA_HMAC(flags);
	int ret = -EINVAL;

	if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
		const struct sha1_state *state = in;

		ret = qce_import_common(req, state->count, state->state,
					state->buffer, hmac);
	} else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
		const struct sha256_state *state = in;

		ret = qce_import_common(req, state->count, state->state,
					state->buf, hmac);
	}

	return ret;
}
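
/*
 * The hardware hashes full blocks only, so update() buffers any tail
 * that is not block-aligned: requests smaller than one block are simply
 * accumulated in rctx->buf, while larger ones are trimmed down to a
 * multiple of the block size, with the remainder copied back into
 * rctx->buf to be hashed by a later update() or final(). Previously
 * buffered bytes are prepended to req->src through a two-entry chained
 * scatterlist.
 */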
static int qce_ahash_update(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
	struct qce_device *qce = tmpl->qce;
	struct scatterlist *sg_last, *sg;
	unsigned int total, len;
	unsigned int hash_later;
	unsigned int nbytes;
	unsigned int blocksize;

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	rctx->count += req->nbytes;

	/* check for buffer from previous updates and append it */
	total = req->nbytes + rctx->buflen;

	if (total <= blocksize) {
		scatterwalk_map_and_copy(rctx->buf + rctx->buflen, req->src,
					 0, req->nbytes, 0);
		rctx->buflen += req->nbytes;
		return 0;
	}

	/* save the original req structure fields */
	rctx->src_orig = req->src;
	rctx->nbytes_orig = req->nbytes;

	/*
	 * If we have data from previous updates, copy it to the buffer. The
	 * old data will be combined with the current request bytes.
	 */
	if (rctx->buflen)
		memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen);

	/* calculate how many bytes will be hashed later */
	hash_later = total % blocksize;
	if (hash_later) {
		unsigned int src_offset = req->nbytes - hash_later;

		scatterwalk_map_and_copy(rctx->buf, req->src, src_offset,
					 hash_later, 0);
	}

	/* here nbytes is a multiple of blocksize */
	nbytes = total - hash_later;

	len = rctx->buflen;
	sg = sg_last = req->src;

	while (len < nbytes && sg) {
		if (len + sg_dma_len(sg) > nbytes)
			break;
		len += sg_dma_len(sg);
		sg_last = sg;
		sg = sg_next(sg);
	}

	if (!sg_last)
		return -EINVAL;

	sg_mark_end(sg_last);

	if (rctx->buflen) {
		sg_init_table(rctx->sg, 2);
		sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen);
		sg_chain(rctx->sg, 2, req->src);
		req->src = rctx->sg;
	}

	req->nbytes = nbytes;
	rctx->buflen = hash_later;

	return qce->async_req_enqueue(tmpl->qce, &req->base);
}
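
/*
 * final() only needs to flush whatever update() left buffered: the tail
 * is handed to the hardware as the last block and the engine applies the
 * SHA padding itself. With nothing buffered there is no work left to do.
 */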
static int qce_ahash_final(struct ahash_request *req)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
	struct qce_device *qce = tmpl->qce;

	if (!rctx->buflen)
		return 0;

	rctx->last_blk = true;

	rctx->src_orig = req->src;
	rctx->nbytes_orig = req->nbytes;

	memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen);
	sg_init_one(rctx->sg, rctx->tmpbuf, rctx->buflen);

	req->src = rctx->sg;
	req->nbytes = rctx->buflen;

	return qce->async_req_enqueue(tmpl->qce, &req->base);
}
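
/*
 * One-shot digest: reinitialize the request context, mark the request as
 * both first and last block, and queue it in a single pass.
 */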
static int qce_ahash_digest(struct ahash_request *req)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
	struct qce_device *qce = tmpl->qce;
	int ret;

	ret = qce_ahash_init(req);
	if (ret)
		return ret;

	rctx->src_orig = req->src;
	rctx->nbytes_orig = req->nbytes;
	rctx->first_blk = true;
	rctx->last_blk = true;

	return qce->async_req_enqueue(tmpl->qce, &req->base);
}
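
/*
 * HMAC setkey follows the standard HMAC rule: keys up to one block are
 * used as-is (zero padded in ctx->authkey), longer keys are first
 * digested down to digestsize bytes. The digest is computed synchronously
 * on this driver's own "sha1-qce"/"sha256-qce" ahash via crypto_wait_req().
 */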
static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
				 unsigned int keylen)
{
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	struct qce_sha_ctx *ctx = crypto_tfm_ctx(&tfm->base);
	struct crypto_wait wait;
	struct ahash_request *req;
	struct scatterlist sg;
	unsigned int blocksize;
	struct crypto_ahash *ahash_tfm;
	u8 *buf;
	int ret;
	const char *alg_name;

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	memset(ctx->authkey, 0, sizeof(ctx->authkey));

	if (keylen <= blocksize) {
		memcpy(ctx->authkey, key, keylen);
		return 0;
	}

	if (digestsize == SHA1_DIGEST_SIZE)
		alg_name = "sha1-qce";
	else if (digestsize == SHA256_DIGEST_SIZE)
		alg_name = "sha256-qce";
	else
		return -EINVAL;

	ahash_tfm = crypto_alloc_ahash(alg_name, 0, 0);
	if (IS_ERR(ahash_tfm))
		return PTR_ERR(ahash_tfm);

	req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto err_free_ahash;
	}

	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	crypto_ahash_clear_flags(ahash_tfm, ~0);

	buf = kzalloc(keylen + QCE_MAX_ALIGN_SIZE, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_free_req;
	}

	memcpy(buf, key, keylen);
	sg_init_one(&sg, buf, keylen);
	ahash_request_set_crypt(req, &sg, ctx->authkey, keylen);

	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	kfree(buf);
err_free_req:
	ahash_request_free(req);
err_free_ahash:
	crypto_free_ahash(ahash_tfm);
	return ret;
}
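
/* Per-tfm initialization: set the request context size and zero the ctx. */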
static int qce_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct qce_sha_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_ahash_set_reqsize(ahash, sizeof(struct qce_sha_reqctx));
	memset(ctx, 0, sizeof(*ctx));
	return 0;
}

struct qce_ahash_def {
	unsigned long flags;
	const char *name;
	const char *drv_name;
	unsigned int digestsize;
	unsigned int blocksize;
	unsigned int statesize;
	const u32 *std_iv;
};
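
/*
 * Table of the hashes this engine exposes. .name is the generic algorithm
 * name seen by the crypto API, .drv_name the driver-specific one.
 */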
static const struct qce_ahash_def ahash_def[] = {
	{
		.flags		= QCE_HASH_SHA1,
		.name		= "sha1",
		.drv_name	= "sha1-qce",
		.digestsize	= SHA1_DIGEST_SIZE,
		.blocksize	= SHA1_BLOCK_SIZE,
		.statesize	= sizeof(struct sha1_state),
		.std_iv		= std_iv_sha1,
	},
	{
		.flags		= QCE_HASH_SHA256,
		.name		= "sha256",
		.drv_name	= "sha256-qce",
		.digestsize	= SHA256_DIGEST_SIZE,
		.blocksize	= SHA256_BLOCK_SIZE,
		.statesize	= sizeof(struct sha256_state),
		.std_iv		= std_iv_sha256,
	},
	{
		.flags		= QCE_HASH_SHA1_HMAC,
		.name		= "hmac(sha1)",
		.drv_name	= "hmac-sha1-qce",
		.digestsize	= SHA1_DIGEST_SIZE,
		.blocksize	= SHA1_BLOCK_SIZE,
		.statesize	= sizeof(struct sha1_state),
		.std_iv		= std_iv_sha1,
	},
	{
		.flags		= QCE_HASH_SHA256_HMAC,
		.name		= "hmac(sha256)",
		.drv_name	= "hmac-sha256-qce",
		.digestsize	= SHA256_DIGEST_SIZE,
		.blocksize	= SHA256_BLOCK_SIZE,
		.statesize	= sizeof(struct sha256_state),
		.std_iv		= std_iv_sha256,
	},
};

static int qce_ahash_register_one(const struct qce_ahash_def *def,
				  struct qce_device *qce)
{
	struct qce_alg_template *tmpl;
	struct ahash_alg *alg;
	struct crypto_alg *base;
	int ret;

	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
	if (!tmpl)
		return -ENOMEM;

	tmpl->std_iv = def->std_iv;

	alg = &tmpl->alg.ahash;
	alg->init = qce_ahash_init;
	alg->update = qce_ahash_update;
	alg->final = qce_ahash_final;
	alg->digest = qce_ahash_digest;
	alg->export = qce_ahash_export;
	alg->import = qce_ahash_import;
	if (IS_SHA_HMAC(def->flags))
		alg->setkey = qce_ahash_hmac_setkey;
	alg->halg.digestsize = def->digestsize;
	alg->halg.statesize = def->statesize;

	base = &alg->halg.base;
	base->cra_blocksize = def->blocksize;
	base->cra_priority = 300;
	base->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
	base->cra_ctxsize = sizeof(struct qce_sha_ctx);
	base->cra_alignmask = 0;
	base->cra_module = THIS_MODULE;
	base->cra_init = qce_ahash_cra_init;

	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);

	INIT_LIST_HEAD(&tmpl->entry);
	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_AHASH;
	tmpl->alg_flags = def->flags;
	tmpl->qce = qce;

	ret = crypto_register_ahash(alg);
	if (ret) {
		kfree(tmpl);
		dev_err(qce->dev, "%s registration failed\n", base->cra_name);
		return ret;
	}

	list_add_tail(&tmpl->entry, &ahash_algs);
	dev_dbg(qce->dev, "%s is registered\n", base->cra_name);
	return 0;
}
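
/*
 * Tear-down walks the global ahash_algs list, unregistering and freeing
 * every template added by qce_ahash_register_one().
 */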
static void qce_ahash_unregister(struct qce_device *qce)
{
	struct qce_alg_template *tmpl, *n;

	list_for_each_entry_safe(tmpl, n, &ahash_algs, entry) {
		crypto_unregister_ahash(&tmpl->alg.ahash);
		list_del(&tmpl->entry);
		kfree(tmpl);
	}
}

static int qce_ahash_register(struct qce_device *qce)
{
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(ahash_def); i++) {
		ret = qce_ahash_register_one(&ahash_def[i], qce);
		if (ret)
			goto err;
	}

	return 0;
err:
	qce_ahash_unregister(qce);
	return ret;
}

const struct qce_algo_ops ahash_ops = {
	.type = CRYPTO_ALG_TYPE_AHASH,
	.register_algs = qce_ahash_register,
	.unregister_algs = qce_ahash_unregister,
	.async_req_handle = qce_ahash_async_req_handle,
};