// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <crypto/internal/hash.h>

#include "common.h"
#include "core.h"
#include "sha.h"

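/*
 * Hash state saved across export()/import() calls: the not-yet-hashed
 * bytes, the partial digest and the hardware byte counters, i.e.
 * everything needed to suspend and later resume a request.
 */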
struct qce_sha_saved_state {
	u8 pending_buf[QCE_SHA_MAX_BLOCKSIZE];
	u8 partial_digest[QCE_SHA_MAX_DIGESTSIZE];
	__be32 byte_count[2];
	unsigned int pending_buflen;
	unsigned int flags;
	u64 count;
	bool first_blk;
};

static LIST_HEAD(ahash_algs);

static const u32 std_iv_sha1[SHA256_DIGEST_SIZE / sizeof(u32)] = {
	SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 0, 0, 0
};

static const u32 std_iv_sha256[SHA256_DIGEST_SIZE / sizeof(u32)] = {
	SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
	SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
};

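/*
 * DMA completion callback: tears down the DMA mappings, copies the
 * digest and byte counters out of the result dump, restores the request
 * fields that update()/final() rewrote, and completes the request.
 */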
static void qce_ahash_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	struct qce_result_dump *result = qce->dma.result_buf;
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	int error;
	u32 status;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error);

	dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
	dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);

	memcpy(rctx->digest, result->auth_iv, digestsize);
	if (req->result && rctx->last_blk)
		memcpy(req->result, result->auth_iv, digestsize);

	rctx->byte_count[0] = cpu_to_be32(result->auth_byte_count[0]);
	rctx->byte_count[1] = cpu_to_be32(result->auth_byte_count[1]);

	error = qce_check_status(qce, &status);
	if (error < 0)
		dev_dbg(qce->dev, "ahash operation error (%x)\n", status);

	req->src = rctx->src_orig;
	req->nbytes = rctx->nbytes_orig;
	rctx->last_blk = false;
	rctx->first_blk = false;

	qce->async_req_done(tmpl->qce, error);
}

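/*
 * Invoked by the qce core for each dequeued hash request: maps the
 * source scatterlist and the result buffer for DMA, builds the
 * descriptors and starts the engine, unwinding the mappings on any
 * failure.
 */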
static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req);
	struct qce_sha_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	unsigned long flags = rctx->flags;
	int ret;

	if (IS_SHA_HMAC(flags)) {
		rctx->authkey = ctx->authkey;
		rctx->authklen = QCE_SHA_HMAC_KEY_SIZE;
	} else if (IS_CMAC(flags)) {
		rctx->authkey = ctx->authkey;
		rctx->authklen = AES_KEYSIZE_128;
	}

	rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (rctx->src_nents < 0) {
		dev_err(qce->dev, "Invalid numbers of src SG.\n");
		return rctx->src_nents;
	}

	ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
	if (!ret)
		return -EIO;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	ret = dma_map_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
	if (!ret) {
		ret = -EIO;
		goto error_unmap_src;
	}

	ret = qce_dma_prep_sgs(&qce->dma, req->src, rctx->src_nents,
			       &rctx->result_sg, 1, qce_ahash_done, async_req);
	if (ret)
		goto error_unmap_dst;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_dst:
	dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
error_unmap_src:
	dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
	return ret;
}

static int qce_ahash_init(struct ahash_request *req)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
	const u32 *std_iv = tmpl->std_iv;

	memset(rctx, 0, sizeof(*rctx));
	rctx->first_blk = true;
	rctx->last_blk = false;
	rctx->flags = tmpl->alg_flags;
	memcpy(rctx->digest, std_iv, sizeof(rctx->digest));

	return 0;
}

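/*
 * export()/import() copy the request context to and from the fixed
 * qce_sha_saved_state layout: pending bytes, partial digest, hardware
 * byte counters and flags, which is everything needed to resume a
 * partially hashed request.
 */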
static int qce_ahash_export(struct ahash_request *req, void *out)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req);
	struct qce_sha_saved_state *export_state = out;

	memcpy(export_state->pending_buf, rctx->buf, rctx->buflen);
	memcpy(export_state->partial_digest, rctx->digest, sizeof(rctx->digest));
	export_state->byte_count[0] = rctx->byte_count[0];
	export_state->byte_count[1] = rctx->byte_count[1];
	export_state->pending_buflen = rctx->buflen;
	export_state->count = rctx->count;
	export_state->first_blk = rctx->first_blk;
	export_state->flags = rctx->flags;

	return 0;
}

static int qce_ahash_import(struct ahash_request *req, const void *in)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req);
	const struct qce_sha_saved_state *import_state = in;

	memset(rctx, 0, sizeof(*rctx));
	rctx->count = import_state->count;
	rctx->buflen = import_state->pending_buflen;
	rctx->first_blk = import_state->first_blk;
	rctx->flags = import_state->flags;
	rctx->byte_count[0] = import_state->byte_count[0];
	rctx->byte_count[1] = import_state->byte_count[1];
	memcpy(rctx->buf, import_state->pending_buf, rctx->buflen);
	memcpy(rctx->digest, import_state->partial_digest, sizeof(rctx->digest));

	return 0;
}

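/*
 * Buffer sub-block tails and feed the engine whole blocks only. When
 * the data ends exactly on a block boundary, one full block is still
 * held back so that qce_ahash_final() has something to send with the
 * last-block bit set (see the long comment below).
 */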
static int qce_ahash_update(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
	struct qce_device *qce = tmpl->qce;
	struct scatterlist *sg_last, *sg;
	unsigned int total, len;
	unsigned int hash_later;
	unsigned int nbytes;
	unsigned int blocksize;

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	rctx->count += req->nbytes;

	/* check for buffer from previous updates and append it */
	total = req->nbytes + rctx->buflen;

	if (total <= blocksize) {
		scatterwalk_map_and_copy(rctx->buf + rctx->buflen, req->src,
					 0, req->nbytes, 0);
		rctx->buflen += req->nbytes;
		return 0;
	}

	/* save the original req structure fields */
	rctx->src_orig = req->src;
	rctx->nbytes_orig = req->nbytes;

	/*
	 * If we have data from a previous update, copy it to the temporary
	 * buffer; it will be chained ahead of the current request bytes.
	 */
	if (rctx->buflen)
		memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen);

	/* calculate how many bytes will be hashed later */
	hash_later = total % blocksize;

	/*
	 * At this point, there is more than one block size of data. If
	 * the available data to transfer is exactly a multiple of block
	 * size, save the last block to be transferred in qce_ahash_final
	 * (with the last block bit set) if this is indeed the end of the
	 * data stream. If not, this saved block will be transferred as
	 * part of the next update. If this block is not held back and this
	 * is indeed the end of the data stream, the digest obtained will be
	 * wrong since qce_ahash_final will see that rctx->buflen is 0 and
	 * return doing nothing, which in turn means that a digest will not
	 * be copied to the destination result buffer. qce_ahash_final
	 * cannot be made to alter this behavior and allowed to proceed if
	 * rctx->buflen is 0 because the crypto engine BAM does not allow
	 * for zero length transfers.
	 */
	if (!hash_later)
		hash_later = blocksize;

	if (hash_later) {
		unsigned int src_offset = req->nbytes - hash_later;
		scatterwalk_map_and_copy(rctx->buf, req->src, src_offset,
					 hash_later, 0);
	}

	/* here nbytes is multiple of blocksize */
	nbytes = total - hash_later;

	len = rctx->buflen;
	sg = sg_last = req->src;

	while (len < nbytes && sg) {
		if (len + sg_dma_len(sg) > nbytes)
			break;
		len += sg_dma_len(sg);
		sg_last = sg;
		sg = sg_next(sg);
	}

	if (!sg_last)
		return -EINVAL;

	if (rctx->buflen) {
		sg_init_table(rctx->sg, 2);
		sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen);
		sg_chain(rctx->sg, 2, req->src);
		req->src = rctx->sg;
	}

	req->nbytes = nbytes;
	rctx->buflen = hash_later;

	return qce->async_req_enqueue(tmpl->qce, &req->base);
}

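/*
 * Send the bytes held back by qce_ahash_update() with the last-block
 * bit set. With nothing buffered, the precomputed empty-message digest
 * is returned directly, since the BAM cannot do zero-length transfers.
 */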
static int qce_ahash_final(struct ahash_request *req)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
	struct qce_device *qce = tmpl->qce;

	if (!rctx->buflen) {
		if (tmpl->hash_zero)
			memcpy(req->result, tmpl->hash_zero,
			       tmpl->alg.ahash.halg.digestsize);
		return 0;
	}

	rctx->last_blk = true;

	rctx->src_orig = req->src;
	rctx->nbytes_orig = req->nbytes;

	memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen);
	sg_init_one(rctx->sg, rctx->tmpbuf, rctx->buflen);

	req->src = rctx->sg;
	req->nbytes = rctx->buflen;

	return qce->async_req_enqueue(tmpl->qce, &req->base);
}

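/*
 * One-shot digest: initialize the context, mark the request as both
 * first and last block, and enqueue it. Empty messages short-circuit
 * to the precomputed digest for the same zero-length-transfer reason.
 */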
static int qce_ahash_digest(struct ahash_request *req)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
	struct qce_device *qce = tmpl->qce;
	int ret;

	ret = qce_ahash_init(req);
	if (ret)
		return ret;

	rctx->src_orig = req->src;
	rctx->nbytes_orig = req->nbytes;
	rctx->first_blk = true;
	rctx->last_blk = true;

	if (!rctx->nbytes_orig) {
		if (tmpl->hash_zero)
			memcpy(req->result, tmpl->hash_zero,
			       tmpl->alg.ahash.halg.digestsize);
		return 0;
	}

	return qce->async_req_enqueue(tmpl->qce, &req->base);
}

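/*
 * HMAC setkey: keys up to one block are used as-is (zero-padded);
 * longer keys are first condensed to digestsize with the driver's own
 * unkeyed hash, as HMAC requires.
 */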
static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
				 unsigned int keylen)
{
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	struct qce_sha_ctx *ctx = crypto_tfm_ctx(&tfm->base);
	struct crypto_wait wait;
	struct ahash_request *req;
	struct scatterlist sg;
	unsigned int blocksize;
	struct crypto_ahash *ahash_tfm;
	u8 *buf;
	int ret;
	const char *alg_name;

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	memset(ctx->authkey, 0, sizeof(ctx->authkey));

	if (keylen <= blocksize) {
		memcpy(ctx->authkey, key, keylen);
		return 0;
	}

	if (digestsize == SHA1_DIGEST_SIZE)
		alg_name = "sha1-qce";
	else if (digestsize == SHA256_DIGEST_SIZE)
		alg_name = "sha256-qce";
	else
		return -EINVAL;

	ahash_tfm = crypto_alloc_ahash(alg_name, 0, 0);
	if (IS_ERR(ahash_tfm))
		return PTR_ERR(ahash_tfm);

	req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto err_free_ahash;
	}

	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	crypto_ahash_clear_flags(ahash_tfm, ~0);

	buf = kzalloc(keylen + QCE_MAX_ALIGN_SIZE, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_free_req;
	}

	memcpy(buf, key, keylen);
	sg_init_one(&sg, buf, keylen);
	ahash_request_set_crypt(req, &sg, ctx->authkey, keylen);

	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	kfree(buf);
err_free_req:
	ahash_request_free(req);
err_free_ahash:
	crypto_free_ahash(ahash_tfm);
	return ret;
}

static int qce_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct qce_sha_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_ahash_set_reqsize_dma(ahash, sizeof(struct qce_sha_reqctx));
	memset(ctx, 0, sizeof(*ctx));
	return 0;
}

struct qce_ahash_def {
	unsigned long flags;
	const char *name;
	const char *drv_name;
	unsigned int digestsize;
	unsigned int blocksize;
	unsigned int statesize;
	const u32 *std_iv;
};

static const struct qce_ahash_def ahash_def[] = {
	{
		.flags		= QCE_HASH_SHA1,
		.name		= "sha1",
		.drv_name	= "sha1-qce",
		.digestsize	= SHA1_DIGEST_SIZE,
		.blocksize	= SHA1_BLOCK_SIZE,
		.statesize	= sizeof(struct qce_sha_saved_state),
		.std_iv		= std_iv_sha1,
	},
	{
		.flags		= QCE_HASH_SHA256,
		.name		= "sha256",
		.drv_name	= "sha256-qce",
		.digestsize	= SHA256_DIGEST_SIZE,
		.blocksize	= SHA256_BLOCK_SIZE,
		.statesize	= sizeof(struct qce_sha_saved_state),
		.std_iv		= std_iv_sha256,
	},
	{
		.flags		= QCE_HASH_SHA1_HMAC,
		.name		= "hmac(sha1)",
		.drv_name	= "hmac-sha1-qce",
		.digestsize	= SHA1_DIGEST_SIZE,
		.blocksize	= SHA1_BLOCK_SIZE,
		.statesize	= sizeof(struct qce_sha_saved_state),
		.std_iv		= std_iv_sha1,
	},
	{
		.flags		= QCE_HASH_SHA256_HMAC,
		.name		= "hmac(sha256)",
		.drv_name	= "hmac-sha256-qce",
		.digestsize	= SHA256_DIGEST_SIZE,
		.blocksize	= SHA256_BLOCK_SIZE,
		.statesize	= sizeof(struct qce_sha_saved_state),
		.std_iv		= std_iv_sha256,
	},
};

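/*
 * Allocates a qce_alg_template for one ahash_def entry, wires up the
 * ahash callbacks and registers it with the crypto API; successfully
 * registered templates are tracked on ahash_algs for unregistration.
 */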
static int qce_ahash_register_one(const struct qce_ahash_def *def,
				  struct qce_device *qce)
{
	struct qce_alg_template *tmpl;
	struct ahash_alg *alg;
	struct crypto_alg *base;
	int ret;

	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
	if (!tmpl)
		return -ENOMEM;

	tmpl->std_iv = def->std_iv;

	alg = &tmpl->alg.ahash;
	alg->init = qce_ahash_init;
	alg->update = qce_ahash_update;
	alg->final = qce_ahash_final;
	alg->digest = qce_ahash_digest;
	alg->export = qce_ahash_export;
	alg->import = qce_ahash_import;
	if (IS_SHA_HMAC(def->flags))
		alg->setkey = qce_ahash_hmac_setkey;
	alg->halg.digestsize = def->digestsize;
	alg->halg.statesize = def->statesize;

	if (IS_SHA1(def->flags))
		tmpl->hash_zero = sha1_zero_message_hash;
	else if (IS_SHA256(def->flags))
		tmpl->hash_zero = sha256_zero_message_hash;

	base = &alg->halg.base;
	base->cra_blocksize = def->blocksize;
	base->cra_priority = 300;
	base->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
	base->cra_ctxsize = sizeof(struct qce_sha_ctx);
	base->cra_alignmask = 0;
	base->cra_module = THIS_MODULE;
	base->cra_init = qce_ahash_cra_init;

	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);

	INIT_LIST_HEAD(&tmpl->entry);
	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_AHASH;
	tmpl->alg_flags = def->flags;
	tmpl->qce = qce;

	ret = crypto_register_ahash(alg);
	if (ret) {
		dev_err(qce->dev, "%s registration failed\n", base->cra_name);
		kfree(tmpl);
		return ret;
	}

	list_add_tail(&tmpl->entry, &ahash_algs);
	dev_dbg(qce->dev, "%s is registered\n", base->cra_name);
	return 0;
}

static void qce_ahash_unregister(struct qce_device *qce)
{
	struct qce_alg_template *tmpl, *n;

	list_for_each_entry_safe(tmpl, n, &ahash_algs, entry) {
		crypto_unregister_ahash(&tmpl->alg.ahash);
		list_del(&tmpl->entry);
		kfree(tmpl);
	}
}

static int qce_ahash_register(struct qce_device *qce)
{
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(ahash_def); i++) {
		ret = qce_ahash_register_one(&ahash_def[i], qce);
		if (ret)
			goto err;
	}

	return 0;
err:
	qce_ahash_unregister(qce);
	return ret;
}

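/*
 * Ops bundle handed to the qce core, which routes queued requests of
 * type CRYPTO_ALG_TYPE_AHASH to the handlers above.
 */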
const struct qce_algo_ops ahash_ops = {
	.type = CRYPTO_ALG_TYPE_AHASH,
	.register_algs = qce_ahash_register,
	.unregister_algs = qce_ahash_unregister,
	.async_req_handle = qce_ahash_async_req_handle,
};