/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <crypto/internal/hash.h>

#include "common.h"
#include "core.h"
#include "sha.h"
/* crypto hw padding constant for first operation */
#define SHA_PADDING		64
#define SHA_PADDING_MASK	(SHA_PADDING - 1)

static LIST_HEAD(ahash_algs);
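
/*
 * Standard initial hash values for SHA-1 and SHA-256. Both tables are sized
 * for the larger SHA-256 state; the unused SHA-1 words are zero-padded to
 * fill the eight-word table.
 */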
static const u32 std_iv_sha1[SHA256_DIGEST_SIZE / sizeof(u32)] = {
	SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 0, 0, 0
};
static const u32 std_iv_sha256[SHA256_DIGEST_SIZE / sizeof(u32)] = {
	SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
	SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
};
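
/*
 * DMA completion callback for a hash operation: tear down the DMA mappings,
 * copy the digest and byte counters out of the hardware result dump, restore
 * the request fields saved at submission time and notify the qce core.
 */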
static void qce_ahash_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	struct qce_result_dump *result = qce->dma.result_buf;
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	int error;
	u32 status;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error);

	dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
	dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);

	memcpy(rctx->digest, result->auth_iv, digestsize);
	if (req->result)
		memcpy(req->result, result->auth_iv, digestsize);

	rctx->byte_count[0] = cpu_to_be32(result->auth_byte_count[0]);
	rctx->byte_count[1] = cpu_to_be32(result->auth_byte_count[1]);

	error = qce_check_status(qce, &status);
	if (error < 0)
		dev_dbg(qce->dev, "ahash operation error (%x)\n", status);

	/* restore the original request fields saved before submission */
	req->src = rctx->src_orig;
	req->nbytes = rctx->nbytes_orig;
	rctx->last_blk = false;
	rctx->first_blk = false;

	qce->async_req_done(tmpl->qce, error);
}
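
/*
 * Submit one request to the crypto engine: pick the authentication key for
 * HMAC/CMAC, DMA-map the source scatterlist and the result buffer, prepare
 * the DMA descriptors with qce_ahash_done() as completion callback and start
 * the engine.
 */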
static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_sha_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	unsigned long flags = rctx->flags;
	int ret;

	if (IS_SHA_HMAC(flags)) {
		rctx->authkey = ctx->authkey;
		rctx->authklen = QCE_SHA_HMAC_KEY_SIZE;
	} else if (IS_CMAC(flags)) {
		rctx->authkey = ctx->authkey;
		rctx->authklen = AES_KEYSIZE_128;
	}

	rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (rctx->src_nents < 0) {
		dev_err(qce->dev, "Invalid numbers of src SG.\n");
		return rctx->src_nents;
	}

	/* dma_map_sg() returns the number of mapped entries, 0 on failure */
	ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
	if (!ret)
		return -EIO;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	ret = dma_map_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
	if (!ret) {
		ret = -EIO;
		goto error_unmap_src;
	}

	ret = qce_dma_prep_sgs(&qce->dma, req->src, rctx->src_nents,
			       &rctx->result_sg, 1, qce_ahash_done, async_req);
	if (ret)
		goto error_unmap_dst;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type, 0, 0);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_dst:
	dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
error_unmap_src:
	dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
	return ret;
}
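
/*
 * Reset the per-request context and seed the digest with the algorithm's
 * standard initial values.
 */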
static int qce_ahash_init(struct ahash_request *req)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
	const u32 *std_iv = tmpl->std_iv;

	memset(rctx, 0, sizeof(*rctx));
	rctx->first_blk = true;
	rctx->last_blk = false;
	rctx->flags = tmpl->alg_flags;
	memcpy(rctx->digest, std_iv, sizeof(rctx->digest));

	return 0;
}
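
/*
 * Export the partial state in the generic sha1_state/sha256_state layout so
 * the computation can later be resumed through qce_ahash_import().
 */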
static int qce_ahash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned long flags = rctx->flags;
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));

	if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
		struct sha1_state *out_state = out;

		out_state->count = rctx->count;
		qce_cpu_to_be32p_array((__be32 *)out_state->state,
				       rctx->digest, digestsize);
		memcpy(out_state->buffer, rctx->buf, blocksize);
	} else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
		struct sha256_state *out_state = out;

		out_state->count = rctx->count;
		qce_cpu_to_be32p_array((__be32 *)out_state->state,
				       rctx->digest, digestsize);
		memcpy(out_state->buf, rctx->buf, blocksize);
	} else {
		return -EINVAL;
	}

	return 0;
}
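
/*
 * Common import helper: restore the buffered tail and digest words and
 * rebuild the hardware byte counters from the saved 64-bit message count.
 */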
static int qce_import_common(struct ahash_request *req, u64 in_count,
			     const u32 *state, const u8 *buffer, bool hmac)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;
	u64 count = in_count;

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
	rctx->count = in_count;
	memcpy(rctx->buf, buffer, blocksize);

	if (in_count <= blocksize) {
		rctx->first_blk = 1;
	} else {
		rctx->first_blk = 0;
		/*
		 * For HMAC, there is a hardware padding done when first block
		 * is set. Therefore the byte_count must be incremented by 64
		 * after the first block operation.
		 */
		if (hmac)
			count += SHA_PADDING;
	}

	rctx->byte_count[0] = (__force __be32)(count & ~SHA_PADDING_MASK);
	rctx->byte_count[1] = (__force __be32)(count >> 32);
	qce_cpu_to_be32p_array((__be32 *)rctx->digest, (const u8 *)state,
			       digestsize);
	rctx->buflen = (unsigned int)(in_count & (blocksize - 1));

	return 0;
}
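
/* Import a state previously produced by qce_ahash_export(). */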
static int qce_ahash_import(struct ahash_request *req, const void *in)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned long flags = rctx->flags;
	bool hmac = IS_SHA_HMAC(flags);
	int ret = -EINVAL;

	if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
		const struct sha1_state *state = in;

		ret = qce_import_common(req, state->count, state->state,
					state->buffer, hmac);
	} else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
		const struct sha256_state *state = in;

		ret = qce_import_common(req, state->count, state->state,
					state->buf, hmac);
	}

	return ret;
}
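
/*
 * Queue an update. Data that does not fill a whole block is kept back in the
 * request context buffer; the remainder is trimmed to a multiple of the block
 * size, chained behind any previously buffered bytes and handed to the engine
 * through the qce asynchronous queue.
 */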
static int qce_ahash_update(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
	struct qce_device *qce = tmpl->qce;
	struct scatterlist *sg_last, *sg;
	unsigned int total, len;
	unsigned int hash_later;
	unsigned int nbytes;
	unsigned int blocksize;

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	rctx->count += req->nbytes;

	/* check for buffer from previous updates and append it */
	total = req->nbytes + rctx->buflen;

	if (total <= blocksize) {
		scatterwalk_map_and_copy(rctx->buf + rctx->buflen, req->src,
					 0, req->nbytes, 0);
		rctx->buflen += req->nbytes;
		return 0;
	}

	/* save the original req structure fields */
	rctx->src_orig = req->src;
	rctx->nbytes_orig = req->nbytes;

	/*
	 * If we have data from a previous update, copy it to the temporary
	 * buffer; it will be combined with the current request bytes.
	 */
	if (rctx->buflen)
		memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen);

	/* calculate how many bytes will be hashed later */
	hash_later = total % blocksize;
	if (hash_later) {
		unsigned int src_offset = req->nbytes - hash_later;

		scatterwalk_map_and_copy(rctx->buf, req->src, src_offset,
					 hash_later, 0);
	}

	/* here nbytes is multiple of blocksize */
	nbytes = total - hash_later;

	len = rctx->buflen;
	sg = sg_last = req->src;

	while (len < nbytes && sg) {
		if (len + sg_dma_len(sg) > nbytes)
			break;
		len += sg_dma_len(sg);
		sg_last = sg;
		sg = sg_next(sg);
	}

	if (!sg_last)
		return -EINVAL;

	sg_mark_end(sg_last);

	if (rctx->buflen) {
		sg_init_table(rctx->sg, 2);
		sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen);
		sg_chain(rctx->sg, 2, req->src);
		req->src = rctx->sg;
	}

	req->nbytes = nbytes;
	rctx->buflen = hash_later;

	return qce->async_req_enqueue(tmpl->qce, &req->base);
}
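
/*
 * Final step: hash whatever is still buffered in the context as the last
 * block. If nothing is buffered, the request completes immediately.
 */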
static int qce_ahash_final(struct ahash_request *req)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
	struct qce_device *qce = tmpl->qce;

	if (!rctx->buflen)
		return 0;

	rctx->last_blk = true;

	rctx->src_orig = req->src;
	rctx->nbytes_orig = req->nbytes;

	memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen);
	sg_init_one(rctx->sg, rctx->tmpbuf, rctx->buflen);

	req->src = rctx->sg;
	req->nbytes = rctx->buflen;

	return qce->async_req_enqueue(tmpl->qce, &req->base);
}
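
/*
 * One-shot digest: reinitialise the context and submit the whole request as
 * a single first-and-last block.
 */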
static int qce_ahash_digest(struct ahash_request *req)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
	struct qce_device *qce = tmpl->qce;
	int ret;

	ret = qce_ahash_init(req);
	if (ret)
		return ret;

	rctx->src_orig = req->src;
	rctx->nbytes_orig = req->nbytes;
	rctx->first_blk = true;
	rctx->last_blk = true;

	return qce->async_req_enqueue(tmpl->qce, &req->base);
}
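
/*
 * qce_ahash_hmac_setkey() below condenses keys longer than the block size by
 * hashing them with the corresponding "sha*-qce" algorithm; this small
 * completion wrapper lets it wait synchronously for that inner request.
 */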
struct qce_ahash_result {
	struct completion completion;
	int error;
};
static void qce_digest_complete(struct crypto_async_request *req, int error)
{
	struct qce_ahash_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}
static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
				 unsigned int keylen)
{
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	struct qce_sha_ctx *ctx = crypto_tfm_ctx(&tfm->base);
	struct qce_ahash_result result;
	struct ahash_request *req;
	struct scatterlist sg;
	unsigned int blocksize;
	struct crypto_ahash *ahash_tfm;
	u8 *buf;
	int ret;
	const char *alg_name;

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	memset(ctx->authkey, 0, sizeof(ctx->authkey));

	if (keylen <= blocksize) {
		memcpy(ctx->authkey, key, keylen);
		return 0;
	}

	if (digestsize == SHA1_DIGEST_SIZE)
		alg_name = "sha1-qce";
	else if (digestsize == SHA256_DIGEST_SIZE)
		alg_name = "sha256-qce";
	else
		return -EINVAL;

	ahash_tfm = crypto_alloc_ahash(alg_name, CRYPTO_ALG_TYPE_AHASH,
				       CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(ahash_tfm))
		return PTR_ERR(ahash_tfm);

	req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto err_free_ahash;
	}

	init_completion(&result.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   qce_digest_complete, &result);
	crypto_ahash_clear_flags(ahash_tfm, ~0);

	buf = kzalloc(keylen + QCE_MAX_ALIGN_SIZE, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_free_req;
	}

	memcpy(buf, key, keylen);
	sg_init_one(&sg, buf, keylen);
	ahash_request_set_crypt(req, &sg, ctx->authkey, keylen);

	ret = crypto_ahash_digest(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible(&result.completion);
		if (!ret)
			ret = result.error;
	}

	if (ret)
		crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);

	kfree(buf);
err_free_req:
	ahash_request_free(req);
err_free_ahash:
	crypto_free_ahash(ahash_tfm);
	return ret;
}
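
/* Per-tfm initialisation: set the request context size and clear the key. */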
static int qce_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct qce_sha_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_ahash_set_reqsize(ahash, sizeof(struct qce_sha_reqctx));
	memset(ctx, 0, sizeof(*ctx));
	return 0;
}
struct qce_ahash_def {
	unsigned long flags;
	const char *name;
	const char *drv_name;
	unsigned int digestsize;
	unsigned int blocksize;
	unsigned int statesize;
	const u32 *std_iv;
};
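
/* The hash and HMAC variants exposed by this driver. */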
static const struct qce_ahash_def ahash_def[] = {
	{
		.flags		= QCE_HASH_SHA1,
		.name		= "sha1",
		.drv_name	= "sha1-qce",
		.digestsize	= SHA1_DIGEST_SIZE,
		.blocksize	= SHA1_BLOCK_SIZE,
		.statesize	= sizeof(struct sha1_state),
		.std_iv		= std_iv_sha1,
	},
	{
		.flags		= QCE_HASH_SHA256,
		.name		= "sha256",
		.drv_name	= "sha256-qce",
		.digestsize	= SHA256_DIGEST_SIZE,
		.blocksize	= SHA256_BLOCK_SIZE,
		.statesize	= sizeof(struct sha256_state),
		.std_iv		= std_iv_sha256,
	},
	{
		.flags		= QCE_HASH_SHA1_HMAC,
		.name		= "hmac(sha1)",
		.drv_name	= "hmac-sha1-qce",
		.digestsize	= SHA1_DIGEST_SIZE,
		.blocksize	= SHA1_BLOCK_SIZE,
		.statesize	= sizeof(struct sha1_state),
		.std_iv		= std_iv_sha1,
	},
	{
		.flags		= QCE_HASH_SHA256_HMAC,
		.name		= "hmac(sha256)",
		.drv_name	= "hmac-sha256-qce",
		.digestsize	= SHA256_DIGEST_SIZE,
		.blocksize	= SHA256_BLOCK_SIZE,
		.statesize	= sizeof(struct sha256_state),
		.std_iv		= std_iv_sha256,
	},
};
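
/*
 * Allocate a qce_alg_template for one table entry, fill in the ahash_alg
 * callbacks and crypto_alg parameters and register it with the crypto API.
 */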
static int qce_ahash_register_one(const struct qce_ahash_def *def,
				  struct qce_device *qce)
{
	struct qce_alg_template *tmpl;
	struct ahash_alg *alg;
	struct crypto_alg *base;
	int ret;

	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
	if (!tmpl)
		return -ENOMEM;

	tmpl->std_iv = def->std_iv;

	alg = &tmpl->alg.ahash;
	alg->init = qce_ahash_init;
	alg->update = qce_ahash_update;
	alg->final = qce_ahash_final;
	alg->digest = qce_ahash_digest;
	alg->export = qce_ahash_export;
	alg->import = qce_ahash_import;
	if (IS_SHA_HMAC(def->flags))
		alg->setkey = qce_ahash_hmac_setkey;
	alg->halg.digestsize = def->digestsize;
	alg->halg.statesize = def->statesize;

	base = &alg->halg.base;
	base->cra_blocksize = def->blocksize;
	base->cra_priority = 300;
	base->cra_flags = CRYPTO_ALG_ASYNC;
	base->cra_ctxsize = sizeof(struct qce_sha_ctx);
	base->cra_alignmask = 0;
	base->cra_module = THIS_MODULE;
	base->cra_init = qce_ahash_cra_init;
	INIT_LIST_HEAD(&base->cra_list);

	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);

	INIT_LIST_HEAD(&tmpl->entry);
	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_AHASH;
	tmpl->alg_flags = def->flags;
	tmpl->qce = qce;

	ret = crypto_register_ahash(alg);
	if (ret) {
		kfree(tmpl);
		dev_err(qce->dev, "%s registration failed\n", base->cra_name);
		return ret;
	}

	list_add_tail(&tmpl->entry, &ahash_algs);
	dev_dbg(qce->dev, "%s is registered\n", base->cra_name);
	return 0;
}
static void qce_ahash_unregister(struct qce_device *qce)
{
	struct qce_alg_template *tmpl, *n;

	list_for_each_entry_safe(tmpl, n, &ahash_algs, entry) {
		crypto_unregister_ahash(&tmpl->alg.ahash);
		list_del(&tmpl->entry);
		kfree(tmpl);
	}
}
static int qce_ahash_register(struct qce_device *qce)
{
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(ahash_def); i++) {
		ret = qce_ahash_register_one(&ahash_def[i], qce);
		if (ret)
			goto err;
	}

	return 0;
err:
	qce_ahash_unregister(qce);
	return ret;
}
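
/* Hooks through which the qce core registers, unregisters and dispatches ahash requests. */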
const struct qce_algo_ops ahash_ops = {
	.type = CRYPTO_ALG_TYPE_AHASH,
	.register_algs = qce_ahash_register,
	.unregister_algs = qce_ahash_unregister,
	.async_req_handle = qce_ahash_async_req_handle,
};