/*
 * AMD Cryptographic Coprocessor (CCP) SHA crypto API support
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"

struct ccp_sha_result {
	struct completion completion;
	int err;
};

static void ccp_sync_hash_complete(struct crypto_async_request *req, int err)
{
	struct ccp_sha_result *result = req->data;

	if (err == -EINPROGRESS)
		return;

	result->err = err;
	complete(&result->completion);
}

static int ccp_sync_hash(struct crypto_ahash *tfm, u8 *buf,
			 struct scatterlist *sg, unsigned int len)
{
	struct ccp_sha_result result;
	struct ahash_request *req;
	int ret;

	init_completion(&result.completion);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   ccp_sync_hash_complete, &result);
	ahash_request_set_crypt(req, sg, buf, len);

	ret = crypto_ahash_digest(req);
	if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
		ret = wait_for_completion_interruptible(&result.completion);
		if (!ret)
			ret = result.err;
	}

	ahash_request_free(req);

	return ret;
}

static int ccp_sha_finish_hmac(struct crypto_async_request *async_req)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct scatterlist sg[2];
	unsigned int block_size =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int digest_size = crypto_ahash_digestsize(tfm);

	sg_init_table(sg, ARRAY_SIZE(sg));
	sg_set_buf(&sg[0], ctx->u.sha.opad, block_size);
	sg_set_buf(&sg[1], rctx->ctx, digest_size);

	return ccp_sync_hash(ctx->u.sha.hmac_tfm, req->result, sg,
			     block_size + digest_size);
}

static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
	unsigned int digest_size = crypto_ahash_digestsize(tfm);

	if (ret)
		goto e_free;

	if (rctx->hash_rem) {
		/* Save remaining data to buffer */
		unsigned int offset = rctx->nbytes - rctx->hash_rem;
		scatterwalk_map_and_copy(rctx->buf, rctx->src,
					 offset, rctx->hash_rem, 0);
		rctx->buf_count = rctx->hash_rem;
	} else
		rctx->buf_count = 0;

	/* Update result area if supplied */
	if (req->result)
		memcpy(req->result, rctx->ctx, digest_size);

	/* If we're doing an HMAC, we need to perform that on the final op */
	if (rctx->final && ctx->u.sha.key_len)
		ret = ccp_sha_finish_hmac(async_req);

e_free:
	sg_free_table(&rctx->data_sg);

	return ret;
}

static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
			     unsigned int final)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct scatterlist *sg;
	unsigned int block_size =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int sg_count;
	gfp_t gfp;
	u64 len;
	int ret;

	len = (u64)rctx->buf_count + (u64)nbytes;

	if (!final && (len <= block_size)) {
		scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
					 0, nbytes, 0);
		rctx->buf_count += nbytes;

		return 0;
	}

	rctx->src = req->src;
	rctx->nbytes = nbytes;

	rctx->final = final;
	rctx->hash_rem = final ? 0 : len & (block_size - 1);
	rctx->hash_cnt = len - rctx->hash_rem;
	if (!final && !rctx->hash_rem) {
		/* CCP can't do zero length final, so keep some data around */
		rctx->hash_cnt -= block_size;
		rctx->hash_rem = block_size;
	}

	/* Initialize the context scatterlist */
	sg_init_one(&rctx->ctx_sg, rctx->ctx, sizeof(rctx->ctx));

	sg = NULL;
	if (rctx->buf_count && nbytes) {
		/* Build the data scatterlist table - allocate enough entries
		 * for both data pieces (buffer and input data)
		 */
		gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
			GFP_KERNEL : GFP_ATOMIC;
		sg_count = sg_nents(req->src) + 1;
		ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp);
		if (ret)
			return ret;

		sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
		sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
		sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);

		sg = rctx->data_sg.sgl;
	} else if (rctx->buf_count) {
		sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);

		sg = &rctx->buf_sg;
	} else if (nbytes) {
		sg = req->src;
	}

	rctx->msg_bits += (rctx->hash_cnt << 3);	/* Total in bits */

	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
	INIT_LIST_HEAD(&rctx->cmd.entry);
	rctx->cmd.engine = CCP_ENGINE_SHA;
	rctx->cmd.u.sha.type = rctx->type;
	rctx->cmd.u.sha.ctx = &rctx->ctx_sg;
	rctx->cmd.u.sha.ctx_len = sizeof(rctx->ctx);
	rctx->cmd.u.sha.src = sg;
	rctx->cmd.u.sha.src_len = rctx->hash_cnt;
	rctx->cmd.u.sha.final = rctx->final;
	rctx->cmd.u.sha.msg_bits = rctx->msg_bits;

	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

	return ret;
}

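/*
 * Start a new hash: load the algorithm's initial digest value into the
 * request context and, when an HMAC key has been set, pre-load the buffer
 * with the (key ^ ipad) block so it is hashed ahead of the message data.
 */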
static int ccp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct ccp_crypto_ahash_alg *alg =
		ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm));
	unsigned int block_size =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	memset(rctx, 0, sizeof(*rctx));

	memcpy(rctx->ctx, alg->init, sizeof(rctx->ctx));
	rctx->type = alg->type;

	if (ctx->u.sha.key_len) {
		/* Buffer the HMAC key for first update */
		memcpy(rctx->buf, ctx->u.sha.ipad, block_size);
		rctx->buf_count = block_size;
	}

	return 0;
}

static int ccp_sha_update(struct ahash_request *req)
{
	return ccp_do_sha_update(req, req->nbytes, 0);
}

static int ccp_sha_final(struct ahash_request *req)
{
	return ccp_do_sha_update(req, 0, 1);
}

static int ccp_sha_finup(struct ahash_request *req)
{
	return ccp_do_sha_update(req, req->nbytes, 1);
}

static int ccp_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = ccp_sha_init(req);
	if (ret)
		return ret;

	return ccp_sha_finup(req);
}

static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int key_len)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct scatterlist sg;
	unsigned int block_size =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int digest_size = crypto_ahash_digestsize(tfm);
	int i, ret;

	/* Set to zero until complete */
	ctx->u.sha.key_len = 0;

	/* Clear key area to provide zero padding for keys smaller
	 * than the block size
	 */
	memset(ctx->u.sha.key, 0, sizeof(ctx->u.sha.key));

	if (key_len > block_size) {
		/* Must hash the input key */
		sg_init_one(&sg, key, key_len);
		ret = ccp_sync_hash(tfm, ctx->u.sha.key, &sg, key_len);
		if (ret) {
			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
			return -EINVAL;
		}

		key_len = digest_size;
	} else
		memcpy(ctx->u.sha.key, key, key_len);

	for (i = 0; i < block_size; i++) {
		ctx->u.sha.ipad[i] = ctx->u.sha.key[i] ^ 0x36;
		ctx->u.sha.opad[i] = ctx->u.sha.key[i] ^ 0x5c;
	}

	ctx->u.sha.key_len = key_len;

	return 0;
}

static int ccp_sha_cra_init(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);

	ctx->complete = ccp_sha_complete;
	ctx->u.sha.key_len = 0;

	crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_sha_req_ctx));

	return 0;
}

static void ccp_sha_cra_exit(struct crypto_tfm *tfm)
{
}

static int ccp_hmac_sha_cra_init(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(tfm);
	struct crypto_ahash *hmac_tfm;

	hmac_tfm = crypto_alloc_ahash(alg->child_alg,
				      CRYPTO_ALG_TYPE_AHASH, 0);
	if (IS_ERR(hmac_tfm)) {
		pr_warn("could not load driver %s needed for HMAC support\n",
			alg->child_alg);
		return PTR_ERR(hmac_tfm);
	}

	ctx->u.sha.hmac_tfm = hmac_tfm;

	return ccp_sha_cra_init(tfm);
}

static void ccp_hmac_sha_cra_exit(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->u.sha.hmac_tfm)
		crypto_free_ahash(ctx->u.sha.hmac_tfm);

	ccp_sha_cra_exit(tfm);
}

static const __be32 sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
	cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
	cpu_to_be32(SHA1_H4), 0, 0, 0,
};

static const __be32 sha224_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
	cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
	cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
	cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
};

static const __be32 sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
	cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
	cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
	cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};

struct ccp_sha_def {
	const char *name;
	const char *drv_name;
	const __be32 *init;
	enum ccp_sha_type type;
	u32 digest_size;
	u32 block_size;
};

static struct ccp_sha_def sha_algs[] = {
	{
		.name		= "sha1",
		.drv_name	= "sha1-ccp",
		.init		= sha1_init,
		.type		= CCP_SHA_TYPE_1,
		.digest_size	= SHA1_DIGEST_SIZE,
		.block_size	= SHA1_BLOCK_SIZE,
	},
	{
		.name		= "sha224",
		.drv_name	= "sha224-ccp",
		.init		= sha224_init,
		.type		= CCP_SHA_TYPE_224,
		.digest_size	= SHA224_DIGEST_SIZE,
		.block_size	= SHA224_BLOCK_SIZE,
	},
	{
		.name		= "sha256",
		.drv_name	= "sha256-ccp",
		.init		= sha256_init,
		.type		= CCP_SHA_TYPE_256,
		.digest_size	= SHA256_DIGEST_SIZE,
		.block_size	= SHA256_BLOCK_SIZE,
	},
};

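/*
 * Each table entry is registered below twice: once as the plain hash
 * ("sha1" / "sha1-ccp") and once as its HMAC wrapper ("hmac(sha1)" /
 * "hmac-sha1-ccp").  A minimal sketch of how another kernel user would
 * reach these algorithms through the generic ahash API (illustrative
 * only, error handling omitted, not part of this driver):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_crypt(req, sg, digest, len);
 *	crypto_ahash_digest(req);	// may return -EINPROGRESS/-EBUSY
 *
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */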
static int ccp_register_hmac_alg(struct list_head *head,
				 const struct ccp_sha_def *def,
				 const struct ccp_crypto_ahash_alg *base_alg)
{
	struct ccp_crypto_ahash_alg *ccp_alg;
	struct ahash_alg *alg;
	struct hash_alg_common *halg;
	struct crypto_alg *base;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	/* Copy the base algorithm and only change what's necessary */
	*ccp_alg = *base_alg;
	INIT_LIST_HEAD(&ccp_alg->entry);

	strncpy(ccp_alg->child_alg, def->name, CRYPTO_MAX_ALG_NAME);

	alg = &ccp_alg->alg;
	alg->setkey = ccp_sha_setkey;

	halg = &alg->halg;

	base = &halg->base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", def->name);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s",
		 def->drv_name);
	base->cra_init = ccp_hmac_sha_cra_init;
	base->cra_exit = ccp_hmac_sha_cra_exit;

	ret = crypto_register_ahash(alg);
	if (ret) {
		pr_err("%s ahash algorithm registration error (%d)\n",
		       base->cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	return 0;
}

static int ccp_register_sha_alg(struct list_head *head,
				const struct ccp_sha_def *def)
{
	struct ccp_crypto_ahash_alg *ccp_alg;
	struct ahash_alg *alg;
	struct hash_alg_common *halg;
	struct crypto_alg *base;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	INIT_LIST_HEAD(&ccp_alg->entry);

	ccp_alg->init = def->init;
	ccp_alg->type = def->type;

	alg = &ccp_alg->alg;
	alg->init = ccp_sha_init;
	alg->update = ccp_sha_update;
	alg->final = ccp_sha_final;
	alg->finup = ccp_sha_finup;
	alg->digest = ccp_sha_digest;

	halg = &alg->halg;
	halg->digestsize = def->digest_size;

	base = &halg->base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);
	base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC |
			  CRYPTO_ALG_KERN_DRIVER_ONLY |
			  CRYPTO_ALG_NEED_FALLBACK;
	base->cra_blocksize = def->block_size;
	base->cra_ctxsize = sizeof(struct ccp_ctx);
	base->cra_priority = CCP_CRA_PRIORITY;
	base->cra_type = &crypto_ahash_type;
	base->cra_init = ccp_sha_cra_init;
	base->cra_exit = ccp_sha_cra_exit;
	base->cra_module = THIS_MODULE;

	ret = crypto_register_ahash(alg);
	if (ret) {
		pr_err("%s ahash algorithm registration error (%d)\n",
		       base->cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	ret = ccp_register_hmac_alg(head, def, ccp_alg);

	return ret;
}

int ccp_register_sha_algs(struct list_head *head)
{
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(sha_algs); i++) {
		ret = ccp_register_sha_alg(head, &sha_algs[i]);
		if (ret)
			return ret;
	}

	return 0;
}