// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) SHA crypto API support
 *
 * Copyright (C) 2013,2018 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/hmac.h>
#include <crypto/internal/hash.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/scatterwalk.h>
#include <linux/string.h>

#include "ccp-crypto.h"
static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_sha_req_ctx *rctx = ahash_request_ctx_dma(req);
	unsigned int digest_size = crypto_ahash_digestsize(tfm);

	if (ret)
		goto e_free;

	if (rctx->hash_rem) {
		/* Save remaining data to buffer */
		unsigned int offset = rctx->nbytes - rctx->hash_rem;

		scatterwalk_map_and_copy(rctx->buf, rctx->src,
					 offset, rctx->hash_rem, 0);
		rctx->buf_count = rctx->hash_rem;
	}

	/* Update result area if supplied */
	if (req->result && rctx->final)
		memcpy(req->result, rctx->ctx, digest_size);

e_free:
	sg_free_table(&rctx->data_sg);

	return ret;
}
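/*
 * Data is sent to the CCP in block-sized units.  An update that fits
 * within one block is simply appended to rctx->buf; otherwise, on a
 * non-final pass, the trailing partial block (hash_rem) is carried
 * over by ccp_sha_complete() above.  Worked example for a 64-byte
 * block: 10 bytes buffered plus a 100-byte update gives len = 110,
 * hash_rem = 110 & 63 = 46, so 64 bytes go to the hardware and 46 are
 * re-buffered for the next pass.
 */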
static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
			     unsigned int final)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	struct ccp_sha_req_ctx *rctx = ahash_request_ctx_dma(req);
	struct scatterlist *sg;
	unsigned int block_size =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int sg_count;
	gfp_t gfp;
	u64 len;
	int ret;
	len = (u64)rctx->buf_count + (u64)nbytes;

	if (!final && (len <= block_size)) {
		scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
					 0, nbytes, 0);
		rctx->buf_count += nbytes;

		return 0;
	}
	rctx->src = req->src;
	rctx->nbytes = nbytes;

	rctx->final = final;
	rctx->hash_rem = final ? 0 : len & (block_size - 1);
	rctx->hash_cnt = len - rctx->hash_rem;
	if (!final && !rctx->hash_rem) {
		/* CCP can't do zero length final, so keep some data around */
		rctx->hash_cnt -= block_size;
		rctx->hash_rem = block_size;
	}
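	/*
	 * The command source can take one of three shapes: previously
	 * buffered bytes combined with new request data (stitched into
	 * one sg table below), the buffered bytes alone, or the
	 * request's own scatterlist.
	 */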
	/* Initialize the context scatterlist */
	sg_init_one(&rctx->ctx_sg, rctx->ctx, sizeof(rctx->ctx));
	sg = NULL;
	if (rctx->buf_count && nbytes) {
		/* Build the data scatterlist table - allocate enough entries
		 * for both data pieces (buffer and input data)
		 */
		gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
			GFP_KERNEL : GFP_ATOMIC;
		sg_count = sg_nents(req->src) + 1;
		ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp);
		if (ret)
			return ret;

		sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
		sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
		if (!sg) {
			ret = -EINVAL;
			goto e_free;
		}
		sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);
		if (!sg) {
			ret = -EINVAL;
			goto e_free;
		}
		sg_mark_end(sg);

		sg = rctx->data_sg.sgl;
	} else if (rctx->buf_count) {
		sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);

		sg = &rctx->buf_sg;
	} else if (nbytes) {
		sg = req->src;
	}
	rctx->msg_bits += (rctx->hash_cnt << 3);	/* Total in bits */

	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
	INIT_LIST_HEAD(&rctx->cmd.entry);
	rctx->cmd.engine = CCP_ENGINE_SHA;
	rctx->cmd.u.sha.type = rctx->type;
	rctx->cmd.u.sha.ctx = &rctx->ctx_sg;
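	/*
	 * ctx_len tells the CCP how many bytes of rctx->ctx hold the
	 * running digest to be restored and saved for this hash type.
	 */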
	switch (rctx->type) {
	case CCP_SHA_TYPE_1:
		rctx->cmd.u.sha.ctx_len = SHA1_DIGEST_SIZE;
		break;
	case CCP_SHA_TYPE_224:
		rctx->cmd.u.sha.ctx_len = SHA224_DIGEST_SIZE;
		break;
	case CCP_SHA_TYPE_256:
		rctx->cmd.u.sha.ctx_len = SHA256_DIGEST_SIZE;
		break;
	case CCP_SHA_TYPE_384:
		rctx->cmd.u.sha.ctx_len = SHA384_DIGEST_SIZE;
		break;
	case CCP_SHA_TYPE_512:
		rctx->cmd.u.sha.ctx_len = SHA512_DIGEST_SIZE;
		break;
	default:
		/* Should never get here */
		break;
	}
	rctx->cmd.u.sha.src = sg;
	rctx->cmd.u.sha.src_len = rctx->hash_cnt;
	rctx->cmd.u.sha.opad = ctx->u.sha.key_len ?
		&ctx->u.sha.opad_sg : NULL;
	rctx->cmd.u.sha.opad_len = ctx->u.sha.key_len ?
		ctx->u.sha.opad_count : 0;
	rctx->cmd.u.sha.first = rctx->first;
	rctx->cmd.u.sha.final = rctx->final;
	rctx->cmd.u.sha.msg_bits = rctx->msg_bits;

	rctx->first = 0;

	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

	return ret;
e_free:
	sg_free_table(&rctx->data_sg);

	return ret;
}
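/*
 * For a keyed (HMAC) transform, init preloads the precomputed ipad as
 * the first buffered block, so the hardware hashes ipad || msg; the
 * opad pass is attached to the final CCP command via cmd.u.sha.opad
 * in ccp_do_sha_update() above.
 */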
static int ccp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	struct ccp_sha_req_ctx *rctx = ahash_request_ctx_dma(req);
	struct ccp_crypto_ahash_alg *alg =
		ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm));
	unsigned int block_size =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	memset(rctx, 0, sizeof(*rctx));

	rctx->type = alg->type;
	rctx->first = 1;

	if (ctx->u.sha.key_len) {
		/* Buffer the HMAC key for first update */
		memcpy(rctx->buf, ctx->u.sha.ipad, block_size);
		rctx->buf_count = block_size;
	}

	return 0;
}
static int ccp_sha_update(struct ahash_request *req)
{
	return ccp_do_sha_update(req, req->nbytes, 0);
}

static int ccp_sha_final(struct ahash_request *req)
{
	return ccp_do_sha_update(req, 0, 1);
}

static int ccp_sha_finup(struct ahash_request *req)
{
	return ccp_do_sha_update(req, req->nbytes, 1);
}
static int ccp_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = ccp_sha_init(req);
	if (ret)
		return ret;

	return ccp_sha_finup(req);
}
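/*
 * Export/import round-trip the request state through a local
 * struct ccp_sha_exp_ctx, whose size must match the statesize
 * advertised at registration.  Copying through the local keeps
 * unaligned 'out'/'in' buffers safe and avoids leaking padding.
 */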
static int ccp_sha_export(struct ahash_request *req, void *out)
{
	struct ccp_sha_req_ctx *rctx = ahash_request_ctx_dma(req);
	struct ccp_sha_exp_ctx state;

	/* Don't let anything leak to 'out' */
	memset(&state, 0, sizeof(state));

	state.type = rctx->type;
	state.msg_bits = rctx->msg_bits;
	state.first = rctx->first;
	memcpy(state.ctx, rctx->ctx, sizeof(state.ctx));
	state.buf_count = rctx->buf_count;
	memcpy(state.buf, rctx->buf, sizeof(state.buf));

	/* 'out' may not be aligned so memcpy from local variable */
	memcpy(out, &state, sizeof(state));

	return 0;
}
static int ccp_sha_import(struct ahash_request *req, const void *in)
{
	struct ccp_sha_req_ctx *rctx = ahash_request_ctx_dma(req);
	struct ccp_sha_exp_ctx state;

	/* 'in' may not be aligned so memcpy to local variable */
	memcpy(&state, in, sizeof(state));

	memset(rctx, 0, sizeof(*rctx));
	rctx->type = state.type;
	rctx->msg_bits = state.msg_bits;
	rctx->first = state.first;
	memcpy(rctx->ctx, state.ctx, sizeof(rctx->ctx));
	rctx->buf_count = state.buf_count;
	memcpy(rctx->buf, state.buf, sizeof(rctx->buf));

	return 0;
}
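/*
 * Standard HMAC key preparation (RFC 2104): a key longer than the
 * block size is first digested down, shorter keys are zero-padded,
 * and ipad/opad are derived by XORing each key byte with 0x36 and
 * 0x5c (HMAC_IPAD_VALUE/HMAC_OPAD_VALUE).
 */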
static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int key_len)
{
	struct ccp_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	struct crypto_shash *shash = ctx->u.sha.hmac_tfm;
	unsigned int block_size = crypto_shash_blocksize(shash);
	unsigned int digest_size = crypto_shash_digestsize(shash);
	int i, ret;

	/* Set to zero until complete */
	ctx->u.sha.key_len = 0;

	/* Clear key area to provide zero padding for keys smaller
	 * than the block size
	 */
	memset(ctx->u.sha.key, 0, sizeof(ctx->u.sha.key));

	if (key_len > block_size) {
		/* Must hash the input key */
		ret = crypto_shash_tfm_digest(shash, key, key_len,
					      ctx->u.sha.key);
		if (ret)
			return -EINVAL;

		key_len = digest_size;
	} else {
		memcpy(ctx->u.sha.key, key, key_len);
	}

	for (i = 0; i < block_size; i++) {
		ctx->u.sha.ipad[i] = ctx->u.sha.key[i] ^ HMAC_IPAD_VALUE;
		ctx->u.sha.opad[i] = ctx->u.sha.key[i] ^ HMAC_OPAD_VALUE;
	}

	sg_init_one(&ctx->u.sha.opad_sg, ctx->u.sha.opad, block_size);
	ctx->u.sha.opad_count = block_size;

	ctx->u.sha.key_len = key_len;

	return 0;
}
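/*
 * The request context is sized with the DMA-aware helper so that
 * rctx (including the ctx and buf areas handed to the CCP) lands in
 * memory suitable for DMA mapping.
 */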
static int ccp_sha_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct ccp_ctx *ctx = crypto_ahash_ctx_dma(ahash);

	ctx->complete = ccp_sha_complete;
	ctx->u.sha.key_len = 0;

	crypto_ahash_set_reqsize_dma(ahash, sizeof(struct ccp_sha_req_ctx));

	return 0;
}

static void ccp_sha_cra_exit(struct crypto_tfm *tfm)
{
}
static int ccp_hmac_sha_cra_init(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx_dma(tfm);
	struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(tfm);
	struct crypto_shash *hmac_tfm;

	hmac_tfm = crypto_alloc_shash(alg->child_alg, 0, 0);
	if (IS_ERR(hmac_tfm)) {
		pr_warn("could not load driver %s needed for HMAC support\n",
			alg->child_alg);
		return PTR_ERR(hmac_tfm);
	}

	ctx->u.sha.hmac_tfm = hmac_tfm;

	return ccp_sha_cra_init(tfm);
}
static void ccp_hmac_sha_cra_exit(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx_dma(tfm);

	if (ctx->u.sha.hmac_tfm)
		crypto_free_shash(ctx->u.sha.hmac_tfm);

	ccp_sha_cra_exit(tfm);
}
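/*
 * Table-driven registration: each entry records the minimum CCP
 * hardware version implementing the algorithm plus the parameters
 * needed to fill in the crypto API descriptors.
 */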
struct ccp_sha_def {
	unsigned int version;
	const char *name;
	const char *drv_name;
	enum ccp_sha_type type;
	u32 digest_size;
	u32 block_size;
};
static struct ccp_sha_def sha_algs[] = {
	{
		.version	= CCP_VERSION(3, 0),
		.name		= "sha1",
		.drv_name	= "sha1-ccp",
		.type		= CCP_SHA_TYPE_1,
		.digest_size	= SHA1_DIGEST_SIZE,
		.block_size	= SHA1_BLOCK_SIZE,
	},
	{
		.version	= CCP_VERSION(3, 0),
		.name		= "sha224",
		.drv_name	= "sha224-ccp",
		.type		= CCP_SHA_TYPE_224,
		.digest_size	= SHA224_DIGEST_SIZE,
		.block_size	= SHA224_BLOCK_SIZE,
	},
	{
		.version	= CCP_VERSION(3, 0),
		.name		= "sha256",
		.drv_name	= "sha256-ccp",
		.type		= CCP_SHA_TYPE_256,
		.digest_size	= SHA256_DIGEST_SIZE,
		.block_size	= SHA256_BLOCK_SIZE,
	},
	{
		.version	= CCP_VERSION(5, 0),
		.name		= "sha384",
		.drv_name	= "sha384-ccp",
		.type		= CCP_SHA_TYPE_384,
		.digest_size	= SHA384_DIGEST_SIZE,
		.block_size	= SHA384_BLOCK_SIZE,
	},
	{
		.version	= CCP_VERSION(5, 0),
		.name		= "sha512",
		.drv_name	= "sha512-ccp",
		.type		= CCP_SHA_TYPE_512,
		.digest_size	= SHA512_DIGEST_SIZE,
		.block_size	= SHA512_BLOCK_SIZE,
	},
};
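/*
 * The HMAC variant of each hash is registered by cloning the base
 * algorithm and overriding only setkey and the init/exit hooks; the
 * plain hash named in child_alg supplies setkey's key digesting.
 */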
static int ccp_register_hmac_alg(struct list_head *head,
				 const struct ccp_sha_def *def,
				 const struct ccp_crypto_ahash_alg *base_alg)
{
	struct ccp_crypto_ahash_alg *ccp_alg;
	struct ahash_alg *alg;
	struct hash_alg_common *halg;
	struct crypto_alg *base;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	/* Copy the base algorithm and only change what's necessary */
	*ccp_alg = *base_alg;
	INIT_LIST_HEAD(&ccp_alg->entry);

	strscpy(ccp_alg->child_alg, def->name, CRYPTO_MAX_ALG_NAME);

	alg = &ccp_alg->alg;
	alg->setkey = ccp_sha_setkey;

	halg = &alg->halg;

	base = &halg->base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", def->name);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s",
		 def->drv_name);
	base->cra_init = ccp_hmac_sha_cra_init;
	base->cra_exit = ccp_hmac_sha_cra_exit;

	ret = crypto_register_ahash(alg);
	if (ret) {
		pr_err("%s ahash algorithm registration error (%d)\n",
		       base->cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	return ret;
}
static int ccp_register_sha_alg(struct list_head *head,
				const struct ccp_sha_def *def)
{
	struct ccp_crypto_ahash_alg *ccp_alg;
	struct ahash_alg *alg;
	struct hash_alg_common *halg;
	struct crypto_alg *base;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	INIT_LIST_HEAD(&ccp_alg->entry);

	ccp_alg->type = def->type;

	alg = &ccp_alg->alg;
	alg->init = ccp_sha_init;
	alg->update = ccp_sha_update;
	alg->final = ccp_sha_final;
	alg->finup = ccp_sha_finup;
	alg->digest = ccp_sha_digest;
	alg->export = ccp_sha_export;
	alg->import = ccp_sha_import;

	halg = &alg->halg;
	halg->digestsize = def->digest_size;
	halg->statesize = sizeof(struct ccp_sha_exp_ctx);

	base = &halg->base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);
	base->cra_flags = CRYPTO_ALG_ASYNC |
			  CRYPTO_ALG_ALLOCATES_MEMORY |
			  CRYPTO_ALG_KERN_DRIVER_ONLY |
			  CRYPTO_ALG_NEED_FALLBACK;
	base->cra_blocksize = def->block_size;
	base->cra_ctxsize = sizeof(struct ccp_ctx) + crypto_dma_padding();
	base->cra_priority = CCP_CRA_PRIORITY;
	base->cra_init = ccp_sha_cra_init;
	base->cra_exit = ccp_sha_cra_exit;
	base->cra_module = THIS_MODULE;

	ret = crypto_register_ahash(alg);
	if (ret) {
		pr_err("%s ahash algorithm registration error (%d)\n",
		       base->cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	ret = ccp_register_hmac_alg(head, def, ccp_alg);

	return ret;
}
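/*
 * Once registered, these transforms are reachable through the normal
 * ahash API.  A minimal kernel-side sketch (error handling elided;
 * the msg/digest buffers are hypothetical):
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, msg, msg_len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, &sg, digest, msg_len);
 *	crypto_wait_req(crypto_ahash_digest(req), &wait);
 *
 * Whether the CCP or a software implementation is selected depends on
 * cra_priority and which drivers are loaded.
 */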
int ccp_register_sha_algs(struct list_head *head)
{
	int ret;
	unsigned int i;
	unsigned int ccpversion = ccp_version();

	for (i = 0; i < ARRAY_SIZE(sha_algs); i++) {
		if (sha_algs[i].version > ccpversion)
			continue;
		ret = ccp_register_sha_alg(head, &sha_algs[i]);
		if (ret)
			return ret;
	}

	return 0;
}