/*
 * AMD Cryptographic Coprocessor (CCP) SHA crypto API support
 *
 * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/hmac.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"

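/*
 * Completion callback, run when a queued SHA command finishes.  It
 * stashes any unhashed tail bytes for the next update and, on a final
 * operation, copies the digest back to the caller's result buffer.
 */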
static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
	unsigned int digest_size = crypto_ahash_digestsize(tfm);

	if (ret)
		goto e_free;

	if (rctx->hash_rem) {
		/* Save remaining data to buffer */
		unsigned int offset = rctx->nbytes - rctx->hash_rem;

		scatterwalk_map_and_copy(rctx->buf, rctx->src,
					 offset, rctx->hash_rem, 0);
		rctx->buf_count = rctx->hash_rem;
	} else {
		rctx->buf_count = 0;
	}

	/* Update result area if supplied */
	if (req->result && rctx->final)
		memcpy(req->result, rctx->ctx, digest_size);

e_free:
	sg_free_table(&rctx->data_sg);

	return ret;
}

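/*
 * Common worker for update/final/finup/digest.  Data is sent to the
 * CCP in block-size multiples; any tail is noted in rctx->hash_rem
 * and copied back into rctx->buf by ccp_sha_complete() once the
 * hardware has finished with the source scatterlist.
 */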
static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
			     unsigned int final)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct scatterlist *sg;
	unsigned int block_size =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int sg_count;
	gfp_t gfp;
	u64 len;
	int ret;

	len = (u64)rctx->buf_count + (u64)nbytes;

	if (!final && (len <= block_size)) {
		scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
					 0, nbytes, 0);
		rctx->buf_count += nbytes;

		return 0;
	}

	rctx->src = req->src;
	rctx->nbytes = nbytes;

	rctx->final = final;
	rctx->hash_rem = final ? 0 : len & (block_size - 1);
	rctx->hash_cnt = len - rctx->hash_rem;
	if (!final && !rctx->hash_rem) {
		/* CCP can't do zero length final, so keep some data around */
		rctx->hash_cnt -= block_size;
		rctx->hash_rem = block_size;
	}

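	/*
	 * Worked example (illustrative): with block_size 64, buf_count 10
	 * and nbytes 118, len is 128.  A non-final update would compute
	 * hash_rem 0, so the adjustment above holds back one full block
	 * (hash_cnt 64, hash_rem 64) to guarantee that the final command
	 * always has data to hash.
	 */
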
	/* Initialize the context scatterlist */
	sg_init_one(&rctx->ctx_sg, rctx->ctx, sizeof(rctx->ctx));

	sg = NULL;
	if (rctx->buf_count && nbytes) {
		/* Build the data scatterlist table - allocate enough entries
		 * for both data pieces (buffer and input data)
		 */
		gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
			GFP_KERNEL : GFP_ATOMIC;
		sg_count = sg_nents(req->src) + 1;
		ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp);
		if (ret)
			return ret;

		sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
		sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
		if (!sg) {
			ret = -EINVAL;
			goto e_free;
		}
		sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);
		if (!sg) {
			ret = -EINVAL;
			goto e_free;
		}
		sg_mark_end(sg);

		sg = rctx->data_sg.sgl;
	} else if (rctx->buf_count) {
		sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);

		sg = &rctx->buf_sg;
	} else if (nbytes) {
		sg = req->src;
	}

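	/*
	 * Note: the carried-over buffer was added to the table ahead of
	 * req->src above so that bytes left from the previous update are
	 * hashed before the new data, preserving message order.
	 */
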
	rctx->msg_bits += (rctx->hash_cnt << 3);	/* Total in bits */

	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
	INIT_LIST_HEAD(&rctx->cmd.entry);
	rctx->cmd.engine = CCP_ENGINE_SHA;
	rctx->cmd.u.sha.type = rctx->type;
	rctx->cmd.u.sha.ctx = &rctx->ctx_sg;

	switch (rctx->type) {
	case CCP_SHA_TYPE_1:
		rctx->cmd.u.sha.ctx_len = SHA1_DIGEST_SIZE;
		break;
	case CCP_SHA_TYPE_224:
		rctx->cmd.u.sha.ctx_len = SHA224_DIGEST_SIZE;
		break;
	case CCP_SHA_TYPE_256:
		rctx->cmd.u.sha.ctx_len = SHA256_DIGEST_SIZE;
		break;
	case CCP_SHA_TYPE_384:
		rctx->cmd.u.sha.ctx_len = SHA384_DIGEST_SIZE;
		break;
	case CCP_SHA_TYPE_512:
		rctx->cmd.u.sha.ctx_len = SHA512_DIGEST_SIZE;
		break;
	default:
		/* Should never get here */
		break;
	}

	rctx->cmd.u.sha.src = sg;
	rctx->cmd.u.sha.src_len = rctx->hash_cnt;
	rctx->cmd.u.sha.opad = ctx->u.sha.key_len ?
		&ctx->u.sha.opad_sg : NULL;
	rctx->cmd.u.sha.opad_len = ctx->u.sha.key_len ?
		ctx->u.sha.opad_count : 0;
	rctx->cmd.u.sha.first = rctx->first;
	rctx->cmd.u.sha.final = rctx->final;
	rctx->cmd.u.sha.msg_bits = rctx->msg_bits;

	rctx->first = 0;

	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

	return ret;

e_free:
	sg_free_table(&rctx->data_sg);

	return ret;
}

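/*
 * For HMAC transforms (key_len != 0) the precomputed ipad block is
 * preloaded into the data buffer so it is hashed as the first block of
 * the message; the opad pass is handed to the CCP with the final
 * command via cmd.u.sha.opad in ccp_do_sha_update().
 */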
static int ccp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct ccp_crypto_ahash_alg *alg =
		ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm));
	unsigned int block_size =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	memset(rctx, 0, sizeof(*rctx));

	rctx->type = alg->type;
	rctx->first = 1;

	if (ctx->u.sha.key_len) {
		/* Buffer the HMAC key for first update */
		memcpy(rctx->buf, ctx->u.sha.ipad, block_size);
		rctx->buf_count = block_size;
	}

	return 0;
}

static int ccp_sha_update(struct ahash_request *req)
{
	return ccp_do_sha_update(req, req->nbytes, 0);
}

static int ccp_sha_final(struct ahash_request *req)
{
	return ccp_do_sha_update(req, 0, 1);
}

static int ccp_sha_finup(struct ahash_request *req)
{
	return ccp_do_sha_update(req, req->nbytes, 1);
}

static int ccp_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = ccp_sha_init(req);
	if (ret)
		return ret;

	return ccp_sha_finup(req);
}

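/*
 * Example (illustrative only, error handling elided): callers reach
 * this implementation through the generic ahash API.  The names
 * src_sg, digest and done_cb below are placeholders.
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   done_cb, NULL);
 *	ahash_request_set_crypt(req, src_sg, digest, src_len);
 *	crypto_ahash_digest(req);	// may return -EINPROGRESS
 *
 * Completion is asynchronous: ccp_sha_complete() runs first, then the
 * caller's done_cb is invoked.
 */
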
static int ccp_sha_export(struct ahash_request *req, void *out)
{
	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct ccp_sha_exp_ctx state;

	/* Don't let anything leak to 'out' */
	memset(&state, 0, sizeof(state));

	state.type = rctx->type;
	state.msg_bits = rctx->msg_bits;
	state.first = rctx->first;
	memcpy(state.ctx, rctx->ctx, sizeof(state.ctx));
	state.buf_count = rctx->buf_count;
	memcpy(state.buf, rctx->buf, sizeof(state.buf));

	/* 'out' may not be aligned so memcpy from local variable */
	memcpy(out, &state, sizeof(state));

	return 0;
}

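/*
 * import is the inverse of export: together they let a partially
 * hashed state be checkpointed and resumed, e.g. when user space
 * clones a hash socket through algif_hash.
 */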
static int ccp_sha_import(struct ahash_request *req, const void *in)
{
	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct ccp_sha_exp_ctx state;

	/* 'in' may not be aligned so memcpy to local variable */
	memcpy(&state, in, sizeof(state));

	memset(rctx, 0, sizeof(*rctx));
	rctx->type = state.type;
	rctx->msg_bits = state.msg_bits;
	rctx->first = state.first;
	memcpy(rctx->ctx, state.ctx, sizeof(rctx->ctx));
	rctx->buf_count = state.buf_count;
	memcpy(rctx->buf, state.buf, sizeof(rctx->buf));

	return 0;
}

static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int key_len)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct crypto_shash *shash = ctx->u.sha.hmac_tfm;

	SHASH_DESC_ON_STACK(sdesc, shash);

	unsigned int block_size = crypto_shash_blocksize(shash);
	unsigned int digest_size = crypto_shash_digestsize(shash);
	int i, ret;

	/* Set to zero until complete */
	ctx->u.sha.key_len = 0;

	/* Clear key area to provide zero padding for keys smaller
	 * than the block size
	 */
	memset(ctx->u.sha.key, 0, sizeof(ctx->u.sha.key));

	if (key_len > block_size) {
		/* Must hash the input key */
		sdesc->tfm = shash;
		sdesc->flags = crypto_ahash_get_flags(tfm) &
			CRYPTO_TFM_REQ_MAY_SLEEP;

		ret = crypto_shash_digest(sdesc, key, key_len,
					  ctx->u.sha.key);
		if (ret) {
			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
			return -EINVAL;
		}

		key_len = digest_size;
	} else {
		memcpy(ctx->u.sha.key, key, key_len);
	}

	for (i = 0; i < block_size; i++) {
		ctx->u.sha.ipad[i] = ctx->u.sha.key[i] ^ HMAC_IPAD_VALUE;
		ctx->u.sha.opad[i] = ctx->u.sha.key[i] ^ HMAC_OPAD_VALUE;
	}

	sg_init_one(&ctx->u.sha.opad_sg, ctx->u.sha.opad, block_size);
	ctx->u.sha.opad_count = block_size;

	ctx->u.sha.key_len = key_len;

	return 0;
}

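/*
 * The pad construction above follows RFC 2104: a key longer than the
 * block size is first reduced to its digest, shorter keys are
 * zero-padded, and the result is XORed with the 0x36/0x5c pad bytes
 * (HMAC_IPAD_VALUE/HMAC_OPAD_VALUE from <crypto/hmac.h>).
 */
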
static int ccp_sha_cra_init(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);

	ctx->complete = ccp_sha_complete;
	ctx->u.sha.key_len = 0;

	crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_sha_req_ctx));

	return 0;
}

static void ccp_sha_cra_exit(struct crypto_tfm *tfm)
{
}

static int ccp_hmac_sha_cra_init(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(tfm);
	struct crypto_shash *hmac_tfm;

	hmac_tfm = crypto_alloc_shash(alg->child_alg, 0, 0);
	if (IS_ERR(hmac_tfm)) {
		pr_warn("could not load driver %s needed for HMAC support\n",
			alg->child_alg);
		return PTR_ERR(hmac_tfm);
	}

	ctx->u.sha.hmac_tfm = hmac_tfm;

	return ccp_sha_cra_init(tfm);
}

static void ccp_hmac_sha_cra_exit(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->u.sha.hmac_tfm)
		crypto_free_shash(ctx->u.sha.hmac_tfm);

	ccp_sha_cra_exit(tfm);
}

struct ccp_sha_def {
	unsigned int version;
	const char *name;
	const char *drv_name;
	enum ccp_sha_type type;
	u32 digest_size;
	u32 block_size;
};

static struct ccp_sha_def sha_algs[] = {
	{
		.version	= CCP_VERSION(3, 0),
		.name		= "sha1",
		.drv_name	= "sha1-ccp",
		.type		= CCP_SHA_TYPE_1,
		.digest_size	= SHA1_DIGEST_SIZE,
		.block_size	= SHA1_BLOCK_SIZE,
	},
	{
		.version	= CCP_VERSION(3, 0),
		.name		= "sha224",
		.drv_name	= "sha224-ccp",
		.type		= CCP_SHA_TYPE_224,
		.digest_size	= SHA224_DIGEST_SIZE,
		.block_size	= SHA224_BLOCK_SIZE,
	},
	{
		.version	= CCP_VERSION(3, 0),
		.name		= "sha256",
		.drv_name	= "sha256-ccp",
		.type		= CCP_SHA_TYPE_256,
		.digest_size	= SHA256_DIGEST_SIZE,
		.block_size	= SHA256_BLOCK_SIZE,
	},
	{
		.version	= CCP_VERSION(5, 0),
		.name		= "sha384",
		.drv_name	= "sha384-ccp",
		.type		= CCP_SHA_TYPE_384,
		.digest_size	= SHA384_DIGEST_SIZE,
		.block_size	= SHA384_BLOCK_SIZE,
	},
	{
		.version	= CCP_VERSION(5, 0),
		.name		= "sha512",
		.drv_name	= "sha512-ccp",
		.type		= CCP_SHA_TYPE_512,
		.digest_size	= SHA512_DIGEST_SIZE,
		.block_size	= SHA512_BLOCK_SIZE,
	},
};

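/*
 * SHA-384/512 are only offered by version 5 CCPs; ccp_register_sha_algs()
 * below skips any table entry whose required version is newer than what
 * the hardware reports.
 */
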
static int ccp_register_hmac_alg(struct list_head *head,
				 const struct ccp_sha_def *def,
				 const struct ccp_crypto_ahash_alg *base_alg)
{
	struct ccp_crypto_ahash_alg *ccp_alg;
	struct ahash_alg *alg;
	struct hash_alg_common *halg;
	struct crypto_alg *base;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	/* Copy the base algorithm and only change what's necessary */
	*ccp_alg = *base_alg;
	INIT_LIST_HEAD(&ccp_alg->entry);

	strncpy(ccp_alg->child_alg, def->name, CRYPTO_MAX_ALG_NAME);

	alg = &ccp_alg->alg;
	alg->setkey = ccp_sha_setkey;

	halg = &alg->halg;

	base = &halg->base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", def->name);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s",
		 def->drv_name);
	base->cra_init = ccp_hmac_sha_cra_init;
	base->cra_exit = ccp_hmac_sha_cra_exit;

	ret = crypto_register_ahash(alg);
	if (ret) {
		pr_err("%s ahash algorithm registration error (%d)\n",
		       base->cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	return 0;
}

static int ccp_register_sha_alg(struct list_head *head,
				const struct ccp_sha_def *def)
{
	struct ccp_crypto_ahash_alg *ccp_alg;
	struct ahash_alg *alg;
	struct hash_alg_common *halg;
	struct crypto_alg *base;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	INIT_LIST_HEAD(&ccp_alg->entry);

	ccp_alg->type = def->type;

	alg = &ccp_alg->alg;
	alg->init = ccp_sha_init;
	alg->update = ccp_sha_update;
	alg->final = ccp_sha_final;
	alg->finup = ccp_sha_finup;
	alg->digest = ccp_sha_digest;
	alg->export = ccp_sha_export;
	alg->import = ccp_sha_import;

	halg = &alg->halg;
	halg->digestsize = def->digest_size;
	halg->statesize = sizeof(struct ccp_sha_exp_ctx);

	base = &halg->base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);
	base->cra_flags = CRYPTO_ALG_ASYNC |
		CRYPTO_ALG_KERN_DRIVER_ONLY |
		CRYPTO_ALG_NEED_FALLBACK;
	base->cra_blocksize = def->block_size;
	base->cra_ctxsize = sizeof(struct ccp_ctx);
	base->cra_priority = CCP_CRA_PRIORITY;
	base->cra_init = ccp_sha_cra_init;
	base->cra_exit = ccp_sha_cra_exit;
	base->cra_module = THIS_MODULE;

	ret = crypto_register_ahash(alg);
	if (ret) {
		pr_err("%s ahash algorithm registration error (%d)\n",
		       base->cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	ret = ccp_register_hmac_alg(head, def, ccp_alg);

	return ret;
}

int ccp_register_sha_algs(struct list_head *head)
{
	int i, ret;
	unsigned int ccpversion = ccp_version();

	for (i = 0; i < ARRAY_SIZE(sha_algs); i++) {
		if (sha_algs[i].version > ccpversion)
			continue;
		ret = ccp_register_sha_alg(head, &sha_algs[i]);
		if (ret)
			return ret;
	}

	return 0;
}