/*
 * AMD Cryptographic Coprocessor (CCP) SHA crypto API support
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"
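
#if 0
/*
 * Illustrative only, not part of the driver: a minimal sketch of how a
 * kernel caller might exercise one of the hashes registered below
 * through the generic ahash API.  It assumes the synchronous-wait
 * helpers DECLARE_CRYPTO_WAIT()/crypto_wait_req() are available
 * (kernel 4.14+); the function name and buffers are hypothetical.
 */
static int ccp_sha_usage_sketch(void)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	/* Buffers must be DMA-able, so not on the stack */
	static u8 data[] = "example";
	static u8 digest[SHA256_DIGEST_SIZE];
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto e_tfm;
	}

	sg_init_one(&sg, data, sizeof(data) - 1);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, digest, sizeof(data) - 1);

	/* The request completes asynchronously; wait for the result */
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
e_tfm:
	crypto_free_ahash(tfm);

	return ret;
}
#endif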
static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
	unsigned int digest_size = crypto_ahash_digestsize(tfm);

	if (ret)
		goto e_free;

	if (rctx->hash_rem) {
		/* Save remaining data to buffer */
		unsigned int offset = rctx->nbytes - rctx->hash_rem;

		scatterwalk_map_and_copy(rctx->buf, rctx->src,
					 offset, rctx->hash_rem, 0);
		rctx->buf_count = rctx->hash_rem;
	} else {
		rctx->buf_count = 0;
	}

	/* Update result area if supplied */
	if (req->result)
		memcpy(req->result, rctx->ctx, digest_size);

e_free:
	sg_free_table(&rctx->data_sg);

	return ret;
}
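
/*
 * Common worker for the update/final/finup/digest entry points.  Data
 * is sent to the CCP only in multiples of the block size; any tail is
 * carried over in rctx->buf by ccp_sha_complete() above, and a full
 * block is held back when the length is an exact multiple, since the
 * CCP cannot perform a zero-length final operation.
 */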
static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
			     unsigned int final)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct scatterlist *sg;
	unsigned int block_size =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int sg_count;
	gfp_t gfp;
	u64 len;
	int ret;

	len = (u64)rctx->buf_count + (u64)nbytes;

	if (!final && (len <= block_size)) {
		scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
					 0, nbytes, 0);
		rctx->buf_count += nbytes;

		return 0;
	}

	rctx->src = req->src;
	rctx->nbytes = nbytes;

	rctx->final = final;
	rctx->hash_rem = final ? 0 : len & (block_size - 1);
	rctx->hash_cnt = len - rctx->hash_rem;
	if (!final && !rctx->hash_rem) {
		/* CCP can't do zero length final, so keep some data around */
		rctx->hash_cnt -= block_size;
		rctx->hash_rem = block_size;
	}

	/* Initialize the context scatterlist */
	sg_init_one(&rctx->ctx_sg, rctx->ctx, sizeof(rctx->ctx));

	sg = NULL;
	if (rctx->buf_count && nbytes) {
		/* Build the data scatterlist table - allocate enough entries
		 * for both data pieces (buffer and input data)
		 */
		gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
			GFP_KERNEL : GFP_ATOMIC;
		sg_count = sg_nents(req->src) + 1;
		ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp);
		if (ret)
			return ret;

		sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
		sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
		if (!sg) {
			ret = -EINVAL;
			goto e_free;
		}
		sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);
		if (!sg) {
			ret = -EINVAL;
			goto e_free;
		}
		sg_mark_end(sg);

		sg = rctx->data_sg.sgl;
	} else if (rctx->buf_count) {
		sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);

		sg = &rctx->buf_sg;
	} else if (nbytes) {
		sg = req->src;
	}

	rctx->msg_bits += (rctx->hash_cnt << 3);	/* Total in bits */

	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
	INIT_LIST_HEAD(&rctx->cmd.entry);
	rctx->cmd.engine = CCP_ENGINE_SHA;
	rctx->cmd.u.sha.type = rctx->type;
	rctx->cmd.u.sha.ctx = &rctx->ctx_sg;
	rctx->cmd.u.sha.ctx_len = sizeof(rctx->ctx);
	rctx->cmd.u.sha.src = sg;
	rctx->cmd.u.sha.src_len = rctx->hash_cnt;
	rctx->cmd.u.sha.opad = ctx->u.sha.key_len ?
		&ctx->u.sha.opad_sg : NULL;
	rctx->cmd.u.sha.opad_len = ctx->u.sha.key_len ?
		ctx->u.sha.opad_count : 0;
	rctx->cmd.u.sha.first = rctx->first;
	rctx->cmd.u.sha.final = rctx->final;
	rctx->cmd.u.sha.msg_bits = rctx->msg_bits;

	rctx->first = 0;

	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

	return ret;

e_free:
	sg_free_table(&rctx->data_sg);

	return ret;
}
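
/*
 * ahash .init entry point: reset the per-request context and, for
 * HMAC, preload the buffer with the ipad block so the first update
 * hashes it ahead of the message.
 */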
static int ccp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct ccp_crypto_ahash_alg *alg =
		ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm));
	unsigned int block_size =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	memset(rctx, 0, sizeof(*rctx));

	rctx->type = alg->type;
	rctx->first = 1;

	if (ctx->u.sha.key_len) {
		/* Buffer the HMAC key for first update */
		memcpy(rctx->buf, ctx->u.sha.ipad, block_size);
		rctx->buf_count = block_size;
	}

	return 0;
}
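
/*
 * The remaining entry points are thin wrappers around
 * ccp_do_sha_update(): update passes req->nbytes with final = 0,
 * final passes no data with final = 1, and finup/digest combine the
 * two (digest re-initializing the request context first).
 */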
static int ccp_sha_update(struct ahash_request *req)
{
	return ccp_do_sha_update(req, req->nbytes, 0);
}

static int ccp_sha_final(struct ahash_request *req)
{
	return ccp_do_sha_update(req, 0, 1);
}

static int ccp_sha_finup(struct ahash_request *req)
{
	return ccp_do_sha_update(req, req->nbytes, 1);
}

static int ccp_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = ccp_sha_init(req);
	if (ret)
		return ret;

	return ccp_sha_finup(req);
}
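
/*
 * HMAC key preprocessing per RFC 2104: a key longer than the block
 * size is first hashed down to digest_size bytes, shorter keys are
 * zero-padded, and the result is XORed with 0x36/0x5c to produce the
 * ipad and opad blocks consumed by ccp_sha_init() and the SHA command.
 */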
static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int key_len)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct crypto_shash *shash = ctx->u.sha.hmac_tfm;

	SHASH_DESC_ON_STACK(sdesc, shash);

	unsigned int block_size = crypto_shash_blocksize(shash);
	unsigned int digest_size = crypto_shash_digestsize(shash);
	int i, ret;

	/* Set to zero until complete */
	ctx->u.sha.key_len = 0;

	/* Clear key area to provide zero padding for keys smaller
	 * than the block size
	 */
	memset(ctx->u.sha.key, 0, sizeof(ctx->u.sha.key));

	if (key_len > block_size) {
		/* Must hash the input key */
		sdesc->tfm = shash;
		sdesc->flags = crypto_ahash_get_flags(tfm) &
			CRYPTO_TFM_REQ_MAY_SLEEP;

		ret = crypto_shash_digest(sdesc, key, key_len,
					  ctx->u.sha.key);
		if (ret) {
			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
			return -EINVAL;
		}

		key_len = digest_size;
	} else {
		memcpy(ctx->u.sha.key, key, key_len);
	}

	for (i = 0; i < block_size; i++) {
		ctx->u.sha.ipad[i] = ctx->u.sha.key[i] ^ 0x36;
		ctx->u.sha.opad[i] = ctx->u.sha.key[i] ^ 0x5c;
	}

	sg_init_one(&ctx->u.sha.opad_sg, ctx->u.sha.opad, block_size);
	ctx->u.sha.opad_count = block_size;

	ctx->u.sha.key_len = key_len;

	return 0;
}
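
/*
 * Transform-level init: wire up the completion callback used by the
 * CCP request queue and size the per-request context.
 */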
static int ccp_sha_cra_init(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);

	ctx->complete = ccp_sha_complete;
	ctx->u.sha.key_len = 0;

	crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_sha_req_ctx));

	return 0;
}

static void ccp_sha_cra_exit(struct crypto_tfm *tfm)
{
}
static int ccp_hmac_sha_cra_init(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(tfm);
	struct crypto_shash *hmac_tfm;

	hmac_tfm = crypto_alloc_shash(alg->child_alg, 0, 0);
	if (IS_ERR(hmac_tfm)) {
		pr_warn("could not load driver %s needed for HMAC support\n",
			alg->child_alg);
		return PTR_ERR(hmac_tfm);
	}

	ctx->u.sha.hmac_tfm = hmac_tfm;

	return ccp_sha_cra_init(tfm);
}

static void ccp_hmac_sha_cra_exit(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->u.sha.hmac_tfm)
		crypto_free_shash(ctx->u.sha.hmac_tfm);

	ccp_sha_cra_exit(tfm);
}
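
/* Per-algorithm parameters for instantiating each SHA variant */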
struct ccp_sha_def {
	const char *name;
	const char *drv_name;
	enum ccp_sha_type type;
	u32 digest_size;
	u32 block_size;
};
static struct ccp_sha_def sha_algs[] = {
	{
		.name		= "sha1",
		.drv_name	= "sha1-ccp",
		.type		= CCP_SHA_TYPE_1,
		.digest_size	= SHA1_DIGEST_SIZE,
		.block_size	= SHA1_BLOCK_SIZE,
	},
	{
		.name		= "sha224",
		.drv_name	= "sha224-ccp",
		.type		= CCP_SHA_TYPE_224,
		.digest_size	= SHA224_DIGEST_SIZE,
		.block_size	= SHA224_BLOCK_SIZE,
	},
	{
		.name		= "sha256",
		.drv_name	= "sha256-ccp",
		.type		= CCP_SHA_TYPE_256,
		.digest_size	= SHA256_DIGEST_SIZE,
		.block_size	= SHA256_BLOCK_SIZE,
	},
};
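
/*
 * Register the hmac(<hash>) variant of a just-registered base
 * algorithm, overriding only the names, the setkey handler and the
 * init/exit hooks.
 */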
static int ccp_register_hmac_alg(struct list_head *head,
				 const struct ccp_sha_def *def,
				 const struct ccp_crypto_ahash_alg *base_alg)
{
	struct ccp_crypto_ahash_alg *ccp_alg;
	struct ahash_alg *alg;
	struct hash_alg_common *halg;
	struct crypto_alg *base;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	/* Copy the base algorithm and only change what's necessary */
	*ccp_alg = *base_alg;
	INIT_LIST_HEAD(&ccp_alg->entry);

	strncpy(ccp_alg->child_alg, def->name, CRYPTO_MAX_ALG_NAME);

	alg = &ccp_alg->alg;
	alg->setkey = ccp_sha_setkey;

	halg = &alg->halg;

	base = &halg->base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", def->name);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s",
		 def->drv_name);
	base->cra_init = ccp_hmac_sha_cra_init;
	base->cra_exit = ccp_hmac_sha_cra_exit;

	ret = crypto_register_ahash(alg);
	if (ret) {
		pr_err("%s ahash algorithm registration error (%d)\n",
		       base->cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	return ret;
}
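
/* Register one plain SHA variant, then stack its HMAC counterpart on top */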
static int ccp_register_sha_alg(struct list_head *head,
				const struct ccp_sha_def *def)
{
	struct ccp_crypto_ahash_alg *ccp_alg;
	struct ahash_alg *alg;
	struct hash_alg_common *halg;
	struct crypto_alg *base;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	INIT_LIST_HEAD(&ccp_alg->entry);

	ccp_alg->type = def->type;

	alg = &ccp_alg->alg;
	alg->init = ccp_sha_init;
	alg->update = ccp_sha_update;
	alg->final = ccp_sha_final;
	alg->finup = ccp_sha_finup;
	alg->digest = ccp_sha_digest;

	halg = &alg->halg;
	halg->digestsize = def->digest_size;

	base = &halg->base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);
	base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC |
			  CRYPTO_ALG_KERN_DRIVER_ONLY |
			  CRYPTO_ALG_NEED_FALLBACK;
	base->cra_blocksize = def->block_size;
	base->cra_ctxsize = sizeof(struct ccp_ctx);
	base->cra_priority = CCP_CRA_PRIORITY;
	base->cra_type = &crypto_ahash_type;
	base->cra_init = ccp_sha_cra_init;
	base->cra_exit = ccp_sha_cra_exit;
	base->cra_module = THIS_MODULE;

	ret = crypto_register_ahash(alg);
	if (ret) {
		pr_err("%s ahash algorithm registration error (%d)\n",
		       base->cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	ret = ccp_register_hmac_alg(head, def, ccp_alg);

	return ret;
}
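
/*
 * Register all supported SHA algorithms, bailing out on the first
 * failure; each registered algorithm is linked into @head so the
 * caller can unwind on error.
 */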
int ccp_register_sha_algs(struct list_head *head)
{
	int ret;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(sha_algs); i++) {
		ret = ccp_register_sha_alg(head, &sha_algs[i]);
		if (ret)
			return ret;
	}

	return 0;
}