/*
 * AMD Cryptographic Coprocessor (CCP) SHA crypto API support
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"
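
/*
 * Editorial overview (inferred from the code below): update() calls that
 * do not yet fill a block are buffered in the request context; otherwise
 * whole blocks are submitted to the CCP SHA engine as a ccp_cmd, and the
 * completion callback stores any unhashed remainder for the next pass.
 */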

static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
	unsigned int digest_size = crypto_ahash_digestsize(tfm);

	if (ret)
		goto e_free;

	if (rctx->hash_rem) {
		/* Save remaining data to buffer */
		unsigned int offset = rctx->nbytes - rctx->hash_rem;

		scatterwalk_map_and_copy(rctx->buf, rctx->src,
					 offset, rctx->hash_rem, 0);
		rctx->buf_count = rctx->hash_rem;
	} else {
		rctx->buf_count = 0;
	}

	/* Update result area if supplied */
	if (req->result)
		memcpy(req->result, rctx->ctx, digest_size);

e_free:
	sg_free_table(&rctx->data_sg);

	return ret;
}
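
/*
 * Editorial note: ccp_do_sha_update() splits a request into hash_cnt, the
 * whole-block byte count handed to the CCP now, and hash_rem, the tail
 * carried over in the context buffer for a later update or final.
 */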

static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
			     unsigned int final)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct scatterlist *sg;
	unsigned int block_size =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int sg_count;
	gfp_t gfp;
	u64 len;
	int ret;

	len = (u64)rctx->buf_count + (u64)nbytes;

	if (!final && (len <= block_size)) {
		scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
					 0, nbytes, 0);
		rctx->buf_count += nbytes;

		return 0;
	}

	rctx->src = req->src;
	rctx->nbytes = nbytes;

	rctx->final = final;
	rctx->hash_rem = final ? 0 : len & (block_size - 1);
	rctx->hash_cnt = len - rctx->hash_rem;
	if (!final && !rctx->hash_rem) {
		/* CCP can't do a zero length final, so keep some data around */
		rctx->hash_cnt -= block_size;
		rctx->hash_rem = block_size;
	}
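
	/*
	 * Worked example (illustrative): SHA-1/SHA-224/SHA-256 all use
	 * 64-byte blocks. A non-final update where len = 130 and the buffer
	 * was empty gives hash_rem = 2 and hash_cnt = 128: two whole blocks
	 * are hashed now and 2 bytes are carried over. If len were exactly
	 * 128, the branch above holds one block back (hash_cnt = 64,
	 * hash_rem = 64) so the final operation is never zero length.
	 */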

	/* Initialize the context scatterlist */
	sg_init_one(&rctx->ctx_sg, rctx->ctx, sizeof(rctx->ctx));

	sg = NULL;
	if (rctx->buf_count && nbytes) {
		/* Build the data scatterlist table - allocate enough entries
		 * for both data pieces (buffer and input data)
		 */
		gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
			GFP_KERNEL : GFP_ATOMIC;
		sg_count = sg_nents(req->src) + 1;
		ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp);
		if (ret)
			return ret;

		sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
		sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
		sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);
		sg_mark_end(sg);

		sg = rctx->data_sg.sgl;
	} else if (rctx->buf_count) {
		sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);

		sg = &rctx->buf_sg;
	} else if (nbytes) {
		sg = req->src;
	}

	rctx->msg_bits += (rctx->hash_cnt << 3);	/* Total in bits */

	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
	INIT_LIST_HEAD(&rctx->cmd.entry);
	rctx->cmd.engine = CCP_ENGINE_SHA;
	rctx->cmd.u.sha.type = rctx->type;
	rctx->cmd.u.sha.ctx = &rctx->ctx_sg;
	rctx->cmd.u.sha.ctx_len = sizeof(rctx->ctx);
	rctx->cmd.u.sha.src = sg;
	rctx->cmd.u.sha.src_len = rctx->hash_cnt;
	rctx->cmd.u.sha.opad = ctx->u.sha.key_len ?
		&ctx->u.sha.opad_sg : NULL;
	rctx->cmd.u.sha.opad_len = ctx->u.sha.key_len ?
		ctx->u.sha.opad_count : 0;
	rctx->cmd.u.sha.first = rctx->first;
	rctx->cmd.u.sha.final = rctx->final;
	rctx->cmd.u.sha.msg_bits = rctx->msg_bits;

	rctx->first = 0;

	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

	return ret;
}
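
/*
 * Editorial note (inferred from the cmd fields above): "first" indicates
 * the engine should start from the algorithm's initial digest state rather
 * than the passed-in context, and "final" requests SHA padding based on
 * msg_bits, the running message length in bits.
 */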

static int ccp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct ccp_crypto_ahash_alg *alg =
		ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm));
	unsigned int block_size =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	memset(rctx, 0, sizeof(*rctx));

	rctx->type = alg->type;
	rctx->first = 1;

	if (ctx->u.sha.key_len) {
		/* Buffer the HMAC key for first update */
		memcpy(rctx->buf, ctx->u.sha.ipad, block_size);
		rctx->buf_count = block_size;
	}

	return 0;
}
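
/*
 * Editorial note: buffering the ipad block in init means the first update
 * computes the HMAC inner hash H((K ^ ipad) || message); the outer hash
 * is applied via the opad scatterlist prepared in ccp_sha_setkey() and
 * passed to the engine in ccp_do_sha_update().
 */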

static int ccp_sha_update(struct ahash_request *req)
{
	return ccp_do_sha_update(req, req->nbytes, 0);
}

static int ccp_sha_final(struct ahash_request *req)
{
	return ccp_do_sha_update(req, 0, 1);
}

static int ccp_sha_finup(struct ahash_request *req)
{
	return ccp_do_sha_update(req, req->nbytes, 1);
}

static int ccp_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = ccp_sha_init(req);
	if (ret)
		return ret;

	return ccp_sha_finup(req);
}

static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int key_len)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct crypto_shash *shash = ctx->u.sha.hmac_tfm;

	SHASH_DESC_ON_STACK(sdesc, shash);

	unsigned int block_size = crypto_shash_blocksize(shash);
	unsigned int digest_size = crypto_shash_digestsize(shash);
	int i, ret;

	/* Set to zero until complete */
	ctx->u.sha.key_len = 0;

	/* Clear key area to provide zero padding for keys smaller
	 * than the block size
	 */
	memset(ctx->u.sha.key, 0, sizeof(ctx->u.sha.key));

	if (key_len > block_size) {
		/* Must hash the input key */
		sdesc->tfm = shash;
		sdesc->flags = crypto_ahash_get_flags(tfm) &
			CRYPTO_TFM_REQ_MAY_SLEEP;

		ret = crypto_shash_digest(sdesc, key, key_len,
					  ctx->u.sha.key);
		if (ret) {
			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
			return -EINVAL;
		}

		key_len = digest_size;
	} else {
		memcpy(ctx->u.sha.key, key, key_len);
	}

	for (i = 0; i < block_size; i++) {
		ctx->u.sha.ipad[i] = ctx->u.sha.key[i] ^ 0x36;
		ctx->u.sha.opad[i] = ctx->u.sha.key[i] ^ 0x5c;
	}
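
	/*
	 * Editorial note: 0x36 and 0x5c are the standard HMAC ipad/opad
	 * constants from RFC 2104; XORing the zero-padded key with each
	 * yields the two one-block prefixes used for the inner and outer
	 * hashes.
	 */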

	sg_init_one(&ctx->u.sha.opad_sg, ctx->u.sha.opad, block_size);
	ctx->u.sha.opad_count = block_size;

	ctx->u.sha.key_len = key_len;

	return 0;
}

static int ccp_sha_cra_init(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);

	ctx->complete = ccp_sha_complete;
	ctx->u.sha.key_len = 0;

	crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_sha_req_ctx));

	return 0;
}

static void ccp_sha_cra_exit(struct crypto_tfm *tfm)
{
}

static int ccp_hmac_sha_cra_init(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(tfm);
	struct crypto_shash *hmac_tfm;

	hmac_tfm = crypto_alloc_shash(alg->child_alg, 0, 0);
	if (IS_ERR(hmac_tfm)) {
		pr_warn("could not load driver %s needed for HMAC support\n",
			alg->child_alg);
		return PTR_ERR(hmac_tfm);
	}

	ctx->u.sha.hmac_tfm = hmac_tfm;

	return ccp_sha_cra_init(tfm);
}

static void ccp_hmac_sha_cra_exit(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->u.sha.hmac_tfm)
		crypto_free_shash(ctx->u.sha.hmac_tfm);

	ccp_sha_cra_exit(tfm);
}

struct ccp_sha_def {
	const char *name;
	const char *drv_name;
	enum ccp_sha_type type;
	u32 digest_size;
	u32 block_size;
};
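
/*
 * Editorial note: each entry below is registered twice: as a plain hash
 * ("sha1"/"sha1-ccp", ...) by ccp_register_sha_alg(), and as an
 * "hmac(...)" variant by ccp_register_hmac_alg(), which copies the base
 * algorithm and overrides setkey.
 */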

static struct ccp_sha_def sha_algs[] = {
	{
		.name		= "sha1",
		.drv_name	= "sha1-ccp",
		.type		= CCP_SHA_TYPE_1,
		.digest_size	= SHA1_DIGEST_SIZE,
		.block_size	= SHA1_BLOCK_SIZE,
	},
	{
		.name		= "sha224",
		.drv_name	= "sha224-ccp",
		.type		= CCP_SHA_TYPE_224,
		.digest_size	= SHA224_DIGEST_SIZE,
		.block_size	= SHA224_BLOCK_SIZE,
	},
	{
		.name		= "sha256",
		.drv_name	= "sha256-ccp",
		.type		= CCP_SHA_TYPE_256,
		.digest_size	= SHA256_DIGEST_SIZE,
		.block_size	= SHA256_BLOCK_SIZE,
	},
};

static int ccp_register_hmac_alg(struct list_head *head,
				 const struct ccp_sha_def *def,
				 const struct ccp_crypto_ahash_alg *base_alg)
{
	struct ccp_crypto_ahash_alg *ccp_alg;
	struct ahash_alg *alg;
	struct hash_alg_common *halg;
	struct crypto_alg *base;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	/* Copy the base algorithm and only change what's necessary */
	*ccp_alg = *base_alg;
	INIT_LIST_HEAD(&ccp_alg->entry);

	strncpy(ccp_alg->child_alg, def->name, CRYPTO_MAX_ALG_NAME);

	alg = &ccp_alg->alg;
	alg->setkey = ccp_sha_setkey;

	halg = &alg->halg;

	base = &halg->base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", def->name);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s",
		 def->drv_name);
	base->cra_init = ccp_hmac_sha_cra_init;
	base->cra_exit = ccp_hmac_sha_cra_exit;

	ret = crypto_register_ahash(alg);
	if (ret) {
		pr_err("%s ahash algorithm registration error (%d)\n",
		       base->cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	return ret;
}

static int ccp_register_sha_alg(struct list_head *head,
				const struct ccp_sha_def *def)
{
	struct ccp_crypto_ahash_alg *ccp_alg;
	struct ahash_alg *alg;
	struct hash_alg_common *halg;
	struct crypto_alg *base;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	INIT_LIST_HEAD(&ccp_alg->entry);

	ccp_alg->type = def->type;

	alg = &ccp_alg->alg;
	alg->init = ccp_sha_init;
	alg->update = ccp_sha_update;
	alg->final = ccp_sha_final;
	alg->finup = ccp_sha_finup;
	alg->digest = ccp_sha_digest;

	halg = &alg->halg;
	halg->digestsize = def->digest_size;

	base = &halg->base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);
	base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC |
			  CRYPTO_ALG_KERN_DRIVER_ONLY |
			  CRYPTO_ALG_NEED_FALLBACK;
	base->cra_blocksize = def->block_size;
	base->cra_ctxsize = sizeof(struct ccp_ctx);
	base->cra_priority = CCP_CRA_PRIORITY;
	base->cra_type = &crypto_ahash_type;
	base->cra_init = ccp_sha_cra_init;
	base->cra_exit = ccp_sha_cra_exit;
	base->cra_module = THIS_MODULE;

	ret = crypto_register_ahash(alg);
	if (ret) {
		pr_err("%s ahash algorithm registration error (%d)\n",
		       base->cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	ret = ccp_register_hmac_alg(head, def, ccp_alg);

	return ret;
}

int ccp_register_sha_algs(struct list_head *head)
{
	int ret;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(sha_algs); i++) {
		ret = ccp_register_sha_alg(head, &sha_algs[i]);
		if (ret)
			return ret;
	}

	return 0;
}
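
/*
 * Editorial note: a minimal usage sketch of the registered ahash via the
 * generic kernel crypto API (illustrative only, not part of this driver;
 * error handling elided):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	u8 digest[SHA256_DIGEST_SIZE];
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, data, data_len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   done_cb, NULL);
 *	ahash_request_set_crypt(req, &sg, digest, data_len);
 *	crypto_ahash_digest(req);	(may return -EINPROGRESS: async)
 *
 * "data", "data_len" and "done_cb" are hypothetical names; the CCP
 * instance is selected when its cra_priority outranks other sha256
 * providers.
 */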