/*
 * AMD Cryptographic Coprocessor (CCP) AES CMAC crypto API support
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"
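/*
 * Completion callback invoked by the CCP queueing layer once the
 * hardware has processed (or failed) a CMAC command.  On success, any
 * unhashed tail of the source data is copied back into the request
 * context buffer so a later update/final call can pick it up.
 */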
static int ccp_aes_cmac_complete(struct crypto_async_request *async_req,
				 int ret)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
	unsigned int digest_size = crypto_ahash_digestsize(tfm);

	if (ret)
		goto e_free;

	if (rctx->hash_rem) {
		/* Save remaining data to buffer */
		unsigned int offset = rctx->nbytes - rctx->hash_rem;

		scatterwalk_map_and_copy(rctx->buf, rctx->src,
					 offset, rctx->hash_rem, 0);
		rctx->buf_count = rctx->hash_rem;
	} else {
		rctx->buf_count = 0;
	}

	/* Update result area if supplied */
	if (req->result && rctx->final)
		memcpy(req->result, rctx->iv, digest_size);

e_free:
	sg_free_table(&rctx->data_sg);

	return ret;
}
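
/*
 * Core update path shared by update/final/finup.  Data that does not
 * yet fill a complete AES block is simply buffered in the request
 * context; otherwise the buffered bytes, the new source data, and any
 * required padding are assembled into a single scatterlist table and
 * queued to the CCP as one AES command.
 */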
static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes,
			      unsigned int final)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
	struct scatterlist *sg, *cmac_key_sg = NULL;
	unsigned int block_size =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int need_pad, sg_count;
	gfp_t gfp;
	u64 len;
	int ret;

	if (!ctx->u.aes.key_len)
		return -EINVAL;

	if (nbytes)
		rctx->null_msg = 0;

	len = (u64)rctx->buf_count + (u64)nbytes;

	if (!final && (len <= block_size)) {
		scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
					 0, nbytes, 0);
		rctx->buf_count += nbytes;

		return 0;
	}

	rctx->src = req->src;
	rctx->nbytes = nbytes;

	rctx->final = final;
	rctx->hash_rem = final ? 0 : len & (block_size - 1);
	rctx->hash_cnt = len - rctx->hash_rem;
	if (!final && !rctx->hash_rem) {
		/* CCP can't do zero length final, so keep some data around */
		rctx->hash_cnt -= block_size;
		rctx->hash_rem = block_size;
	}
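
	/*
	 * Per the CMAC specification (NIST SP 800-38B), a complete final
	 * block is XORed with subkey K1, while an incomplete (or empty)
	 * final block is padded with 0x80 followed by zeroes and XORed
	 * with subkey K2 - hence the need_pad decision below.
	 */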
	if (final && (rctx->null_msg || (len & (block_size - 1))))
		need_pad = 1;
	else
		need_pad = 0;

	sg_init_one(&rctx->iv_sg, rctx->iv, sizeof(rctx->iv));

	/* Build the data scatterlist table - allocate enough entries for all
	 * possible data pieces (buffer, input data, padding)
	 */
	sg_count = (nbytes) ? sg_nents(req->src) + 2 : 2;
	gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		GFP_KERNEL : GFP_ATOMIC;
	ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp);
	if (ret)
		return ret;

	sg = NULL;
	if (rctx->buf_count) {
		sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
		sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
		if (!sg) {
			ret = -EINVAL;
			goto e_free;
		}
	}

	if (nbytes) {
		sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);
		if (!sg) {
			ret = -EINVAL;
			goto e_free;
		}
	}

	if (need_pad) {
		int pad_length = block_size - (len & (block_size - 1));

		rctx->hash_cnt += pad_length;

		memset(rctx->pad, 0, sizeof(rctx->pad));
		rctx->pad[0] = 0x80;
		sg_init_one(&rctx->pad_sg, rctx->pad, pad_length);
		sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->pad_sg);
		if (!sg) {
			ret = -EINVAL;
			goto e_free;
		}
	}
	if (sg) {
		sg_mark_end(sg);
		sg = rctx->data_sg.sgl;
	}
	/* Initialize the K1/K2 scatterlist */
	if (final)
		cmac_key_sg = (need_pad) ? &ctx->u.aes.k2_sg
					 : &ctx->u.aes.k1_sg;

	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
	INIT_LIST_HEAD(&rctx->cmd.entry);
	rctx->cmd.engine = CCP_ENGINE_AES;
	rctx->cmd.u.aes.type = ctx->u.aes.type;
	rctx->cmd.u.aes.mode = ctx->u.aes.mode;
	rctx->cmd.u.aes.action = CCP_AES_ACTION_ENCRYPT;
	rctx->cmd.u.aes.key = &ctx->u.aes.key_sg;
	rctx->cmd.u.aes.key_len = ctx->u.aes.key_len;
	rctx->cmd.u.aes.iv = &rctx->iv_sg;
	rctx->cmd.u.aes.iv_len = AES_BLOCK_SIZE;
	rctx->cmd.u.aes.src = sg;
	rctx->cmd.u.aes.src_len = rctx->hash_cnt;
	rctx->cmd.u.aes.dst = NULL;
	rctx->cmd.u.aes.cmac_key = cmac_key_sg;
	rctx->cmd.u.aes.cmac_key_len = ctx->u.aes.kn_len;
	rctx->cmd.u.aes.cmac_final = final;

	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

	return ret;

e_free:
	sg_free_table(&rctx->data_sg);

	return ret;
}
static int ccp_aes_cmac_init(struct ahash_request *req)
{
	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);

	memset(rctx, 0, sizeof(*rctx));
	rctx->null_msg = 1;

	return 0;
}
static int ccp_aes_cmac_update(struct ahash_request *req)
{
	return ccp_do_cmac_update(req, req->nbytes, 0);
}

static int ccp_aes_cmac_final(struct ahash_request *req)
{
	return ccp_do_cmac_update(req, 0, 1);
}

static int ccp_aes_cmac_finup(struct ahash_request *req)
{
	return ccp_do_cmac_update(req, req->nbytes, 1);
}

static int ccp_aes_cmac_digest(struct ahash_request *req)
{
	int ret;

	ret = ccp_aes_cmac_init(req);
	if (ret)
		return ret;

	return ccp_aes_cmac_finup(req);
}
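
/*
 * export/import serialize the partial-hash state (the buffered bytes
 * and the running intermediate value held in 'iv') so an in-progress
 * hash can be suspended and later resumed, possibly on a different
 * transform instance.
 */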
static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
{
	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
	struct ccp_aes_cmac_exp_ctx state;

	/* Don't let anything leak to 'out' */
	memset(&state, 0, sizeof(state));

	state.null_msg = rctx->null_msg;
	memcpy(state.iv, rctx->iv, sizeof(state.iv));
	state.buf_count = rctx->buf_count;
	memcpy(state.buf, rctx->buf, sizeof(state.buf));

	/* 'out' may not be aligned so memcpy from local variable */
	memcpy(out, &state, sizeof(state));

	return 0;
}
static int ccp_aes_cmac_import(struct ahash_request *req, const void *in)
{
	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
	struct ccp_aes_cmac_exp_ctx state;

	/* 'in' may not be aligned so memcpy to local variable */
	memcpy(&state, in, sizeof(state));

	memset(rctx, 0, sizeof(*rctx));
	rctx->null_msg = state.null_msg;
	memcpy(rctx->iv, state.iv, sizeof(rctx->iv));
	rctx->buf_count = state.buf_count;
	memcpy(rctx->buf, state.buf, sizeof(rctx->buf));

	return 0;
}
static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
			       unsigned int key_len)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct ccp_crypto_ahash_alg *alg =
		ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm));
	u64 k0_hi, k0_lo, k1_hi, k1_lo, k2_hi, k2_lo;
	u64 rb_hi = 0x00, rb_lo = 0x87;
	__be64 *gk;
	int ret;

	switch (key_len) {
	case AES_KEYSIZE_128:
		ctx->u.aes.type = CCP_AES_TYPE_128;
		break;
	case AES_KEYSIZE_192:
		ctx->u.aes.type = CCP_AES_TYPE_192;
		break;
	case AES_KEYSIZE_256:
		ctx->u.aes.type = CCP_AES_TYPE_256;
		break;
	default:
		crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	ctx->u.aes.mode = alg->mode;

	/* Set to zero until complete */
	ctx->u.aes.key_len = 0;

	/* Set the key for the AES cipher used to generate the keys */
	ret = crypto_cipher_setkey(ctx->u.aes.tfm_cipher, key, key_len);
	if (ret)
		return ret;

	/* Encrypt a block of zeroes - use key area in context */
	memset(ctx->u.aes.key, 0, sizeof(ctx->u.aes.key));
	crypto_cipher_encrypt_one(ctx->u.aes.tfm_cipher, ctx->u.aes.key,
				  ctx->u.aes.key);
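
	/*
	 * Subkey derivation per NIST SP 800-38B: L = AES-E(K, 0^128),
	 * K1 = L << 1 (XOR Rb if the MSB of L was set), and
	 * K2 = K1 << 1 (XOR Rb if the MSB of K1 was set), with Rb = 0x87.
	 * The 128-bit left shift is done here as two 64-bit halves.
	 */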
	/* Generate K1 and K2 */
	k0_hi = be64_to_cpu(*((__be64 *)ctx->u.aes.key));
	k0_lo = be64_to_cpu(*((__be64 *)ctx->u.aes.key + 1));

	k1_hi = (k0_hi << 1) | (k0_lo >> 63);
	k1_lo = k0_lo << 1;
	if (ctx->u.aes.key[0] & 0x80) {
		k1_hi ^= rb_hi;
		k1_lo ^= rb_lo;
	}
	gk = (__be64 *)ctx->u.aes.k1;
	*gk = cpu_to_be64(k1_hi);
	gk++;
	*gk = cpu_to_be64(k1_lo);

	k2_hi = (k1_hi << 1) | (k1_lo >> 63);
	k2_lo = k1_lo << 1;
	if (ctx->u.aes.k1[0] & 0x80) {
		k2_hi ^= rb_hi;
		k2_lo ^= rb_lo;
	}
	gk = (__be64 *)ctx->u.aes.k2;
	*gk = cpu_to_be64(k2_hi);
	gk++;
	*gk = cpu_to_be64(k2_lo);
	ctx->u.aes.kn_len = sizeof(ctx->u.aes.k1);
	sg_init_one(&ctx->u.aes.k1_sg, ctx->u.aes.k1, sizeof(ctx->u.aes.k1));
	sg_init_one(&ctx->u.aes.k2_sg, ctx->u.aes.k2, sizeof(ctx->u.aes.k2));

	/* Save the supplied key */
	memset(ctx->u.aes.key, 0, sizeof(ctx->u.aes.key));
	memcpy(ctx->u.aes.key, key, key_len);
	ctx->u.aes.key_len = key_len;
	sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);

	return ret;
}
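
/*
 * A software AES cipher transform is allocated at init time and used
 * only by setkey to compute the CMAC subkeys on the CPU; the actual
 * message processing is offloaded to the CCP.
 */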
static int ccp_aes_cmac_cra_init(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_cipher *cipher_tfm;

	ctx->complete = ccp_aes_cmac_complete;
	ctx->u.aes.key_len = 0;

	crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_aes_cmac_req_ctx));

	cipher_tfm = crypto_alloc_cipher("aes", 0,
					 CRYPTO_ALG_ASYNC |
					 CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(cipher_tfm)) {
		pr_warn("could not load aes cipher driver\n");
		return PTR_ERR(cipher_tfm);
	}
	ctx->u.aes.tfm_cipher = cipher_tfm;

	return 0;
}
static void ccp_aes_cmac_cra_exit(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->u.aes.tfm_cipher)
		crypto_free_cipher(ctx->u.aes.tfm_cipher);
	ctx->u.aes.tfm_cipher = NULL;
}
int ccp_register_aes_cmac_algs(struct list_head *head)
{
	struct ccp_crypto_ahash_alg *ccp_alg;
	struct ahash_alg *alg;
	struct hash_alg_common *halg;
	struct crypto_alg *base;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	INIT_LIST_HEAD(&ccp_alg->entry);
	ccp_alg->mode = CCP_AES_MODE_CMAC;

	alg = &ccp_alg->alg;
	alg->init = ccp_aes_cmac_init;
	alg->update = ccp_aes_cmac_update;
	alg->final = ccp_aes_cmac_final;
	alg->finup = ccp_aes_cmac_finup;
	alg->digest = ccp_aes_cmac_digest;
	alg->export = ccp_aes_cmac_export;
	alg->import = ccp_aes_cmac_import;
	alg->setkey = ccp_aes_cmac_setkey;

	halg = &alg->halg;
	halg->digestsize = AES_BLOCK_SIZE;
	halg->statesize = sizeof(struct ccp_aes_cmac_exp_ctx);

	base = &halg->base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)");
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "cmac-aes-ccp");
	base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC |
			  CRYPTO_ALG_KERN_DRIVER_ONLY |
			  CRYPTO_ALG_NEED_FALLBACK;
	base->cra_blocksize = AES_BLOCK_SIZE;
	base->cra_ctxsize = sizeof(struct ccp_ctx);
	base->cra_priority = CCP_CRA_PRIORITY;
	base->cra_type = &crypto_ahash_type;
	base->cra_init = ccp_aes_cmac_cra_init;
	base->cra_exit = ccp_aes_cmac_cra_exit;
	base->cra_module = THIS_MODULE;

	ret = crypto_register_ahash(alg);
	if (ret) {
		pr_err("%s ahash algorithm registration error (%d)\n",
		       base->cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	return 0;
}
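
/*
 * A minimal usage sketch (not part of this driver): once registered,
 * the algorithm is reachable through the kernel ahash API under the
 * name "cmac(aes)", e.g.:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("cmac(aes)", 0, 0);
 *
 *	crypto_ahash_setkey(tfm, key, key_len);
 *	// then allocate an ahash_request, attach the source scatterlist
 *	// and result buffer with ahash_request_set_crypt(), and call
 *	// crypto_ahash_digest() (or update/final for incremental use).
 */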