/*
 * AMD Cryptographic Coprocessor (CCP) AES XTS crypto API support
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"
struct ccp_aes_xts_def {
	const char *name;
	const char *drv_name;
};

static struct ccp_aes_xts_def aes_xts_algs[] = {
	{
		.name		= "xts(aes)",
		.drv_name	= "xts-aes-ccp",
	},
};
struct ccp_unit_size_map {
	unsigned int size;
	u32 value;
};

static struct ccp_unit_size_map unit_size_map[] = {
	{
		.size	= 4096,
		.value	= CCP_XTS_AES_UNIT_SIZE_4096,
	},
	{
		.size	= 2048,
		.value	= CCP_XTS_AES_UNIT_SIZE_2048,
	},
	{
		.size	= 1024,
		.value	= CCP_XTS_AES_UNIT_SIZE_1024,
	},
	{
		.size	= 512,
		.value	= CCP_XTS_AES_UNIT_SIZE_512,
	},
	{
		.size	= 256,
		.value	= CCP_XTS_AES_UNIT_SIZE__LAST,
	},
	{
		.size	= 128,
		.value	= CCP_XTS_AES_UNIT_SIZE__LAST,
	},
	{
		.size	= 64,
		.value	= CCP_XTS_AES_UNIT_SIZE__LAST,
	},
	{
		.size	= 32,
		.value	= CCP_XTS_AES_UNIT_SIZE__LAST,
	},
	{
		.size	= 16,
		.value	= CCP_XTS_AES_UNIT_SIZE_16,
	},
	{
		.size	= 1,
		.value	= CCP_XTS_AES_UNIT_SIZE__LAST,
	},
};
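/* Completion callback: propagate any error from the CCP, otherwise copy
 * the final tweak back into the request so chained calls see an
 * updated IV.
 */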
static int ccp_aes_xts_complete(struct crypto_async_request *async_req,
				int ret)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);

	if (ret)
		return ret;

	memcpy(req->info, rctx->iv, AES_BLOCK_SIZE);

	return 0;
}
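/* XTS keys arrive as one double-length blob: the AES key followed by a
 * tweak key of equal size.  The fallback cipher is always keyed as
 * well, so it can take over whenever the hardware cannot handle the
 * key size or unit size.
 */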
static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int key_len)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));

	/* Only support 128-bit AES key with a 128-bit Tweak key,
	 * otherwise use the fallback
	 */
	switch (key_len) {
	case AES_KEYSIZE_128 * 2:
		memcpy(ctx->u.aes.key, key, key_len);
		break;
	}

	ctx->u.aes.key_len = key_len / 2;
	sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);

	return crypto_ablkcipher_setkey(ctx->u.aes.tfm_ablkcipher, key,
					key_len);
}
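/* Common encrypt/decrypt path: validate the key and request length,
 * pick a hardware unit size, hand the request to the software fallback
 * when the unit size or key size is unsupported, and otherwise build a
 * CCP command and queue it to the device.
 */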
static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
			     unsigned int encrypt)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
	unsigned int unit;
	int ret;

	if (!ctx->u.aes.key_len)
		return -EINVAL;

	if (req->nbytes & (AES_BLOCK_SIZE - 1))
		return -EINVAL;

	if (!req->info)
		return -EINVAL;

	for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++)
		if (!(req->nbytes & (unit_size_map[unit].size - 1)))
			break;

	if ((unit_size_map[unit].value == CCP_XTS_AES_UNIT_SIZE__LAST) ||
	    (ctx->u.aes.key_len != AES_KEYSIZE_128)) {
		/* Use the fallback to process the request for any
		 * unsupported unit sizes or key sizes
		 */
		ablkcipher_request_set_tfm(req, ctx->u.aes.tfm_ablkcipher);
		ret = (encrypt) ? crypto_ablkcipher_encrypt(req) :
				  crypto_ablkcipher_decrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));

		return ret;
	}

	memcpy(rctx->iv, req->info, AES_BLOCK_SIZE);
	sg_init_one(&rctx->iv_sg, rctx->iv, AES_BLOCK_SIZE);

	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
	INIT_LIST_HEAD(&rctx->cmd.entry);
	rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
	rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
					   : CCP_AES_ACTION_DECRYPT;
	rctx->cmd.u.xts.unit_size = unit_size_map[unit].value;
	rctx->cmd.u.xts.key = &ctx->u.aes.key_sg;
	rctx->cmd.u.xts.key_len = ctx->u.aes.key_len;
	rctx->cmd.u.xts.iv = &rctx->iv_sg;
	rctx->cmd.u.xts.iv_len = AES_BLOCK_SIZE;
	rctx->cmd.u.xts.src = req->src;
	rctx->cmd.u.xts.src_len = req->nbytes;
	rctx->cmd.u.xts.dst = req->dst;

	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

	return ret;
}
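/* Thin wrappers that select the direction for the common crypt path. */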
static int ccp_aes_xts_encrypt(struct ablkcipher_request *req)
{
	return ccp_aes_xts_crypt(req, 1);
}
static int ccp_aes_xts_decrypt(struct ablkcipher_request *req)
{
	return ccp_aes_xts_crypt(req, 0);
}
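/* Instance setup: allocate an asynchronous software implementation of
 * the same algorithm to act as the fallback, and size the request
 * context so it can hold the fallback's request context as well.
 */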
static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_ablkcipher *fallback_tfm;

	ctx->complete = ccp_aes_xts_complete;
	ctx->u.aes.key_len = 0;

	fallback_tfm = crypto_alloc_ablkcipher(tfm->__crt_alg->cra_name, 0,
					       CRYPTO_ALG_ASYNC |
					       CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		pr_warn("could not load fallback driver %s\n",
			tfm->__crt_alg->cra_name);
		return PTR_ERR(fallback_tfm);
	}
	ctx->u.aes.tfm_ablkcipher = fallback_tfm;

	tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx) +
				      fallback_tfm->base.crt_ablkcipher.reqsize;

	return 0;
}
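/* Instance teardown: release the fallback cipher if one was allocated. */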
static void ccp_aes_xts_cra_exit(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->u.aes.tfm_ablkcipher)
		crypto_free_ablkcipher(ctx->u.aes.tfm_ablkcipher);
	ctx->u.aes.tfm_ablkcipher = NULL;
}
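/* Build a crypto_alg descriptor for one table entry and register it
 * with the crypto API, tracking it on the driver's list so it can be
 * unregistered on module unload.
 */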
static int ccp_register_aes_xts_alg(struct list_head *head,
				    const struct ccp_aes_xts_def *def)
{
	struct ccp_crypto_ablkcipher_alg *ccp_alg;
	struct crypto_alg *alg;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	INIT_LIST_HEAD(&ccp_alg->entry);

	alg = &ccp_alg->alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);
	alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
			 CRYPTO_ALG_KERN_DRIVER_ONLY |
			 CRYPTO_ALG_NEED_FALLBACK;
	alg->cra_blocksize = AES_BLOCK_SIZE;
	alg->cra_ctxsize = sizeof(struct ccp_ctx);
	alg->cra_priority = CCP_CRA_PRIORITY;
	alg->cra_type = &crypto_ablkcipher_type;
	alg->cra_ablkcipher.setkey = ccp_aes_xts_setkey;
	alg->cra_ablkcipher.encrypt = ccp_aes_xts_encrypt;
	alg->cra_ablkcipher.decrypt = ccp_aes_xts_decrypt;
	alg->cra_ablkcipher.min_keysize = AES_MIN_KEY_SIZE * 2;
	alg->cra_ablkcipher.max_keysize = AES_MAX_KEY_SIZE * 2;
	alg->cra_ablkcipher.ivsize = AES_BLOCK_SIZE;
	alg->cra_init = ccp_aes_xts_cra_init;
	alg->cra_exit = ccp_aes_xts_cra_exit;
	alg->cra_module = THIS_MODULE;

	ret = crypto_register_alg(alg);
	if (ret) {
		pr_err("%s ablkcipher algorithm registration error (%d)\n",
		       alg->cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	return 0;
}
int ccp_register_aes_xts_algs(struct list_head *head)
{
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(aes_xts_algs); i++) {
		ret = ccp_register_aes_xts_alg(head, &aes_xts_algs[i]);
		if (ret)
			return ret;
	}

	return 0;
}
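/* A minimal usage sketch (not part of this driver): once registered, a
 * kernel user reaches the CCP through the generic crypto API by
 * allocating a transform for "xts(aes)".  The names and error handling
 * below are illustrative only.
 *
 *	struct crypto_ablkcipher *tfm;
 *
 *	tfm = crypto_alloc_ablkcipher("xts(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_ablkcipher_setkey(tfm, key, 2 * AES_KEYSIZE_128);
 *	...
 *	crypto_free_ablkcipher(tfm);
 */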