// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) AES XTS crypto API support
 *
 * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
 *
 * Author: Gary R Hook <gary.hook@amd.com>
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <crypto/aes.h>
#include <crypto/xts.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"

struct ccp_aes_xts_def {
	const char *name;
	const char *drv_name;
};

static const struct ccp_aes_xts_def aes_xts_algs[] = {
	{
		.name		= "xts(aes)",
		.drv_name	= "xts-aes-ccp",
	},
};

struct ccp_unit_size_map {
	unsigned int size;
	u32 value;
};

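/* Data unit sizes the CCP engine can process natively; requests of any
 * other length are handed to the software fallback.
 */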
static struct ccp_unit_size_map xts_unit_sizes[] = {
	{
		.size	= 16,
		.value	= CCP_XTS_AES_UNIT_SIZE_16,
	},
	{
		.size	= 512,
		.value	= CCP_XTS_AES_UNIT_SIZE_512,
	},
	{
		.size	= 1024,
		.value	= CCP_XTS_AES_UNIT_SIZE_1024,
	},
	{
		.size	= 2048,
		.value	= CCP_XTS_AES_UNIT_SIZE_2048,
	},
	{
		.size	= 4096,
		.value	= CCP_XTS_AES_UNIT_SIZE_4096,
	},
};

static int ccp_aes_xts_complete(struct crypto_async_request *async_req,
				int ret)
{
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);

	if (ret)
		return ret;

	memcpy(req->iv, rctx->iv, AES_BLOCK_SIZE);

	return 0;
}

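/* An XTS key is a pair of AES keys (cipher key followed by tweak key),
 * so key_len here is twice the AES key size.
 */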
static int ccp_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
			      unsigned int key_len)
{
	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
	unsigned int ccpversion = ccp_version();
	int ret;

	ret = xts_verify_key(tfm, key, key_len);
	if (ret)
		return ret;

	/* Version 3 devices support 128-bit keys; version 5 devices can
	 * accommodate 128- and 256-bit keys.
	 */
	switch (key_len) {
	case AES_KEYSIZE_128 * 2:
		memcpy(ctx->u.aes.key, key, key_len);
		break;
	case AES_KEYSIZE_256 * 2:
		if (ccpversion > CCP_VERSION(3, 0))
			memcpy(ctx->u.aes.key, key, key_len);
		break;
	}
	ctx->u.aes.key_len = key_len / 2;
	sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);

	/* Keep the fallback cipher's key in sync with ours */
	return crypto_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len);
}

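/* Common encrypt/decrypt path: use the CCP when the request length and
 * key size are supported, otherwise defer to the software fallback.
 */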
static int ccp_aes_xts_crypt(struct skcipher_request *req,
			     unsigned int encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	unsigned int ccpversion = ccp_version();
	unsigned int fallback = 0;
	unsigned int unit;
	u32 unit_size;
	int ret;

	if (!ctx->u.aes.key_len)
		return -EINVAL;

	if (!req->iv)
		return -EINVAL;

	/* Check conditions under which the CCP can fulfill a request. The
	 * device can handle input plaintext of a length that is a multiple
	 * of the unit_size, but the crypto implementation only supports
	 * the unit_size being equal to the input length. This limits the
	 * number of scenarios we can handle.
	 */
	unit_size = CCP_XTS_AES_UNIT_SIZE__LAST;
	for (unit = 0; unit < ARRAY_SIZE(xts_unit_sizes); unit++) {
		if (req->cryptlen == xts_unit_sizes[unit].size) {
			unit_size = unit;
			break;
		}
	}

	/* The CCP has restrictions on block sizes. Also, a version 3 device
	 * only supports AES-128 operations; version 5 CCPs support both
	 * AES-128 and -256 operations.
	 */
	if (unit_size == CCP_XTS_AES_UNIT_SIZE__LAST)
		fallback = 1;
	if ((ccpversion < CCP_VERSION(5, 0)) &&
	    (ctx->u.aes.key_len != AES_KEYSIZE_128))
		fallback = 1;
	if ((ctx->u.aes.key_len != AES_KEYSIZE_128) &&
	    (ctx->u.aes.key_len != AES_KEYSIZE_256))
		fallback = 1;
	if (fallback) {
		/* Use the fallback to process the request for any
		 * unsupported unit sizes or key sizes
		 */
		skcipher_request_set_tfm(&rctx->fallback_req,
					 ctx->u.aes.tfm_skcipher);
		skcipher_request_set_callback(&rctx->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);
		ret = encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
				crypto_skcipher_decrypt(&rctx->fallback_req);
		return ret;
	}

	memcpy(rctx->iv, req->iv, AES_BLOCK_SIZE);
	sg_init_one(&rctx->iv_sg, rctx->iv, AES_BLOCK_SIZE);
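
	/* Build the CCP command descriptor for the XTS-AES engine */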
	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
	INIT_LIST_HEAD(&rctx->cmd.entry);
	rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
	rctx->cmd.u.xts.type = CCP_AES_TYPE_128;
	rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
					   : CCP_AES_ACTION_DECRYPT;
	rctx->cmd.u.xts.unit_size = unit_size;
	rctx->cmd.u.xts.key = &ctx->u.aes.key_sg;
	rctx->cmd.u.xts.key_len = ctx->u.aes.key_len;
	rctx->cmd.u.xts.iv = &rctx->iv_sg;
	rctx->cmd.u.xts.iv_len = AES_BLOCK_SIZE;
	rctx->cmd.u.xts.src = req->src;
	rctx->cmd.u.xts.src_len = req->cryptlen;
	rctx->cmd.u.xts.dst = req->dst;
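
	/* Queue the command; completion is reported asynchronously via
	 * ccp_aes_xts_complete()
	 */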
	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

	return ret;
}

static int ccp_aes_xts_encrypt(struct skcipher_request *req)
{
	return ccp_aes_xts_crypt(req, 1);
}

static int ccp_aes_xts_decrypt(struct skcipher_request *req)
{
	return ccp_aes_xts_crypt(req, 0);
}

static int ccp_aes_xts_init_tfm(struct crypto_skcipher *tfm)
{
	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *fallback_tfm;

	ctx->complete = ccp_aes_xts_complete;
	ctx->u.aes.key_len = 0;
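
	/* Allocate a software xts(aes) implementation to fall back on for
	 * request lengths and key sizes the CCP cannot handle
	 */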
	fallback_tfm = crypto_alloc_skcipher("xts(aes)", 0,
					     CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		pr_warn("could not load fallback driver xts(aes)\n");
		return PTR_ERR(fallback_tfm);
	}
	ctx->u.aes.tfm_skcipher = fallback_tfm;

	crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx) +
					 crypto_skcipher_reqsize(fallback_tfm));

	return 0;
}

static void ccp_aes_xts_exit_tfm(struct crypto_skcipher *tfm)
{
	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->u.aes.tfm_skcipher);
}

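/* Fill in an skcipher_alg descriptor from the template and register it
 * with the crypto API
 */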
static int ccp_register_aes_xts_alg(struct list_head *head,
				    const struct ccp_aes_xts_def *def)
{
	struct ccp_crypto_skcipher_alg *ccp_alg;
	struct skcipher_alg *alg;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	INIT_LIST_HEAD(&ccp_alg->entry);

	alg = &ccp_alg->alg;

	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);
	alg->base.cra_flags	= CRYPTO_ALG_ASYNC |
				  CRYPTO_ALG_ALLOCATES_MEMORY |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_NEED_FALLBACK;
	alg->base.cra_blocksize	= AES_BLOCK_SIZE;
	alg->base.cra_ctxsize	= sizeof(struct ccp_ctx);
	alg->base.cra_priority	= CCP_CRA_PRIORITY;
	alg->base.cra_module	= THIS_MODULE;

	alg->setkey	= ccp_aes_xts_setkey;
	alg->encrypt	= ccp_aes_xts_encrypt;
	alg->decrypt	= ccp_aes_xts_decrypt;
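	/* XTS keys are double-length: cipher key plus tweak key */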
	alg->min_keysize = AES_MIN_KEY_SIZE * 2;
	alg->max_keysize = AES_MAX_KEY_SIZE * 2;
	alg->ivsize	= AES_BLOCK_SIZE;
	alg->init	= ccp_aes_xts_init_tfm;
	alg->exit	= ccp_aes_xts_exit_tfm;

	ret = crypto_register_skcipher(alg);
	if (ret) {
		pr_err("%s skcipher algorithm registration error (%d)\n",
		       alg->base.cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	return 0;
}

int ccp_register_aes_xts_algs(struct list_head *head)
{
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(aes_xts_algs); i++) {
		ret = ccp_register_aes_xts_alg(head, &aes_xts_algs[i]);
		if (ret)
			return ret;
	}

	return 0;
}