/*
 * AMD Cryptographic Coprocessor (CCP) AES XTS crypto API support
 *
 * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
 *
 * Author: Gary R Hook <gary.hook@amd.com>
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <crypto/aes.h>
#include <crypto/xts.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"

struct ccp_aes_xts_def {
	const char *name;
	const char *drv_name;
};

static struct ccp_aes_xts_def aes_xts_algs[] = {
	{
		.name		= "xts(aes)",
		.drv_name	= "xts-aes-ccp",
	},
};
struct ccp_unit_size_map {
	unsigned int size;
	u32 value;
};

static struct ccp_unit_size_map xts_unit_sizes[] = {
	{
		.size	= 16,
		.value	= CCP_XTS_AES_UNIT_SIZE_16,
	},
	{
		.size	= 512,
		.value	= CCP_XTS_AES_UNIT_SIZE_512,
	},
	{
		.size	= 1024,
		.value	= CCP_XTS_AES_UNIT_SIZE_1024,
	},
	{
		.size	= 2048,
		.value	= CCP_XTS_AES_UNIT_SIZE_2048,
	},
	{
		.size	= 4096,
		.value	= CCP_XTS_AES_UNIT_SIZE_4096,
	},
};
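
/* Completion callback for CCP-processed requests: copy the IV back into the
 * original request once the device has finished.
 */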
static int ccp_aes_xts_complete(struct crypto_async_request *async_req, int ret)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);

	if (ret)
		return ret;

	memcpy(req->info, rctx->iv, AES_BLOCK_SIZE);

	return 0;
}
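
/* An XTS key is two AES keys of equal length concatenated together. Keep the
 * full key for the CCP (key_len is halved to the per-AES-key length) and hand
 * a copy to the software fallback so either path can service a request.
 */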
static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int key_len)
{
	struct crypto_tfm *xfm = crypto_ablkcipher_tfm(tfm);
	struct ccp_ctx *ctx = crypto_tfm_ctx(xfm);
	unsigned int ccpversion = ccp_version();
	int ret;

	ret = xts_check_key(xfm, key, key_len);
	if (ret)
		return ret;

	/* Version 3 devices support 128-bit keys; version 5 devices can
	 * accommodate 128- and 256-bit keys.
	 */
	switch (key_len) {
	case AES_KEYSIZE_128 * 2:
		memcpy(ctx->u.aes.key, key, key_len);
		break;
	case AES_KEYSIZE_256 * 2:
		if (ccpversion > CCP_VERSION(3, 0))
			memcpy(ctx->u.aes.key, key, key_len);
		break;
	}
	ctx->u.aes.key_len = key_len / 2;
	sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);

	return crypto_sync_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len);
}
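
/* Common encrypt/decrypt path. Requests the CCP cannot handle (an unsupported
 * data unit size or key size) are redirected to the software fallback;
 * everything else is packaged into a ccp_cmd and queued to the device.
 */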
static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
			     unsigned int encrypt)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
	unsigned int ccpversion = ccp_version();
	unsigned int fallback = 0;
	unsigned int unit;
	u32 unit_size;
	int ret;

	if (!ctx->u.aes.key_len)
		return -EINVAL;

	if (req->nbytes & (AES_BLOCK_SIZE - 1))
		return -EINVAL;

	if (!req->info)
		return -EINVAL;

	/* Check conditions under which the CCP can fulfill a request. The
	 * device can handle input plaintext of a length that is a multiple
	 * of the unit_size, but the crypto implementation only supports
	 * the unit_size being equal to the input length. This limits the
	 * number of scenarios we can handle.
	 */
	unit_size = CCP_XTS_AES_UNIT_SIZE__LAST;
	for (unit = 0; unit < ARRAY_SIZE(xts_unit_sizes); unit++) {
		if (req->nbytes == xts_unit_sizes[unit].size) {
			unit_size = unit;
			break;
		}
	}

	/* The CCP has restrictions on block sizes. Also, a version 3 device
	 * only supports AES-128 operations; version 5 CCPs support both
	 * AES-128 and -256 operations.
	 */
	if (unit_size == CCP_XTS_AES_UNIT_SIZE__LAST)
		fallback = 1;
	if ((ccpversion < CCP_VERSION(5, 0)) &&
	    (ctx->u.aes.key_len != AES_KEYSIZE_128))
		fallback = 1;
	if ((ctx->u.aes.key_len != AES_KEYSIZE_128) &&
	    (ctx->u.aes.key_len != AES_KEYSIZE_256))
		fallback = 1;

	if (fallback) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq,
					       ctx->u.aes.tfm_skcipher);

		/* Use the fallback to process the request for any
		 * unsupported unit sizes or key sizes
		 */
		skcipher_request_set_sync_tfm(subreq, ctx->u.aes.tfm_skcipher);
		skcipher_request_set_callback(subreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->nbytes, req->info);
		ret = encrypt ? crypto_skcipher_encrypt(subreq) :
				crypto_skcipher_decrypt(subreq);
		skcipher_request_zero(subreq);
		return ret;
	}

	memcpy(rctx->iv, req->info, AES_BLOCK_SIZE);
	sg_init_one(&rctx->iv_sg, rctx->iv, AES_BLOCK_SIZE);
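
	/* Build the CCP command describing the XTS-AES operation and hand it
	 * to the queueing layer; ccp_aes_xts_complete() runs when it is done.
	 */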
	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
	INIT_LIST_HEAD(&rctx->cmd.entry);
	rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
	rctx->cmd.u.xts.type = CCP_AES_TYPE_128;
	rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
					   : CCP_AES_ACTION_DECRYPT;
	rctx->cmd.u.xts.unit_size = unit_size;
	rctx->cmd.u.xts.key = &ctx->u.aes.key_sg;
	rctx->cmd.u.xts.key_len = ctx->u.aes.key_len;
	rctx->cmd.u.xts.iv = &rctx->iv_sg;
	rctx->cmd.u.xts.iv_len = AES_BLOCK_SIZE;
	rctx->cmd.u.xts.src = req->src;
	rctx->cmd.u.xts.src_len = req->nbytes;
	rctx->cmd.u.xts.dst = req->dst;

	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

	return ret;
}

static int ccp_aes_xts_encrypt(struct ablkcipher_request *req)
{
	return ccp_aes_xts_crypt(req, 1);
}

static int ccp_aes_xts_decrypt(struct ablkcipher_request *req)
{
	return ccp_aes_xts_crypt(req, 0);
}
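
/* Allocate a synchronous xts(aes) fallback at tfm init time so requests the
 * CCP cannot service can still be completed in software.
 */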
static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_sync_skcipher *fallback_tfm;

	ctx->complete = ccp_aes_xts_complete;
	ctx->u.aes.key_len = 0;

	fallback_tfm = crypto_alloc_sync_skcipher("xts(aes)", 0,
						  CRYPTO_ALG_ASYNC |
						  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		pr_warn("could not load fallback driver xts(aes)\n");
		return PTR_ERR(fallback_tfm);
	}
	ctx->u.aes.tfm_skcipher = fallback_tfm;

	tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx);

	return 0;
}

static void ccp_aes_xts_cra_exit(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_sync_skcipher(ctx->u.aes.tfm_skcipher);
}
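
/* Allocate and register one xts(aes) ablkcipher instance with the crypto API,
 * then add it to the driver's list so it can be unregistered later.
 */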
static int ccp_register_aes_xts_alg(struct list_head *head,
				    const struct ccp_aes_xts_def *def)
{
	struct ccp_crypto_ablkcipher_alg *ccp_alg;
	struct crypto_alg *alg;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	INIT_LIST_HEAD(&ccp_alg->entry);

	alg = &ccp_alg->alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);
	alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
			 CRYPTO_ALG_KERN_DRIVER_ONLY |
			 CRYPTO_ALG_NEED_FALLBACK;
	alg->cra_blocksize = AES_BLOCK_SIZE;
	alg->cra_ctxsize = sizeof(struct ccp_ctx);
	alg->cra_priority = CCP_CRA_PRIORITY;
	alg->cra_type = &crypto_ablkcipher_type;
	alg->cra_ablkcipher.setkey = ccp_aes_xts_setkey;
	alg->cra_ablkcipher.encrypt = ccp_aes_xts_encrypt;
	alg->cra_ablkcipher.decrypt = ccp_aes_xts_decrypt;
	alg->cra_ablkcipher.min_keysize = AES_MIN_KEY_SIZE * 2;
	alg->cra_ablkcipher.max_keysize = AES_MAX_KEY_SIZE * 2;
	alg->cra_ablkcipher.ivsize = AES_BLOCK_SIZE;
	alg->cra_init = ccp_aes_xts_cra_init;
	alg->cra_exit = ccp_aes_xts_cra_exit;
	alg->cra_module = THIS_MODULE;

	ret = crypto_register_alg(alg);
	if (ret) {
		pr_err("%s ablkcipher algorithm registration error (%d)\n",
		       alg->cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	return 0;
}
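
/* Register every entry in aes_xts_algs with the crypto API. */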
int ccp_register_aes_xts_algs(struct list_head *head)
{
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(aes_xts_algs); i++) {
		ret = ccp_register_aes_xts_alg(head, &aes_xts_algs[i]);
		if (ret)
			return ret;
	}

	return 0;
}