// SPDX-License-Identifier: GPL-2.0+
/*
 * s390 implementation of the AES Cipher Algorithm.
 *
 * Copyright IBM Corp. 2005, 2017
 * Author(s): Jan Glauber (jang@de.ibm.com)
 *	      Sebastian Siewior (sebastian@breakpoint.cc) SW-Fallback
 *	      Patrick Steuer <patrick.steuer@de.ibm.com>
 *	      Harald Freudenberger <freude@de.ibm.com>
 *
 * Derived from "crypto/aes_generic.c"
 */
#define KMSG_COMPONENT "aes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/ghash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fips.h>
#include <linux/string.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>
static u8 *ctrblk;
static DEFINE_MUTEX(ctrblk_lock);

static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
		    kma_functions;
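
/*
 * Per-tfm context for the CPACF based AES algorithms: fc holds the CPACF
 * function code selected at setkey time (0 if the required facility is
 * not available), key/key_len keep a raw copy of the key, and fallback
 * points to the software implementation used whenever fc is 0.
 */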
struct s390_aes_ctx {
	u8 key[AES_MAX_KEY_SIZE];
	int key_len;
	unsigned long fc;
	union {
		struct crypto_skcipher *skcipher;
		struct crypto_cipher *cip;
	} fallback;
};

struct s390_xts_ctx {
	u8 key[32];
	u8 pcc_key[32];
	int key_len;
	unsigned long fc;
	struct crypto_skcipher *fallback;
};
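
/*
 * State for walking the scatterlists of a GCM request: walk/walk_bytes
 * track the currently mapped scatterlist segment, walk_bytes_remain the
 * bytes still to be processed, and buf/buf_bytes collect data that
 * straddles a segment boundary so that CPACF KMA always operates on a
 * contiguous region; ptr/nbytes describe the region handed to the caller.
 */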
struct gcm_sg_walk {
	struct scatter_walk walk;
	unsigned int walk_bytes;
	u8 *walk_ptr;
	unsigned int walk_bytes_remain;
	u8 buf[AES_BLOCK_SIZE];
	unsigned int buf_bytes;
	u8 *ptr;
	unsigned int nbytes;
};
static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
			       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
					       CRYPTO_TFM_REQ_MASK);

	return crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_cip(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
}

static void crypto_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc | CPACF_DECRYPT,
		 &sctx->key, out, in, AES_BLOCK_SIZE);
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
						 CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(sctx->fallback.cip)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.cip);
	}

	return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(sctx->fallback.cip);
	sctx->fallback.cip = NULL;
}

static struct crypto_alg aes_alg = {
	.cra_name	= "aes",
	.cra_driver_name = "aes-s390",
	.cra_flags	= CRYPTO_ALG_TYPE_CIPHER |
			  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize	= AES_BLOCK_SIZE,
	.cra_ctxsize	= sizeof(struct s390_aes_ctx),
	.cra_module	= THIS_MODULE,
	.cra_init	= fallback_init_cip,
	.cra_exit	= fallback_exit_cip,
	.cra_u		= {
		.cipher = {
			.cia_min_keysize = AES_MIN_KEY_SIZE,
			.cia_max_keysize = AES_MAX_KEY_SIZE,
			.cia_setkey	 = aes_set_key,
			.cia_encrypt	 = crypto_aes_encrypt,
			.cia_decrypt	 = crypto_aes_decrypt,
		}
	}
};

static int setkey_fallback_skcipher(struct crypto_skcipher *tfm, const u8 *key,
				    unsigned int len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_clear_flags(sctx->fallback.skcipher,
				    CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(sctx->fallback.skcipher,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(sctx->fallback.skcipher, key, len);
}
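
/*
 * Forward a request to the software fallback. The request context of
 * this tfm was sized (see fallback_init_skcipher()) to hold a complete
 * sub-request for the fallback tfm, so the request can be copied and
 * redirected without any additional allocation.
 */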
static int fallback_skcipher_crypt(struct s390_aes_ctx *sctx,
				   struct skcipher_request *req,
				   unsigned long modifier)
{
	struct skcipher_request *subreq = skcipher_request_ctx(req);

	*subreq = *req;
	skcipher_request_set_tfm(subreq, sctx->fallback.skcipher);
	return (modifier & CPACF_DECRYPT) ?
		crypto_skcipher_decrypt(subreq) :
		crypto_skcipher_encrypt(subreq);
}

static int ecb_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}
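
/*
 * ECB: walk the request and feed only complete AES blocks to the CPACF
 * KM instruction; whatever does not fill a full block is handed back to
 * skcipher_walk_done() and picked up in the next iteration.
 */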
static int ecb_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes, n;
	int ret;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, modifier);

	ret = skcipher_walk_virt(&walk, req, false);
	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(sctx->fc | modifier, sctx->key,
			 walk.dst.virt.addr, walk.src.virt.addr, n);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	return ret;
}

static int ecb_aes_encrypt(struct skcipher_request *req)
{
	return ecb_aes_crypt(req, 0);
}

static int ecb_aes_decrypt(struct skcipher_request *req)
{
	return ecb_aes_crypt(req, CPACF_DECRYPT);
}

static int fallback_init_skcipher(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	sctx->fallback.skcipher = crypto_alloc_skcipher(name, 0,
				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);

	if (IS_ERR(sctx->fallback.skcipher)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.skcipher);
	}

	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
			    crypto_skcipher_reqsize(sctx->fallback.skcipher));
	return 0;
}

static void fallback_exit_skcipher(struct crypto_skcipher *tfm)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(sctx->fallback.skcipher);
}

static struct skcipher_alg ecb_aes_alg = {
	.base.cra_name		= "ecb(aes)",
	.base.cra_driver_name	= "ecb-aes-s390",
	.base.cra_priority	= 401,	/* combo: aes + ecb + 1 */
	.base.cra_flags		= CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct s390_aes_ctx),
	.base.cra_module	= THIS_MODULE,
	.init			= fallback_init_skcipher,
	.exit			= fallback_exit_skcipher,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= ecb_aes_set_key,
	.encrypt		= ecb_aes_encrypt,
	.decrypt		= ecb_aes_decrypt,
};

static int cbc_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMC_AES_128 :
	     (key_len == 24) ? CPACF_KMC_AES_192 :
	     (key_len == 32) ? CPACF_KMC_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}
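
/*
 * CBC: the CPACF KMC instruction takes a parameter block that holds the
 * chaining value (IV) followed by the key. KMC updates the chaining
 * value in place, so it is copied back to walk.iv after every chunk.
 */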
static int cbc_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes, n;
	int ret;
	struct {
		u8 iv[AES_BLOCK_SIZE];
		u8 key[AES_MAX_KEY_SIZE];
	} param;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, modifier);

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;
	memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
	memcpy(param.key, sctx->key, sctx->key_len);
	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_kmc(sctx->fc | modifier, &param,
			  walk.dst.virt.addr, walk.src.virt.addr, n);
		memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	memzero_explicit(&param, sizeof(param));
	return ret;
}

static int cbc_aes_encrypt(struct skcipher_request *req)
{
	return cbc_aes_crypt(req, 0);
}

static int cbc_aes_decrypt(struct skcipher_request *req)
{
	return cbc_aes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg cbc_aes_alg = {
	.base.cra_name		= "cbc(aes)",
	.base.cra_driver_name	= "cbc-aes-s390",
	.base.cra_priority	= 402,	/* ecb-aes-s390 + 1 */
	.base.cra_flags		= CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct s390_aes_ctx),
	.base.cra_module	= THIS_MODULE,
	.init			= fallback_init_skcipher,
	.exit			= fallback_exit_skcipher,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= cbc_aes_set_key,
	.encrypt		= cbc_aes_encrypt,
	.decrypt		= cbc_aes_decrypt,
};

static int xts_fallback_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       unsigned int len)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(xts_ctx->fallback,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(xts_ctx->fallback, key, len);
}

static int xts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;
	int err;

	err = xts_fallback_setkey(tfm, in_key, key_len);
	if (err)
		return err;

	/* In fips mode only 128 bit or 256 bit keys are valid */
	if (fips_enabled && key_len != 32 && key_len != 64)
		return -EINVAL;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 32) ? CPACF_KM_XTS_128 :
	     (key_len == 64) ? CPACF_KM_XTS_256 : 0;

	/* Check if the function code is available */
	xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!xts_ctx->fc)
		return 0;

	/* Split the XTS key into the two subkeys */
	key_len = key_len / 2;
	xts_ctx->key_len = key_len;
	memcpy(xts_ctx->key, in_key, key_len);
	memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
	return 0;
}
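
/*
 * XTS: the CPACF PCC instruction derives the initial tweak from the
 * second half of the XTS key; the result is then fed to KM together
 * with the first half of the key. The parameter blocks below are sized
 * for the 256-bit case; for 128-bit keys, "offset" moves the key and
 * the instruction's view of the block to the shorter XTS-128 layout.
 */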
static int xts_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int offset, nbytes, n;
	int ret;
	struct {
		u8 key[32];
		u8 tweak[16];
		u8 block[16];
		u8 bit[16];
		u8 xts[16];
	} pcc_param;
	struct {
		u8 key[32];
		u8 init[16];
	} xts_param;

	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) {
		struct skcipher_request *subreq = skcipher_request_ctx(req);

		*subreq = *req;
		skcipher_request_set_tfm(subreq, xts_ctx->fallback);
		return (modifier & CPACF_DECRYPT) ?
			crypto_skcipher_decrypt(subreq) :
			crypto_skcipher_encrypt(subreq);
	}

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;
	offset = xts_ctx->key_len & 0x10;
	memset(pcc_param.block, 0, sizeof(pcc_param.block));
	memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
	memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
	memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
	cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);

	memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
	memcpy(xts_param.init, pcc_param.xts, 16);

	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
			 walk.dst.virt.addr, walk.src.virt.addr, n);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	memzero_explicit(&pcc_param, sizeof(pcc_param));
	memzero_explicit(&xts_param, sizeof(xts_param));
	return ret;
}

static int xts_aes_encrypt(struct skcipher_request *req)
{
	return xts_aes_crypt(req, 0);
}

static int xts_aes_decrypt(struct skcipher_request *req)
{
	return xts_aes_crypt(req, CPACF_DECRYPT);
}

static int xts_fallback_init(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);

	if (IS_ERR(xts_ctx->fallback)) {
		pr_err("Allocating XTS fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(xts_ctx->fallback);
	}
	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
				    crypto_skcipher_reqsize(xts_ctx->fallback));
	return 0;
}

static void xts_fallback_exit(struct crypto_skcipher *tfm)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(xts_ctx->fallback);
}

static struct skcipher_alg xts_aes_alg = {
	.base.cra_name		= "xts(aes)",
	.base.cra_driver_name	= "xts-aes-s390",
	.base.cra_priority	= 402,	/* ecb-aes-s390 + 1 */
	.base.cra_flags		= CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct s390_xts_ctx),
	.base.cra_module	= THIS_MODULE,
	.init			= xts_fallback_init,
	.exit			= xts_fallback_exit,
	.min_keysize		= 2 * AES_MIN_KEY_SIZE,
	.max_keysize		= 2 * AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= xts_aes_set_key,
	.encrypt		= xts_aes_encrypt,
	.decrypt		= xts_aes_decrypt,
};

static int ctr_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
	     (key_len == 24) ? CPACF_KMCTR_AES_192 :
	     (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}
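
/*
 * Fill the shared ctrblk page with consecutive counter values derived
 * from iv so that a single CPACF KMCTR invocation can process several
 * blocks at once. Returns the number of bytes covered by the prepared
 * counter blocks.
 */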
static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
	unsigned int i, n;

	/* only use complete blocks, max. PAGE_SIZE */
	memcpy(ctrptr, iv, AES_BLOCK_SIZE);
	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
	for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
		memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
		crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
		ctrptr += AES_BLOCK_SIZE;
	}
	return n;
}
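
/*
 * CTR: the shared ctrblk page is only taken with mutex_trylock(). If it
 * is already in use, processing degrades to one block per KMCTR call
 * with walk.iv as the counter instead of blocking.
 */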
static int ctr_aes_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	u8 buf[AES_BLOCK_SIZE], *ctrptr;
	struct skcipher_walk walk;
	unsigned int n, nbytes;
	int ret, locked;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, 0);

	locked = mutex_trylock(&ctrblk_lock);

	ret = skcipher_walk_virt(&walk, req, false);
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		n = AES_BLOCK_SIZE;

		if (nbytes >= 2*AES_BLOCK_SIZE && locked)
			n = __ctrblk_init(ctrblk, walk.iv, nbytes);
		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
		cpacf_kmctr(sctx->fc, sctx->key, walk.dst.virt.addr,
			    walk.src.virt.addr, n, ctrptr);
		if (ctrptr == ctrblk)
			memcpy(walk.iv, ctrptr + n - AES_BLOCK_SIZE,
			       AES_BLOCK_SIZE);
		crypto_inc(walk.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	if (locked)
		mutex_unlock(&ctrblk_lock);
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
	if (nbytes) {
		cpacf_kmctr(sctx->fc, sctx->key, buf, walk.src.virt.addr,
			    AES_BLOCK_SIZE, walk.iv);
		memcpy(walk.dst.virt.addr, buf, nbytes);
		crypto_inc(walk.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, 0);
	}

	return ret;
}

static struct skcipher_alg ctr_aes_alg = {
	.base.cra_name		= "ctr(aes)",
	.base.cra_driver_name	= "ctr-aes-s390",
	.base.cra_priority	= 402,	/* ecb-aes-s390 + 1 */
	.base.cra_flags		= CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct s390_aes_ctx),
	.base.cra_module	= THIS_MODULE,
	.init			= fallback_init_skcipher,
	.exit			= fallback_exit_skcipher,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= ctr_aes_set_key,
	.encrypt		= ctr_aes_crypt,
	.decrypt		= ctr_aes_crypt,
	.chunksize		= AES_BLOCK_SIZE,
};

static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->fc = CPACF_KMA_GCM_AES_128;
		break;
	case AES_KEYSIZE_192:
		ctx->fc = CPACF_KMA_GCM_AES_192;
		break;
	case AES_KEYSIZE_256:
		ctx->fc = CPACF_KMA_GCM_AES_256;
		break;
	default:
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_len = keylen;
	return 0;
}

static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 8:
	case 12:
	case 13:
	case 14:
	case 15:
	case 16:
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
			   unsigned int len)
{
	memset(gw, 0, sizeof(*gw));
	gw->walk_bytes_remain = len;
	scatterwalk_start(&gw->walk, sg);
}

static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw)
{
	struct scatterlist *nextsg;

	gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
	while (!gw->walk_bytes) {
		nextsg = sg_next(gw->walk.sg);
		if (!nextsg)
			return 0;
		scatterwalk_start(&gw->walk, nextsg);
		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
						   gw->walk_bytes_remain);
	}
	gw->walk_ptr = scatterwalk_map(&gw->walk);
	return gw->walk_bytes;
}

static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
					     unsigned int nbytes)
{
	gw->walk_bytes_remain -= nbytes;
	scatterwalk_unmap(&gw->walk);
	scatterwalk_advance(&gw->walk, nbytes);
	scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
}
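
/*
 * gcm_in_walk_go()/gcm_out_walk_go() set up gw->ptr/gw->nbytes to point
 * at a contiguous region of at least minbytesneeded bytes, either mapped
 * straight from the scatterlist or staged in gw->buf when the data
 * crosses a segment boundary. The matching *_walk_done() helpers consume
 * the processed bytes and, for the out walk, flush the staging buffer
 * back to the scatterlist.
 */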
static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	unsigned int n;

	if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
		gw->ptr = gw->buf;
		gw->nbytes = gw->buf_bytes;
		goto out;
	}

	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!_gcm_sg_clamp_and_map(gw)) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk_ptr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	while (1) {
		n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
		memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
		gw->buf_bytes += n;
		_gcm_sg_unmap_and_advance(gw, n);
		if (gw->buf_bytes >= minbytesneeded) {
			gw->ptr = gw->buf;
			gw->nbytes = gw->buf_bytes;
			goto out;
		}
		if (!_gcm_sg_clamp_and_map(gw)) {
			gw->ptr = NULL;
			gw->nbytes = 0;
			goto out;
		}
	}

out:
	return gw->nbytes;
}

static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!_gcm_sg_clamp_and_map(gw)) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk_ptr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	scatterwalk_unmap(&gw->walk);

	gw->ptr = gw->buf;
	gw->nbytes = sizeof(gw->buf);

out:
	return gw->nbytes;
}

static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	if (gw->ptr == NULL)
		return 0;

	if (gw->ptr == gw->buf) {
		int n = gw->buf_bytes - bytesdone;

		if (n > 0) {
			memmove(gw->buf, gw->buf + bytesdone, n);
			gw->buf_bytes = n;
		} else
			gw->buf_bytes = 0;
	} else
		_gcm_sg_unmap_and_advance(gw, bytesdone);

	return bytesdone;
}

static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	unsigned int i, n;

	if (gw->ptr == NULL)
		return 0;

	if (gw->ptr == gw->buf) {
		for (i = 0; i < bytesdone; i += n) {
			if (!_gcm_sg_clamp_and_map(gw))
				return i;
			n = min(gw->walk_bytes, bytesdone - i);
			memcpy(gw->walk_ptr, gw->buf + i, n);
			_gcm_sg_unmap_and_advance(gw, n);
		}
	} else
		_gcm_sg_unmap_and_advance(gw, bytesdone);

	return bytesdone;
}

static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int ivsize = crypto_aead_ivsize(tfm);
	unsigned int taglen = crypto_aead_authsize(tfm);
	unsigned int aadlen = req->assoclen;
	unsigned int pclen = req->cryptlen;
	int ret = 0;

	unsigned int n, len, in_bytes, out_bytes,
		     min_bytes, bytes, aad_bytes, pc_bytes;
	struct gcm_sg_walk gw_in, gw_out;
	u8 tag[GHASH_DIGEST_SIZE];

	struct {
		u32 _[3];		/* reserved */
		u32 cv;			/* Counter Value */
		u8 t[GHASH_DIGEST_SIZE];/* Tag */
		u8 h[AES_BLOCK_SIZE];	/* Hash-subkey */
		u64 taadl;		/* Total AAD Length */
		u64 tpcl;		/* Total Plain-/Cipher-text Length */
		u8 j0[GHASH_BLOCK_SIZE];/* initial counter value */
		u8 k[AES_MAX_KEY_SIZE];	/* Key */
	} param;

	/*
	 * encrypt
	 *   req->src: aad||plaintext
	 *   req->dst: aad||ciphertext||tag
	 * decrypt
	 *   req->src: aad||ciphertext||tag
	 *   req->dst: aad||plaintext, return 0 or -EBADMSG
	 * aad, plaintext and ciphertext may be empty.
	 */
	if (flags & CPACF_DECRYPT)
		pclen -= taglen;
	len = aadlen + pclen;

	memset(&param, 0, sizeof(param));
	param.cv = 1;
	param.taadl = aadlen * 8;
	param.tpcl = pclen * 8;
	memcpy(param.j0, req->iv, ivsize);
	*(u32 *)(param.j0 + ivsize) = 1;
	memcpy(param.k, ctx->key, ctx->key_len);

	gcm_walk_start(&gw_in, req->src, len);
	gcm_walk_start(&gw_out, req->dst, len);

	do {
		min_bytes = min_t(unsigned int,
				  aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
		in_bytes = gcm_in_walk_go(&gw_in, min_bytes);
		out_bytes = gcm_out_walk_go(&gw_out, min_bytes);
		bytes = min(in_bytes, out_bytes);

		if (aadlen + pclen <= bytes) {
			aad_bytes = aadlen;
			pc_bytes = pclen;
			flags |= CPACF_KMA_LAAD | CPACF_KMA_LPC;
		} else {
			if (aadlen <= bytes) {
				aad_bytes = aadlen;
				pc_bytes = (bytes - aadlen) &
					   ~(AES_BLOCK_SIZE - 1);
				flags |= CPACF_KMA_LAAD;
			} else {
				aad_bytes = bytes & ~(AES_BLOCK_SIZE - 1);
				pc_bytes = 0;
			}
		}

		if (aad_bytes > 0)
			memcpy(gw_out.ptr, gw_in.ptr, aad_bytes);

		cpacf_kma(ctx->fc | flags, &param,
			  gw_out.ptr + aad_bytes,
			  gw_in.ptr + aad_bytes, pc_bytes,
			  gw_in.ptr, aad_bytes);

		n = aad_bytes + pc_bytes;
		if (gcm_in_walk_done(&gw_in, n) != n)
			return -ENOMEM;
		if (gcm_out_walk_done(&gw_out, n) != n)
			return -ENOMEM;
		aadlen -= aad_bytes;
		pclen -= pc_bytes;
	} while (aadlen + pclen > 0);

	if (flags & CPACF_DECRYPT) {
		scatterwalk_map_and_copy(tag, req->src, len, taglen, 0);
		if (crypto_memneq(tag, param.t, taglen))
			ret = -EBADMSG;
	} else
		scatterwalk_map_and_copy(param.t, req->dst, len, taglen, 1);

	memzero_explicit(&param, sizeof(param));
	return ret;
}

static int gcm_aes_encrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_ENCRYPT);
}

static int gcm_aes_decrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_DECRYPT);
}
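
/*
 * The aead_alg below is reached through the generic crypto API as
 * "gcm(aes)". Illustrative sketch of a kernel caller (not part of this
 * driver; key/iv/scatterlists are assumed to be set up elsewhere, error
 * and asynchronous completion handling are omitted):
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	struct aead_request *req = aead_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_aead_setkey(tfm, key, AES_KEYSIZE_256);
 *	crypto_aead_setauthsize(tfm, 16);
 *	aead_request_set_callback(req, 0, NULL, NULL);
 *	aead_request_set_ad(req, assoclen);
 *	aead_request_set_crypt(req, src_sg, dst_sg, cryptlen, iv);
 *	crypto_aead_encrypt(req);
 */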
static struct aead_alg gcm_aes_aead = {
	.setkey			= gcm_aes_setkey,
	.setauthsize		= gcm_aes_setauthsize,
	.encrypt		= gcm_aes_encrypt,
	.decrypt		= gcm_aes_decrypt,

	.ivsize			= GHASH_BLOCK_SIZE - sizeof(u32),
	.maxauthsize		= GHASH_DIGEST_SIZE,
	.chunksize		= AES_BLOCK_SIZE,

	.base			= {
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct s390_aes_ctx),
		.cra_name		= "gcm(aes)",
		.cra_driver_name	= "gcm-aes-s390",
		.cra_module		= THIS_MODULE,
	},
};

static struct crypto_alg *aes_s390_alg;
static struct skcipher_alg *aes_s390_skcipher_algs[4];
static int aes_s390_skciphers_num;
static struct aead_alg *aes_s390_aead_alg;

static int aes_s390_register_skcipher(struct skcipher_alg *alg)
{
	int ret;

	ret = crypto_register_skcipher(alg);
	if (!ret)
		aes_s390_skcipher_algs[aes_s390_skciphers_num++] = alg;
	return ret;
}

static void aes_s390_fini(void)
{
	if (aes_s390_alg)
		crypto_unregister_alg(aes_s390_alg);
	while (aes_s390_skciphers_num--)
		crypto_unregister_skcipher(aes_s390_skcipher_algs[aes_s390_skciphers_num]);
	if (ctrblk)
		free_page((unsigned long) ctrblk);

	if (aes_s390_aead_alg)
		crypto_unregister_aead(aes_s390_aead_alg);
}

static int __init aes_s390_init(void)
{
	int ret;

	/* Query available functions for KM, KMC, KMCTR and KMA */
	cpacf_query(CPACF_KM, &km_functions);
	cpacf_query(CPACF_KMC, &kmc_functions);
	cpacf_query(CPACF_KMCTR, &kmctr_functions);
	cpacf_query(CPACF_KMA, &kma_functions);

	if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
		ret = crypto_register_alg(&aes_alg);
		if (ret)
			goto out_err;
		aes_s390_alg = &aes_alg;
		ret = aes_s390_register_skcipher(&ecb_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
		ret = aes_s390_register_skcipher(&cbc_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
		ret = aes_s390_register_skcipher(&xts_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
		if (!ctrblk) {
			ret = -ENOMEM;
			goto out_err;
		}
		ret = aes_s390_register_skcipher(&ctr_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_128) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_192) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_256)) {
		ret = crypto_register_aead(&gcm_aes_aead);
		if (ret)
			goto out_err;
		aes_s390_aead_alg = &gcm_aes_aead;
	}

	return 0;
out_err:
	aes_s390_fini();
	return ret;
}

module_cpu_feature_match(MSA, aes_s390_init);
module_exit(aes_s390_fini);

MODULE_ALIAS_CRYPTO("aes-all");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");