// SPDX-License-Identifier: GPL-2.0+
/*
 * s390 implementation of the AES Cipher Algorithm.
 *
 * Copyright IBM Corp. 2005, 2017
 * Author(s): Jan Glauber (jang@de.ibm.com)
 *	      Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
 *	      Patrick Steuer <patrick.steuer@de.ibm.com>
 *	      Harald Freudenberger <freude@de.ibm.com>
 *
 * Derived from "crypto/aes_generic.c"
 */

#define KMSG_COMPONENT "aes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/ghash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fips.h>
#include <linux/string.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>

static u8 *ctrblk;
static DEFINE_MUTEX(ctrblk_lock);

static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
		    kma_functions;

struct s390_aes_ctx {
	u8 key[AES_MAX_KEY_SIZE];
	int key_len;
	unsigned long fc;
	union {
		struct crypto_skcipher *skcipher;
		struct crypto_cipher *cip;
	} fallback;
};

struct s390_xts_ctx {
	u8 key[32];
	u8 pcc_key[32];
	int key_len;
	unsigned long fc;
	struct crypto_skcipher *fallback;
};

struct gcm_sg_walk {
	struct scatter_walk walk;
	unsigned int walk_bytes;
	u8 *walk_ptr;
	unsigned int walk_bytes_remain;
	u8 buf[AES_BLOCK_SIZE];
	unsigned int buf_bytes;
	u8 *ptr;
	unsigned int nbytes;
};

static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
			       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
					       CRYPTO_TFM_REQ_MASK);

	return crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
}

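/*
 * Map the key length to a CPACF-KM function code. If the machine does not
 * offer that function code, sctx->fc stays 0 and all requests are handled
 * by the fallback cipher allocated in fallback_init_cip().
 */
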
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_cip(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
}

static void crypto_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc | CPACF_DECRYPT,
		 &sctx->key, out, in, AES_BLOCK_SIZE);
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
						 CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(sctx->fallback.cip)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.cip);
	}

	return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(sctx->fallback.cip);
	sctx->fallback.cip = NULL;
}

static struct crypto_alg aes_alg = {
	.cra_name	= "aes",
	.cra_driver_name = "aes-s390",
	.cra_priority	= 300,
	.cra_flags	= CRYPTO_ALG_TYPE_CIPHER |
			  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize	= AES_BLOCK_SIZE,
	.cra_ctxsize	= sizeof(struct s390_aes_ctx),
	.cra_module	= THIS_MODULE,
	.cra_init	= fallback_init_cip,
	.cra_exit	= fallback_exit_cip,
	.cra_u		= {
		.cipher = {
			.cia_min_keysize = AES_MIN_KEY_SIZE,
			.cia_max_keysize = AES_MAX_KEY_SIZE,
			.cia_setkey	 = aes_set_key,
			.cia_encrypt	 = crypto_aes_encrypt,
			.cia_decrypt	 = crypto_aes_decrypt,
		}
	}
};

static int setkey_fallback_skcipher(struct crypto_skcipher *tfm, const u8 *key,
				    unsigned int len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_clear_flags(sctx->fallback.skcipher,
				    CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(sctx->fallback.skcipher,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(sctx->fallback.skcipher, key, len);
}

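/*
 * Run the request on the fallback skcipher. The sub-request is placed in
 * the request context, which is sized accordingly via
 * crypto_skcipher_set_reqsize() in fallback_init_skcipher().
 */
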
static int fallback_skcipher_crypt(struct s390_aes_ctx *sctx,
				   struct skcipher_request *req,
				   unsigned long modifier)
{
	struct skcipher_request *subreq = skcipher_request_ctx(req);

	*subreq = *req;
	skcipher_request_set_tfm(subreq, sctx->fallback.skcipher);
	return (modifier & CPACF_DECRYPT) ?
		crypto_skcipher_decrypt(subreq) :
		crypto_skcipher_encrypt(subreq);
}

static int ecb_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

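/*
 * ECB: each walk step feeds only complete AES blocks to the KM
 * instruction; a partial remainder is returned to the walk and picked up
 * in the next iteration.
 */
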
static int ecb_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes, n;
	int ret;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, modifier);

	ret = skcipher_walk_virt(&walk, req, false);
	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(sctx->fc | modifier, sctx->key,
			 walk.dst.virt.addr, walk.src.virt.addr, n);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	return ret;
}

static int ecb_aes_encrypt(struct skcipher_request *req)
{
	return ecb_aes_crypt(req, 0);
}

static int ecb_aes_decrypt(struct skcipher_request *req)
{
	return ecb_aes_crypt(req, CPACF_DECRYPT);
}

static int fallback_init_skcipher(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	sctx->fallback.skcipher = crypto_alloc_skcipher(name, 0,
				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
	if (IS_ERR(sctx->fallback.skcipher)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.skcipher);
	}

	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
				    crypto_skcipher_reqsize(sctx->fallback.skcipher));
	return 0;
}

static void fallback_exit_skcipher(struct crypto_skcipher *tfm)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(sctx->fallback.skcipher);
}

static struct skcipher_alg ecb_aes_alg = {
	.base.cra_name		= "ecb(aes)",
	.base.cra_driver_name	= "ecb-aes-s390",
	.base.cra_priority	= 401,	/* combo: aes + ecb + 1 */
	.base.cra_flags		= CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct s390_aes_ctx),
	.base.cra_module	= THIS_MODULE,
	.init			= fallback_init_skcipher,
	.exit			= fallback_exit_skcipher,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= ecb_aes_set_key,
	.encrypt		= ecb_aes_encrypt,
	.decrypt		= ecb_aes_decrypt,
};

static int cbc_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMC_AES_128 :
	     (key_len == 24) ? CPACF_KMC_AES_192 :
	     (key_len == 32) ? CPACF_KMC_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

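/*
 * CBC: the KMC parameter block holds the chaining value (IV) followed by
 * the key. KMC updates the chaining value in place, so it is copied back
 * to walk.iv after each chunk.
 */
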
static int cbc_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes, n;
	int ret;
	struct {
		u8 iv[AES_BLOCK_SIZE];
		u8 key[AES_MAX_KEY_SIZE];
	} param;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, modifier);

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;
	memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
	memcpy(param.key, sctx->key, sctx->key_len);
	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_kmc(sctx->fc | modifier, &param,
			  walk.dst.virt.addr, walk.src.virt.addr, n);
		memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	return ret;
}

static int cbc_aes_encrypt(struct skcipher_request *req)
{
	return cbc_aes_crypt(req, 0);
}

static int cbc_aes_decrypt(struct skcipher_request *req)
{
	return cbc_aes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg cbc_aes_alg = {
	.base.cra_name		= "cbc(aes)",
	.base.cra_driver_name	= "cbc-aes-s390",
	.base.cra_priority	= 402,	/* ecb-aes-s390 + 1 */
	.base.cra_flags		= CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct s390_aes_ctx),
	.base.cra_module	= THIS_MODULE,
	.init			= fallback_init_skcipher,
	.exit			= fallback_exit_skcipher,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= cbc_aes_set_key,
	.encrypt		= cbc_aes_encrypt,
	.decrypt		= cbc_aes_decrypt,
};

static int xts_fallback_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       unsigned int len)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(xts_ctx->fallback,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(xts_ctx->fallback, key, len);
}

static int xts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;
	int err;

	err = xts_fallback_setkey(tfm, in_key, key_len);
	if (err)
		return err;

	/* In fips mode only 128 bit or 256 bit keys are valid */
	if (fips_enabled && key_len != 32 && key_len != 64)
		return -EINVAL;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 32) ? CPACF_KM_XTS_128 :
	     (key_len == 64) ? CPACF_KM_XTS_256 : 0;

	/* Check if the function code is available */
	xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!xts_ctx->fc)
		return 0;

	/* Split the XTS key into the two subkeys */
	key_len = key_len / 2;
	xts_ctx->key_len = key_len;
	memcpy(xts_ctx->key, in_key, key_len);
	memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
	return 0;
}

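/*
 * XTS: cpacf_pcc() computes the initial tweak from the second subkey and
 * walk.iv into pcc_param.xts, which then seeds xts_param.init. The
 * parameter blocks are laid out for 256-bit keys; for 128-bit keys the
 * pointers passed to PCC and KM are advanced by "offset" so that the key
 * and tweak fields line up with the shorter parameter block.
 */
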
static int xts_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int offset, nbytes, n;
	int ret;
	struct {
		u8 key[32];
		u8 tweak[16];
		u8 block[16];
		u8 bit[16];
		u8 xts[16];
	} pcc_param;
	struct {
		u8 key[32];
		u8 init[16];
	} xts_param;

	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) {
		struct skcipher_request *subreq = skcipher_request_ctx(req);

		*subreq = *req;
		skcipher_request_set_tfm(subreq, xts_ctx->fallback);
		return (modifier & CPACF_DECRYPT) ?
			crypto_skcipher_decrypt(subreq) :
			crypto_skcipher_encrypt(subreq);
	}

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;
	offset = xts_ctx->key_len & 0x10;
	memset(pcc_param.block, 0, sizeof(pcc_param.block));
	memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
	memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
	memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
	cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);

	memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
	memcpy(xts_param.init, pcc_param.xts, 16);

	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
			 walk.dst.virt.addr, walk.src.virt.addr, n);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	return ret;
}

static int xts_aes_encrypt(struct skcipher_request *req)
{
	return xts_aes_crypt(req, 0);
}

static int xts_aes_decrypt(struct skcipher_request *req)
{
	return xts_aes_crypt(req, CPACF_DECRYPT);
}

static int xts_fallback_init(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
	if (IS_ERR(xts_ctx->fallback)) {
		pr_err("Allocating XTS fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(xts_ctx->fallback);
	}
	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
				    crypto_skcipher_reqsize(xts_ctx->fallback));
	return 0;
}

static void xts_fallback_exit(struct crypto_skcipher *tfm)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(xts_ctx->fallback);
}

static struct skcipher_alg xts_aes_alg = {
	.base.cra_name		= "xts(aes)",
	.base.cra_driver_name	= "xts-aes-s390",
	.base.cra_priority	= 402,	/* ecb-aes-s390 + 1 */
	.base.cra_flags		= CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct s390_xts_ctx),
	.base.cra_module	= THIS_MODULE,
	.init			= xts_fallback_init,
	.exit			= xts_fallback_exit,
	.min_keysize		= 2 * AES_MIN_KEY_SIZE,
	.max_keysize		= 2 * AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= xts_aes_set_key,
	.encrypt		= xts_aes_encrypt,
	.decrypt		= xts_aes_decrypt,
};

static int ctr_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
	     (key_len == 24) ? CPACF_KMCTR_AES_192 :
	     (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

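/*
 * CTR: pre-compute a run of consecutive counter blocks (up to one page) in
 * ctrblk so that a single KMCTR invocation can process many blocks.
 * Returns the number of payload bytes covered by the prepared counters.
 */
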
static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
	unsigned int i, n;

	/* only use complete blocks, max. PAGE_SIZE */
	memcpy(ctrptr, iv, AES_BLOCK_SIZE);
	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
	for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
		memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
		crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
		ctrptr += AES_BLOCK_SIZE;
	}
	return n;
}

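/*
 * The shared ctrblk page is guarded by ctrblk_lock. mutex_trylock() is
 * used so that a contending request does not sleep but simply processes
 * one block per KMCTR call with walk.iv as the counter.
 */
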
static int ctr_aes_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	u8 buf[AES_BLOCK_SIZE], *ctrptr;
	struct skcipher_walk walk;
	unsigned int n, nbytes;
	int ret, locked;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, 0);

	locked = mutex_trylock(&ctrblk_lock);

	ret = skcipher_walk_virt(&walk, req, false);
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		n = AES_BLOCK_SIZE;

		if (nbytes >= 2*AES_BLOCK_SIZE && locked)
			n = __ctrblk_init(ctrblk, walk.iv, nbytes);
		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
		cpacf_kmctr(sctx->fc, sctx->key, walk.dst.virt.addr,
			    walk.src.virt.addr, n, ctrptr);
		if (ctrptr == ctrblk)
			memcpy(walk.iv, ctrptr + n - AES_BLOCK_SIZE,
			       AES_BLOCK_SIZE);
		crypto_inc(walk.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	if (locked)
		mutex_unlock(&ctrblk_lock);
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
	if (nbytes) {
		cpacf_kmctr(sctx->fc, sctx->key, buf, walk.src.virt.addr,
			    AES_BLOCK_SIZE, walk.iv);
		memcpy(walk.dst.virt.addr, buf, nbytes);
		crypto_inc(walk.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, 0);
	}

	return ret;
}

static struct skcipher_alg ctr_aes_alg = {
	.base.cra_name		= "ctr(aes)",
	.base.cra_driver_name	= "ctr-aes-s390",
	.base.cra_priority	= 402,	/* ecb-aes-s390 + 1 */
	.base.cra_flags		= CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct s390_aes_ctx),
	.base.cra_module	= THIS_MODULE,
	.init			= fallback_init_skcipher,
	.exit			= fallback_exit_skcipher,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= ctr_aes_set_key,
	.encrypt		= ctr_aes_crypt,
	.decrypt		= ctr_aes_crypt,
	.chunksize		= AES_BLOCK_SIZE,
};

static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->fc = CPACF_KMA_GCM_AES_128;
		break;
	case AES_KEYSIZE_192:
		ctx->fc = CPACF_KMA_GCM_AES_192;
		break;
	case AES_KEYSIZE_256:
		ctx->fc = CPACF_KMA_GCM_AES_256;
		break;
	default:
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_len = keylen;
	return 0;
}

static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 8:
	case 12:
	case 13:
	case 14:
	case 15:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

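/*
 * Scatter-gather walk helpers for the GCM path. The *_walk_go functions
 * return a pointer/length pair that satisfies the caller's minimum byte
 * count, buffering through gw->buf whenever a scatterlist segment is too
 * short; the *_walk_done functions consume or flush what was processed.
 */
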
static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
			   unsigned int len)
{
	memset(gw, 0, sizeof(*gw));
	gw->walk_bytes_remain = len;
	scatterwalk_start(&gw->walk, sg);
}

static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw)
{
	struct scatterlist *nextsg;

	gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
	while (!gw->walk_bytes) {
		nextsg = sg_next(gw->walk.sg);
		if (!nextsg)
			return 0;
		scatterwalk_start(&gw->walk, nextsg);
		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
						   gw->walk_bytes_remain);
	}
	gw->walk_ptr = scatterwalk_map(&gw->walk);
	return gw->walk_bytes;
}

static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
					     unsigned int nbytes)
{
	gw->walk_bytes_remain -= nbytes;
	scatterwalk_unmap(&gw->walk);
	scatterwalk_advance(&gw->walk, nbytes);
	scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
	gw->walk_ptr = NULL;
}

static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	int n;

	if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
		gw->ptr = gw->buf;
		gw->nbytes = gw->buf_bytes;
		goto out;
	}

	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!_gcm_sg_clamp_and_map(gw)) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk_ptr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	while (1) {
		n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
		memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
		gw->buf_bytes += n;
		_gcm_sg_unmap_and_advance(gw, n);
		if (gw->buf_bytes >= minbytesneeded) {
			gw->ptr = gw->buf;
			gw->nbytes = gw->buf_bytes;
			goto out;
		}
		if (!_gcm_sg_clamp_and_map(gw)) {
			gw->ptr = NULL;
			gw->nbytes = 0;
			goto out;
		}
	}

out:
	return gw->nbytes;
}

static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!_gcm_sg_clamp_and_map(gw)) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk_ptr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	scatterwalk_unmap(&gw->walk);
	gw->walk_ptr = NULL;

	gw->ptr = gw->buf;
	gw->nbytes = sizeof(gw->buf);

out:
	return gw->nbytes;
}

static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	if (gw->ptr == NULL)
		return 0;

	if (gw->ptr == gw->buf) {
		int n = gw->buf_bytes - bytesdone;
		if (n > 0) {
			memmove(gw->buf, gw->buf + bytesdone, n);
			gw->buf_bytes = n;
		} else
			gw->buf_bytes = 0;
	} else
		_gcm_sg_unmap_and_advance(gw, bytesdone);

	return bytesdone;
}

static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	int i, n;

	if (gw->ptr == NULL)
		return 0;

	if (gw->ptr == gw->buf) {
		for (i = 0; i < bytesdone; i += n) {
			if (!_gcm_sg_clamp_and_map(gw))
				return i;
			n = min(gw->walk_bytes, bytesdone - i);
			memcpy(gw->walk_ptr, gw->buf + i, n);
			_gcm_sg_unmap_and_advance(gw, n);
		}
	} else
		_gcm_sg_unmap_and_advance(gw, bytesdone);

	return bytesdone;
}

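/*
 * GCM: each loop iteration feeds one chunk of AAD and/or text to the KMA
 * instruction. CPACF_KMA_LAAD and CPACF_KMA_LPC flag the last AAD and the
 * last plain-/ciphertext portion. On decryption the tag computed in
 * param.t is compared against the tag appended to req->src.
 */
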
static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int ivsize = crypto_aead_ivsize(tfm);
	unsigned int taglen = crypto_aead_authsize(tfm);
	unsigned int aadlen = req->assoclen;
	unsigned int pclen = req->cryptlen;
	int ret = 0;

	unsigned int n, len, in_bytes, out_bytes,
		     min_bytes, bytes, aad_bytes, pc_bytes;
	struct gcm_sg_walk gw_in, gw_out;
	u8 tag[GHASH_DIGEST_SIZE];

	struct {
		u32 _[3];		/* reserved */
		u32 cv;			/* Counter Value */
		u8 t[GHASH_DIGEST_SIZE];/* Tag */
		u8 h[AES_BLOCK_SIZE];	/* Hash-subkey */
		u64 taadl;		/* Total AAD Length */
		u64 tpcl;		/* Total Plain-/Cipher-text Length */
		u8 j0[GHASH_BLOCK_SIZE];/* initial counter value */
		u8 k[AES_MAX_KEY_SIZE];	/* Key */
	} param;

	/*
	 * encrypt
	 *   req->src: aad||plaintext
	 *   req->dst: aad||ciphertext||tag
	 * decrypt
	 *   req->src: aad||ciphertext||tag
	 *   req->dst: aad||plaintext, return 0 or -EBADMSG
	 * aad, plaintext and ciphertext may be empty.
	 */
	if (flags & CPACF_DECRYPT)
		pclen -= taglen;
	len = aadlen + pclen;

	memset(&param, 0, sizeof(param));
	param.cv = 1;
	param.taadl = aadlen * 8;
	param.tpcl = pclen * 8;
	memcpy(param.j0, req->iv, ivsize);
	*(u32 *)(param.j0 + ivsize) = 1;
	memcpy(param.k, ctx->key, ctx->key_len);

	gcm_walk_start(&gw_in, req->src, len);
	gcm_walk_start(&gw_out, req->dst, len);

	do {
		min_bytes = min_t(unsigned int,
				  aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
		in_bytes = gcm_in_walk_go(&gw_in, min_bytes);
		out_bytes = gcm_out_walk_go(&gw_out, min_bytes);
		bytes = min(in_bytes, out_bytes);

		if (aadlen + pclen <= bytes) {
			aad_bytes = aadlen;
			pc_bytes = pclen;
			flags |= CPACF_KMA_LAAD | CPACF_KMA_LPC;
		} else {
			if (aadlen <= bytes) {
				aad_bytes = aadlen;
				pc_bytes = (bytes - aadlen) &
					   ~(AES_BLOCK_SIZE - 1);
				flags |= CPACF_KMA_LAAD;
			} else {
				aad_bytes = bytes & ~(AES_BLOCK_SIZE - 1);
				pc_bytes = 0;
			}
		}

		if (aad_bytes > 0)
			memcpy(gw_out.ptr, gw_in.ptr, aad_bytes);

		cpacf_kma(ctx->fc | flags, &param,
			  gw_out.ptr + aad_bytes,
			  gw_in.ptr + aad_bytes, pc_bytes,
			  gw_in.ptr, aad_bytes);

		n = aad_bytes + pc_bytes;
		if (gcm_in_walk_done(&gw_in, n) != n)
			return -ENOMEM;
		if (gcm_out_walk_done(&gw_out, n) != n)
			return -ENOMEM;
		aadlen -= aad_bytes;
		pclen -= pc_bytes;
	} while (aadlen + pclen > 0);

	if (flags & CPACF_DECRYPT) {
		scatterwalk_map_and_copy(tag, req->src, len, taglen, 0);
		if (crypto_memneq(tag, param.t, taglen))
			ret = -EBADMSG;
	} else
		scatterwalk_map_and_copy(param.t, req->dst, len, taglen, 1);

	memzero_explicit(&param, sizeof(param));
	return ret;
}

static int gcm_aes_encrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_ENCRYPT);
}

static int gcm_aes_decrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_DECRYPT);
}

static struct aead_alg gcm_aes_aead = {
	.setkey			= gcm_aes_setkey,
	.setauthsize		= gcm_aes_setauthsize,
	.encrypt		= gcm_aes_encrypt,
	.decrypt		= gcm_aes_decrypt,

	.ivsize			= GHASH_BLOCK_SIZE - sizeof(u32),
	.maxauthsize		= GHASH_DIGEST_SIZE,
	.chunksize		= AES_BLOCK_SIZE,

	.base			= {
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct s390_aes_ctx),
		.cra_priority		= 900,
		.cra_name		= "gcm(aes)",
		.cra_driver_name	= "gcm-aes-s390",
		.cra_module		= THIS_MODULE,
	},
};

static struct crypto_alg *aes_s390_alg;
static struct skcipher_alg *aes_s390_skcipher_algs[4];
static int aes_s390_skciphers_num;
static struct aead_alg *aes_s390_aead_alg;

static int aes_s390_register_skcipher(struct skcipher_alg *alg)
{
	int ret;

	ret = crypto_register_skcipher(alg);
	if (!ret)
		aes_s390_skcipher_algs[aes_s390_skciphers_num++] = alg;
	return ret;
}

static void aes_s390_fini(void)
{
	if (aes_s390_alg)
		crypto_unregister_alg(aes_s390_alg);
	while (aes_s390_skciphers_num--)
		crypto_unregister_skcipher(aes_s390_skcipher_algs[aes_s390_skciphers_num]);
	if (ctrblk)
		free_page((unsigned long) ctrblk);

	if (aes_s390_aead_alg)
		crypto_unregister_aead(aes_s390_aead_alg);
}

static int __init aes_s390_init(void)
{
	int ret;

	/* Query available functions for KM, KMC, KMCTR and KMA */
	cpacf_query(CPACF_KM, &km_functions);
	cpacf_query(CPACF_KMC, &kmc_functions);
	cpacf_query(CPACF_KMCTR, &kmctr_functions);
	cpacf_query(CPACF_KMA, &kma_functions);

	if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
		ret = crypto_register_alg(&aes_alg);
		if (ret)
			goto out_err;
		aes_s390_alg = &aes_alg;
		ret = aes_s390_register_skcipher(&ecb_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
		ret = aes_s390_register_skcipher(&cbc_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
		ret = aes_s390_register_skcipher(&xts_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
		if (!ctrblk) {
			ret = -ENOMEM;
			goto out_err;
		}
		ret = aes_s390_register_skcipher(&ctr_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_128) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_192) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_256)) {
		ret = crypto_register_aead(&gcm_aes_aead);
		if (ret)
			goto out_err;
		aes_s390_aead_alg = &gcm_aes_aead;
	}

	return 0;
out_err:
	aes_s390_fini();
	return ret;
}

module_cpu_feature_match(MSA, aes_s390_init);
module_exit(aes_s390_fini);

MODULE_ALIAS_CRYPTO("aes-all");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");