treewide: remove redundant IS_ERR() before error code check
[linux/fpc-iii.git] arch/s390/crypto/aes_s390.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2005, 2017
 *   Author(s): Jan Glauber (jang@de.ibm.com)
 *		Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
 *		Patrick Steuer <patrick.steuer@de.ibm.com>
 *		Harald Freudenberger <freude@de.ibm.com>
 *
 * Derived from "crypto/aes_generic.c"
 */

#define KMSG_COMPONENT "aes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/ghash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fips.h>
#include <linux/string.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>

static u8 *ctrblk;
static DEFINE_MUTEX(ctrblk_lock);

static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
		    kma_functions;

struct s390_aes_ctx {
	u8 key[AES_MAX_KEY_SIZE];
	int key_len;
	unsigned long fc;
	union {
		struct crypto_skcipher *skcipher;
		struct crypto_cipher *cip;
	} fallback;
};

struct s390_xts_ctx {
	u8 key[32];
	u8 pcc_key[32];
	int key_len;
	unsigned long fc;
	struct crypto_skcipher *fallback;
};

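/*
 * Scatterlist walker used by the GCM code: the walk_* fields track the
 * current position in the scatterlist, buf is a block-sized bounce buffer
 * for data that is not contiguous, and ptr/nbytes describe the chunk
 * handed back to the caller.
 */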
struct gcm_sg_walk {
	struct scatter_walk walk;
	unsigned int walk_bytes;
	u8 *walk_ptr;
	unsigned int walk_bytes_remain;
	u8 buf[AES_BLOCK_SIZE];
	unsigned int buf_bytes;
	u8 *ptr;
	unsigned int nbytes;
};

static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
			       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
					       CRYPTO_TFM_REQ_MASK);

	return crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_cip(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
}

static void crypto_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc | CPACF_DECRYPT,
		 &sctx->key, out, in, AES_BLOCK_SIZE);
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
						 CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(sctx->fallback.cip)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.cip);
	}

	return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(sctx->fallback.cip);
	sctx->fallback.cip = NULL;
}

static struct crypto_alg aes_alg = {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-s390",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct s390_aes_ctx),
	.cra_module		= THIS_MODULE,
	.cra_init		= fallback_init_cip,
	.cra_exit		= fallback_exit_cip,
	.cra_u			= {
		.cipher = {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= crypto_aes_encrypt,
			.cia_decrypt		= crypto_aes_decrypt,
		}
	}
};

static int setkey_fallback_skcipher(struct crypto_skcipher *tfm, const u8 *key,
				    unsigned int len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_clear_flags(sctx->fallback.skcipher,
				    CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(sctx->fallback.skcipher,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(sctx->fallback.skcipher, key, len);
}

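/*
 * Run the request on the software fallback tfm. The fallback request is
 * built in the extra request context space that was reserved for it via
 * crypto_skcipher_set_reqsize() in fallback_init_skcipher().
 */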
static int fallback_skcipher_crypt(struct s390_aes_ctx *sctx,
				   struct skcipher_request *req,
				   unsigned long modifier)
{
	struct skcipher_request *subreq = skcipher_request_ctx(req);

	*subreq = *req;
	skcipher_request_set_tfm(subreq, sctx->fallback.skcipher);
	return (modifier & CPACF_DECRYPT) ?
		crypto_skcipher_decrypt(subreq) :
		crypto_skcipher_encrypt(subreq);
}

static int ecb_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static int ecb_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes, n;
	int ret;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, modifier);

	ret = skcipher_walk_virt(&walk, req, false);
	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(sctx->fc | modifier, sctx->key,
			 walk.dst.virt.addr, walk.src.virt.addr, n);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	return ret;
}

static int ecb_aes_encrypt(struct skcipher_request *req)
{
	return ecb_aes_crypt(req, 0);
}

static int ecb_aes_decrypt(struct skcipher_request *req)
{
	return ecb_aes_crypt(req, CPACF_DECRYPT);
}

static int fallback_init_skcipher(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	sctx->fallback.skcipher = crypto_alloc_skcipher(name, 0,
				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);

	if (IS_ERR(sctx->fallback.skcipher)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.skcipher);
	}

	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
				    crypto_skcipher_reqsize(sctx->fallback.skcipher));
	return 0;
}

static void fallback_exit_skcipher(struct crypto_skcipher *tfm)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(sctx->fallback.skcipher);
}

static struct skcipher_alg ecb_aes_alg = {
	.base.cra_name		= "ecb(aes)",
	.base.cra_driver_name	= "ecb-aes-s390",
	.base.cra_priority	= 401,	/* combo: aes + ecb + 1 */
	.base.cra_flags		= CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct s390_aes_ctx),
	.base.cra_module	= THIS_MODULE,
	.init			= fallback_init_skcipher,
	.exit			= fallback_exit_skcipher,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= ecb_aes_set_key,
	.encrypt		= ecb_aes_encrypt,
	.decrypt		= ecb_aes_decrypt,
};

static int cbc_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMC_AES_128 :
	     (key_len == 24) ? CPACF_KMC_AES_192 :
	     (key_len == 32) ? CPACF_KMC_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static int cbc_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes, n;
	int ret;
	struct {
		u8 iv[AES_BLOCK_SIZE];
		u8 key[AES_MAX_KEY_SIZE];
	} param;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, modifier);

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;
	memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
	memcpy(param.key, sctx->key, sctx->key_len);
	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_kmc(sctx->fc | modifier, &param,
			  walk.dst.virt.addr, walk.src.virt.addr, n);
		memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	return ret;
}

static int cbc_aes_encrypt(struct skcipher_request *req)
{
	return cbc_aes_crypt(req, 0);
}

static int cbc_aes_decrypt(struct skcipher_request *req)
{
	return cbc_aes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg cbc_aes_alg = {
	.base.cra_name		= "cbc(aes)",
	.base.cra_driver_name	= "cbc-aes-s390",
	.base.cra_priority	= 402,	/* ecb-aes-s390 + 1 */
	.base.cra_flags		= CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct s390_aes_ctx),
	.base.cra_module	= THIS_MODULE,
	.init			= fallback_init_skcipher,
	.exit			= fallback_exit_skcipher,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= cbc_aes_set_key,
	.encrypt		= cbc_aes_encrypt,
	.decrypt		= cbc_aes_decrypt,
};

static int xts_fallback_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       unsigned int len)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(xts_ctx->fallback,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(xts_ctx->fallback, key, len);
}

static int xts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;
	int err;

	err = xts_fallback_setkey(tfm, in_key, key_len);
	if (err)
		return err;

	/* In fips mode only 128 bit or 256 bit keys are valid */
	if (fips_enabled && key_len != 32 && key_len != 64)
		return -EINVAL;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 32) ? CPACF_KM_XTS_128 :
	     (key_len == 64) ? CPACF_KM_XTS_256 : 0;

	/* Check if the function code is available */
	xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!xts_ctx->fc)
		return 0;

	/* Split the XTS key into the two subkeys */
	key_len = key_len / 2;
	xts_ctx->key_len = key_len;
	memcpy(xts_ctx->key, in_key, key_len);
	memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
	return 0;
}

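/*
 * The initial XTS tweak is precomputed with the PCC instruction: pcc_param
 * holds the tweak key and IV, and the resulting xts field is then used as
 * the initial parameter for the KM-XTS function code.
 */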
static int xts_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int offset, nbytes, n;
	int ret;
	struct {
		u8 key[32];
		u8 tweak[16];
		u8 block[16];
		u8 bit[16];
		u8 xts[16];
	} pcc_param;
	struct {
		u8 key[32];
		u8 init[16];
	} xts_param;

	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) {
		struct skcipher_request *subreq = skcipher_request_ctx(req);

		*subreq = *req;
		skcipher_request_set_tfm(subreq, xts_ctx->fallback);
		return (modifier & CPACF_DECRYPT) ?
			crypto_skcipher_decrypt(subreq) :
			crypto_skcipher_encrypt(subreq);
	}

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;
	offset = xts_ctx->key_len & 0x10;
	memset(pcc_param.block, 0, sizeof(pcc_param.block));
	memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
	memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
	memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
	cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);

	memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
	memcpy(xts_param.init, pcc_param.xts, 16);

	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
			 walk.dst.virt.addr, walk.src.virt.addr, n);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	return ret;
}

static int xts_aes_encrypt(struct skcipher_request *req)
{
	return xts_aes_crypt(req, 0);
}

static int xts_aes_decrypt(struct skcipher_request *req)
{
	return xts_aes_crypt(req, CPACF_DECRYPT);
}

static int xts_fallback_init(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);

	if (IS_ERR(xts_ctx->fallback)) {
		pr_err("Allocating XTS fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(xts_ctx->fallback);
	}
	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
				    crypto_skcipher_reqsize(xts_ctx->fallback));
	return 0;
}

static void xts_fallback_exit(struct crypto_skcipher *tfm)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(xts_ctx->fallback);
}

static struct skcipher_alg xts_aes_alg = {
	.base.cra_name		= "xts(aes)",
	.base.cra_driver_name	= "xts-aes-s390",
	.base.cra_priority	= 402,	/* ecb-aes-s390 + 1 */
	.base.cra_flags		= CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct s390_xts_ctx),
	.base.cra_module	= THIS_MODULE,
	.init			= xts_fallback_init,
	.exit			= xts_fallback_exit,
	.min_keysize		= 2 * AES_MIN_KEY_SIZE,
	.max_keysize		= 2 * AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= xts_aes_set_key,
	.encrypt		= xts_aes_encrypt,
	.decrypt		= xts_aes_decrypt,
};

static int ctr_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
	     (key_len == 24) ? CPACF_KMCTR_AES_192 :
	     (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

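/*
 * Fill ctrptr with consecutive counter values derived from iv so that
 * KMCTR can process several complete blocks in a single call (at most one
 * page worth of counter blocks).
 */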
static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
	unsigned int i, n;

	/* only use complete blocks, max. PAGE_SIZE */
	memcpy(ctrptr, iv, AES_BLOCK_SIZE);
	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
	for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
		memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
		crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
		ctrptr += AES_BLOCK_SIZE;
	}
	return n;
}

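/*
 * The shared ctrblk page is protected by ctrblk_lock. If the trylock
 * fails the request is still processed, but only one counter block at a
 * time.
 */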
static int ctr_aes_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	u8 buf[AES_BLOCK_SIZE], *ctrptr;
	struct skcipher_walk walk;
	unsigned int n, nbytes;
	int ret, locked;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, 0);

	locked = mutex_trylock(&ctrblk_lock);

	ret = skcipher_walk_virt(&walk, req, false);
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		n = AES_BLOCK_SIZE;

		if (nbytes >= 2*AES_BLOCK_SIZE && locked)
			n = __ctrblk_init(ctrblk, walk.iv, nbytes);
		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
		cpacf_kmctr(sctx->fc, sctx->key, walk.dst.virt.addr,
			    walk.src.virt.addr, n, ctrptr);
		if (ctrptr == ctrblk)
			memcpy(walk.iv, ctrptr + n - AES_BLOCK_SIZE,
			       AES_BLOCK_SIZE);
		crypto_inc(walk.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	if (locked)
		mutex_unlock(&ctrblk_lock);
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
	if (nbytes) {
		cpacf_kmctr(sctx->fc, sctx->key, buf, walk.src.virt.addr,
			    AES_BLOCK_SIZE, walk.iv);
		memcpy(walk.dst.virt.addr, buf, nbytes);
		crypto_inc(walk.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, 0);
	}

	return ret;
}

static struct skcipher_alg ctr_aes_alg = {
	.base.cra_name		= "ctr(aes)",
	.base.cra_driver_name	= "ctr-aes-s390",
	.base.cra_priority	= 402,	/* ecb-aes-s390 + 1 */
	.base.cra_flags		= CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct s390_aes_ctx),
	.base.cra_module	= THIS_MODULE,
	.init			= fallback_init_skcipher,
	.exit			= fallback_exit_skcipher,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= ctr_aes_set_key,
	.encrypt		= ctr_aes_crypt,
	.decrypt		= ctr_aes_crypt,
	.chunksize		= AES_BLOCK_SIZE,
};

static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->fc = CPACF_KMA_GCM_AES_128;
		break;
	case AES_KEYSIZE_192:
		ctx->fc = CPACF_KMA_GCM_AES_192;
		break;
	case AES_KEYSIZE_256:
		ctx->fc = CPACF_KMA_GCM_AES_256;
		break;
	default:
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_len = keylen;
	return 0;
}

static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 8:
	case 12:
	case 13:
	case 14:
	case 15:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
			   unsigned int len)
{
	memset(gw, 0, sizeof(*gw));
	gw->walk_bytes_remain = len;
	scatterwalk_start(&gw->walk, sg);
}

static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw)
{
	struct scatterlist *nextsg;

	gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
	while (!gw->walk_bytes) {
		nextsg = sg_next(gw->walk.sg);
		if (!nextsg)
			return 0;
		scatterwalk_start(&gw->walk, nextsg);
		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
						   gw->walk_bytes_remain);
	}
	gw->walk_ptr = scatterwalk_map(&gw->walk);
	return gw->walk_bytes;
}

static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
					     unsigned int nbytes)
{
	gw->walk_bytes_remain -= nbytes;
	scatterwalk_unmap(&gw->walk);
	scatterwalk_advance(&gw->walk, nbytes);
	scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
	gw->walk_ptr = NULL;
}

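/*
 * Return a pointer to at least minbytesneeded contiguous input bytes.
 * Input that is not contiguous in the scatterlist is first collected in
 * the block-sized bounce buffer gw->buf.
 */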
static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	int n;

	if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
		gw->ptr = gw->buf;
		gw->nbytes = gw->buf_bytes;
		goto out;
	}

	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!_gcm_sg_clamp_and_map(gw)) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk_ptr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	while (1) {
		n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
		memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
		gw->buf_bytes += n;
		_gcm_sg_unmap_and_advance(gw, n);
		if (gw->buf_bytes >= minbytesneeded) {
			gw->ptr = gw->buf;
			gw->nbytes = gw->buf_bytes;
			goto out;
		}
		if (!_gcm_sg_clamp_and_map(gw)) {
			gw->ptr = NULL;
			gw->nbytes = 0;
			goto out;
		}
	}

out:
	return gw->nbytes;
}

static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!_gcm_sg_clamp_and_map(gw)) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk_ptr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	scatterwalk_unmap(&gw->walk);
	gw->walk_ptr = NULL;

	gw->ptr = gw->buf;
	gw->nbytes = sizeof(gw->buf);

out:
	return gw->nbytes;
}

static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	if (gw->ptr == NULL)
		return 0;

	if (gw->ptr == gw->buf) {
		int n = gw->buf_bytes - bytesdone;
		if (n > 0) {
			memmove(gw->buf, gw->buf + bytesdone, n);
			gw->buf_bytes = n;
		} else
			gw->buf_bytes = 0;
	} else
		_gcm_sg_unmap_and_advance(gw, bytesdone);

	return bytesdone;
}

static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	int i, n;

	if (gw->ptr == NULL)
		return 0;

	if (gw->ptr == gw->buf) {
		for (i = 0; i < bytesdone; i += n) {
			if (!_gcm_sg_clamp_and_map(gw))
				return i;
			n = min(gw->walk_bytes, bytesdone - i);
			memcpy(gw->walk_ptr, gw->buf + i, n);
			_gcm_sg_unmap_and_advance(gw, n);
		}
	} else
		_gcm_sg_unmap_and_advance(gw, bytesdone);

	return bytesdone;
}

static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int ivsize = crypto_aead_ivsize(tfm);
	unsigned int taglen = crypto_aead_authsize(tfm);
	unsigned int aadlen = req->assoclen;
	unsigned int pclen = req->cryptlen;
	int ret = 0;

	unsigned int n, len, in_bytes, out_bytes,
		     min_bytes, bytes, aad_bytes, pc_bytes;
	struct gcm_sg_walk gw_in, gw_out;
	u8 tag[GHASH_DIGEST_SIZE];

	struct {
		u32 _[3];		/* reserved */
		u32 cv;			/* Counter Value */
		u8 t[GHASH_DIGEST_SIZE];/* Tag */
		u8 h[AES_BLOCK_SIZE];	/* Hash-subkey */
		u64 taadl;		/* Total AAD Length */
		u64 tpcl;		/* Total Plain-/Cipher-text Length */
		u8 j0[GHASH_BLOCK_SIZE];/* initial counter value */
		u8 k[AES_MAX_KEY_SIZE];	/* Key */
	} param;

	/*
	 * encrypt
	 *   req->src: aad||plaintext
	 *   req->dst: aad||ciphertext||tag
	 * decrypt
	 *   req->src: aad||ciphertext||tag
	 *   req->dst: aad||plaintext, return 0 or -EBADMSG
	 * aad, plaintext and ciphertext may be empty.
	 */
	if (flags & CPACF_DECRYPT)
		pclen -= taglen;
	len = aadlen + pclen;

	memset(&param, 0, sizeof(param));
	param.cv = 1;
	param.taadl = aadlen * 8;
	param.tpcl = pclen * 8;
	memcpy(param.j0, req->iv, ivsize);
	*(u32 *)(param.j0 + ivsize) = 1;
	memcpy(param.k, ctx->key, ctx->key_len);

	gcm_walk_start(&gw_in, req->src, len);
	gcm_walk_start(&gw_out, req->dst, len);

	do {
		min_bytes = min_t(unsigned int,
				  aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
		in_bytes = gcm_in_walk_go(&gw_in, min_bytes);
		out_bytes = gcm_out_walk_go(&gw_out, min_bytes);
		bytes = min(in_bytes, out_bytes);

		if (aadlen + pclen <= bytes) {
			aad_bytes = aadlen;
			pc_bytes = pclen;
			flags |= CPACF_KMA_LAAD | CPACF_KMA_LPC;
		} else {
			if (aadlen <= bytes) {
				aad_bytes = aadlen;
				pc_bytes = (bytes - aadlen) &
					   ~(AES_BLOCK_SIZE - 1);
				flags |= CPACF_KMA_LAAD;
			} else {
				aad_bytes = bytes & ~(AES_BLOCK_SIZE - 1);
				pc_bytes = 0;
			}
		}

		if (aad_bytes > 0)
			memcpy(gw_out.ptr, gw_in.ptr, aad_bytes);

		cpacf_kma(ctx->fc | flags, &param,
			  gw_out.ptr + aad_bytes,
			  gw_in.ptr + aad_bytes, pc_bytes,
			  gw_in.ptr, aad_bytes);

		n = aad_bytes + pc_bytes;
		if (gcm_in_walk_done(&gw_in, n) != n)
			return -ENOMEM;
		if (gcm_out_walk_done(&gw_out, n) != n)
			return -ENOMEM;
		aadlen -= aad_bytes;
		pclen -= pc_bytes;
	} while (aadlen + pclen > 0);

	if (flags & CPACF_DECRYPT) {
		scatterwalk_map_and_copy(tag, req->src, len, taglen, 0);
		if (crypto_memneq(tag, param.t, taglen))
			ret = -EBADMSG;
	} else
		scatterwalk_map_and_copy(param.t, req->dst, len, taglen, 1);

	memzero_explicit(&param, sizeof(param));
	return ret;
}

static int gcm_aes_encrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_ENCRYPT);
}

static int gcm_aes_decrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_DECRYPT);
}

static struct aead_alg gcm_aes_aead = {
	.setkey			= gcm_aes_setkey,
	.setauthsize		= gcm_aes_setauthsize,
	.encrypt		= gcm_aes_encrypt,
	.decrypt		= gcm_aes_decrypt,

	.ivsize			= GHASH_BLOCK_SIZE - sizeof(u32),
	.maxauthsize		= GHASH_DIGEST_SIZE,
	.chunksize		= AES_BLOCK_SIZE,

	.base			= {
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct s390_aes_ctx),
		.cra_priority		= 900,
		.cra_name		= "gcm(aes)",
		.cra_driver_name	= "gcm-aes-s390",
		.cra_module		= THIS_MODULE,
	},
};

static struct crypto_alg *aes_s390_alg;
static struct skcipher_alg *aes_s390_skcipher_algs[4];
static int aes_s390_skciphers_num;
static struct aead_alg *aes_s390_aead_alg;

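/*
 * Successfully registered algorithms are remembered here so that
 * aes_s390_fini() can unregister them again on module exit or when
 * initialization fails part way through.
 */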
static int aes_s390_register_skcipher(struct skcipher_alg *alg)
{
	int ret;

	ret = crypto_register_skcipher(alg);
	if (!ret)
		aes_s390_skcipher_algs[aes_s390_skciphers_num++] = alg;
	return ret;
}

static void aes_s390_fini(void)
{
	if (aes_s390_alg)
		crypto_unregister_alg(aes_s390_alg);
	while (aes_s390_skciphers_num--)
		crypto_unregister_skcipher(aes_s390_skcipher_algs[aes_s390_skciphers_num]);
	if (ctrblk)
		free_page((unsigned long) ctrblk);

	if (aes_s390_aead_alg)
		crypto_unregister_aead(aes_s390_aead_alg);
}

static int __init aes_s390_init(void)
{
	int ret;

	/* Query available functions for KM, KMC, KMCTR and KMA */
	cpacf_query(CPACF_KM, &km_functions);
	cpacf_query(CPACF_KMC, &kmc_functions);
	cpacf_query(CPACF_KMCTR, &kmctr_functions);
	cpacf_query(CPACF_KMA, &kma_functions);

	if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
		ret = crypto_register_alg(&aes_alg);
		if (ret)
			goto out_err;
		aes_s390_alg = &aes_alg;
		ret = aes_s390_register_skcipher(&ecb_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
		ret = aes_s390_register_skcipher(&cbc_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
		ret = aes_s390_register_skcipher(&xts_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
		if (!ctrblk) {
			ret = -ENOMEM;
			goto out_err;
		}
		ret = aes_s390_register_skcipher(&ctr_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_128) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_192) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_256)) {
		ret = crypto_register_aead(&gcm_aes_aead);
		if (ret)
			goto out_err;
		aes_s390_aead_alg = &gcm_aes_aead;
	}

	return 0;
out_err:
	aes_s390_fini();
	return ret;
}

module_cpu_feature_match(MSA, aes_s390_init);
module_exit(aes_s390_fini);

MODULE_ALIAS_CRYPTO("aes-all");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");