drivers/crypto/nx/nx-aes-ccm.c
/**
 * AES CCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"

static int ccm_aes_nx_set_key(struct crypto_aead *tfm,
			      const u8 *in_key,
			      unsigned int key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	switch (key_len) {
	case AES_KEYSIZE_128:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	default:
		return -EINVAL;
	}

	csbcpb->cpb.hdr.mode = NX_MODE_AES_CCM;
	memcpy(csbcpb->cpb.aes_ccm.key, in_key, key_len);

	csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_CCA;
	memcpy(csbcpb_aead->cpb.aes_cca.key, in_key, key_len);

	return 0;
}

static int ccm4309_aes_nx_set_key(struct crypto_aead *tfm,
				  const u8 *in_key,
				  unsigned int key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);

	if (key_len < 3)
		return -EINVAL;

	key_len -= 3;

	/* the last 3 bytes of the key are the RFC 4309 salt (nonce) */
	memcpy(nx_ctx->priv.ccm.nonce, in_key + key_len, 3);

	return ccm_aes_nx_set_key(tfm, in_key, key_len);
}

static int ccm_aes_nx_setauthsize(struct crypto_aead *tfm,
				  unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 6:
	case 8:
	case 10:
	case 12:
	case 14:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	crypto_aead_crt(tfm)->authsize = authsize;

	return 0;
}

static int ccm4309_aes_nx_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	/* RFC 4309 only defines 8, 12 and 16 byte ICV lengths */
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	crypto_aead_crt(tfm)->authsize = authsize;

	return 0;
}

/* taken from crypto/ccm.c */
static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
	__be32 data;

	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else if (msglen > (unsigned int)(1 << (8 * csize)))
		return -EOVERFLOW;

	data = cpu_to_be32(msglen);
	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

	return 0;
}
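
/*
 * Example: with csize = 3 and msglen = 0x012345, the three length bytes
 * written into the end of B0 are 0x01 0x23 0x45 -- the big-endian value
 * is right-aligned in the csize-byte field. A msglen too large for the
 * field is rejected with -EOVERFLOW.
 */
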
/* taken from crypto/ccm.c */
static inline int crypto_ccm_check_iv(const u8 *iv)
{
	/* 2 <= L <= 8, so 1 <= L' <= 7. */
	if (1 > iv[0] || iv[0] > 7)
		return -EINVAL;

	return 0;
}

/* based on code from crypto/ccm.c */
static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize,
		       unsigned int cryptlen, u8 *b0)
{
	unsigned int l, lp, m = authsize;
	int rc;

	memcpy(b0, iv, 16);

	lp = b0[0];
	l = lp + 1;

	/* set m, bits 3-5 */
	*b0 |= (8 * ((m - 2) / 2));

	/* set adata, bit 6, if associated data is used */
	if (assoclen)
		*b0 |= 64;

	rc = set_msg_len(b0 + 16 - l, cryptlen, l);

	return rc;
}
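
/*
 * For reference, the flags byte b0[0] built above follows RFC 3610
 * section 2:
 *
 *	bit  7     reserved (0)
 *	bit  6     Adata - set when associated data is present
 *	bits 5..3  M'    - (authsize - 2) / 2
 *	bits 2..0  L'    - size of the length field minus one (from iv[0])
 */
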
static int generate_pat(u8 *iv,
			struct aead_request *req,
			struct nx_crypto_ctx *nx_ctx,
			unsigned int authsize,
			unsigned int nbytes,
			u8 *out)
{
	struct nx_sg *nx_insg = nx_ctx->in_sg;
	struct nx_sg *nx_outsg = nx_ctx->out_sg;
	unsigned int iauth_len = 0;
	u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL;
	int rc;
	unsigned int max_sg_len;

	/* zero the ctr value */
	memset(iv + 15 - iv[0], 0, iv[0] + 1);

	/* page 78 of nx_wb.pdf notes:
	 *   RFC 3610 allows the AAD data to be up to 2^64 - 1 bytes in
	 *   length. If a full message is used, the AES CCA implementation
	 *   restricts the maximum AAD length to 2^32 - 1 bytes. If partial
	 *   messages are used, the implementation supports a maximum AAD
	 *   length of 2^64 - 1 bytes.
	 *
	 * However, in the crypto API's aead_request structure, assoclen is
	 * an unsigned int and thus cannot hold a length greater than
	 * 2^32 - 1. The AAD is therefore further constrained and never
	 * exceeds 2^32 - 1 bytes.
	 */
	if (!req->assoclen) {
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
	} else if (req->assoclen <= 14) {
		/* if associated data is 14 bytes or less, we do 1 CCM
		 * operation on 2 AES blocks, B0 (stored in the csbcpb) and B1,
		 * which is fed in through the source buffers here */
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
		b1 = nx_ctx->priv.ccm.iauth_tag;
		iauth_len = req->assoclen;
	} else if (req->assoclen <= 65280) {
		/* if associated data is less than (2^16 - 2^8), we construct
		 * B1 differently and feed in the associated data to a CCA
		 * operation */
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 14;
	} else {
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 10;
	}

	/* generate B0 */
	rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0);
	if (rc)
		return rc;

	/* generate B1:
	 * add control info for associated data
	 * RFC 3610 and NIST Special Publication 800-38C
	 */
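	/* Per RFC 3610 section 2.2, the AAD length l(a) is prepended to
	 * the associated data in one of two encodings: for
	 * 0 < l(a) < 2^16 - 2^8 it is the two-byte length itself; for
	 * larger values it is the two-byte marker 0xff 0xfe followed by
	 * the four-byte length. The first iauth_len bytes of the AAD are
	 * carried in B1 itself.
	 */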
	if (b1) {
		memset(b1, 0, 16);
		if (req->assoclen <= 65280) {
			*(u16 *)b1 = (u16)req->assoclen;
			scatterwalk_map_and_copy(b1 + 2, req->assoc, 0,
						 iauth_len, SCATTERWALK_FROM_SG);
		} else {
			*(u16 *)b1 = (u16)(0xfffe);
			*(u32 *)&b1[2] = (u32)req->assoclen;
			scatterwalk_map_and_copy(b1 + 6, req->assoc, 0,
						 iauth_len, SCATTERWALK_FROM_SG);
		}
	}

	/* now copy any remaining AAD to scatterlist and call nx... */
	if (!req->assoclen) {
		return rc;
	} else if (req->assoclen <= 14) {
		unsigned int len = 16;

		nx_insg = nx_build_sg_list(nx_insg, b1, &len, nx_ctx->ap->sglen);

		if (len != 16)
			return -EINVAL;

		nx_outsg = nx_build_sg_list(nx_outsg, tmp, &len,
					    nx_ctx->ap->sglen);

		if (len != 16)
			return -EINVAL;

		/* inlen should be negative, indicating to phyp that it's a
		 * pointer to an sg list */
		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) *
					sizeof(struct nx_sg);
		nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) *
					sizeof(struct nx_sg);

		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT;
		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE;

		result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
	} else {
		unsigned int processed = 0, to_process;

		processed += iauth_len;

		/* page_limit: number of sg entries that fit on one page */
		max_sg_len = min_t(u64, nx_ctx->ap->sglen,
				   nx_driver.of.max_sg_len / sizeof(struct nx_sg));
		max_sg_len = min_t(u64, max_sg_len,
				   nx_ctx->ap->databytelen / NX_PAGE_SIZE);

		do {
			to_process = min_t(u32, req->assoclen - processed,
					   nx_ctx->ap->databytelen);

			nx_insg = nx_walk_and_build(nx_ctx->in_sg,
						    nx_ctx->ap->sglen,
						    req->assoc, processed,
						    &to_process);

			if ((to_process + processed) < req->assoclen) {
				NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
					NX_FDM_INTERMEDIATE;
			} else {
				NX_CPB_FDM(nx_ctx->csbcpb_aead) &=
					~NX_FDM_INTERMEDIATE;
			}

			nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
						sizeof(struct nx_sg);

			result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;

			rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
					   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
			if (rc)
				return rc;

			memcpy(nx_ctx->csbcpb_aead->cpb.aes_cca.b0,
			       nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0,
			       AES_BLOCK_SIZE);

			NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION;

			atomic_inc(&(nx_ctx->stats->aes_ops));
			atomic64_add(req->assoclen,
				     &(nx_ctx->stats->aes_bytes));

			processed += to_process;
		} while (processed < req->assoclen);

		result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
	}

	memcpy(out, result, AES_BLOCK_SIZE);

	return rc;
}

static int ccm_nx_decrypt(struct aead_request *req,
			  struct blkcipher_desc *desc)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
	struct nx_ccm_priv *priv = &nx_ctx->priv.ccm;
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	int rc = -1;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	nbytes -= authsize;

	/* copy out the auth tag to compare with later */
	scatterwalk_map_and_copy(priv->oauth_tag,
				 req->src, nbytes, authsize,
				 SCATTERWALK_FROM_SG);

	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
	if (rc)
		goto out;

	do {
		/* to_process: the AES_BLOCK_SIZE data chunk to process in
		 * this update. This value is bound by sg list limits.
		 */
		to_process = nbytes - processed;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

		rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
				       &to_process, processed,
				       csbcpb->cpb.aes_ccm.iv_or_ctr);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		/* for partial completion, copy following for next
		 * entry into loop...
		 */
		memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
		       csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_s0,
		       csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		/* update stats */
		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	/* note: memcmp() is not constant-time; a timing-safe compare such
	 * as crypto_memneq() would be preferable for tag verification */
	rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
		    authsize) ? -EBADMSG : 0;
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

static int ccm_nx_encrypt(struct aead_request *req,
			  struct blkcipher_desc *desc)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	int rc = -1;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
	if (rc)
		goto out;

	do {
		/* to_process: the AES_BLOCK_SIZE data chunk to process in
		 * this update. This value is bound by sg list limits.
		 */
		to_process = nbytes - processed;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;

		rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
				       &to_process, processed,
				       csbcpb->cpb.aes_ccm.iv_or_ctr);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		/* for partial completion, copy following for next
		 * entry into loop...
		 */
		memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
		       csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_s0,
		       csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		/* update stats */
		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;

	} while (processed < nbytes);

	/* copy out the auth tag */
	scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
				 req->dst, nbytes, authsize,
				 SCATTERWALK_TO_SG);

out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

static int ccm4309_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	struct blkcipher_desc desc;
	u8 *iv = rctx->iv;

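	/* RFC 4309 counter block: flags byte with L' = 3 (4-byte length
	 * field, 11-byte nonce), then the 3-byte salt stored at setkey
	 * time, then the 8-byte per-request IV */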
	iv[0] = 3;
	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
	memcpy(iv + 4, req->iv, 8);

	desc.info = iv;
	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

	return ccm_nx_encrypt(req, &desc);
}

static int ccm_aes_nx_encrypt(struct aead_request *req)
{
	struct blkcipher_desc desc;
	int rc;

	desc.info = req->iv;
	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

	rc = crypto_ccm_check_iv(desc.info);
	if (rc)
		return rc;

	return ccm_nx_encrypt(req, &desc);
}

static int ccm4309_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	struct blkcipher_desc desc;
	u8 *iv = rctx->iv;

	/* same IV layout as in ccm4309_aes_nx_encrypt() */
	iv[0] = 3;
	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
	memcpy(iv + 4, req->iv, 8);

	desc.info = iv;
	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

	return ccm_nx_decrypt(req, &desc);
}

static int ccm_aes_nx_decrypt(struct aead_request *req)
{
	struct blkcipher_desc desc;
	int rc;

	desc.info = req->iv;
	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

	rc = crypto_ccm_check_iv(desc.info);
	if (rc)
		return rc;

	return ccm_nx_decrypt(req, &desc);
}

/* tell the block cipher walk routines that this is a stream cipher by
 * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
 * during encrypt/decrypt doesn't solve this problem, because it calls
 * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
 * but instead uses the tfm's blocksize. */
struct crypto_alg nx_ccm_aes_alg = {
	.cra_name        = "ccm(aes)",
	.cra_driver_name = "ccm-aes-nx",
	.cra_priority    = 300,
	.cra_flags       = CRYPTO_ALG_TYPE_AEAD |
			   CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize   = 1,
	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
	.cra_type        = &crypto_aead_type,
	.cra_module      = THIS_MODULE,
	.cra_init        = nx_crypto_ctx_aes_ccm_init,
	.cra_exit        = nx_crypto_ctx_exit,
	.cra_aead = {
		.ivsize      = AES_BLOCK_SIZE,
		.maxauthsize = AES_BLOCK_SIZE,
		.setkey      = ccm_aes_nx_set_key,
		.setauthsize = ccm_aes_nx_setauthsize,
		.encrypt     = ccm_aes_nx_encrypt,
		.decrypt     = ccm_aes_nx_decrypt,
	},
};

struct crypto_alg nx_ccm4309_aes_alg = {
	.cra_name        = "rfc4309(ccm(aes))",
	.cra_driver_name = "rfc4309-ccm-aes-nx",
	.cra_priority    = 300,
	.cra_flags       = CRYPTO_ALG_TYPE_AEAD |
			   CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize   = 1,
	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
	.cra_type        = &crypto_nivaead_type,
	.cra_module      = THIS_MODULE,
	.cra_init        = nx_crypto_ctx_aes_ccm_init,
	.cra_exit        = nx_crypto_ctx_exit,
	.cra_aead = {
		.ivsize      = 8,
		.maxauthsize = AES_BLOCK_SIZE,
		.setkey      = ccm4309_aes_nx_set_key,
		.setauthsize = ccm4309_aes_nx_setauthsize,
		.encrypt     = ccm4309_aes_nx_encrypt,
		.decrypt     = ccm4309_aes_nx_decrypt,
		.geniv       = "seqiv",
	},
};
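
/*
 * Usage sketch (not part of this driver): these algorithms are reached
 * through the generic kernel AEAD API rather than called directly, e.g.
 * (error handling elided):
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = crypto_alloc_aead("ccm(aes)", 0, 0);
 *	crypto_aead_setkey(tfm, key, AES_KEYSIZE_128);
 *		(AES-128 only, see ccm_aes_nx_set_key())
 *	crypto_aead_setauthsize(tfm, 16);
 *	...build an aead_request, then call crypto_aead_encrypt()...
 *	crypto_free_aead(tfm);
 */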