/**
 * AES GCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"

static int gcm_aes_nx_set_key(struct crypto_aead *tfm,
			      const u8 *in_key,
			      unsigned int key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	switch (key_len) {
	case AES_KEYSIZE_128:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	case AES_KEYSIZE_192:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_192);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
		break;
	case AES_KEYSIZE_256:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_256);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
		break;
	default:
		return -EINVAL;
	}

	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
	memcpy(csbcpb->cpb.aes_gcm.key, in_key, key_len);

	csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_GCA;
	memcpy(csbcpb_aead->cpb.aes_gca.key, in_key, key_len);

	return 0;
}
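
/*
 * Note that gcm_aes_nx_set_key() keys two coprocessor blocks: the AAD
 * is hashed by a separate GCA operation driven off csbcpb_aead (see
 * nx_gca() below), while the payload itself is processed by the GCM
 * operation driven off csbcpb.
 */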
static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm,
				  const u8 *in_key,
				  unsigned int key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
	char *nonce = nx_ctx->priv.gcm.nonce;
	int rc;

	if (key_len < 4)
		return -EINVAL;

	key_len -= 4;

	rc = gcm_aes_nx_set_key(tfm, in_key, key_len);
	if (rc)
		goto out;

	memcpy(nonce, in_key + key_len, 4);
out:
	return rc;
}
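
/*
 * For rfc4106, the key material handed to setkey() is the AES key
 * followed by a 4-byte salt that seeds the per-request nonce (see
 * RFC 4106).  A sketch of the layout for AES-128, i.e. key_len == 20:
 *
 *	in_key[0..15]  -> AES key (AES_KEYSIZE_128)
 *	in_key[16..19] -> nx_ctx->priv.gcm.nonce
 */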
static int gcm_aes_nx_setauthsize(struct crypto_aead *tfm,
				  unsigned int authsize)
{
	if (authsize > crypto_aead_alg(tfm)->maxauthsize)
		return -EINVAL;

	crypto_aead_crt(tfm)->authsize = authsize;

	return 0;
}

static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	crypto_aead_crt(tfm)->authsize = authsize;

	return 0;
}

static int nx_gca(struct nx_crypto_ctx *nx_ctx,
		  struct aead_request *req,
		  u8 *out)
{
	int rc;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
	struct scatter_walk walk;
	struct nx_sg *nx_sg = nx_ctx->in_sg;
	unsigned int nbytes = req->assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;

	if (nbytes <= AES_BLOCK_SIZE) {
		scatterwalk_start(&walk, req->assoc);
		scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG);
		scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);
		return 0;
	}

	NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
					  req->assoc, processed, &to_process);

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE;
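
		/*
		 * inlen is computed as (start - end), so it comes out
		 * negative on purpose: per the matching comment in nx.c,
		 * a negative length tells the hypervisor that the
		 * parameter is a scatter/gather list rather than a
		 * linear buffer.
		 */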
		nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg)
					* sizeof(struct nx_sg);

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		memcpy(csbcpb_aead->cpb.aes_gca.in_pat,
		       csbcpb_aead->cpb.aes_gca.out_pat,
		       AES_BLOCK_SIZE);
		NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);

	return rc;
}
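
/*
 * gmac() handles the AAD-only case (cryptlen == 0 but assoclen != 0):
 * the CPB is flipped into GMAC mode and bit_length_data is forced to
 * zero, i.e. GCM degenerates into a pure MAC over the associated data.
 */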
static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *nx_sg;
	unsigned int nbytes = req->assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;

	/* Set GMAC mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC;

	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	/* Copy IV */
	memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE);

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
					  req->assoc, processed, &to_process);

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg)
					* sizeof(struct nx_sg);

		csbcpb->cpb.aes_gcm.bit_length_data = 0;
		csbcpb->cpb.aes_gcm.bit_length_aad = 8 * nbytes;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
		       csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
		       csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
	return rc;
}

static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
		     int enc)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	char out[AES_BLOCK_SIZE];
	struct nx_sg *in_sg, *out_sg;
	int len;

	/* For scenarios where the input message is zero length, AES CTR mode
	 * may be used. Set the source data to be a single block (16B) of all
	 * zeros, and set the input IV value to be the same as the GMAC IV
	 * value. - nx_wb 4.8.1.3 */
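
	/* In GCM terms: with no AAD and no payload, GHASH contributes
	 * nothing, so the tag reduces to E_K(J0), a single AES encryption
	 * of the initial counter block.  Encrypting desc->info (which the
	 * caller has already set to J0 = IV || 0x00000001 for the 96-bit
	 * IV case) in ECB mode below computes exactly that block. */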

	/* Change to ECB mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
	memcpy(csbcpb->cpb.aes_ecb.key, csbcpb->cpb.aes_gcm.key,
	       sizeof(csbcpb->cpb.aes_ecb.key));
	if (enc)
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	else
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

	len = AES_BLOCK_SIZE;

	/* Encrypt the counter/IV */
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info,
				 &len, nx_ctx->ap->sglen);
	if (len != AES_BLOCK_SIZE)
		return -EINVAL;

	len = sizeof(out);
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, &len,
				  nx_ctx->ap->sglen);
	if (len != sizeof(out))
		return -EINVAL;

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));

	/* Copy the encrypted counter block into the CPB as the auth tag */
	memcpy(csbcpb->cpb.aes_gcm.out_pat_or_mac, out,
	       crypto_aead_authsize(crypto_aead_reqtfm(req)));
out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;

	/*
	 * The ECB key occupies the same CPB region as the GCM AAD and
	 * counter, so it's safe to simply fill it with zeroes.
	 */
	memset(csbcpb->cpb.aes_ecb.key, 0, sizeof(csbcpb->cpb.aes_ecb.key));

	return rc;
}

static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct blkcipher_desc desc;
	unsigned int nbytes = req->cryptlen;
	unsigned int processed = 0, to_process;
	unsigned long irq_flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	desc.info = rctx->iv;
	/* initialize the counter */
	*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;
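
	/*
	 * For a 96-bit IV, GCM's initial counter block is
	 * J0 = IV || 0x00000001; the store above writes that trailing
	 * 32-bit 1, NX_GCM_CTR_OFFSET bytes into the block (Power is
	 * big-endian, so the constant lands in the right byte order).
	 */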

	if (nbytes == 0) {
		if (req->assoclen == 0)
			rc = gcm_empty(req, &desc, enc);
		else
			rc = gmac(req, &desc);
		if (rc)
			goto out;
		else
			goto mac;
	}

	/* Process associated data */
	csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8;
	if (req->assoclen) {
		rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad);
		if (rc)
			goto out;
	}

	/* Set flags for encryption */
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
	if (enc) {
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	} else {
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
		nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
	}

	do {
		to_process = nbytes - processed;

		csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
		desc.tfm = (struct crypto_blkcipher *) req->base.tfm;
		rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
				       req->src, &to_process, processed,
				       csbcpb->cpb.aes_gcm.iv_or_cnt);
		if (rc)
			goto out;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(desc.info, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
		       csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
		       csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

mac:
	if (enc) {
		/* copy out the auth tag */
		scatterwalk_map_and_copy(csbcpb->cpb.aes_gcm.out_pat_or_mac,
				 req->dst, nbytes,
				 crypto_aead_authsize(crypto_aead_reqtfm(req)),
				 SCATTERWALK_TO_SG);
	} else {
		u8 *itag = nx_ctx->priv.gcm.iauth_tag;
		u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;

		scatterwalk_map_and_copy(itag, req->src, nbytes,
				 crypto_aead_authsize(crypto_aead_reqtfm(req)),
				 SCATTERWALK_FROM_SG);
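		/*
		 * Note: memcmp() is not constant-time; a length-independent
		 * comparison such as crypto_memneq() would avoid leaking
		 * how many leading tag bytes match.
		 */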
		rc = memcmp(itag, otag,
			    crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
		     -EBADMSG : 0;
	}
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

static int gcm_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;

	memcpy(iv, req->iv, 12);

	return gcm_aes_nx_crypt(req, 1);
}

static int gcm_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;

	memcpy(iv, req->iv, 12);

	return gcm_aes_nx_crypt(req, 0);
}

static int gcm4106_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;
	char *nonce = nx_ctx->priv.gcm.nonce;

	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

	return gcm_aes_nx_crypt(req, 1);
}

static int gcm4106_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;
	char *nonce = nx_ctx->priv.gcm.nonce;

	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

	return gcm_aes_nx_crypt(req, 0);
}
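
/*
 * The 12-byte GCM IV used above is assembled per RFC 4106: the 4-byte
 * salt captured at setkey() time, followed by the 8-byte explicit IV
 * that travels with each request:
 *
 *	iv[0..3]  = nonce (salt from the tail of the key blob)
 *	iv[4..11] = req->iv (explicit part, e.g. generated by "seqiv")
 */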

/* tell the block cipher walk routines that this is a stream cipher by
 * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
 * during encrypt/decrypt doesn't solve this problem, because it calls
 * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
 * but instead uses this tfm->blocksize. */
struct crypto_alg nx_gcm_aes_alg = {
	.cra_name        = "gcm(aes)",
	.cra_driver_name = "gcm-aes-nx",
	.cra_priority    = 300,
	.cra_flags       = CRYPTO_ALG_TYPE_AEAD,
	.cra_blocksize   = 1,
	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
	.cra_type        = &crypto_aead_type,
	.cra_module      = THIS_MODULE,
	.cra_init        = nx_crypto_ctx_aes_gcm_init,
	.cra_exit        = nx_crypto_ctx_exit,
	.cra_aead = {
		.ivsize      = AES_BLOCK_SIZE,
		.maxauthsize = AES_BLOCK_SIZE,
		.setkey      = gcm_aes_nx_set_key,
		.setauthsize = gcm_aes_nx_setauthsize,
		.encrypt     = gcm_aes_nx_encrypt,
		.decrypt     = gcm_aes_nx_decrypt,
	}
};

struct crypto_alg nx_gcm4106_aes_alg = {
	.cra_name        = "rfc4106(gcm(aes))",
	.cra_driver_name = "rfc4106-gcm-aes-nx",
	.cra_priority    = 300,
	.cra_flags       = CRYPTO_ALG_TYPE_AEAD,
	.cra_blocksize   = 1,
	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
	.cra_type        = &crypto_nivaead_type,
	.cra_module      = THIS_MODULE,
	.cra_init        = nx_crypto_ctx_aes_gcm_init,
	.cra_exit        = nx_crypto_ctx_exit,
	.cra_aead = {
		.ivsize      = 8,
		.maxauthsize = AES_BLOCK_SIZE,
		.geniv       = "seqiv",
		.setkey      = gcm4106_aes_nx_set_key,
		.setauthsize = gcm4106_aes_nx_setauthsize,
		.encrypt     = gcm4106_aes_nx_encrypt,
		.decrypt     = gcm4106_aes_nx_decrypt,
	}
};