/* Linux 4.16.11 - drivers/crypto/nx/nx-aes-gcm.c */
/**
 * AES GCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"
static int gcm_aes_nx_set_key(struct crypto_aead *tfm,
			      const u8 *in_key,
			      unsigned int key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	switch (key_len) {
	case AES_KEYSIZE_128:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	case AES_KEYSIZE_192:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_192);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
		break;
	case AES_KEYSIZE_256:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_256);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
		break;
	default:
		return -EINVAL;
	}

	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
	memcpy(csbcpb->cpb.aes_gcm.key, in_key, key_len);

	csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_GCA;
	memcpy(csbcpb_aead->cpb.aes_gca.key, in_key, key_len);

	return 0;
}
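
/*
 * RFC 4106 keys carry a 4-byte nonce appended to the AES key material:
 * split it off, program the cipher key as usual, and keep the nonce to
 * prepend to each request's IV.
 */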
static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm,
				  const u8 *in_key,
				  unsigned int key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
	char *nonce = nx_ctx->priv.gcm.nonce;
	int rc;

	if (key_len < 4)
		return -EINVAL;

	key_len -= 4;

	rc = gcm_aes_nx_set_key(tfm, in_key, key_len);
	if (rc)
		goto out;

	memcpy(nonce, in_key + key_len, 4);
out:
	return rc;
}
static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
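
/*
 * Digest the associated data with the GCA coprocessor operation, walking
 * req->src in chunks bounded by the sg and byte limits and chaining the
 * partial digest (out_pat -> in_pat) between hcalls. An AAD of one block
 * or less is simply copied to *out for the GCM operation to consume
 * directly.
 */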
static int nx_gca(struct nx_crypto_ctx *nx_ctx,
		  struct aead_request *req,
		  u8 *out,
		  unsigned int assoclen)
{
	int rc;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
	struct scatter_walk walk;
	struct nx_sg *nx_sg = nx_ctx->in_sg;
	unsigned int nbytes = assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;

	if (nbytes <= AES_BLOCK_SIZE) {
		scatterwalk_start(&walk, req->src);
		scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG);
		scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);
		return 0;
	}

	NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
					  req->src, processed, &to_process);

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE;

		nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg)
					* sizeof(struct nx_sg);

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		memcpy(csbcpb_aead->cpb.aes_gca.in_pat,
		       csbcpb_aead->cpb.aes_gca.out_pat,
		       AES_BLOCK_SIZE);
		NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);

	return rc;
}
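
/*
 * Authentication-only path for requests that carry AAD but no payload:
 * switch the CPB to GMAC mode, feed the associated data through in
 * chunks, and leave the running MAC in out_pat_or_mac for the caller.
 */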
static int gmac(struct aead_request *req, struct blkcipher_desc *desc,
		unsigned int assoclen)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *nx_sg;
	unsigned int nbytes = assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;

	/* Set GMAC mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC;

	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	/* Copy IV */
	memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE);

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
					  req->src, processed, &to_process);

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg)
				   * sizeof(struct nx_sg);

		csbcpb->cpb.aes_gcm.bit_length_data = 0;
		csbcpb->cpb.aes_gcm.bit_length_aad = 8 * nbytes;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
		       csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
		       csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
	return rc;
}
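
/*
 * No payload and no AAD: the GCM tag reduces to the encrypted initial
 * counter block, so run a single-block ECB encryption of the IV/counter
 * (per nx_wb 4.8.1.3) and store the result where the tag would normally
 * be picked up from.
 */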
static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
		     int enc)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	char out[AES_BLOCK_SIZE];
	struct nx_sg *in_sg, *out_sg;
	int len;

	/* For scenarios where the input message is zero length, AES CTR mode
	 * may be used. Set the source data to be a single block (16B) of all
	 * zeros, and set the input IV value to be the same as the GMAC IV
	 * value. - nx_wb 4.8.1.3 */

	/* Change to ECB mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
	memcpy(csbcpb->cpb.aes_ecb.key, csbcpb->cpb.aes_gcm.key,
	       sizeof(csbcpb->cpb.aes_ecb.key));
	if (enc)
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	else
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

	len = AES_BLOCK_SIZE;

	/* Encrypt the counter/IV */
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info,
				 &len, nx_ctx->ap->sglen);

	if (len != AES_BLOCK_SIZE)
		return -EINVAL;

	len = sizeof(out);
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, &len,
				  nx_ctx->ap->sglen);

	if (len != sizeof(out))
		return -EINVAL;

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));

	/* Copy out the auth tag */
	memcpy(csbcpb->cpb.aes_gcm.out_pat_or_mac, out,
	       crypto_aead_authsize(crypto_aead_reqtfm(req)));
out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;

	/*
	 * The ECB key uses the same region as the GCM AAD and counter, so
	 * it's safe to just fill it with zeroes.
	 */
	memset(csbcpb->cpb.aes_ecb.key, 0, sizeof(csbcpb->cpb.aes_ecb.key));

	return rc;
}
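
/*
 * Common GCM path for both templates. Zero-length payloads divert to
 * gcm_empty() or gmac(); otherwise the AAD is digested via nx_gca() and
 * the payload is pushed through the coprocessor in sg-limited chunks,
 * carrying the counter, partial tag and S0 between hcalls. On encrypt
 * the tag is appended to the destination; on decrypt it is compared
 * against the received tag with crypto_memneq().
 */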
static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
			    unsigned int assoclen)
{
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct blkcipher_desc desc;
	unsigned int nbytes = req->cryptlen;
	unsigned int processed = 0, to_process;
	unsigned long irq_flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	desc.info = rctx->iv;
	/* initialize the counter */
	*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;

	if (nbytes == 0) {
		if (assoclen == 0)
			rc = gcm_empty(req, &desc, enc);
		else
			rc = gmac(req, &desc, assoclen);
		if (rc)
			goto out;
		else
			goto mac;
	}

	/* Process associated data */
	csbcpb->cpb.aes_gcm.bit_length_aad = assoclen * 8;
	if (assoclen) {
		rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad,
			    assoclen);
		if (rc)
			goto out;
	}

	/* Set flags for encryption */
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
	if (enc) {
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	} else {
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
		nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
	}

	do {
		to_process = nbytes - processed;

		csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
		rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
				       req->src, &to_process,
				       processed + req->assoclen,
				       csbcpb->cpb.aes_gcm.iv_or_cnt);

		if (rc)
			goto out;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(desc.info, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
		       csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
		       csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

mac:
	if (enc) {
		/* copy out the auth tag */
		scatterwalk_map_and_copy(
			csbcpb->cpb.aes_gcm.out_pat_or_mac,
			req->dst, req->assoclen + nbytes,
			crypto_aead_authsize(crypto_aead_reqtfm(req)),
			SCATTERWALK_TO_SG);
	} else {
		u8 *itag = nx_ctx->priv.gcm.iauth_tag;
		u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;

		scatterwalk_map_and_copy(
			itag, req->src, req->assoclen + nbytes,
			crypto_aead_authsize(crypto_aead_reqtfm(req)),
			SCATTERWALK_FROM_SG);
		rc = crypto_memneq(itag, otag,
				   crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
		     -EBADMSG : 0;
	}
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
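
/*
 * gcm(aes) entry points: stage the caller's IV into the request context,
 * where gcm_aes_nx_crypt() can update the counter portion across chunks.
 */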
static int gcm_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;

	memcpy(iv, req->iv, GCM_AES_IV_SIZE);

	return gcm_aes_nx_crypt(req, 1, req->assoclen);
}
static int gcm_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;

	memcpy(iv, req->iv, GCM_AES_IV_SIZE);

	return gcm_aes_nx_crypt(req, 0, req->assoclen);
}
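
/*
 * RFC 4106 entry points: the stored nonce supplies the first four IV
 * bytes and the caller's 8-byte explicit IV the rest. The kernel's
 * rfc4106 convention counts that explicit IV as part of req->assoclen,
 * hence the "< 8" check and the "- 8" passed down to the GCM path.
 */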
static int gcm4106_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;
	char *nonce = nx_ctx->priv.gcm.nonce;

	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_aes_nx_crypt(req, 1, req->assoclen - 8);
}
static int gcm4106_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;
	char *nonce = nx_ctx->priv.gcm.nonce;

	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_aes_nx_crypt(req, 0, req->assoclen - 8);
}
/* tell the block cipher walk routines that this is a stream cipher by
 * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
 * during encrypt/decrypt doesn't solve this problem, because it calls
 * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
 * but instead uses the tfm's blocksize. */
struct aead_alg nx_gcm_aes_alg = {
	.base = {
		.cra_name        = "gcm(aes)",
		.cra_driver_name = "gcm-aes-nx",
		.cra_priority    = 300,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_gcm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = GCM_AES_IV_SIZE,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = gcm_aes_nx_set_key,
	.encrypt     = gcm_aes_nx_encrypt,
	.decrypt     = gcm_aes_nx_decrypt,
};
struct aead_alg nx_gcm4106_aes_alg = {
	.base = {
		.cra_name        = "rfc4106(gcm(aes))",
		.cra_driver_name = "rfc4106-gcm-aes-nx",
		.cra_priority    = 300,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init         = nx_crypto_ctx_aes_gcm_init,
	.exit         = nx_crypto_ctx_aead_exit,
	.ivsize       = GCM_RFC4106_IV_SIZE,
	.maxauthsize  = AES_BLOCK_SIZE,
	.setkey       = gcm4106_aes_nx_set_key,
	.setauthsize  = gcm4106_aes_nx_setauthsize,
	.encrypt      = gcm4106_aes_nx_encrypt,
	.decrypt      = gcm4106_aes_nx_decrypt,
};
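
/*
 * Illustrative sketch only, not part of the upstream driver: roughly how
 * a kernel caller would exercise "gcm(aes)" through the generic AEAD API
 * (which resolves to gcm-aes-nx above when it is the highest-priority
 * provider). The function name, key, and buffer sizes are arbitrary
 * example values, and the sketch additionally assumes <linux/slab.h>
 * for kzalloc()/kfree().
 */
static int __maybe_unused nx_gcm_usage_sketch(void)
{
	u8 key[AES_KEYSIZE_128] = { 0 };	/* example all-zero key */
	u8 iv[GCM_AES_IV_SIZE] = { 0 };		/* 96-bit IV */
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 *buf;
	int rc;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = crypto_aead_setkey(tfm, key, sizeof(key));
	if (rc)
		goto out_tfm;

	rc = crypto_aead_setauthsize(tfm, AES_BLOCK_SIZE);
	if (rc)
		goto out_tfm;

	/* heap buffer: 32 bytes of plaintext followed by room for the tag */
	buf = kzalloc(32 + AES_BLOCK_SIZE, GFP_KERNEL);
	if (!buf) {
		rc = -ENOMEM;
		goto out_tfm;
	}

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		rc = -ENOMEM;
		goto out_buf;
	}

	sg_init_one(&sg, buf, 32 + AES_BLOCK_SIZE);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
				       CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, 0);			/* no AAD */
	aead_request_set_crypt(req, &sg, &sg, 32, iv);	/* encrypt in place */

	rc = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out_buf:
	kfree(buf);
out_tfm:
	crypto_free_aead(tfm);
	return rc;
}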