/*
 * AES GCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"
static int gcm_aes_nx_set_key(struct crypto_aead *tfm,
			      const u8           *in_key,
			      unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	switch (key_len) {
	case AES_KEYSIZE_128:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	case AES_KEYSIZE_192:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_192);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
		break;
	case AES_KEYSIZE_256:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_256);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
		break;
	default:
		return -EINVAL;
	}

	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
	memcpy(csbcpb->cpb.aes_gcm.key, in_key, key_len);

	csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_GCA;
	memcpy(csbcpb_aead->cpb.aes_gca.key, in_key, key_len);

	return 0;
}
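
/*
 * rfc4106(gcm(aes)) keys carry a 4-byte nonce appended to the AES key
 * material (RFC 4106); strip it off, program the AES key as usual and
 * stash the nonce for building the per-request IV.
 */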
static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm,
				  const u8           *in_key,
				  unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
	char *nonce = nx_ctx->priv.gcm.nonce;
	int rc;

	if (key_len < 4)
		return -EINVAL;

	key_len -= 4;

	rc = gcm_aes_nx_set_key(tfm, in_key, key_len);
	if (rc)
		goto out;

	memcpy(nonce, in_key + key_len, 4);
out:
	return rc;
}
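
/*
 * gcm(aes) accepts any tag length up to the algorithm's maxauthsize
 * (16 bytes); rfc4106 restricts the tag to 8, 12 or 16 bytes.
 */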
static int gcm_aes_nx_setauthsize(struct crypto_aead *tfm,
				  unsigned int authsize)
{
	if (authsize > crypto_aead_alg(tfm)->maxauthsize)
		return -EINVAL;

	crypto_aead_crt(tfm)->authsize = authsize;

	return 0;
}
static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	crypto_aead_crt(tfm)->authsize = authsize;

	return 0;
}
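
/*
 * Hash the associated data with the GCA function code, feeding the
 * assoc scatterlist to the coprocessor in chunks bounded by the sg
 * list and databytelen limits.  The running pattern (out_pat) is fed
 * back in as in_pat on each continuation; the final value is the AAD
 * hash returned in 'out'.
 */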
static int nx_gca(struct nx_crypto_ctx  *nx_ctx,
		  struct aead_request   *req,
		  u8                    *out)
{
	int rc;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
	struct scatter_walk walk;
	struct nx_sg *nx_sg = nx_ctx->in_sg;
	unsigned int nbytes = req->assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;

	if (nbytes <= AES_BLOCK_SIZE) {
		scatterwalk_start(&walk, req->assoc);
		scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG);
		scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);
		return 0;
	}

	NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
					  req->assoc, processed, &to_process);

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE;

		nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg)
					* sizeof(struct nx_sg);

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		memcpy(csbcpb_aead->cpb.aes_gca.in_pat,
		       csbcpb_aead->cpb.aes_gca.out_pat,
		       AES_BLOCK_SIZE);
		NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);

	return rc;
}
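
/*
 * AAD-only requests (cryptlen == 0, assoclen != 0) are handled as a
 * GMAC operation: the hardware is temporarily switched to GMAC mode,
 * the assoc data is walked in chunks exactly as in nx_gca(), and the
 * resulting tag lands in out_pat_or_mac.  GCM mode is restored before
 * returning.
 */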
static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *nx_sg;
	unsigned int nbytes = req->assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;

	csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC;

	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE);

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
					  req->assoc, processed, &to_process);

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg)
					* sizeof(struct nx_sg);

		csbcpb->cpb.aes_gcm.bit_length_data = 0;
		csbcpb->cpb.aes_gcm.bit_length_aad = 8 * nbytes;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
		       csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
		       csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
	return rc;
}
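
/*
 * Zero-length plaintext with zero-length AAD: the GCM tag reduces to
 * E_K(Y0), so a single AES-ECB encryption of the initial counter block
 * is enough to produce it (see the nx_wb reference cited below).
 */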
static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
		     int enc)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	char out[AES_BLOCK_SIZE];
	struct nx_sg *in_sg, *out_sg;
	int len;

	/* For scenarios where the input message is zero length, AES CTR mode
	 * may be used. Set the source data to be a single block (16B) of all
	 * zeros, and set the input IV value to be the same as the GMAC IV
	 * value. - nx_wb 4.8.1.3 */

	/* Change to ECB mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
	memcpy(csbcpb->cpb.aes_ecb.key, csbcpb->cpb.aes_gcm.key,
	       sizeof(csbcpb->cpb.aes_ecb.key));
	if (enc)
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	else
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

	len = AES_BLOCK_SIZE;

	/* Encrypt the counter/IV */
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info,
				 &len, nx_ctx->ap->sglen);

	if (len != AES_BLOCK_SIZE)
		return -EINVAL;

	len = sizeof(out);
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, &len,
				  nx_ctx->ap->sglen);

	if (len != sizeof(out))
		return -EINVAL;

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));

	/* Copy out the auth tag */
	memcpy(csbcpb->cpb.aes_gcm.out_pat_or_mac, out,
	       crypto_aead_authsize(crypto_aead_reqtfm(req)));
out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;

	/*
	 * The ECB key uses the same region as the GCM AAD and counter, so
	 * it's safe to just fill it with zeroes.
	 */
	memset(csbcpb->cpb.aes_ecb.key, 0, sizeof(csbcpb->cpb.aes_ecb.key));

	return rc;
}
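
/*
 * Common encrypt/decrypt path.  The AAD is hashed first (nx_gca, or
 * gmac/gcm_empty for the degenerate cases), then the payload is run
 * through the coprocessor in as many chunks as the sg limits require,
 * carrying the pattern, S0 and counter across iterations.  On encrypt
 * the tag is copied out to the dst scatterlist; on decrypt it is
 * compared against the tag at the end of src.
 */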
static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct blkcipher_desc desc;
	unsigned int nbytes = req->cryptlen;
	unsigned int processed = 0, to_process;
	unsigned long irq_flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	desc.info = rctx->iv;
	/* initialize the counter */
	*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;

	if (nbytes == 0) {
		if (req->assoclen == 0)
			rc = gcm_empty(req, &desc, enc);
		else
			rc = gmac(req, &desc);
		if (rc)
			goto out;
		else
			goto mac;
	}

	/* Process associated data */
	csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8;
	if (req->assoclen) {
		rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad);
		if (rc)
			goto out;
	}

	/* Set flags for encryption */
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
	if (enc) {
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	} else {
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
		nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
	}

	do {
		to_process = nbytes - processed;

		csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
		desc.tfm = (struct crypto_blkcipher *) req->base.tfm;
		rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
				       req->src, &to_process, processed,
				       csbcpb->cpb.aes_gcm.iv_or_cnt);
		if (rc)
			goto out;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(desc.info, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
		       csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
		       csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

mac:
	if (enc) {
		/* copy out the auth tag */
		scatterwalk_map_and_copy(csbcpb->cpb.aes_gcm.out_pat_or_mac,
				 req->dst, nbytes,
				 crypto_aead_authsize(crypto_aead_reqtfm(req)),
				 SCATTERWALK_TO_SG);
	} else {
		u8 *itag = nx_ctx->priv.gcm.iauth_tag;
		u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;

		scatterwalk_map_and_copy(itag, req->src, nbytes,
				 crypto_aead_authsize(crypto_aead_reqtfm(req)),
				 SCATTERWALK_FROM_SG);
		rc = memcmp(itag, otag,
			    crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
		     -EBADMSG : 0;
	}
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
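
/*
 * gcm(aes) entry points: the caller supplies a 12-byte (96-bit) IV,
 * which is copied into the request context before the common crypt
 * path runs.
 */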
static int gcm_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;

	memcpy(iv, req->iv, 12);

	return gcm_aes_nx_crypt(req, 1);
}

static int gcm_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;

	memcpy(iv, req->iv, 12);

	return gcm_aes_nx_crypt(req, 0);
}
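
/*
 * rfc4106(gcm(aes)) entry points: the 96-bit IV is the 4-byte nonce
 * saved at setkey time followed by the 8-byte explicit IV from the
 * request.
 */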
static int gcm4106_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;
	char *nonce = nx_ctx->priv.gcm.nonce;

	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

	return gcm_aes_nx_crypt(req, 1);
}

static int gcm4106_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;
	char *nonce = nx_ctx->priv.gcm.nonce;

	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

	return gcm_aes_nx_crypt(req, 0);
}
/* tell the block cipher walk routines that this is a stream cipher by
 * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
 * during encrypt/decrypt doesn't solve this problem, because it calls
 * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
 * but instead uses this tfm->blocksize. */
struct crypto_alg nx_gcm_aes_alg = {
	.cra_name        = "gcm(aes)",
	.cra_driver_name = "gcm-aes-nx",
	.cra_flags       = CRYPTO_ALG_TYPE_AEAD,
	.cra_blocksize   = 1,
	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
	.cra_type        = &crypto_aead_type,
	.cra_module      = THIS_MODULE,
	.cra_init        = nx_crypto_ctx_aes_gcm_init,
	.cra_exit        = nx_crypto_ctx_exit,
	.cra_aead = {
		.ivsize      = AES_BLOCK_SIZE,
		.maxauthsize = AES_BLOCK_SIZE,
		.setkey      = gcm_aes_nx_set_key,
		.setauthsize = gcm_aes_nx_setauthsize,
		.encrypt     = gcm_aes_nx_encrypt,
		.decrypt     = gcm_aes_nx_decrypt,
	}
};
struct crypto_alg nx_gcm4106_aes_alg = {
	.cra_name        = "rfc4106(gcm(aes))",
	.cra_driver_name = "rfc4106-gcm-aes-nx",
	.cra_flags       = CRYPTO_ALG_TYPE_AEAD,
	.cra_blocksize   = 1,
	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
	.cra_type        = &crypto_nivaead_type,
	.cra_module      = THIS_MODULE,
	.cra_init        = nx_crypto_ctx_aes_gcm_init,
	.cra_exit        = nx_crypto_ctx_exit,
	.cra_aead = {
		.ivsize      = 8,
		.maxauthsize = AES_BLOCK_SIZE,
		.geniv       = "seqiv",
		.setkey      = gcm4106_aes_nx_set_key,
		.setauthsize = gcm4106_aes_nx_setauthsize,
		.encrypt     = gcm4106_aes_nx_encrypt,
		.decrypt     = gcm4106_aes_nx_decrypt,
	}
};