/**
 * AES GCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"
static int gcm_aes_nx_set_key(struct crypto_aead *tfm,
                              const u8           *in_key,
                              unsigned int        key_len)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

        nx_ctx_init(nx_ctx, HCOP_FC_AES);

        switch (key_len) {
        case AES_KEYSIZE_128:
                NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
                NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
                nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
                break;
        case AES_KEYSIZE_192:
                NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
                NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_192);
                nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
                break;
        case AES_KEYSIZE_256:
                NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
                NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_256);
                nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
                break;
        default:
                return -EINVAL;
        }

        /* the main csbcpb drives the GCM cipher itself; the aead csbcpb
         * drives the GCA function, which digests the associated data */
        csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
        memcpy(csbcpb->cpb.aes_gcm.key, in_key, key_len);

        csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_GCA;
        memcpy(csbcpb_aead->cpb.aes_gca.key, in_key, key_len);

        return 0;
}
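/* RFC 4106 keys carry a 4-byte nonce (salt) appended to the AES key proper;
 * split it off here and stash it for IV construction at request time. */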
static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm,
                                  const u8           *in_key,
                                  unsigned int        key_len)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
        char *nonce = nx_ctx->priv.gcm.nonce;
        int rc;

        if (key_len < 4)
                return -EINVAL;

        key_len -= 4;

        rc = gcm_aes_nx_set_key(tfm, in_key, key_len);
        if (rc)
                goto out;

        memcpy(nonce, in_key + key_len, 4);
out:
        return rc;
}
static int gcm_aes_nx_setauthsize(struct crypto_aead *tfm,
                                  unsigned int authsize)
{
        if (authsize > crypto_aead_alg(tfm)->maxauthsize)
                return -EINVAL;

        crypto_aead_crt(tfm)->authsize = authsize;

        return 0;
}
static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm,
                                      unsigned int authsize)
{
        /* RFC 4106 permits only 8, 12 or 16 byte tags */
        switch (authsize) {
        case 8:
        case 12:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        crypto_aead_crt(tfm)->authsize = authsize;

        return 0;
}
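/* Digest the associated data with the GCA function on the aead csbcpb,
 * walking the AAD scatterlist in databytelen/page-bounded chunks and
 * carrying the partial pattern forward via the continuation bits. The
 * final pattern is written to @out for use as GCM's initial AAD hash. */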
static int nx_gca(struct nx_crypto_ctx  *nx_ctx,
                  struct aead_request   *req,
                  u8                    *out)
{
        int rc;
        struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
        struct scatter_walk walk;
        struct nx_sg *nx_sg = nx_ctx->in_sg;
        unsigned int nbytes = req->assoclen;
        unsigned int processed = 0, to_process;
        u32 max_sg_len;

        /* AAD of one block or less is copied straight into the pattern */
        if (nbytes <= AES_BLOCK_SIZE) {
                scatterwalk_start(&walk, req->assoc);
                scatterwalk_copychunks(out, &walk, nbytes,
                                       SCATTERWALK_FROM_SG);
                scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);
                return 0;
        }

        NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION;

        /* page_limit: number of sg entries that fit on one page */
        max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
                           nx_ctx->ap->sglen);

        do {
                /*
                 * to_process: the data chunk to process in this update.
                 * This value is bound by sg list limits.
                 */
                to_process = min_t(u64, nbytes - processed,
                                   nx_ctx->ap->databytelen);
                to_process = min_t(u64, to_process,
                                   NX_PAGE_SIZE * (max_sg_len - 1));

                if ((to_process + processed) < nbytes)
                        NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
                else
                        NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE;

                nx_sg = nx_walk_and_build(nx_ctx->in_sg, nx_ctx->ap->sglen,
                                          req->assoc, processed, to_process);
                nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg)
                                        * sizeof(struct nx_sg);

                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
                                req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
                if (rc)
                        return rc;

                memcpy(csbcpb_aead->cpb.aes_gca.in_pat,
                                csbcpb_aead->cpb.aes_gca.out_pat,
                                AES_BLOCK_SIZE);
                NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;

                atomic_inc(&(nx_ctx->stats->aes_ops));
                atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));

                processed += to_process;
        } while (processed < nbytes);

        memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);

        return rc;
}
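/* GMAC path: authenticate AAD when there is no payload to encrypt. The
 * engine is switched into GMAC mode so only bit_length_aad is consumed;
 * bit_length_data stays zero throughout. */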
static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
{
        int rc;
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct nx_sg *nx_sg;
        unsigned int nbytes = req->assoclen;
        unsigned int processed = 0, to_process;
        u32 max_sg_len;

        /* Set GMAC mode */
        csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC;

        NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;

        /* page_limit: number of sg entries that fit on one page */
        max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
                           nx_ctx->ap->sglen);

        /* Copy IV */
        memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE);

        do {
                /*
                 * to_process: the data chunk to process in this update.
                 * This value is bound by sg list limits.
                 */
                to_process = min_t(u64, nbytes - processed,
                                   nx_ctx->ap->databytelen);
                to_process = min_t(u64, to_process,
                                   NX_PAGE_SIZE * (max_sg_len - 1));

                if ((to_process + processed) < nbytes)
                        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
                else
                        NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

                nx_sg = nx_walk_and_build(nx_ctx->in_sg, nx_ctx->ap->sglen,
                                          req->assoc, processed, to_process);
                nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg)
                                        * sizeof(struct nx_sg);

                csbcpb->cpb.aes_gcm.bit_length_data = 0;
                csbcpb->cpb.aes_gcm.bit_length_aad = 8 * nbytes;

                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                                req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
                if (rc)
                        goto out;

                memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
                        csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
                memcpy(csbcpb->cpb.aes_gcm.in_s0,
                        csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

                NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

                atomic_inc(&(nx_ctx->stats->aes_ops));
                atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));

                processed += to_process;
        } while (processed < nbytes);

out:
        /* Restore GCM mode */
        csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
        return rc;
}
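/* With no payload and no AAD, GHASH contributes nothing and the GCM tag
 * reduces to E_K(Y0): a single AES encryption of the initial counter
 * block. One ECB pass over the IV is therefore enough to produce it. */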
static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
                     int enc)
{
        int rc;
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        char out[AES_BLOCK_SIZE];
        struct nx_sg *in_sg, *out_sg;

        /* For scenarios where the input message is zero length, AES CTR mode
         * may be used. Set the source data to be a single block (16B) of all
         * zeros, and set the input IV value to be the same as the GMAC IV
         * value. - nx_wb 4.8.1.3 */

        /* Change to ECB mode */
        csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
        memcpy(csbcpb->cpb.aes_ecb.key, csbcpb->cpb.aes_gcm.key,
                        sizeof(csbcpb->cpb.aes_ecb.key));
        if (enc)
                NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
        else
                NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

        /* Encrypt the counter/IV */
        in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info,
                                 AES_BLOCK_SIZE, nx_ctx->ap->sglen);
        out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, sizeof(out),
                                  nx_ctx->ap->sglen);
        nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
        nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

        rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                           desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
        if (rc)
                goto out;
        atomic_inc(&(nx_ctx->stats->aes_ops));

        /* Copy out the auth tag */
        memcpy(csbcpb->cpb.aes_gcm.out_pat_or_mac, out,
                        crypto_aead_authsize(crypto_aead_reqtfm(req)));
out:
        /* Restore GCM mode */
        csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;

        /*
         * ECB key uses the same region that GCM AAD and counter, so it's safe
         * to just fill it with zeroes.
         */
        memset(csbcpb->cpb.aes_ecb.key, 0, sizeof(csbcpb->cpb.aes_ecb.key));

        return rc;
}
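/* Top-level GCM worker: dispatches the degenerate zero-payload cases to
 * gcm_empty()/gmac(), digests the AAD via nx_gca(), then encrypts or
 * decrypts the payload in bounded chunks and handles the auth tag. */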
static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct blkcipher_desc desc;
        unsigned int nbytes = req->cryptlen;
        unsigned int processed = 0, to_process;
        unsigned long irq_flags;
        u32 max_sg_len;
        int rc = -EINVAL;

        spin_lock_irqsave(&nx_ctx->lock, irq_flags);

        desc.info = nx_ctx->priv.gcm.iv;
        /* initialize the counter */
        *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;

        if (nbytes == 0) {
                if (req->assoclen == 0)
                        rc = gcm_empty(req, &desc, enc);
                else
                        rc = gmac(req, &desc);
                if (rc)
                        goto out;
                else
                        goto mac;
        }

        /* Process associated data */
        csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8;
        if (req->assoclen) {
                rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad);
                if (rc)
                        goto out;
        }

        /* Set flags for encryption */
        NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
        if (enc) {
                NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
        } else {
                NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
                nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
        }

        /* page_limit: number of sg entries that fit on one page */
        max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
                           nx_ctx->ap->sglen);

        do {
                /*
                 * to_process: the data chunk to process in this update.
                 * This value is bound by sg list limits.
                 */
                to_process = min_t(u64, nbytes - processed,
                                   nx_ctx->ap->databytelen);
                to_process = min_t(u64, to_process,
                                   NX_PAGE_SIZE * (max_sg_len - 1));

                if ((to_process + processed) < nbytes)
                        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
                else
                        NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

                csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
                desc.tfm = (struct crypto_blkcipher *) req->base.tfm;
                rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
                                       req->src, to_process, processed,
                                       csbcpb->cpb.aes_gcm.iv_or_cnt);
                if (rc)
                        goto out;

                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                                req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
                if (rc)
                        goto out;

                memcpy(desc.info, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
                memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
                        csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
                memcpy(csbcpb->cpb.aes_gcm.in_s0,
                        csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

                NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

                atomic_inc(&(nx_ctx->stats->aes_ops));
                atomic64_add(csbcpb->csb.processed_byte_count,
                             &(nx_ctx->stats->aes_bytes));

                processed += to_process;
        } while (processed < nbytes);

mac:
        if (enc) {
                /* copy out the auth tag */
                scatterwalk_map_and_copy(csbcpb->cpb.aes_gcm.out_pat_or_mac,
                                req->dst, nbytes,
                                crypto_aead_authsize(crypto_aead_reqtfm(req)),
                                SCATTERWALK_TO_SG);
        } else {
                u8 *itag = nx_ctx->priv.gcm.iauth_tag;
                u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;

                scatterwalk_map_and_copy(itag, req->src, nbytes,
                                crypto_aead_authsize(crypto_aead_reqtfm(req)),
                                SCATTERWALK_FROM_SG);
                rc = memcmp(itag, otag,
                            crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
                     -EBADMSG : 0;
        }
out:
        spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
}
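/* The plain gcm(aes) entry points take a caller-supplied 96-bit IV; the
 * remaining four bytes of the counter block are initialized to 1 inside
 * gcm_aes_nx_crypt(). The rfc4106 variants instead prepend the 4-byte
 * nonce saved at setkey time to the 8-byte per-request IV. */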
static int gcm_aes_nx_encrypt(struct aead_request *req)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        char *iv = nx_ctx->priv.gcm.iv;

        memcpy(iv, req->iv, 12);

        return gcm_aes_nx_crypt(req, 1);
}
static int gcm_aes_nx_decrypt(struct aead_request *req)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        char *iv = nx_ctx->priv.gcm.iv;

        memcpy(iv, req->iv, 12);

        return gcm_aes_nx_crypt(req, 0);
}
static int gcm4106_aes_nx_encrypt(struct aead_request *req)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        char *iv = nx_ctx->priv.gcm.iv;
        char *nonce = nx_ctx->priv.gcm.nonce;

        memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
        memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

        return gcm_aes_nx_crypt(req, 1);
}
static int gcm4106_aes_nx_decrypt(struct aead_request *req)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        char *iv = nx_ctx->priv.gcm.iv;
        char *nonce = nx_ctx->priv.gcm.nonce;

        memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
        memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

        return gcm_aes_nx_crypt(req, 0);
}
/* tell the block cipher walk routines that this is a stream cipher by
 * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
 * during encrypt/decrypt doesn't solve this problem, because it calls
 * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
 * but instead uses this tfm->blocksize. */
struct crypto_alg nx_gcm_aes_alg = {
        .cra_name        = "gcm(aes)",
        .cra_driver_name = "gcm-aes-nx",
        .cra_priority    = 300,
        .cra_flags       = CRYPTO_ALG_TYPE_AEAD,
        .cra_blocksize   = 1,
        .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
        .cra_type        = &crypto_aead_type,
        .cra_module      = THIS_MODULE,
        .cra_init        = nx_crypto_ctx_aes_gcm_init,
        .cra_exit        = nx_crypto_ctx_exit,
        .cra_aead = {
                .ivsize      = AES_BLOCK_SIZE,
                .maxauthsize = AES_BLOCK_SIZE,
                .setkey      = gcm_aes_nx_set_key,
                .setauthsize = gcm_aes_nx_setauthsize,
                .encrypt     = gcm_aes_nx_encrypt,
                .decrypt     = gcm_aes_nx_decrypt,
        }
};
struct crypto_alg nx_gcm4106_aes_alg = {
        .cra_name        = "rfc4106(gcm(aes))",
        .cra_driver_name = "rfc4106-gcm-aes-nx",
        .cra_priority    = 300,
        .cra_flags       = CRYPTO_ALG_TYPE_AEAD,
        .cra_blocksize   = 1,
        .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
        .cra_type        = &crypto_nivaead_type,
        .cra_module      = THIS_MODULE,
        .cra_init        = nx_crypto_ctx_aes_gcm_init,
        .cra_exit        = nx_crypto_ctx_exit,
        .cra_aead = {
                .ivsize      = 8,
                .maxauthsize = AES_BLOCK_SIZE,
                .geniv       = "seqiv",
                .setkey      = gcm4106_aes_nx_set_key,
                .setauthsize = gcm4106_aes_nx_setauthsize,
                .encrypt     = gcm4106_aes_nx_encrypt,
                .decrypt     = gcm4106_aes_nx_decrypt,
        }
};