/*
 * AES GCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"
static int gcm_aes_nx_set_key(struct crypto_aead *tfm,
			      const u8           *in_key,
			      unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	switch (key_len) {
	case AES_KEYSIZE_128:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	case AES_KEYSIZE_192:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_192);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
		break;
	case AES_KEYSIZE_256:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_256);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
		break;
	default:
		return -EINVAL;
	}

	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
	memcpy(csbcpb->cpb.aes_gcm.key, in_key, key_len);

	csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_GCA;
	memcpy(csbcpb_aead->cpb.aes_gca.key, in_key, key_len);

	return 0;
}
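/*
 * The key is programmed into two control blocks because the driver issues
 * two distinct NX functions: GCA, which hashes the associated data (see
 * nx_gca() below), and GCM proper for the payload. Each coprocessor call
 * presumably needs the key material in its own CPB, hence the duplicated
 * NX_CPB_SET_KEY_SIZE()/memcpy() pairs above.
 */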
static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm,
				  const u8           *in_key,
				  unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
	char *nonce = nx_ctx->priv.gcm.nonce;
	int rc;

	if (key_len < NX_GCM4106_NONCE_LEN)
		return -EINVAL;

	key_len -= 4;

	rc = gcm_aes_nx_set_key(tfm, in_key, key_len);
	if (rc)
		goto out;

	memcpy(nonce, in_key + key_len, 4);
out:
	return rc;
}
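/*
 * Per RFC 4106, the last four bytes of the key material are not AES key
 * bytes at all but a salt (nonce) that is prepended to the per-request IV,
 * which is why set_key above strips them off and stashes them in the
 * context.
 */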
static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	/* RFC 4106 permits 8, 12 or 16 byte tags */
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int nx_gca(struct nx_crypto_ctx  *nx_ctx,
		  struct aead_request   *req,
		  u8                    *out,
		  unsigned int           assoclen)
{
	int rc;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
	struct scatter_walk walk;
	struct nx_sg *nx_sg = nx_ctx->in_sg;
	unsigned int nbytes = assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;
	if (nbytes <= AES_BLOCK_SIZE) {
		scatterwalk_start(&walk, req->src);
		scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG);
		scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);
		return 0;
	}
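	/*
	 * AAD of more than one block is hashed by the GCA coprocessor
	 * function below; the small-AAD case above instead copies the raw
	 * bytes straight into *out (the caller passes in_pat_or_aad), which
	 * the GCM operation presumably consumes directly.
	 */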
	NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
					  req->src, processed, &to_process);

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE;

		nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg)
					* sizeof(struct nx_sg);

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		memcpy(csbcpb_aead->cpb.aes_gca.in_pat,
		       csbcpb_aead->cpb.aes_gca.out_pat,
		       AES_BLOCK_SIZE);
		NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);

	return rc;
}
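/*
 * nx_gca() leaves the running GHASH of the associated data in out_pat and
 * copies it to *out; out_pat is fed back into in_pat between iterations so
 * that an arbitrarily long AAD can be hashed in databytelen-sized chunks.
 */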
static int gmac(struct aead_request *req, struct blkcipher_desc *desc,
		unsigned int assoclen)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *nx_sg;
	unsigned int nbytes = assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;

	/* Set GMAC mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC;

	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	/* Copy IV */
	memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE);

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
					  req->src, processed, &to_process);

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg)
				   * sizeof(struct nx_sg);

		csbcpb->cpb.aes_gcm.bit_length_data = 0;
		csbcpb->cpb.aes_gcm.bit_length_aad = 8 * nbytes;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
		       csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
		       csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
	return rc;
}
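/*
 * gmac() handles the AAD-only case (cryptlen == 0, assoclen != 0): it runs
 * the same chunked loop as the data path but with bit_length_data forced to
 * zero, so the hardware computes plain GMAC over the associated data.
 */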
static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
		     int enc)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	char out[AES_BLOCK_SIZE];
	struct nx_sg *in_sg, *out_sg;
	unsigned int len;

	/* For scenarios where the input message is zero length, AES CTR mode
	 * may be used. Set the source data to be a single block (16B) of all
	 * zeros, and set the input IV value to be the same as the GMAC IV
	 * value. - nx_wb 4.8.1.3 */

	/* Change to ECB mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
	memcpy(csbcpb->cpb.aes_ecb.key, csbcpb->cpb.aes_gcm.key,
	       sizeof(csbcpb->cpb.aes_ecb.key));
	if (enc)
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	else
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

	len = AES_BLOCK_SIZE;

	/* Encrypt the counter/IV */
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info,
				 &len, nx_ctx->ap->sglen);

	if (len != AES_BLOCK_SIZE)
		return -EINVAL;

	len = sizeof(out);
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, &len,
				  nx_ctx->ap->sglen);

	if (len != sizeof(out))
		return -EINVAL;

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));

	/* Copy out the auth tag */
	memcpy(csbcpb->cpb.aes_gcm.out_pat_or_mac, out,
	       crypto_aead_authsize(crypto_aead_reqtfm(req)));
out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;

	/*
	 * The ECB key uses the same region as the GCM AAD and counter, so
	 * it's safe to just fill it with zeroes.
	 */
	memset(csbcpb->cpb.aes_ecb.key, 0, sizeof(csbcpb->cpb.aes_ecb.key));

	return rc;
}
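/*
 * gcm_empty() relies on the fact that for a zero-length message with no AAD
 * the GCM tag reduces to E(K, Y0), i.e. a single AES encryption of the
 * initial counter block, so a one-block ECB operation over desc->info is
 * sufficient to produce the tag.
 */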
static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
			    unsigned int assoclen)
{
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct blkcipher_desc desc;
	unsigned int nbytes = req->cryptlen;
	unsigned int processed = 0, to_process;
	unsigned long irq_flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	desc.info = rctx->iv;
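	/*
	 * With the 96-bit IV used here, NIST SP 800-38D defines the initial
	 * counter block Y0 as IV || 0x00000001; NX_GCM_CTR_OFFSET is
	 * presumably the byte offset of that trailing 32-bit counter word.
	 */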
	/* initialize the counter */
	*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;

	if (nbytes == 0) {
		if (assoclen == 0)
			rc = gcm_empty(req, &desc, enc);
		else
			rc = gmac(req, &desc, assoclen);
		if (rc)
			goto out;
		else
			goto mac;
	}

	/* Process associated data */
	csbcpb->cpb.aes_gcm.bit_length_aad = assoclen * 8;
	if (assoclen) {
		rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad,
			    assoclen);
		if (rc)
			goto out;
	}

	/* Set flags for encryption */
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
	if (enc) {
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	} else {
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
		nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
	}

	do {
		to_process = nbytes - processed;

		csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
		rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
				       req->src, &to_process,
				       processed + req->assoclen,
				       csbcpb->cpb.aes_gcm.iv_or_cnt);
		if (rc)
			goto out;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;
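		/*
		 * Chain this chunk's output state into the next iteration:
		 * the updated counter, the running GHASH (out_pat_or_mac)
		 * and the S0 block all become inputs of the next NX call.
		 */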
		memcpy(desc.info, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
		       csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
		       csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

mac:
	if (enc) {
		/* copy out the auth tag */
		scatterwalk_map_and_copy(
			csbcpb->cpb.aes_gcm.out_pat_or_mac,
			req->dst, req->assoclen + nbytes,
			crypto_aead_authsize(crypto_aead_reqtfm(req)),
			SCATTERWALK_TO_SG);
	} else {
		u8 *itag = nx_ctx->priv.gcm.iauth_tag;
		u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;

		scatterwalk_map_and_copy(
			itag, req->src, req->assoclen + nbytes,
			crypto_aead_authsize(crypto_aead_reqtfm(req)),
			SCATTERWALK_FROM_SG);
		rc = crypto_memneq(itag, otag,
				   crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
		     -EBADMSG : 0;
	}
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
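/*
 * On encrypt the computed tag is appended after the ciphertext at offset
 * req->assoclen + nbytes; on decrypt the expected tag is read from the same
 * offset in the source and compared with crypto_memneq() so the check runs
 * in constant time, returning -EBADMSG on mismatch.
 */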
static int gcm_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;

	memcpy(iv, req->iv, 12);

	return gcm_aes_nx_crypt(req, 1, req->assoclen);
}
static int gcm_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;

	memcpy(iv, req->iv, 12);

	return gcm_aes_nx_crypt(req, 0, req->assoclen);
}
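/*
 * The RFC 4106 variants build the 12-byte GCM IV as the 4-byte salt saved
 * at setkey time followed by the 8-byte explicit IV carried in each
 * request; req->assoclen includes those 8 explicit-IV bytes, hence the
 * "- 8" adjustment before calling into the common crypt path.
 */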
static int gcm4106_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;
	char *nonce = nx_ctx->priv.gcm.nonce;

	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_aes_nx_crypt(req, 1, req->assoclen - 8);
}
static int gcm4106_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;
	char *nonce = nx_ctx->priv.gcm.nonce;

	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_aes_nx_crypt(req, 0, req->assoclen - 8);
}
/* tell the block cipher walk routines that this is a stream cipher by
 * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
 * during encrypt/decrypt doesn't solve this problem, because it calls
 * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
 * but instead uses this tfm->blocksize. */
struct aead_alg nx_gcm_aes_alg = {
	.base = {
		.cra_name        = "gcm(aes)",
		.cra_driver_name = "gcm-aes-nx",
		.cra_priority    = 300,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_gcm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = 12,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = gcm_aes_nx_set_key,
	.encrypt     = gcm_aes_nx_encrypt,
	.decrypt     = gcm_aes_nx_decrypt,
};
struct aead_alg nx_gcm4106_aes_alg = {
	.base = {
		.cra_name        = "rfc4106(gcm(aes))",
		.cra_driver_name = "rfc4106-gcm-aes-nx",
		.cra_priority    = 300,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_gcm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = 8,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = gcm4106_aes_nx_set_key,
	.setauthsize = gcm4106_aes_nx_setauthsize,
	.encrypt     = gcm4106_aes_nx_encrypt,
	.decrypt     = gcm4106_aes_nx_decrypt,
};