/**
 * AES CCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"

static int ccm_aes_nx_set_key(struct crypto_aead *tfm,
			      const u8           *in_key,
			      unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	/* only AES-128 is supported by the CCM/CCA units here */
	switch (key_len) {
	case AES_KEYSIZE_128:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	default:
		return -EINVAL;
	}

	csbcpb->cpb.hdr.mode = NX_MODE_AES_CCM;
	memcpy(csbcpb->cpb.aes_ccm.key, in_key, key_len);

	csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_CCA;
	memcpy(csbcpb_aead->cpb.aes_cca.key, in_key, key_len);

	return 0;
}

static int ccm4309_aes_nx_set_key(struct crypto_aead *tfm,
				  const u8           *in_key,
				  unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);

	if (key_len < 3)
		return -EINVAL;

	/* the last 3 key bytes are the RFC 4309 implicit nonce (salt) */
	key_len -= 3;

	memcpy(nx_ctx->priv.ccm.nonce, in_key + key_len, 3);

	return ccm_aes_nx_set_key(tfm, in_key, key_len);
}

static int ccm_aes_nx_setauthsize(struct crypto_aead *tfm,
				  unsigned int authsize)
{
	/* CCM permits only even tag lengths from 4 to 16 bytes */
	switch (authsize) {
	case 4: case 6: case 8: case 10: case 12: case 14: case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int ccm4309_aes_nx_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	/* RFC 4309 permits only 8, 12, or 16 byte ICVs */
	switch (authsize) {
	case 8: case 12: case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* taken from crypto/ccm.c */
static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
	__be32 data;

	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else if (msglen > (unsigned int)(1 << (8 * csize)))
		return -EOVERFLOW;

	data = cpu_to_be32(msglen);
	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

	return 0;
}
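
/*
 * For example, encoding msglen = 0x012345 into a csize = 3 length field
 * leaves block[0..2] as 0x01 0x23 0x45, i.e. the three low-order
 * big-endian bytes of the value. With csize > 4 the field is zero-padded
 * on the left, since msglen itself is at most 32 bits wide.
 */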

/* taken from crypto/ccm.c */
static inline int crypto_ccm_check_iv(const u8 *iv)
{
	/* 2 <= L <= 8, so 1 <= L' <= 7. */
	if (1 > iv[0] || iv[0] > 7)
		return -EINVAL;

	return 0;
}
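
/*
 * iv[0] holds L' = L - 1, where L is the width in bytes of the length
 * field in the counter block. A 16-byte block is split as 1 flags byte +
 * (15 - L) nonce bytes + L length bytes, so for RFC 4309 (L' = 3, L = 4)
 * the nonce occupies 11 bytes.
 */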

/* based on code from crypto/ccm.c */
static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize,
		       unsigned int cryptlen, u8 *b0)
{
	unsigned int l, lp, m = authsize;
	int rc;

	memcpy(b0, iv, 16);

	lp = b0[0];
	l = lp + 1;

	/* set m, bits 3-5 */
	*b0 |= (8 * ((m - 2) / 2));

	/* set adata, bit 6, if associated data is used */
	if (assoclen)
		*b0 |= 64;

	rc = set_msg_len(b0 + 16 - l, cryptlen, l);

	return rc;
}
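
/*
 * Worked example: with an 8-byte tag (m = 8), associated data present,
 * and L' = 3 already in iv[0], the B0 flags byte becomes
 * 64 | (8 * ((8 - 2) / 2)) | 3 = 0x40 | 0x18 | 0x03 = 0x5b,
 * matching the Adata/M'/L' encoding of RFC 3610 section 2.2.
 */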

static int generate_pat(u8                   *iv,
			struct aead_request  *req,
			struct nx_crypto_ctx *nx_ctx,
			unsigned int          authsize,
			unsigned int          nbytes,
			unsigned int          assoclen,
			u8                   *out)
{
	struct nx_sg *nx_insg = nx_ctx->in_sg;
	struct nx_sg *nx_outsg = nx_ctx->out_sg;
	unsigned int iauth_len = 0;
	u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL;
	int rc;
	unsigned int max_sg_len;

	/* zero the ctr value */
	memset(iv + 15 - iv[0], 0, iv[0] + 1);
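
	/*
	 * iv[0] holds L', so this clears the trailing L = L' + 1 counter
	 * bytes of the block (offsets 15 - L' through 15); the counter
	 * starts at zero while the flags byte and nonce stay intact.
	 */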

	/* page 78 of nx_wb.pdf has,
	 * Note: RFC3610 allows the AAD data to be up to 2^64 -1 bytes
	 * in length. If a full message is used, the AES CCA implementation
	 * restricts the maximum AAD length to 2^32 -1 bytes.
	 * If partial messages are used, the implementation supports
	 * 2^64 -1 bytes maximum AAD length.
	 *
	 * However, in the cryptoapi's aead_request structure,
	 * assoclen is an unsigned int, thus it cannot hold a length
	 * value greater than 2^32 - 1.
	 * Thus the AAD is further constrained by this and is never
	 * greater than 2^32.
	 */
	if (!assoclen) {
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
	} else if (assoclen <= 14) {
		/* if associated data is 14 bytes or less, we do a single CCM
		 * operation on 2 AES blocks, B0 (stored in the csbcpb) and B1,
		 * which is fed in through the source buffers here */
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
		b1 = nx_ctx->priv.ccm.iauth_tag;
		iauth_len = assoclen;
	} else if (assoclen <= 65280) {
		/* if associated data is less than (2^16 - 2^8), we construct
		 * B1 differently and feed in the associated data to a CCA
		 * operation */
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 14;
	} else {
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 10;
	}

	/* generate B0 */
	rc = generate_b0(iv, assoclen, authsize, nbytes, b0);
	if (rc)
		return rc;

	/* generate B1:
	 * add control info for associated data
	 * RFC 3610 and NIST Special Publication 800-38C
	 */
	if (b1) {
		memset(b1, 0, 16);
		if (assoclen <= 65280) {
			*(u16 *)b1 = assoclen;
			scatterwalk_map_and_copy(b1 + 2, req->src, 0,
						 iauth_len,
						 SCATTERWALK_FROM_SG);
		} else {
			*(u16 *)b1 = (u16)(0xfffe);
			*(u32 *)&b1[2] = assoclen;
			scatterwalk_map_and_copy(b1 + 6, req->src, 0,
						 iauth_len,
						 SCATTERWALK_FROM_SG);
		}
	}
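
	/*
	 * RFC 3610 encodes the AAD length at the front of B1: lengths below
	 * 0xff00 as a plain 2-byte big-endian value (which the u16 store
	 * yields directly on this big-endian platform), larger lengths as
	 * the 2-byte marker 0xfffe followed by a 4-byte length. So
	 * assoclen = 20 gives B1 = 00 14 <first 14 AAD bytes>, with the
	 * remaining AAD fed to the CCA operation below.
	 */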

	/* now copy any remaining AAD to scatterlist and call nx... */
	if (!assoclen) {
		return rc;
	} else if (assoclen <= 14) {
		unsigned int len = 16;

		nx_insg = nx_build_sg_list(nx_insg, b1, &len,
					   nx_ctx->ap->sglen);

		if (len != 16)
			return -EINVAL;

		nx_outsg = nx_build_sg_list(nx_outsg, tmp, &len,
					    nx_ctx->ap->sglen);

		if (len != 16)
			return -EINVAL;

		/* inlen should be negative, indicating to phyp that it's a
		 * pointer to an sg list */
		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) *
					sizeof(struct nx_sg);
		nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) *
					sizeof(struct nx_sg);

		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT;
		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE;

		result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);
	} else {
		unsigned int processed = 0, to_process;

		processed += iauth_len;

		/* page_limit: number of sg entries that fit on one page */
		max_sg_len = min_t(u64, nx_ctx->ap->sglen,
				   nx_driver.of.max_sg_len /
						sizeof(struct nx_sg));
		max_sg_len = min_t(u64, max_sg_len,
				   nx_ctx->ap->databytelen / NX_PAGE_SIZE);

		do {
			to_process = min_t(u32, assoclen - processed,
					   nx_ctx->ap->databytelen);

			nx_insg = nx_walk_and_build(nx_ctx->in_sg,
						    nx_ctx->ap->sglen,
						    req->src, processed,
						    &to_process);

			if ((to_process + processed) < assoclen) {
				NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
					NX_FDM_INTERMEDIATE;
			} else {
				NX_CPB_FDM(nx_ctx->csbcpb_aead) &=
					~NX_FDM_INTERMEDIATE;
			}

			nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
						sizeof(struct nx_sg);

			result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;

			rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
					   req->base.flags &
						CRYPTO_TFM_REQ_MAY_SLEEP);
			if (rc)
				return rc;

			/* chain: the output PAT seeds B0 of the next pass */
			memcpy(nx_ctx->csbcpb_aead->cpb.aes_cca.b0,
			       nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0,
			       AES_BLOCK_SIZE);

			NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION;

			atomic_inc(&(nx_ctx->stats->aes_ops));
			atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);

			processed += to_process;
		} while (processed < assoclen);

		result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
	}

	memcpy(out, result, AES_BLOCK_SIZE);

	return rc;
}
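
/*
 * The 16 bytes copied to "out" are the partial authentication tag (PAT):
 * either B0 itself (no AAD), the MAC over B0/B1 from the CCM unit, or
 * the CCA unit's out_pat_or_b0. The main CCM pass below consumes this
 * value via in_pat_or_b0.
 */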

static int ccm_nx_decrypt(struct aead_request   *req,
			  struct blkcipher_desc *desc,
			  unsigned int assoclen)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
	struct nx_ccm_priv *priv = &nx_ctx->priv.ccm;
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	int rc = -1;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	/* req->cryptlen includes the trailing auth tag on decrypt */
	nbytes -= authsize;

	/* copy out the auth tag to compare with later */
	scatterwalk_map_and_copy(priv->oauth_tag,
				 req->src, nbytes + req->assoclen, authsize,
				 SCATTERWALK_FROM_SG);

	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
	if (rc)
		goto out;

	do {
		/* to_process: the AES_BLOCK_SIZE data chunk to process in
		 * this update. This value is bound by sg list limits.
		 */
		to_process = nbytes - processed;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

		rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
				       &to_process, processed + req->assoclen,
				       csbcpb->cpb.aes_ccm.iv_or_ctr);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		/* for partial completion, copy following for next
		 * entry into loop...
		 */
		memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr,
		       AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
		       csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_s0,
		       csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		/* update stats */
		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	rc = crypto_memneq(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
			   authsize) ? -EBADMSG : 0;
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
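
/*
 * crypto_memneq() compares the computed MAC against the tag saved from
 * the ciphertext in constant time, so a forged message fails with
 * -EBADMSG without leaking how many tag bytes matched.
 */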

static int ccm_nx_encrypt(struct aead_request   *req,
			  struct blkcipher_desc *desc,
			  unsigned int assoclen)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	int rc = -1;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
	if (rc)
		goto out;

	do {
		/* to process: the AES_BLOCK_SIZE data chunk to process in
		 * this update. This value is bound by sg list limits.
		 */
		to_process = nbytes - processed;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;

		rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
				       &to_process, processed + req->assoclen,
				       csbcpb->cpb.aes_ccm.iv_or_ctr);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		/* for partial completion, copy following for next
		 * entry into loop...
		 */
		memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr,
		       AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
		       csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_s0,
		       csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		/* update stats */
		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	/* copy out the auth tag */
	scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
				 req->dst, nbytes + req->assoclen, authsize,
				 SCATTERWALK_TO_SG);

out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

static int ccm4309_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	struct blkcipher_desc desc;
	u8 *iv = rctx->iv;

	iv[0] = 3;
	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
	memcpy(iv + 4, req->iv, 8);

	desc.info = iv;

	return ccm_nx_encrypt(req, &desc, req->assoclen - 8);
}
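
/*
 * RFC 4309 counter block as built above: byte 0 is the flags byte with
 * L' = 3, bytes 1-3 the salt taken from the last 3 key bytes at setkey
 * time, bytes 4-11 the 8-byte per-request IV, bytes 12-15 the block
 * counter. In this AEAD interface the 8 IV bytes travel at the head of
 * the associated data, hence the "req->assoclen - 8" passed down.
 */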

static int ccm_aes_nx_encrypt(struct aead_request *req)
{
	struct blkcipher_desc desc;
	int rc;

	desc.info = req->iv;

	rc = crypto_ccm_check_iv(desc.info);
	if (rc)
		return rc;

	return ccm_nx_encrypt(req, &desc, req->assoclen);
}

static int ccm4309_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	struct blkcipher_desc desc;
	u8 *iv = rctx->iv;

	iv[0] = 3;
	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
	memcpy(iv + 4, req->iv, 8);

	desc.info = iv;

	return ccm_nx_decrypt(req, &desc, req->assoclen - 8);
}

static int ccm_aes_nx_decrypt(struct aead_request *req)
{
	struct blkcipher_desc desc;
	int rc;

	desc.info = req->iv;

	rc = crypto_ccm_check_iv(desc.info);
	if (rc)
		return rc;

	return ccm_nx_decrypt(req, &desc, req->assoclen);
}

/* tell the block cipher walk routines that this is a stream cipher by
 * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
 * during encrypt/decrypt doesn't solve this problem, because it calls
 * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
 * but instead uses this tfm->blocksize. */
struct aead_alg nx_ccm_aes_alg = {
	.base = {
		.cra_name        = "ccm(aes)",
		.cra_driver_name = "ccm-aes-nx",
		.cra_priority    = 300,
		.cra_flags       = CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_ccm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = AES_BLOCK_SIZE,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = ccm_aes_nx_set_key,
	.setauthsize = ccm_aes_nx_setauthsize,
	.encrypt     = ccm_aes_nx_encrypt,
	.decrypt     = ccm_aes_nx_decrypt,
};

struct aead_alg nx_ccm4309_aes_alg = {
	.base = {
		.cra_name        = "rfc4309(ccm(aes))",
		.cra_driver_name = "rfc4309-ccm-aes-nx",
		.cra_priority    = 300,
		.cra_flags       = CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_ccm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = 8,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = ccm4309_aes_nx_set_key,
	.setauthsize = ccm4309_aes_nx_setauthsize,
	.encrypt     = ccm4309_aes_nx_encrypt,
	.decrypt     = ccm4309_aes_nx_decrypt,
};
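
/*
 * A minimal usage sketch (hypothetical caller, not part of this driver):
 * kernel users reach these algorithms through the generic AEAD API,
 * never by calling the functions above directly.
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("ccm(aes)", 0, 0);
 *	struct aead_request *req;
 *
 *	crypto_aead_setkey(tfm, key, 16);	// AES-128 only, see setkey
 *	crypto_aead_setauthsize(tfm, 8);	// even tag size, 4..16
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	aead_request_set_callback(req, 0, NULL, NULL);
 *	aead_request_set_ad(req, assoclen);
 *	aead_request_set_crypt(req, src, dst, cryptlen, iv);
 *	crypto_aead_encrypt(req);
 *
 * Error handling is omitted for brevity; each call above can fail and
 * must be checked in real code.
 */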