// SPDX-License-Identifier: GPL-2.0-only
/*
 * AES CCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"

static int ccm_aes_nx_set_key(struct crypto_aead *tfm,
			      const u8           *in_key,
			      unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	switch (key_len) {
	case AES_KEYSIZE_128:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	default:
		return -EINVAL;
	}

	csbcpb->cpb.hdr.mode = NX_MODE_AES_CCM;
	memcpy(csbcpb->cpb.aes_ccm.key, in_key, key_len);

	csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_CCA;
	memcpy(csbcpb_aead->cpb.aes_cca.key, in_key, key_len);

	return 0;
}
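
/*
 * Note: two coprocessor parameter blocks are keyed above. The CCM-mode
 * csbcpb drives the combined CTR encryption + CBC-MAC pass, while the
 * CCA-mode csbcpb_aead is used by generate_pat() below to hash associated
 * data that is too long to fold directly into the B0/B1 blocks.
 */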

static int ccm4309_aes_nx_set_key(struct crypto_aead *tfm,
				  const u8           *in_key,
				  unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);

	if (key_len < 3)
		return -EINVAL;

	key_len -= 3;

	/* the last 3 bytes of the key are the RFC 4309 nonce (salt) */
	memcpy(nx_ctx->priv.ccm.nonce, in_key + key_len, 3);

	return ccm_aes_nx_set_key(tfm, in_key, key_len);
}

static int ccm_aes_nx_setauthsize(struct crypto_aead *tfm,
				  unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 6:
	case 8:
	case 10:
	case 12:
	case 14:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int ccm4309_aes_nx_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
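
/*
 * The tag lengths accepted above follow the specifications: NIST SP 800-38C
 * permits any even tag length from 4 to 16 bytes for CCM, and RFC 4309
 * (CCM for IPsec ESP) narrows the ICV to 8, 12 or 16 bytes.
 */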

/* taken from crypto/ccm.c */
static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
	__be32 data;

	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else if (msglen > (unsigned int)(1 << (8 * csize)))
		return -EOVERFLOW;

	data = cpu_to_be32(msglen);
	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

	return 0;
}
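
/*
 * Example: with a csize = 3 byte length field and msglen = 0x012345,
 * set_msg_len() stores the big-endian bytes 01 23 45 (the low three bytes
 * of cpu_to_be32(msglen)); an msglen that does not fit in csize bytes is
 * rejected with -EOVERFLOW.
 */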

/* taken from crypto/ccm.c */
static inline int crypto_ccm_check_iv(const u8 *iv)
{
	/* 2 <= L <= 8, so 1 <= L' <= 7. */
	if (1 > iv[0] || iv[0] > 7)
		return -EINVAL;

	return 0;
}

/* based on code from crypto/ccm.c */
static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize,
		       unsigned int cryptlen, u8 *b0)
{
	unsigned int l, lp, m = authsize;
	int rc;

	memcpy(b0, iv, 16);

	lp = b0[0];
	l = lp + 1;

	/* set m, bits 3-5 */
	*b0 |= (8 * ((m - 2) / 2));

	/* set adata, bit 6, if associated data is used */
	if (assoclen)
		*b0 |= 64;

	rc = set_msg_len(b0 + 16 - l, cryptlen, l);

	return rc;
}
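
/*
 * Per RFC 3610 / NIST SP 800-38C the B0 flags octet is laid out as
 * [0 | Adata | M' (3 bits) | L' (3 bits)], with M' = (m - 2) / 2 and
 * L' = l - 1. E.g. for an 8-byte tag with associated data present,
 * generate_b0() ORs 64 + 8 * 3 = 0x58 into the L' value already held
 * in iv[0].
 */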

static int generate_pat(u8                   *iv,
			struct aead_request  *req,
			struct nx_crypto_ctx *nx_ctx,
			unsigned int          authsize,
			unsigned int          nbytes,
			unsigned int          assoclen,
			u8                   *out)
{
	struct nx_sg *nx_insg = nx_ctx->in_sg;
	struct nx_sg *nx_outsg = nx_ctx->out_sg;
	unsigned int iauth_len = 0;
	u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL;
	int rc;
	unsigned int max_sg_len;

	/* zero the ctr value */
	memset(iv + 15 - iv[0], 0, iv[0] + 1);

	/* page 78 of nx_wb.pdf has,
	 * Note: RFC3610 allows the AAD data to be up to 2^64 - 1 bytes
	 * in length. If a full message is used, the AES CCA implementation
	 * restricts the maximum AAD length to 2^32 - 1 bytes.
	 * If partial messages are used, the implementation supports
	 * 2^64 - 1 bytes maximum AAD length.
	 *
	 * However, in the cryptoapi's aead_request structure,
	 * assoclen is an unsigned int, thus it cannot hold a length
	 * value greater than 2^32 - 1.
	 * Thus the AAD is further constrained by this and is never
	 * greater than 2^32 - 1.
	 */

	if (!assoclen) {
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
	} else if (assoclen <= 14) {
		/* if associated data is 14 bytes or less, we do one CCM
		 * operation on 2 AES blocks, B0 (stored in the csbcpb) and B1,
		 * which is fed in through the source buffers here */
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
		b1 = nx_ctx->priv.ccm.iauth_tag;
		iauth_len = assoclen;
	} else if (assoclen <= 65280) {
		/* if associated data is less than (2^16 - 2^8), we construct
		 * B1 differently and feed in the associated data to a CCA
		 * operation */
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 14;
	} else {
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 10;
	}

	/* generate B0 */
	rc = generate_b0(iv, assoclen, authsize, nbytes, b0);
	if (rc)
		return rc;

	/* generate B1:
	 * add control info for associated data
	 * RFC 3610 and NIST Special Publication 800-38C
	 */
	if (b1) {
		memset(b1, 0, 16);
		if (assoclen <= 65280) {
			*(u16 *)b1 = assoclen;
			scatterwalk_map_and_copy(b1 + 2, req->src, 0,
						 iauth_len,
						 SCATTERWALK_FROM_SG);
		} else {
			*(u16 *)b1 = (u16)(0xfffe);
			*(u32 *)&b1[2] = assoclen;
			scatterwalk_map_and_copy(b1 + 6, req->src, 0,
						 iauth_len,
						 SCATTERWALK_FROM_SG);
		}
	}
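
	/*
	 * The two encodings above follow RFC 3610: a short AAD length is
	 * encoded in two bytes, a longer one as the marker 0xff 0xfe
	 * followed by a four-byte length. Note that the plain u16/u32
	 * stores yield the required big-endian layout only because this
	 * driver runs on big-endian Power hardware.
	 */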

	/* now copy any remaining AAD to scatterlist and call nx... */
	if (!assoclen) {
		return rc;
	} else if (assoclen <= 14) {
		unsigned int len = 16;

		nx_insg = nx_build_sg_list(nx_insg, b1, &len,
					   nx_ctx->ap->sglen);

		if (len != 16)
			return -EINVAL;

		nx_outsg = nx_build_sg_list(nx_outsg, tmp, &len,
					    nx_ctx->ap->sglen);

		if (len != 16)
			return -EINVAL;

		/* inlen should be negative, indicating to phyp that it's a
		 * pointer to an sg list */
		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) *
					sizeof(struct nx_sg);
		nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) *
					sizeof(struct nx_sg);

		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT;
		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE;

		result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);
	} else {
		unsigned int processed = 0, to_process;

		processed += iauth_len;

		/* page_limit: number of sg entries that fit on one page */
		max_sg_len = min_t(u64, nx_ctx->ap->sglen,
				   nx_driver.of.max_sg_len/sizeof(struct nx_sg));
		max_sg_len = min_t(u64, max_sg_len,
				   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

		do {
			to_process = min_t(u32, assoclen - processed,
					   nx_ctx->ap->databytelen);

			nx_insg = nx_walk_and_build(nx_ctx->in_sg,
						    max_sg_len,
						    req->src, processed,
						    &to_process);

			if ((to_process + processed) < assoclen) {
				NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
					NX_FDM_INTERMEDIATE;
			} else {
				NX_CPB_FDM(nx_ctx->csbcpb_aead) &=
					~NX_FDM_INTERMEDIATE;
			}

			nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
						sizeof(struct nx_sg);

			result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;

			rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
					   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
			if (rc)
				return rc;

			memcpy(nx_ctx->csbcpb_aead->cpb.aes_cca.b0,
			       nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0,
			       AES_BLOCK_SIZE);

			NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION;

			atomic_inc(&(nx_ctx->stats->aes_ops));
			atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);

			processed += to_process;
		} while (processed < assoclen);

		result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
	}

	memcpy(out, result, AES_BLOCK_SIZE);

	return rc;
}
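
/*
 * In summary, generate_pat() builds B0 (and B1 when there is associated
 * data) as the initial authentication blocks: up to 14 AAD bytes are
 * folded straight into B1 and hashed by an intermediate CCM operation,
 * while longer AAD is pre-hashed here in databytelen-sized chunks through
 * the CCA coprocessor, and the resulting partial authentication tag is
 * copied to *out.
 */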

static int ccm_nx_decrypt(struct aead_request   *req,
			  u8                    *iv,
			  unsigned int assoclen)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
	struct nx_ccm_priv *priv = &nx_ctx->priv.ccm;
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	int rc = -1;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	nbytes -= authsize;

	/* copy out the auth tag to compare with later */
	scatterwalk_map_and_copy(priv->oauth_tag,
				 req->src, nbytes + req->assoclen, authsize,
				 SCATTERWALK_FROM_SG);

	rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
	if (rc)
		goto out;

	do {
		/* to_process: the AES_BLOCK_SIZE data chunk to process in this
		 * update. This value is bound by sg list limits.
		 */
		to_process = nbytes - processed;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

		rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
				       &to_process, processed + req->assoclen,
				       csbcpb->cpb.aes_ccm.iv_or_ctr);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		/* for partial completion, copy following for next
		 * entry into loop...
		 */
		memcpy(iv, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
		       csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_s0,
		       csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		/* update stats */
		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	/* constant-time compare avoids leaking tag bytes through timing */
	rc = crypto_memneq(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
			   authsize) ? -EBADMSG : 0;
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

static int ccm_nx_encrypt(struct aead_request   *req,
			  u8                    *iv,
			  unsigned int assoclen)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	int rc = -1;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
	if (rc)
		goto out;

	do {
		/* to_process: the AES_BLOCK_SIZE data chunk to process in this
		 * update. This value is bound by sg list limits.
		 */
		to_process = nbytes - processed;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;

		rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
				       &to_process, processed + req->assoclen,
				       csbcpb->cpb.aes_ccm.iv_or_ctr);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		/* for partial completion, copy following for next
		 * entry into loop...
		 */
		memcpy(iv, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
		       csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_s0,
		       csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		/* update stats */
		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	/* copy out the auth tag */
	scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
				 req->dst, nbytes + req->assoclen, authsize,
				 SCATTERWALK_TO_SG);

out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

static int ccm4309_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	u8 *iv = rctx->iv;

	iv[0] = 3;
	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
	memcpy(iv + 4, req->iv, 8);

	return ccm_nx_encrypt(req, iv, req->assoclen - 8);
}
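
/*
 * RFC 4309 nonce layout, as assembled above: iv[0] holds L' = 3 (a 4-byte
 * length field, hence an 11-byte nonce), bytes 1-3 are the salt saved at
 * setkey time, and bytes 4-11 are the per-request 8-byte IV. Those 8 IV
 * bytes also trail the AAD in req->src, which is why 8 is subtracted from
 * req->assoclen here and in the decrypt path.
 */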

static int ccm_aes_nx_encrypt(struct aead_request *req)
{
	int rc;

	rc = crypto_ccm_check_iv(req->iv);
	if (rc)
		return rc;

	return ccm_nx_encrypt(req, req->iv, req->assoclen);
}

static int ccm4309_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	u8 *iv = rctx->iv;

	iv[0] = 3;
	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
	memcpy(iv + 4, req->iv, 8);

	return ccm_nx_decrypt(req, iv, req->assoclen - 8);
}

static int ccm_aes_nx_decrypt(struct aead_request *req)
{
	int rc;

	rc = crypto_ccm_check_iv(req->iv);
	if (rc)
		return rc;

	return ccm_nx_decrypt(req, req->iv, req->assoclen);
}

struct aead_alg nx_ccm_aes_alg = {
	.base = {
		.cra_name        = "ccm(aes)",
		.cra_driver_name = "ccm-aes-nx",
		.cra_priority    = 300,
		.cra_flags       = CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_ccm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = AES_BLOCK_SIZE,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = ccm_aes_nx_set_key,
	.setauthsize = ccm_aes_nx_setauthsize,
	.encrypt     = ccm_aes_nx_encrypt,
	.decrypt     = ccm_aes_nx_decrypt,
};

struct aead_alg nx_ccm4309_aes_alg = {
	.base = {
		.cra_name        = "rfc4309(ccm(aes))",
		.cra_driver_name = "rfc4309-ccm-aes-nx",
		.cra_priority    = 300,
		.cra_flags       = CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_ccm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = 8,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = ccm4309_aes_nx_set_key,
	.setauthsize = ccm4309_aes_nx_setauthsize,
	.encrypt     = ccm4309_aes_nx_encrypt,
	.decrypt     = ccm4309_aes_nx_decrypt,
};