// SPDX-License-Identifier: GPL-2.0-only
/*
 * AES CCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"

static int ccm_aes_nx_set_key(struct crypto_aead *tfm,
			      const u8           *in_key,
			      unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	/* only AES-128 keys are supported by the CCM/CCA units */
	switch (key_len) {
	case AES_KEYSIZE_128:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	default:
		return -EINVAL;
	}

	csbcpb->cpb.hdr.mode = NX_MODE_AES_CCM;
	memcpy(csbcpb->cpb.aes_ccm.key, in_key, key_len);

	csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_CCA;
	memcpy(csbcpb_aead->cpb.aes_cca.key, in_key, key_len);

	return 0;
}
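
/*
 * RFC 4309 transports the key and the CCM nonce together: the last three
 * bytes of the key material passed to setkey are the implicit nonce and
 * the rest is the actual AES key.  Peel the nonce off here and hand the
 * remainder to the plain CCM setkey above.
 */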
static int ccm4309_aes_nx_set_key(struct crypto_aead *tfm,
				  const u8           *in_key,
				  unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);

	if (key_len < 3)
		return -EINVAL;

	key_len -= 3;

	memcpy(nx_ctx->priv.ccm.nonce, in_key + key_len, 3);

	return ccm_aes_nx_set_key(tfm, in_key, key_len);
}

static int ccm_aes_nx_setauthsize(struct crypto_aead *tfm,
				  unsigned int authsize)
{
	/* CCM allows even tag lengths from 4 to 16 bytes */
	switch (authsize) {
	case 4:
	case 6:
	case 8:
	case 10:
	case 12:
	case 14:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int ccm4309_aes_nx_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	/* RFC 4309 only permits 8, 12 or 16 byte ICVs */
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
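
/*
 * CCM stores the message length big-endian in the last "csize" bytes of
 * B0.  For example, with csize = 4 and msglen = 80, the field becomes
 * 00 00 00 50.  Lengths that do not fit in csize bytes are rejected.
 */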
/* taken from crypto/ccm.c */
static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
	__be32 data;

	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else if (msglen > (unsigned int)(1 << (8 * csize)))
		return -EOVERFLOW;

	data = cpu_to_be32(msglen);
	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

	return 0;
}

/* taken from crypto/ccm.c */
static inline int crypto_ccm_check_iv(const u8 *iv)
{
	/* 2 <= L <= 8, so 1 <= L' <= 7. */
	if (1 > iv[0] || iv[0] > 7)
		return -EINVAL;

	return 0;
}
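
/*
 * B0 layout per RFC 3610: byte 0 is the flags byte (bit 6 = Adata,
 * bits 3-5 = (M-2)/2 where M is the tag length, bits 0-2 = L-1),
 * followed by the nonce, with the message length in the last L bytes.
 */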
/* based on code from crypto/ccm.c */
static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize,
		       unsigned int cryptlen, u8 *b0)
{
	unsigned int l, lp, m = authsize;

	memcpy(b0, iv, 16);

	lp = b0[0];
	l = lp + 1;

	/* set m, bits 3-5 */
	*b0 |= (8 * ((m - 2) / 2));

	/* set adata, bit 6, if associated data is used */
	if (assoclen)
		*b0 |= 64;

	return set_msg_len(b0 + 16 - l, cryptlen, l);
}
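
/*
 * Generate the "partial authentication tag" (PAT) that seeds the CCM
 * operation.  Depending on how much associated data there is, the B0/B1
 * blocks are either fed through the CCM unit directly or hashed by the
 * CCA coprocessor first.
 */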
static int generate_pat(u8                   *iv,
			struct aead_request  *req,
			struct nx_crypto_ctx *nx_ctx,
			unsigned int          authsize,
			unsigned int          nbytes,
			unsigned int          assoclen,
			u8                   *out)
{
	struct nx_sg *nx_insg = nx_ctx->in_sg;
	struct nx_sg *nx_outsg = nx_ctx->out_sg;
	unsigned int iauth_len = 0;
	u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL;
	int rc;
	unsigned int max_sg_len;

	/* zero the ctr value */
	memset(iv + 15 - iv[0], 0, iv[0] + 1);

	/* page 78 of nx_wb.pdf has,
	 * Note: RFC3610 allows the AAD data to be up to 2^64 -1 bytes
	 * in length. If a full message is used, the AES CCA implementation
	 * restricts the maximum AAD length to 2^32 -1 bytes.
	 * If partial messages are used, the implementation supports
	 * 2^64 -1 bytes maximum AAD length.
	 *
	 * However, in the cryptoapi's aead_request structure,
	 * assoclen is an unsigned int, thus it cannot hold a length
	 * value greater than 2^32 - 1.
	 * Thus the AAD is further constrained by this and is never
	 * greater than 2^32.
	 */

	if (!assoclen) {
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
	} else if (assoclen <= 14) {
		/* if associated data is 14 bytes or less, we do 1 CCM
		 * operation on 2 AES blocks, B0 (stored in the csbcpb) and B1,
		 * which is fed in through the source buffers here */
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
		b1 = nx_ctx->priv.ccm.iauth_tag;
		iauth_len = assoclen;
	} else if (assoclen <= 65280) {
		/* if associated data is less than (2^16 - 2^8), we construct
		 * B1 differently and feed in the associated data to a CCA
		 * operation */
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 14;
	} else {
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 10;
	}

	/* generate B0 */
	rc = generate_b0(iv, assoclen, authsize, nbytes, b0);
	if (rc)
		return rc;

	/* generate B1:
	 * add control info for associated data
	 * RFC 3610 and NIST Special Publication 800-38C
	 */
	if (b1) {
		memset(b1, 0, 16);
		if (assoclen <= 65280) {
			*(u16 *)b1 = assoclen;
			scatterwalk_map_and_copy(b1 + 2, req->src, 0,
						 iauth_len,
						 SCATTERWALK_FROM_SG);
		} else {
			*(u16 *)b1 = (u16)(0xfffe);
			*(u32 *)&b1[2] = assoclen;
			scatterwalk_map_and_copy(b1 + 6, req->src, 0,
						 iauth_len,
						 SCATTERWALK_FROM_SG);
		}
	}
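
	/*
	 * Per RFC 3610, associated data shorter than 0xff00 bytes is
	 * prefixed with a 2-byte length; anything longer uses the 0xfffe
	 * marker followed by a 4-byte length.  Only the first iauth_len
	 * bytes of the AAD are folded into B1 here; the remainder is
	 * walked below.
	 */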
	/* now copy any remaining AAD to scatterlist and call nx... */
	if (!assoclen) {
		return rc;
	} else if (assoclen <= 14) {
		unsigned int len = 16;

		nx_insg = nx_build_sg_list(nx_insg, b1, &len, nx_ctx->ap->sglen);

		if (len != 16)
			return -EINVAL;

		nx_outsg = nx_build_sg_list(nx_outsg, tmp, &len,
					    nx_ctx->ap->sglen);

		if (len != 16)
			return -EINVAL;

		/* inlen should be negative, indicating to phyp that it's a
		 * pointer to an sg list */
		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) *
					sizeof(struct nx_sg);
		nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) *
					sizeof(struct nx_sg);

		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT;
		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE;

		result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);

	} else {
		unsigned int processed = 0, to_process;

		processed += iauth_len;

		/* page_limit: number of sg entries that fit on one page */
		max_sg_len = min_t(u64, nx_ctx->ap->sglen,
				   nx_driver.of.max_sg_len /
					sizeof(struct nx_sg));
		max_sg_len = min_t(u64, max_sg_len,
				   nx_ctx->ap->databytelen / NX_PAGE_SIZE);

		do {
			to_process = min_t(u32, assoclen - processed,
					   nx_ctx->ap->databytelen);

			nx_insg = nx_walk_and_build(nx_ctx->in_sg,
						    max_sg_len,
						    req->src, processed,
						    &to_process);

			if ((to_process + processed) < assoclen) {
				NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
					NX_FDM_INTERMEDIATE;
			} else {
				NX_CPB_FDM(nx_ctx->csbcpb_aead) &=
					~NX_FDM_INTERMEDIATE;
			}

			nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
						sizeof(struct nx_sg);

			result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;

			rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
					   req->base.flags &
						CRYPTO_TFM_REQ_MAY_SLEEP);
			if (rc)
				return rc;

			memcpy(nx_ctx->csbcpb_aead->cpb.aes_cca.b0,
			       nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0,
			       AES_BLOCK_SIZE);

			NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION;

			atomic_inc(&(nx_ctx->stats->aes_ops));
			atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);

			processed += to_process;
		} while (processed < assoclen);

		result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
	}

	memcpy(out, result, AES_BLOCK_SIZE);

	return rc;
}
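
/*
 * Decrypt path: the received tag is copied out of the source scatterlist
 * up front, the payload is decrypted in hardware-sized chunks, and the
 * computed MAC is compared against the saved tag only once the whole
 * message has been processed.
 */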
static int ccm_nx_decrypt(struct aead_request   *req,
			  u8                    *iv,
			  unsigned int assoclen)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
	struct nx_ccm_priv *priv = &nx_ctx->priv.ccm;
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	int rc = -1;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	nbytes -= authsize;

	/* copy out the auth tag to compare with later */
	scatterwalk_map_and_copy(priv->oauth_tag,
				 req->src, nbytes + req->assoclen, authsize,
				 SCATTERWALK_FROM_SG);

	rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
	if (rc)
		goto out;

	do {
		/* to_process: the AES_BLOCK_SIZE data chunk to process in this
		 * update. This value is bound by sg list limits.
		 */
		to_process = nbytes - processed;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

		rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
				       &to_process, processed + req->assoclen,
				       csbcpb->cpb.aes_ccm.iv_or_ctr);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		/* for partial completion, copy following for next
		 * entry into loop...
		 */
		memcpy(iv, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
		       csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_s0,
		       csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		/* update stats */
		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(be32_to_cpu(csbcpb->csb.processed_byte_count),
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);
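
	/* constant-time compare of the computed MAC against the saved tag */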
	rc = crypto_memneq(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
			   authsize) ? -EBADMSG : 0;
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
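
/*
 * Encrypt path: the same chunked walk as the decrypt path above, but
 * with NX_FDM_ENDE_ENCRYPT set; the MAC produced by the final chunk is
 * appended to the destination scatterlist as the auth tag.
 */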
static int ccm_nx_encrypt(struct aead_request   *req,
			  u8                    *iv,
			  unsigned int assoclen)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	int rc = -1;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
	if (rc)
		goto out;

	do {
		/* to_process: the AES_BLOCK_SIZE data chunk to process in this
		 * update. This value is bound by sg list limits.
		 */
		to_process = nbytes - processed;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;

		rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
				       &to_process, processed + req->assoclen,
				       csbcpb->cpb.aes_ccm.iv_or_ctr);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		/* for partial completion, copy following for next
		 * entry into loop...
		 */
		memcpy(iv, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
		       csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_s0,
		       csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		/* update stats */
		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(be32_to_cpu(csbcpb->csb.processed_byte_count),
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	/* copy out the auth tag */
	scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
				 req->dst, nbytes + req->assoclen, authsize,
				 SCATTERWALK_TO_SG);

out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
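
/*
 * RFC 4309 uses an 11-byte nonce: the flags byte is fixed at 3 (L' = 3),
 * followed by the 3-byte implicit nonce saved at setkey time and the
 * 8-byte per-request IV.  The request's assoclen includes those 8 IV
 * bytes, so they are subtracted before calling the core routine.
 */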
static int ccm4309_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	u8 *iv = rctx->iv;

	iv[0] = 3;
	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
	memcpy(iv + 4, req->iv, 8);

	return ccm_nx_encrypt(req, iv, req->assoclen - 8);
}

static int ccm_aes_nx_encrypt(struct aead_request *req)
{
	int rc;

	rc = crypto_ccm_check_iv(req->iv);
	if (rc)
		return rc;

	return ccm_nx_encrypt(req, req->iv, req->assoclen);
}

static int ccm4309_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	u8 *iv = rctx->iv;

	iv[0] = 3;
	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
	memcpy(iv + 4, req->iv, 8);

	return ccm_nx_decrypt(req, iv, req->assoclen - 8);
}

static int ccm_aes_nx_decrypt(struct aead_request *req)
{
	int rc;

	rc = crypto_ccm_check_iv(req->iv);
	if (rc)
		return rc;

	return ccm_nx_decrypt(req, req->iv, req->assoclen);
}
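
/*
 * AEAD algorithm descriptors exported to the crypto core; both entries
 * are registered by the driver setup code in nx.c.  CRYPTO_ALG_NEED_FALLBACK
 * lets callers fall back to a software implementation when a request
 * cannot be handled in hardware.
 */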
struct aead_alg nx_ccm_aes_alg = {
	.base = {
		.cra_name        = "ccm(aes)",
		.cra_driver_name = "ccm-aes-nx",
		.cra_priority    = 300,
		.cra_flags       = CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_ccm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = AES_BLOCK_SIZE,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = ccm_aes_nx_set_key,
	.setauthsize = ccm_aes_nx_setauthsize,
	.encrypt     = ccm_aes_nx_encrypt,
	.decrypt     = ccm_aes_nx_decrypt,
};

struct aead_alg nx_ccm4309_aes_alg = {
	.base = {
		.cra_name        = "rfc4309(ccm(aes))",
		.cra_driver_name = "rfc4309-ccm-aes-nx",
		.cra_priority    = 300,
		.cra_flags       = CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_ccm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = 8,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = ccm4309_aes_nx_set_key,
	.setauthsize = ccm4309_aes_nx_setauthsize,
	.encrypt     = ccm4309_aes_nx_encrypt,
	.decrypt     = ccm4309_aes_nx_decrypt,
};
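
/*
 * A minimal usage sketch (not part of this driver): how a kernel caller
 * would exercise "ccm(aes)" through the generic AEAD API.  Buffer
 * handling is simplified, error paths and async completion are omitted,
 * and the local names (key, buf, assoclen, ptlen, iv) are illustrative
 * only.
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("ccm(aes)", 0, 0);
 *	struct aead_request *req = aead_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *
 *	crypto_aead_setkey(tfm, key, 16);	// AES-128 only on NX
 *	crypto_aead_setauthsize(tfm, 16);
 *
 *	// buf holds AAD || plaintext, with room for the 16-byte tag
 *	sg_init_one(&sg, buf, assoclen + ptlen + 16);
 *	aead_request_set_ad(req, assoclen);
 *	aead_request_set_crypt(req, &sg, &sg, ptlen, iv);
 *	crypto_aead_encrypt(req);
 */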