// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include <crypto/aes.h>
#include <crypto/authenc.h>
#include <crypto/cryptd.h>
#include <crypto/des.h>
#include <crypto/internal/aead.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/xts.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
#include <linux/sort.h>
#include <linux/module.h>
#include "otx2_cptvf.h"
#include "otx2_cptvf_algs.h"
#include "otx2_cpt_reqmgr.h"
#include "cn10k_cpt.h"

/* Size of salt in AES GCM mode */
#define AES_GCM_SALT_SIZE 4
/* Size of IV in AES GCM mode */
#define AES_GCM_IV_SIZE 8
/* Size of ICV (Integrity Check Value) in AES GCM mode */
#define AES_GCM_ICV_SIZE 16
/* Offset of IV in AES GCM mode */
#define AES_GCM_IV_OFFSET 8
#define CONTROL_WORD_LEN 8
#define KEY2_OFFSET 48
#define DMA_MODE_FLAG(dma_mode) \
	(((dma_mode) == OTX2_CPT_DMA_MODE_SG) ? (1 << 7) : 0)

/* Truncated SHA digest size */
#define SHA1_TRUNC_DIGEST_SIZE 12
#define SHA256_TRUNC_DIGEST_SIZE 16
#define SHA384_TRUNC_DIGEST_SIZE 24
#define SHA512_TRUNC_DIGEST_SIZE 32
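/*
 * Table of SE (symmetric engine) CPT devices registered with this module.
 * All requests are issued to the first device in the table; get_se_device()
 * below maps the submitting CPU onto one of that device's queues.
 */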
static DEFINE_MUTEX(mutex);
static int is_crypto_registered;

struct cpt_device_desc {
	struct pci_dev *dev;
	int num_queues;
};

struct cpt_device_table {
	atomic_t count;
	struct cpt_device_desc desc[OTX2_CPT_MAX_LFS_NUM];
};

static struct cpt_device_table se_devices = {
	.count = ATOMIC_INIT(0)
};

static struct otx2_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg);

static inline int get_se_device(struct pci_dev **pdev, int *cpu_num)
{
	int count;

	count = atomic_read(&se_devices.count);
	if (count < 1)
		return -ENODEV;

	*cpu_num = get_cpu();
	/*
	 * On OcteonTX2 platform CPT instruction queue is bound to each
	 * local function LF, in turn LFs can be attached to PF
	 * or VF therefore we always use first device. We get maximum
	 * performance if one CPT queue is available for each cpu
	 * otherwise CPT queues need to be shared between cpus.
	 */
	if (*cpu_num >= se_devices.desc[0].num_queues)
		*cpu_num %= se_devices.desc[0].num_queues;
	*pdev = se_devices.desc[0].dev;

	put_cpu();

	return 0;
}
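/*
 * For the cipher-null (authentication only) AEADs the engine computes the
 * HMAC but does not compare it, so on decryption the driver checks the
 * calculated digest against the one copied out of the received request.
 */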
static inline int validate_hmac_cipher_null(struct otx2_cpt_req_info *cpt_req)
{
	struct otx2_cpt_req_ctx *rctx;
	struct aead_request *req;
	struct crypto_aead *tfm;

	req = container_of(cpt_req->areq, struct aead_request, base);
	tfm = crypto_aead_reqtfm(req);
	rctx = aead_request_ctx_dma(req);
	if (memcmp(rctx->fctx.hmac.s.hmac_calc,
		   rctx->fctx.hmac.s.hmac_recv,
		   crypto_aead_authsize(tfm)) != 0)
		return -EBADMSG;

	return 0;
}
static void otx2_cpt_aead_callback(int status, void *arg1, void *arg2)
{
	struct otx2_cpt_inst_info *inst_info = arg2;
	struct crypto_async_request *areq = arg1;
	struct otx2_cpt_req_info *cpt_req;
	struct pci_dev *pdev;

	if (inst_info) {
		cpt_req = inst_info->req;
		if (!status) {
			/*
			 * When selected cipher is NULL we need to manually
			 * verify whether calculated hmac value matches
			 * received hmac value
			 */
			if (cpt_req->req_type ==
			    OTX2_CPT_AEAD_ENC_DEC_NULL_REQ &&
			    !cpt_req->is_enc)
				status = validate_hmac_cipher_null(cpt_req);
		}
		pdev = inst_info->pdev;
		otx2_cpt_info_destroy(pdev, inst_info);
	}
	if (areq)
		crypto_request_complete(areq, status);
}
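/*
 * CBC chaining: the IV for a follow-up request is the last ciphertext block
 * of this one.  For encryption it is read back from the destination buffer;
 * for in-place decryption it was saved in iv_out by create_ctx_hdr() before
 * the engine overwrote the source.
 */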
static void output_iv_copyback(struct crypto_async_request *areq)
{
	struct otx2_cpt_req_info *req_info;
	struct otx2_cpt_req_ctx *rctx;
	struct skcipher_request *sreq;
	struct crypto_skcipher *stfm;
	struct otx2_cpt_enc_ctx *ctx;
	u32 start, ivsize;

	sreq = container_of(areq, struct skcipher_request, base);
	stfm = crypto_skcipher_reqtfm(sreq);
	ctx = crypto_skcipher_ctx(stfm);
	if (ctx->cipher_type == OTX2_CPT_AES_CBC ||
	    ctx->cipher_type == OTX2_CPT_DES3_CBC) {
		rctx = skcipher_request_ctx_dma(sreq);
		req_info = &rctx->cpt_req;
		ivsize = crypto_skcipher_ivsize(stfm);
		start = sreq->cryptlen - ivsize;

		if (req_info->is_enc) {
			scatterwalk_map_and_copy(sreq->iv, sreq->dst, start,
						 ivsize, 0);
		} else {
			if (sreq->src != sreq->dst) {
				scatterwalk_map_and_copy(sreq->iv, sreq->src,
							 start, ivsize, 0);
			} else {
				memcpy(sreq->iv, req_info->iv_out, ivsize);
				kfree(req_info->iv_out);
			}
		}
	}
}
static void otx2_cpt_skcipher_callback(int status, void *arg1, void *arg2)
{
	struct otx2_cpt_inst_info *inst_info = arg2;
	struct crypto_async_request *areq = arg1;
	struct pci_dev *pdev;

	if (areq) {
		if (!status)
			output_iv_copyback(areq);
		if (inst_info) {
			pdev = inst_info->pdev;
			otx2_cpt_info_destroy(pdev, inst_info);
		}
		crypto_request_complete(areq, status);
	}
}
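/*
 * Walk a scatterlist and describe it to the engine as a list of
 * (virtual pointer, length) gather/scatter entries in req_info.
 */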
static inline void update_input_data(struct otx2_cpt_req_info *req_info,
				     struct scatterlist *inp_sg,
				     u32 nbytes, u32 *argcnt)
{
	req_info->req.dlen += nbytes;

	while (nbytes) {
		u32 len = (nbytes < inp_sg->length) ? nbytes : inp_sg->length;
		u8 *ptr = sg_virt(inp_sg);

		req_info->in[*argcnt].vptr = (void *)ptr;
		req_info->in[*argcnt].size = len;
		nbytes -= len;
		++(*argcnt);
		inp_sg = sg_next(inp_sg);
	}
}
static inline void update_output_data(struct otx2_cpt_req_info *req_info,
				      struct scatterlist *outp_sg,
				      u32 offset, u32 nbytes, u32 *argcnt)
{
	u32 len, sg_len;
	u8 *ptr;

	req_info->rlen += nbytes;

	while (nbytes) {
		sg_len = outp_sg->length - offset;
		len = (nbytes < sg_len) ? nbytes : sg_len;
		ptr = sg_virt(outp_sg);

		req_info->out[*argcnt].vptr = (void *) (ptr + offset);
		req_info->out[*argcnt].size = len;
		nbytes -= len;
		++(*argcnt);
		offset = 0;
		outp_sg = sg_next(outp_sg);
	}
}
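/*
 * Build the FLEXICRYPTO request header for an skcipher operation: the
 * 8-byte control word followed by the otx2_cpt_fc_ctx carrying the cipher
 * type, key and IV.  Both are prepended to the gather list.
 */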
static inline int create_ctx_hdr(struct skcipher_request *req, u32 enc,
				 u32 *argcnt)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
	struct otx2_cpt_fc_ctx *fctx = &rctx->fctx;
	int ivsize = crypto_skcipher_ivsize(stfm);
	u32 start = req->cryptlen - ivsize;
	gfp_t flags;

	flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			GFP_KERNEL : GFP_ATOMIC;
	req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
	req_info->ctrl.s.se_req = 1;

	req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_FC |
				DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
	if (enc) {
		req_info->req.opcode.s.minor = 2;
	} else {
		req_info->req.opcode.s.minor = 3;
		if ((ctx->cipher_type == OTX2_CPT_AES_CBC ||
		    ctx->cipher_type == OTX2_CPT_DES3_CBC) &&
		    req->src == req->dst) {
			req_info->iv_out = kmalloc(ivsize, flags);
			if (!req_info->iv_out)
				return -ENOMEM;

			scatterwalk_map_and_copy(req_info->iv_out, req->src,
						 start, ivsize, 0);
		}
	}
	/* Encryption data length */
	req_info->req.param1 = req->cryptlen;
	/* Authentication data length */
	req_info->req.param2 = 0;

	fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
	fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
	fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_CPTR;

	if (ctx->cipher_type == OTX2_CPT_AES_XTS)
		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
	else
		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);

	memcpy(fctx->enc.encr_iv, req->iv, crypto_skcipher_ivsize(stfm));

	cpu_to_be64s(&fctx->enc.enc_ctrl.u);

	/*
	 * Storing Packet Data Information in offset
	 * Control Word First 8 bytes
	 */
	req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
	req_info->in[*argcnt].size = CONTROL_WORD_LEN;
	req_info->req.dlen += CONTROL_WORD_LEN;
	++(*argcnt);

	req_info->in[*argcnt].vptr = (u8 *)fctx;
	req_info->in[*argcnt].size = sizeof(struct otx2_cpt_fc_ctx);
	req_info->req.dlen += sizeof(struct otx2_cpt_fc_ctx);
	++(*argcnt);

	return 0;
}
static inline int create_input_list(struct skcipher_request *req, u32 enc,
				    u32 enc_iv_len)
{
	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
	u32 argcnt = 0;
	int ret;

	ret = create_ctx_hdr(req, enc, &argcnt);
	if (ret)
		return ret;

	update_input_data(req_info, req->src, req->cryptlen, &argcnt);
	req_info->in_cnt = argcnt;

	return 0;
}
static inline void create_output_list(struct skcipher_request *req,
				      u32 enc_iv_len)
{
	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
	u32 argcnt = 0;

	/*
	 * OUTPUT Buffer Processing
	 * AES encryption/decryption output would be
	 * received in the following format
	 *
	 * ------IV--------|------ENCRYPTED/DECRYPTED DATA-----|
	 * [ 16 Bytes/     [   Request Enc/Dec/ DATA Len AES CBC ]
	 */
	update_output_data(req_info, req->dst, 0, req->cryptlen, &argcnt);
	req_info->out_cnt = argcnt;
}
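/*
 * Requests the engine cannot take (e.g. larger than OTX2_CPT_MAX_REQ_SIZE)
 * are handed to the software fallback tfm allocated at init time.
 */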
static int skcipher_do_fallback(struct skcipher_request *req, bool is_enc)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
	int ret;

	if (ctx->fbk_cipher) {
		skcipher_request_set_tfm(&rctx->sk_fbk_req, ctx->fbk_cipher);
		skcipher_request_set_callback(&rctx->sk_fbk_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&rctx->sk_fbk_req, req->src,
					   req->dst, req->cryptlen, req->iv);
		ret = is_enc ? crypto_skcipher_encrypt(&rctx->sk_fbk_req) :
			       crypto_skcipher_decrypt(&rctx->sk_fbk_req);
	} else {
		ret = -EINVAL;
	}
	return ret;
}
static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
	u32 enc_iv_len = crypto_skcipher_ivsize(stfm);
	struct pci_dev *pdev;
	int status, cpu_num;

	if (req->cryptlen == 0)
		return 0;

	if (!IS_ALIGNED(req->cryptlen, ctx->enc_align_len))
		return -EINVAL;

	if (req->cryptlen > OTX2_CPT_MAX_REQ_SIZE)
		return skcipher_do_fallback(req, enc);

	/* Clear control words */
	rctx->ctrl_word.flags = 0;
	rctx->fctx.enc.enc_ctrl.u = 0;

	status = create_input_list(req, enc, enc_iv_len);
	if (status)
		return status;
	create_output_list(req, enc_iv_len);

	status = get_se_device(&pdev, &cpu_num);
	if (status)
		return status;

	req_info->callback = otx2_cpt_skcipher_callback;
	req_info->areq = &req->base;
	req_info->req_type = OTX2_CPT_ENC_DEC_REQ;
	req_info->is_enc = enc;
	req_info->is_trunc_hmac = false;
	req_info->ctrl.s.grp = otx2_cpt_get_kcrypto_eng_grp_num(pdev);

	req_info->req.cptr = ctx->er_ctx.hw_ctx;
	req_info->req.cptr_dma = ctx->er_ctx.cptr_dma;

	/*
	 * We perform an asynchronous send and once
	 * the request is completed the driver would
	 * intimate through registered call back functions
	 */
	status = otx2_cpt_do_request(pdev, req_info, cpu_num);

	return status;
}
static int otx2_cpt_skcipher_encrypt(struct skcipher_request *req)
{
	return cpt_enc_dec(req, true);
}

static int otx2_cpt_skcipher_decrypt(struct skcipher_request *req)
{
	return cpt_enc_dec(req, false);
}
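/*
 * XTS uses two AES keys.  Key1 is stored at the start of enc_key and key2
 * at KEY2_OFFSET (byte 48), which appears to be the fixed layout the
 * flexi-crypto context expects regardless of the individual key size.
 */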
static int otx2_cpt_skcipher_xts_setkey(struct crypto_skcipher *tfm,
					const u8 *key, u32 keylen)
{
	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
	const u8 *key2 = key + (keylen / 2);
	const u8 *key1 = key;
	int ret;

	ret = xts_verify_key(tfm, key, keylen);
	if (ret)
		return ret;
	ctx->key_len = keylen;
	ctx->enc_align_len = 1;
	memcpy(ctx->enc_key, key1, keylen / 2);
	memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
	ctx->cipher_type = OTX2_CPT_AES_XTS;
	switch (ctx->key_len) {
	case 2 * AES_KEYSIZE_128:
		ctx->key_type = OTX2_CPT_AES_128_BIT;
		break;
	case 2 * AES_KEYSIZE_192:
		ctx->key_type = OTX2_CPT_AES_192_BIT;
		break;
	case 2 * AES_KEYSIZE_256:
		ctx->key_type = OTX2_CPT_AES_256_BIT;
		break;
	default:
		return -EINVAL;
	}
	return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
}
static int cpt_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  u32 keylen, u8 cipher_type)
{
	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (keylen != DES3_EDE_KEY_SIZE)
		return -EINVAL;

	ctx->key_len = keylen;
	ctx->cipher_type = cipher_type;
	ctx->enc_align_len = 8;

	memcpy(ctx->enc_key, key, keylen);

	return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
}

static int cpt_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  u32 keylen, u8 cipher_type)
{
	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->key_type = OTX2_CPT_AES_128_BIT;
		break;
	case AES_KEYSIZE_192:
		ctx->key_type = OTX2_CPT_AES_192_BIT;
		break;
	case AES_KEYSIZE_256:
		ctx->key_type = OTX2_CPT_AES_256_BIT;
		break;
	default:
		return -EINVAL;
	}
	if (cipher_type == OTX2_CPT_AES_CBC || cipher_type == OTX2_CPT_AES_ECB)
		ctx->enc_align_len = 16;
	else
		ctx->enc_align_len = 1;

	ctx->key_len = keylen;
	ctx->cipher_type = cipher_type;

	memcpy(ctx->enc_key, key, keylen);

	return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
}
static int otx2_cpt_skcipher_cbc_aes_setkey(struct crypto_skcipher *tfm,
					    const u8 *key, u32 keylen)
{
	return cpt_aes_setkey(tfm, key, keylen, OTX2_CPT_AES_CBC);
}

static int otx2_cpt_skcipher_ecb_aes_setkey(struct crypto_skcipher *tfm,
					    const u8 *key, u32 keylen)
{
	return cpt_aes_setkey(tfm, key, keylen, OTX2_CPT_AES_ECB);
}

static int otx2_cpt_skcipher_cbc_des3_setkey(struct crypto_skcipher *tfm,
					     const u8 *key, u32 keylen)
{
	return cpt_des_setkey(tfm, key, keylen, OTX2_CPT_DES3_CBC);
}

static int otx2_cpt_skcipher_ecb_des3_setkey(struct crypto_skcipher *tfm,
					     const u8 *key, u32 keylen)
{
	return cpt_des_setkey(tfm, key, keylen, OTX2_CPT_DES3_ECB);
}
static int cpt_skcipher_fallback_init(struct otx2_cpt_enc_ctx *ctx,
				      struct crypto_alg *alg)
{
	if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
		ctx->fbk_cipher =
				crypto_alloc_skcipher(alg->cra_name, 0,
						      CRYPTO_ALG_ASYNC |
						      CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(ctx->fbk_cipher)) {
			pr_err("%s() failed to allocate fallback for %s\n",
				__func__, alg->cra_name);
			return PTR_ERR(ctx->fbk_cipher);
		}
	}
	return 0;
}
static int otx2_cpt_enc_dec_init(struct crypto_skcipher *stfm)
{
	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct pci_dev *pdev;
	int ret, cpu_num;

	memset(ctx, 0, sizeof(*ctx));
	/*
	 * Additional memory for skcipher_request is
	 * allocated since the cryptd daemon uses
	 * this memory for request_ctx information
	 */
	crypto_skcipher_set_reqsize_dma(
		stfm, sizeof(struct otx2_cpt_req_ctx) +
		      sizeof(struct skcipher_request));

	ret = get_se_device(&pdev, &cpu_num);
	if (ret)
		return ret;

	ctx->pdev = pdev;
	ret = cn10k_cpt_hw_ctx_init(pdev, &ctx->er_ctx);
	if (ret)
		return ret;

	return cpt_skcipher_fallback_init(ctx, alg);
}
static void otx2_cpt_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (ctx->fbk_cipher) {
		crypto_free_skcipher(ctx->fbk_cipher);
		ctx->fbk_cipher = NULL;
	}
	cn10k_cpt_hw_ctx_clear(ctx->pdev, &ctx->er_ctx);
}
static int cpt_aead_fallback_init(struct otx2_cpt_aead_ctx *ctx,
				  struct crypto_alg *alg)
{
	if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
		ctx->fbk_cipher =
				crypto_alloc_aead(alg->cra_name, 0,
						  CRYPTO_ALG_ASYNC |
						  CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(ctx->fbk_cipher)) {
			pr_err("%s() failed to allocate fallback for %s\n",
				__func__, alg->cra_name);
			return PTR_ERR(ctx->fbk_cipher);
		}
	}
	return 0;
}
static int cpt_aead_init(struct crypto_aead *atfm, u8 cipher_type, u8 mac_type)
{
	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(atfm);
	struct crypto_tfm *tfm = crypto_aead_tfm(atfm);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct pci_dev *pdev;
	int ret, cpu_num;

	ctx->cipher_type = cipher_type;
	ctx->mac_type = mac_type;

	switch (ctx->mac_type) {
	case OTX2_CPT_SHA1:
		ctx->hashalg = crypto_alloc_shash("sha1", 0, 0);
		break;

	case OTX2_CPT_SHA256:
		ctx->hashalg = crypto_alloc_shash("sha256", 0, 0);
		break;

	case OTX2_CPT_SHA384:
		ctx->hashalg = crypto_alloc_shash("sha384", 0, 0);
		break;

	case OTX2_CPT_SHA512:
		ctx->hashalg = crypto_alloc_shash("sha512", 0, 0);
		break;
	}

	if (IS_ERR(ctx->hashalg))
		return PTR_ERR(ctx->hashalg);

	if (ctx->hashalg) {
		ctx->sdesc = alloc_sdesc(ctx->hashalg);
		if (!ctx->sdesc) {
			crypto_free_shash(ctx->hashalg);
			return -ENOMEM;
		}
	}

	/*
	 * When selected cipher is NULL we use HMAC opcode instead of
	 * FLEXICRYPTO opcode therefore we don't need to use HASH algorithms
	 * for calculating ipad and opad
	 */
	if (ctx->cipher_type != OTX2_CPT_CIPHER_NULL && ctx->hashalg) {
		int ss = crypto_shash_statesize(ctx->hashalg);

		ctx->ipad = kzalloc(ss, GFP_KERNEL);
		if (!ctx->ipad) {
			kfree(ctx->sdesc);
			crypto_free_shash(ctx->hashalg);
			return -ENOMEM;
		}

		ctx->opad = kzalloc(ss, GFP_KERNEL);
		if (!ctx->opad) {
			kfree(ctx->ipad);
			kfree(ctx->sdesc);
			crypto_free_shash(ctx->hashalg);
			return -ENOMEM;
		}
	}
	switch (ctx->cipher_type) {
	case OTX2_CPT_AES_CBC:
	case OTX2_CPT_AES_ECB:
		ctx->enc_align_len = 16;
		break;
	case OTX2_CPT_DES3_CBC:
	case OTX2_CPT_DES3_ECB:
		ctx->enc_align_len = 8;
		break;
	case OTX2_CPT_AES_GCM:
	case OTX2_CPT_CIPHER_NULL:
		ctx->enc_align_len = 1;
		break;
	}
	crypto_aead_set_reqsize_dma(atfm, sizeof(struct otx2_cpt_req_ctx));

	ret = get_se_device(&pdev, &cpu_num);
	if (ret)
		return ret;

	ctx->pdev = pdev;
	ret = cn10k_cpt_hw_ctx_init(pdev, &ctx->er_ctx);
	if (ret)
		return ret;

	return cpt_aead_fallback_init(ctx, alg);
}
static int otx2_cpt_aead_cbc_aes_sha1_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA1);
}

static int otx2_cpt_aead_cbc_aes_sha256_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA256);
}

static int otx2_cpt_aead_cbc_aes_sha384_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA384);
}

static int otx2_cpt_aead_cbc_aes_sha512_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA512);
}

static int otx2_cpt_aead_ecb_null_sha1_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA1);
}

static int otx2_cpt_aead_ecb_null_sha256_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA256);
}

static int otx2_cpt_aead_ecb_null_sha384_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA384);
}

static int otx2_cpt_aead_ecb_null_sha512_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA512);
}

static int otx2_cpt_aead_gcm_aes_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX2_CPT_AES_GCM, OTX2_CPT_MAC_NULL);
}
static void otx2_cpt_aead_exit(struct crypto_aead *tfm)
{
	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);

	kfree(ctx->ipad);
	kfree(ctx->opad);
	crypto_free_shash(ctx->hashalg);
	kfree(ctx->sdesc);

	if (ctx->fbk_cipher) {
		crypto_free_aead(ctx->fbk_cipher);
		ctx->fbk_cipher = NULL;
	}
	cn10k_cpt_hw_ctx_clear(ctx->pdev, &ctx->er_ctx);
}
static int otx2_cpt_aead_gcm_set_authsize(struct crypto_aead *tfm,
					  unsigned int authsize)
{
	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);

	if (crypto_rfc4106_check_authsize(authsize))
		return -EINVAL;

	tfm->authsize = authsize;
	/* Set authsize for fallback case */
	if (ctx->fbk_cipher)
		ctx->fbk_cipher->authsize = authsize;

	return 0;
}

static int otx2_cpt_aead_set_authsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	tfm->authsize = authsize;

	return 0;
}

static int otx2_cpt_aead_null_set_authsize(struct crypto_aead *tfm,
					   unsigned int authsize)
{
	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);

	ctx->is_trunc_hmac = true;
	tfm->authsize = authsize;

	return 0;
}
static struct otx2_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg)
{
	struct otx2_cpt_sdesc *sdesc;
	int size;

	size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
	sdesc = kmalloc(size, GFP_KERNEL);
	if (!sdesc)
		return NULL;

	sdesc->shash.tfm = alg;

	return sdesc;
}
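/*
 * The shash partial state exported in aead_hmac_init() is in CPU byte
 * order; swap_pad() converts the SHA state words to big endian before the
 * ipad/opad values are loaded into the flexi-crypto context for the engine.
 */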
static inline void swap_data32(void *buf, u32 len)
{
	cpu_to_be32_array(buf, buf, len / 4);
}

static inline void swap_data64(void *buf, u32 len)
{
	u64 *src = buf;
	int i = 0;

	for (i = 0 ; i < len / 8; i++, src++)
		cpu_to_be64s(src);
}

static int swap_pad(u8 mac_type, u8 *pad)
{
	struct sha512_state *sha512;
	struct sha256_state *sha256;
	struct sha1_state *sha1;

	switch (mac_type) {
	case OTX2_CPT_SHA1:
		sha1 = (struct sha1_state *)pad;
		swap_data32(sha1->state, SHA1_DIGEST_SIZE);
		break;

	case OTX2_CPT_SHA256:
		sha256 = (struct sha256_state *)pad;
		swap_data32(sha256->state, SHA256_DIGEST_SIZE);
		break;

	case OTX2_CPT_SHA384:
	case OTX2_CPT_SHA512:
		sha512 = (struct sha512_state *)pad;
		swap_data64(sha512->state, SHA512_DIGEST_SIZE);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
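/*
 * Precompute the HMAC inner and outer pads.  Following the standard HMAC
 * construction H((K ^ opad) || H((K ^ ipad) || m)), the key is padded to the
 * hash block size, XORed with 0x36 (ipad) and 0x5c (opad), and one
 * compression of each is run in software; the exported partial states are
 * handed to the engine so it only continues the inner and outer hashes.
 */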
static int aead_hmac_init(struct crypto_aead *cipher,
			  struct crypto_authenc_keys *keys)
{
	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
	int ds = crypto_shash_digestsize(ctx->hashalg);
	int bs = crypto_shash_blocksize(ctx->hashalg);
	int authkeylen = keys->authkeylen;
	u8 *ipad = NULL, *opad = NULL;
	int icount = 0;
	int ret;

	if (authkeylen > bs) {
		ret = crypto_shash_digest(&ctx->sdesc->shash, keys->authkey,
					  authkeylen, ctx->key);
		if (ret)
			goto calc_fail;

		authkeylen = ds;
	} else {
		memcpy(ctx->key, keys->authkey, authkeylen);
	}

	ctx->enc_key_len = keys->enckeylen;
	ctx->auth_key_len = authkeylen;

	if (ctx->cipher_type == OTX2_CPT_CIPHER_NULL)
		return keys->enckeylen ? -EINVAL : 0;

	switch (keys->enckeylen) {
	case AES_KEYSIZE_128:
		ctx->key_type = OTX2_CPT_AES_128_BIT;
		break;
	case AES_KEYSIZE_192:
		ctx->key_type = OTX2_CPT_AES_192_BIT;
		break;
	case AES_KEYSIZE_256:
		ctx->key_type = OTX2_CPT_AES_256_BIT;
		break;
	default:
		/* Invalid key length */
		return -EINVAL;
	}

	memcpy(ctx->key + authkeylen, keys->enckey, keys->enckeylen);

	ipad = ctx->ipad;
	opad = ctx->opad;

	memcpy(ipad, ctx->key, authkeylen);
	memset(ipad + authkeylen, 0, bs - authkeylen);
	memcpy(opad, ipad, bs);

	for (icount = 0; icount < bs; icount++) {
		ipad[icount] ^= 0x36;
		opad[icount] ^= 0x5c;
	}

	/*
	 * Partial Hash calculated from the software
	 * algorithm is retrieved for IPAD & OPAD
	 */

	/* IPAD Calculation */
	crypto_shash_init(&ctx->sdesc->shash);
	crypto_shash_update(&ctx->sdesc->shash, ipad, bs);
	crypto_shash_export(&ctx->sdesc->shash, ipad);
	ret = swap_pad(ctx->mac_type, ipad);
	if (ret)
		goto calc_fail;

	/* OPAD Calculation */
	crypto_shash_init(&ctx->sdesc->shash);
	crypto_shash_update(&ctx->sdesc->shash, opad, bs);
	crypto_shash_export(&ctx->sdesc->shash, opad);
	ret = swap_pad(ctx->mac_type, opad);

calc_fail:
	return ret;
}
static int otx2_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
					    const unsigned char *key,
					    unsigned int keylen)
{
	struct crypto_authenc_keys authenc_keys;

	return crypto_authenc_extractkeys(&authenc_keys, key, keylen) ?:
	       aead_hmac_init(cipher, &authenc_keys);
}

static int otx2_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
					     const unsigned char *key,
					     unsigned int keylen)
{
	return otx2_cpt_aead_cbc_aes_sha_setkey(cipher, key, keylen);
}

static int otx2_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
					const unsigned char *key,
					unsigned int keylen)
{
	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);

	/*
	 * For aes gcm we expect to get encryption key (16, 24, 32 bytes)
	 * and salt (4 bytes)
	 */
	switch (keylen) {
	case AES_KEYSIZE_128 + AES_GCM_SALT_SIZE:
		ctx->key_type = OTX2_CPT_AES_128_BIT;
		ctx->enc_key_len = AES_KEYSIZE_128;
		break;
	case AES_KEYSIZE_192 + AES_GCM_SALT_SIZE:
		ctx->key_type = OTX2_CPT_AES_192_BIT;
		ctx->enc_key_len = AES_KEYSIZE_192;
		break;
	case AES_KEYSIZE_256 + AES_GCM_SALT_SIZE:
		ctx->key_type = OTX2_CPT_AES_256_BIT;
		ctx->enc_key_len = AES_KEYSIZE_256;
		break;
	default:
		/* Invalid key and salt length */
		return -EINVAL;
	}

	/* Store encryption key and salt */
	memcpy(ctx->key, key, keylen);

	return crypto_aead_setkey(ctx->fbk_cipher, key, keylen);
}
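/*
 * Build the FLEXICRYPTO header for an AEAD request.  For CBC+HMAC the IV
 * comes from the context and the precomputed ipad/opad are included; for
 * GCM (RFC 4106) the 4-byte salt is placed in the context and the
 * per-request IV is taken from the input data (OTX2_CPT_FROM_DPTR) at the
 * offset recorded in the control word.
 */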
static inline int create_aead_ctx_hdr(struct aead_request *req, u32 enc,
				      u32 *argcnt)
{
	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
	struct otx2_cpt_fc_ctx *fctx = &rctx->fctx;
	int mac_len = crypto_aead_authsize(tfm);
	int ds;

	rctx->ctrl_word.e.enc_data_offset = req->assoclen;

	switch (ctx->cipher_type) {
	case OTX2_CPT_AES_CBC:
		if (req->assoclen > 248 || !IS_ALIGNED(req->assoclen, 8))
			return -EINVAL;

		fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_CPTR;
		/* Copy encryption key to context */
		memcpy(fctx->enc.encr_key, ctx->key + ctx->auth_key_len,
		       ctx->enc_key_len);
		/* Copy IV to context */
		memcpy(fctx->enc.encr_iv, req->iv, crypto_aead_ivsize(tfm));

		ds = crypto_shash_digestsize(ctx->hashalg);
		if (ctx->mac_type == OTX2_CPT_SHA384)
			ds = SHA512_DIGEST_SIZE;
		if (ctx->ipad)
			memcpy(fctx->hmac.e.ipad, ctx->ipad, ds);
		if (ctx->opad)
			memcpy(fctx->hmac.e.opad, ctx->opad, ds);
		break;

	case OTX2_CPT_AES_GCM:
		if (crypto_ipsec_check_assoclen(req->assoclen))
			return -EINVAL;

		fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_DPTR;
		/* Copy encryption key to context */
		memcpy(fctx->enc.encr_key, ctx->key, ctx->enc_key_len);
		/* Copy salt to context */
		memcpy(fctx->enc.encr_iv, ctx->key + ctx->enc_key_len,
		       AES_GCM_SALT_SIZE);

		rctx->ctrl_word.e.iv_offset = req->assoclen - AES_GCM_IV_OFFSET;
		break;

	default:
		/* Unknown cipher type */
		return -EINVAL;
	}
	cpu_to_be64s(&rctx->ctrl_word.flags);

	req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
	req_info->ctrl.s.se_req = 1;
	req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_FC |
				 DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
	if (enc) {
		req_info->req.opcode.s.minor = 2;
		req_info->req.param1 = req->cryptlen;
		req_info->req.param2 = req->cryptlen + req->assoclen;
	} else {
		req_info->req.opcode.s.minor = 3;
		req_info->req.param1 = req->cryptlen - mac_len;
		req_info->req.param2 = req->cryptlen + req->assoclen - mac_len;
	}

	fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
	fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
	fctx->enc.enc_ctrl.e.mac_type = ctx->mac_type;
	fctx->enc.enc_ctrl.e.mac_len = mac_len;
	cpu_to_be64s(&fctx->enc.enc_ctrl.u);

	/*
	 * Storing Packet Data Information in offset
	 * Control Word First 8 bytes
	 */
	req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
	req_info->in[*argcnt].size = CONTROL_WORD_LEN;
	req_info->req.dlen += CONTROL_WORD_LEN;
	++(*argcnt);

	req_info->in[*argcnt].vptr = (u8 *)fctx;
	req_info->in[*argcnt].size = sizeof(struct otx2_cpt_fc_ctx);
	req_info->req.dlen += sizeof(struct otx2_cpt_fc_ctx);
	++(*argcnt);

	return 0;
}
static inline void create_hmac_ctx_hdr(struct aead_request *req, u32 *argcnt,
				       u32 enc)
{
	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;

	req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
	req_info->ctrl.s.se_req = 1;
	req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_HMAC |
				 DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
	req_info->is_trunc_hmac = ctx->is_trunc_hmac;

	req_info->req.opcode.s.minor = 0;
	req_info->req.param1 = ctx->auth_key_len;
	req_info->req.param2 = ctx->mac_type << 8;

	/* Add authentication key */
	req_info->in[*argcnt].vptr = ctx->key;
	req_info->in[*argcnt].size = round_up(ctx->auth_key_len, 8);
	req_info->req.dlen += round_up(ctx->auth_key_len, 8);
	++(*argcnt);
}
static inline int create_aead_input_list(struct aead_request *req, u32 enc)
{
	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
	u32 inputlen = req->cryptlen + req->assoclen;
	u32 status, argcnt = 0;

	status = create_aead_ctx_hdr(req, enc, &argcnt);
	if (status)
		return status;
	update_input_data(req_info, req->src, inputlen, &argcnt);
	req_info->in_cnt = argcnt;

	return 0;
}

static inline void create_aead_output_list(struct aead_request *req, u32 enc,
					   u32 mac_len)
{
	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
	u32 argcnt = 0, outputlen = 0;

	if (enc)
		outputlen = req->cryptlen + req->assoclen + mac_len;
	else
		outputlen = req->cryptlen + req->assoclen - mac_len;

	update_output_data(req_info, req->dst, 0, outputlen, &argcnt);
	req_info->out_cnt = argcnt;
}
static inline void create_aead_null_input_list(struct aead_request *req,
					       u32 enc, u32 mac_len)
{
	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
	u32 inputlen, argcnt = 0;

	if (enc)
		inputlen = req->cryptlen + req->assoclen;
	else
		inputlen = req->cryptlen + req->assoclen - mac_len;

	create_hmac_ctx_hdr(req, &argcnt, enc);
	update_input_data(req_info, req->src, inputlen, &argcnt);
	req_info->in_cnt = argcnt;
}
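/*
 * Output list for the cipher-null AEADs: on encryption the engine only
 * produces the HMAC, which is scattered to the end of the destination
 * payload; on decryption the received HMAC is copied aside so the callback
 * can compare it with the value the engine calculates.
 */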
static inline int create_aead_null_output_list(struct aead_request *req,
					       u32 enc, u32 mac_len)
{
	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
	struct scatterlist *dst;
	u8 *ptr = NULL;
	int argcnt = 0, status, offset;
	u32 inputlen;

	if (enc)
		inputlen = req->cryptlen + req->assoclen;
	else
		inputlen = req->cryptlen + req->assoclen - mac_len;

	/*
	 * If source and destination are different
	 * then copy payload to destination
	 */
	if (req->src != req->dst) {

		ptr = kmalloc(inputlen, (req_info->areq->flags &
					 CRYPTO_TFM_REQ_MAY_SLEEP) ?
					 GFP_KERNEL : GFP_ATOMIC);
		if (!ptr)
			return -ENOMEM;

		status = sg_copy_to_buffer(req->src, sg_nents(req->src), ptr,
					   inputlen);
		if (status != inputlen) {
			status = -EINVAL;
			goto error_free;
		}
		status = sg_copy_from_buffer(req->dst, sg_nents(req->dst), ptr,
					     inputlen);
		if (status != inputlen) {
			status = -EINVAL;
			goto error_free;
		}
		kfree(ptr);
	}

	if (enc) {
		/*
		 * In an encryption scenario hmac needs
		 * to be appended after payload
		 */
		dst = req->dst;
		offset = inputlen;
		while (offset >= dst->length) {
			offset -= dst->length;
			dst = sg_next(dst);
			if (!dst)
				return -ENOENT;
		}

		update_output_data(req_info, dst, offset, mac_len, &argcnt);
	} else {
		/*
		 * In a decryption scenario calculated hmac for received
		 * payload needs to be compare with hmac received
		 */
		status = sg_copy_buffer(req->src, sg_nents(req->src),
					rctx->fctx.hmac.s.hmac_recv, mac_len,
					inputlen, true);
		if (status != mac_len)
			return -EINVAL;

		req_info->out[argcnt].vptr = rctx->fctx.hmac.s.hmac_calc;
		req_info->out[argcnt].size = mac_len;
		argcnt++;
	}

	req_info->out_cnt = argcnt;
	return 0;

error_free:
	kfree(ptr);
	return status;
}
static int aead_do_fallback(struct aead_request *req, bool is_enc)
{
	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(aead);
	int ret;

	if (ctx->fbk_cipher) {
		/* Store the cipher tfm and then use the fallback tfm */
		aead_request_set_tfm(&rctx->fbk_req, ctx->fbk_cipher);
		aead_request_set_callback(&rctx->fbk_req, req->base.flags,
					  req->base.complete, req->base.data);
		aead_request_set_crypt(&rctx->fbk_req, req->src,
				       req->dst, req->cryptlen, req->iv);
		aead_request_set_ad(&rctx->fbk_req, req->assoclen);
		ret = is_enc ? crypto_aead_encrypt(&rctx->fbk_req) :
			       crypto_aead_decrypt(&rctx->fbk_req);
	} else {
		ret = -EINVAL;
	}

	return ret;
}
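/*
 * Common AEAD submit path.  Requests whose lengths the engine cannot handle
 * (the param1/param2 checks below) are redirected to the software fallback;
 * everything else is sent asynchronously and completed from
 * otx2_cpt_aead_callback().
 */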
static int cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc)
{
	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
	struct pci_dev *pdev;
	int status, cpu_num;

	/* Clear control words */
	rctx->ctrl_word.flags = 0;
	rctx->fctx.enc.enc_ctrl.u = 0;

	req_info->callback = otx2_cpt_aead_callback;
	req_info->areq = &req->base;
	req_info->req_type = reg_type;
	req_info->is_enc = enc;
	req_info->is_trunc_hmac = false;

	req_info->req.cptr = ctx->er_ctx.hw_ctx;
	req_info->req.cptr_dma = ctx->er_ctx.cptr_dma;

	switch (reg_type) {
	case OTX2_CPT_AEAD_ENC_DEC_REQ:
		status = create_aead_input_list(req, enc);
		if (status)
			return status;
		create_aead_output_list(req, enc, crypto_aead_authsize(tfm));
		break;

	case OTX2_CPT_AEAD_ENC_DEC_NULL_REQ:
		create_aead_null_input_list(req, enc,
					    crypto_aead_authsize(tfm));
		status = create_aead_null_output_list(req, enc,
						crypto_aead_authsize(tfm));
		if (status)
			return status;
		break;

	default:
		return -EINVAL;
	}

	if (!IS_ALIGNED(req_info->req.param1, ctx->enc_align_len))
		return -EINVAL;

	if (!req_info->req.param2 ||
	    (req_info->req.param1 > OTX2_CPT_MAX_REQ_SIZE) ||
	    (req_info->req.param2 > OTX2_CPT_MAX_REQ_SIZE))
		return aead_do_fallback(req, enc);

	status = get_se_device(&pdev, &cpu_num);
	if (status)
		return status;

	req_info->ctrl.s.grp = otx2_cpt_get_kcrypto_eng_grp_num(pdev);

	/*
	 * We perform an asynchronous send and once
	 * the request is completed the driver would
	 * intimate through registered call back functions
	 */
	return otx2_cpt_do_request(pdev, req_info, cpu_num);
}
static int otx2_cpt_aead_encrypt(struct aead_request *req)
{
	return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_REQ, true);
}

static int otx2_cpt_aead_decrypt(struct aead_request *req)
{
	return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_REQ, false);
}

static int otx2_cpt_aead_null_encrypt(struct aead_request *req)
{
	return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_NULL_REQ, true);
}

static int otx2_cpt_aead_null_decrypt(struct aead_request *req)
{
	return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_NULL_REQ, false);
}
static struct skcipher_alg otx2_cpt_skciphers[] = { {
	.base.cra_name = "xts(aes)",
	.base.cra_driver_name = "cpt_xts_aes",
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx2_cpt_enc_dec_init,
	.exit = otx2_cpt_skcipher_exit,
	.ivsize = AES_BLOCK_SIZE,
	.min_keysize = 2 * AES_MIN_KEY_SIZE,
	.max_keysize = 2 * AES_MAX_KEY_SIZE,
	.setkey = otx2_cpt_skcipher_xts_setkey,
	.encrypt = otx2_cpt_skcipher_encrypt,
	.decrypt = otx2_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "cbc(aes)",
	.base.cra_driver_name = "cpt_cbc_aes",
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx2_cpt_enc_dec_init,
	.exit = otx2_cpt_skcipher_exit,
	.ivsize = AES_BLOCK_SIZE,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.setkey = otx2_cpt_skcipher_cbc_aes_setkey,
	.encrypt = otx2_cpt_skcipher_encrypt,
	.decrypt = otx2_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "ecb(aes)",
	.base.cra_driver_name = "cpt_ecb_aes",
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx2_cpt_enc_dec_init,
	.exit = otx2_cpt_skcipher_exit,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.setkey = otx2_cpt_skcipher_ecb_aes_setkey,
	.encrypt = otx2_cpt_skcipher_encrypt,
	.decrypt = otx2_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "cbc(des3_ede)",
	.base.cra_driver_name = "cpt_cbc_des3_ede",
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx2_cpt_enc_dec_init,
	.exit = otx2_cpt_skcipher_exit,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.ivsize = DES_BLOCK_SIZE,
	.setkey = otx2_cpt_skcipher_cbc_des3_setkey,
	.encrypt = otx2_cpt_skcipher_encrypt,
	.decrypt = otx2_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "ecb(des3_ede)",
	.base.cra_driver_name = "cpt_ecb_des3_ede",
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx2_cpt_enc_dec_init,
	.exit = otx2_cpt_skcipher_exit,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.setkey = otx2_cpt_skcipher_ecb_des3_setkey,
	.encrypt = otx2_cpt_skcipher_encrypt,
	.decrypt = otx2_cpt_skcipher_decrypt,
} };
static struct aead_alg otx2_cpt_aeads[] = { {
	.base = {
		.cra_name = "authenc(hmac(sha1),cbc(aes))",
		.cra_driver_name = "cpt_hmac_sha1_cbc_aes",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_module = THIS_MODULE,
	},
	.init = otx2_cpt_aead_cbc_aes_sha1_init,
	.exit = otx2_cpt_aead_exit,
	.setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
	.setauthsize = otx2_cpt_aead_set_authsize,
	.encrypt = otx2_cpt_aead_encrypt,
	.decrypt = otx2_cpt_aead_decrypt,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA1_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha256),cbc(aes))",
		.cra_driver_name = "cpt_hmac_sha256_cbc_aes",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_module = THIS_MODULE,
	},
	.init = otx2_cpt_aead_cbc_aes_sha256_init,
	.exit = otx2_cpt_aead_exit,
	.setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
	.setauthsize = otx2_cpt_aead_set_authsize,
	.encrypt = otx2_cpt_aead_encrypt,
	.decrypt = otx2_cpt_aead_decrypt,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA256_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha384),cbc(aes))",
		.cra_driver_name = "cpt_hmac_sha384_cbc_aes",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_module = THIS_MODULE,
	},
	.init = otx2_cpt_aead_cbc_aes_sha384_init,
	.exit = otx2_cpt_aead_exit,
	.setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
	.setauthsize = otx2_cpt_aead_set_authsize,
	.encrypt = otx2_cpt_aead_encrypt,
	.decrypt = otx2_cpt_aead_decrypt,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA384_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha512),cbc(aes))",
		.cra_driver_name = "cpt_hmac_sha512_cbc_aes",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_module = THIS_MODULE,
	},
	.init = otx2_cpt_aead_cbc_aes_sha512_init,
	.exit = otx2_cpt_aead_exit,
	.setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
	.setauthsize = otx2_cpt_aead_set_authsize,
	.encrypt = otx2_cpt_aead_encrypt,
	.decrypt = otx2_cpt_aead_decrypt,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA512_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha1),ecb(cipher_null))",
		.cra_driver_name = "cpt_hmac_sha1_ecb_null",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_module = THIS_MODULE,
	},
	.init = otx2_cpt_aead_ecb_null_sha1_init,
	.exit = otx2_cpt_aead_exit,
	.setkey = otx2_cpt_aead_ecb_null_sha_setkey,
	.setauthsize = otx2_cpt_aead_null_set_authsize,
	.encrypt = otx2_cpt_aead_null_encrypt,
	.decrypt = otx2_cpt_aead_null_decrypt,
	.maxauthsize = SHA1_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha256),ecb(cipher_null))",
		.cra_driver_name = "cpt_hmac_sha256_ecb_null",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_module = THIS_MODULE,
	},
	.init = otx2_cpt_aead_ecb_null_sha256_init,
	.exit = otx2_cpt_aead_exit,
	.setkey = otx2_cpt_aead_ecb_null_sha_setkey,
	.setauthsize = otx2_cpt_aead_null_set_authsize,
	.encrypt = otx2_cpt_aead_null_encrypt,
	.decrypt = otx2_cpt_aead_null_decrypt,
	.maxauthsize = SHA256_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha384),ecb(cipher_null))",
		.cra_driver_name = "cpt_hmac_sha384_ecb_null",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_module = THIS_MODULE,
	},
	.init = otx2_cpt_aead_ecb_null_sha384_init,
	.exit = otx2_cpt_aead_exit,
	.setkey = otx2_cpt_aead_ecb_null_sha_setkey,
	.setauthsize = otx2_cpt_aead_null_set_authsize,
	.encrypt = otx2_cpt_aead_null_encrypt,
	.decrypt = otx2_cpt_aead_null_decrypt,
	.maxauthsize = SHA384_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha512),ecb(cipher_null))",
		.cra_driver_name = "cpt_hmac_sha512_ecb_null",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_module = THIS_MODULE,
	},
	.init = otx2_cpt_aead_ecb_null_sha512_init,
	.exit = otx2_cpt_aead_exit,
	.setkey = otx2_cpt_aead_ecb_null_sha_setkey,
	.setauthsize = otx2_cpt_aead_null_set_authsize,
	.encrypt = otx2_cpt_aead_null_encrypt,
	.decrypt = otx2_cpt_aead_null_decrypt,
	.maxauthsize = SHA512_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "rfc4106(gcm(aes))",
		.cra_driver_name = "cpt_rfc4106_gcm_aes",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_module = THIS_MODULE,
	},
	.init = otx2_cpt_aead_gcm_aes_init,
	.exit = otx2_cpt_aead_exit,
	.setkey = otx2_cpt_aead_gcm_aes_setkey,
	.setauthsize = otx2_cpt_aead_gcm_set_authsize,
	.encrypt = otx2_cpt_aead_encrypt,
	.decrypt = otx2_cpt_aead_decrypt,
	.ivsize = AES_GCM_IV_SIZE,
	.maxauthsize = AES_GCM_ICV_SIZE,
} };
static inline int cpt_register_algs(void)
{
	int i, err = 0;

	for (i = 0; i < ARRAY_SIZE(otx2_cpt_skciphers); i++)
		otx2_cpt_skciphers[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;

	err = crypto_register_skciphers(otx2_cpt_skciphers,
					ARRAY_SIZE(otx2_cpt_skciphers));
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(otx2_cpt_aeads); i++)
		otx2_cpt_aeads[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;

	err = crypto_register_aeads(otx2_cpt_aeads,
				    ARRAY_SIZE(otx2_cpt_aeads));
	if (err) {
		crypto_unregister_skciphers(otx2_cpt_skciphers,
					    ARRAY_SIZE(otx2_cpt_skciphers));
		return err;
	}

	return 0;
}

static inline void cpt_unregister_algs(void)
{
	crypto_unregister_skciphers(otx2_cpt_skciphers,
				    ARRAY_SIZE(otx2_cpt_skciphers));
	crypto_unregister_aeads(otx2_cpt_aeads, ARRAY_SIZE(otx2_cpt_aeads));
}
static int compare_func(const void *lptr, const void *rptr)
{
	const struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
	const struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;

	if (ldesc->dev->devfn < rdesc->dev->devfn)
		return -1;
	if (ldesc->dev->devfn > rdesc->dev->devfn)
		return 1;

	return 0;
}
int otx2_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
			 int num_queues, int num_devices)
{
	int ret = 0;
	int count;

	mutex_lock(&mutex);
	count = atomic_read(&se_devices.count);
	if (count >= OTX2_CPT_MAX_LFS_NUM) {
		dev_err(&pdev->dev, "No space to add a new device\n");
		ret = -ENOSPC;
		goto unlock;
	}
	se_devices.desc[count].num_queues = num_queues;
	se_devices.desc[count++].dev = pdev;
	atomic_inc(&se_devices.count);

	if (atomic_read(&se_devices.count) == num_devices &&
	    is_crypto_registered == false) {
		if (cpt_register_algs()) {
			dev_err(&pdev->dev,
				"Error in registering crypto algorithms\n");
			ret = -EINVAL;
			goto unlock;
		}
		try_module_get(mod);
		is_crypto_registered = true;
	}
	sort(se_devices.desc, count, sizeof(struct cpt_device_desc),
	     compare_func, NULL);

unlock:
	mutex_unlock(&mutex);
	return ret;
}

void otx2_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod)
{
	struct cpt_device_table *dev_tbl;
	bool dev_found = false;
	int i, j, count;

	mutex_lock(&mutex);

	dev_tbl = &se_devices;
	count = atomic_read(&dev_tbl->count);
	for (i = 0; i < count; i++) {
		if (pdev == dev_tbl->desc[i].dev) {
			for (j = i; j < count-1; j++)
				dev_tbl->desc[j] = dev_tbl->desc[j+1];
			dev_found = true;
			break;
		}
	}

	if (!dev_found) {
		dev_err(&pdev->dev, "%s device not found\n", __func__);
		goto unlock;
	}
	if (atomic_dec_and_test(&se_devices.count)) {
		cpt_unregister_algs();
		module_put(mod);
		is_crypto_registered = false;
	}

unlock:
	mutex_unlock(&mutex);
}