// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX CPT driver
 *
 * Copyright (C) 2019 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <crypto/aes.h>
#include <crypto/authenc.h>
#include <crypto/cryptd.h>
#include <crypto/des.h>
#include <crypto/internal/aead.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/xts.h>
#include <crypto/scatterwalk.h>
#include <linux/sort.h>
#include <linux/module.h>
#include "otx_cptvf.h"
#include "otx_cptvf_algs.h"
#include "otx_cptvf_reqmgr.h"

#define CPT_MAX_VF_NUM 64
/* Size of salt in AES GCM mode */
#define AES_GCM_SALT_SIZE 4
/* Size of IV in AES GCM mode */
#define AES_GCM_IV_SIZE 8
/* Size of ICV (Integrity Check Value) in AES GCM mode */
#define AES_GCM_ICV_SIZE 16
/* Offset of IV in AES GCM mode */
#define AES_GCM_IV_OFFSET 8
#define CONTROL_WORD_LEN 8
#define KEY2_OFFSET 48
#define DMA_MODE_FLAG(dma_mode) \
        (((dma_mode) == OTX_CPT_DMA_GATHER_SCATTER) ? (1 << 7) : 0)

/* Truncated SHA digest size */
#define SHA1_TRUNC_DIGEST_SIZE 12
#define SHA256_TRUNC_DIGEST_SIZE 16
#define SHA384_TRUNC_DIGEST_SIZE 24
#define SHA512_TRUNC_DIGEST_SIZE 32
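
/*
 * Editorial note (assumption, not from the original source): these truncated
 * digest sizes are the byte equivalents of the 96/128/192/256-bit truncated
 * HMAC output lengths commonly used by IPsec-style transforms (see RFC 2404
 * and RFC 4868), which is why otx_cpt_aead_set_authsize() accepts them in
 * addition to the full digest sizes.
 */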

static DEFINE_MUTEX(mutex);
static int is_crypto_registered;

struct cpt_device_desc {
        enum otx_cptpf_type pf_type;
        struct pci_dev *dev;
        int num_queues;
};

struct cpt_device_table {
        atomic_t count;
        struct cpt_device_desc desc[CPT_MAX_VF_NUM];
};

static struct cpt_device_table se_devices = {
        .count = ATOMIC_INIT(0)
};

static struct cpt_device_table ae_devices = {
        .count = ATOMIC_INIT(0)
};

static struct otx_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg);

static inline int get_se_device(struct pci_dev **pdev, int *cpu_num)
{
        int count;

        count = atomic_read(&se_devices.count);
        if (count < 1)
                return -ENODEV;

        *cpu_num = get_cpu();

        if (se_devices.desc[0].pf_type == OTX_CPT_SE) {
                /*
                 * On OcteonTX platform there is one CPT instruction queue bound
                 * to each VF. We get maximum performance if one CPT queue
                 * is available for each cpu otherwise CPT queues need to be
                 * shared between cpus.
                 */
                if (*cpu_num >= count)
                        *cpu_num %= count;
                *pdev = se_devices.desc[*cpu_num].dev;
        } else {
                pr_err("Unknown PF type %d\n", se_devices.desc[0].pf_type);
                put_cpu();
                return -EINVAL;
        }
        put_cpu();

        return 0;
}
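
/*
 * Editorial note (summary derived from the code, not original text): for AEAD
 * requests that use the NULL cipher the engine only computes the HMAC; the
 * received tag (hmac_recv) and the calculated tag (hmac_calc) both live in the
 * request's flexi-crypto context and are compared in software below.
 */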

static inline int validate_hmac_cipher_null(struct otx_cpt_req_info *cpt_req)
{
        struct otx_cpt_req_ctx *rctx;
        struct aead_request *req;
        struct crypto_aead *tfm;

        req = container_of(cpt_req->areq, struct aead_request, base);
        tfm = crypto_aead_reqtfm(req);
        rctx = aead_request_ctx_dma(req);
        if (memcmp(rctx->fctx.hmac.s.hmac_calc,
                   rctx->fctx.hmac.s.hmac_recv,
                   crypto_aead_authsize(tfm)) != 0)
                return -EBADMSG;

        return 0;
}

static void otx_cpt_aead_callback(int status, void *arg1, void *arg2)
{
        struct otx_cpt_info_buffer *cpt_info = arg2;
        struct crypto_async_request *areq = arg1;
        struct otx_cpt_req_info *cpt_req;
        struct pci_dev *pdev;

        if (!cpt_info)
                goto complete;

        cpt_req = cpt_info->req;
        if (!status) {
                /*
                 * When selected cipher is NULL we need to manually
                 * verify whether calculated hmac value matches
                 * received hmac value
                 */
                if (cpt_req->req_type == OTX_CPT_AEAD_ENC_DEC_NULL_REQ &&
                    !cpt_req->is_enc)
                        status = validate_hmac_cipher_null(cpt_req);
        }
        pdev = cpt_info->pdev;
        do_request_cleanup(pdev, cpt_info);

complete:
        if (areq)
                crypto_request_complete(areq, status);
}

static void output_iv_copyback(struct crypto_async_request *areq)
{
        struct otx_cpt_req_info *req_info;
        struct skcipher_request *sreq;
        struct crypto_skcipher *stfm;
        struct otx_cpt_req_ctx *rctx;
        struct otx_cpt_enc_ctx *ctx;
        u32 start, ivsize;

        sreq = container_of(areq, struct skcipher_request, base);
        stfm = crypto_skcipher_reqtfm(sreq);
        ctx = crypto_skcipher_ctx(stfm);
        if (ctx->cipher_type == OTX_CPT_AES_CBC ||
            ctx->cipher_type == OTX_CPT_DES3_CBC) {
                rctx = skcipher_request_ctx_dma(sreq);
                req_info = &rctx->cpt_req;
                ivsize = crypto_skcipher_ivsize(stfm);
                start = sreq->cryptlen - ivsize;

                if (req_info->is_enc) {
                        scatterwalk_map_and_copy(sreq->iv, sreq->dst, start,
                                                 ivsize, 0);
                } else {
                        if (sreq->src != sreq->dst) {
                                scatterwalk_map_and_copy(sreq->iv, sreq->src,
                                                         start, ivsize, 0);
                        } else {
                                memcpy(sreq->iv, req_info->iv_out, ivsize);
                                kfree(req_info->iv_out);
                        }
                }
        }
}

static void otx_cpt_skcipher_callback(int status, void *arg1, void *arg2)
{
        struct otx_cpt_info_buffer *cpt_info = arg2;
        struct crypto_async_request *areq = arg1;
        struct pci_dev *pdev;

        if (areq) {
                if (!status)
                        output_iv_copyback(areq);
                if (cpt_info) {
                        pdev = cpt_info->pdev;
                        do_request_cleanup(pdev, cpt_info);
                }
                crypto_request_complete(areq, status);
        }
}

static inline void update_input_data(struct otx_cpt_req_info *req_info,
                                     struct scatterlist *inp_sg,
                                     u32 nbytes, u32 *argcnt)
{
        req_info->req.dlen += nbytes;

        while (nbytes) {
                u32 len = min(nbytes, inp_sg->length);
                u8 *ptr = sg_virt(inp_sg);

                req_info->in[*argcnt].vptr = (void *)ptr;
                req_info->in[*argcnt].size = len;
                nbytes -= len;
                ++(*argcnt);
                inp_sg = sg_next(inp_sg);
        }
}

static inline void update_output_data(struct otx_cpt_req_info *req_info,
                                      struct scatterlist *outp_sg,
                                      u32 offset, u32 nbytes, u32 *argcnt)
{
        req_info->rlen += nbytes;

        while (nbytes) {
                u32 len = min(nbytes, outp_sg->length - offset);
                u8 *ptr = sg_virt(outp_sg);

                req_info->out[*argcnt].vptr = (void *) (ptr + offset);
                req_info->out[*argcnt].size = len;
                nbytes -= len;
                ++(*argcnt);
                offset = 0;
                outp_sg = sg_next(outp_sg);
        }
}
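
/*
 * Editorial note (derived from the two helpers above, not original text):
 * each scatterlist segment becomes one gather (input) or scatter (output)
 * pointer/size pair handed to the CPT microcode. Only the first output
 * segment honours a non-zero offset, and dlen/rlen accumulate the total
 * request input/output lengths.
 */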

static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc,
                                 u32 *argcnt)
{
        struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
        struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
        struct otx_cpt_req_info *req_info = &rctx->cpt_req;
        struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
        struct otx_cpt_enc_ctx *ctx = crypto_tfm_ctx(tfm);
        struct otx_cpt_fc_ctx *fctx = &rctx->fctx;
        int ivsize = crypto_skcipher_ivsize(stfm);
        u32 start = req->cryptlen - ivsize;
        gfp_t flags;

        flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                        GFP_KERNEL : GFP_ATOMIC;
        req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
        req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;

        req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_FC |
                                DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
        if (enc)
                req_info->req.opcode.s.minor = 2;
        else
                req_info->req.opcode.s.minor = 3;
        if ((ctx->cipher_type == OTX_CPT_AES_CBC ||
            ctx->cipher_type == OTX_CPT_DES3_CBC) &&
            req->src == req->dst) {
                req_info->iv_out = kmalloc(ivsize, flags);
                if (!req_info->iv_out)
                        return -ENOMEM;

                scatterwalk_map_and_copy(req_info->iv_out, req->src,
                                         start, ivsize, 0);
        }
        /* Encryption data length */
        req_info->req.param1 = req->cryptlen;
        /* Authentication data length */
        req_info->req.param2 = 0;

        fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
        fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
        fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_CPTR;

        if (ctx->cipher_type == OTX_CPT_AES_XTS)
                memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
        else
                memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);

        memcpy(fctx->enc.encr_iv, req->iv, crypto_skcipher_ivsize(stfm));

        fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.cflags);

        /*
         * Storing Packet Data Information in offset
         * Control Word First 8 bytes
         */
        req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
        req_info->in[*argcnt].size = CONTROL_WORD_LEN;
        req_info->req.dlen += CONTROL_WORD_LEN;
        ++(*argcnt);

        req_info->in[*argcnt].vptr = (u8 *)fctx;
        req_info->in[*argcnt].size = sizeof(struct otx_cpt_fc_ctx);
        req_info->req.dlen += sizeof(struct otx_cpt_fc_ctx);
        ++(*argcnt);

        return 0;
}
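
/*
 * Editorial note (summary of the gather list built below, not original text):
 * the input gather list for a cipher request is laid out as
 * [8-byte control word][struct otx_cpt_fc_ctx][payload from req->src],
 * which is what create_ctx_hdr() plus update_input_data() assemble.
 */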

static inline u32 create_input_list(struct skcipher_request *req, u32 enc,
                                    u32 enc_iv_len)
{
        struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
        struct otx_cpt_req_info *req_info = &rctx->cpt_req;
        u32 argcnt = 0;
        int ret;

        ret = create_ctx_hdr(req, enc, &argcnt);
        if (ret)
                return ret;

        update_input_data(req_info, req->src, req->cryptlen, &argcnt);
        req_info->incnt = argcnt;

        return 0;
}

static inline void create_output_list(struct skcipher_request *req,
                                      u32 enc_iv_len)
{
        struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
        struct otx_cpt_req_info *req_info = &rctx->cpt_req;
        u32 argcnt = 0;

        /*
         * OUTPUT Buffer Processing
         * AES encryption/decryption output would be
         * received in the following format
         *
         * ------IV--------|------ENCRYPTED/DECRYPTED DATA-----|
         * [ 16 Bytes/     [   Request Enc/Dec/ DATA Len AES CBC ]
         */
        update_output_data(req_info, req->dst, 0, req->cryptlen, &argcnt);
        req_info->outcnt = argcnt;
}

static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
{
        struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
        struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
        struct otx_cpt_req_info *req_info = &rctx->cpt_req;
        u32 enc_iv_len = crypto_skcipher_ivsize(stfm);
        struct pci_dev *pdev;
        int status, cpu_num;

        /* Validate that request doesn't exceed maximum CPT supported size */
        if (req->cryptlen > OTX_CPT_MAX_REQ_SIZE)
                return -E2BIG;

        /* Clear control words */
        rctx->ctrl_word.flags = 0;
        rctx->fctx.enc.enc_ctrl.flags = 0;

        status = create_input_list(req, enc, enc_iv_len);
        if (status)
                return status;

        create_output_list(req, enc_iv_len);

        status = get_se_device(&pdev, &cpu_num);
        if (status)
                return status;

        req_info->callback = (void *)otx_cpt_skcipher_callback;
        req_info->areq = &req->base;
        req_info->req_type = OTX_CPT_ENC_DEC_REQ;
        req_info->is_enc = enc;
        req_info->is_trunc_hmac = false;
        req_info->ctrl.s.grp = 0;

        /*
         * We perform an asynchronous send and once
         * the request is completed the driver would
         * intimate through registered call back functions
         */
        status = otx_cpt_do_request(pdev, req_info, cpu_num);

        return status;
}

static int otx_cpt_skcipher_encrypt(struct skcipher_request *req)
{
        return cpt_enc_dec(req, true);
}

static int otx_cpt_skcipher_decrypt(struct skcipher_request *req)
{
        return cpt_enc_dec(req, false);
}

static int otx_cpt_skcipher_xts_setkey(struct crypto_skcipher *tfm,
                                       const u8 *key, u32 keylen)
{
        struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
        const u8 *key2 = key + (keylen / 2);
        const u8 *key1 = key;
        int ret;

        ret = xts_verify_key(tfm, key, keylen);
        if (ret)
                return ret;
        ctx->key_len = keylen;
        memcpy(ctx->enc_key, key1, keylen / 2);
        memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
        ctx->cipher_type = OTX_CPT_AES_XTS;
        switch (ctx->key_len) {
        case 2 * AES_KEYSIZE_128:
                ctx->key_type = OTX_CPT_AES_128_BIT;
                break;
        case 2 * AES_KEYSIZE_256:
                ctx->key_type = OTX_CPT_AES_256_BIT;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int cpt_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
                          u32 keylen, u8 cipher_type)
{
        struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

        if (keylen != DES3_EDE_KEY_SIZE)
                return -EINVAL;

        ctx->key_len = keylen;
        ctx->cipher_type = cipher_type;

        memcpy(ctx->enc_key, key, keylen);

        return 0;
}

static int cpt_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
                          u32 keylen, u8 cipher_type)
{
        struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

        switch (keylen) {
        case AES_KEYSIZE_128:
                ctx->key_type = OTX_CPT_AES_128_BIT;
                break;
        case AES_KEYSIZE_192:
                ctx->key_type = OTX_CPT_AES_192_BIT;
                break;
        case AES_KEYSIZE_256:
                ctx->key_type = OTX_CPT_AES_256_BIT;
                break;
        default:
                return -EINVAL;
        }
        ctx->key_len = keylen;
        ctx->cipher_type = cipher_type;

        memcpy(ctx->enc_key, key, keylen);

        return 0;
}

static int otx_cpt_skcipher_cbc_aes_setkey(struct crypto_skcipher *tfm,
                                           const u8 *key, u32 keylen)
{
        return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_CBC);
}

static int otx_cpt_skcipher_ecb_aes_setkey(struct crypto_skcipher *tfm,
                                           const u8 *key, u32 keylen)
{
        return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_ECB);
}

static int otx_cpt_skcipher_cbc_des3_setkey(struct crypto_skcipher *tfm,
                                             const u8 *key, u32 keylen)
{
        return cpt_des_setkey(tfm, key, keylen, OTX_CPT_DES3_CBC);
}

static int otx_cpt_skcipher_ecb_des3_setkey(struct crypto_skcipher *tfm,
                                             const u8 *key, u32 keylen)
{
        return cpt_des_setkey(tfm, key, keylen, OTX_CPT_DES3_ECB);
}

static int otx_cpt_enc_dec_init(struct crypto_skcipher *tfm)
{
        struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

        memset(ctx, 0, sizeof(*ctx));
        /*
         * Additional memory for skcipher_request is
         * allocated since the cryptd daemon uses
         * this memory for request_ctx information
         */
        crypto_skcipher_set_reqsize_dma(
                tfm, sizeof(struct otx_cpt_req_ctx) +
                     sizeof(struct skcipher_request));

        return 0;
}

static int cpt_aead_init(struct crypto_aead *tfm, u8 cipher_type, u8 mac_type)
{
        struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);

        ctx->cipher_type = cipher_type;
        ctx->mac_type = mac_type;

        switch (ctx->mac_type) {
        case OTX_CPT_SHA1:
                ctx->hashalg = crypto_alloc_shash("sha1", 0, 0);
                break;

        case OTX_CPT_SHA256:
                ctx->hashalg = crypto_alloc_shash("sha256", 0, 0);
                break;

        case OTX_CPT_SHA384:
                ctx->hashalg = crypto_alloc_shash("sha384", 0, 0);
                break;

        case OTX_CPT_SHA512:
                ctx->hashalg = crypto_alloc_shash("sha512", 0, 0);
                break;
        }

        if (IS_ERR(ctx->hashalg))
                return PTR_ERR(ctx->hashalg);

        crypto_aead_set_reqsize_dma(tfm, sizeof(struct otx_cpt_req_ctx));

        if (!ctx->hashalg)
                return 0;

        /*
         * When selected cipher is NULL we use HMAC opcode instead of
         * FLEXICRYPTO opcode therefore we don't need to use HASH algorithms
         * for calculating ipad and opad
         */
        if (ctx->cipher_type != OTX_CPT_CIPHER_NULL) {
                int ss = crypto_shash_statesize(ctx->hashalg);

                ctx->ipad = kzalloc(ss, GFP_KERNEL);
                if (!ctx->ipad) {
                        crypto_free_shash(ctx->hashalg);
                        return -ENOMEM;
                }

                ctx->opad = kzalloc(ss, GFP_KERNEL);
                if (!ctx->opad) {
                        kfree(ctx->ipad);
                        crypto_free_shash(ctx->hashalg);
                        return -ENOMEM;
                }
        }

        ctx->sdesc = alloc_sdesc(ctx->hashalg);
        if (!ctx->sdesc) {
                kfree(ctx->opad);
                kfree(ctx->ipad);
                crypto_free_shash(ctx->hashalg);
                return -ENOMEM;
        }

        return 0;
}

static int otx_cpt_aead_cbc_aes_sha1_init(struct crypto_aead *tfm)
{
        return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA1);
}

static int otx_cpt_aead_cbc_aes_sha256_init(struct crypto_aead *tfm)
{
        return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA256);
}

static int otx_cpt_aead_cbc_aes_sha384_init(struct crypto_aead *tfm)
{
        return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA384);
}

static int otx_cpt_aead_cbc_aes_sha512_init(struct crypto_aead *tfm)
{
        return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA512);
}

static int otx_cpt_aead_ecb_null_sha1_init(struct crypto_aead *tfm)
{
        return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA1);
}

static int otx_cpt_aead_ecb_null_sha256_init(struct crypto_aead *tfm)
{
        return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA256);
}

static int otx_cpt_aead_ecb_null_sha384_init(struct crypto_aead *tfm)
{
        return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA384);
}

static int otx_cpt_aead_ecb_null_sha512_init(struct crypto_aead *tfm)
{
        return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA512);
}

static int otx_cpt_aead_gcm_aes_init(struct crypto_aead *tfm)
{
        return cpt_aead_init(tfm, OTX_CPT_AES_GCM, OTX_CPT_MAC_NULL);
}

static void otx_cpt_aead_exit(struct crypto_aead *tfm)
{
        struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);

        kfree(ctx->ipad);
        kfree(ctx->opad);
        if (ctx->hashalg)
                crypto_free_shash(ctx->hashalg);
        kfree(ctx->sdesc);
}

/*
 * This is the Integrity Check Value validation (aka the authentication tag
 * length)
 */
static int otx_cpt_aead_set_authsize(struct crypto_aead *tfm,
                                     unsigned int authsize)
{
        struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);

        switch (ctx->mac_type) {
        case OTX_CPT_SHA1:
                if (authsize != SHA1_DIGEST_SIZE &&
                    authsize != SHA1_TRUNC_DIGEST_SIZE)
                        return -EINVAL;

                if (authsize == SHA1_TRUNC_DIGEST_SIZE)
                        ctx->is_trunc_hmac = true;
                break;

        case OTX_CPT_SHA256:
                if (authsize != SHA256_DIGEST_SIZE &&
                    authsize != SHA256_TRUNC_DIGEST_SIZE)
                        return -EINVAL;

                if (authsize == SHA256_TRUNC_DIGEST_SIZE)
                        ctx->is_trunc_hmac = true;
                break;

        case OTX_CPT_SHA384:
                if (authsize != SHA384_DIGEST_SIZE &&
                    authsize != SHA384_TRUNC_DIGEST_SIZE)
                        return -EINVAL;

                if (authsize == SHA384_TRUNC_DIGEST_SIZE)
                        ctx->is_trunc_hmac = true;
                break;

        case OTX_CPT_SHA512:
                if (authsize != SHA512_DIGEST_SIZE &&
                    authsize != SHA512_TRUNC_DIGEST_SIZE)
                        return -EINVAL;

                if (authsize == SHA512_TRUNC_DIGEST_SIZE)
                        ctx->is_trunc_hmac = true;
                break;

        case OTX_CPT_MAC_NULL:
                if (ctx->cipher_type == OTX_CPT_AES_GCM) {
                        if (authsize != AES_GCM_ICV_SIZE)
                                return -EINVAL;
                } else
                        return -EINVAL;
                break;

        default:
                return -EINVAL;
        }

        tfm->authsize = authsize;
        return 0;
}

static struct otx_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg)
{
        struct otx_cpt_sdesc *sdesc;
        int size;

        size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
        sdesc = kmalloc(size, GFP_KERNEL);
        if (!sdesc)
                return NULL;

        sdesc->shash.tfm = alg;

        return sdesc;
}

static inline void swap_data32(void *buf, u32 len)
{
        cpu_to_be32_array(buf, buf, len / 4);
}

static inline void swap_data64(void *buf, u32 len)
{
        __be64 *dst = buf;
        u64 *src = buf;
        int i;

        for (i = 0 ; i < len / 8; i++, src++, dst++)
                *dst = cpu_to_be64p(src);
}

static int swap_pad(u8 mac_type, u8 *pad)
{
        struct sha512_state *sha512;
        struct sha256_state *sha256;
        struct sha1_state *sha1;

        switch (mac_type) {
        case OTX_CPT_SHA1:
                sha1 = (struct sha1_state *)pad;
                swap_data32(sha1->state, SHA1_DIGEST_SIZE);
                break;

        case OTX_CPT_SHA256:
                sha256 = (struct sha256_state *)pad;
                swap_data32(sha256->state, SHA256_DIGEST_SIZE);
                break;

        case OTX_CPT_SHA384:
        case OTX_CPT_SHA512:
                sha512 = (struct sha512_state *)pad;
                swap_data64(sha512->state, SHA512_DIGEST_SIZE);
                break;

        default:
                return -EINVAL;
        }

        return 0;
}
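
/*
 * Editorial note (assumption based on the helpers above): the partial hash
 * state exported by the software shash is kept in CPU endianness, while the
 * CPT microcode expects the SHA state words in big-endian order, hence the
 * 32-bit/64-bit byte swaps applied to the exported ipad/opad state.
 */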

static int aead_hmac_init(struct crypto_aead *cipher,
                          struct crypto_authenc_keys *keys)
{
        struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
        int ds = crypto_shash_digestsize(ctx->hashalg);
        int bs = crypto_shash_blocksize(ctx->hashalg);
        int authkeylen = keys->authkeylen;
        u8 *ipad = NULL, *opad = NULL;
        int icount;
        int ret;

        if (authkeylen > bs) {
                ret = crypto_shash_digest(&ctx->sdesc->shash, keys->authkey,
                                          authkeylen, ctx->key);
                if (ret)
                        return ret;
                authkeylen = ds;
        } else
                memcpy(ctx->key, keys->authkey, authkeylen);

        ctx->enc_key_len = keys->enckeylen;
        ctx->auth_key_len = authkeylen;

        if (ctx->cipher_type == OTX_CPT_CIPHER_NULL)
                return keys->enckeylen ? -EINVAL : 0;

        switch (keys->enckeylen) {
        case AES_KEYSIZE_128:
                ctx->key_type = OTX_CPT_AES_128_BIT;
                break;
        case AES_KEYSIZE_192:
                ctx->key_type = OTX_CPT_AES_192_BIT;
                break;
        case AES_KEYSIZE_256:
                ctx->key_type = OTX_CPT_AES_256_BIT;
                break;
        default:
                /* Invalid key length */
                return -EINVAL;
        }

        memcpy(ctx->key + authkeylen, keys->enckey, keys->enckeylen);

        ipad = ctx->ipad;
        opad = ctx->opad;

        memcpy(ipad, ctx->key, authkeylen);
        memset(ipad + authkeylen, 0, bs - authkeylen);
        memcpy(opad, ipad, bs);

        for (icount = 0; icount < bs; icount++) {
                ipad[icount] ^= 0x36;
                opad[icount] ^= 0x5c;
        }

        /*
         * Partial Hash calculated from the software
         * algorithm is retrieved for IPAD & OPAD
         */

        /* IPAD Calculation */
        crypto_shash_init(&ctx->sdesc->shash);
        crypto_shash_update(&ctx->sdesc->shash, ipad, bs);
        crypto_shash_export(&ctx->sdesc->shash, ipad);
        ret = swap_pad(ctx->mac_type, ipad);
        if (ret)
                return ret;

        /* OPAD Calculation */
        crypto_shash_init(&ctx->sdesc->shash);
        crypto_shash_update(&ctx->sdesc->shash, opad, bs);
        crypto_shash_export(&ctx->sdesc->shash, opad);
        ret = swap_pad(ctx->mac_type, opad);

        return ret;
}
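
/*
 * Editorial note (standard HMAC construction, not original text):
 * HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)). aead_hmac_init() only
 * hashes the single padded key block for each of ipad and opad and exports
 * that partial state; the hardware later resumes from these states when it
 * processes the actual message.
 */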

static int otx_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
                                           const unsigned char *key,
                                           unsigned int keylen)
{
        struct crypto_authenc_keys authenc_keys;
        int status;

        status = crypto_authenc_extractkeys(&authenc_keys, key, keylen);
        if (status)
                return status;

        status = aead_hmac_init(cipher, &authenc_keys);

        return status;
}

static int otx_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
                                            const unsigned char *key,
                                            unsigned int keylen)
{
        return otx_cpt_aead_cbc_aes_sha_setkey(cipher, key, keylen);
}

static int otx_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
                                       const unsigned char *key,
                                       unsigned int keylen)
{
        struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);

        /*
         * For aes gcm we expect to get encryption key (16, 24, 32 bytes)
         * and salt (4 bytes)
         */
        switch (keylen) {
        case AES_KEYSIZE_128 + AES_GCM_SALT_SIZE:
                ctx->key_type = OTX_CPT_AES_128_BIT;
                ctx->enc_key_len = AES_KEYSIZE_128;
                break;
        case AES_KEYSIZE_192 + AES_GCM_SALT_SIZE:
                ctx->key_type = OTX_CPT_AES_192_BIT;
                ctx->enc_key_len = AES_KEYSIZE_192;
                break;
        case AES_KEYSIZE_256 + AES_GCM_SALT_SIZE:
                ctx->key_type = OTX_CPT_AES_256_BIT;
                ctx->enc_key_len = AES_KEYSIZE_256;
                break;
        default:
                /* Invalid key and salt length */
                return -EINVAL;
        }

        /* Store encryption key and salt */
        memcpy(ctx->key, key, keylen);

        return 0;
}

static inline u32 create_aead_ctx_hdr(struct aead_request *req, u32 enc,
                                      u32 *argcnt)
{
        struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
        struct otx_cpt_req_info *req_info = &rctx->cpt_req;
        struct otx_cpt_fc_ctx *fctx = &rctx->fctx;
        int mac_len = crypto_aead_authsize(tfm);
        int ds;

        rctx->ctrl_word.e.enc_data_offset = req->assoclen;

        switch (ctx->cipher_type) {
        case OTX_CPT_AES_CBC:
                fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_CPTR;
                /* Copy encryption key to context */
                memcpy(fctx->enc.encr_key, ctx->key + ctx->auth_key_len,
                       ctx->enc_key_len);
                /* Copy IV to context */
                memcpy(fctx->enc.encr_iv, req->iv, crypto_aead_ivsize(tfm));

                ds = crypto_shash_digestsize(ctx->hashalg);
                if (ctx->mac_type == OTX_CPT_SHA384)
                        ds = SHA512_DIGEST_SIZE;
                memcpy(fctx->hmac.e.ipad, ctx->ipad, ds);
                memcpy(fctx->hmac.e.opad, ctx->opad, ds);
                break;

        case OTX_CPT_AES_GCM:
                fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_DPTR;
                /* Copy encryption key to context */
                memcpy(fctx->enc.encr_key, ctx->key, ctx->enc_key_len);
                /* Copy salt to context */
                memcpy(fctx->enc.encr_iv, ctx->key + ctx->enc_key_len,
                       AES_GCM_SALT_SIZE);

                rctx->ctrl_word.e.iv_offset = req->assoclen - AES_GCM_IV_OFFSET;
                break;

        default:
                /* Unknown cipher type */
                return -EINVAL;
        }
        rctx->ctrl_word.flags = cpu_to_be64(rctx->ctrl_word.cflags);

        req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
        req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
        req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_FC |
                                DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
        if (enc) {
                req_info->req.opcode.s.minor = 2;
                req_info->req.param1 = req->cryptlen;
                req_info->req.param2 = req->cryptlen + req->assoclen;
        } else {
                req_info->req.opcode.s.minor = 3;
                req_info->req.param1 = req->cryptlen - mac_len;
                req_info->req.param2 = req->cryptlen + req->assoclen - mac_len;
        }

        fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
        fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
        fctx->enc.enc_ctrl.e.mac_type = ctx->mac_type;
        fctx->enc.enc_ctrl.e.mac_len = mac_len;
        fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.cflags);

        /*
         * Storing Packet Data Information in offset
         * Control Word First 8 bytes
         */
        req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
        req_info->in[*argcnt].size = CONTROL_WORD_LEN;
        req_info->req.dlen += CONTROL_WORD_LEN;
        ++(*argcnt);

        req_info->in[*argcnt].vptr = (u8 *)fctx;
        req_info->in[*argcnt].size = sizeof(struct otx_cpt_fc_ctx);
        req_info->req.dlen += sizeof(struct otx_cpt_fc_ctx);
        ++(*argcnt);

        return 0;
}
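
/*
 * Editorial note (derived from create_aead_ctx_hdr() above, not original
 * text): param1/param2 carry the encryption and authentication data lengths
 * seen by the microcode; for decryption the authentication tag (mac_len
 * bytes) is excluded from both, and the control word records where the
 * encrypted data (and, for GCM, the IV) starts inside the DPTR buffer.
 */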

static inline u32 create_hmac_ctx_hdr(struct aead_request *req, u32 *argcnt,
                                      u32 enc)
{
        struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
        struct otx_cpt_req_info *req_info = &rctx->cpt_req;

        req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
        req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
        req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_HMAC |
                                DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
        req_info->is_trunc_hmac = ctx->is_trunc_hmac;

        req_info->req.opcode.s.minor = 0;
        req_info->req.param1 = ctx->auth_key_len;
        req_info->req.param2 = ctx->mac_type << 8;

        /* Add authentication key */
        req_info->in[*argcnt].vptr = ctx->key;
        req_info->in[*argcnt].size = round_up(ctx->auth_key_len, 8);
        req_info->req.dlen += round_up(ctx->auth_key_len, 8);
        ++(*argcnt);

        return 0;
}

static inline u32 create_aead_input_list(struct aead_request *req, u32 enc)
{
        struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
        struct otx_cpt_req_info *req_info = &rctx->cpt_req;
        u32 inputlen = req->cryptlen + req->assoclen;
        u32 status, argcnt = 0;

        status = create_aead_ctx_hdr(req, enc, &argcnt);
        if (status)
                return status;
        update_input_data(req_info, req->src, inputlen, &argcnt);
        req_info->incnt = argcnt;

        return 0;
}

static inline u32 create_aead_output_list(struct aead_request *req, u32 enc,
                                          u32 mac_len)
{
        struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
        struct otx_cpt_req_info *req_info = &rctx->cpt_req;
        u32 argcnt = 0, outputlen = 0;

        if (enc)
                outputlen = req->cryptlen + req->assoclen + mac_len;
        else
                outputlen = req->cryptlen + req->assoclen - mac_len;

        update_output_data(req_info, req->dst, 0, outputlen, &argcnt);
        req_info->outcnt = argcnt;

        return 0;
}

static inline u32 create_aead_null_input_list(struct aead_request *req,
                                              u32 enc, u32 mac_len)
{
        struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
        struct otx_cpt_req_info *req_info = &rctx->cpt_req;
        u32 inputlen, argcnt = 0;

        if (enc)
                inputlen = req->cryptlen + req->assoclen;
        else
                inputlen = req->cryptlen + req->assoclen - mac_len;

        create_hmac_ctx_hdr(req, &argcnt, enc);
        update_input_data(req_info, req->src, inputlen, &argcnt);
        req_info->incnt = argcnt;

        return 0;
}

static inline u32 create_aead_null_output_list(struct aead_request *req,
                                               u32 enc, u32 mac_len)
{
        struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
        struct otx_cpt_req_info *req_info = &rctx->cpt_req;
        struct scatterlist *dst;
        u8 *ptr = NULL;
        int argcnt = 0, status, offset;
        u32 inputlen;

        if (enc)
                inputlen = req->cryptlen + req->assoclen;
        else
                inputlen = req->cryptlen + req->assoclen - mac_len;

        /*
         * If source and destination are different
         * then copy payload to destination
         */
        if (req->src != req->dst) {
                ptr = kmalloc(inputlen, (req_info->areq->flags &
                                         CRYPTO_TFM_REQ_MAY_SLEEP) ?
                                         GFP_KERNEL : GFP_ATOMIC);
                if (!ptr) {
                        status = -ENOMEM;
                        goto error;
                }

                status = sg_copy_to_buffer(req->src, sg_nents(req->src), ptr,
                                           inputlen);
                if (status != inputlen) {
                        status = -EINVAL;
                        goto error_free;
                }
                status = sg_copy_from_buffer(req->dst, sg_nents(req->dst), ptr,
                                             inputlen);
                if (status != inputlen) {
                        status = -EINVAL;
                        goto error_free;
                }
                kfree(ptr);
        }

        if (enc) {
                /*
                 * In an encryption scenario hmac needs
                 * to be appended after payload
                 */
                dst = req->dst;
                offset = inputlen;
                while (offset >= dst->length) {
                        offset -= dst->length;
                        dst = sg_next(dst);
                        if (!dst) {
                                status = -ENOENT;
                                goto error;
                        }
                }

                update_output_data(req_info, dst, offset, mac_len, &argcnt);
        } else {
                /*
                 * In a decryption scenario calculated hmac for received
                 * payload needs to be compare with hmac received
                 */
                status = sg_copy_buffer(req->src, sg_nents(req->src),
                                        rctx->fctx.hmac.s.hmac_recv, mac_len,
                                        inputlen, true);
                if (status != mac_len) {
                        status = -EINVAL;
                        goto error;
                }

                req_info->out[argcnt].vptr = rctx->fctx.hmac.s.hmac_calc;
                req_info->out[argcnt].size = mac_len;
                argcnt++;
        }

        req_info->outcnt = argcnt;
        return 0;

error_free:
        kfree(ptr);
error:
        return status;
}
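
/*
 * Editorial note: for the NULL-cipher (HMAC-only) decrypt path the engine
 * writes its digest into rctx->fctx.hmac.s.hmac_calc while the tag copied
 * from req->src sits in hmac_recv; validate_hmac_cipher_null() compares the
 * two on completion. This summary is derived from the code above and is not
 * part of the original source.
 */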

static u32 cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc)
{
        struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
        struct otx_cpt_req_info *req_info = &rctx->cpt_req;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct pci_dev *pdev;
        u32 status, cpu_num;

        /* Clear control words */
        rctx->ctrl_word.flags = 0;
        rctx->fctx.enc.enc_ctrl.flags = 0;

        req_info->callback = otx_cpt_aead_callback;
        req_info->areq = &req->base;
        req_info->req_type = reg_type;
        req_info->is_enc = enc;
        req_info->is_trunc_hmac = false;

        switch (reg_type) {
        case OTX_CPT_AEAD_ENC_DEC_REQ:
                status = create_aead_input_list(req, enc);
                if (status)
                        return status;
                status = create_aead_output_list(req, enc,
                                                 crypto_aead_authsize(tfm));
                if (status)
                        return status;
                break;

        case OTX_CPT_AEAD_ENC_DEC_NULL_REQ:
                status = create_aead_null_input_list(req, enc,
                                                     crypto_aead_authsize(tfm));
                if (status)
                        return status;
                status = create_aead_null_output_list(req, enc,
                                                      crypto_aead_authsize(tfm));
                if (status)
                        return status;
                break;

        default:
                return -EINVAL;
        }

        /* Validate that request doesn't exceed maximum CPT supported size */
        if (req_info->req.param1 > OTX_CPT_MAX_REQ_SIZE ||
            req_info->req.param2 > OTX_CPT_MAX_REQ_SIZE)
                return -E2BIG;

        status = get_se_device(&pdev, &cpu_num);
        if (status)
                return status;

        req_info->ctrl.s.grp = 0;

        status = otx_cpt_do_request(pdev, req_info, cpu_num);
        /*
         * We perform an asynchronous send and once
         * the request is completed the driver would
         * intimate through registered call back functions
         */
        return status;
}

static int otx_cpt_aead_encrypt(struct aead_request *req)
{
        return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_REQ, true);
}

static int otx_cpt_aead_decrypt(struct aead_request *req)
{
        return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_REQ, false);
}

static int otx_cpt_aead_null_encrypt(struct aead_request *req)
{
        return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_NULL_REQ, true);
}

static int otx_cpt_aead_null_decrypt(struct aead_request *req)
{
        return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_NULL_REQ, false);
}
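
/*
 * Editorial note (assumption about the chosen value): the cra_priority of
 * 4001 used in the algorithm definitions below is set above the common
 * software implementations so that, once registered, the crypto API prefers
 * these hardware-offloaded algorithms when a caller asks for e.g. "cbc(aes)"
 * without naming a specific driver.
 */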

static struct skcipher_alg otx_cpt_skciphers[] = { {
        .base.cra_name = "xts(aes)",
        .base.cra_driver_name = "cpt_xts_aes",
        .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
        .base.cra_blocksize = AES_BLOCK_SIZE,
        .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
        .base.cra_alignmask = 7,
        .base.cra_priority = 4001,
        .base.cra_module = THIS_MODULE,

        .init = otx_cpt_enc_dec_init,
        .ivsize = AES_BLOCK_SIZE,
        .min_keysize = 2 * AES_MIN_KEY_SIZE,
        .max_keysize = 2 * AES_MAX_KEY_SIZE,
        .setkey = otx_cpt_skcipher_xts_setkey,
        .encrypt = otx_cpt_skcipher_encrypt,
        .decrypt = otx_cpt_skcipher_decrypt,
}, {
        .base.cra_name = "cbc(aes)",
        .base.cra_driver_name = "cpt_cbc_aes",
        .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
        .base.cra_blocksize = AES_BLOCK_SIZE,
        .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
        .base.cra_alignmask = 7,
        .base.cra_priority = 4001,
        .base.cra_module = THIS_MODULE,

        .init = otx_cpt_enc_dec_init,
        .ivsize = AES_BLOCK_SIZE,
        .min_keysize = AES_MIN_KEY_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE,
        .setkey = otx_cpt_skcipher_cbc_aes_setkey,
        .encrypt = otx_cpt_skcipher_encrypt,
        .decrypt = otx_cpt_skcipher_decrypt,
}, {
        .base.cra_name = "ecb(aes)",
        .base.cra_driver_name = "cpt_ecb_aes",
        .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
        .base.cra_blocksize = AES_BLOCK_SIZE,
        .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
        .base.cra_alignmask = 7,
        .base.cra_priority = 4001,
        .base.cra_module = THIS_MODULE,

        .init = otx_cpt_enc_dec_init,
        .min_keysize = AES_MIN_KEY_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE,
        .setkey = otx_cpt_skcipher_ecb_aes_setkey,
        .encrypt = otx_cpt_skcipher_encrypt,
        .decrypt = otx_cpt_skcipher_decrypt,
}, {
        .base.cra_name = "cbc(des3_ede)",
        .base.cra_driver_name = "cpt_cbc_des3_ede",
        .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
        .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
        .base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx),
        .base.cra_alignmask = 7,
        .base.cra_priority = 4001,
        .base.cra_module = THIS_MODULE,

        .init = otx_cpt_enc_dec_init,
        .min_keysize = DES3_EDE_KEY_SIZE,
        .max_keysize = DES3_EDE_KEY_SIZE,
        .ivsize = DES_BLOCK_SIZE,
        .setkey = otx_cpt_skcipher_cbc_des3_setkey,
        .encrypt = otx_cpt_skcipher_encrypt,
        .decrypt = otx_cpt_skcipher_decrypt,
}, {
        .base.cra_name = "ecb(des3_ede)",
        .base.cra_driver_name = "cpt_ecb_des3_ede",
        .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
        .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
        .base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx),
        .base.cra_alignmask = 7,
        .base.cra_priority = 4001,
        .base.cra_module = THIS_MODULE,

        .init = otx_cpt_enc_dec_init,
        .min_keysize = DES3_EDE_KEY_SIZE,
        .max_keysize = DES3_EDE_KEY_SIZE,
        .setkey = otx_cpt_skcipher_ecb_des3_setkey,
        .encrypt = otx_cpt_skcipher_encrypt,
        .decrypt = otx_cpt_skcipher_decrypt,
} };

static struct aead_alg otx_cpt_aeads[] = { {
        .base = {
                .cra_name = "authenc(hmac(sha1),cbc(aes))",
                .cra_driver_name = "cpt_hmac_sha1_cbc_aes",
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
                .cra_priority = 4001,
                .cra_module = THIS_MODULE,
        },
        .init = otx_cpt_aead_cbc_aes_sha1_init,
        .exit = otx_cpt_aead_exit,
        .setkey = otx_cpt_aead_cbc_aes_sha_setkey,
        .setauthsize = otx_cpt_aead_set_authsize,
        .encrypt = otx_cpt_aead_encrypt,
        .decrypt = otx_cpt_aead_decrypt,
        .ivsize = AES_BLOCK_SIZE,
        .maxauthsize = SHA1_DIGEST_SIZE,
}, {
        .base = {
                .cra_name = "authenc(hmac(sha256),cbc(aes))",
                .cra_driver_name = "cpt_hmac_sha256_cbc_aes",
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
                .cra_priority = 4001,
                .cra_module = THIS_MODULE,
        },
        .init = otx_cpt_aead_cbc_aes_sha256_init,
        .exit = otx_cpt_aead_exit,
        .setkey = otx_cpt_aead_cbc_aes_sha_setkey,
        .setauthsize = otx_cpt_aead_set_authsize,
        .encrypt = otx_cpt_aead_encrypt,
        .decrypt = otx_cpt_aead_decrypt,
        .ivsize = AES_BLOCK_SIZE,
        .maxauthsize = SHA256_DIGEST_SIZE,
}, {
        .base = {
                .cra_name = "authenc(hmac(sha384),cbc(aes))",
                .cra_driver_name = "cpt_hmac_sha384_cbc_aes",
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
                .cra_priority = 4001,
                .cra_module = THIS_MODULE,
        },
        .init = otx_cpt_aead_cbc_aes_sha384_init,
        .exit = otx_cpt_aead_exit,
        .setkey = otx_cpt_aead_cbc_aes_sha_setkey,
        .setauthsize = otx_cpt_aead_set_authsize,
        .encrypt = otx_cpt_aead_encrypt,
        .decrypt = otx_cpt_aead_decrypt,
        .ivsize = AES_BLOCK_SIZE,
        .maxauthsize = SHA384_DIGEST_SIZE,
}, {
        .base = {
                .cra_name = "authenc(hmac(sha512),cbc(aes))",
                .cra_driver_name = "cpt_hmac_sha512_cbc_aes",
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
                .cra_priority = 4001,
                .cra_module = THIS_MODULE,
        },
        .init = otx_cpt_aead_cbc_aes_sha512_init,
        .exit = otx_cpt_aead_exit,
        .setkey = otx_cpt_aead_cbc_aes_sha_setkey,
        .setauthsize = otx_cpt_aead_set_authsize,
        .encrypt = otx_cpt_aead_encrypt,
        .decrypt = otx_cpt_aead_decrypt,
        .ivsize = AES_BLOCK_SIZE,
        .maxauthsize = SHA512_DIGEST_SIZE,
}, {
        .base = {
                .cra_name = "authenc(hmac(sha1),ecb(cipher_null))",
                .cra_driver_name = "cpt_hmac_sha1_ecb_null",
                .cra_blocksize = 1,
                .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
                .cra_priority = 4001,
                .cra_module = THIS_MODULE,
        },
        .init = otx_cpt_aead_ecb_null_sha1_init,
        .exit = otx_cpt_aead_exit,
        .setkey = otx_cpt_aead_ecb_null_sha_setkey,
        .setauthsize = otx_cpt_aead_set_authsize,
        .encrypt = otx_cpt_aead_null_encrypt,
        .decrypt = otx_cpt_aead_null_decrypt,
        .maxauthsize = SHA1_DIGEST_SIZE,
}, {
        .base = {
                .cra_name = "authenc(hmac(sha256),ecb(cipher_null))",
                .cra_driver_name = "cpt_hmac_sha256_ecb_null",
                .cra_blocksize = 1,
                .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
                .cra_priority = 4001,
                .cra_module = THIS_MODULE,
        },
        .init = otx_cpt_aead_ecb_null_sha256_init,
        .exit = otx_cpt_aead_exit,
        .setkey = otx_cpt_aead_ecb_null_sha_setkey,
        .setauthsize = otx_cpt_aead_set_authsize,
        .encrypt = otx_cpt_aead_null_encrypt,
        .decrypt = otx_cpt_aead_null_decrypt,
        .maxauthsize = SHA256_DIGEST_SIZE,
}, {
        .base = {
                .cra_name = "authenc(hmac(sha384),ecb(cipher_null))",
                .cra_driver_name = "cpt_hmac_sha384_ecb_null",
                .cra_blocksize = 1,
                .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
                .cra_priority = 4001,
                .cra_module = THIS_MODULE,
        },
        .init = otx_cpt_aead_ecb_null_sha384_init,
        .exit = otx_cpt_aead_exit,
        .setkey = otx_cpt_aead_ecb_null_sha_setkey,
        .setauthsize = otx_cpt_aead_set_authsize,
        .encrypt = otx_cpt_aead_null_encrypt,
        .decrypt = otx_cpt_aead_null_decrypt,
        .maxauthsize = SHA384_DIGEST_SIZE,
}, {
        .base = {
                .cra_name = "authenc(hmac(sha512),ecb(cipher_null))",
                .cra_driver_name = "cpt_hmac_sha512_ecb_null",
                .cra_blocksize = 1,
                .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
                .cra_priority = 4001,
                .cra_module = THIS_MODULE,
        },
        .init = otx_cpt_aead_ecb_null_sha512_init,
        .exit = otx_cpt_aead_exit,
        .setkey = otx_cpt_aead_ecb_null_sha_setkey,
        .setauthsize = otx_cpt_aead_set_authsize,
        .encrypt = otx_cpt_aead_null_encrypt,
        .decrypt = otx_cpt_aead_null_decrypt,
        .maxauthsize = SHA512_DIGEST_SIZE,
}, {
        .base = {
                .cra_name = "rfc4106(gcm(aes))",
                .cra_driver_name = "cpt_rfc4106_gcm_aes",
                .cra_blocksize = 1,
                .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
                .cra_priority = 4001,
                .cra_module = THIS_MODULE,
        },
        .init = otx_cpt_aead_gcm_aes_init,
        .exit = otx_cpt_aead_exit,
        .setkey = otx_cpt_aead_gcm_aes_setkey,
        .setauthsize = otx_cpt_aead_set_authsize,
        .encrypt = otx_cpt_aead_encrypt,
        .decrypt = otx_cpt_aead_decrypt,
        .ivsize = AES_GCM_IV_SIZE,
        .maxauthsize = AES_GCM_ICV_SIZE,
} };

static inline int is_any_alg_used(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(otx_cpt_skciphers); i++)
                if (refcount_read(&otx_cpt_skciphers[i].base.cra_refcnt) != 1)
                        return true;
        for (i = 0; i < ARRAY_SIZE(otx_cpt_aeads); i++)
                if (refcount_read(&otx_cpt_aeads[i].base.cra_refcnt) != 1)
                        return true;
        return false;
}

static inline int cpt_register_algs(void)
{
        int i, err = 0;

        if (!IS_ENABLED(CONFIG_DM_CRYPT)) {
                for (i = 0; i < ARRAY_SIZE(otx_cpt_skciphers); i++)
                        otx_cpt_skciphers[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;

                err = crypto_register_skciphers(otx_cpt_skciphers,
                                                ARRAY_SIZE(otx_cpt_skciphers));
                if (err)
                        return err;
        }

        for (i = 0; i < ARRAY_SIZE(otx_cpt_aeads); i++)
                otx_cpt_aeads[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;

        err = crypto_register_aeads(otx_cpt_aeads, ARRAY_SIZE(otx_cpt_aeads));
        if (err) {
                crypto_unregister_skciphers(otx_cpt_skciphers,
                                            ARRAY_SIZE(otx_cpt_skciphers));
                return err;
        }

        return 0;
}
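
/*
 * Editorial note (assumption): clearing CRYPTO_ALG_DEAD before registering
 * allows the same statically defined algorithm templates to be registered
 * again after a previous cpt_unregister_algs() marked them dead (e.g. when
 * the last SE device was removed and a new one is later probed).
 */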

static inline void cpt_unregister_algs(void)
{
        crypto_unregister_skciphers(otx_cpt_skciphers,
                                    ARRAY_SIZE(otx_cpt_skciphers));
        crypto_unregister_aeads(otx_cpt_aeads, ARRAY_SIZE(otx_cpt_aeads));
}

static int compare_func(const void *lptr, const void *rptr)
{
        struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
        struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;

        if (ldesc->dev->devfn < rdesc->dev->devfn)
                return -1;
        if (ldesc->dev->devfn > rdesc->dev->devfn)
                return 1;
        return 0;
}

int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
                        enum otx_cptpf_type pf_type,
                        enum otx_cptvf_type engine_type,
                        int num_queues, int num_devices)
{
        int ret = 0;
        int count;

        mutex_lock(&mutex);
        switch (engine_type) {
        case OTX_CPT_SE_TYPES:
                count = atomic_read(&se_devices.count);
                if (count >= CPT_MAX_VF_NUM) {
                        dev_err(&pdev->dev, "No space to add a new device\n");
                        ret = -ENOSPC;
                        goto err;
                }
                se_devices.desc[count].pf_type = pf_type;
                se_devices.desc[count].num_queues = num_queues;
                se_devices.desc[count++].dev = pdev;
                atomic_inc(&se_devices.count);

                if (atomic_read(&se_devices.count) == num_devices &&
                    is_crypto_registered == false) {
                        if (cpt_register_algs()) {
                                dev_err(&pdev->dev,
                                        "Error in registering crypto algorithms\n");
                                ret = -EINVAL;
                                goto err;
                        }
                        try_module_get(mod);
                        is_crypto_registered = true;
                }
                sort(se_devices.desc, count, sizeof(struct cpt_device_desc),
                     compare_func, NULL);
                break;

        case OTX_CPT_AE_TYPES:
                count = atomic_read(&ae_devices.count);
                if (count >= CPT_MAX_VF_NUM) {
                        dev_err(&pdev->dev, "No space to add a new device\n");
                        ret = -ENOSPC;
                        goto err;
                }
                ae_devices.desc[count].pf_type = pf_type;
                ae_devices.desc[count].num_queues = num_queues;
                ae_devices.desc[count++].dev = pdev;
                atomic_inc(&ae_devices.count);
                sort(ae_devices.desc, count, sizeof(struct cpt_device_desc),
                     compare_func, NULL);
                break;

        default:
                dev_err(&pdev->dev, "Unknown VF type %d\n", engine_type);
                ret = BAD_OTX_CPTVF_TYPE;
        }
err:
        mutex_unlock(&mutex);
        return ret;
}

void otx_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod,
                         enum otx_cptvf_type engine_type)
{
        struct cpt_device_table *dev_tbl;
        bool dev_found = false;
        int i, j, count;

        mutex_lock(&mutex);

        dev_tbl = (engine_type == OTX_CPT_AE_TYPES) ? &ae_devices : &se_devices;
        count = atomic_read(&dev_tbl->count);
        for (i = 0; i < count; i++)
                if (pdev == dev_tbl->desc[i].dev) {
                        for (j = i; j < count-1; j++)
                                dev_tbl->desc[j] = dev_tbl->desc[j+1];
                        dev_found = true;
                        break;
                }

        if (!dev_found) {
                dev_err(&pdev->dev, "%s device not found\n", __func__);
                goto unlock;
        }

        if (engine_type != OTX_CPT_AE_TYPES) {
                if (atomic_dec_and_test(&se_devices.count) &&
                    !is_any_alg_used()) {
                        cpt_unregister_algs();
                        module_put(mod);
                        is_crypto_registered = false;
                }
        } else
                atomic_dec(&ae_devices.count);
unlock:
        mutex_unlock(&mutex);
}