// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX CPT driver
 *
 * Copyright (C) 2019 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <crypto/aes.h>
#include <crypto/authenc.h>
#include <crypto/cryptd.h>
#include <crypto/des.h>
#include <crypto/internal/aead.h>
#include <crypto/sha.h>
#include <crypto/xts.h>
#include <crypto/scatterwalk.h>
#include <linux/rtnetlink.h>
#include <linux/sort.h>
#include <linux/module.h>
#include "otx_cptvf.h"
#include "otx_cptvf_algs.h"
#include "otx_cptvf_reqmgr.h"
#define CPT_MAX_VF_NUM 64
/* Size of salt in AES GCM mode */
#define AES_GCM_SALT_SIZE 4
/* Size of IV in AES GCM mode */
#define AES_GCM_IV_SIZE 8
/* Size of ICV (Integrity Check Value) in AES GCM mode */
#define AES_GCM_ICV_SIZE 16
/* Offset of IV in AES GCM mode */
#define AES_GCM_IV_OFFSET 8
#define CONTROL_WORD_LEN 8
#define KEY2_OFFSET 48
#define DMA_MODE_FLAG(dma_mode) \
	(((dma_mode) == OTX_CPT_DMA_GATHER_SCATTER) ? (1 << 7) : 0)
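/*
 * DMA_MODE_FLAG() is OR-ed into opcode.s.major when requests are built
 * below, i.e. bit 7 of the major opcode requests gather/scatter DMA.
 */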
/* Truncated SHA digest size */
#define SHA1_TRUNC_DIGEST_SIZE 12
#define SHA256_TRUNC_DIGEST_SIZE 16
#define SHA384_TRUNC_DIGEST_SIZE 24
#define SHA512_TRUNC_DIGEST_SIZE 32
static DEFINE_MUTEX(mutex);
static int is_crypto_registered;
struct cpt_device_desc {
	enum otx_cptpf_type pf_type;
	struct pci_dev *dev;
	int num_queues;
};

struct cpt_device_table {
	atomic_t count;
	struct cpt_device_desc desc[CPT_MAX_VF_NUM];
};
static struct cpt_device_table se_devices = {
	.count = ATOMIC_INIT(0)
};

static struct cpt_device_table ae_devices = {
	.count = ATOMIC_INIT(0)
};
static inline int get_se_device(struct pci_dev **pdev, int *cpu_num)
{
	int count, ret = 0;

	count = atomic_read(&se_devices.count);
	if (count < 1)
		return -ENODEV;

	*cpu_num = get_cpu();

	if (se_devices.desc[0].pf_type == OTX_CPT_SE) {
		/*
		 * On OcteonTX platform there is one CPT instruction queue bound
		 * to each VF. We get maximum performance if one CPT queue
		 * is available for each cpu otherwise CPT queues need to be
		 * shared between cpus.
		 */
		if (*cpu_num >= count)
			*cpu_num %= count;
		*pdev = se_devices.desc[*cpu_num].dev;
	} else {
		pr_err("Unknown PF type %d\n", se_devices.desc[0].pf_type);
		ret = -EINVAL;
	}
	put_cpu();

	return ret;
}
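/*
 * Queue selection sketch: with four SE VFs (count == 4) a caller running
 * on cpu 5 gets *cpu_num = 5 % 4 = 1 and uses the queue of
 * se_devices.desc[1]; with at least as many VFs as cpus, every cpu keeps
 * a queue to itself.
 */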
static inline int validate_hmac_cipher_null(struct otx_cpt_req_info *cpt_req)
{
	struct otx_cpt_req_ctx *rctx;
	struct aead_request *req;
	struct crypto_aead *tfm;

	req = container_of(cpt_req->areq, struct aead_request, base);
	tfm = crypto_aead_reqtfm(req);
	rctx = aead_request_ctx(req);
	if (memcmp(rctx->fctx.hmac.s.hmac_calc,
		   rctx->fctx.hmac.s.hmac_recv,
		   crypto_aead_authsize(tfm)) != 0)
		return -EBADMSG;

	return 0;
}
static void otx_cpt_aead_callback(int status, void *arg1, void *arg2)
{
	struct otx_cpt_info_buffer *cpt_info = arg2;
	struct crypto_async_request *areq = arg1;
	struct otx_cpt_req_info *cpt_req;
	struct pci_dev *pdev;

	cpt_req = cpt_info->req;
	if (!status) {
		/*
		 * When selected cipher is NULL we need to manually
		 * verify whether calculated hmac value matches
		 * received hmac value
		 */
		if (cpt_req->req_type == OTX_CPT_AEAD_ENC_DEC_NULL_REQ &&
		    !cpt_req->is_enc)
			status = validate_hmac_cipher_null(cpt_req);
	}

	pdev = cpt_info->pdev;
	do_request_cleanup(pdev, cpt_info);

	if (areq)
		areq->complete(areq, status);
}
static void output_iv_copyback(struct crypto_async_request *areq)
{
	struct otx_cpt_req_info *req_info;
	struct skcipher_request *sreq;
	struct crypto_skcipher *stfm;
	struct otx_cpt_req_ctx *rctx;
	struct otx_cpt_enc_ctx *ctx;
	u32 start, ivsize;

	sreq = container_of(areq, struct skcipher_request, base);
	stfm = crypto_skcipher_reqtfm(sreq);
	ctx = crypto_skcipher_ctx(stfm);
	if (ctx->cipher_type == OTX_CPT_AES_CBC ||
	    ctx->cipher_type == OTX_CPT_DES3_CBC) {
		rctx = skcipher_request_ctx(sreq);
		req_info = &rctx->cpt_req;
		ivsize = crypto_skcipher_ivsize(stfm);
		start = sreq->cryptlen - ivsize;

		if (req_info->is_enc) {
			scatterwalk_map_and_copy(sreq->iv, sreq->dst, start,
						 ivsize, 0);
		} else {
			if (sreq->src != sreq->dst) {
				scatterwalk_map_and_copy(sreq->iv, sreq->src,
							 start, ivsize, 0);
			} else {
				memcpy(sreq->iv, req_info->iv_out, ivsize);
				kfree(req_info->iv_out);
			}
		}
	}
}
static void otx_cpt_skcipher_callback(int status, void *arg1, void *arg2)
{
	struct otx_cpt_info_buffer *cpt_info = arg2;
	struct crypto_async_request *areq = arg1;
	struct pci_dev *pdev;

	if (areq) {
		if (!status)
			output_iv_copyback(areq);
		if (cpt_info) {
			pdev = cpt_info->pdev;
			do_request_cleanup(pdev, cpt_info);
		}
		areq->complete(areq, status);
	}
}
static inline void update_input_data(struct otx_cpt_req_info *req_info,
				     struct scatterlist *inp_sg,
				     u32 nbytes, u32 *argcnt)
{
	req_info->req.dlen += nbytes;

	while (nbytes) {
		u32 len = min(nbytes, inp_sg->length);
		u8 *ptr = sg_virt(inp_sg);

		req_info->in[*argcnt].vptr = (void *)ptr;
		req_info->in[*argcnt].size = len;
		nbytes -= len;
		++(*argcnt);
		inp_sg = sg_next(inp_sg);
	}
}
static inline void update_output_data(struct otx_cpt_req_info *req_info,
				      struct scatterlist *outp_sg,
				      u32 offset, u32 nbytes, u32 *argcnt)
{
	req_info->rlen += nbytes;

	while (nbytes) {
		u32 len = min(nbytes, outp_sg->length - offset);
		u8 *ptr = sg_virt(outp_sg);

		req_info->out[*argcnt].vptr = (void *) (ptr + offset);
		req_info->out[*argcnt].size = len;
		nbytes -= len;
		++(*argcnt);
		offset = 0;
		outp_sg = sg_next(outp_sg);
	}
}
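/*
 * The two helpers above turn a scatterlist walk into the CPT
 * gather/scatter component list: every scatterlist segment becomes one
 * in[]/out[] entry (vptr, size) while dlen/rlen accumulate the total
 * bytes the engine will read and write.
 */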
static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc,
				 u32 *argcnt)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
	struct otx_cpt_enc_ctx *ctx = crypto_tfm_ctx(tfm);
	struct otx_cpt_fc_ctx *fctx = &rctx->fctx;
	int ivsize = crypto_skcipher_ivsize(stfm);
	u32 start = req->cryptlen - ivsize;
	u64 *ctrl_flags = NULL;
	gfp_t flags;

	flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			GFP_KERNEL : GFP_ATOMIC;
	req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
	req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;

	req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_FC |
				DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
	if (enc)
		req_info->req.opcode.s.minor = 2;
	else
		req_info->req.opcode.s.minor = 3;
	if ((ctx->cipher_type == OTX_CPT_AES_CBC ||
	    ctx->cipher_type == OTX_CPT_DES3_CBC) &&
	    req->src == req->dst) {
		req_info->iv_out = kmalloc(ivsize, flags);
		if (!req_info->iv_out)
			return -ENOMEM;

		scatterwalk_map_and_copy(req_info->iv_out, req->src,
					 start, ivsize, 0);
	}
	/* Encryption data length */
	req_info->req.param1 = req->cryptlen;
	/* Authentication data length */
	req_info->req.param2 = 0;

	fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
	fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
	fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_CPTR;

	if (ctx->cipher_type == OTX_CPT_AES_XTS)
		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
	else
		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);

	memcpy(fctx->enc.encr_iv, req->iv, crypto_skcipher_ivsize(stfm));

	ctrl_flags = (u64 *)&fctx->enc.enc_ctrl.flags;
	*ctrl_flags = cpu_to_be64(*ctrl_flags);

	/*
	 * Storing Packet Data Information in offset
	 * Control Word First 8 bytes
	 */
	req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
	req_info->in[*argcnt].size = CONTROL_WORD_LEN;
	req_info->req.dlen += CONTROL_WORD_LEN;
	++(*argcnt);

	req_info->in[*argcnt].vptr = (u8 *)fctx;
	req_info->in[*argcnt].size = sizeof(struct otx_cpt_fc_ctx);
	req_info->req.dlen += sizeof(struct otx_cpt_fc_ctx);

	++(*argcnt);

	return 0;
}
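/*
 * The gather list built by create_ctx_hdr() and create_input_list() is
 * therefore laid out as:
 *
 *   in[0]: rctx->ctrl_word     (CONTROL_WORD_LEN == 8 bytes)
 *   in[1]: rctx->fctx          (sizeof(struct otx_cpt_fc_ctx))
 *   in[2..]: the request payload scatterlist
 */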
static inline u32 create_input_list(struct skcipher_request *req, u32 enc,
				    u32 enc_iv_len)
{
	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	u32 argcnt = 0;
	int ret;

	ret = create_ctx_hdr(req, enc, &argcnt);
	if (ret)
		return ret;

	update_input_data(req_info, req->src, req->cryptlen, &argcnt);
	req_info->incnt = argcnt;

	return 0;
}
static inline void create_output_list(struct skcipher_request *req,
				      u32 enc_iv_len)
{
	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	u32 argcnt = 0;

	/*
	 * OUTPUT Buffer Processing
	 * AES encryption/decryption output would be
	 * received in the following format
	 *
	 * ------IV--------|------ENCRYPTED/DECRYPTED DATA-----|
	 * [ 16 Bytes/     [   Request Enc/Dec/ DATA Len AES CBC ]
	 */
	update_output_data(req_info, req->dst, 0, req->cryptlen, &argcnt);
	req_info->outcnt = argcnt;
}
static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	u32 enc_iv_len = crypto_skcipher_ivsize(stfm);
	struct pci_dev *pdev;
	int status, cpu_num;

	/* Validate that request doesn't exceed maximum CPT supported size */
	if (req->cryptlen > OTX_CPT_MAX_REQ_SIZE)
		return -E2BIG;

	/* Clear control words */
	rctx->ctrl_word.flags = 0;
	rctx->fctx.enc.enc_ctrl.flags = 0;

	status = create_input_list(req, enc, enc_iv_len);
	if (status)
		return status;
	create_output_list(req, enc_iv_len);

	status = get_se_device(&pdev, &cpu_num);
	if (status)
		return status;

	req_info->callback = (void *)otx_cpt_skcipher_callback;
	req_info->areq = &req->base;
	req_info->req_type = OTX_CPT_ENC_DEC_REQ;
	req_info->is_enc = enc;
	req_info->is_trunc_hmac = false;
	req_info->ctrl.s.grp = 0;

	/*
	 * We perform an asynchronous send and once
	 * the request is completed the driver would
	 * intimate through registered call back functions
	 */
	status = otx_cpt_do_request(pdev, req_info, cpu_num);

	return status;
}
static int otx_cpt_skcipher_encrypt(struct skcipher_request *req)
{
	return cpt_enc_dec(req, true);
}

static int otx_cpt_skcipher_decrypt(struct skcipher_request *req)
{
	return cpt_enc_dec(req, false);
}
static int otx_cpt_skcipher_xts_setkey(struct crypto_skcipher *tfm,
				       const u8 *key, u32 keylen)
{
	struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
	const u8 *key2 = key + (keylen / 2);
	const u8 *key1 = key;
	int ret;

	ret = xts_check_key(crypto_skcipher_tfm(tfm), key, keylen);
	if (ret)
		return ret;
	ctx->key_len = keylen;
	memcpy(ctx->enc_key, key1, keylen / 2);
	memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
	ctx->cipher_type = OTX_CPT_AES_XTS;
	switch (ctx->key_len) {
	case 2 * AES_KEYSIZE_128:
		ctx->key_type = OTX_CPT_AES_128_BIT;
		break;
	case 2 * AES_KEYSIZE_256:
		ctx->key_type = OTX_CPT_AES_256_BIT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int cpt_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  u32 keylen, u8 cipher_type)
{
	struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (keylen != DES3_EDE_KEY_SIZE)
		return -EINVAL;

	ctx->key_len = keylen;
	ctx->cipher_type = cipher_type;

	memcpy(ctx->enc_key, key, keylen);

	return 0;
}
static int cpt_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  u32 keylen, u8 cipher_type)
{
	struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->key_type = OTX_CPT_AES_128_BIT;
		break;
	case AES_KEYSIZE_192:
		ctx->key_type = OTX_CPT_AES_192_BIT;
		break;
	case AES_KEYSIZE_256:
		ctx->key_type = OTX_CPT_AES_256_BIT;
		break;
	default:
		return -EINVAL;
	}
	ctx->key_len = keylen;
	ctx->cipher_type = cipher_type;

	memcpy(ctx->enc_key, key, keylen);

	return 0;
}
static int otx_cpt_skcipher_cbc_aes_setkey(struct crypto_skcipher *tfm,
					   const u8 *key, u32 keylen)
{
	return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_CBC);
}

static int otx_cpt_skcipher_ecb_aes_setkey(struct crypto_skcipher *tfm,
					   const u8 *key, u32 keylen)
{
	return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_ECB);
}

static int otx_cpt_skcipher_cfb_aes_setkey(struct crypto_skcipher *tfm,
					   const u8 *key, u32 keylen)
{
	return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_CFB);
}

static int otx_cpt_skcipher_cbc_des3_setkey(struct crypto_skcipher *tfm,
					    const u8 *key, u32 keylen)
{
	return cpt_des_setkey(tfm, key, keylen, OTX_CPT_DES3_CBC);
}

static int otx_cpt_skcipher_ecb_des3_setkey(struct crypto_skcipher *tfm,
					    const u8 *key, u32 keylen)
{
	return cpt_des_setkey(tfm, key, keylen, OTX_CPT_DES3_ECB);
}
static int otx_cpt_enc_dec_init(struct crypto_skcipher *tfm)
{
	struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

	memset(ctx, 0, sizeof(*ctx));
	/*
	 * Additional memory for skcipher_request is
	 * allocated since the cryptd daemon uses
	 * this memory for request_ctx information
	 */
	crypto_skcipher_set_reqsize(tfm, sizeof(struct otx_cpt_req_ctx) +
					sizeof(struct skcipher_request));

	return 0;
}
static int cpt_aead_init(struct crypto_aead *tfm, u8 cipher_type, u8 mac_type)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);

	ctx->cipher_type = cipher_type;
	ctx->mac_type = mac_type;

	/*
	 * When selected cipher is NULL we use HMAC opcode instead of
	 * FLEXICRYPTO opcode therefore we don't need to use HASH algorithms
	 * for calculating ipad and opad
	 */
	if (ctx->cipher_type != OTX_CPT_CIPHER_NULL) {
		switch (ctx->mac_type) {
		case OTX_CPT_SHA1:
			ctx->hashalg = crypto_alloc_shash("sha1", 0,
							  CRYPTO_ALG_ASYNC);
			if (IS_ERR(ctx->hashalg))
				return PTR_ERR(ctx->hashalg);
			break;

		case OTX_CPT_SHA256:
			ctx->hashalg = crypto_alloc_shash("sha256", 0,
							  CRYPTO_ALG_ASYNC);
			if (IS_ERR(ctx->hashalg))
				return PTR_ERR(ctx->hashalg);
			break;

		case OTX_CPT_SHA384:
			ctx->hashalg = crypto_alloc_shash("sha384", 0,
							  CRYPTO_ALG_ASYNC);
			if (IS_ERR(ctx->hashalg))
				return PTR_ERR(ctx->hashalg);
			break;

		case OTX_CPT_SHA512:
			ctx->hashalg = crypto_alloc_shash("sha512", 0,
							  CRYPTO_ALG_ASYNC);
			if (IS_ERR(ctx->hashalg))
				return PTR_ERR(ctx->hashalg);
			break;
		}
	}

	crypto_aead_set_reqsize(tfm, sizeof(struct otx_cpt_req_ctx));

	return 0;
}
static int otx_cpt_aead_cbc_aes_sha1_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA1);
}

static int otx_cpt_aead_cbc_aes_sha256_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA256);
}

static int otx_cpt_aead_cbc_aes_sha384_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA384);
}

static int otx_cpt_aead_cbc_aes_sha512_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA512);
}

static int otx_cpt_aead_ecb_null_sha1_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA1);
}

static int otx_cpt_aead_ecb_null_sha256_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA256);
}

static int otx_cpt_aead_ecb_null_sha384_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA384);
}

static int otx_cpt_aead_ecb_null_sha512_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA512);
}

static int otx_cpt_aead_gcm_aes_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_AES_GCM, OTX_CPT_MAC_NULL);
}
static void otx_cpt_aead_exit(struct crypto_aead *tfm)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);

	kfree(ctx->ipad);
	kfree(ctx->opad);
	if (ctx->hashalg)
		crypto_free_shash(ctx->hashalg);
	kfree(ctx->sdesc);
}
/*
 * This is the Integrity Check Value validation (aka the authentication tag
 * length)
 */
static int otx_cpt_aead_set_authsize(struct crypto_aead *tfm,
				     unsigned int authsize)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);

	switch (ctx->mac_type) {
	case OTX_CPT_SHA1:
		if (authsize != SHA1_DIGEST_SIZE &&
		    authsize != SHA1_TRUNC_DIGEST_SIZE)
			return -EINVAL;

		if (authsize == SHA1_TRUNC_DIGEST_SIZE)
			ctx->is_trunc_hmac = true;
		break;

	case OTX_CPT_SHA256:
		if (authsize != SHA256_DIGEST_SIZE &&
		    authsize != SHA256_TRUNC_DIGEST_SIZE)
			return -EINVAL;

		if (authsize == SHA256_TRUNC_DIGEST_SIZE)
			ctx->is_trunc_hmac = true;
		break;

	case OTX_CPT_SHA384:
		if (authsize != SHA384_DIGEST_SIZE &&
		    authsize != SHA384_TRUNC_DIGEST_SIZE)
			return -EINVAL;

		if (authsize == SHA384_TRUNC_DIGEST_SIZE)
			ctx->is_trunc_hmac = true;
		break;

	case OTX_CPT_SHA512:
		if (authsize != SHA512_DIGEST_SIZE &&
		    authsize != SHA512_TRUNC_DIGEST_SIZE)
			return -EINVAL;

		if (authsize == SHA512_TRUNC_DIGEST_SIZE)
			ctx->is_trunc_hmac = true;
		break;

	case OTX_CPT_MAC_NULL:
		if (ctx->cipher_type == OTX_CPT_AES_GCM) {
			if (authsize != AES_GCM_ICV_SIZE)
				return -EINVAL;
		} else
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	tfm->authsize = authsize;
	return 0;
}
static struct otx_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg)
{
	struct otx_cpt_sdesc *sdesc;
	int size;

	size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
	sdesc = kmalloc(size, GFP_KERNEL);
	if (!sdesc)
		return NULL;

	sdesc->shash.tfm = alg;

	return sdesc;
}
static inline void swap_data32(void *buf, u32 len)
{
	u32 *store = (u32 *) buf;
	int i = 0;

	for (i = 0 ; i < len/sizeof(u32); i++, store++)
		*store = cpu_to_be32(*store);
}

static inline void swap_data64(void *buf, u32 len)
{
	u64 *store = (u64 *) buf;
	int i = 0;

	for (i = 0 ; i < len/sizeof(u64); i++, store++)
		*store = cpu_to_be64(*store);
}
static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
{
	struct sha512_state *sha512;
	struct sha256_state *sha256;
	struct sha1_state *sha1;

	switch (mac_type) {
	case OTX_CPT_SHA1:
		sha1 = (struct sha1_state *) in_pad;
		swap_data32(sha1->state, SHA1_DIGEST_SIZE);
		memcpy(out_pad, &sha1->state, SHA1_DIGEST_SIZE);
		break;

	case OTX_CPT_SHA256:
		sha256 = (struct sha256_state *) in_pad;
		swap_data32(sha256->state, SHA256_DIGEST_SIZE);
		memcpy(out_pad, &sha256->state, SHA256_DIGEST_SIZE);
		break;

	case OTX_CPT_SHA384:
	case OTX_CPT_SHA512:
		sha512 = (struct sha512_state *) in_pad;
		swap_data64(sha512->state, SHA512_DIGEST_SIZE);
		memcpy(out_pad, &sha512->state, SHA512_DIGEST_SIZE);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
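/*
 * copy_pad() converts the partial hash state exported by the software
 * shash (host-endian 32/64-bit words) to the big-endian layout the CPT
 * microcode appears to expect; SHA-384 shares sha512_state, hence the
 * combined SHA384/SHA512 case.
 */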
static int aead_hmac_init(struct crypto_aead *cipher)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
	int state_size = crypto_shash_statesize(ctx->hashalg);
	int ds = crypto_shash_digestsize(ctx->hashalg);
	int bs = crypto_shash_blocksize(ctx->hashalg);
	int authkeylen = ctx->auth_key_len;
	u8 *ipad = NULL, *opad = NULL;
	int ret = 0, icount = 0;

	ctx->sdesc = alloc_sdesc(ctx->hashalg);
	if (!ctx->sdesc)
		return -ENOMEM;

	ctx->ipad = kzalloc(bs, GFP_KERNEL);
	if (!ctx->ipad) {
		ret = -ENOMEM;
		goto calc_fail;
	}

	ctx->opad = kzalloc(bs, GFP_KERNEL);
	if (!ctx->opad) {
		ret = -ENOMEM;
		goto calc_fail;
	}

	ipad = kzalloc(state_size, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto calc_fail;
	}

	opad = kzalloc(state_size, GFP_KERNEL);
	if (!opad) {
		ret = -ENOMEM;
		goto calc_fail;
	}

	if (authkeylen > bs) {
		ret = crypto_shash_digest(&ctx->sdesc->shash, ctx->key,
					  authkeylen, ipad);
		if (ret)
			goto calc_fail;

		authkeylen = ds;
	} else {
		memcpy(ipad, ctx->key, authkeylen);
	}

	memset(ipad + authkeylen, 0, bs - authkeylen);
	memcpy(opad, ipad, bs);

	for (icount = 0; icount < bs; icount++) {
		ipad[icount] ^= 0x36;
		opad[icount] ^= 0x5c;
	}

	/*
	 * Partial Hash calculated from the software
	 * algorithm is retrieved for IPAD & OPAD
	 */

	/* IPAD Calculation */
	crypto_shash_init(&ctx->sdesc->shash);
	crypto_shash_update(&ctx->sdesc->shash, ipad, bs);
	crypto_shash_export(&ctx->sdesc->shash, ipad);
	ret = copy_pad(ctx->mac_type, ctx->ipad, ipad);
	if (ret)
		goto calc_fail;

	/* OPAD Calculation */
	crypto_shash_init(&ctx->sdesc->shash);
	crypto_shash_update(&ctx->sdesc->shash, opad, bs);
	crypto_shash_export(&ctx->sdesc->shash, opad);
	ret = copy_pad(ctx->mac_type, ctx->opad, opad);
	if (ret)
		goto calc_fail;

	kfree(ipad);
	kfree(opad);

	return 0;

calc_fail:
	kfree(ctx->ipad);
	ctx->ipad = NULL;
	kfree(ctx->opad);
	ctx->opad = NULL;
	kfree(ipad);
	kfree(opad);
	kfree(ctx->sdesc);
	ctx->sdesc = NULL;

	return ret;
}
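/*
 * aead_hmac_init() is the standard RFC 2104 HMAC key schedule done in
 * software: pad (or digest, if longer than one block) the key, XOR with
 * 0x36/0x5c to get ipad/opad, hash one block of each and export the
 * partial states, which are later loaded into the hardware context so
 * the engine can resume the inner and outer hashes.
 */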
static int otx_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
					   const unsigned char *key,
					   unsigned int keylen)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
	struct crypto_authenc_key_param *param;
	int enckeylen = 0, authkeylen = 0;
	struct rtattr *rta = (void *)key;
	int status = -EINVAL;

	if (!RTA_OK(rta, keylen))
		goto badkey;

	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		goto badkey;

	if (RTA_PAYLOAD(rta) < sizeof(*param))
		goto badkey;

	param = RTA_DATA(rta);
	enckeylen = be32_to_cpu(param->enckeylen);
	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);
	if (keylen < enckeylen)
		goto badkey;

	if (keylen > OTX_CPT_MAX_KEY_SIZE)
		goto badkey;

	authkeylen = keylen - enckeylen;
	memcpy(ctx->key, key, keylen);

	switch (enckeylen) {
	case AES_KEYSIZE_128:
		ctx->key_type = OTX_CPT_AES_128_BIT;
		break;
	case AES_KEYSIZE_192:
		ctx->key_type = OTX_CPT_AES_192_BIT;
		break;
	case AES_KEYSIZE_256:
		ctx->key_type = OTX_CPT_AES_256_BIT;
		break;
	default:
		/* Invalid key length */
		goto badkey;
	}

	ctx->enc_key_len = enckeylen;
	ctx->auth_key_len = authkeylen;

	status = aead_hmac_init(cipher);
	if (status)
		goto badkey;

	return 0;
badkey:
	return status;
}
static int otx_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
					    const unsigned char *key,
					    unsigned int keylen)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
	struct crypto_authenc_key_param *param;
	struct rtattr *rta = (void *)key;
	int enckeylen = 0;

	if (!RTA_OK(rta, keylen))
		goto badkey;

	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		goto badkey;

	if (RTA_PAYLOAD(rta) < sizeof(*param))
		goto badkey;

	param = RTA_DATA(rta);
	enckeylen = be32_to_cpu(param->enckeylen);
	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);
	if (enckeylen != 0)
		goto badkey;

	if (keylen > OTX_CPT_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, key, keylen);
	ctx->enc_key_len = enckeylen;
	ctx->auth_key_len = keylen;

	return 0;
badkey:
	return -EINVAL;
}
static int otx_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
				       const unsigned char *key,
				       unsigned int keylen)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);

	/*
	 * For aes gcm we expect to get encryption key (16, 24, 32 bytes)
	 * and salt (4 bytes)
	 */
	switch (keylen) {
	case AES_KEYSIZE_128 + AES_GCM_SALT_SIZE:
		ctx->key_type = OTX_CPT_AES_128_BIT;
		ctx->enc_key_len = AES_KEYSIZE_128;
		break;
	case AES_KEYSIZE_192 + AES_GCM_SALT_SIZE:
		ctx->key_type = OTX_CPT_AES_192_BIT;
		ctx->enc_key_len = AES_KEYSIZE_192;
		break;
	case AES_KEYSIZE_256 + AES_GCM_SALT_SIZE:
		ctx->key_type = OTX_CPT_AES_256_BIT;
		ctx->enc_key_len = AES_KEYSIZE_256;
		break;
	default:
		/* Invalid key and salt length */
		return -EINVAL;
	}

	/* Store encryption key and salt */
	memcpy(ctx->key, key, keylen);

	return 0;
}
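/*
 * rfc4106(gcm(aes)) keys carry a trailing 4-byte nonce salt (RFC 4106),
 * which is why the accepted lengths are the AES key sizes plus
 * AES_GCM_SALT_SIZE and why the salt stays at the end of ctx->key.
 */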
static inline u32 create_aead_ctx_hdr(struct aead_request *req, u32 enc,
				      u32 *argcnt)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	struct otx_cpt_fc_ctx *fctx = &rctx->fctx;
	int mac_len = crypto_aead_authsize(tfm);
	int ds;

	rctx->ctrl_word.e.enc_data_offset = req->assoclen;

	switch (ctx->cipher_type) {
	case OTX_CPT_AES_CBC:
		fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_CPTR;
		/* Copy encryption key to context */
		memcpy(fctx->enc.encr_key, ctx->key + ctx->auth_key_len,
		       ctx->enc_key_len);
		/* Copy IV to context */
		memcpy(fctx->enc.encr_iv, req->iv, crypto_aead_ivsize(tfm));

		ds = crypto_shash_digestsize(ctx->hashalg);
		if (ctx->mac_type == OTX_CPT_SHA384)
			ds = SHA512_DIGEST_SIZE;
		if (ctx->ipad)
			memcpy(fctx->hmac.e.ipad, ctx->ipad, ds);
		if (ctx->opad)
			memcpy(fctx->hmac.e.opad, ctx->opad, ds);
		break;

	case OTX_CPT_AES_GCM:
		fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_DPTR;
		/* Copy encryption key to context */
		memcpy(fctx->enc.encr_key, ctx->key, ctx->enc_key_len);
		/* Copy salt to context */
		memcpy(fctx->enc.encr_iv, ctx->key + ctx->enc_key_len,
		       AES_GCM_SALT_SIZE);

		rctx->ctrl_word.e.iv_offset = req->assoclen - AES_GCM_IV_OFFSET;
		break;

	default:
		/* Unknown cipher type */
		return -EINVAL;
	}
	rctx->ctrl_word.flags = cpu_to_be64(rctx->ctrl_word.flags);

	req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
	req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
	req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_FC |
				DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
	if (enc) {
		req_info->req.opcode.s.minor = 2;
		req_info->req.param1 = req->cryptlen;
		req_info->req.param2 = req->cryptlen + req->assoclen;
	} else {
		req_info->req.opcode.s.minor = 3;
		req_info->req.param1 = req->cryptlen - mac_len;
		req_info->req.param2 = req->cryptlen + req->assoclen - mac_len;
	}

	fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
	fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
	fctx->enc.enc_ctrl.e.mac_type = ctx->mac_type;
	fctx->enc.enc_ctrl.e.mac_len = mac_len;
	fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.flags);

	/*
	 * Storing Packet Data Information in offset
	 * Control Word First 8 bytes
	 */
	req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
	req_info->in[*argcnt].size = CONTROL_WORD_LEN;
	req_info->req.dlen += CONTROL_WORD_LEN;
	++(*argcnt);

	req_info->in[*argcnt].vptr = (u8 *)fctx;
	req_info->in[*argcnt].size = sizeof(struct otx_cpt_fc_ctx);
	req_info->req.dlen += sizeof(struct otx_cpt_fc_ctx);
	++(*argcnt);

	return 0;
}
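/*
 * For the FC opcode param1 is the number of bytes to encrypt/decrypt and
 * param2 the number of bytes to authenticate; on decryption both exclude
 * the mac_len ICV bytes that the encrypt side appended.
 */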
static inline u32 create_hmac_ctx_hdr(struct aead_request *req, u32 *argcnt,
				      u32 enc)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;

	req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
	req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
	req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_HMAC |
				DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
	req_info->is_trunc_hmac = ctx->is_trunc_hmac;

	req_info->req.opcode.s.minor = 0;
	req_info->req.param1 = ctx->auth_key_len;
	req_info->req.param2 = ctx->mac_type << 8;

	/* Add authentication key */
	req_info->in[*argcnt].vptr = ctx->key;
	req_info->in[*argcnt].size = round_up(ctx->auth_key_len, 8);
	req_info->req.dlen += round_up(ctx->auth_key_len, 8);
	++(*argcnt);

	return 0;
}
static inline u32 create_aead_input_list(struct aead_request *req, u32 enc)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	u32 inputlen = req->cryptlen + req->assoclen;
	u32 status, argcnt = 0;

	status = create_aead_ctx_hdr(req, enc, &argcnt);
	if (status)
		return status;
	update_input_data(req_info, req->src, inputlen, &argcnt);
	req_info->incnt = argcnt;

	return 0;
}
static inline u32 create_aead_output_list(struct aead_request *req, u32 enc,
					  u32 mac_len)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	u32 argcnt = 0, outputlen = 0;

	if (enc)
		outputlen = req->cryptlen + req->assoclen + mac_len;
	else
		outputlen = req->cryptlen + req->assoclen - mac_len;

	update_output_data(req_info, req->dst, 0, outputlen, &argcnt);
	req_info->outcnt = argcnt;

	return 0;
}
static inline u32 create_aead_null_input_list(struct aead_request *req,
					      u32 enc, u32 mac_len)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	u32 inputlen, argcnt = 0;

	if (enc)
		inputlen = req->cryptlen + req->assoclen;
	else
		inputlen = req->cryptlen + req->assoclen - mac_len;

	create_hmac_ctx_hdr(req, &argcnt, enc);
	update_input_data(req_info, req->src, inputlen, &argcnt);
	req_info->incnt = argcnt;

	return 0;
}
static inline u32 create_aead_null_output_list(struct aead_request *req,
					       u32 enc, u32 mac_len)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	struct scatterlist *dst;
	u8 *ptr = NULL;
	int argcnt = 0, status, offset;
	u32 inputlen;

	if (enc)
		inputlen = req->cryptlen + req->assoclen;
	else
		inputlen = req->cryptlen + req->assoclen - mac_len;

	/*
	 * If source and destination are different
	 * then copy payload to destination
	 */
	if (req->src != req->dst) {

		ptr = kmalloc(inputlen, (req_info->areq->flags &
					 CRYPTO_TFM_REQ_MAY_SLEEP) ?
					 GFP_KERNEL : GFP_ATOMIC);
		if (!ptr) {
			status = -ENOMEM;
			goto error;
		}

		status = sg_copy_to_buffer(req->src, sg_nents(req->src), ptr,
					   inputlen);
		if (status != inputlen) {
			status = -EINVAL;
			goto error_free;
		}
		status = sg_copy_from_buffer(req->dst, sg_nents(req->dst), ptr,
					     inputlen);
		if (status != inputlen) {
			status = -EINVAL;
			goto error_free;
		}
		kfree(ptr);
	}

	if (enc) {
		/*
		 * In an encryption scenario hmac needs
		 * to be appended after payload
		 */
		dst = req->dst;
		offset = inputlen;
		while (offset >= dst->length) {
			offset -= dst->length;
			dst = sg_next(dst);
			if (!dst) {
				status = -ENOENT;
				goto error;
			}
		}

		update_output_data(req_info, dst, offset, mac_len, &argcnt);
	} else {
		/*
		 * In a decryption scenario calculated hmac for received
		 * payload needs to be compare with hmac received
		 */
		status = sg_copy_buffer(req->src, sg_nents(req->src),
					rctx->fctx.hmac.s.hmac_recv, mac_len,
					inputlen, true);
		if (status != mac_len) {
			status = -EINVAL;
			goto error;
		}

		req_info->out[argcnt].vptr = rctx->fctx.hmac.s.hmac_calc;
		req_info->out[argcnt].size = mac_len;
		argcnt++;
	}

	req_info->outcnt = argcnt;
	return 0;

error_free:
	kfree(ptr);
error:
	return status;
}
static u32 cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct pci_dev *pdev;
	u32 status, cpu_num;

	/* Clear control words */
	rctx->ctrl_word.flags = 0;
	rctx->fctx.enc.enc_ctrl.flags = 0;

	req_info->callback = otx_cpt_aead_callback;
	req_info->areq = &req->base;
	req_info->req_type = reg_type;
	req_info->is_enc = enc;
	req_info->is_trunc_hmac = false;

	switch (reg_type) {
	case OTX_CPT_AEAD_ENC_DEC_REQ:
		status = create_aead_input_list(req, enc);
		if (status)
			return status;
		status = create_aead_output_list(req, enc,
						 crypto_aead_authsize(tfm));
		if (status)
			return status;
		break;

	case OTX_CPT_AEAD_ENC_DEC_NULL_REQ:
		status = create_aead_null_input_list(req, enc,
						     crypto_aead_authsize(tfm));
		if (status)
			return status;
		status = create_aead_null_output_list(req, enc,
						crypto_aead_authsize(tfm));
		if (status)
			return status;
		break;

	default:
		return -EINVAL;
	}

	/* Validate that request doesn't exceed maximum CPT supported size */
	if (req_info->req.param1 > OTX_CPT_MAX_REQ_SIZE ||
	    req_info->req.param2 > OTX_CPT_MAX_REQ_SIZE)
		return -E2BIG;

	status = get_se_device(&pdev, &cpu_num);
	if (status)
		return status;

	req_info->ctrl.s.grp = 0;

	status = otx_cpt_do_request(pdev, req_info, cpu_num);
	/*
	 * We perform an asynchronous send and once
	 * the request is completed the driver would
	 * intimate through registered call back functions
	 */
	return status;
}
static int otx_cpt_aead_encrypt(struct aead_request *req)
{
	return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_REQ, true);
}

static int otx_cpt_aead_decrypt(struct aead_request *req)
{
	return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_REQ, false);
}

static int otx_cpt_aead_null_encrypt(struct aead_request *req)
{
	return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_NULL_REQ, true);
}

static int otx_cpt_aead_null_decrypt(struct aead_request *req)
{
	return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_NULL_REQ, false);
}
static struct skcipher_alg otx_cpt_skciphers[] = { {
	.base.cra_name = "xts(aes)",
	.base.cra_driver_name = "cpt_xts_aes",
	.base.cra_flags = CRYPTO_ALG_ASYNC,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx_cpt_enc_dec_init,
	.ivsize = AES_BLOCK_SIZE,
	.min_keysize = 2 * AES_MIN_KEY_SIZE,
	.max_keysize = 2 * AES_MAX_KEY_SIZE,
	.setkey = otx_cpt_skcipher_xts_setkey,
	.encrypt = otx_cpt_skcipher_encrypt,
	.decrypt = otx_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "cbc(aes)",
	.base.cra_driver_name = "cpt_cbc_aes",
	.base.cra_flags = CRYPTO_ALG_ASYNC,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx_cpt_enc_dec_init,
	.ivsize = AES_BLOCK_SIZE,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.setkey = otx_cpt_skcipher_cbc_aes_setkey,
	.encrypt = otx_cpt_skcipher_encrypt,
	.decrypt = otx_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "ecb(aes)",
	.base.cra_driver_name = "cpt_ecb_aes",
	.base.cra_flags = CRYPTO_ALG_ASYNC,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx_cpt_enc_dec_init,
	.ivsize = 0,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.setkey = otx_cpt_skcipher_ecb_aes_setkey,
	.encrypt = otx_cpt_skcipher_encrypt,
	.decrypt = otx_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "cfb(aes)",
	.base.cra_driver_name = "cpt_cfb_aes",
	.base.cra_flags = CRYPTO_ALG_ASYNC,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx_cpt_enc_dec_init,
	.ivsize = AES_BLOCK_SIZE,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.setkey = otx_cpt_skcipher_cfb_aes_setkey,
	.encrypt = otx_cpt_skcipher_encrypt,
	.decrypt = otx_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "cbc(des3_ede)",
	.base.cra_driver_name = "cpt_cbc_des3_ede",
	.base.cra_flags = CRYPTO_ALG_ASYNC,
	.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx_cpt_enc_dec_init,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.ivsize = DES_BLOCK_SIZE,
	.setkey = otx_cpt_skcipher_cbc_des3_setkey,
	.encrypt = otx_cpt_skcipher_encrypt,
	.decrypt = otx_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "ecb(des3_ede)",
	.base.cra_driver_name = "cpt_ecb_des3_ede",
	.base.cra_flags = CRYPTO_ALG_ASYNC,
	.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx_cpt_enc_dec_init,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.ivsize = 0,
	.setkey = otx_cpt_skcipher_ecb_des3_setkey,
	.encrypt = otx_cpt_skcipher_encrypt,
	.decrypt = otx_cpt_skcipher_decrypt,
} };
static struct aead_alg otx_cpt_aeads[] = { {
	.base = {
		.cra_name = "authenc(hmac(sha1),cbc(aes))",
		.cra_driver_name = "cpt_hmac_sha1_cbc_aes",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_cbc_aes_sha1_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_cbc_aes_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_encrypt,
	.decrypt = otx_cpt_aead_decrypt,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA1_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha256),cbc(aes))",
		.cra_driver_name = "cpt_hmac_sha256_cbc_aes",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_cbc_aes_sha256_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_cbc_aes_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_encrypt,
	.decrypt = otx_cpt_aead_decrypt,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA256_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha384),cbc(aes))",
		.cra_driver_name = "cpt_hmac_sha384_cbc_aes",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_cbc_aes_sha384_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_cbc_aes_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_encrypt,
	.decrypt = otx_cpt_aead_decrypt,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA384_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha512),cbc(aes))",
		.cra_driver_name = "cpt_hmac_sha512_cbc_aes",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_cbc_aes_sha512_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_cbc_aes_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_encrypt,
	.decrypt = otx_cpt_aead_decrypt,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA512_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha1),ecb(cipher_null))",
		.cra_driver_name = "cpt_hmac_sha1_ecb_null",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_ecb_null_sha1_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_ecb_null_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_null_encrypt,
	.decrypt = otx_cpt_aead_null_decrypt,
	.ivsize = 0,
	.maxauthsize = SHA1_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha256),ecb(cipher_null))",
		.cra_driver_name = "cpt_hmac_sha256_ecb_null",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_ecb_null_sha256_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_ecb_null_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_null_encrypt,
	.decrypt = otx_cpt_aead_null_decrypt,
	.ivsize = 0,
	.maxauthsize = SHA256_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha384),ecb(cipher_null))",
		.cra_driver_name = "cpt_hmac_sha384_ecb_null",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_ecb_null_sha384_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_ecb_null_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_null_encrypt,
	.decrypt = otx_cpt_aead_null_decrypt,
	.ivsize = 0,
	.maxauthsize = SHA384_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha512),ecb(cipher_null))",
		.cra_driver_name = "cpt_hmac_sha512_ecb_null",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_ecb_null_sha512_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_ecb_null_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_null_encrypt,
	.decrypt = otx_cpt_aead_null_decrypt,
	.ivsize = 0,
	.maxauthsize = SHA512_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "rfc4106(gcm(aes))",
		.cra_driver_name = "cpt_rfc4106_gcm_aes",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_gcm_aes_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_gcm_aes_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_encrypt,
	.decrypt = otx_cpt_aead_decrypt,
	.ivsize = AES_GCM_IV_SIZE,
	.maxauthsize = AES_GCM_ICV_SIZE,
} };
static inline int is_any_alg_used(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(otx_cpt_skciphers); i++)
		if (refcount_read(&otx_cpt_skciphers[i].base.cra_refcnt) != 1)
			return true;
	for (i = 0; i < ARRAY_SIZE(otx_cpt_aeads); i++)
		if (refcount_read(&otx_cpt_aeads[i].base.cra_refcnt) != 1)
			return true;
	return false;
}
static inline int cpt_register_algs(void)
{
	int i, err = 0;

	if (!IS_ENABLED(CONFIG_DM_CRYPT)) {
		for (i = 0; i < ARRAY_SIZE(otx_cpt_skciphers); i++)
			otx_cpt_skciphers[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;

		err = crypto_register_skciphers(otx_cpt_skciphers,
						ARRAY_SIZE(otx_cpt_skciphers));
		if (err)
			return err;
	}

	for (i = 0; i < ARRAY_SIZE(otx_cpt_aeads); i++)
		otx_cpt_aeads[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;

	err = crypto_register_aeads(otx_cpt_aeads, ARRAY_SIZE(otx_cpt_aeads));
	if (err) {
		crypto_unregister_skciphers(otx_cpt_skciphers,
					    ARRAY_SIZE(otx_cpt_skciphers));
		return err;
	}

	return 0;
}
static inline void cpt_unregister_algs(void)
{
	crypto_unregister_skciphers(otx_cpt_skciphers,
				    ARRAY_SIZE(otx_cpt_skciphers));
	crypto_unregister_aeads(otx_cpt_aeads, ARRAY_SIZE(otx_cpt_aeads));
}
static int compare_func(const void *lptr, const void *rptr)
{
	struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
	struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;

	if (ldesc->dev->devfn < rdesc->dev->devfn)
		return -1;
	if (ldesc->dev->devfn > rdesc->dev->devfn)
		return 1;
	return 0;
}
static void swap_func(void *lptr, void *rptr, int size)
{
	struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
	struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;
	struct cpt_device_desc desc;

	desc = *ldesc;
	*ldesc = *rdesc;
	*rdesc = desc;
}
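/*
 * sort() with compare_func/swap_func keeps the device table ordered by
 * PCI devfn regardless of probe order, so the cpu-number-to-queue mapping
 * done in get_se_device() is deterministic.
 */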
int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
			enum otx_cptpf_type pf_type,
			enum otx_cptvf_type engine_type,
			int num_queues, int num_devices)
{
	int ret = 0;
	int count;

	mutex_lock(&mutex);
	switch (engine_type) {
	case OTX_CPT_SE_TYPES:
		count = atomic_read(&se_devices.count);
		if (count >= CPT_MAX_VF_NUM) {
			dev_err(&pdev->dev, "No space to add a new device");
			ret = -ENOSPC;
			goto err;
		}
		se_devices.desc[count].pf_type = pf_type;
		se_devices.desc[count].num_queues = num_queues;
		se_devices.desc[count++].dev = pdev;
		atomic_inc(&se_devices.count);

		if (atomic_read(&se_devices.count) == num_devices &&
		    is_crypto_registered == false) {
			if (cpt_register_algs()) {
				dev_err(&pdev->dev,
					"Error in registering crypto algorithms\n");
				ret = -EINVAL;
				goto err;
			}
			try_module_get(mod);
			is_crypto_registered = true;
		}
		sort(se_devices.desc, count, sizeof(struct cpt_device_desc),
		     compare_func, swap_func);
		break;

	case OTX_CPT_AE_TYPES:
		count = atomic_read(&ae_devices.count);
		if (count >= CPT_MAX_VF_NUM) {
			dev_err(&pdev->dev, "No space to add a new device");
			ret = -ENOSPC;
			goto err;
		}
		ae_devices.desc[count].pf_type = pf_type;
		ae_devices.desc[count].num_queues = num_queues;
		ae_devices.desc[count++].dev = pdev;
		atomic_inc(&ae_devices.count);
		sort(ae_devices.desc, count, sizeof(struct cpt_device_desc),
		     compare_func, swap_func);
		break;

	default:
		dev_err(&pdev->dev, "Unknown VF type %d\n", engine_type);
		ret = BAD_OTX_CPTVF_TYPE;
	}
err:
	mutex_unlock(&mutex);

	return ret;
}
void otx_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod,
			 enum otx_cptvf_type engine_type)
{
	struct cpt_device_table *dev_tbl;
	bool dev_found = false;
	int i, j, count;

	mutex_lock(&mutex);

	dev_tbl = (engine_type == OTX_CPT_AE_TYPES) ? &ae_devices : &se_devices;
	count = atomic_read(&dev_tbl->count);
	for (i = 0; i < count; i++)
		if (pdev == dev_tbl->desc[i].dev) {
			for (j = i; j < count-1; j++)
				dev_tbl->desc[j] = dev_tbl->desc[j+1];
			dev_found = true;
			break;
		}

	if (!dev_found) {
		dev_err(&pdev->dev, "%s device not found", __func__);
		goto exit;
	}

	if (engine_type != OTX_CPT_AE_TYPES) {
		if (atomic_dec_and_test(&se_devices.count) &&
		    !is_any_alg_used()) {
			cpt_unregister_algs();
			module_put(mod);
			is_crypto_registered = false;
		}
	} else
		atomic_dec(&ae_devices.count);
exit:
	mutex_unlock(&mutex);
}