// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <crypto/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/sha.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>

#include "sec.h"
#include "sec_crypto.h"
#define SEC_PRIORITY		4001
#define SEC_XTS_MIN_KEY_SIZE	(2 * AES_MIN_KEY_SIZE)
#define SEC_XTS_MAX_KEY_SIZE	(2 * AES_MAX_KEY_SIZE)
#define SEC_DES3_2KEY_SIZE	(2 * DES_KEY_SIZE)
#define SEC_DES3_3KEY_SIZE	(3 * DES_KEY_SIZE)

/* SEC sqe(bd) bit-field offsets and masks */
#define SEC_DE_OFFSET		1
#define SEC_CIPHER_OFFSET	4
#define SEC_SCENE_OFFSET	3
#define SEC_DST_SGL_OFFSET	2
#define SEC_SRC_SGL_OFFSET	7
#define SEC_CKEY_OFFSET		9
#define SEC_CMODE_OFFSET	12
#define SEC_AKEY_OFFSET		5
#define SEC_AEAD_ALG_OFFSET	11
#define SEC_AUTH_OFFSET		6

#define SEC_FLAG_OFFSET		7
#define SEC_FLAG_MASK		0x0780
#define SEC_TYPE_MASK		0x0F
#define SEC_DONE_MASK		0x0001

#define SEC_TOTAL_IV_SZ		(SEC_IV_SIZE * QM_Q_DEPTH)
#define SEC_SGL_SGE_NR		128
#define SEC_CTX_DEV(ctx)	(&(ctx)->sec->qm.pdev->dev)
#define SEC_CIPHER_AUTH		0xfe
#define SEC_AUTH_CIPHER		0x1
#define SEC_MAX_MAC_LEN		64
#define SEC_TOTAL_MAC_SZ	(SEC_MAX_MAC_LEN * QM_Q_DEPTH)
#define SEC_SQE_LEN_RATE	4
#define SEC_SQE_CFLAG		2
#define SEC_SQE_AEAD_FLAG	3
#define SEC_SQE_DONE		0x1
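/*
 * Completion words are decoded with the masks above: bit 0 of the type2
 * done_flag (SEC_DONE_MASK) is the hardware "done" bit, and bits 7..10
 * (SEC_FLAG_MASK, shifted down by SEC_FLAG_OFFSET) carry the status flag
 * that is checked against SEC_SQE_CFLAG / SEC_SQE_AEAD_FLAG in sec_req_cb()
 * below.
 */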
static atomic_t sec_active_devs;
/* Pick an encrypt/decrypt queue cyclically to balance load across a TFM's queues */
static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
				ctx->hlf_q_num;

	return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
			ctx->hlf_q_num;
}

static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		atomic_dec(&ctx->enc_qcyclic);
	else
		atomic_dec(&ctx->dec_qcyclic);
}
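/*
 * Design note: the first half of a ctx's queues (0 .. hlf_q_num - 1) serves
 * encryption and the second half serves decryption, so heavy traffic in one
 * direction cannot starve the other of queue slots.
 */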
static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
{
	int req_id;

	mutex_lock(&qp_ctx->req_lock);
	req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
				  0, QM_Q_DEPTH, GFP_ATOMIC);
	mutex_unlock(&qp_ctx->req_lock);
	if (unlikely(req_id < 0)) {
		dev_err(SEC_CTX_DEV(req->ctx), "alloc req id fail!\n");
		return req_id;
	}

	req->qp_ctx = qp_ctx;
	qp_ctx->req_list[req_id] = req;

	return req_id;
}
static void sec_free_req_id(struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int req_id = req->req_id;

	if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) {
		dev_err(SEC_CTX_DEV(req->ctx), "free request id invalid!\n");
		return;
	}

	qp_ctx->req_list[req_id] = NULL;
	req->qp_ctx = NULL;

	mutex_lock(&qp_ctx->req_lock);
	idr_remove(&qp_ctx->req_idr, req_id);
	mutex_unlock(&qp_ctx->req_lock);
}
static int sec_aead_verify(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
	u8 *mac_out = qp_ctx->res[req->req_id].out_mac;
	size_t authsize = crypto_aead_authsize(tfm);
	u8 *mac = mac_out + SEC_MAX_MAC_LEN;
	struct scatterlist *sgl = aead_req->src;
	size_t sz;

	sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac, authsize,
				aead_req->cryptlen + aead_req->assoclen -
				authsize);
	if (unlikely(sz != authsize || memcmp(mac_out, mac, sz))) {
		dev_err(SEC_CTX_DEV(req->ctx), "aead verify failure!\n");
		return -EBADMSG;
	}

	return 0;
}
static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
	struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
	struct sec_sqe *bd = resp;
	struct sec_ctx *ctx;
	struct sec_req *req;
	u16 done, flag;
	int err = 0;
	u8 type;

	type = bd->type_cipher_auth & SEC_TYPE_MASK;
	if (unlikely(type != SEC_BD_TYPE2)) {
		pr_err("err bd type [%d]\n", type);
		return;
	}

	req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
	req->err_type = bd->type2.error_type;
	ctx = req->ctx;
	done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
	flag = (le16_to_cpu(bd->type2.done_flag) &
		SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
	if (unlikely(req->err_type || done != SEC_SQE_DONE ||
	    (ctx->alg_type == SEC_SKCIPHER && flag != SEC_SQE_CFLAG) ||
	    (ctx->alg_type == SEC_AEAD && flag != SEC_SQE_AEAD_FLAG))) {
		dev_err(SEC_CTX_DEV(ctx),
			"err_type[%d],done[%d],flag[%d]\n",
			req->err_type, done, flag);
		err = -EIO;
	}

	if (ctx->alg_type == SEC_AEAD && !req->c_req.encrypt)
		err = sec_aead_verify(req, qp_ctx);

	atomic64_inc(&ctx->sec->debug.dfx.recv_cnt);

	ctx->req_op->buf_unmap(ctx, req);

	ctx->req_op->callback(ctx, req, err);
}
static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int ret;

	mutex_lock(&qp_ctx->req_lock);
	ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
	mutex_unlock(&qp_ctx->req_lock);
	atomic64_inc(&ctx->sec->debug.dfx.send_cnt);

	if (unlikely(ret == -EBUSY))
		return -ENOBUFS;

	if (!ret) {
		if (req->fake_busy)
			ret = -EBUSY;
		else
			ret = -EINPROGRESS;
	}

	return ret;
}
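/*
 * Like other backlog-aware async crypto drivers, a successful submission
 * completes later through sec_req_cb(), so the "success" results a caller
 * sees here are -EINPROGRESS, or -EBUSY once the fake-busy backlog
 * threshold has been crossed; a genuinely full hardware queue is reported
 * as -ENOBUFS.
 */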
/* Get DMA memory resources */
static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	int i;

	res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
					 &res->c_ivin_dma, GFP_KERNEL);
	if (!res->c_ivin)
		return -ENOMEM;

	for (i = 1; i < QM_Q_DEPTH; i++) {
		res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
		res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
	}

	return 0;
}
static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->c_ivin)
		dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
				  res->c_ivin, res->c_ivin_dma);
}
static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
{
	int i;

	res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
					  &res->out_mac_dma, GFP_KERNEL);
	if (!res->out_mac)
		return -ENOMEM;

	for (i = 1; i < QM_Q_DEPTH; i++) {
		res[i].out_mac_dma = res->out_mac_dma +
				     i * (SEC_MAX_MAC_LEN << 1);
		res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1);
	}

	return 0;
}
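/*
 * Twice SEC_MAX_MAC_LEN is reserved per request: the first half receives
 * the MAC computed by the hardware, while the second half is the scratch
 * area sec_aead_verify() copies the expected MAC into for comparison.
 */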
static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->out_mac)
		dma_free_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
				  res->out_mac, res->out_mac_dma);
}
static int sec_alg_resource_alloc(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = SEC_CTX_DEV(ctx);
	struct sec_alg_res *res = qp_ctx->res;
	int ret;

	ret = sec_alloc_civ_resource(dev, res);
	if (ret)
		return ret;

	if (ctx->alg_type == SEC_AEAD) {
		ret = sec_alloc_mac_resource(dev, res);
		if (ret)
			goto alloc_fail;
	}

	return 0;

alloc_fail:
	sec_free_civ_resource(dev, res);

	return ret;
}
static void sec_alg_resource_free(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = SEC_CTX_DEV(ctx);

	sec_free_civ_resource(dev, qp_ctx->res);

	if (ctx->alg_type == SEC_AEAD)
		sec_free_mac_resource(dev, qp_ctx->res);
}
static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
			     int qp_ctx_id, int alg_type)
{
	struct device *dev = SEC_CTX_DEV(ctx);
	struct sec_qp_ctx *qp_ctx;
	struct hisi_qp *qp;
	int ret;

	qp = hisi_qm_create_qp(qm, alg_type);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	qp_ctx = &ctx->qp_ctx[qp_ctx_id];
	qp_ctx->qp = qp;
	qp_ctx->ctx = ctx;
	qp->qp_ctx = qp_ctx;
	qp->req_cb = sec_req_cb;

	mutex_init(&qp_ctx->req_lock);
	atomic_set(&qp_ctx->pending_reqs, 0);
	idr_init(&qp_ctx->req_idr);

	qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
						     SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_in_pool)) {
		dev_err(dev, "fail to create sgl pool for input!\n");
		ret = PTR_ERR(qp_ctx->c_in_pool);
		goto err_destroy_idr;
	}

	qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
						      SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_out_pool)) {
		dev_err(dev, "fail to create sgl pool for output!\n");
		ret = PTR_ERR(qp_ctx->c_out_pool);
		goto err_free_c_in_pool;
	}

	ret = sec_alg_resource_alloc(ctx, qp_ctx);
	if (ret)
		goto err_free_c_out_pool;

	ret = hisi_qm_start_qp(qp, 0);
	if (ret < 0)
		goto err_resource_free;

	return 0;

err_resource_free:
	sec_alg_resource_free(ctx, qp_ctx);
err_free_c_out_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
err_free_c_in_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
err_destroy_idr:
	idr_destroy(&qp_ctx->req_idr);
	hisi_qm_release_qp(qp);

	return ret;
}
static void sec_release_qp_ctx(struct sec_ctx *ctx,
			       struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = SEC_CTX_DEV(ctx);

	hisi_qm_stop_qp(qp_ctx->qp);
	sec_alg_resource_free(ctx, qp_ctx);

	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);

	idr_destroy(&qp_ctx->req_idr);
	hisi_qm_release_qp(qp_ctx->qp);
}
static int sec_ctx_base_init(struct sec_ctx *ctx)
{
	struct sec_dev *sec;
	int i, ret;

	sec = sec_find_device(cpu_to_node(smp_processor_id()));
	if (!sec) {
		pr_err("Can not find proper Hisilicon SEC device!\n");
		return -ENODEV;
	}
	ctx->sec = sec;
	ctx->hlf_q_num = sec->ctx_q_num >> 1;

	/* Half of queue depth is taken as fake requests limit in the queue. */
	ctx->fake_req_limit = QM_Q_DEPTH >> 1;
	ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
			      GFP_KERNEL);
	if (!ctx->qp_ctx)
		return -ENOMEM;

	for (i = 0; i < sec->ctx_q_num; i++) {
		ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0);
		if (ret)
			goto err_sec_release_qp_ctx;
	}

	return 0;

err_sec_release_qp_ctx:
	for (i = i - 1; i >= 0; i--)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);

	kfree(ctx->qp_ctx);
	return ret;
}
static void sec_ctx_base_uninit(struct sec_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->sec->ctx_q_num; i++)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);

	kfree(ctx->qp_ctx);
}
static int sec_cipher_init(struct sec_ctx *ctx)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	c_ctx->c_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
					  &c_ctx->c_key_dma, GFP_KERNEL);
	if (!c_ctx->c_key)
		return -ENOMEM;

	return 0;
}
static void sec_cipher_uninit(struct sec_ctx *ctx)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
	dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
			  c_ctx->c_key, c_ctx->c_key_dma);
}
static int sec_auth_init(struct sec_ctx *ctx)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	a_ctx->a_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
					  &a_ctx->a_key_dma, GFP_KERNEL);
	if (!a_ctx->a_key)
		return -ENOMEM;

	return 0;
}
static void sec_auth_uninit(struct sec_ctx *ctx)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	memzero_explicit(a_ctx->a_key, SEC_MAX_KEY_SIZE);
	dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
			  a_ctx->a_key, a_ctx->a_key_dma);
}
static int sec_skcipher_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ctx->alg_type = SEC_SKCIPHER;
	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
	ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
	if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
		dev_err(SEC_CTX_DEV(ctx), "get error skcipher iv size!\n");
		return -EINVAL;
	}

	ret = sec_ctx_base_init(ctx);
	if (ret)
		return ret;

	ret = sec_cipher_init(ctx);
	if (ret)
		goto err_cipher_init;

	return 0;

err_cipher_init:
	sec_ctx_base_uninit(ctx);

	return ret;
}
static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);

	sec_cipher_uninit(ctx);
	sec_ctx_base_uninit(ctx);
}
static int sec_skcipher_3des_setkey(struct sec_cipher_ctx *c_ctx,
				    const u32 keylen,
				    const enum sec_cmode c_mode)
{
	switch (keylen) {
	case SEC_DES3_2KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
		break;
	case SEC_DES3_3KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_3KEY;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
				       const u32 keylen,
				       const enum sec_cmode c_mode)
{
	if (c_mode == SEC_CMODE_XTS) {
		switch (keylen) {
		case SEC_XTS_MIN_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_128BIT;
			break;
		case SEC_XTS_MAX_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_256BIT;
			break;
		default:
			pr_err("hisi_sec2: xts mode key error!\n");
			return -EINVAL;
		}
	} else {
		switch (keylen) {
		case AES_KEYSIZE_128:
			c_ctx->c_key_len = SEC_CKEY_128BIT;
			break;
		case AES_KEYSIZE_192:
			c_ctx->c_key_len = SEC_CKEY_192BIT;
			break;
		case AES_KEYSIZE_256:
			c_ctx->c_key_len = SEC_CKEY_256BIT;
			break;
		default:
			pr_err("hisi_sec2: aes key error!\n");
			return -EINVAL;
		}
	}

	return 0;
}
static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       const u32 keylen, const enum sec_calg c_alg,
			       const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	int ret;

	if (c_mode == SEC_CMODE_XTS) {
		ret = xts_verify_key(tfm, key, keylen);
		if (ret) {
			dev_err(SEC_CTX_DEV(ctx), "xts mode key err!\n");
			return ret;
		}
	}

	c_ctx->c_alg  = c_alg;
	c_ctx->c_mode = c_mode;

	switch (c_alg) {
	case SEC_CALG_3DES:
		ret = sec_skcipher_3des_setkey(c_ctx, keylen, c_mode);
		break;
	case SEC_CALG_AES:
	case SEC_CALG_SM4:
		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
		break;
	default:
		return -EINVAL;
	}

	if (ret) {
		dev_err(SEC_CTX_DEV(ctx), "set sec key err!\n");
		return ret;
	}

	memcpy(c_ctx->c_key, key, keylen);

	return 0;
}
#define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode)			\
static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
	u32 keylen)							\
{									\
	return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode);	\
}

GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)

GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)

GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)
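/*
 * For reference, GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
 * above expands to roughly:
 *
 *	static int sec_setkey_aes_cbc(struct crypto_skcipher *tfm,
 *				      const u8 *key, u32 keylen)
 *	{
 *		return sec_skcipher_setkey(tfm, key, keylen,
 *					   SEC_CALG_AES, SEC_CMODE_CBC);
 *	}
 *
 * i.e. one thin wrapper per algorithm/mode pair, referenced from the
 * sec_skciphers[] table below.
 */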
static int sec_cipher_map(struct device *dev, struct sec_req *req,
			  struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;

	c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
						    qp_ctx->c_in_pool,
						    req->req_id,
						    &c_req->c_in_dma);
	if (IS_ERR(c_req->c_in)) {
		dev_err(dev, "fail to dma map input sgl buffers!\n");
		return PTR_ERR(c_req->c_in);
	}

	if (dst == src) {
		c_req->c_out = c_req->c_in;
		c_req->c_out_dma = c_req->c_in_dma;
	} else {
		c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
							     qp_ctx->c_out_pool,
							     req->req_id,
							     &c_req->c_out_dma);
		if (IS_ERR(c_req->c_out)) {
			dev_err(dev, "fail to dma map output sgl buffers!\n");
			hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);
			return PTR_ERR(c_req->c_out);
		}
	}

	return 0;
}
static void sec_cipher_unmap(struct device *dev, struct sec_cipher_req *req,
			     struct scatterlist *src, struct scatterlist *dst)
{
	if (dst != src)
		hisi_acc_sg_buf_unmap(dev, src, req->c_in);

	hisi_acc_sg_buf_unmap(dev, dst, req->c_out);
}
static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sq = req->c_req.sk_req;

	return sec_cipher_map(SEC_CTX_DEV(ctx), req, sq->src, sq->dst);
}

static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
	struct device *dev = SEC_CTX_DEV(ctx);
	struct sec_cipher_req *c_req = &req->c_req;
	struct skcipher_request *sk_req = c_req->sk_req;

	sec_cipher_unmap(dev, c_req, sk_req->src, sk_req->dst);
}
static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx,
				struct crypto_authenc_keys *keys)
{
	switch (keys->enckeylen) {
	case AES_KEYSIZE_128:
		c_ctx->c_key_len = SEC_CKEY_128BIT;
		break;
	case AES_KEYSIZE_192:
		c_ctx->c_key_len = SEC_CKEY_192BIT;
		break;
	case AES_KEYSIZE_256:
		c_ctx->c_key_len = SEC_CKEY_256BIT;
		break;
	default:
		pr_err("hisi_sec2: aead aes key error!\n");
		return -EINVAL;
	}
	memcpy(c_ctx->c_key, keys->enckey, keys->enckeylen);

	return 0;
}
static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
				 struct crypto_authenc_keys *keys)
{
	struct crypto_shash *hash_tfm = ctx->hash_tfm;
	SHASH_DESC_ON_STACK(shash, hash_tfm);
	int blocksize, ret;

	if (!keys->authkeylen) {
		pr_err("hisi_sec2: aead auth key error!\n");
		return -EINVAL;
	}

	/* Hash overlong keys down to one block, per HMAC convention */
	blocksize = crypto_shash_blocksize(hash_tfm);
	if (keys->authkeylen > blocksize) {
		shash->tfm = hash_tfm;
		ret = crypto_shash_digest(shash, keys->authkey,
					  keys->authkeylen, ctx->a_key);
		if (ret) {
			pr_err("hisi_sec2: aead auth digest error!\n");
			return -EINVAL;
		}
		ctx->a_key_len = blocksize;
	} else {
		memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
		ctx->a_key_len = keys->authkeylen;
	}

	return 0;
}
static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			   const u32 keylen, const enum sec_hash_alg a_alg,
			   const enum sec_calg c_alg,
			   const enum sec_mac_len mac_len,
			   const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct crypto_authenc_keys keys;
	int ret;

	ctx->a_ctx.a_alg = a_alg;
	ctx->c_ctx.c_alg = c_alg;
	ctx->a_ctx.mac_len = mac_len;
	c_ctx->c_mode = c_mode;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	ret = sec_aead_aes_set_key(c_ctx, &keys);
	if (ret) {
		dev_err(SEC_CTX_DEV(ctx), "set sec cipher key err!\n");
		goto bad_key;
	}

	ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys);
	if (ret) {
		dev_err(SEC_CTX_DEV(ctx), "set sec auth key err!\n");
		goto bad_key;
	}

	return 0;

bad_key:
	memzero_explicit(&keys, sizeof(struct crypto_authenc_keys));

	return -EINVAL;
}
#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode)	\
static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key,	\
	u32 keylen)							\
{									\
	return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\
}

GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1,
			 SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
			 SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
			 SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)
static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aq = req->aead_req.aead_req;

	return sec_cipher_map(SEC_CTX_DEV(ctx), req, aq->src, aq->dst);
}

static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
	struct device *dev = SEC_CTX_DEV(ctx);
	struct sec_cipher_req *cq = &req->c_req;
	struct aead_request *aq = req->aead_req.aead_req;

	sec_cipher_unmap(dev, cq, aq->src, aq->dst);
}
static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
{
	int ret;

	ret = ctx->req_op->buf_map(ctx, req);
	if (unlikely(ret))
		return ret;

	ctx->req_op->do_transfer(ctx, req);

	ret = ctx->req_op->bd_fill(ctx, req);
	if (unlikely(ret))
		goto unmap_req_buf;

	return ret;

unmap_req_buf:
	ctx->req_op->buf_unmap(ctx, req);

	return ret;
}

static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
{
	ctx->req_op->buf_unmap(ctx, req);
}
static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	u8 *c_ivin = req->qp_ctx->res[req->req_id].c_ivin;

	memcpy(c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
}
static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	u8 scene, sa_type, da_type;
	u8 bd_type, cipher;
	u8 de = 0;

	memset(sec_sqe, 0, sizeof(struct sec_sqe));

	sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
	sec_sqe->type2.c_ivin_addr =
		cpu_to_le64(req->qp_ctx->res[req->req_id].c_ivin_dma);
	sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma);
	sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);

	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
						 SEC_CMODE_OFFSET);
	sec_sqe->type2.c_alg = c_ctx->c_alg;
	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
						 SEC_CKEY_OFFSET);

	bd_type = SEC_BD_TYPE2;
	if (c_req->encrypt)
		cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET;
	else
		cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
	sec_sqe->type_cipher_auth = bd_type | cipher;

	sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
	scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
	if (c_req->c_in_dma != c_req->c_out_dma)
		de = 0x1 << SEC_DE_OFFSET;

	sec_sqe->sds_sa_type = (de | scene | sa_type);

	/* Just set the DST address type */
	da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
	sec_sqe->sdm_addr_type |= da_type;

	sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
	sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);

	return 0;
}
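/*
 * The type2 BD packs several fields into shared half-words using the
 * offsets defined at the top of this file: cipher mode (bit 12) and key
 * length (bit 9) share icvw_kmode, BD type and cipher direction share
 * type_cipher_auth, and the DE bit, scene and source SGL type share
 * sds_sa_type.
 */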
static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct skcipher_request *sk_req = req->c_req.sk_req;
	u32 iv_size = req->ctx->c_ctx.ivsize;
	struct scatterlist *sgl;
	unsigned int cryptlen;
	size_t sz;
	u8 *iv;

	if (req->c_req.encrypt)
		sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst;
	else
		sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src;

	if (alg_type == SEC_SKCIPHER) {
		iv = sk_req->iv;
		cryptlen = sk_req->cryptlen;
	} else {
		iv = aead_req->iv;
		cryptlen = aead_req->cryptlen;
	}

	sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
				cryptlen - iv_size);
	if (unlikely(sz != iv_size))
		dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n");
}
static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
				  int err)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;

	atomic_dec(&qp_ctx->pending_reqs);
	sec_free_req_id(req);

	/* IV is output only on encryption in CBC mode */
	if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
		sec_update_iv(req, SEC_SKCIPHER);

	if (req->fake_busy)
		sk_req->base.complete(&sk_req->base, -EINPROGRESS);

	sk_req->base.complete(&sk_req->base, err);
}
static void sec_aead_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	u8 *c_ivin = req->qp_ctx->res[req->req_id].c_ivin;

	memcpy(c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
}
static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
				struct sec_req *req, struct sec_sqe *sec_sqe)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct aead_request *aq = a_req->aead_req;

	sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);

	sec_sqe->type2.mac_key_alg =
			cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);

	sec_sqe->type2.mac_key_alg |=
			cpu_to_le32((u32)((ctx->a_key_len) /
			SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET);

	sec_sqe->type2.mac_key_alg |=
			cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);

	sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET;

	if (dir)
		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
	else
		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;

	sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen);

	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);

	sec_sqe->type2.mac_addr =
		cpu_to_le64(req->qp_ctx->res[req->req_id].out_mac_dma);
}
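/*
 * MAC length and auth key length are programmed in units of
 * SEC_SQE_LEN_RATE (4-byte words), hence the divisions above; "dir"
 * clears or sets the low bit of sds_sa_type to select cipher-then-auth
 * (encryption) versus auth-then-cipher (decryption) processing order.
 */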
static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	int ret;

	ret = sec_skcipher_bd_fill(ctx, req);
	if (unlikely(ret)) {
		dev_err(SEC_CTX_DEV(ctx), "skcipher bd fill error!\n");
		return ret;
	}

	sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);

	return 0;
}
static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
{
	struct aead_request *a_req = req->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	struct sec_cipher_req *c_req = &req->c_req;
	size_t authsize = crypto_aead_authsize(tfm);
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	size_t sz;

	atomic_dec(&qp_ctx->pending_reqs);

	if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
		sec_update_iv(req, SEC_AEAD);

	/* Copy the output MAC behind the ciphertext on encryption */
	if (!err && c_req->encrypt) {
		struct scatterlist *sgl = a_req->dst;

		sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
					  qp_ctx->res[req->req_id].out_mac,
					  authsize, a_req->cryptlen +
					  a_req->assoclen);
		if (unlikely(sz != authsize)) {
			dev_err(SEC_CTX_DEV(req->ctx), "copy out mac err!\n");
			err = -EINVAL;
		}
	}

	sec_free_req_id(req);

	if (req->fake_busy)
		a_req->base.complete(&a_req->base, -EINPROGRESS);

	a_req->base.complete(&a_req->base, err);
}
static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;

	atomic_dec(&qp_ctx->pending_reqs);
	sec_free_req_id(req);
	sec_free_queue_id(ctx, req);
}
static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx;
	int queue_id;

	/* To load balance */
	queue_id = sec_alloc_queue_id(ctx, req);
	qp_ctx = &ctx->qp_ctx[queue_id];

	req->req_id = sec_alloc_req_id(req, qp_ctx);
	if (unlikely(req->req_id < 0)) {
		sec_free_queue_id(ctx, req);
		return req->req_id;
	}

	if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs))
		req->fake_busy = true;
	else
		req->fake_busy = false;

	return 0;
}
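/*
 * Once a queue holds fake_req_limit (half the queue depth) outstanding
 * requests, new ones are still accepted but flagged fake_busy: their
 * submitter sees -EBUSY and the completion path first signals
 * -EINPROGRESS, following the crypto API backlog convention.
 */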
static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
{
	int ret;

	ret = sec_request_init(ctx, req);
	if (unlikely(ret))
		return ret;

	ret = sec_request_transfer(ctx, req);
	if (unlikely(ret))
		goto err_uninit_req;

	/*
	 * For CBC decryption the next IV is the last ciphertext block, so
	 * output it before the data may be overwritten in place.
	 */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
		sec_update_iv(req, ctx->alg_type);

	ret = ctx->req_op->bd_send(ctx, req);
	if (unlikely(ret != -EBUSY && ret != -EINPROGRESS)) {
		dev_err_ratelimited(SEC_CTX_DEV(ctx), "send sec request failed!\n");
		goto err_send_req;
	}

	return ret;

err_send_req:
	/* On failure, restore the user's IV from the saved input IV */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
		if (ctx->alg_type == SEC_SKCIPHER)
			memcpy(req->c_req.sk_req->iv,
			       req->qp_ctx->res[req->req_id].c_ivin,
			       ctx->c_ctx.ivsize);
		else
			memcpy(req->aead_req.aead_req->iv,
			       req->qp_ctx->res[req->req_id].c_ivin,
			       ctx->c_ctx.ivsize);
	}

	sec_request_untransfer(ctx, req);
err_uninit_req:
	sec_request_uninit(ctx, req);

	return ret;
}
static const struct sec_req_op sec_skcipher_req_ops = {
	.buf_map	= sec_skcipher_sgl_map,
	.buf_unmap	= sec_skcipher_sgl_unmap,
	.do_transfer	= sec_skcipher_copy_iv,
	.bd_fill	= sec_skcipher_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_skcipher_callback,
	.process	= sec_process,
};

static const struct sec_req_op sec_aead_req_ops = {
	.buf_map	= sec_aead_sgl_map,
	.buf_unmap	= sec_aead_sgl_unmap,
	.do_transfer	= sec_aead_copy_iv,
	.bd_fill	= sec_aead_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_aead_callback,
	.process	= sec_process,
};
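/*
 * The two ops tables give skcipher and AEAD a common request pipeline
 * (sec_process / sec_bd_send) that differs only in the buffer-mapping,
 * IV-transfer, BD-fill and completion hooks bound here.
 */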
static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->req_op = &sec_skcipher_req_ops;

	return sec_skcipher_init(tfm);
}

static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
{
	sec_skcipher_uninit(tfm);
}
static int sec_aead_init(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	crypto_aead_set_reqsize(tfm, sizeof(struct sec_req));
	ctx->alg_type = SEC_AEAD;
	ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
	if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
		dev_err(SEC_CTX_DEV(ctx), "get error aead iv size!\n");
		return -EINVAL;
	}

	ctx->req_op = &sec_aead_req_ops;
	ret = sec_ctx_base_init(ctx);
	if (ret)
		return ret;

	ret = sec_auth_init(ctx);
	if (ret)
		goto err_auth_init;

	ret = sec_cipher_init(ctx);
	if (ret)
		goto err_cipher_init;

	return 0;

err_cipher_init:
	sec_auth_uninit(ctx);
err_auth_init:
	sec_ctx_base_uninit(ctx);

	return ret;
}
static void sec_aead_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	sec_cipher_uninit(ctx);
	sec_auth_uninit(ctx);
	sec_ctx_base_uninit(ctx);
}
static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	int ret;

	ret = sec_aead_init(tfm);
	if (ret) {
		pr_err("hisi_sec2: aead init error!\n");
		return ret;
	}

	auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(auth_ctx->hash_tfm)) {
		dev_err(SEC_CTX_DEV(ctx), "aead alloc shash error!\n");
		sec_aead_exit(tfm);
		return PTR_ERR(auth_ctx->hash_tfm);
	}

	return 0;
}
static void sec_aead_ctx_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_shash(ctx->a_ctx.hash_tfm);
	sec_aead_exit(tfm);
}
static int sec_aead_sha1_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha1");
}

static int sec_aead_sha256_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha256");
}

static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha512");
}
static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct skcipher_request *sk_req = sreq->c_req.sk_req;
	struct device *dev = SEC_CTX_DEV(ctx);
	u8 c_alg = ctx->c_ctx.c_alg;

	if (unlikely(!sk_req->src || !sk_req->dst)) {
		dev_err(dev, "skcipher input param error!\n");
		return -EINVAL;
	}

	sreq->c_req.c_len = sk_req->cryptlen;
	if (c_alg == SEC_CALG_3DES) {
		if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
			dev_err(dev, "skcipher 3des input length error!\n");
			return -EINVAL;
		}
		return 0;
	} else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
		if (unlikely(sk_req->cryptlen & (AES_BLOCK_SIZE - 1))) {
			dev_err(dev, "skcipher aes input length error!\n");
			return -EINVAL;
		}
		return 0;
	}

	dev_err(dev, "skcipher algorithm error!\n");

	return -EINVAL;
}
static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
	struct sec_req *req = skcipher_request_ctx(sk_req);
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	if (!sk_req->cryptlen)
		return 0;

	req->c_req.sk_req = sk_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;

	ret = sec_skcipher_param_check(ctx, req);
	if (unlikely(ret))
		return -EINVAL;

	return ctx->req_op->process(ctx, req);
}
static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, true);
}

static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, false);
}
#define SEC_SKCIPHER_GEN_ALG(sec_cra_name, sec_set_key, sec_min_key_size, \
	sec_max_key_size, ctx_init, ctx_exit, blk_size, iv_size)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = ctx_init,\
	.exit = ctx_exit,\
	.setkey = sec_set_key,\
	.decrypt = sec_skcipher_decrypt,\
	.encrypt = sec_skcipher_encrypt,\
	.min_keysize = sec_min_key_size,\
	.max_keysize = sec_max_key_size,\
	.ivsize = iv_size,\
},

#define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \
	max_key_size, blk_size, iv_size) \
	SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
	sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)
static struct skcipher_alg sec_skciphers[] = {
	SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb,
			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, 0)

	SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc,
			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts,
			 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
			 SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
			 DES3_EDE_BLOCK_SIZE, 0)

	SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
			 SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
			 DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,
			 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
			 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)
};
static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	u8 c_alg = ctx->c_ctx.c_alg;
	struct aead_request *req = sreq->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	size_t authsize = crypto_aead_authsize(tfm);

	if (unlikely(!req->src || !req->dst || !req->cryptlen)) {
		dev_err(SEC_CTX_DEV(ctx), "aead input param error!\n");
		return -EINVAL;
	}

	/* Support AES only */
	if (unlikely(c_alg != SEC_CALG_AES)) {
		dev_err(SEC_CTX_DEV(ctx), "aead crypto alg error!\n");
		return -EINVAL;
	}

	if (sreq->c_req.encrypt)
		sreq->c_req.c_len = req->cryptlen;
	else
		sreq->c_req.c_len = req->cryptlen - authsize;

	if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
		dev_err(SEC_CTX_DEV(ctx), "aead crypto length error!\n");
		return -EINVAL;
	}

	return 0;
}
static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	struct sec_req *req = aead_request_ctx(a_req);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	req->aead_req.aead_req = a_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;

	ret = sec_aead_param_check(ctx, req);
	if (unlikely(ret))
		return -EINVAL;

	return ctx->req_op->process(ctx, req);
}
static int sec_aead_encrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, true);
}

static int sec_aead_decrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, false);
}
#define SEC_AEAD_GEN_ALG(sec_cra_name, sec_set_key, ctx_init,\
			 ctx_exit, blk_size, iv_size, max_authsize)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = ctx_init,\
	.exit = ctx_exit,\
	.setkey = sec_set_key,\
	.decrypt = sec_aead_decrypt,\
	.encrypt = sec_aead_encrypt,\
	.ivsize = iv_size,\
	.maxauthsize = max_authsize,\
}

#define SEC_AEAD_ALG(algname, keyfunc, aead_init, blksize, ivsize, authsize)\
	SEC_AEAD_GEN_ALG(algname, keyfunc, aead_init,\
			 sec_aead_ctx_exit, blksize, ivsize, authsize)
static struct aead_alg sec_aeads[] = {
	SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))",
		     sec_setkey_aes_cbc_sha1, sec_aead_sha1_ctx_init,
		     AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),

	SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))",
		     sec_setkey_aes_cbc_sha256, sec_aead_sha256_ctx_init,
		     AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),

	SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))",
		     sec_setkey_aes_cbc_sha512, sec_aead_sha512_ctx_init,
		     AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
};
int sec_register_to_crypto(void)
{
	int ret = 0;

	/* Register only once, however many devices probe */
	if (atomic_add_return(1, &sec_active_devs) == 1) {
		ret = crypto_register_skciphers(sec_skciphers,
						ARRAY_SIZE(sec_skciphers));
		if (ret)
			return ret;

		ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
		if (ret)
			goto reg_aead_fail;
	}

	return ret;

reg_aead_fail:
	crypto_unregister_skciphers(sec_skciphers, ARRAY_SIZE(sec_skciphers));

	return ret;
}
void sec_unregister_from_crypto(void)
{
	if (atomic_sub_return(1, &sec_active_devs) == 0) {
		crypto_unregister_skciphers(sec_skciphers,
					    ARRAY_SIZE(sec_skciphers));
		crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
	}
}