// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <crypto/aes.h>
#include <crypto/aead.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <crypto/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/des.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>

#include "sec_crypto.h"

#define SEC_PRIORITY		4001
#define SEC_XTS_MIN_KEY_SIZE	(2 * AES_MIN_KEY_SIZE)
#define SEC_XTS_MID_KEY_SIZE	(3 * AES_MIN_KEY_SIZE)
#define SEC_XTS_MAX_KEY_SIZE	(2 * AES_MAX_KEY_SIZE)
#define SEC_DES3_2KEY_SIZE	(2 * DES_KEY_SIZE)
#define SEC_DES3_3KEY_SIZE	(3 * DES_KEY_SIZE)

/* SEC sqe(bd) bit operational relative MACRO */
#define SEC_DE_OFFSET		1
#define SEC_CIPHER_OFFSET	4
#define SEC_SCENE_OFFSET	3
#define SEC_DST_SGL_OFFSET	2
#define SEC_SRC_SGL_OFFSET	7
#define SEC_CKEY_OFFSET		9
#define SEC_CMODE_OFFSET	12
#define SEC_AKEY_OFFSET		5
#define SEC_AEAD_ALG_OFFSET	11
#define SEC_AUTH_OFFSET		6

#define SEC_DE_OFFSET_V3	9
#define SEC_SCENE_OFFSET_V3	5
#define SEC_CKEY_OFFSET_V3	13
#define SEC_CTR_CNT_OFFSET	25
#define SEC_CTR_CNT_ROLLOVER	2
#define SEC_SRC_SGL_OFFSET_V3	11
#define SEC_DST_SGL_OFFSET_V3	14
#define SEC_CALG_OFFSET_V3	4
#define SEC_AKEY_OFFSET_V3	9
#define SEC_MAC_OFFSET_V3	4
#define SEC_AUTH_ALG_OFFSET_V3	15
#define SEC_CIPHER_AUTH_V3	0xbf
#define SEC_AUTH_CIPHER_V3	0x40
#define SEC_FLAG_OFFSET		7
#define SEC_FLAG_MASK		0x0780
#define SEC_TYPE_MASK		0x0F
#define SEC_DONE_MASK		0x0001
#define SEC_ICV_MASK		0x000E
#define SEC_SQE_LEN_RATE_MASK	0x3

#define SEC_TOTAL_IV_SZ(depth)	(SEC_IV_SIZE * (depth))
#define SEC_SGL_SGE_NR		128
#define SEC_CIPHER_AUTH		0xfe
#define SEC_AUTH_CIPHER		0x1
#define SEC_MAX_MAC_LEN		64
#define SEC_MAX_AAD_LEN		65535
#define SEC_MAX_CCM_AAD_LEN	65279
#define SEC_TOTAL_MAC_SZ(depth)	(SEC_MAX_MAC_LEN * (depth))

#define SEC_PBUF_SZ		512
#define SEC_PBUF_IV_OFFSET	SEC_PBUF_SZ
#define SEC_PBUF_MAC_OFFSET	(SEC_PBUF_SZ + SEC_IV_SIZE)
#define SEC_PBUF_PKG		(SEC_PBUF_SZ + SEC_IV_SIZE + \
				 SEC_MAX_MAC_LEN * 2)
#define SEC_PBUF_NUM		(PAGE_SIZE / SEC_PBUF_PKG)
#define SEC_PBUF_PAGE_NUM(depth)	((depth) / SEC_PBUF_NUM)
#define SEC_PBUF_LEFT_SZ(depth)		(SEC_PBUF_PKG * ((depth) - \
					SEC_PBUF_PAGE_NUM(depth) * SEC_PBUF_NUM))
#define SEC_TOTAL_PBUF_SZ(depth)	(PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) + \
					SEC_PBUF_LEFT_SZ(depth))

#define SEC_SQE_LEN_RATE	4
#define SEC_SQE_CFLAG		2
#define SEC_SQE_AEAD_FLAG	3
#define SEC_SQE_DONE		0x1
#define SEC_ICV_ERR		0x2
#define MAC_LEN_MASK		0x1U
#define MAX_INPUT_DATA_LEN	0xFFFE00
#define BITS_MASK		0xFF

#define SEC_XTS_NAME_SZ		0x3
#define IV_CM_CAL_NUM		2
#define IV_CL_MASK		0x7
#define IV_FLAGS_OFFSET		0x6
#define IV_CM_OFFSET		0x3
#define IV_LAST_BYTE1		1
#define IV_LAST_BYTE2		2
#define IV_LAST_BYTE_MASK	0xFF
#define IV_CTR_INIT		0x1
#define IV_BYTE_OFFSET		0x8

static DEFINE_MUTEX(sec_algs_lock);
static unsigned int sec_available_devs;

struct sec_skcipher {
	struct skcipher_alg alg;
/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
static inline u32 sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
	if (req->c_req.encrypt)
		return (u32)atomic_inc_return(&ctx->enc_qcyclic) %

	return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +

static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
	if (req->c_req.encrypt)
		atomic_dec(&ctx->enc_qcyclic);
	atomic_dec(&ctx->dec_qcyclic);
static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
	spin_lock_bh(&qp_ctx->req_lock);
	req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 0, qp_ctx->qp->sq_depth, GFP_ATOMIC);
	spin_unlock_bh(&qp_ctx->req_lock);
	if (unlikely(req_id < 0)) {
		dev_err(req->ctx->dev, "alloc req id fail!\n");

	req->qp_ctx = qp_ctx;
	qp_ctx->req_list[req_id] = req;

static void sec_free_req_id(struct sec_req *req)
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int req_id = req->req_id;

	if (unlikely(req_id < 0 || req_id >= qp_ctx->qp->sq_depth)) {
		dev_err(req->ctx->dev, "free request id invalid!\n");

	qp_ctx->req_list[req_id] = NULL;

	spin_lock_bh(&qp_ctx->req_lock);
	idr_remove(&qp_ctx->req_idr, req_id);
	spin_unlock_bh(&qp_ctx->req_lock);
static u8 pre_parse_finished_bd(struct bd_status *status, void *resp)
	struct sec_sqe *bd = resp;

	status->done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
	status->icv = (le16_to_cpu(bd->type2.done_flag) & SEC_ICV_MASK) >> 1;
	status->flag = (le16_to_cpu(bd->type2.done_flag) &
			SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
	status->tag = le16_to_cpu(bd->type2.tag);
	status->err_type = bd->type2.error_type;

	return bd->type_cipher_auth & SEC_TYPE_MASK;

static u8 pre_parse_finished_bd3(struct bd_status *status, void *resp)
	struct sec_sqe3 *bd3 = resp;

	status->done = le16_to_cpu(bd3->done_flag) & SEC_DONE_MASK;
	status->icv = (le16_to_cpu(bd3->done_flag) & SEC_ICV_MASK) >> 1;
	status->flag = (le16_to_cpu(bd3->done_flag) &
			SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
	status->tag = le64_to_cpu(bd3->tag);
	status->err_type = bd3->error_type;

	return le32_to_cpu(bd3->bd_param) & SEC_TYPE_MASK;
static int sec_cb_status_check(struct sec_req *req,
			       struct bd_status *status)
	struct sec_ctx *ctx = req->ctx;

	if (unlikely(req->err_type || status->done != SEC_SQE_DONE)) {
		dev_err_ratelimited(ctx->dev, "err_type[%d], done[%u]\n",
				    req->err_type, status->done);

	if (unlikely(ctx->alg_type == SEC_SKCIPHER)) {
		if (unlikely(status->flag != SEC_SQE_CFLAG)) {
			dev_err_ratelimited(ctx->dev, "flag[%u]\n",
	} else if (unlikely(ctx->alg_type == SEC_AEAD)) {
		if (unlikely(status->flag != SEC_SQE_AEAD_FLAG ||
			     status->icv == SEC_ICV_ERR)) {
			dev_err_ratelimited(ctx->dev,
					    "flag[%u], icv[%u]\n",
					    status->flag, status->icv);
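/*
 * QM completion callback: parse the finished BD (v2 or v3), look up the
 * owning request, check its completion status and hand it off to the
 * algorithm-specific callback.
 */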
static void sec_req_cb(struct hisi_qp *qp, void *resp)
	struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
	struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx;
	u8 type_supported = qp_ctx->ctx->type_supported;
	struct bd_status status;

	if (type_supported == SEC_BD_TYPE2) {
		type = pre_parse_finished_bd(&status, resp);
		req = qp_ctx->req_list[status.tag];
		type = pre_parse_finished_bd3(&status, resp);
		req = (void *)(uintptr_t)status.tag;

	if (unlikely(type != type_supported)) {
		atomic64_inc(&dfx->err_bd_cnt);
		pr_err("err bd type [%u]\n", type);

	if (unlikely(!req)) {
		atomic64_inc(&dfx->invalid_req_cnt);
		atomic_inc(&qp->qp_status.used);

	req->err_type = status.err_type;

	err = sec_cb_status_check(req, &status);
	atomic64_inc(&dfx->done_flag_cnt);

	atomic64_inc(&dfx->recv_cnt);

	ctx->req_op->buf_unmap(ctx, req);

	ctx->req_op->callback(ctx, req, err);
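/*
 * Send one BD to the hardware queue. Requests submitted while the queue is
 * above the fake request limit are tracked on the backlog list so that they
 * can be signalled later, once the queue drains.
 */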
static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;

	if (ctx->fake_req_limit <=
	    atomic_read(&qp_ctx->qp->qp_status.used) &&
	    !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG))

	spin_lock_bh(&qp_ctx->req_lock);
	ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
	if (ctx->fake_req_limit <=
	    atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
		list_add_tail(&req->backlog_head, &qp_ctx->backlog);
		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
		atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
		spin_unlock_bh(&qp_ctx->req_lock);

	spin_unlock_bh(&qp_ctx->req_lock);

	if (unlikely(ret == -EBUSY))

	atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
/* Get DMA memory resources */
static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
	u16 q_depth = res->depth;

	res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth),
					 &res->c_ivin_dma, GFP_KERNEL);
	for (i = 1; i < q_depth; i++) {
		res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
		res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;

static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
	dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth),
			  res->c_ivin, res->c_ivin_dma);

static int sec_alloc_aiv_resource(struct device *dev, struct sec_alg_res *res)
	u16 q_depth = res->depth;

	res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth),
					 &res->a_ivin_dma, GFP_KERNEL);
	for (i = 1; i < q_depth; i++) {
		res[i].a_ivin_dma = res->a_ivin_dma + i * SEC_IV_SIZE;
		res[i].a_ivin = res->a_ivin + i * SEC_IV_SIZE;

static void sec_free_aiv_resource(struct device *dev, struct sec_alg_res *res)
	dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth),
			  res->a_ivin, res->a_ivin_dma);

static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
	u16 q_depth = res->depth;

	res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ(q_depth) << 1,
					  &res->out_mac_dma, GFP_KERNEL);
	for (i = 1; i < q_depth; i++) {
		res[i].out_mac_dma = res->out_mac_dma +
				     i * (SEC_MAX_MAC_LEN << 1);
		res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1);

static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
	dma_free_coherent(dev, SEC_TOTAL_MAC_SZ(res->depth) << 1,
			  res->out_mac, res->out_mac_dma);

static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
	dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ(res->depth),
			  res->pbuf, res->pbuf_dma);
/*
 * To improve performance, a pre-allocated pbuffer is used for small packets
 * (< 512 bytes) when IOMMU translation is in use.
 */
static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
	u16 q_depth = res->depth;
	int size = SEC_PBUF_PAGE_NUM(q_depth);
	int pbuf_page_offset;

	res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ(q_depth),
				       &res->pbuf_dma, GFP_KERNEL);
	/*
	 * SEC_PBUF_PKG contains data pbuf, iv and
	 * out_mac : <SEC_PBUF|SEC_IV|SEC_MAC>
	 * Every PAGE contains six SEC_PBUF_PKG
	 * The sec_qp_ctx contains QM_Q_DEPTH numbers of SEC_PBUF_PKG
	 * So we need SEC_PBUF_PAGE_NUM numbers of PAGE
	 * for the SEC_TOTAL_PBUF_SZ
	 */
	for (i = 0; i <= size; i++) {
		pbuf_page_offset = PAGE_SIZE * i;
		for (j = 0; j < SEC_PBUF_NUM; j++) {
			k = i * SEC_PBUF_NUM + j;
			res[k].pbuf = res->pbuf +
				      j * SEC_PBUF_PKG + pbuf_page_offset;
			res[k].pbuf_dma = res->pbuf_dma +
					  j * SEC_PBUF_PKG + pbuf_page_offset;
static int sec_alg_resource_alloc(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
	struct sec_alg_res *res = qp_ctx->res;
	struct device *dev = ctx->dev;

	ret = sec_alloc_civ_resource(dev, res);
	if (ctx->alg_type == SEC_AEAD) {
		ret = sec_alloc_aiv_resource(dev, res);
		ret = sec_alloc_mac_resource(dev, res);
	if (ctx->pbuf_supported) {
		ret = sec_alloc_pbuf_resource(dev, res);
			dev_err(dev, "fail to alloc pbuf dma resource!\n");
			goto alloc_pbuf_fail;
	if (ctx->alg_type == SEC_AEAD)
		sec_free_mac_resource(dev, qp_ctx->res);
	if (ctx->alg_type == SEC_AEAD)
		sec_free_aiv_resource(dev, res);
	sec_free_civ_resource(dev, res);

static void sec_alg_resource_free(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
	struct device *dev = ctx->dev;

	sec_free_civ_resource(dev, qp_ctx->res);

	if (ctx->pbuf_supported)
		sec_free_pbuf_resource(dev, qp_ctx->res);
	if (ctx->alg_type == SEC_AEAD) {
		sec_free_mac_resource(dev, qp_ctx->res);
		sec_free_aiv_resource(dev, qp_ctx->res);

static int sec_alloc_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
	u16 q_depth = qp_ctx->qp->sq_depth;
	struct device *dev = ctx->dev;

	qp_ctx->req_list = kcalloc(q_depth, sizeof(struct sec_req *), GFP_KERNEL);
	if (!qp_ctx->req_list)

	qp_ctx->res = kcalloc(q_depth, sizeof(struct sec_alg_res), GFP_KERNEL);
		goto err_free_req_list;
	qp_ctx->res->depth = q_depth;

	qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_in_pool)) {
		dev_err(dev, "fail to create sgl pool for input!\n");

	qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_out_pool)) {
		dev_err(dev, "fail to create sgl pool for output!\n");
		goto err_free_c_in_pool;

	ret = sec_alg_resource_alloc(ctx, qp_ctx);
		goto err_free_c_out_pool;

	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
	kfree(qp_ctx->req_list);

static void sec_free_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
	struct device *dev = ctx->dev;

	sec_alg_resource_free(ctx, qp_ctx);
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
	kfree(qp_ctx->req_list);
static int sec_create_qp_ctx(struct sec_ctx *ctx, int qp_ctx_id)
	struct sec_qp_ctx *qp_ctx;

	qp_ctx = &ctx->qp_ctx[qp_ctx_id];
	qp = ctx->qps[qp_ctx_id];
	qp->req_cb = sec_req_cb;

	spin_lock_init(&qp_ctx->req_lock);
	idr_init(&qp_ctx->req_idr);
	INIT_LIST_HEAD(&qp_ctx->backlog);

	ret = sec_alloc_qp_ctx_resource(ctx, qp_ctx);
		goto err_destroy_idr;

	ret = hisi_qm_start_qp(qp, 0);
		goto err_resource_free;

	sec_free_qp_ctx_resource(ctx, qp_ctx);
	idr_destroy(&qp_ctx->req_idr);

static void sec_release_qp_ctx(struct sec_ctx *ctx,
			       struct sec_qp_ctx *qp_ctx)
	hisi_qm_stop_qp(qp_ctx->qp);
	sec_free_qp_ctx_resource(ctx, qp_ctx);
	idr_destroy(&qp_ctx->req_idr);

static int sec_ctx_base_init(struct sec_ctx *ctx)
	ctx->qps = sec_create_qps();
		pr_err("Can not create sec qps!\n");

	sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
	ctx->dev = &sec->qm.pdev->dev;
	ctx->hlf_q_num = sec->ctx_q_num >> 1;

	ctx->pbuf_supported = ctx->sec->iommu_used;

	/* Half of queue depth is taken as fake requests limit in the queue. */
	ctx->fake_req_limit = ctx->qps[0]->sq_depth >> 1;
	ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
		goto err_destroy_qps;

	for (i = 0; i < sec->ctx_q_num; i++) {
		ret = sec_create_qp_ctx(ctx, i);
			goto err_sec_release_qp_ctx;

err_sec_release_qp_ctx:
	for (i = i - 1; i >= 0; i--)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
	sec_destroy_qps(ctx->qps, sec->ctx_q_num);

static void sec_ctx_base_uninit(struct sec_ctx *ctx)
	for (i = 0; i < ctx->sec->ctx_q_num; i++)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);

	sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num);
static int sec_cipher_init(struct sec_ctx *ctx)
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
					  &c_ctx->c_key_dma, GFP_KERNEL);

static void sec_cipher_uninit(struct sec_ctx *ctx)
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
	dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
			  c_ctx->c_key, c_ctx->c_key_dma);

static int sec_auth_init(struct sec_ctx *ctx)
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_AKEY_SIZE,
					  &a_ctx->a_key_dma, GFP_KERNEL);

static void sec_auth_uninit(struct sec_ctx *ctx)
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	memzero_explicit(a_ctx->a_key, SEC_MAX_AKEY_SIZE);
	dma_free_coherent(ctx->dev, SEC_MAX_AKEY_SIZE,
			  a_ctx->a_key, a_ctx->a_key_dma);
static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm)
	const char *alg = crypto_tfm_alg_name(&tfm->base);
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	c_ctx->fallback = false;

	/* Currently, only XTS mode needs a fallback tfm, for 192-bit keys */
	if (likely(strncmp(alg, "xts", SEC_XTS_NAME_SZ)))

	c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0,
						  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(c_ctx->fbtfm)) {
		pr_err("failed to alloc xts mode fallback tfm!\n");
		return PTR_ERR(c_ctx->fbtfm);

static int sec_skcipher_init(struct crypto_skcipher *tfm)
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->alg_type = SEC_SKCIPHER;
	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
	ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
	if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
		pr_err("get error skcipher iv size!\n");

	ret = sec_ctx_base_init(ctx);
	ret = sec_cipher_init(ctx);
		goto err_cipher_init;

	ret = sec_skcipher_fbtfm_init(tfm);
	sec_cipher_uninit(ctx);
	sec_ctx_base_uninit(ctx);

static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (ctx->c_ctx.fbtfm)
		crypto_free_sync_skcipher(ctx->c_ctx.fbtfm);

	sec_cipher_uninit(ctx);
	sec_ctx_base_uninit(ctx);

static int sec_skcipher_3des_setkey(struct crypto_skcipher *tfm, const u8 *key, const u32 keylen)
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	ret = verify_skcipher_des3_key(tfm, key);
	case SEC_DES3_2KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
	case SEC_DES3_3KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_3KEY;
static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
				       const enum sec_cmode c_mode)
	if (c_mode == SEC_CMODE_XTS) {
		case SEC_XTS_MIN_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_128BIT;
		case SEC_XTS_MID_KEY_SIZE:
			c_ctx->fallback = true;
		case SEC_XTS_MAX_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_256BIT;
			pr_err("hisi_sec2: xts mode key error!\n");

	if (c_ctx->c_alg == SEC_CALG_SM4 &&
	    keylen != AES_KEYSIZE_128) {
		pr_err("hisi_sec2: sm4 key error!\n");

	case AES_KEYSIZE_128:
		c_ctx->c_key_len = SEC_CKEY_128BIT;
	case AES_KEYSIZE_192:
		c_ctx->c_key_len = SEC_CKEY_192BIT;
	case AES_KEYSIZE_256:
		c_ctx->c_key_len = SEC_CKEY_256BIT;
		pr_err("hisi_sec2: aes key error!\n");

static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       const u32 keylen, const enum sec_calg c_alg,
			       const enum sec_cmode c_mode)
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct device *dev = ctx->dev;

	if (c_mode == SEC_CMODE_XTS) {
		ret = xts_verify_key(tfm, key, keylen);
			dev_err(dev, "xts mode key err!\n");

	c_ctx->c_alg = c_alg;
	c_ctx->c_mode = c_mode;

	ret = sec_skcipher_3des_setkey(tfm, key, keylen);
	ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
		dev_err(dev, "sec c_alg err!\n");
		dev_err(dev, "set sec key err!\n");

	memcpy(c_ctx->c_key, key, keylen);
	if (c_ctx->fallback && c_ctx->fbtfm) {
		ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen);
			dev_err(dev, "failed to set fallback skcipher key!\n");
#define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode)				\
static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,	\
	return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode);		\

GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(aes_ctr, SEC_CALG_AES, SEC_CMODE_CTR)
GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(sm4_ctr, SEC_CALG_SM4, SEC_CMODE_CTR)
static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
			       struct scatterlist *src)
	struct sec_aead_req *a_req = &req->aead_req;
	struct aead_request *aead_req = a_req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct device *dev = ctx->dev;
	int copy_size, pbuf_length;
	int req_id = req->req_id;
	struct crypto_aead *tfm;

	if (ctx->alg_type == SEC_AEAD)
		copy_size = aead_req->cryptlen + aead_req->assoclen;
		copy_size = c_req->c_len;

	pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
					qp_ctx->res[req_id].pbuf, copy_size);
	if (unlikely(pbuf_length != copy_size)) {
		dev_err(dev, "copy src data to pbuf error!\n");

	if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
		tfm = crypto_aead_reqtfm(aead_req);
		authsize = crypto_aead_authsize(tfm);
		mac_offset = qp_ctx->res[req_id].pbuf + copy_size - authsize;
		memcpy(a_req->out_mac, mac_offset, authsize);

	req->in_dma = qp_ctx->res[req_id].pbuf_dma;
	c_req->c_out_dma = req->in_dma;

static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
				  struct scatterlist *dst)
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int copy_size, pbuf_length;
	int req_id = req->req_id;

	if (ctx->alg_type == SEC_AEAD)
		copy_size = c_req->c_len + aead_req->assoclen;
		copy_size = c_req->c_len;

	pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
					  qp_ctx->res[req_id].pbuf, copy_size);
	if (unlikely(pbuf_length != copy_size))
		dev_err(ctx->dev, "copy pbuf data to dst error!\n");
static int sec_aead_mac_init(struct sec_aead_req *req)
	struct aead_request *aead_req = req->aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
	size_t authsize = crypto_aead_authsize(tfm);
	u8 *mac_out = req->out_mac;
	struct scatterlist *sgl = aead_req->src;

	skip_size = aead_req->assoclen + aead_req->cryptlen - authsize;
	copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out,
				       authsize, skip_size);
	if (unlikely(copy_size != authsize))

static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
			  struct scatterlist *src, struct scatterlist *dst)
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct sec_alg_res *res = &qp_ctx->res[req->req_id];
	struct device *dev = ctx->dev;

	c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET;
	c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET;
	if (ctx->alg_type == SEC_AEAD) {
		a_req->a_ivin = res->a_ivin;
		a_req->a_ivin_dma = res->a_ivin_dma;
		a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET;
		a_req->out_mac_dma = res->pbuf_dma +
	ret = sec_cipher_pbuf_map(ctx, req, src);

	c_req->c_ivin = res->c_ivin;
	c_req->c_ivin_dma = res->c_ivin_dma;
	if (ctx->alg_type == SEC_AEAD) {
		a_req->a_ivin = res->a_ivin;
		a_req->a_ivin_dma = res->a_ivin_dma;
		a_req->out_mac = res->out_mac;
		a_req->out_mac_dma = res->out_mac_dma;

	req->in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
	if (IS_ERR(req->in)) {
		dev_err(dev, "fail to dma map input sgl buffers!\n");
		return PTR_ERR(req->in);

	if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
		ret = sec_aead_mac_init(a_req);
		if (unlikely(ret)) {
			dev_err(dev, "fail to init mac data for ICV!\n");
			hisi_acc_sg_buf_unmap(dev, src, req->in);

	c_req->c_out = req->in;
	c_req->c_out_dma = req->in_dma;

	c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
	if (IS_ERR(c_req->c_out)) {
		dev_err(dev, "fail to dma map output sgl buffers!\n");
		hisi_acc_sg_buf_unmap(dev, src, req->in);
		return PTR_ERR(c_req->c_out);

static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
			     struct scatterlist *src, struct scatterlist *dst)
	struct sec_cipher_req *c_req = &req->c_req;
	struct device *dev = ctx->dev;

	if (req->use_pbuf) {
		sec_cipher_pbuf_unmap(ctx, req, dst);
		hisi_acc_sg_buf_unmap(dev, src, req->in);
		hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);

static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
	struct skcipher_request *sq = req->c_req.sk_req;

	return sec_cipher_map(ctx, req, sq->src, sq->dst);

static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
	struct skcipher_request *sq = req->c_req.sk_req;

	sec_cipher_unmap(ctx, req, sq->src, sq->dst);
static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx,
				struct crypto_authenc_keys *keys)
	switch (keys->enckeylen) {
	case AES_KEYSIZE_128:
		c_ctx->c_key_len = SEC_CKEY_128BIT;
	case AES_KEYSIZE_192:
		c_ctx->c_key_len = SEC_CKEY_192BIT;
	case AES_KEYSIZE_256:
		c_ctx->c_key_len = SEC_CKEY_256BIT;
		pr_err("hisi_sec2: aead aes key error!\n");

	memcpy(c_ctx->c_key, keys->enckey, keys->enckeylen);

static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
				 struct crypto_authenc_keys *keys)
	struct crypto_shash *hash_tfm = ctx->hash_tfm;
	int blocksize, digestsize, ret;

	if (!keys->authkeylen) {
		pr_err("hisi_sec2: aead auth key error!\n");

	blocksize = crypto_shash_blocksize(hash_tfm);
	digestsize = crypto_shash_digestsize(hash_tfm);
	if (keys->authkeylen > blocksize) {
		ret = crypto_shash_tfm_digest(hash_tfm, keys->authkey,
					      keys->authkeylen, ctx->a_key);
			pr_err("hisi_sec2: aead auth digest error!\n");
		ctx->a_key_len = digestsize;
		memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
		ctx->a_key_len = keys->authkeylen;

static int sec_aead_setauthsize(struct crypto_aead *aead, unsigned int authsize)
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct sec_ctx *ctx = crypto_tfm_ctx(tfm);
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	if (unlikely(a_ctx->fallback_aead_tfm))
		return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);

static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
				    struct crypto_aead *tfm, const u8 *key,
				    unsigned int keylen)
	crypto_aead_clear_flags(a_ctx->fallback_aead_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(a_ctx->fallback_aead_tfm,
			      crypto_aead_get_flags(tfm) & CRYPTO_TFM_REQ_MASK);
	return crypto_aead_setkey(a_ctx->fallback_aead_tfm, key, keylen);
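/*
 * Set the AEAD key: CCM/GCM take the raw cipher key, while the authenc
 * algorithms split the key into cipher and authentication parts.
 */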
static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			   const u32 keylen, const enum sec_hash_alg a_alg,
			   const enum sec_calg c_alg,
			   const enum sec_mac_len mac_len,
			   const enum sec_cmode c_mode)
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;

	ctx->a_ctx.a_alg = a_alg;
	ctx->c_ctx.c_alg = c_alg;
	ctx->a_ctx.mac_len = mac_len;
	c_ctx->c_mode = c_mode;

	if (c_mode == SEC_CMODE_CCM || c_mode == SEC_CMODE_GCM) {
		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
			dev_err(dev, "set sec aes ccm cipher key err!\n");
		memcpy(c_ctx->c_key, key, keylen);

		if (unlikely(a_ctx->fallback_aead_tfm)) {
			ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);

	ret = crypto_authenc_extractkeys(&keys, key, keylen);

	ret = sec_aead_aes_set_key(c_ctx, &keys);
		dev_err(dev, "set sec cipher key err!\n");

	ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys);
		dev_err(dev, "set sec auth key err!\n");

	if ((ctx->a_ctx.mac_len & SEC_SQE_LEN_RATE_MASK) ||
	    (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK)) {
		dev_err(dev, "MAC or AUTH key length error!\n");

	memzero_explicit(&keys, sizeof(struct crypto_authenc_keys));

#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode)	\
static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key,	\
	return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\

GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1,
			 SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
			 SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
			 SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES,
			 SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES,
			 SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4,
			 SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4,
			 SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
	struct aead_request *aq = req->aead_req.aead_req;

	return sec_cipher_map(ctx, req, aq->src, aq->dst);

static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
	struct aead_request *aq = req->aead_req.aead_req;

	sec_cipher_unmap(ctx, req, aq->src, aq->dst);
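/* Map the request buffers, set up the IV and fill the BD through the per-algorithm request ops. */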
static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
	ret = ctx->req_op->buf_map(ctx, req);
	ctx->req_op->do_transfer(ctx, req);

	ret = ctx->req_op->bd_fill(ctx, req);
	ctx->req_op->buf_unmap(ctx, req);

static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
	ctx->req_op->buf_unmap(ctx, req);

static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_cipher_req *c_req = &req->c_req;

	memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
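/* Fill a type2 (v2 hardware) BD for a symmetric cipher request. */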
static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	u8 scene, sa_type, da_type;

	memset(sec_sqe, 0, sizeof(struct sec_sqe));

	sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
	sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
	sec_sqe->type2.data_src_addr = cpu_to_le64(req->in_dma);
	sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);

	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
	sec_sqe->type2.c_alg = c_ctx->c_alg;
	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) <<

	bd_type = SEC_BD_TYPE2;
		cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET;
		cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
	sec_sqe->type_cipher_auth = bd_type | cipher;

	/* Set destination and source address type */
	if (req->use_pbuf) {
		sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET;
		da_type = SEC_PBUF << SEC_DST_SGL_OFFSET;
		sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
		da_type = SEC_SGL << SEC_DST_SGL_OFFSET;

	sec_sqe->sdm_addr_type |= da_type;
	scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
	if (req->in_dma != c_req->c_out_dma)
		de = 0x1 << SEC_DE_OFFSET;

	sec_sqe->sds_sa_type = (de | scene | sa_type);

	sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
	sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);
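/* Fill a type3 (v3 hardware) BD for a symmetric cipher request. */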
static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
	struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3;
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct sec_cipher_req *c_req = &req->c_req;

	memset(sec_sqe3, 0, sizeof(struct sec_sqe3));

	sec_sqe3->c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
	sec_sqe3->no_scene.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
	sec_sqe3->data_src_addr = cpu_to_le64(req->in_dma);
	sec_sqe3->data_dst_addr = cpu_to_le64(c_req->c_out_dma);

	sec_sqe3->c_mode_alg = ((u8)c_ctx->c_alg << SEC_CALG_OFFSET_V3) |
	sec_sqe3->c_icv_key |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
					   SEC_CKEY_OFFSET_V3);

		cipher = SEC_CIPHER_ENC;
		cipher = SEC_CIPHER_DEC;
	sec_sqe3->c_icv_key |= cpu_to_le16(cipher);

	/* Set the CTR counter mode to 128-bit rollover */
	sec_sqe3->auth_mac_key = cpu_to_le32((u32)SEC_CTR_CNT_ROLLOVER <<
					     SEC_CTR_CNT_OFFSET);
	if (req->use_pbuf) {
		bd_param |= SEC_PBUF << SEC_SRC_SGL_OFFSET_V3;
		bd_param |= SEC_PBUF << SEC_DST_SGL_OFFSET_V3;
		bd_param |= SEC_SGL << SEC_SRC_SGL_OFFSET_V3;
		bd_param |= SEC_SGL << SEC_DST_SGL_OFFSET_V3;

	bd_param |= SEC_COMM_SCENE << SEC_SCENE_OFFSET_V3;
	if (req->in_dma != c_req->c_out_dma)
		bd_param |= 0x1 << SEC_DE_OFFSET_V3;

	bd_param |= SEC_BD_TYPE3;
	sec_sqe3->bd_param = cpu_to_le32(bd_param);

	sec_sqe3->c_len_ivin |= cpu_to_le32(c_req->c_len);
	sec_sqe3->tag = cpu_to_le64((unsigned long)req);
/* increment counter (128-bit int) */
static void ctr_iv_inc(__u8 *counter, __u8 bits, __u32 nums)
		nums += counter[bits];
		counter[bits] = nums & BITS_MASK;
	} while (bits && nums);
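/*
 * Return the next IV to the caller's request: for CBC copy the last
 * ciphertext block, for CTR advance the counter by the number of
 * processed blocks.
 */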
static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct skcipher_request *sk_req = req->c_req.sk_req;
	u32 iv_size = req->ctx->c_ctx.ivsize;
	struct scatterlist *sgl;
	unsigned int cryptlen;

	if (req->c_req.encrypt)
		sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst;
		sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src;

	if (alg_type == SEC_SKCIPHER) {
		cryptlen = sk_req->cryptlen;
		cryptlen = aead_req->cryptlen;

	if (req->ctx->c_ctx.c_mode == SEC_CMODE_CBC) {
		sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
					cryptlen - iv_size);
		if (unlikely(sz != iv_size))
			dev_err(req->ctx->dev, "copy output iv error!\n");
		sz = cryptlen / iv_size;
		if (cryptlen % iv_size)
		ctr_iv_inc(iv, iv_size, sz);
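/* Pop one backlogged request once the queue has drained below the fake request limit. */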
static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
					  struct sec_qp_ctx *qp_ctx)
	struct sec_req *backlog_req = NULL;

	spin_lock_bh(&qp_ctx->req_lock);
	if (ctx->fake_req_limit >=
	    atomic_read(&qp_ctx->qp->qp_status.used) &&
	    !list_empty(&qp_ctx->backlog)) {
		backlog_req = list_first_entry(&qp_ctx->backlog,
					       typeof(*backlog_req), backlog_head);
		list_del(&backlog_req->backlog_head);
	spin_unlock_bh(&qp_ctx->req_lock);
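/*
 * Completion handler for skcipher requests: update the IV after encryption,
 * release the request id and restart one backlogged request.
 */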
static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct skcipher_request *backlog_sk_req;
	struct sec_req *backlog_req;

	sec_free_req_id(req);

	/* The IV is output after encryption in CBC/CTR mode */
	if (!err && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
	    ctx->c_ctx.c_mode == SEC_CMODE_CTR) && req->c_req.encrypt)
		sec_update_iv(req, SEC_SKCIPHER);

	backlog_req = sec_back_req_clear(ctx, qp_ctx);
		backlog_sk_req = backlog_req->c_req.sk_req;
		skcipher_request_complete(backlog_sk_req, -EINPROGRESS);
		atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt);

	skcipher_request_complete(sk_req, err);
static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_aead_req *a_req = &req->aead_req;
	size_t authsize = ctx->a_ctx.mac_len;
	u32 data_size = aead_req->cryptlen;

	/* the specification has been checked in aead_iv_demension_check() */
	cl = c_req->c_ivin[0] + 1;
	c_req->c_ivin[ctx->c_ctx.ivsize - cl] = 0x00;
	memset(&c_req->c_ivin[ctx->c_ctx.ivsize - cl], 0, cl);
	c_req->c_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] = IV_CTR_INIT;

	/* the last 3 bits are L' */
	flage |= c_req->c_ivin[0] & IV_CL_MASK;
	/* M' occupies bits 3~5, the Flags field is bit 6 */
	cm = (authsize - IV_CM_CAL_NUM) / IV_CM_CAL_NUM;
	flage |= cm << IV_CM_OFFSET;
	if (aead_req->assoclen)
		flage |= 0x01 << IV_FLAGS_OFFSET;

	memcpy(a_req->a_ivin, c_req->c_ivin, ctx->c_ctx.ivsize);
	a_req->a_ivin[0] = flage;

	/*
	 * The last 32 bits hold the counter's initial value, but the nonce
	 * uses the first 16 bits; the remaining 16 bits are filled with the
	 * cipher text length.
	 */
	if (!c_req->encrypt)
		data_size = aead_req->cryptlen - authsize;

	a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] =
			data_size & IV_LAST_BYTE_MASK;
	data_size >>= IV_BYTE_OFFSET;
	a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE2] =
			data_size & IV_LAST_BYTE_MASK;
static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
	size_t authsize = crypto_aead_authsize(tfm);
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_aead_req *a_req = &req->aead_req;

	memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);

	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM) {
		/*
		 * CCM 16Byte Cipher_IV: {1B_Flag, 13B_IV, 2B_counter},
		 * the counter must be set to 0x01
		 */
		ctx->a_ctx.mac_len = authsize;
		/* CCM 16Byte Auth_IV: {1B_AFlag, 13B_IV, 2B_Ptext_length} */
		set_aead_auth_iv(ctx, req);

	/* GCM 12Byte Cipher_IV == Auth_IV */
	if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
		ctx->a_ctx.mac_len = authsize;
		memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE);
static void sec_auth_bd_fill_xcm(struct sec_auth_ctx *ctx, int dir,
				 struct sec_req *req, struct sec_sqe *sec_sqe)
	struct sec_aead_req *a_req = &req->aead_req;
	struct aead_request *aq = a_req->aead_req;

	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
	sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)ctx->mac_len);

	/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
	sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr;
	sec_sqe->type2.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma);
	sec_sqe->type_cipher_auth |= SEC_NO_AUTH << SEC_AUTH_OFFSET;

		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;

	sec_sqe->type2.alen_ivllen = cpu_to_le32(aq->assoclen);
	sec_sqe->type2.auth_src_offset = cpu_to_le16(0x0);
	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);

	sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);

static void sec_auth_bd_fill_xcm_v3(struct sec_auth_ctx *ctx, int dir,
				    struct sec_req *req, struct sec_sqe3 *sqe3)
	struct sec_aead_req *a_req = &req->aead_req;
	struct aead_request *aq = a_req->aead_req;

	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
	sqe3->c_icv_key |= cpu_to_le16((u16)ctx->mac_len << SEC_MAC_OFFSET_V3);

	/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
	sqe3->a_key_addr = sqe3->c_key_addr;
	sqe3->auth_ivin.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma);
	sqe3->auth_mac_key |= SEC_NO_AUTH;

		sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3;
		sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3;

	sqe3->a_len_key = cpu_to_le32(aq->assoclen);
	sqe3->auth_src_offset = cpu_to_le16(0x0);
	sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
	sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma);
static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
				struct sec_req *req, struct sec_sqe *sec_sqe)
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct aead_request *aq = a_req->aead_req;

	sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);

	sec_sqe->type2.mac_key_alg =
			cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);

	sec_sqe->type2.mac_key_alg |=
			cpu_to_le32((u32)((ctx->a_key_len) /
			SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET);

	sec_sqe->type2.mac_key_alg |=
			cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);

		sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET;
		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
		sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE2 << SEC_AUTH_OFFSET;
		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;

	sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen);

	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);

	sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);

static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	struct sec_sqe *sec_sqe = &req->sec_sqe;

	ret = sec_skcipher_bd_fill(ctx, req);
	if (unlikely(ret)) {
		dev_err(ctx->dev, "skcipher bd fill is error!\n");

	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM ||
	    ctx->c_ctx.c_mode == SEC_CMODE_GCM)
		sec_auth_bd_fill_xcm(auth_ctx, req->c_req.encrypt, req, sec_sqe);
		sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);
static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir,
				   struct sec_req *req, struct sec_sqe3 *sqe3)
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct aead_request *aq = a_req->aead_req;

	sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma);

	sqe3->auth_mac_key |=
			cpu_to_le32((u32)(ctx->mac_len /
			SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3);

	sqe3->auth_mac_key |=
			cpu_to_le32((u32)(ctx->a_key_len /
			SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET_V3);

	sqe3->auth_mac_key |=
			cpu_to_le32((u32)(ctx->a_alg) << SEC_AUTH_ALG_OFFSET_V3);

		sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE1);
		sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3;
		sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE2);
		sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3;

	sqe3->a_len_key = cpu_to_le32(c_req->c_len + aq->assoclen);

	sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen);

	sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma);

static int sec_aead_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3;

	ret = sec_skcipher_bd_fill_v3(ctx, req);
	if (unlikely(ret)) {
		dev_err(ctx->dev, "skcipher bd3 fill is error!\n");

	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM ||
	    ctx->c_ctx.c_mode == SEC_CMODE_GCM)
		sec_auth_bd_fill_xcm_v3(auth_ctx, req->c_req.encrypt,
		sec_auth_bd_fill_ex_v3(auth_ctx, req->c_req.encrypt,
static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
	struct aead_request *a_req = req->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	struct sec_aead_req *aead_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	size_t authsize = crypto_aead_authsize(tfm);
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct aead_request *backlog_aead_req;
	struct sec_req *backlog_req;

	if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
		sec_update_iv(req, SEC_AEAD);

	/* Copy output mac */
	if (!err && c_req->encrypt) {
		struct scatterlist *sgl = a_req->dst;

		sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
					  authsize, a_req->cryptlen +
		if (unlikely(sz != authsize)) {
			dev_err(c->dev, "copy out mac err!\n");

	sec_free_req_id(req);

	backlog_req = sec_back_req_clear(c, qp_ctx);
		backlog_aead_req = backlog_req->aead_req.aead_req;
		aead_request_complete(backlog_aead_req, -EINPROGRESS);
		atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt);

	aead_request_complete(a_req, err);
static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
	sec_free_req_id(req);
	sec_free_queue_id(ctx, req);

static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
	struct sec_qp_ctx *qp_ctx;

	/* To load balance */
	queue_id = sec_alloc_queue_id(ctx, req);
	qp_ctx = &ctx->qp_ctx[queue_id];

	req->req_id = sec_alloc_req_id(req, qp_ctx);
	if (unlikely(req->req_id < 0)) {
		sec_free_queue_id(ctx, req);
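/*
 * Common request path: allocate queue and request ids, map the buffers,
 * fill the BD and send it to the hardware, cleaning up on failure.
 */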
static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
	struct sec_cipher_req *c_req = &req->c_req;

	ret = sec_request_init(ctx, req);
	ret = sec_request_transfer(ctx, req);
		goto err_uninit_req;

	/* Output the IV when decrypting (CBC/CTR mode) */
	if (!req->c_req.encrypt && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
	    ctx->c_ctx.c_mode == SEC_CMODE_CTR))
		sec_update_iv(req, ctx->alg_type);

	ret = ctx->req_op->bd_send(ctx, req);
	if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) ||
	    (ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
		dev_err_ratelimited(ctx->dev, "send sec request failed!\n");

	/* On failure, restore the original IV to the user request */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
		if (ctx->alg_type == SEC_SKCIPHER)
			memcpy(req->c_req.sk_req->iv, c_req->c_ivin,
			memcpy(req->aead_req.aead_req->iv, c_req->c_ivin,

	sec_request_untransfer(ctx, req);
	sec_request_uninit(ctx, req);
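/* Request operation tables for the v2 and v3 hardware BD formats. */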
static const struct sec_req_op sec_skcipher_req_ops = {
	.buf_map	= sec_skcipher_sgl_map,
	.buf_unmap	= sec_skcipher_sgl_unmap,
	.do_transfer	= sec_skcipher_copy_iv,
	.bd_fill	= sec_skcipher_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_skcipher_callback,
	.process	= sec_process,

static const struct sec_req_op sec_aead_req_ops = {
	.buf_map	= sec_aead_sgl_map,
	.buf_unmap	= sec_aead_sgl_unmap,
	.do_transfer	= sec_aead_set_iv,
	.bd_fill	= sec_aead_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_aead_callback,
	.process	= sec_process,

static const struct sec_req_op sec_skcipher_req_ops_v3 = {
	.buf_map	= sec_skcipher_sgl_map,
	.buf_unmap	= sec_skcipher_sgl_unmap,
	.do_transfer	= sec_skcipher_copy_iv,
	.bd_fill	= sec_skcipher_bd_fill_v3,
	.bd_send	= sec_bd_send,
	.callback	= sec_skcipher_callback,
	.process	= sec_process,

static const struct sec_req_op sec_aead_req_ops_v3 = {
	.buf_map	= sec_aead_sgl_map,
	.buf_unmap	= sec_aead_sgl_unmap,
	.do_transfer	= sec_aead_set_iv,
	.bd_fill	= sec_aead_bd_fill_v3,
	.bd_send	= sec_bd_send,
	.callback	= sec_aead_callback,
	.process	= sec_process,
static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);

	ret = sec_skcipher_init(tfm);
	if (ctx->sec->qm.ver < QM_HW_V3) {
		ctx->type_supported = SEC_BD_TYPE2;
		ctx->req_op = &sec_skcipher_req_ops;
		ctx->type_supported = SEC_BD_TYPE3;
		ctx->req_op = &sec_skcipher_req_ops_v3;

static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
	sec_skcipher_uninit(tfm);

static int sec_aead_init(struct crypto_aead *tfm)
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_aead_set_reqsize(tfm, sizeof(struct sec_req));
	ctx->alg_type = SEC_AEAD;
	ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
	if (ctx->c_ctx.ivsize < SEC_AIV_SIZE ||
	    ctx->c_ctx.ivsize > SEC_IV_SIZE) {
		pr_err("get error aead iv size!\n");

	ret = sec_ctx_base_init(ctx);
	if (ctx->sec->qm.ver < QM_HW_V3) {
		ctx->type_supported = SEC_BD_TYPE2;
		ctx->req_op = &sec_aead_req_ops;
		ctx->type_supported = SEC_BD_TYPE3;
		ctx->req_op = &sec_aead_req_ops_v3;

	ret = sec_auth_init(ctx);
	ret = sec_cipher_init(ctx);
		goto err_cipher_init;

	sec_auth_uninit(ctx);
	sec_ctx_base_uninit(ctx);

static void sec_aead_exit(struct crypto_aead *tfm)
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	sec_cipher_uninit(ctx);
	sec_auth_uninit(ctx);
	sec_ctx_base_uninit(ctx);
*tfm
, const char *hash_name
)
1932 struct sec_ctx
*ctx
= crypto_aead_ctx(tfm
);
1933 struct sec_auth_ctx
*auth_ctx
= &ctx
->a_ctx
;
1936 ret
= sec_aead_init(tfm
);
1938 pr_err("hisi_sec2: aead init error!\n");
1942 auth_ctx
->hash_tfm
= crypto_alloc_shash(hash_name
, 0, 0);
1943 if (IS_ERR(auth_ctx
->hash_tfm
)) {
1944 dev_err(ctx
->dev
, "aead alloc shash error!\n");
1946 return PTR_ERR(auth_ctx
->hash_tfm
);
1952 static void sec_aead_ctx_exit(struct crypto_aead
*tfm
)
1954 struct sec_ctx
*ctx
= crypto_aead_ctx(tfm
);
1956 crypto_free_shash(ctx
->a_ctx
.hash_tfm
);
1960 static int sec_aead_xcm_ctx_init(struct crypto_aead
*tfm
)
1962 struct aead_alg
*alg
= crypto_aead_alg(tfm
);
1963 struct sec_ctx
*ctx
= crypto_aead_ctx(tfm
);
1964 struct sec_auth_ctx
*a_ctx
= &ctx
->a_ctx
;
1965 const char *aead_name
= alg
->base
.cra_name
;
1968 ret
= sec_aead_init(tfm
);
1970 dev_err(ctx
->dev
, "hisi_sec2: aead xcm init error!\n");
1974 a_ctx
->fallback_aead_tfm
= crypto_alloc_aead(aead_name
, 0,
1975 CRYPTO_ALG_NEED_FALLBACK
|
1977 if (IS_ERR(a_ctx
->fallback_aead_tfm
)) {
1978 dev_err(ctx
->dev
, "aead driver alloc fallback tfm error!\n");
1980 return PTR_ERR(a_ctx
->fallback_aead_tfm
);
1982 a_ctx
->fallback
= false;
1987 static void sec_aead_xcm_ctx_exit(struct crypto_aead
*tfm
)
1989 struct sec_ctx
*ctx
= crypto_aead_ctx(tfm
);
1991 crypto_free_aead(ctx
->a_ctx
.fallback_aead_tfm
);
1995 static int sec_aead_sha1_ctx_init(struct crypto_aead
*tfm
)
1997 return sec_aead_ctx_init(tfm
, "sha1");
2000 static int sec_aead_sha256_ctx_init(struct crypto_aead
*tfm
)
2002 return sec_aead_ctx_init(tfm
, "sha256");
2005 static int sec_aead_sha512_ctx_init(struct crypto_aead
*tfm
)
2007 return sec_aead_ctx_init(tfm
, "sha512");
2010 static int sec_skcipher_cryptlen_check(struct sec_ctx
*ctx
,
2011 struct sec_req
*sreq
)
2013 u32 cryptlen
= sreq
->c_req
.sk_req
->cryptlen
;
2014 struct device
*dev
= ctx
->dev
;
2015 u8 c_mode
= ctx
->c_ctx
.c_mode
;
2020 if (unlikely(cryptlen
< AES_BLOCK_SIZE
)) {
2021 dev_err(dev
, "skcipher XTS mode input length error!\n");
2027 if (unlikely(cryptlen
& (AES_BLOCK_SIZE
- 1))) {
2028 dev_err(dev
, "skcipher AES input length error!\n");
2033 if (unlikely(ctx
->sec
->qm
.ver
< QM_HW_V3
)) {
2034 dev_err(dev
, "skcipher HW version error!\n");
2045 static int sec_skcipher_param_check(struct sec_ctx
*ctx
, struct sec_req
*sreq
)
2047 struct skcipher_request
*sk_req
= sreq
->c_req
.sk_req
;
2048 struct device
*dev
= ctx
->dev
;
2049 u8 c_alg
= ctx
->c_ctx
.c_alg
;
2051 if (unlikely(!sk_req
->src
|| !sk_req
->dst
||
2052 sk_req
->cryptlen
> MAX_INPUT_DATA_LEN
)) {
2053 dev_err(dev
, "skcipher input param error!\n");
2056 sreq
->c_req
.c_len
= sk_req
->cryptlen
;
2058 if (ctx
->pbuf_supported
&& sk_req
->cryptlen
<= SEC_PBUF_SZ
)
2059 sreq
->use_pbuf
= true;
2061 sreq
->use_pbuf
= false;
2063 if (c_alg
== SEC_CALG_3DES
) {
2064 if (unlikely(sk_req
->cryptlen
& (DES3_EDE_BLOCK_SIZE
- 1))) {
2065 dev_err(dev
, "skcipher 3des input length error!\n");
2069 } else if (c_alg
== SEC_CALG_AES
|| c_alg
== SEC_CALG_SM4
) {
2070 return sec_skcipher_cryptlen_check(ctx
, sreq
);
2073 dev_err(dev
, "skcipher algorithm error!\n");
2078 static int sec_skcipher_soft_crypto(struct sec_ctx
*ctx
,
2079 struct skcipher_request
*sreq
, bool encrypt
)
2081 struct sec_cipher_ctx
*c_ctx
= &ctx
->c_ctx
;
2082 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq
, c_ctx
->fbtfm
);
2083 struct device
*dev
= ctx
->dev
;
2086 if (!c_ctx
->fbtfm
) {
2087 dev_err_ratelimited(dev
, "the soft tfm isn't supported in the current system.\n");
2091 skcipher_request_set_sync_tfm(subreq
, c_ctx
->fbtfm
);
2093 /* software need sync mode to do crypto */
2094 skcipher_request_set_callback(subreq
, sreq
->base
.flags
,
2096 skcipher_request_set_crypt(subreq
, sreq
->src
, sreq
->dst
,
2097 sreq
->cryptlen
, sreq
->iv
);
2099 ret
= crypto_skcipher_encrypt(subreq
);
2101 ret
= crypto_skcipher_decrypt(subreq
);
2103 skcipher_request_zero(subreq
);
2108 static int sec_skcipher_crypto(struct skcipher_request
*sk_req
, bool encrypt
)
2110 struct crypto_skcipher
*tfm
= crypto_skcipher_reqtfm(sk_req
);
2111 struct sec_req
*req
= skcipher_request_ctx(sk_req
);
2112 struct sec_ctx
*ctx
= crypto_skcipher_ctx(tfm
);
2115 if (!sk_req
->cryptlen
) {
2116 if (ctx
->c_ctx
.c_mode
== SEC_CMODE_XTS
)
2121 req
->flag
= sk_req
->base
.flags
;
2122 req
->c_req
.sk_req
= sk_req
;
2123 req
->c_req
.encrypt
= encrypt
;
2126 ret
= sec_skcipher_param_check(ctx
, req
);
2130 if (unlikely(ctx
->c_ctx
.fallback
))
2131 return sec_skcipher_soft_crypto(ctx
, sk_req
, encrypt
);
2133 return ctx
->req_op
->process(ctx
, req
);
2136 static int sec_skcipher_encrypt(struct skcipher_request
*sk_req
)
2138 return sec_skcipher_crypto(sk_req
, true);
2141 static int sec_skcipher_decrypt(struct skcipher_request
*sk_req
)
2143 return sec_skcipher_crypto(sk_req
, false);
#define SEC_SKCIPHER_ALG(sec_cra_name, sec_set_key, \
	sec_min_key_size, sec_max_key_size, blk_size, iv_size)\
	.cra_name = sec_cra_name,\
	.cra_driver_name = "hisi_sec_"sec_cra_name,\
	.cra_priority = SEC_PRIORITY,\
	.cra_flags = CRYPTO_ALG_ASYNC |\
	 CRYPTO_ALG_NEED_FALLBACK,\
	.cra_blocksize = blk_size,\
	.cra_ctxsize = sizeof(struct sec_ctx),\
	.cra_module = THIS_MODULE,\
	.init = sec_skcipher_ctx_init,\
	.exit = sec_skcipher_ctx_exit,\
	.setkey = sec_set_key,\
	.decrypt = sec_skcipher_decrypt,\
	.encrypt = sec_skcipher_encrypt,\
	.min_keysize = sec_min_key_size,\
	.max_keysize = sec_max_key_size,\
[] = {
2172 .alg
= SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb
, AES_MIN_KEY_SIZE
,
2173 AES_MAX_KEY_SIZE
, AES_BLOCK_SIZE
, 0),
2177 .alg
= SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc
, AES_MIN_KEY_SIZE
,
2178 AES_MAX_KEY_SIZE
, AES_BLOCK_SIZE
, AES_BLOCK_SIZE
),
2182 .alg
= SEC_SKCIPHER_ALG("ctr(aes)", sec_setkey_aes_ctr
, AES_MIN_KEY_SIZE
,
2183 AES_MAX_KEY_SIZE
, SEC_MIN_BLOCK_SZ
, AES_BLOCK_SIZE
),
2187 .alg
= SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts
, SEC_XTS_MIN_KEY_SIZE
,
2188 SEC_XTS_MAX_KEY_SIZE
, AES_BLOCK_SIZE
, AES_BLOCK_SIZE
),
2192 .alg
= SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc
, AES_MIN_KEY_SIZE
,
2193 AES_MIN_KEY_SIZE
, AES_BLOCK_SIZE
, AES_BLOCK_SIZE
),
2197 .alg
= SEC_SKCIPHER_ALG("ctr(sm4)", sec_setkey_sm4_ctr
, AES_MIN_KEY_SIZE
,
2198 AES_MIN_KEY_SIZE
, SEC_MIN_BLOCK_SZ
, AES_BLOCK_SIZE
),
2202 .alg
= SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts
, SEC_XTS_MIN_KEY_SIZE
,
2203 SEC_XTS_MIN_KEY_SIZE
, AES_BLOCK_SIZE
, AES_BLOCK_SIZE
),
2207 .alg
= SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb
, SEC_DES3_3KEY_SIZE
,
2208 SEC_DES3_3KEY_SIZE
, DES3_EDE_BLOCK_SIZE
, 0),
2212 .alg
= SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc
, SEC_DES3_3KEY_SIZE
,
2213 SEC_DES3_3KEY_SIZE
, DES3_EDE_BLOCK_SIZE
,
2214 DES3_EDE_BLOCK_SIZE
),
2218 static int aead_iv_demension_check(struct aead_request
*aead_req
)
2222 cl
= aead_req
->iv
[0] + 1;
2223 if (cl
< IV_CL_MIN
|| cl
> IV_CL_MAX
)
2226 if (cl
< IV_CL_MID
&& aead_req
->cryptlen
>> (BYTE_BITS
* cl
))
static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct aead_request *req = sreq->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	size_t authsize = crypto_aead_authsize(tfm);
	u8 c_mode = ctx->c_ctx.c_mode;
	struct device *dev = ctx->dev;
	int ret;

	if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
	    req->assoclen > SEC_MAX_AAD_LEN)) {
		dev_err(dev, "aead input spec error!\n");
		return -EINVAL;
	}

	if (unlikely((c_mode == SEC_CMODE_GCM && authsize < DES_BLOCK_SIZE) ||
		     (c_mode == SEC_CMODE_CCM && (authsize < MIN_MAC_LEN ||
		      authsize & MAC_LEN_MASK)))) {
		dev_err(dev, "aead input mac length error!\n");
		return -EINVAL;
	}

	if (c_mode == SEC_CMODE_CCM) {
		if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN)) {
			dev_err_ratelimited(dev, "CCM input aad parameter is too long!\n");
			return -EINVAL;
		}

		ret = aead_iv_demension_check(req);
		if (ret) {
			dev_err(dev, "aead input iv param error!\n");
			return ret;
		}
	}

	if (sreq->c_req.encrypt)
		sreq->c_req.c_len = req->cryptlen;
	else
		sreq->c_req.c_len = req->cryptlen - authsize;
	if (c_mode == SEC_CMODE_CBC) {
		if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
			dev_err(dev, "aead crypto length error!\n");
			return -EINVAL;
		}
	}

	return 0;
}

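/*
 * Top-level AEAD parameter check: reject requests without src/dst
 * scatterlists, route corner cases that Kunpeng 920 (QM_HW_V2) cannot
 * handle to the software fallback, restrict the cipher to AES/SM4, and
 * decide whether the small-packet pbuf path can be used.
 */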
static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct aead_request *req = sreq->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	size_t authsize = crypto_aead_authsize(tfm);
	struct device *dev = ctx->dev;
	u8 c_alg = ctx->c_ctx.c_alg;

	if (unlikely(!req->src || !req->dst)) {
		dev_err(dev, "aead input param error!\n");
		return -EINVAL;
	}

	if (ctx->sec->qm.ver == QM_HW_V2) {
		if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
		    req->cryptlen <= authsize))) {
			ctx->a_ctx.fallback = true;
			return -EINVAL;
		}
	}

	/* Only AES and SM4 are supported */
	if (unlikely(c_alg != SEC_CALG_AES && c_alg != SEC_CALG_SM4)) {
		dev_err(dev, "aead crypto alg error!\n");
		return -EINVAL;
	}

	if (unlikely(sec_aead_spec_check(ctx, sreq)))
		return -EINVAL;

	if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
		SEC_PBUF_SZ)
		sreq->use_pbuf = true;
	else
		sreq->use_pbuf = false;

	return 0;
}

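/*
 * Software fallback for AEAD requests the hardware rejects. Unlike the
 * skcipher fallback above, this one allocates an asynchronous
 * sub-request and forwards the caller's completion callback to it.
 */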
static int sec_aead_soft_crypto(struct sec_ctx *ctx,
				struct aead_request *aead_req,
				bool encrypt)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
	struct device *dev = ctx->dev;
	struct aead_request *subreq;
	int ret;

	/* Kunpeng 920 AEAD mode does not support zero-size input */
	if (!a_ctx->fallback_aead_tfm) {
		dev_err(dev, "aead fallback tfm is NULL!\n");
		return -EINVAL;
	}

	subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL);
	if (!subreq)
		return -ENOMEM;

	aead_request_set_tfm(subreq, a_ctx->fallback_aead_tfm);
	aead_request_set_callback(subreq, aead_req->base.flags,
				  aead_req->base.complete, aead_req->base.data);
	aead_request_set_crypt(subreq, aead_req->src, aead_req->dst,
			       aead_req->cryptlen, aead_req->iv);
	aead_request_set_ad(subreq, aead_req->assoclen);

	if (encrypt)
		ret = crypto_aead_encrypt(subreq);
	else
		ret = crypto_aead_decrypt(subreq);
	aead_request_free(subreq);

	return ret;
}

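/*
 * Main AEAD entry point: on a parameter-check failure the request is
 * either retried through the software fallback (when the failure was a
 * known hardware limitation) or rejected; otherwise it is queued to the
 * hardware via the request ops.
 */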
static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	struct sec_req *req = aead_request_ctx(a_req);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	req->flag = a_req->base.flags;
	req->aead_req.aead_req = a_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;

	ret = sec_aead_param_check(ctx, req);
	if (unlikely(ret)) {
		if (ctx->a_ctx.fallback)
			return sec_aead_soft_crypto(ctx, a_req, encrypt);
		return -EINVAL;
	}

	return ctx->req_op->process(ctx, req);
}

static int sec_aead_encrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, true);
}

static int sec_aead_decrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, false);
}

#define SEC_AEAD_ALG(sec_cra_name, sec_set_key, ctx_init,\
		     ctx_exit, blk_size, iv_size, max_authsize)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC |\
		 CRYPTO_ALG_NEED_FALLBACK,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = ctx_init,\
	.exit = ctx_exit,\
	.setkey = sec_set_key,\
	.setauthsize = sec_aead_setauthsize,\
	.decrypt = sec_aead_decrypt,\
	.encrypt = sec_aead_encrypt,\
	.ivsize = iv_size,\
	.maxauthsize = max_authsize,\
}

static struct sec_aead sec_aeads[] = {
	{
		.alg = SEC_AEAD_ALG("ccm(aes)", sec_setkey_aes_ccm, sec_aead_xcm_ctx_init,
				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE,
				    AES_BLOCK_SIZE),
	},
	{
		.alg = SEC_AEAD_ALG("gcm(aes)", sec_setkey_aes_gcm, sec_aead_xcm_ctx_init,
				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE,
				    AES_BLOCK_SIZE),
	},
	{
		.alg = SEC_AEAD_ALG("ccm(sm4)", sec_setkey_sm4_ccm, sec_aead_xcm_ctx_init,
				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE,
				    AES_BLOCK_SIZE),
	},
	{
		.alg = SEC_AEAD_ALG("gcm(sm4)", sec_setkey_sm4_gcm, sec_aead_xcm_ctx_init,
				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE,
				    AES_BLOCK_SIZE),
	},
	{
		.alg = SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))", sec_setkey_aes_cbc_sha1,
				    sec_aead_sha1_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
				    AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),
	},
	{
		.alg = SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))", sec_setkey_aes_cbc_sha256,
				    sec_aead_sha256_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
				    AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),
	},
	{
		.alg = SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))", sec_setkey_aes_cbc_sha512,
				    sec_aead_sha512_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
				    AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
	},
};

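/*
 * Algorithm registration helpers. Each table entry carries an alg_msk
 * capability bit; an algorithm is (un)registered only when its bit is
 * set in the mask read from the device's algorithm bitmap, so a SEC
 * device only exposes the ciphers it actually implements.
 */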
static void sec_unregister_skcipher(u64 alg_mask, int end)
{
	int i;

	for (i = 0; i < end; i++)
		if (sec_skciphers[i].alg_msk & alg_mask)
			crypto_unregister_skcipher(&sec_skciphers[i].alg);
}

static int sec_register_skcipher(u64 alg_mask)
{
	int i, ret, count;

	count = ARRAY_SIZE(sec_skciphers);

	for (i = 0; i < count; i++) {
		if (!(sec_skciphers[i].alg_msk & alg_mask))
			continue;

		ret = crypto_register_skcipher(&sec_skciphers[i].alg);
		if (ret)
			goto err;
	}

	return 0;

err:
	sec_unregister_skcipher(alg_mask, i);

	return ret;
}

static void sec_unregister_aead(u64 alg_mask, int end)
{
	int i;

	for (i = 0; i < end; i++)
		if (sec_aeads[i].alg_msk & alg_mask)
			crypto_unregister_aead(&sec_aeads[i].alg);
}

static int sec_register_aead(u64 alg_mask)
{
	int i, ret, count;

	count = ARRAY_SIZE(sec_aeads);

	for (i = 0; i < count; i++) {
		if (!(sec_aeads[i].alg_msk & alg_mask))
			continue;

		ret = crypto_register_aead(&sec_aeads[i].alg);
		if (ret)
			goto err;
	}

	return 0;

err:
	sec_unregister_aead(alg_mask, i);

	return ret;
}

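/*
 * Algorithms are registered with the crypto core only once, for the
 * first SEC device that comes up; sec_available_devs counts the active
 * devices under sec_algs_lock so that the algorithms are unregistered
 * only when the last device goes away.
 */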
int sec_register_to_crypto(struct hisi_qm *qm)
{
	u64 alg_mask;
	int ret = 0;

	alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_TB,
				      SEC_DRV_ALG_BITMAP_LOW_TB);

	mutex_lock(&sec_algs_lock);
	if (sec_available_devs) {
		sec_available_devs++;
		goto unlock;
	}

	ret = sec_register_skcipher(alg_mask);
	if (ret)
		goto unlock;

	ret = sec_register_aead(alg_mask);
	if (ret)
		goto unreg_skcipher;

	sec_available_devs++;
	mutex_unlock(&sec_algs_lock);

	return 0;

unreg_skcipher:
	sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));
unlock:
	mutex_unlock(&sec_algs_lock);
	return ret;
}

void sec_unregister_from_crypto(struct hisi_qm *qm)
{
	u64 alg_mask;

	alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_TB,
				      SEC_DRV_ALG_BITMAP_LOW_TB);

	mutex_lock(&sec_algs_lock);
	if (--sec_available_devs)
		goto unlock;

	sec_unregister_aead(alg_mask, ARRAY_SIZE(sec_aeads));
	sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));

unlock:
	mutex_unlock(&sec_algs_lock);
}