// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <crypto/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>

#include "sec.h"
#include "sec_crypto.h"

#define SEC_PRIORITY		4001
#define SEC_XTS_MIN_KEY_SIZE	(2 * AES_MIN_KEY_SIZE)
#define SEC_XTS_MAX_KEY_SIZE	(2 * AES_MAX_KEY_SIZE)
#define SEC_DES3_2KEY_SIZE	(2 * DES_KEY_SIZE)
#define SEC_DES3_3KEY_SIZE	(3 * DES_KEY_SIZE)

/* Bit-field offsets and masks within the SEC sqe (BD) */
#define SEC_DE_OFFSET		1
#define SEC_CIPHER_OFFSET	4
#define SEC_SCENE_OFFSET	3
#define SEC_DST_SGL_OFFSET	2
#define SEC_SRC_SGL_OFFSET	7
#define SEC_CKEY_OFFSET		9
#define SEC_CMODE_OFFSET	12
#define SEC_AKEY_OFFSET		5
#define SEC_AEAD_ALG_OFFSET	11
#define SEC_AUTH_OFFSET		6

#define SEC_FLAG_OFFSET		7
#define SEC_FLAG_MASK		0x0780
#define SEC_TYPE_MASK		0x0F
#define SEC_DONE_MASK		0x0001

#define SEC_TOTAL_IV_SZ		(SEC_IV_SIZE * QM_Q_DEPTH)
#define SEC_SGL_SGE_NR		128
#define SEC_CTX_DEV(ctx)	(&(ctx)->sec->qm.pdev->dev)
#define SEC_CIPHER_AUTH		0xfe
#define SEC_AUTH_CIPHER		0x1
#define SEC_MAX_MAC_LEN		64
#define SEC_MAX_AAD_LEN		65535
#define SEC_TOTAL_MAC_SZ	(SEC_MAX_MAC_LEN * QM_Q_DEPTH)

#define SEC_PBUF_SZ		512
#define SEC_PBUF_IV_OFFSET	SEC_PBUF_SZ
#define SEC_PBUF_MAC_OFFSET	(SEC_PBUF_SZ + SEC_IV_SIZE)
#define SEC_PBUF_PKG		(SEC_PBUF_SZ + SEC_IV_SIZE +	\
				 SEC_MAX_MAC_LEN * 2)
#define SEC_PBUF_NUM		(PAGE_SIZE / SEC_PBUF_PKG)
#define SEC_PBUF_PAGE_NUM	(QM_Q_DEPTH / SEC_PBUF_NUM)
#define SEC_PBUF_LEFT_SZ	(SEC_PBUF_PKG * (QM_Q_DEPTH -	\
				 SEC_PBUF_PAGE_NUM * SEC_PBUF_NUM))
#define SEC_TOTAL_PBUF_SZ	(PAGE_SIZE * SEC_PBUF_PAGE_NUM +	\
				 SEC_PBUF_LEFT_SZ)

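/*
 * Worked example of the packing arithmetic above (illustrative only, and
 * assuming the values defined elsewhere are SEC_IV_SIZE = 24, QM_Q_DEPTH =
 * 1024 and PAGE_SIZE = 4096): SEC_PBUF_PKG = 512 + 24 + 2 * 64 = 664 bytes,
 * so SEC_PBUF_NUM = 4096 / 664 = 6 packages fit in one page,
 * SEC_PBUF_PAGE_NUM = 1024 / 6 = 170 full pages are used, and the remaining
 * 1024 - 170 * 6 = 4 packages account for SEC_PBUF_LEFT_SZ.
 */
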
#define SEC_SQE_LEN_RATE	4
#define SEC_SQE_CFLAG		2
#define SEC_SQE_AEAD_FLAG	3
#define SEC_SQE_DONE		0x1

/* Get an en/de-cipher queue cyclically to balance the load over the queues of a TFM */
static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
				ctx->hlf_q_num;

	return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
				ctx->hlf_q_num;
}

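/*
 * Encrypt requests are thus spread over the first half of the TFM's queues,
 * [0, hlf_q_num), while decrypt requests use the second half,
 * [hlf_q_num, 2 * hlf_q_num).
 */
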
static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		atomic_dec(&ctx->enc_qcyclic);
	else
		atomic_dec(&ctx->dec_qcyclic);
}

static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
{
	int req_id;

	mutex_lock(&qp_ctx->req_lock);
	req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
				  0, QM_Q_DEPTH, GFP_ATOMIC);
	mutex_unlock(&qp_ctx->req_lock);
	if (unlikely(req_id < 0)) {
		dev_err(SEC_CTX_DEV(req->ctx), "alloc req id fail!\n");
		return req_id;
	}

	req->qp_ctx = qp_ctx;
	qp_ctx->req_list[req_id] = req;

	return req_id;
}

static void sec_free_req_id(struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int req_id = req->req_id;

	if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) {
		dev_err(SEC_CTX_DEV(req->ctx), "free request id invalid!\n");
		return;
	}

	qp_ctx->req_list[req_id] = NULL;
	req->qp_ctx = NULL;

	mutex_lock(&qp_ctx->req_lock);
	idr_remove(&qp_ctx->req_idr, req_id);
	mutex_unlock(&qp_ctx->req_lock);
}

static int sec_aead_verify(struct sec_req *req)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
	size_t authsize = crypto_aead_authsize(tfm);
	u8 *mac_out = req->aead_req.out_mac;
	u8 *mac = mac_out + SEC_MAX_MAC_LEN;
	struct scatterlist *sgl = aead_req->src;
	size_t sz;

	sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac, authsize,
				aead_req->cryptlen + aead_req->assoclen -
				authsize);
	if (unlikely(sz != authsize || memcmp(mac_out, mac, sz))) {
		dev_err(SEC_CTX_DEV(req->ctx), "aead verify failure!\n");
		return -EBADMSG;
	}

	return 0;
}

static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
	struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
	struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx;
	struct sec_sqe *bd = resp;
	struct sec_ctx *ctx;
	struct sec_req *req;
	u16 done, flag;
	int err = 0;
	u8 type;

	type = bd->type_cipher_auth & SEC_TYPE_MASK;
	if (unlikely(type != SEC_BD_TYPE2)) {
		atomic64_inc(&dfx->err_bd_cnt);
		pr_err("err bd type [%d]\n", type);
		return;
	}

	req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
	if (unlikely(!req)) {
		atomic64_inc(&dfx->invalid_req_cnt);
		atomic_inc(&qp->qp_status.used);
		return;
	}

	req->err_type = bd->type2.error_type;
	ctx = req->ctx;
	done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
	flag = (le16_to_cpu(bd->type2.done_flag) &
		SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
	if (unlikely(req->err_type || done != SEC_SQE_DONE ||
	    (ctx->alg_type == SEC_SKCIPHER && flag != SEC_SQE_CFLAG) ||
	    (ctx->alg_type == SEC_AEAD && flag != SEC_SQE_AEAD_FLAG))) {
		dev_err(SEC_CTX_DEV(ctx),
			"err_type[%d],done[%d],flag[%d]\n",
			req->err_type, done, flag);
		err = -EIO;
		atomic64_inc(&dfx->done_flag_cnt);
	}

	if (ctx->alg_type == SEC_AEAD && !req->c_req.encrypt)
		err = sec_aead_verify(req);

	atomic64_inc(&dfx->recv_cnt);

	ctx->req_op->buf_unmap(ctx, req);

	ctx->req_op->callback(ctx, req, err);
}

static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int ret;

	if (ctx->fake_req_limit <=
	    atomic_read(&qp_ctx->qp->qp_status.used) &&
	    !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return -EBUSY;

	mutex_lock(&qp_ctx->req_lock);
	ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);

	if (ctx->fake_req_limit <=
	    atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
		list_add_tail(&req->backlog_head, &qp_ctx->backlog);
		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
		atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
		mutex_unlock(&qp_ctx->req_lock);
		return -EBUSY;
	}
	mutex_unlock(&qp_ctx->req_lock);

	if (unlikely(ret == -EBUSY))
		return -ENOBUFS;

	if (likely(!ret)) {
		ret = -EINPROGRESS;
		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
	}

	return ret;
}

/* Get DMA memory resources */
static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	int i;

	res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
					 &res->c_ivin_dma, GFP_KERNEL);
	if (!res->c_ivin)
		return -ENOMEM;

	for (i = 1; i < QM_Q_DEPTH; i++) {
		res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
		res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
	}

	return 0;
}

static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->c_ivin)
		dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
				  res->c_ivin, res->c_ivin_dma);
}

static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
{
	int i;

	res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
					  &res->out_mac_dma, GFP_KERNEL);
	if (!res->out_mac)
		return -ENOMEM;

	for (i = 1; i < QM_Q_DEPTH; i++) {
		res[i].out_mac_dma = res->out_mac_dma +
				     i * (SEC_MAX_MAC_LEN << 1);
		res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1);
	}

	return 0;
}

static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->out_mac)
		dma_free_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
				  res->out_mac, res->out_mac_dma);
}

static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->pbuf)
		dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ,
				  res->pbuf, res->pbuf_dma);
}

/*
 * To improve performance, a pre-mapped pbuffer is used for
 * small packets (< 512 bytes) instead of going through IOMMU translation.
 */
static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
	int pbuf_page_offset;
	int i, j, k;

	res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ,
				       &res->pbuf_dma, GFP_KERNEL);
	if (!res->pbuf)
		return -ENOMEM;

	/*
	 * SEC_PBUF_PKG contains the data pbuf, the IV and
	 * the out_mac: <SEC_PBUF|SEC_IV|SEC_MAC>.
	 * Every page holds SEC_PBUF_NUM (six) SEC_PBUF_PKGs,
	 * and the sec_qp_ctx needs QM_Q_DEPTH of them,
	 * hence SEC_PBUF_PAGE_NUM pages for SEC_TOTAL_PBUF_SZ.
	 */
	for (i = 0; i <= SEC_PBUF_PAGE_NUM; i++) {
		pbuf_page_offset = PAGE_SIZE * i;
		for (j = 0; j < SEC_PBUF_NUM; j++) {
			k = i * SEC_PBUF_NUM + j;
			if (k == QM_Q_DEPTH)
				break;
			res[k].pbuf = res->pbuf +
				j * SEC_PBUF_PKG + pbuf_page_offset;
			res[k].pbuf_dma = res->pbuf_dma +
				j * SEC_PBUF_PKG + pbuf_page_offset;
		}
	}

	return 0;
}

static int sec_alg_resource_alloc(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = SEC_CTX_DEV(ctx);
	struct sec_alg_res *res = qp_ctx->res;
	int ret;

	ret = sec_alloc_civ_resource(dev, res);
	if (ret)
		return ret;

	if (ctx->alg_type == SEC_AEAD) {
		ret = sec_alloc_mac_resource(dev, res);
		if (ret)
			goto alloc_fail;
	}
	if (ctx->pbuf_supported) {
		ret = sec_alloc_pbuf_resource(dev, res);
		if (ret) {
			dev_err(dev, "fail to alloc pbuf dma resource!\n");
			goto alloc_pbuf_fail;
		}
	}

	return 0;

alloc_pbuf_fail:
	if (ctx->alg_type == SEC_AEAD)
		sec_free_mac_resource(dev, qp_ctx->res);
alloc_fail:
	sec_free_civ_resource(dev, res);

	return ret;
}

static void sec_alg_resource_free(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = SEC_CTX_DEV(ctx);

	sec_free_civ_resource(dev, qp_ctx->res);

	if (ctx->pbuf_supported)
		sec_free_pbuf_resource(dev, qp_ctx->res);
	if (ctx->alg_type == SEC_AEAD)
		sec_free_mac_resource(dev, qp_ctx->res);
}

static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
			     int qp_ctx_id, int alg_type)
{
	struct device *dev = SEC_CTX_DEV(ctx);
	struct sec_qp_ctx *qp_ctx;
	struct hisi_qp *qp;
	int ret = -ENOMEM;

	qp_ctx = &ctx->qp_ctx[qp_ctx_id];
	qp = ctx->qps[qp_ctx_id];
	qp->req_type = 0;
	qp->qp_ctx = qp_ctx;
	qp->req_cb = sec_req_cb;

	qp_ctx->qp = qp;
	qp_ctx->ctx = ctx;

	mutex_init(&qp_ctx->req_lock);
	idr_init(&qp_ctx->req_idr);
	INIT_LIST_HEAD(&qp_ctx->backlog);

	qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
						     SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_in_pool)) {
		dev_err(dev, "fail to create sgl pool for input!\n");
		goto err_destroy_idr;
	}

	qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
						      SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_out_pool)) {
		dev_err(dev, "fail to create sgl pool for output!\n");
		goto err_free_c_in_pool;
	}

	ret = sec_alg_resource_alloc(ctx, qp_ctx);
	if (ret)
		goto err_free_c_out_pool;

	ret = hisi_qm_start_qp(qp, 0);
	if (ret < 0)
		goto err_queue_free;

	return 0;

err_queue_free:
	sec_alg_resource_free(ctx, qp_ctx);
err_free_c_out_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
err_free_c_in_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
err_destroy_idr:
	idr_destroy(&qp_ctx->req_idr);

	return ret;
}

static void sec_release_qp_ctx(struct sec_ctx *ctx,
			       struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = SEC_CTX_DEV(ctx);

	hisi_qm_stop_qp(qp_ctx->qp);
	sec_alg_resource_free(ctx, qp_ctx);

	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);

	idr_destroy(&qp_ctx->req_idr);
}

static int sec_ctx_base_init(struct sec_ctx *ctx)
{
	struct sec_dev *sec;
	int i, ret;

	ctx->qps = sec_create_qps();
	if (!ctx->qps) {
		pr_err("Can not create sec qps!\n");
		return -ENODEV;
	}

	sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
	ctx->sec = sec;
	ctx->hlf_q_num = sec->ctx_q_num >> 1;

	ctx->pbuf_supported = ctx->sec->iommu_used;

	/* Half of queue depth is taken as fake requests limit in the queue. */
	ctx->fake_req_limit = QM_Q_DEPTH >> 1;
	ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
			      GFP_KERNEL);
	if (!ctx->qp_ctx) {
		ret = -ENOMEM;
		goto err_destroy_qps;
	}

	for (i = 0; i < sec->ctx_q_num; i++) {
		ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0);
		if (ret)
			goto err_sec_release_qp_ctx;
	}

	return 0;

err_sec_release_qp_ctx:
	for (i = i - 1; i >= 0; i--)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);

	kfree(ctx->qp_ctx);
err_destroy_qps:
	sec_destroy_qps(ctx->qps, sec->ctx_q_num);

	return ret;
}

static void sec_ctx_base_uninit(struct sec_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->sec->ctx_q_num; i++)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);

	sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num);
	kfree(ctx->qp_ctx);
}

static int sec_cipher_init(struct sec_ctx *ctx)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	c_ctx->c_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
					  &c_ctx->c_key_dma, GFP_KERNEL);
	if (!c_ctx->c_key)
		return -ENOMEM;

	return 0;
}

static void sec_cipher_uninit(struct sec_ctx *ctx)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
	dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
			  c_ctx->c_key, c_ctx->c_key_dma);
}

static int sec_auth_init(struct sec_ctx *ctx)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	a_ctx->a_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
					  &a_ctx->a_key_dma, GFP_KERNEL);
	if (!a_ctx->a_key)
		return -ENOMEM;

	return 0;
}

static void sec_auth_uninit(struct sec_ctx *ctx)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	memzero_explicit(a_ctx->a_key, SEC_MAX_KEY_SIZE);
	dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
			  a_ctx->a_key, a_ctx->a_key_dma);
}

static int sec_skcipher_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ctx->alg_type = SEC_SKCIPHER;
	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
	ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
	if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
		dev_err(SEC_CTX_DEV(ctx), "get error skcipher iv size!\n");
		return -EINVAL;
	}

	ret = sec_ctx_base_init(ctx);
	if (ret)
		return ret;

	ret = sec_cipher_init(ctx);
	if (ret)
		goto err_cipher_init;

	return 0;

err_cipher_init:
	sec_ctx_base_uninit(ctx);

	return ret;
}

static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);

	sec_cipher_uninit(ctx);
	sec_ctx_base_uninit(ctx);
}

static int sec_skcipher_3des_setkey(struct sec_cipher_ctx *c_ctx,
				    const u32 keylen,
				    const enum sec_cmode c_mode)
{
	switch (keylen) {
	case SEC_DES3_2KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
		break;
	case SEC_DES3_3KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_3KEY;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
				       const u32 keylen,
				       const enum sec_cmode c_mode)
{
	if (c_mode == SEC_CMODE_XTS) {
		switch (keylen) {
		case SEC_XTS_MIN_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_128BIT;
			break;
		case SEC_XTS_MAX_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_256BIT;
			break;
		default:
			pr_err("hisi_sec2: xts mode key error!\n");
			return -EINVAL;
		}
	} else {
		switch (keylen) {
		case AES_KEYSIZE_128:
			c_ctx->c_key_len = SEC_CKEY_128BIT;
			break;
		case AES_KEYSIZE_192:
			c_ctx->c_key_len = SEC_CKEY_192BIT;
			break;
		case AES_KEYSIZE_256:
			c_ctx->c_key_len = SEC_CKEY_256BIT;
			break;
		default:
			pr_err("hisi_sec2: aes key error!\n");
			return -EINVAL;
		}
	}

	return 0;
}

static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       const u32 keylen, const enum sec_calg c_alg,
			       const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	int ret;

	if (c_mode == SEC_CMODE_XTS) {
		ret = xts_verify_key(tfm, key, keylen);
		if (ret) {
			dev_err(SEC_CTX_DEV(ctx), "xts mode key err!\n");
			return ret;
		}
	}

	c_ctx->c_alg  = c_alg;
	c_ctx->c_mode = c_mode;

	switch (c_alg) {
	case SEC_CALG_3DES:
		ret = sec_skcipher_3des_setkey(c_ctx, keylen, c_mode);
		break;
	case SEC_CALG_AES:
	case SEC_CALG_SM4:
		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
		break;
	default:
		return -EINVAL;
	}

	if (ret) {
		dev_err(SEC_CTX_DEV(ctx), "set sec key err!\n");
		return ret;
	}

	memcpy(c_ctx->c_key, key, keylen);

	return 0;
}

#define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode)			\
static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
	u32 keylen)							\
{									\
	return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode);	\
}

GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)

GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)

GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)

static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
			       struct scatterlist *src)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct device *dev = SEC_CTX_DEV(ctx);
	int copy_size, pbuf_length;
	int req_id = req->req_id;

	if (ctx->alg_type == SEC_AEAD)
		copy_size = aead_req->cryptlen + aead_req->assoclen;
	else
		copy_size = c_req->c_len;

	pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
					qp_ctx->res[req_id].pbuf,
					copy_size);
	if (unlikely(pbuf_length != copy_size)) {
		dev_err(dev, "copy src data to pbuf error!\n");
		return -EINVAL;
	}

	c_req->c_in_dma = qp_ctx->res[req_id].pbuf_dma;
	if (!c_req->c_in_dma) {
		dev_err(dev, "fail to set pbuffer address!\n");
		return -ENOMEM;
	}

	c_req->c_out_dma = c_req->c_in_dma;

	return 0;
}

static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
				  struct scatterlist *dst)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct device *dev = SEC_CTX_DEV(ctx);
	int copy_size, pbuf_length;
	int req_id = req->req_id;

	if (ctx->alg_type == SEC_AEAD)
		copy_size = c_req->c_len + aead_req->assoclen;
	else
		copy_size = c_req->c_len;

	pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
					  qp_ctx->res[req_id].pbuf,
					  copy_size);
	if (unlikely(pbuf_length != copy_size))
		dev_err(dev, "copy pbuf data to dst error!\n");
}

static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
			  struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct sec_alg_res *res = &qp_ctx->res[req->req_id];
	struct device *dev = SEC_CTX_DEV(ctx);
	int ret;

	if (req->use_pbuf) {
		ret = sec_cipher_pbuf_map(ctx, req, src);
		c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET;
		c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET;
		if (ctx->alg_type == SEC_AEAD) {
			a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET;
			a_req->out_mac_dma = res->pbuf_dma +
					     SEC_PBUF_MAC_OFFSET;
		}

		return ret;
	}
	c_req->c_ivin = res->c_ivin;
	c_req->c_ivin_dma = res->c_ivin_dma;
	if (ctx->alg_type == SEC_AEAD) {
		a_req->out_mac = res->out_mac;
		a_req->out_mac_dma = res->out_mac_dma;
	}

	c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
						    qp_ctx->c_in_pool,
						    req->req_id,
						    &c_req->c_in_dma);
	if (IS_ERR(c_req->c_in)) {
		dev_err(dev, "fail to dma map input sgl buffers!\n");
		return PTR_ERR(c_req->c_in);
	}

	if (dst == src) {
		c_req->c_out = c_req->c_in;
		c_req->c_out_dma = c_req->c_in_dma;
	} else {
		c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
							     qp_ctx->c_out_pool,
							     req->req_id,
							     &c_req->c_out_dma);
		if (IS_ERR(c_req->c_out)) {
			dev_err(dev, "fail to dma map output sgl buffers!\n");
			hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);
			return PTR_ERR(c_req->c_out);
		}
	}

	return 0;
}

static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
			     struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_cipher_req *c_req = &req->c_req;
	struct device *dev = SEC_CTX_DEV(ctx);

	if (req->use_pbuf) {
		sec_cipher_pbuf_unmap(ctx, req, dst);
	} else {
		if (dst != src)
			hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);

		hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);
	}
}

static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sq = req->c_req.sk_req;

	return sec_cipher_map(ctx, req, sq->src, sq->dst);
}

static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sq = req->c_req.sk_req;

	sec_cipher_unmap(ctx, req, sq->src, sq->dst);
}

static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx,
				struct crypto_authenc_keys *keys)
{
	switch (keys->enckeylen) {
	case AES_KEYSIZE_128:
		c_ctx->c_key_len = SEC_CKEY_128BIT;
		break;
	case AES_KEYSIZE_192:
		c_ctx->c_key_len = SEC_CKEY_192BIT;
		break;
	case AES_KEYSIZE_256:
		c_ctx->c_key_len = SEC_CKEY_256BIT;
		break;
	default:
		pr_err("hisi_sec2: aead aes key error!\n");
		return -EINVAL;
	}
	memcpy(c_ctx->c_key, keys->enckey, keys->enckeylen);

	return 0;
}

static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
				 struct crypto_authenc_keys *keys)
{
	struct crypto_shash *hash_tfm = ctx->hash_tfm;
	int blocksize, digestsize, ret;

	if (!keys->authkeylen) {
		pr_err("hisi_sec2: aead auth key error!\n");
		return -EINVAL;
	}

	blocksize = crypto_shash_blocksize(hash_tfm);
	digestsize = crypto_shash_digestsize(hash_tfm);
	if (keys->authkeylen > blocksize) {
		ret = crypto_shash_tfm_digest(hash_tfm, keys->authkey,
					      keys->authkeylen, ctx->a_key);
		if (ret) {
			pr_err("hisi_sec2: aead auth digest error!\n");
			return -EINVAL;
		}
		ctx->a_key_len = digestsize;
	} else {
		memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
		ctx->a_key_len = keys->authkeylen;
	}

	return 0;
}

static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			   const u32 keylen, const enum sec_hash_alg a_alg,
			   const enum sec_calg c_alg,
			   const enum sec_mac_len mac_len,
			   const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct crypto_authenc_keys keys;
	int ret;

	ctx->a_ctx.a_alg = a_alg;
	ctx->c_ctx.c_alg = c_alg;
	ctx->a_ctx.mac_len = mac_len;
	c_ctx->c_mode = c_mode;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	ret = sec_aead_aes_set_key(c_ctx, &keys);
	if (ret) {
		dev_err(SEC_CTX_DEV(ctx), "set sec cipher key err!\n");
		goto bad_key;
	}

	ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys);
	if (ret) {
		dev_err(SEC_CTX_DEV(ctx), "set sec auth key err!\n");
		goto bad_key;
	}

	return 0;

bad_key:
	memzero_explicit(&keys, sizeof(struct crypto_authenc_keys));

	return -EINVAL;
}

#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode)	\
static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key,	\
	u32 keylen)							\
{									\
	return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\
}

GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1,
			 SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
			 SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
			 SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)

static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aq = req->aead_req.aead_req;

	return sec_cipher_map(ctx, req, aq->src, aq->dst);
}

static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aq = req->aead_req.aead_req;

	sec_cipher_unmap(ctx, req, aq->src, aq->dst);
}

static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
{
	int ret;

	ret = ctx->req_op->buf_map(ctx, req);
	if (unlikely(ret))
		return ret;

	ctx->req_op->do_transfer(ctx, req);

	ret = ctx->req_op->bd_fill(ctx, req);
	if (unlikely(ret))
		goto unmap_req_buf;

	return ret;

unmap_req_buf:
	ctx->req_op->buf_unmap(ctx, req);

	return ret;
}

static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
{
	ctx->req_op->buf_unmap(ctx, req);
}

static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_cipher_req *c_req = &req->c_req;

	memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
}

static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	u8 scene, sa_type, da_type;
	u8 bd_type, cipher;
	u8 de = 0;

	memset(sec_sqe, 0, sizeof(struct sec_sqe));

	sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
	sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
	sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma);
	sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);

	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
						 SEC_CMODE_OFFSET);
	sec_sqe->type2.c_alg = c_ctx->c_alg;
	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
						 SEC_CKEY_OFFSET);

	bd_type = SEC_BD_TYPE2;
	if (c_req->encrypt)
		cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET;
	else
		cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
	sec_sqe->type_cipher_auth = bd_type | cipher;

	if (req->use_pbuf)
		sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET;
	else
		sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
	scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
	if (c_req->c_in_dma != c_req->c_out_dma)
		de = 0x1 << SEC_DE_OFFSET;

	sec_sqe->sds_sa_type = (de | scene | sa_type);

	/* Just set DST address type */
	if (req->use_pbuf)
		da_type = SEC_PBUF << SEC_DST_SGL_OFFSET;
	else
		da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
	sec_sqe->sdm_addr_type |= da_type;

	sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
	sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);

	return 0;
}

static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct skcipher_request *sk_req = req->c_req.sk_req;
	u32 iv_size = req->ctx->c_ctx.ivsize;
	struct scatterlist *sgl;
	unsigned int cryptlen;
	size_t sz;
	u8 *iv;

	if (req->c_req.encrypt)
		sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst;
	else
		sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src;

	if (alg_type == SEC_SKCIPHER) {
		iv = sk_req->iv;
		cryptlen = sk_req->cryptlen;
	} else {
		iv = aead_req->iv;
		cryptlen = aead_req->cryptlen;
	}

	sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
				cryptlen - iv_size);
	if (unlikely(sz != iv_size))
		dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n");
}

static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
					  struct sec_qp_ctx *qp_ctx)
{
	struct sec_req *backlog_req = NULL;

	mutex_lock(&qp_ctx->req_lock);
	if (ctx->fake_req_limit >=
	    atomic_read(&qp_ctx->qp->qp_status.used) &&
	    !list_empty(&qp_ctx->backlog)) {
		backlog_req = list_first_entry(&qp_ctx->backlog,
				typeof(*backlog_req), backlog_head);
		list_del(&backlog_req->backlog_head);
	}
	mutex_unlock(&qp_ctx->req_lock);

	return backlog_req;
}

static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
				  int err)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct skcipher_request *backlog_sk_req;
	struct sec_req *backlog_req;

	sec_free_req_id(req);

	/* Output the IV after CBC-mode encryption */
	if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
		sec_update_iv(req, SEC_SKCIPHER);

	while (1) {
		backlog_req = sec_back_req_clear(ctx, qp_ctx);
		if (!backlog_req)
			break;

		backlog_sk_req = backlog_req->c_req.sk_req;
		backlog_sk_req->base.complete(&backlog_sk_req->base,
					      -EINPROGRESS);
		atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt);
	}

	sk_req->base.complete(&sk_req->base, err);
}

static void sec_aead_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;

	memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
}

static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
				struct sec_req *req, struct sec_sqe *sec_sqe)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct aead_request *aq = a_req->aead_req;

	sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);

	sec_sqe->type2.mac_key_alg =
			cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);

	sec_sqe->type2.mac_key_alg |=
			cpu_to_le32((u32)((ctx->a_key_len) /
			SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET);

	sec_sqe->type2.mac_key_alg |=
			cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);

	sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET;

	if (dir)
		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
	else
		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;

	sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen);

	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);

	sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
}

static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	int ret;

	ret = sec_skcipher_bd_fill(ctx, req);
	if (unlikely(ret)) {
		dev_err(SEC_CTX_DEV(ctx), "skcipher bd fill is error!\n");
		return ret;
	}

	sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);

	return 0;
}

static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
{
	struct aead_request *a_req = req->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	struct sec_aead_req *aead_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	size_t authsize = crypto_aead_authsize(tfm);
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct aead_request *backlog_aead_req;
	struct sec_req *backlog_req;
	size_t sz;

	if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
		sec_update_iv(req, SEC_AEAD);

	/* Copy the output MAC */
	if (!err && c_req->encrypt) {
		struct scatterlist *sgl = a_req->dst;

		sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
					  aead_req->out_mac,
					  authsize, a_req->cryptlen +
					  a_req->assoclen);
		if (unlikely(sz != authsize)) {
			dev_err(SEC_CTX_DEV(req->ctx), "copy out mac err!\n");
			err = -EINVAL;
		}
	}

	sec_free_req_id(req);

	while (1) {
		backlog_req = sec_back_req_clear(c, qp_ctx);
		if (!backlog_req)
			break;

		backlog_aead_req = backlog_req->aead_req.aead_req;
		backlog_aead_req->base.complete(&backlog_aead_req->base,
						-EINPROGRESS);
		atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt);
	}

	a_req->base.complete(&a_req->base, err);
}

static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
{
	sec_free_req_id(req);
	sec_free_queue_id(ctx, req);
}

static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx;
	int queue_id;

	/* To load balance */
	queue_id = sec_alloc_queue_id(ctx, req);
	qp_ctx = &ctx->qp_ctx[queue_id];

	req->req_id = sec_alloc_req_id(req, qp_ctx);
	if (unlikely(req->req_id < 0)) {
		sec_free_queue_id(ctx, req);
		return req->req_id;
	}

	return 0;
}

static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_cipher_req *c_req = &req->c_req;
	int ret;

	ret = sec_request_init(ctx, req);
	if (unlikely(ret))
		return ret;

	ret = sec_request_transfer(ctx, req);
	if (unlikely(ret))
		goto err_uninit_req;

	/* For CBC decryption, save the output IV before the data is overwritten */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
		sec_update_iv(req, ctx->alg_type);

	ret = ctx->req_op->bd_send(ctx, req);
	if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) ||
	    (ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
		dev_err_ratelimited(SEC_CTX_DEV(ctx), "send sec request failed!\n");
		goto err_send_req;
	}

	return ret;

err_send_req:
	/* On failure, restore the user's IV */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
		if (ctx->alg_type == SEC_SKCIPHER)
			memcpy(req->c_req.sk_req->iv, c_req->c_ivin,
			       ctx->c_ctx.ivsize);
		else
			memcpy(req->aead_req.aead_req->iv, c_req->c_ivin,
			       ctx->c_ctx.ivsize);
	}

	sec_request_untransfer(ctx, req);
err_uninit_req:
	sec_request_uninit(ctx, req);

	return ret;
}

static const struct sec_req_op sec_skcipher_req_ops = {
	.buf_map	= sec_skcipher_sgl_map,
	.buf_unmap	= sec_skcipher_sgl_unmap,
	.do_transfer	= sec_skcipher_copy_iv,
	.bd_fill	= sec_skcipher_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_skcipher_callback,
	.process	= sec_process,
};

static const struct sec_req_op sec_aead_req_ops = {
	.buf_map	= sec_aead_sgl_map,
	.buf_unmap	= sec_aead_sgl_unmap,
	.do_transfer	= sec_aead_copy_iv,
	.bd_fill	= sec_aead_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_aead_callback,
	.process	= sec_process,
};

static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->req_op = &sec_skcipher_req_ops;

	return sec_skcipher_init(tfm);
}

static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
{
	sec_skcipher_uninit(tfm);
}

static int sec_aead_init(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	crypto_aead_set_reqsize(tfm, sizeof(struct sec_req));
	ctx->alg_type = SEC_AEAD;
	ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
	if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
		dev_err(SEC_CTX_DEV(ctx), "get error aead iv size!\n");
		return -EINVAL;
	}

	ctx->req_op = &sec_aead_req_ops;
	ret = sec_ctx_base_init(ctx);
	if (ret)
		return ret;

	ret = sec_auth_init(ctx);
	if (ret)
		goto err_auth_init;

	ret = sec_cipher_init(ctx);
	if (ret)
		goto err_cipher_init;

	return ret;

err_cipher_init:
	sec_auth_uninit(ctx);
err_auth_init:
	sec_ctx_base_uninit(ctx);

	return ret;
}

*tfm
)
1356 struct sec_ctx
*ctx
= crypto_aead_ctx(tfm
);
1358 sec_cipher_uninit(ctx
);
1359 sec_auth_uninit(ctx
);
1360 sec_ctx_base_uninit(ctx
);
1363 static int sec_aead_ctx_init(struct crypto_aead
*tfm
, const char *hash_name
)
1365 struct sec_ctx
*ctx
= crypto_aead_ctx(tfm
);
1366 struct sec_auth_ctx
*auth_ctx
= &ctx
->a_ctx
;
1369 ret
= sec_aead_init(tfm
);
1371 pr_err("hisi_sec2: aead init error!\n");
1375 auth_ctx
->hash_tfm
= crypto_alloc_shash(hash_name
, 0, 0);
1376 if (IS_ERR(auth_ctx
->hash_tfm
)) {
1377 dev_err(SEC_CTX_DEV(ctx
), "aead alloc shash error!\n");
1379 return PTR_ERR(auth_ctx
->hash_tfm
);
1385 static void sec_aead_ctx_exit(struct crypto_aead
*tfm
)
1387 struct sec_ctx
*ctx
= crypto_aead_ctx(tfm
);
1389 crypto_free_shash(ctx
->a_ctx
.hash_tfm
);
1393 static int sec_aead_sha1_ctx_init(struct crypto_aead
*tfm
)
1395 return sec_aead_ctx_init(tfm
, "sha1");
1398 static int sec_aead_sha256_ctx_init(struct crypto_aead
*tfm
)
1400 return sec_aead_ctx_init(tfm
, "sha256");
1403 static int sec_aead_sha512_ctx_init(struct crypto_aead
*tfm
)
1405 return sec_aead_ctx_init(tfm
, "sha512");
static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct skcipher_request *sk_req = sreq->c_req.sk_req;
	struct device *dev = SEC_CTX_DEV(ctx);
	u8 c_alg = ctx->c_ctx.c_alg;

	if (unlikely(!sk_req->src || !sk_req->dst)) {
		dev_err(dev, "skcipher input param error!\n");
		return -EINVAL;
	}
	sreq->c_req.c_len = sk_req->cryptlen;

	if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
		sreq->use_pbuf = true;
	else
		sreq->use_pbuf = false;

	if (c_alg == SEC_CALG_3DES) {
		if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
			dev_err(dev, "skcipher 3des input length error!\n");
			return -EINVAL;
		}
		return 0;
	} else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
		if (unlikely(sk_req->cryptlen & (AES_BLOCK_SIZE - 1))) {
			dev_err(dev, "skcipher aes input length error!\n");
			return -EINVAL;
		}
		return 0;
	}
	dev_err(dev, "skcipher algorithm error!\n");

	return -EINVAL;
}

static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
	struct sec_req *req = skcipher_request_ctx(sk_req);
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	if (!sk_req->cryptlen)
		return 0;

	req->flag = sk_req->base.flags;
	req->c_req.sk_req = sk_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;

	ret = sec_skcipher_param_check(ctx, req);
	if (unlikely(ret))
		return -EINVAL;

	return ctx->req_op->process(ctx, req);
}

static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, true);
}

static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, false);
}

#define SEC_SKCIPHER_GEN_ALG(sec_cra_name, sec_set_key, sec_min_key_size, \
	sec_max_key_size, ctx_init, ctx_exit, blk_size, iv_size)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = ctx_init,\
	.exit = ctx_exit,\
	.setkey = sec_set_key,\
	.decrypt = sec_skcipher_decrypt,\
	.encrypt = sec_skcipher_encrypt,\
	.min_keysize = sec_min_key_size,\
	.max_keysize = sec_max_key_size,\
	.ivsize = iv_size,\
},

#define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \
	max_key_size, blk_size, iv_size) \
	SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
	sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)

static struct skcipher_alg sec_skciphers[] = {
	SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb,
			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, 0)

	SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc,
			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts,
			 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
			 SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
			 DES3_EDE_BLOCK_SIZE, 0)

	SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
			 SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
			 DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,
			 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
			 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)
};

static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	u8 c_alg = ctx->c_ctx.c_alg;
	struct aead_request *req = sreq->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	size_t authsize = crypto_aead_authsize(tfm);

	if (unlikely(!req->src || !req->dst || !req->cryptlen ||
	    req->assoclen > SEC_MAX_AAD_LEN)) {
		dev_err(SEC_CTX_DEV(ctx), "aead input param error!\n");
		return -EINVAL;
	}

	if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
	    SEC_PBUF_SZ)
		sreq->use_pbuf = true;
	else
		sreq->use_pbuf = false;

	/* Support AES only */
	if (unlikely(c_alg != SEC_CALG_AES)) {
		dev_err(SEC_CTX_DEV(ctx), "aead crypto alg error!\n");
		return -EINVAL;
	}
	if (sreq->c_req.encrypt)
		sreq->c_req.c_len = req->cryptlen;
	else
		sreq->c_req.c_len = req->cryptlen - authsize;

	if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
		dev_err(SEC_CTX_DEV(ctx), "aead crypto length error!\n");
		return -EINVAL;
	}

	return 0;
}

static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	struct sec_req *req = aead_request_ctx(a_req);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	req->flag = a_req->base.flags;
	req->aead_req.aead_req = a_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;

	ret = sec_aead_param_check(ctx, req);
	if (unlikely(ret))
		return -EINVAL;

	return ctx->req_op->process(ctx, req);
}

static int sec_aead_encrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, true);
}

static int sec_aead_decrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, false);
}

#define SEC_AEAD_GEN_ALG(sec_cra_name, sec_set_key, ctx_init,\
			 ctx_exit, blk_size, iv_size, max_authsize)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = ctx_init,\
	.exit = ctx_exit,\
	.setkey = sec_set_key,\
	.decrypt = sec_aead_decrypt,\
	.encrypt = sec_aead_encrypt,\
	.ivsize = iv_size,\
	.maxauthsize = max_authsize,\
}

#define SEC_AEAD_ALG(algname, keyfunc, aead_init, blksize, ivsize, authsize)\
	SEC_AEAD_GEN_ALG(algname, keyfunc, aead_init,\
			 sec_aead_ctx_exit, blksize, ivsize, authsize)

static struct aead_alg sec_aeads[] = {
	SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))",
		     sec_setkey_aes_cbc_sha1, sec_aead_sha1_ctx_init,
		     AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),

	SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))",
		     sec_setkey_aes_cbc_sha256, sec_aead_sha256_ctx_init,
		     AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),

	SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))",
		     sec_setkey_aes_cbc_sha512, sec_aead_sha512_ctx_init,
		     AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
};

int sec_register_to_crypto(void)
{
	int ret;

	/* To avoid repeated registration */
	ret = crypto_register_skciphers(sec_skciphers,
					ARRAY_SIZE(sec_skciphers));
	if (ret)
		return ret;

	ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
	if (ret)
		crypto_unregister_skciphers(sec_skciphers,
					    ARRAY_SIZE(sec_skciphers));

	return ret;
}

void sec_unregister_from_crypto(void)
{
	crypto_unregister_skciphers(sec_skciphers,
				    ARRAY_SIZE(sec_skciphers));
	crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
}