// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <crypto/akcipher.h>
#include <crypto/curve25519.h>
#include <crypto/ecc_curve.h>
#include <crypto/ecdh.h>
#include <crypto/rng.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/kpp.h>
#include <crypto/internal/rsa.h>
#include <crypto/kpp.h>
#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <linux/module.h>
#include <linux/time.h>
#include "hpre.h"

#define HPRE_CRYPTO_ALG_PRI	1000
#define HPRE_ALIGN_SZ		64
#define HPRE_BITS_2_BYTES_SHIFT	3
#define HPRE_RSA_512BITS_KSZ	64
#define HPRE_RSA_1536BITS_KSZ	192
#define HPRE_CRT_PRMS		5
#define HPRE_CRT_Q		2
#define HPRE_CRT_P		3
#define HPRE_CRT_INV		4
#define HPRE_DH_G_FLAG		0x02
#define HPRE_TRY_SEND_TIMES	100
#define HPRE_INVLD_REQ_ID	(-1)

#define HPRE_SQE_ALG_BITS	5
#define HPRE_SQE_DONE_SHIFT	30
#define HPRE_DH_MAX_P_SZ	512

#define HPRE_DFX_SEC_TO_US	1000000
#define HPRE_DFX_US_TO_NS	1000

/* due to nist p521 */
#define HPRE_ECC_MAX_KSZ	66

/* size in bytes of the n prime */
#define HPRE_ECC_NIST_P192_N_SIZE	24
#define HPRE_ECC_NIST_P256_N_SIZE	32
#define HPRE_ECC_NIST_P384_N_SIZE	48

#define HPRE_ECC_HW256_KSZ_B	32
#define HPRE_ECC_HW384_KSZ_B	48

/* capability register mask of driver */
#define HPRE_DRV_RSA_MASK_CAP		BIT(0)
#define HPRE_DRV_DH_MASK_CAP		BIT(1)
#define HPRE_DRV_ECDH_MASK_CAP		BIT(2)
#define HPRE_DRV_X25519_MASK_CAP	BIT(5)
static DEFINE_MUTEX(hpre_algs_lock);
static unsigned int hpre_available_devs;

typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
struct hpre_rsa_ctx {
	/* low address: e--->n */
	char *pubkey;
	dma_addr_t dma_pubkey;

	/* low address: d--->n */
	char *prikey;
	dma_addr_t dma_prikey;

	/* low address: dq->dp->q->p->qinv */
	char *crt_prikey;
	dma_addr_t dma_crt_prikey;

	struct crypto_akcipher *soft_tfm;
};
struct hpre_dh_ctx {
	/*
	 * If base is g we compute the public key
	 *	ya = g^xa mod p; [RFC2631 sec 2.1.1]
	 * else if base is the counterpart public key we
	 * compute the shared secret
	 *	ZZ = yb^xa mod p; [RFC2631 sec 2.1.1]
	 * low address: d--->n, please refer to Hisilicon HPRE UM
	 */
	char *xa_p;
	dma_addr_t dma_xa_p;

	char *g; /* m */
	dma_addr_t dma_g;
};
struct hpre_ecdh_ctx {
	/* low address: p->a->k->b */
	unsigned char *p;
	dma_addr_t dma_p;

	/* low address: x->y */
	unsigned char *g;
	dma_addr_t dma_g;
};

struct hpre_curve25519_ctx {
	/* low address: p->a->k */
	unsigned char *p;
	dma_addr_t dma_p;

	/* gx coordinate */
	unsigned char *g;
	dma_addr_t dma_g;
};
struct hpre_ctx {
	struct hisi_qp *qp;
	struct device *dev;
	struct hpre_asym_request **req_list;
	struct hpre *hpre;
	spinlock_t req_lock;
	unsigned int key_sz;
	bool crt_g2_mode;
	struct idr req_idr;
	union {
		struct hpre_rsa_ctx rsa;
		struct hpre_dh_ctx dh;
		struct hpre_ecdh_ctx ecdh;
		struct hpre_curve25519_ctx curve25519;
	};
	/* for ecc algorithms */
	unsigned int curve_id;
};
struct hpre_asym_request {
	char *src;
	char *dst;
	struct hpre_sqe req;
	struct hpre_ctx *ctx;
	union {
		struct akcipher_request *rsa;
		struct kpp_request *dh;
		struct kpp_request *ecdh;
		struct kpp_request *curve25519;
	} areq;
	int err;
	int req_id;
	hpre_cb cb;
	struct timespec64 req_time;
};
static inline unsigned int hpre_align_sz(void)
{
	return ((crypto_dma_align() - 1) | (HPRE_ALIGN_SZ - 1)) + 1;
}

static inline unsigned int hpre_align_pd(void)
{
	return (hpre_align_sz() - 1) & ~(crypto_tfm_ctx_alignment() - 1);
}
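/*
 * Illustration (an assumption for a typical configuration, not taken from
 * the original source): if crypto_dma_align() returns 64, hpre_align_sz()
 * evaluates to ((63 | 63) + 1) = 64.  Every hpre_asym_request obtained
 * below via PTR_ALIGN(tmp, hpre_align_sz()) therefore lands on a 64-byte
 * boundary inside the request context, and hpre_align_pd() is the extra
 * padding added to the reqsize so that such an aligned pointer always fits.
 */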
static int hpre_alloc_req_id(struct hpre_ctx *ctx)
{
	unsigned long flags;
	int id;

	spin_lock_irqsave(&ctx->req_lock, flags);
	id = idr_alloc(&ctx->req_idr, NULL, 0, ctx->qp->sq_depth, GFP_ATOMIC);
	spin_unlock_irqrestore(&ctx->req_lock, flags);

	return id;
}
static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->req_lock, flags);
	idr_remove(&ctx->req_idr, req_id);
	spin_unlock_irqrestore(&ctx->req_lock, flags);
}
static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
{
	struct hpre_ctx *ctx;
	struct hpre_dfx *dfx;
	int id;

	ctx = hpre_req->ctx;
	id = hpre_alloc_req_id(ctx);
	if (unlikely(id < 0))
		return -EINVAL;

	ctx->req_list[id] = hpre_req;
	hpre_req->req_id = id;

	dfx = ctx->hpre->debug.dfx;
	if (atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value))
		ktime_get_ts64(&hpre_req->req_time);

	return id;
}
static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
{
	struct hpre_ctx *ctx = hpre_req->ctx;
	int id = hpre_req->req_id;

	if (hpre_req->req_id >= 0) {
		hpre_req->req_id = HPRE_INVLD_REQ_ID;
		ctx->req_list[id] = NULL;
		hpre_free_req_id(ctx, id);
	}
}
static struct hisi_qp *hpre_get_qp_and_start(u8 type)
{
	struct hisi_qp *qp;
	int ret;

	qp = hpre_create_qp(type);
	if (!qp) {
		pr_err("Can not create hpre qp!\n");
		return ERR_PTR(-ENODEV);
	}

	ret = hisi_qm_start_qp(qp, 0);
	if (ret < 0) {
		hisi_qm_free_qps(&qp, 1);
		pci_err(qp->qm->pdev, "Can not start qp!\n");
		return ERR_PTR(-EINVAL);
	}

	return qp;
}
static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
				  struct scatterlist *data, unsigned int len,
				  int is_src, dma_addr_t *tmp)
{
	struct device *dev = hpre_req->ctx->dev;
	enum dma_data_direction dma_dir;

	if (is_src) {
		hpre_req->src = NULL;
		dma_dir = DMA_TO_DEVICE;
	} else {
		hpre_req->dst = NULL;
		dma_dir = DMA_FROM_DEVICE;
	}
	*tmp = dma_map_single(dev, sg_virt(data), len, dma_dir);
	if (unlikely(dma_mapping_error(dev, *tmp))) {
		dev_err(dev, "dma map data err!\n");
		return -ENOMEM;
	}

	return 0;
}
static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
				struct scatterlist *data, unsigned int len,
				int is_src, dma_addr_t *tmp)
{
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = ctx->dev;
	void *ptr;
	int shift;

	shift = ctx->key_sz - len;
	if (unlikely(shift < 0))
		return -EINVAL;

	ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_ATOMIC);
	if (unlikely(!ptr))
		return -ENOMEM;

	if (is_src) {
		scatterwalk_map_and_copy(ptr + shift, data, 0, len, 0);
		hpre_req->src = ptr;
	} else {
		hpre_req->dst = ptr;
	}

	return 0;
}
static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
			     struct scatterlist *data, unsigned int len,
			     int is_src, int is_dh)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	dma_addr_t tmp = 0;
	int ret;

	/* when the data is dh's source, we should format it */
	if ((sg_is_last(data) && len == ctx->key_sz) &&
	    ((is_dh && !is_src) || !is_dh))
		ret = hpre_get_data_dma_addr(hpre_req, data, len, is_src, &tmp);
	else
		ret = hpre_prepare_dma_buf(hpre_req, data, len, is_src, &tmp);

	if (unlikely(ret))
		return ret;

	if (is_src)
		msg->in = cpu_to_le64(tmp);
	else
		msg->out = cpu_to_le64(tmp);

	return 0;
}
static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
				 struct hpre_asym_request *req,
				 struct scatterlist *dst,
				 struct scatterlist *src)
{
	struct device *dev = ctx->dev;
	struct hpre_sqe *sqe = &req->req;
	dma_addr_t tmp;

	tmp = le64_to_cpu(sqe->in);
	if (unlikely(dma_mapping_error(dev, tmp)))
		return;

	if (src) {
		if (req->src)
			dma_free_coherent(dev, ctx->key_sz, req->src, tmp);
		else
			dma_unmap_single(dev, tmp, ctx->key_sz, DMA_TO_DEVICE);
	}

	tmp = le64_to_cpu(sqe->out);
	if (unlikely(dma_mapping_error(dev, tmp)))
		return;

	if (req->dst) {
		if (dst)
			scatterwalk_map_and_copy(req->dst, dst, 0,
						 ctx->key_sz, 1);
		dma_free_coherent(dev, ctx->key_sz, req->dst, tmp);
	} else {
		dma_unmap_single(dev, tmp, ctx->key_sz, DMA_FROM_DEVICE);
	}
}
static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
				void **kreq)
{
	struct hpre_asym_request *req;
	unsigned int err, done, alg;
	int id;

#define HPRE_NO_HW_ERR		0
#define HPRE_HW_TASK_DONE	3
#define HREE_HW_ERR_MASK	GENMASK(10, 0)
#define HREE_SQE_DONE_MASK	GENMASK(1, 0)
#define HREE_ALG_TYPE_MASK	GENMASK(4, 0)
	id = (int)le16_to_cpu(sqe->tag);
	req = ctx->req_list[id];
	hpre_rm_req_from_ctx(req);
	*kreq = req;

	err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) &
		HREE_HW_ERR_MASK;

	done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &
		HREE_SQE_DONE_MASK;

	if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE))
		return 0;

	alg = le32_to_cpu(sqe->dw0) & HREE_ALG_TYPE_MASK;
	dev_err_ratelimited(ctx->dev, "alg[0x%x] error: done[0x%x], etype[0x%x]\n",
			    alg, done, err);

	return -EINVAL;
}
static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
{
	struct hpre *hpre;

	if (!ctx || !qp || qlen < 0)
		return -EINVAL;

	spin_lock_init(&ctx->req_lock);
	ctx->qp = qp;
	ctx->dev = &qp->qm->pdev->dev;

	hpre = container_of(ctx->qp->qm, struct hpre, qm);
	ctx->hpre = hpre;
	ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);
	if (!ctx->req_list)
		return -ENOMEM;
	ctx->key_sz = 0;
	ctx->crt_g2_mode = false;
	idr_init(&ctx->req_idr);

	return 0;
}
static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
{
	if (is_clear_all) {
		idr_destroy(&ctx->req_idr);
		kfree(ctx->req_list);
		hisi_qm_free_qps(&ctx->qp, 1);
	}

	ctx->crt_g2_mode = false;
	ctx->key_sz = 0;
}
static bool hpre_is_bd_timeout(struct hpre_asym_request *req,
			       u64 overtime_thrhld)
{
	struct timespec64 reply_time;
	u64 time_use_us;

	ktime_get_ts64(&reply_time);
	time_use_us = (reply_time.tv_sec - req->req_time.tv_sec) *
		HPRE_DFX_SEC_TO_US +
		(reply_time.tv_nsec - req->req_time.tv_nsec) /
		HPRE_DFX_US_TO_NS;

	if (time_use_us <= overtime_thrhld)
		return false;

	return true;
}
static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp)
{
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_asym_request *req;
	struct kpp_request *areq;
	u64 overtime_thrhld;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
	areq = req->areq.dh;
	areq->dst_len = ctx->key_sz;

	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);

	hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	kpp_request_complete(areq, ret);
	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}
static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp)
{
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_asym_request *req;
	struct akcipher_request *areq;
	u64 overtime_thrhld;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);

	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);

	areq = req->areq.rsa;
	areq->dst_len = ctx->key_sz;
	hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	akcipher_request_complete(areq, ret);
	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}
static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
{
	struct hpre_ctx *ctx = qp->qp_ctx;
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_sqe *sqe = resp;
	struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)];

	if (unlikely(!req)) {
		atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value);
		return;
	}

	req->cb(ctx, resp);
}
static void hpre_stop_qp_and_put(struct hisi_qp *qp)
{
	hisi_qm_stop_qp(qp);
	hisi_qm_free_qps(&qp, 1);
}
static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type)
{
	struct hisi_qp *qp;
	int ret;

	qp = hpre_get_qp_and_start(type);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	qp->qp_ctx = ctx;
	qp->req_cb = hpre_alg_cb;

	ret = hpre_ctx_set(ctx, qp, qp->sq_depth);
	if (ret)
		hpre_stop_qp_and_put(qp);

	return ret;
}
static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
{
	struct hpre_asym_request *h_req;
	struct hpre_sqe *msg;
	int req_id;
	void *tmp;

	if (is_rsa) {
		struct akcipher_request *akreq = req;

		if (akreq->dst_len < ctx->key_sz) {
			akreq->dst_len = ctx->key_sz;
			return -EOVERFLOW;
		}

		tmp = akcipher_request_ctx(akreq);
		h_req = PTR_ALIGN(tmp, hpre_align_sz());
		h_req->cb = hpre_rsa_cb;
		h_req->areq.rsa = akreq;
		msg = &h_req->req;
		memset(msg, 0, sizeof(*msg));
	} else {
		struct kpp_request *kreq = req;

		if (kreq->dst_len < ctx->key_sz) {
			kreq->dst_len = ctx->key_sz;
			return -EOVERFLOW;
		}

		tmp = kpp_request_ctx(kreq);
		h_req = PTR_ALIGN(tmp, hpre_align_sz());
		h_req->cb = hpre_dh_cb;
		h_req->areq.dh = kreq;
		msg = &h_req->req;
		memset(msg, 0, sizeof(*msg));
		msg->key = cpu_to_le64(ctx->dh.dma_xa_p);
	}

	msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->dw0 |= cpu_to_le32(0x1 << HPRE_SQE_DONE_SHIFT);
	msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
	h_req->ctx = ctx;

	req_id = hpre_add_req_to_ctx(h_req);
	if (req_id < 0)
		return -EBUSY;

	msg->tag = cpu_to_le16((u16)req_id);

	return 0;
}
static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
{
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	int ctr = 0;
	int ret;

	do {
		atomic64_inc(&dfx[HPRE_SEND_CNT].value);
		spin_lock_bh(&ctx->req_lock);
		ret = hisi_qp_send(ctx->qp, msg);
		spin_unlock_bh(&ctx->req_lock);
		if (ret != -EBUSY)
			break;
		atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value);
	} while (ctr++ < HPRE_TRY_SEND_TIMES);

	if (likely(!ret))
		return ret;

	if (ret != -EBUSY)
		atomic64_inc(&dfx[HPRE_SEND_FAIL_CNT].value);

	return ret;
}
static int hpre_dh_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	void *tmp = kpp_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	ret = hpre_msg_request_set(ctx, req, false);
	if (unlikely(ret))
		return ret;

	if (req->src) {
		ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1);
		if (unlikely(ret))
			goto clear_all;
	} else {
		msg->in = cpu_to_le64(ctx->dh.dma_g);
	}

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1);
	if (unlikely(ret))
		goto clear_all;

	if (ctx->crt_g2_mode && !req->src)
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2);
	else
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);

	/* success */
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}
static int hpre_is_dh_params_length_valid(unsigned int key_sz)
{
#define _HPRE_DH_GRP1		768
#define _HPRE_DH_GRP2		1024
#define _HPRE_DH_GRP5		1536
#define _HPRE_DH_GRP14		2048
#define _HPRE_DH_GRP15		3072
#define _HPRE_DH_GRP16		4096
	switch (key_sz) {
	case _HPRE_DH_GRP1:
	case _HPRE_DH_GRP2:
	case _HPRE_DH_GRP5:
	case _HPRE_DH_GRP14:
	case _HPRE_DH_GRP15:
	case _HPRE_DH_GRP16:
		return 0;
	default:
		return -EINVAL;
	}
}
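/*
 * For reference (not stated in the original source): the _HPRE_DH_GRP*
 * widths appear to match the MODP group sizes from RFC 2409 and RFC 3526
 * (groups 1, 2, 5, 14, 15 and 16, i.e. 768/1024/1536/2048/3072/4096 bits),
 * so only those well-known prime lengths are accepted for hardware DH.
 */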
static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
{
	struct device *dev = ctx->dev;
	unsigned int sz;

	if (params->p_size > HPRE_DH_MAX_P_SZ)
		return -EINVAL;

	if (hpre_is_dh_params_length_valid(params->p_size <<
					   HPRE_BITS_2_BYTES_SHIFT))
		return -EINVAL;

	sz = ctx->key_sz = params->p_size;
	ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1,
					  &ctx->dh.dma_xa_p, GFP_KERNEL);
	if (!ctx->dh.xa_p)
		return -ENOMEM;

	memcpy(ctx->dh.xa_p + sz, params->p, sz);

	/* If g equals 2 don't copy it */
	if (params->g_size == 1 && *(char *)params->g == HPRE_DH_G_FLAG) {
		ctx->crt_g2_mode = true;
		return 0;
	}

	ctx->dh.g = dma_alloc_coherent(dev, sz, &ctx->dh.dma_g, GFP_KERNEL);
	if (!ctx->dh.g) {
		dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
				  ctx->dh.dma_xa_p);
		ctx->dh.xa_p = NULL;
		return -ENOMEM;
	}

	memcpy(ctx->dh.g + (sz - params->g_size), params->g, params->g_size);

	return 0;
}
static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
	struct device *dev = ctx->dev;
	unsigned int sz = ctx->key_sz;

	if (is_clear_all)
		hisi_qm_stop_qp(ctx->qp);

	if (ctx->dh.g) {
		dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g);
		ctx->dh.g = NULL;
	}

	if (ctx->dh.xa_p) {
		memzero_explicit(ctx->dh.xa_p, sz);
		dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
				  ctx->dh.dma_xa_p);
		ctx->dh.xa_p = NULL;
	}

	hpre_ctx_clear(ctx, is_clear_all);
}
static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
			      unsigned int len)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	struct dh params;
	int ret;

	if (crypto_dh_decode_key(buf, len, &params) < 0)
		return -EINVAL;

	/* Free old secret if any */
	hpre_dh_clear_ctx(ctx, false);

	ret = hpre_dh_set_params(ctx, &params);
	if (ret < 0)
		goto err_clear_ctx;

	memcpy(ctx->dh.xa_p + (ctx->key_sz - params.key_size), params.key,
	       params.key_size);

	return 0;

err_clear_ctx:
	hpre_dh_clear_ctx(ctx, false);
	return ret;
}
static unsigned int hpre_dh_max_size(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	return ctx->key_sz;
}

static int hpre_dh_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());

	return hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
}

static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	hpre_dh_clear_ctx(ctx, true);
}
static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len)
{
	while (!**ptr && *len) {
		(*ptr)++;
		(*len)--;
	}
}

static bool hpre_rsa_key_size_is_support(unsigned int len)
{
	unsigned int bits = len << HPRE_BITS_2_BYTES_SHIFT;

#define _RSA_1024BITS_KEY_WDTH		1024
#define _RSA_2048BITS_KEY_WDTH		2048
#define _RSA_3072BITS_KEY_WDTH		3072
#define _RSA_4096BITS_KEY_WDTH		4096

	switch (bits) {
	case _RSA_1024BITS_KEY_WDTH:
	case _RSA_2048BITS_KEY_WDTH:
	case _RSA_3072BITS_KEY_WDTH:
	case _RSA_4096BITS_KEY_WDTH:
		return true;
	default:
		return false;
	}
}
static int hpre_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	void *tmp = akcipher_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	/* For 512 and 1536 bits key size, use soft tfm instead */
	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
		akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
		ret = crypto_akcipher_encrypt(req);
		akcipher_request_set_tfm(req, tfm);
		return ret;
	}

	if (unlikely(!ctx->rsa.pubkey))
		return -EINVAL;

	ret = hpre_msg_request_set(ctx, req, true);
	if (unlikely(ret))
		return ret;

	msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT);
	msg->key = cpu_to_le64(ctx->rsa.dma_pubkey);

	ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
	if (unlikely(ret))
		goto clear_all;

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
	if (unlikely(ret))
		goto clear_all;

	/* success */
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}
static int hpre_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	void *tmp = akcipher_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	/* For 512 and 1536 bits key size, use soft tfm instead */
	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
		akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
		ret = crypto_akcipher_decrypt(req);
		akcipher_request_set_tfm(req, tfm);
		return ret;
	}

	if (unlikely(!ctx->rsa.prikey))
		return -EINVAL;

	ret = hpre_msg_request_set(ctx, req, true);
	if (unlikely(ret))
		return ret;

	if (ctx->crt_g2_mode) {
		msg->key = cpu_to_le64(ctx->rsa.dma_crt_prikey);
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
				       HPRE_ALG_NC_CRT);
	} else {
		msg->key = cpu_to_le64(ctx->rsa.dma_prikey);
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
				       HPRE_ALG_NC_NCRT);
	}

	ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
	if (unlikely(ret))
		goto clear_all;

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
	if (unlikely(ret))
		goto clear_all;

	/* success */
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}
static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value,
			  size_t vlen, bool private)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	ctx->key_sz = vlen;

	/* if invalid key size provided, we use software tfm */
	if (!hpre_rsa_key_size_is_support(ctx->key_sz))
		return 0;

	ctx->rsa.pubkey = dma_alloc_coherent(ctx->dev, vlen << 1,
					     &ctx->rsa.dma_pubkey,
					     GFP_KERNEL);
	if (!ctx->rsa.pubkey)
		return -ENOMEM;

	if (private) {
		ctx->rsa.prikey = dma_alloc_coherent(ctx->dev, vlen << 1,
						     &ctx->rsa.dma_prikey,
						     GFP_KERNEL);
		if (!ctx->rsa.prikey) {
			dma_free_coherent(ctx->dev, vlen << 1,
					  ctx->rsa.pubkey,
					  ctx->rsa.dma_pubkey);
			ctx->rsa.pubkey = NULL;
			return -ENOMEM;
		}
		memcpy(ctx->rsa.prikey + vlen, ptr, vlen);
	}
	memcpy(ctx->rsa.pubkey + vlen, ptr, vlen);

	/* Using hardware HPRE to do RSA */
	return 1;
}
static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value,
			  size_t vlen)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		return -EINVAL;

	memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen);

	return 0;
}

static int hpre_rsa_set_d(struct hpre_ctx *ctx, const char *value,
			  size_t vlen)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		return -EINVAL;

	memcpy(ctx->rsa.prikey + ctx->key_sz - vlen, ptr, vlen);

	return 0;
}
static int hpre_crt_para_get(char *para, size_t para_sz,
			     const char *raw, size_t raw_sz)
{
	const char *ptr = raw;
	size_t len = raw_sz;

	hpre_rsa_drop_leading_zeros(&ptr, &len);
	if (!len || len > para_sz)
		return -EINVAL;

	memcpy(para + para_sz - len, ptr, len);

	return 0;
}
static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
{
	unsigned int hlf_ksz = ctx->key_sz >> 1;
	struct device *dev = ctx->dev;
	u64 offset;
	int ret;

	ctx->rsa.crt_prikey = dma_alloc_coherent(dev, hlf_ksz * HPRE_CRT_PRMS,
						 &ctx->rsa.dma_crt_prikey,
						 GFP_KERNEL);
	if (!ctx->rsa.crt_prikey)
		return -ENOMEM;

	ret = hpre_crt_para_get(ctx->rsa.crt_prikey, hlf_ksz,
				rsa_key->dq, rsa_key->dq_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->dp, rsa_key->dp_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_Q;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->q, rsa_key->q_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_P;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->p, rsa_key->p_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_INV;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->qinv, rsa_key->qinv_sz);
	if (ret)
		goto free_key;

	ctx->crt_g2_mode = true;

	return 0;

free_key:
	offset = hlf_ksz * HPRE_CRT_PRMS;
	memzero_explicit(ctx->rsa.crt_prikey, offset);
	dma_free_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey,
			  ctx->rsa.dma_crt_prikey);
	ctx->rsa.crt_prikey = NULL;
	ctx->crt_g2_mode = false;

	return ret;
}
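/*
 * Resulting CRT private-key buffer layout (as assembled above, each field
 * padded to hlf_ksz bytes), matching HPRE_CRT_PRMS == 5 and the
 * HPRE_CRT_Q/P/INV offsets as well as the "dq->dp->q->p->qinv" note in
 * struct hpre_rsa_ctx:
 *   offset 0            : dq
 *   offset hlf_ksz      : dp
 *   offset hlf_ksz * 2  : q
 *   offset hlf_ksz * 3  : p
 *   offset hlf_ksz * 4  : qinv
 */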
/* If it is clear all, all the resources of the QP will be cleaned. */
static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
	unsigned int half_key_sz = ctx->key_sz >> 1;
	struct device *dev = ctx->dev;

	if (is_clear_all)
		hisi_qm_stop_qp(ctx->qp);

	if (ctx->rsa.pubkey) {
		dma_free_coherent(dev, ctx->key_sz << 1,
				  ctx->rsa.pubkey, ctx->rsa.dma_pubkey);
		ctx->rsa.pubkey = NULL;
	}

	if (ctx->rsa.crt_prikey) {
		memzero_explicit(ctx->rsa.crt_prikey,
				 half_key_sz * HPRE_CRT_PRMS);
		dma_free_coherent(dev, half_key_sz * HPRE_CRT_PRMS,
				  ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey);
		ctx->rsa.crt_prikey = NULL;
	}

	if (ctx->rsa.prikey) {
		memzero_explicit(ctx->rsa.prikey, ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey,
				  ctx->rsa.dma_prikey);
		ctx->rsa.prikey = NULL;
	}

	hpre_ctx_clear(ctx, is_clear_all);
}
/*
 * we should judge if it is CRT or not,
 * CRT: return true,  N-CRT: return false .
 */
static bool hpre_is_crt_key(struct rsa_key *key)
{
	u16 len = key->p_sz + key->q_sz + key->dp_sz + key->dq_sz +
		  key->qinv_sz;

#define LEN_OF_NCRT_PARA	5

	/* N-CRT less than 5 parameters */
	return len > LEN_OF_NCRT_PARA;
}
static int hpre_rsa_setkey(struct hpre_ctx *ctx, const void *key,
			   unsigned int keylen, bool private)
{
	struct rsa_key rsa_key;
	int ret;

	hpre_rsa_clear_ctx(ctx, false);

	if (private)
		ret = rsa_parse_priv_key(&rsa_key, key, keylen);
	else
		ret = rsa_parse_pub_key(&rsa_key, key, keylen);
	if (ret < 0)
		return ret;

	ret = hpre_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz, private);
	if (ret <= 0)
		return ret;

	if (private) {
		ret = hpre_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
		if (ret < 0)
			goto free;

		if (hpre_is_crt_key(&rsa_key)) {
			ret = hpre_rsa_setkey_crt(ctx, &rsa_key);
			if (ret < 0)
				goto free;
		}
	}

	ret = hpre_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
	if (ret < 0)
		goto free;

	if ((private && !ctx->rsa.prikey) || !ctx->rsa.pubkey) {
		ret = -EINVAL;
		goto free;
	}

	return 0;

free:
	hpre_rsa_clear_ctx(ctx, false);
	return ret;
}
static int hpre_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
			      unsigned int keylen)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ret = crypto_akcipher_set_pub_key(ctx->rsa.soft_tfm, key, keylen);
	if (ret)
		return ret;

	return hpre_rsa_setkey(ctx, key, keylen, false);
}

static int hpre_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
			       unsigned int keylen)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ret = crypto_akcipher_set_priv_key(ctx->rsa.soft_tfm, key, keylen);
	if (ret)
		return ret;

	return hpre_rsa_setkey(ctx, key, keylen, true);
}
static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);

	/* For 512 and 1536 bits key size, use soft tfm instead */
	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ)
		return crypto_akcipher_maxsize(ctx->rsa.soft_tfm);

	return ctx->key_sz;
}
static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0);
	if (IS_ERR(ctx->rsa.soft_tfm)) {
		pr_err("Can not alloc_akcipher!\n");
		return PTR_ERR(ctx->rsa.soft_tfm);
	}

	akcipher_set_reqsize(tfm, sizeof(struct hpre_asym_request) +
			     hpre_align_pd());

	ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
	if (ret)
		crypto_free_akcipher(ctx->rsa.soft_tfm);

	return ret;
}

static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);

	hpre_rsa_clear_ctx(ctx, true);
	crypto_free_akcipher(ctx->rsa.soft_tfm);
}
static void hpre_key_to_big_end(u8 *data, int len)
{
	int i, j;

	for (i = 0; i < len / 2; i++) {
		j = len - i - 1;
		swap(data[j], data[i]);
	}
}
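/*
 * Example of the in-place byte reversal above (len == 4, values chosen only
 * to illustrate): {0x11, 0x22, 0x33, 0x44} becomes {0x44, 0x33, 0x22, 0x11},
 * i.e. a little-endian buffer is turned into its big-endian form.
 */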
static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all,
			       bool is_ecdh)
{
	struct device *dev = ctx->dev;
	unsigned int sz = ctx->key_sz;
	unsigned int shift = sz << 1;

	if (is_clear_all)
		hisi_qm_stop_qp(ctx->qp);

	if (is_ecdh && ctx->ecdh.p) {
		/* ecdh: p->a->k->b */
		memzero_explicit(ctx->ecdh.p + shift, sz);
		dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
		ctx->ecdh.p = NULL;
	} else if (!is_ecdh && ctx->curve25519.p) {
		/* curve25519: p->a->k */
		memzero_explicit(ctx->curve25519.p + shift, sz);
		dma_free_coherent(dev, sz << 2, ctx->curve25519.p,
				  ctx->curve25519.dma_p);
		ctx->curve25519.p = NULL;
	}

	hpre_ctx_clear(ctx, is_clear_all);
}
/*
 * The bits of 192/224/256/384/521 are supported by HPRE,
 * and convert the bits like:
 * bits<=256, bits=256; 256<bits<=384, bits=384; 384<bits<=576, bits=576;
 * If the parameter bit width is insufficient, then we fill in the
 * high-order zeros by soft, so TASK_LENGTH1 is 0x3/0x5/0x8;
 */
static unsigned int hpre_ecdh_supported_curve(unsigned short id)
{
	switch (id) {
	case ECC_CURVE_NIST_P192:
	case ECC_CURVE_NIST_P256:
		return HPRE_ECC_HW256_KSZ_B;
	case ECC_CURVE_NIST_P384:
		return HPRE_ECC_HW384_KSZ_B;
	default:
		break;
	}

	return 0;
}
static void fill_curve_param(void *addr, u64 *param, unsigned int cur_sz, u8 ndigits)
{
	unsigned int sz = cur_sz - (ndigits - 1) * sizeof(u64);
	u8 i = 0;

	while (i < ndigits - 1) {
		memcpy(addr + sizeof(u64) * i, &param[i], sizeof(u64));
		i++;
	}

	memcpy(addr + sizeof(u64) * i, &param[ndigits - 1], sz);
	hpre_key_to_big_end((u8 *)addr, cur_sz);
}
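/*
 * Sketch for NIST P-192 (an assumed example: cur_sz == 24, ndigits == 3):
 * the first two u64 digits are copied verbatim, the last digit contributes
 * the remaining sz = 24 - 2 * 8 = 8 bytes, and hpre_key_to_big_end() then
 * converts the whole 24-byte buffer into the big-endian layout the engine
 * consumes.
 */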
static int hpre_ecdh_fill_curve(struct hpre_ctx *ctx, struct ecdh *params,
				unsigned int cur_sz)
{
	unsigned int shifta = ctx->key_sz << 1;
	unsigned int shiftb = ctx->key_sz << 2;
	void *p = ctx->ecdh.p + ctx->key_sz - cur_sz;
	void *a = ctx->ecdh.p + shifta - cur_sz;
	void *b = ctx->ecdh.p + shiftb - cur_sz;
	void *x = ctx->ecdh.g + ctx->key_sz - cur_sz;
	void *y = ctx->ecdh.g + shifta - cur_sz;
	const struct ecc_curve *curve = ecc_get_curve(ctx->curve_id);
	char *n;

	if (unlikely(!curve))
		return -EINVAL;

	n = kzalloc(ctx->key_sz, GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	fill_curve_param(p, curve->p, cur_sz, curve->g.ndigits);
	fill_curve_param(a, curve->a, cur_sz, curve->g.ndigits);
	fill_curve_param(b, curve->b, cur_sz, curve->g.ndigits);
	fill_curve_param(x, curve->g.x, cur_sz, curve->g.ndigits);
	fill_curve_param(y, curve->g.y, cur_sz, curve->g.ndigits);
	fill_curve_param(n, curve->n, cur_sz, curve->g.ndigits);

	if (params->key_size == cur_sz && memcmp(params->key, n, cur_sz) >= 0) {
		kfree(n);
		return -EINVAL;
	}

	kfree(n);
	return 0;
}
static unsigned int hpre_ecdh_get_curvesz(unsigned short id)
{
	switch (id) {
	case ECC_CURVE_NIST_P192:
		return HPRE_ECC_NIST_P192_N_SIZE;
	case ECC_CURVE_NIST_P256:
		return HPRE_ECC_NIST_P256_N_SIZE;
	case ECC_CURVE_NIST_P384:
		return HPRE_ECC_NIST_P384_N_SIZE;
	default:
		break;
	}

	return 0;
}
static int hpre_ecdh_set_param(struct hpre_ctx *ctx, struct ecdh *params)
{
	struct device *dev = ctx->dev;
	unsigned int sz, shift, curve_sz;
	int ret;

	ctx->key_sz = hpre_ecdh_supported_curve(ctx->curve_id);
	if (!ctx->key_sz)
		return -EINVAL;

	curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
	if (!curve_sz || params->key_size > curve_sz)
		return -EINVAL;

	sz = ctx->key_sz;

	if (!ctx->ecdh.p) {
		ctx->ecdh.p = dma_alloc_coherent(dev, sz << 3, &ctx->ecdh.dma_p,
						 GFP_KERNEL);
		if (!ctx->ecdh.p)
			return -ENOMEM;
	}

	shift = sz << 2;
	ctx->ecdh.g = ctx->ecdh.p + shift;
	ctx->ecdh.dma_g = ctx->ecdh.dma_p + shift;

	ret = hpre_ecdh_fill_curve(ctx, params, curve_sz);
	if (ret) {
		dev_err(dev, "failed to fill curve_param, ret = %d!\n", ret);
		dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
		ctx->ecdh.p = NULL;
		return ret;
	}

	return 0;
}
static bool hpre_key_is_zero(char *key, unsigned short key_sz)
{
	int i;

	for (i = 0; i < key_sz; i++)
		if (key[i])
			return false;

	return true;
}
static int ecdh_gen_privkey(struct hpre_ctx *ctx, struct ecdh *params)
{
	struct device *dev = ctx->dev;
	int ret;

	ret = crypto_get_default_rng();
	if (ret) {
		dev_err(dev, "failed to get default rng, ret = %d!\n", ret);
		return ret;
	}

	ret = crypto_rng_get_bytes(crypto_default_rng, (u8 *)params->key,
				   params->key_size);
	crypto_put_default_rng();
	if (ret)
		dev_err(dev, "failed to get rng, ret = %d!\n", ret);

	return ret;
}
static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
				unsigned int len)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	unsigned int sz, sz_shift, curve_sz;
	struct device *dev = ctx->dev;
	char key[HPRE_ECC_MAX_KSZ];
	struct ecdh params;
	int ret;

	if (crypto_ecdh_decode_key(buf, len, &params) < 0) {
		dev_err(dev, "failed to decode ecdh key!\n");
		return -EINVAL;
	}

	/* Use stdrng to generate private key */
	if (!params.key || !params.key_size) {
		params.key = key;
		curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
		if (!curve_sz) {
			dev_err(dev, "Invalid curve size!\n");
			return -EINVAL;
		}

		params.key_size = curve_sz - 1;
		ret = ecdh_gen_privkey(ctx, &params);
		if (ret)
			return ret;
	}

	if (hpre_key_is_zero(params.key, params.key_size)) {
		dev_err(dev, "Invalid hpre key!\n");
		return -EINVAL;
	}

	hpre_ecc_clear_ctx(ctx, false, true);

	ret = hpre_ecdh_set_param(ctx, &params);
	if (ret < 0) {
		dev_err(dev, "failed to set hpre param, ret = %d!\n", ret);
		return ret;
	}

	sz = ctx->key_sz;
	sz_shift = (sz << 1) + sz - params.key_size;
	memcpy(ctx->ecdh.p + sz_shift, params.key, params.key_size);

	return 0;
}
static void hpre_ecdh_hw_data_clr_all(struct hpre_ctx *ctx,
				      struct hpre_asym_request *req,
				      struct scatterlist *dst,
				      struct scatterlist *src)
{
	struct device *dev = ctx->dev;
	struct hpre_sqe *sqe = &req->req;
	dma_addr_t dma;

	dma = le64_to_cpu(sqe->in);
	if (unlikely(dma_mapping_error(dev, dma)))
		return;

	if (src && req->src)
		dma_free_coherent(dev, ctx->key_sz << 2, req->src, dma);

	dma = le64_to_cpu(sqe->out);
	if (unlikely(dma_mapping_error(dev, dma)))
		return;

	if (req->dst)
		dma_free_coherent(dev, ctx->key_sz << 1, req->dst, dma);
	if (dst)
		dma_unmap_single(dev, dma, ctx->key_sz << 1, DMA_FROM_DEVICE);
}
static void hpre_ecdh_cb(struct hpre_ctx *ctx, void *resp)
{
	unsigned int curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_asym_request *req = NULL;
	struct kpp_request *areq;
	u64 overtime_thrhld;
	char *p;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
	areq = req->areq.ecdh;
	areq->dst_len = ctx->key_sz << 1;

	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);

	p = sg_virt(areq->dst);
	memmove(p, p + ctx->key_sz - curve_sz, curve_sz);
	memmove(p + curve_sz, p + areq->dst_len - curve_sz, curve_sz);

	hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	kpp_request_complete(areq, ret);

	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}
static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx,
				     struct kpp_request *req)
{
	struct hpre_asym_request *h_req;
	struct hpre_sqe *msg;
	int req_id;
	void *tmp;

	if (req->dst_len < ctx->key_sz << 1) {
		req->dst_len = ctx->key_sz << 1;
		return -EINVAL;
	}

	tmp = kpp_request_ctx(req);
	h_req = PTR_ALIGN(tmp, hpre_align_sz());
	h_req->cb = hpre_ecdh_cb;
	h_req->areq.ecdh = req;
	msg = &h_req->req;
	memset(msg, 0, sizeof(*msg));
	msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->key = cpu_to_le64(ctx->ecdh.dma_p);

	msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
	msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
	h_req->ctx = ctx;

	req_id = hpre_add_req_to_ctx(h_req);
	if (req_id < 0)
		return -EBUSY;

	msg->tag = cpu_to_le16((u16)req_id);
	return 0;
}
static int hpre_ecdh_src_data_init(struct hpre_asym_request *hpre_req,
				   struct scatterlist *data, unsigned int len)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = ctx->dev;
	unsigned int tmpshift;
	dma_addr_t dma = 0;
	void *ptr;
	int shift;

	/* Src_data include gx and gy. */
	shift = ctx->key_sz - (len >> 1);
	if (unlikely(shift < 0))
		return -EINVAL;

	ptr = dma_alloc_coherent(dev, ctx->key_sz << 2, &dma, GFP_KERNEL);
	if (unlikely(!ptr))
		return -ENOMEM;

	tmpshift = ctx->key_sz << 1;
	scatterwalk_map_and_copy(ptr + tmpshift, data, 0, len, 0);
	memcpy(ptr + shift, ptr + tmpshift, len >> 1);
	memcpy(ptr + ctx->key_sz + shift, ptr + tmpshift + (len >> 1), len >> 1);

	hpre_req->src = ptr;
	msg->in = cpu_to_le64(dma);
	return 0;
}
static int hpre_ecdh_dst_data_init(struct hpre_asym_request *hpre_req,
				   struct scatterlist *data, unsigned int len)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = ctx->dev;
	dma_addr_t dma;

	if (unlikely(!data || !sg_is_last(data) || len != ctx->key_sz << 1)) {
		dev_err(dev, "data or data length is illegal!\n");
		return -EINVAL;
	}

	hpre_req->dst = NULL;
	dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, dma))) {
		dev_err(dev, "dma map data err!\n");
		return -ENOMEM;
	}

	msg->out = cpu_to_le64(dma);
	return 0;
}
static int hpre_ecdh_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	void *tmp = kpp_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	ret = hpre_ecdh_msg_request_set(ctx, req);
	if (unlikely(ret)) {
		dev_err(dev, "failed to set ecdh request, ret = %d!\n", ret);
		return ret;
	}

	if (req->src) {
		ret = hpre_ecdh_src_data_init(hpre_req, req->src, req->src_len);
		if (unlikely(ret)) {
			dev_err(dev, "failed to init src data, ret = %d!\n", ret);
			goto clear_all;
		}
	} else {
		msg->in = cpu_to_le64(ctx->ecdh.dma_g);
	}

	ret = hpre_ecdh_dst_data_init(hpre_req, req->dst, req->dst_len);
	if (unlikely(ret)) {
		dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
		goto clear_all;
	}

	msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_ECC_MUL);
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_ecdh_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
	return ret;
}
static unsigned int hpre_ecdh_max_size(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	/* max size is the pub_key_size, include x and y */
	return ctx->key_sz << 1;
}

static int hpre_ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	ctx->curve_id = ECC_CURVE_NIST_P192;

	kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());

	return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
}

static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	ctx->curve_id = ECC_CURVE_NIST_P256;

	kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());

	return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
}

static int hpre_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	ctx->curve_id = ECC_CURVE_NIST_P384;

	kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());

	return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
}

static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	hpre_ecc_clear_ctx(ctx, true, true);
}
static void hpre_curve25519_fill_curve(struct hpre_ctx *ctx, const void *buf,
				       unsigned int len)
{
	u8 secret[CURVE25519_KEY_SIZE] = { 0 };
	unsigned int sz = ctx->key_sz;
	const struct ecc_curve *curve;
	unsigned int shift = sz << 1;
	void *p;

	/*
	 * The key from 'buf' is in little-endian, we should preprocess it as
	 * the description in rfc7748: "k[0] &= 248, k[31] &= 127, k[31] |= 64",
	 * then convert it to big endian. Only in this way, the result can be
	 * the same as the software curve-25519 that exists in crypto.
	 */
	memcpy(secret, buf, len);
	curve25519_clamp_secret(secret);
	hpre_key_to_big_end(secret, CURVE25519_KEY_SIZE);

	p = ctx->curve25519.p + sz - len;

	curve = ecc_get_curve25519();

	/* fill curve parameters */
	fill_curve_param(p, curve->p, len, curve->g.ndigits);
	fill_curve_param(p + sz, curve->a, len, curve->g.ndigits);
	memcpy(p + shift, secret, len);
	fill_curve_param(p + shift + sz, curve->g.x, len, curve->g.ndigits);
	memzero_explicit(secret, CURVE25519_KEY_SIZE);
}
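/*
 * Clamping example (values chosen only to illustrate the rfc7748 rule
 * quoted above): a secret whose first byte is 0xff and last byte is 0xff is
 * stored with k[0] = 0xf8 (0xff & 248) and k[31] = 0x7f ((0xff & 127) | 64),
 * and only then converted to big-endian for the hardware.
 */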
static int hpre_curve25519_set_param(struct hpre_ctx *ctx, const void *buf,
				     unsigned int len)
{
	struct device *dev = ctx->dev;
	unsigned int sz = ctx->key_sz;
	unsigned int shift = sz << 1;

	/* p->a->k->gx */
	if (!ctx->curve25519.p) {
		ctx->curve25519.p = dma_alloc_coherent(dev, sz << 2,
						       &ctx->curve25519.dma_p,
						       GFP_KERNEL);
		if (!ctx->curve25519.p)
			return -ENOMEM;
	}

	ctx->curve25519.g = ctx->curve25519.p + shift + sz;
	ctx->curve25519.dma_g = ctx->curve25519.dma_p + shift + sz;

	hpre_curve25519_fill_curve(ctx, buf, len);

	return 0;
}
static int hpre_curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
				      unsigned int len)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	int ret = -EINVAL;

	if (len != CURVE25519_KEY_SIZE ||
	    !crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) {
		dev_err(dev, "key is null or key len is not 32bytes!\n");
		return ret;
	}

	/* Free old secret if any */
	hpre_ecc_clear_ctx(ctx, false, false);

	ctx->key_sz = CURVE25519_KEY_SIZE;
	ret = hpre_curve25519_set_param(ctx, buf, CURVE25519_KEY_SIZE);
	if (ret) {
		dev_err(dev, "failed to set curve25519 param, ret = %d!\n", ret);
		hpre_ecc_clear_ctx(ctx, false, false);
		return ret;
	}

	return 0;
}
static void hpre_curve25519_hw_data_clr_all(struct hpre_ctx *ctx,
					    struct hpre_asym_request *req,
					    struct scatterlist *dst,
					    struct scatterlist *src)
{
	struct device *dev = ctx->dev;
	struct hpre_sqe *sqe = &req->req;
	dma_addr_t dma;

	dma = le64_to_cpu(sqe->in);
	if (unlikely(dma_mapping_error(dev, dma)))
		return;

	if (src && req->src)
		dma_free_coherent(dev, ctx->key_sz, req->src, dma);

	dma = le64_to_cpu(sqe->out);
	if (unlikely(dma_mapping_error(dev, dma)))
		return;

	if (req->dst)
		dma_free_coherent(dev, ctx->key_sz, req->dst, dma);
	if (dst)
		dma_unmap_single(dev, dma, ctx->key_sz, DMA_FROM_DEVICE);
}
static void hpre_curve25519_cb(struct hpre_ctx *ctx, void *resp)
{
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_asym_request *req = NULL;
	struct kpp_request *areq;
	u64 overtime_thrhld;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
	areq = req->areq.curve25519;
	areq->dst_len = ctx->key_sz;

	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);

	hpre_key_to_big_end(sg_virt(areq->dst), CURVE25519_KEY_SIZE);

	hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	kpp_request_complete(areq, ret);

	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}
static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx,
					   struct kpp_request *req)
{
	struct hpre_asym_request *h_req;
	struct hpre_sqe *msg;
	int req_id;
	void *tmp;

	if (unlikely(req->dst_len < ctx->key_sz)) {
		req->dst_len = ctx->key_sz;
		return -EINVAL;
	}

	tmp = kpp_request_ctx(req);
	h_req = PTR_ALIGN(tmp, hpre_align_sz());
	h_req->cb = hpre_curve25519_cb;
	h_req->areq.curve25519 = req;
	msg = &h_req->req;
	memset(msg, 0, sizeof(*msg));
	msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->key = cpu_to_le64(ctx->curve25519.dma_p);

	msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
	msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
	h_req->ctx = ctx;

	req_id = hpre_add_req_to_ctx(h_req);
	if (req_id < 0)
		return -EBUSY;

	msg->tag = cpu_to_le16((u16)req_id);
	return 0;
}
static void hpre_curve25519_src_modulo_p(u8 *ptr)
{
	int i;

	for (i = 0; i < CURVE25519_KEY_SIZE - 1; i++)
		ptr[i] = 0;

	/* The modulus is ptr's last byte minus '0xed'(last byte of p) */
	ptr[i] -= 0xed;
}
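/*
 * Why this is a full reduction (reasoning added for clarity, not quoted
 * from the original source): p = 2^255 - 19, so a big-endian value in
 * (p, 2^255 - 1] shares every byte with p (0x7f followed by thirty 0xff
 * bytes) except the last one, which lies in [0xee, 0xff].  Its residue
 * modulo p is therefore just last_byte - 0xed with all higher bytes
 * cleared, which is exactly what the loop above produces.
 */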
static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req,
				    struct scatterlist *data, unsigned int len)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = ctx->dev;
	u8 p[CURVE25519_KEY_SIZE] = { 0 };
	const struct ecc_curve *curve;
	dma_addr_t dma = 0;
	u8 *ptr;

	if (len != CURVE25519_KEY_SIZE) {
		dev_err(dev, "src_data len is not 32bytes, len = %u!\n", len);
		return -EINVAL;
	}

	ptr = dma_alloc_coherent(dev, ctx->key_sz, &dma, GFP_KERNEL);
	if (unlikely(!ptr))
		return -ENOMEM;

	scatterwalk_map_and_copy(ptr, data, 0, len, 0);

	if (!crypto_memneq(ptr, curve25519_null_point, CURVE25519_KEY_SIZE)) {
		dev_err(dev, "gx is null!\n");
		goto err;
	}

	/*
	 * Src_data(gx) is in little-endian order, MSB in the final byte should
	 * be masked as described in RFC7748, then transform it to big-endian
	 * form, then hisi_hpre can use the data.
	 */
	ptr[31] &= 0x7f;
	hpre_key_to_big_end(ptr, CURVE25519_KEY_SIZE);

	curve = ecc_get_curve25519();

	fill_curve_param(p, curve->p, CURVE25519_KEY_SIZE, curve->g.ndigits);

	/*
	 * When src_data equals (2^255 - 19) ~ (2^255 - 1), it is out of p,
	 * we get its modulus to p, and then use it.
	 */
	if (memcmp(ptr, p, ctx->key_sz) == 0) {
		dev_err(dev, "gx is p!\n");
		goto err;
	} else if (memcmp(ptr, p, ctx->key_sz) > 0) {
		hpre_curve25519_src_modulo_p(ptr);
	}

	hpre_req->src = ptr;
	msg->in = cpu_to_le64(dma);
	return 0;

err:
	dma_free_coherent(dev, ctx->key_sz, ptr, dma);
	return -EINVAL;
}
static int hpre_curve25519_dst_init(struct hpre_asym_request *hpre_req,
				    struct scatterlist *data, unsigned int len)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = ctx->dev;
	dma_addr_t dma;

	if (!data || !sg_is_last(data) || len != ctx->key_sz) {
		dev_err(dev, "data or data length is illegal!\n");
		return -EINVAL;
	}

	hpre_req->dst = NULL;
	dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, dma))) {
		dev_err(dev, "dma map data err!\n");
		return -ENOMEM;
	}

	msg->out = cpu_to_le64(dma);
	return 0;
}
static int hpre_curve25519_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	void *tmp = kpp_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	ret = hpre_curve25519_msg_request_set(ctx, req);
	if (unlikely(ret)) {
		dev_err(dev, "failed to set curve25519 request, ret = %d!\n", ret);
		return ret;
	}

	if (req->src) {
		ret = hpre_curve25519_src_init(hpre_req, req->src, req->src_len);
		if (unlikely(ret)) {
			dev_err(dev, "failed to init src data, ret = %d!\n",
				ret);
			goto clear_all;
		}
	} else {
		msg->in = cpu_to_le64(ctx->curve25519.dma_g);
	}

	ret = hpre_curve25519_dst_init(hpre_req, req->dst, req->dst_len);
	if (unlikely(ret)) {
		dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
		goto clear_all;
	}

	msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_CURVE25519_MUL);
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_curve25519_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
	return ret;
}
static unsigned int hpre_curve25519_max_size(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	return ctx->key_sz;
}

static int hpre_curve25519_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());

	return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
}

static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	hpre_ecc_clear_ctx(ctx, true, false);
}
static struct akcipher_alg rsa = {
	.encrypt = hpre_rsa_enc,
	.decrypt = hpre_rsa_dec,
	.set_pub_key = hpre_rsa_setpubkey,
	.set_priv_key = hpre_rsa_setprivkey,
	.max_size = hpre_rsa_max_size,
	.init = hpre_rsa_init_tfm,
	.exit = hpre_rsa_exit_tfm,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "rsa",
		.cra_driver_name = "hpre-rsa",
		.cra_module = THIS_MODULE,
	},
};
static struct kpp_alg dh = {
	.set_secret = hpre_dh_set_secret,
	.generate_public_key = hpre_dh_compute_value,
	.compute_shared_secret = hpre_dh_compute_value,
	.max_size = hpre_dh_max_size,
	.init = hpre_dh_init_tfm,
	.exit = hpre_dh_exit_tfm,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "dh",
		.cra_driver_name = "hpre-dh",
		.cra_module = THIS_MODULE,
	},
};
static struct kpp_alg ecdh_curves[] = {
	{
		.set_secret = hpre_ecdh_set_secret,
		.generate_public_key = hpre_ecdh_compute_value,
		.compute_shared_secret = hpre_ecdh_compute_value,
		.max_size = hpre_ecdh_max_size,
		.init = hpre_ecdh_nist_p192_init_tfm,
		.exit = hpre_ecdh_exit_tfm,
		.base = {
			.cra_ctxsize = sizeof(struct hpre_ctx),
			.cra_priority = HPRE_CRYPTO_ALG_PRI,
			.cra_name = "ecdh-nist-p192",
			.cra_driver_name = "hpre-ecdh-nist-p192",
			.cra_module = THIS_MODULE,
		},
	}, {
		.set_secret = hpre_ecdh_set_secret,
		.generate_public_key = hpre_ecdh_compute_value,
		.compute_shared_secret = hpre_ecdh_compute_value,
		.max_size = hpre_ecdh_max_size,
		.init = hpre_ecdh_nist_p256_init_tfm,
		.exit = hpre_ecdh_exit_tfm,
		.base = {
			.cra_ctxsize = sizeof(struct hpre_ctx),
			.cra_priority = HPRE_CRYPTO_ALG_PRI,
			.cra_name = "ecdh-nist-p256",
			.cra_driver_name = "hpre-ecdh-nist-p256",
			.cra_module = THIS_MODULE,
		},
	}, {
		.set_secret = hpre_ecdh_set_secret,
		.generate_public_key = hpre_ecdh_compute_value,
		.compute_shared_secret = hpre_ecdh_compute_value,
		.max_size = hpre_ecdh_max_size,
		.init = hpre_ecdh_nist_p384_init_tfm,
		.exit = hpre_ecdh_exit_tfm,
		.base = {
			.cra_ctxsize = sizeof(struct hpre_ctx),
			.cra_priority = HPRE_CRYPTO_ALG_PRI,
			.cra_name = "ecdh-nist-p384",
			.cra_driver_name = "hpre-ecdh-nist-p384",
			.cra_module = THIS_MODULE,
		},
	}
};
static struct kpp_alg curve25519_alg = {
	.set_secret = hpre_curve25519_set_secret,
	.generate_public_key = hpre_curve25519_compute_value,
	.compute_shared_secret = hpre_curve25519_compute_value,
	.max_size = hpre_curve25519_max_size,
	.init = hpre_curve25519_init_tfm,
	.exit = hpre_curve25519_exit_tfm,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "curve25519",
		.cra_driver_name = "hpre-curve25519",
		.cra_module = THIS_MODULE,
	},
};
static int hpre_register_rsa(struct hisi_qm *qm)
{
	int ret;

	if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP))
		return 0;

	rsa.base.cra_flags = 0;
	ret = crypto_register_akcipher(&rsa);
	if (ret)
		dev_err(&qm->pdev->dev, "failed to register rsa (%d)!\n", ret);

	return ret;
}

static void hpre_unregister_rsa(struct hisi_qm *qm)
{
	if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP))
		return;

	crypto_unregister_akcipher(&rsa);
}
static int hpre_register_dh(struct hisi_qm *qm)
{
	int ret;

	if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP))
		return 0;

	ret = crypto_register_kpp(&dh);
	if (ret)
		dev_err(&qm->pdev->dev, "failed to register dh (%d)!\n", ret);

	return ret;
}

static void hpre_unregister_dh(struct hisi_qm *qm)
{
	if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP))
		return;

	crypto_unregister_kpp(&dh);
}
static int hpre_register_ecdh(struct hisi_qm *qm)
{
	int ret, i;

	if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP))
		return 0;

	for (i = 0; i < ARRAY_SIZE(ecdh_curves); i++) {
		ret = crypto_register_kpp(&ecdh_curves[i]);
		if (ret) {
			dev_err(&qm->pdev->dev, "failed to register %s (%d)!\n",
				ecdh_curves[i].base.cra_name, ret);
			goto unreg_kpp;
		}
	}

	return 0;

unreg_kpp:
	for (--i; i >= 0; --i)
		crypto_unregister_kpp(&ecdh_curves[i]);

	return ret;
}

static void hpre_unregister_ecdh(struct hisi_qm *qm)
{
	int i;

	if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP))
		return;

	for (i = ARRAY_SIZE(ecdh_curves) - 1; i >= 0; --i)
		crypto_unregister_kpp(&ecdh_curves[i]);
}
static int hpre_register_x25519(struct hisi_qm *qm)
{
	int ret;

	if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
		return 0;

	ret = crypto_register_kpp(&curve25519_alg);
	if (ret)
		dev_err(&qm->pdev->dev, "failed to register x25519 (%d)!\n", ret);

	return ret;
}

static void hpre_unregister_x25519(struct hisi_qm *qm)
{
	if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
		return;

	crypto_unregister_kpp(&curve25519_alg);
}
int hpre_algs_register(struct hisi_qm *qm)
{
	int ret = 0;

	mutex_lock(&hpre_algs_lock);
	if (hpre_available_devs) {
		hpre_available_devs++;
		goto unlock;
	}

	ret = hpre_register_rsa(qm);
	if (ret)
		goto unlock;

	ret = hpre_register_dh(qm);
	if (ret)
		goto unreg_rsa;

	ret = hpre_register_ecdh(qm);
	if (ret)
		goto unreg_dh;

	ret = hpre_register_x25519(qm);
	if (ret)
		goto unreg_ecdh;

	hpre_available_devs++;
	mutex_unlock(&hpre_algs_lock);

	return ret;

unreg_ecdh:
	hpre_unregister_ecdh(qm);
unreg_dh:
	hpre_unregister_dh(qm);
unreg_rsa:
	hpre_unregister_rsa(qm);
unlock:
	mutex_unlock(&hpre_algs_lock);
	return ret;
}
void hpre_algs_unregister(struct hisi_qm *qm)
{
	mutex_lock(&hpre_algs_lock);
	if (--hpre_available_devs)
		goto unlock;

	hpre_unregister_x25519(qm);
	hpre_unregister_ecdh(qm);
	hpre_unregister_dh(qm);
	hpre_unregister_rsa(qm);

unlock:
	mutex_unlock(&hpre_algs_lock);
}