// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <crypto/akcipher.h>
#include <crypto/dh.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/kpp.h>
#include <crypto/internal/rsa.h>
#include <crypto/kpp.h>
#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <linux/module.h>
#include <linux/time.h>
#include "hpre.h"
#define HPRE_CRYPTO_ALG_PRI	1000
#define HPRE_ALIGN_SZ		64
#define HPRE_BITS_2_BYTES_SHIFT	3
#define HPRE_RSA_512BITS_KSZ	64
#define HPRE_RSA_1536BITS_KSZ	192
#define HPRE_CRT_PRMS		5
#define HPRE_CRT_Q		2
#define HPRE_CRT_P		3
#define HPRE_CRT_INV		4
#define HPRE_DH_G_FLAG		0x02
#define HPRE_TRY_SEND_TIMES	100
#define HPRE_INVLD_REQ_ID	(-1)
#define HPRE_DEV(ctx)		(&((ctx)->qp->qm->pdev->dev))

#define HPRE_SQE_ALG_BITS	5
#define HPRE_SQE_DONE_SHIFT	30
#define HPRE_DH_MAX_P_SZ	512

#define HPRE_DFX_SEC_TO_US	1000000
#define HPRE_DFX_US_TO_NS	1000
typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
struct hpre_rsa_ctx {
	/* low address: e--->n */
	char *pubkey;
	dma_addr_t dma_pubkey;

	/* low address: d--->n */
	char *prikey;
	dma_addr_t dma_prikey;

	/* low address: dq->dp->q->p->qinv */
	char *crt_prikey;
	dma_addr_t dma_crt_prikey;

	struct crypto_akcipher *soft_tfm;
};
struct hpre_dh_ctx {
	/*
	 * If base is g we compute the public key
	 *	ya = g^xa mod p; [RFC2631 sec 2.1.1]
	 * else if base is the counterpart public key we
	 * compute the shared secret
	 *	ZZ = yb^xa mod p; [RFC2631 sec 2.1.1]
	 */
	char *xa_p; /* low address: d--->n, please refer to Hisilicon HPRE UM */
	dma_addr_t dma_xa_p;

	char *g;
	dma_addr_t dma_g;
};
struct hpre_ctx {
	struct hisi_qp *qp;
	struct hpre_asym_request **req_list;
	struct hpre *hpre;
	spinlock_t req_lock;
	unsigned int key_sz;
	bool crt_g2_mode;
	struct idr req_idr;
	union {
		struct hpre_rsa_ctx rsa;
		struct hpre_dh_ctx dh;
	};
};
struct hpre_asym_request {
	char *src;
	char *dst;
	struct hpre_sqe req;
	struct hpre_ctx *ctx;
	union {
		struct akcipher_request *rsa;
		struct kpp_request *dh;
	} areq;
	int err;
	int req_id;
	hpre_cb cb;
	struct timespec64 req_time;
};
static int hpre_alloc_req_id(struct hpre_ctx *ctx)
{
	unsigned long flags;
	int id;

	spin_lock_irqsave(&ctx->req_lock, flags);
	id = idr_alloc(&ctx->req_idr, NULL, 0, QM_Q_DEPTH, GFP_ATOMIC);
	spin_unlock_irqrestore(&ctx->req_lock, flags);

	return id;
}
static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->req_lock, flags);
	idr_remove(&ctx->req_idr, req_id);
	spin_unlock_irqrestore(&ctx->req_lock, flags);
}
static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
{
	struct hpre_ctx *ctx;
	struct hpre_dfx *dfx;
	int id;

	ctx = hpre_req->ctx;
	id = hpre_alloc_req_id(ctx);
	if (unlikely(id < 0))
		return -EINVAL;

	ctx->req_list[id] = hpre_req;
	hpre_req->req_id = id;

	dfx = ctx->hpre->debug.dfx;
	if (atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value))
		ktime_get_ts64(&hpre_req->req_time);

	return id;
}
static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
{
	struct hpre_ctx *ctx = hpre_req->ctx;
	int id = hpre_req->req_id;

	if (hpre_req->req_id >= 0) {
		hpre_req->req_id = HPRE_INVLD_REQ_ID;
		ctx->req_list[id] = NULL;
		hpre_free_req_id(ctx, id);
	}
}
static struct hisi_qp *hpre_get_qp_and_start(void)
{
	struct hisi_qp *qp;
	int ret;

	qp = hpre_create_qp();
	if (!qp) {
		pr_err("Can not create hpre qp!\n");
		return ERR_PTR(-ENODEV);
	}

	ret = hisi_qm_start_qp(qp, 0);
	if (ret < 0) {
		hisi_qm_free_qps(&qp, 1);
		pci_err(qp->qm->pdev, "Can not start qp!\n");
		return ERR_PTR(-EINVAL);
	}

	return qp;
}
static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
				  struct scatterlist *data, unsigned int len,
				  int is_src, dma_addr_t *tmp)
{
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = HPRE_DEV(ctx);
	enum dma_data_direction dma_dir;

	if (is_src) {
		hpre_req->src = NULL;
		dma_dir = DMA_TO_DEVICE;
	} else {
		hpre_req->dst = NULL;
		dma_dir = DMA_FROM_DEVICE;
	}
	*tmp = dma_map_single(dev, sg_virt(data), len, dma_dir);
	if (unlikely(dma_mapping_error(dev, *tmp))) {
		dev_err(dev, "dma map data err!\n");
		return -ENOMEM;
	}

	return 0;
}
static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
				struct scatterlist *data, unsigned int len,
				int is_src, dma_addr_t *tmp)
{
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = HPRE_DEV(ctx);
	void *ptr;
	int shift;

	shift = ctx->key_sz - len;
	if (unlikely(shift < 0))
		return -EINVAL;

	ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_KERNEL);
	if (unlikely(!ptr))
		return -ENOMEM;

	if (is_src) {
		scatterwalk_map_and_copy(ptr + shift, data, 0, len, 0);
		hpre_req->src = ptr;
	} else {
		hpre_req->dst = ptr;
	}

	return 0;
}
static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
			     struct scatterlist *data, unsigned int len,
			     int is_src, int is_dh)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	dma_addr_t tmp = 0;
	int ret;

	/* when the data is DH's source, we should format it */
	if ((sg_is_last(data) && len == ctx->key_sz) &&
	    ((is_dh && !is_src) || !is_dh))
		ret = hpre_get_data_dma_addr(hpre_req, data, len, is_src, &tmp);
	else
		ret = hpre_prepare_dma_buf(hpre_req, data, len, is_src, &tmp);
	if (unlikely(ret))
		return ret;

	if (is_src)
		msg->in = cpu_to_le64(tmp);
	else
		msg->out = cpu_to_le64(tmp);

	return 0;
}
static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
				 struct hpre_asym_request *req,
				 struct scatterlist *dst,
				 struct scatterlist *src)
{
	struct device *dev = HPRE_DEV(ctx);
	struct hpre_sqe *sqe = &req->req;
	dma_addr_t tmp;

	tmp = le64_to_cpu(sqe->in);
	if (unlikely(!tmp))
		return;

	if (src) {
		if (req->src)
			dma_free_coherent(dev, ctx->key_sz, req->src, tmp);
		else
			dma_unmap_single(dev, tmp, ctx->key_sz, DMA_TO_DEVICE);
	}

	tmp = le64_to_cpu(sqe->out);
	if (unlikely(!tmp))
		return;

	if (req->dst) {
		if (dst)
			scatterwalk_map_and_copy(req->dst, dst, 0,
						 ctx->key_sz, 1);
		dma_free_coherent(dev, ctx->key_sz, req->dst, tmp);
	} else {
		dma_unmap_single(dev, tmp, ctx->key_sz, DMA_FROM_DEVICE);
	}
}
static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
				void **kreq)
{
	struct hpre_asym_request *req;
	int err, id, done;

#define HPRE_NO_HW_ERR		0
#define HPRE_HW_TASK_DONE	3
#define HREE_HW_ERR_MASK	0x7ff
#define HREE_SQE_DONE_MASK	0x3
	id = (int)le16_to_cpu(sqe->tag);
	req = ctx->req_list[id];
	hpre_rm_req_from_ctx(req);
	*kreq = req;

	err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) &
		HREE_HW_ERR_MASK;

	done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &
		HREE_SQE_DONE_MASK;

	if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE))
		return 0;

	return -EINVAL;
}
static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
{
	struct hpre *hpre;

	if (!ctx || !qp || qlen < 0)
		return -EINVAL;

	spin_lock_init(&ctx->req_lock);
	ctx->qp = qp;

	hpre = container_of(ctx->qp->qm, struct hpre, qm);
	ctx->hpre = hpre;
	ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);
	if (!ctx->req_list)
		return -ENOMEM;
	ctx->key_sz = 0;
	ctx->crt_g2_mode = false;
	idr_init(&ctx->req_idr);

	return 0;
}
static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
{
	if (is_clear_all) {
		idr_destroy(&ctx->req_idr);
		kfree(ctx->req_list);
		hisi_qm_free_qps(&ctx->qp, 1);
	}

	ctx->crt_g2_mode = false;
	ctx->key_sz = 0;
}
static bool hpre_is_bd_timeout(struct hpre_asym_request *req,
			       u64 overtime_thrhld)
{
	struct timespec64 reply_time;
	u64 time_use_us;

	ktime_get_ts64(&reply_time);
	time_use_us = (reply_time.tv_sec - req->req_time.tv_sec) *
		HPRE_DFX_SEC_TO_US +
		(reply_time.tv_nsec - req->req_time.tv_nsec) /
		HPRE_DFX_US_TO_NS;

	if (time_use_us <= overtime_thrhld)
		return false;

	return true;
}
static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp)
{
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_asym_request *req;
	struct kpp_request *areq;
	u64 overtime_thrhld;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
	areq = req->areq.dh;
	areq->dst_len = ctx->key_sz;

	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);

	hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	kpp_request_complete(areq, ret);
	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}
static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp)
{
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_asym_request *req;
	struct akcipher_request *areq;
	u64 overtime_thrhld;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);

	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);

	areq = req->areq.rsa;
	areq->dst_len = ctx->key_sz;
	hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	akcipher_request_complete(areq, ret);
	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}
static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
{
	struct hpre_ctx *ctx = qp->qp_ctx;
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_sqe *sqe = resp;
	struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)];

	if (unlikely(!req)) {
		atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value);
		return;
	}

	req->cb(ctx, resp);
}
static int hpre_ctx_init(struct hpre_ctx *ctx)
{
	struct hisi_qp *qp;

	qp = hpre_get_qp_and_start();
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	qp->qp_ctx = ctx;
	qp->req_cb = hpre_alg_cb;

	return hpre_ctx_set(ctx, qp, QM_Q_DEPTH);
}
static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
{
	struct hpre_asym_request *h_req;
	struct hpre_sqe *msg;
	int req_id;
	void *tmp;

	if (is_rsa) {
		struct akcipher_request *akreq = req;

		if (akreq->dst_len < ctx->key_sz) {
			akreq->dst_len = ctx->key_sz;
			return -EOVERFLOW;
		}

		tmp = akcipher_request_ctx(akreq);
		h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
		h_req->cb = hpre_rsa_cb;
		h_req->areq.rsa = akreq;
		msg = &h_req->req;
		memset(msg, 0, sizeof(*msg));
	} else {
		struct kpp_request *kreq = req;

		if (kreq->dst_len < ctx->key_sz) {
			kreq->dst_len = ctx->key_sz;
			return -EOVERFLOW;
		}

		tmp = kpp_request_ctx(kreq);
		h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
		h_req->cb = hpre_dh_cb;
		h_req->areq.dh = kreq;
		msg = &h_req->req;
		memset(msg, 0, sizeof(*msg));
		msg->key = cpu_to_le64(ctx->dh.dma_xa_p);
	}

	msg->dw0 |= cpu_to_le32(0x1 << HPRE_SQE_DONE_SHIFT);
	msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
	h_req->ctx = ctx;

	req_id = hpre_add_req_to_ctx(h_req);
	if (req_id < 0)
		return -EBUSY;

	msg->tag = cpu_to_le16((u16)req_id);

	return 0;
}
static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
{
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	int ctr = 0;
	int ret;

	do {
		atomic64_inc(&dfx[HPRE_SEND_CNT].value);
		ret = hisi_qp_send(ctx->qp, msg);
		if (ret != -EBUSY)
			break;
		atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value);
	} while (ctr++ < HPRE_TRY_SEND_TIMES);

	if (likely(!ret))
		return ret;

	if (ret != -EBUSY)
		atomic64_inc(&dfx[HPRE_SEND_FAIL_CNT].value);

	return ret;
}
#ifdef CONFIG_CRYPTO_DH
static int hpre_dh_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	void *tmp = kpp_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	ret = hpre_msg_request_set(ctx, req, false);
	if (unlikely(ret))
		return ret;

	if (req->src) {
		ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1);
		if (unlikely(ret))
			goto clear_all;
	} else {
		msg->in = cpu_to_le64(ctx->dh.dma_g);
	}

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1);
	if (unlikely(ret))
		goto clear_all;

	if (ctx->crt_g2_mode && !req->src)
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2);
	else
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);

	/* success */
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}
static int hpre_is_dh_params_length_valid(unsigned int key_sz)
{
#define _HPRE_DH_GRP1		768
#define _HPRE_DH_GRP2		1024
#define _HPRE_DH_GRP5		1536
#define _HPRE_DH_GRP14		2048
#define _HPRE_DH_GRP15		3072
#define _HPRE_DH_GRP16		4096
	switch (key_sz) {
	case _HPRE_DH_GRP1:
	case _HPRE_DH_GRP2:
	case _HPRE_DH_GRP5:
	case _HPRE_DH_GRP14:
	case _HPRE_DH_GRP15:
	case _HPRE_DH_GRP16:
		return 0;
	}

	return -EINVAL;
}
static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
{
	struct device *dev = HPRE_DEV(ctx);
	unsigned int sz;

	if (params->p_size > HPRE_DH_MAX_P_SZ)
		return -EINVAL;

	if (hpre_is_dh_params_length_valid(params->p_size <<
					   HPRE_BITS_2_BYTES_SHIFT))
		return -EINVAL;

	sz = ctx->key_sz = params->p_size;
	ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1,
					  &ctx->dh.dma_xa_p, GFP_KERNEL);
	if (!ctx->dh.xa_p)
		return -ENOMEM;

	memcpy(ctx->dh.xa_p + sz, params->p, sz);

	/* If g equals 2 don't copy it */
	if (params->g_size == 1 && *(char *)params->g == HPRE_DH_G_FLAG) {
		ctx->crt_g2_mode = true;
		return 0;
	}

	ctx->dh.g = dma_alloc_coherent(dev, sz, &ctx->dh.dma_g, GFP_KERNEL);
	if (!ctx->dh.g) {
		dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
				  ctx->dh.dma_xa_p);
		ctx->dh.xa_p = NULL;
		return -ENOMEM;
	}

	memcpy(ctx->dh.g + (sz - params->g_size), params->g, params->g_size);

	return 0;
}
static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
	struct device *dev = HPRE_DEV(ctx);
	unsigned int sz = ctx->key_sz;

	if (is_clear_all)
		hisi_qm_stop_qp(ctx->qp);

	if (ctx->dh.g) {
		dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g);
		ctx->dh.g = NULL;
	}

	if (ctx->dh.xa_p) {
		memzero_explicit(ctx->dh.xa_p, sz);
		dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
				  ctx->dh.dma_xa_p);
		ctx->dh.xa_p = NULL;
	}

	hpre_ctx_clear(ctx, is_clear_all);
}
static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
			      unsigned int len)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	struct dh params;
	int ret;

	if (crypto_dh_decode_key(buf, len, &params) < 0)
		return -EINVAL;

	/* Free old secret if any */
	hpre_dh_clear_ctx(ctx, false);

	ret = hpre_dh_set_params(ctx, &params);
	if (ret < 0)
		goto err_clear_ctx;

	memcpy(ctx->dh.xa_p + (ctx->key_sz - params.key_size), params.key,
	       params.key_size);

	return 0;

err_clear_ctx:
	hpre_dh_clear_ctx(ctx, false);
	return ret;
}
static unsigned int hpre_dh_max_size(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	return ctx->key_sz;
}
static int hpre_dh_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	return hpre_ctx_init(ctx);
}
static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	hpre_dh_clear_ctx(ctx, true);
}
#endif
static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len)
{
	while (!**ptr && *len) {
		(*ptr)++;
		(*len)--;
	}
}
static bool hpre_rsa_key_size_is_support(unsigned int len)
{
	unsigned int bits = len << HPRE_BITS_2_BYTES_SHIFT;

#define _RSA_1024BITS_KEY_WDTH		1024
#define _RSA_2048BITS_KEY_WDTH		2048
#define _RSA_3072BITS_KEY_WDTH		3072
#define _RSA_4096BITS_KEY_WDTH		4096

	switch (bits) {
	case _RSA_1024BITS_KEY_WDTH:
	case _RSA_2048BITS_KEY_WDTH:
	case _RSA_3072BITS_KEY_WDTH:
	case _RSA_4096BITS_KEY_WDTH:
		return true;
	default:
		return false;
	}
}
static int hpre_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	void *tmp = akcipher_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	/* For 512 and 1536 bits key size, use soft tfm instead */
	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
		akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
		ret = crypto_akcipher_encrypt(req);
		akcipher_request_set_tfm(req, tfm);
		return ret;
	}

	if (unlikely(!ctx->rsa.pubkey))
		return -ENOKEY;

	ret = hpre_msg_request_set(ctx, req, true);
	if (unlikely(ret))
		return ret;

	msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT);
	msg->key = cpu_to_le64(ctx->rsa.dma_pubkey);

	ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
	if (unlikely(ret))
		goto clear_all;

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
	if (unlikely(ret))
		goto clear_all;

	/* success */
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}
static int hpre_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	void *tmp = akcipher_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	/* For 512 and 1536 bits key size, use soft tfm instead */
	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
		akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
		ret = crypto_akcipher_decrypt(req);
		akcipher_request_set_tfm(req, tfm);
		return ret;
	}

	if (unlikely(!ctx->rsa.prikey))
		return -ENOKEY;

	ret = hpre_msg_request_set(ctx, req, true);
	if (unlikely(ret))
		return ret;

	if (ctx->crt_g2_mode) {
		msg->key = cpu_to_le64(ctx->rsa.dma_crt_prikey);
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
				       HPRE_ALG_NC_CRT);
	} else {
		msg->key = cpu_to_le64(ctx->rsa.dma_prikey);
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
				       HPRE_ALG_NC_NCRT);
	}

	ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
	if (unlikely(ret))
		goto clear_all;

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
	if (unlikely(ret))
		goto clear_all;

	/* success */
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}
static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value,
			  size_t vlen, bool private)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	ctx->key_sz = vlen;

	/* if invalid key size provided, we use software tfm */
	if (!hpre_rsa_key_size_is_support(ctx->key_sz))
		return 0;

	ctx->rsa.pubkey = dma_alloc_coherent(HPRE_DEV(ctx), vlen << 1,
					     &ctx->rsa.dma_pubkey,
					     GFP_KERNEL);
	if (!ctx->rsa.pubkey)
		return -ENOMEM;

	if (private) {
		ctx->rsa.prikey = dma_alloc_coherent(HPRE_DEV(ctx), vlen << 1,
						     &ctx->rsa.dma_prikey,
						     GFP_KERNEL);
		if (!ctx->rsa.prikey) {
			dma_free_coherent(HPRE_DEV(ctx), vlen << 1,
					  ctx->rsa.pubkey,
					  ctx->rsa.dma_pubkey);
			ctx->rsa.pubkey = NULL;
			return -ENOMEM;
		}
		memcpy(ctx->rsa.prikey + vlen, ptr, vlen);
	}
	memcpy(ctx->rsa.pubkey + vlen, ptr, vlen);

	/* Using hardware HPRE to do RSA */
	return 1;
}
static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value,
			  size_t vlen)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		return -EINVAL;

	memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen);

	return 0;
}
static int hpre_rsa_set_d(struct hpre_ctx *ctx, const char *value,
			  size_t vlen)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		return -EINVAL;

	memcpy(ctx->rsa.prikey + ctx->key_sz - vlen, ptr, vlen);

	return 0;
}
static int hpre_crt_para_get(char *para, size_t para_sz,
			     const char *raw, size_t raw_sz)
{
	const char *ptr = raw;
	size_t len = raw_sz;

	hpre_rsa_drop_leading_zeros(&ptr, &len);
	if (!len || len > para_sz)
		return -EINVAL;

	memcpy(para + para_sz - len, ptr, len);

	return 0;
}
static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
{
	unsigned int hlf_ksz = ctx->key_sz >> 1;
	struct device *dev = HPRE_DEV(ctx);
	u64 offset;
	int ret;

	ctx->rsa.crt_prikey = dma_alloc_coherent(dev, hlf_ksz * HPRE_CRT_PRMS,
						 &ctx->rsa.dma_crt_prikey,
						 GFP_KERNEL);
	if (!ctx->rsa.crt_prikey)
		return -ENOMEM;

	ret = hpre_crt_para_get(ctx->rsa.crt_prikey, hlf_ksz,
				rsa_key->dq, rsa_key->dq_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->dp, rsa_key->dp_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_Q;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->q, rsa_key->q_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_P;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->p, rsa_key->p_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_INV;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->qinv, rsa_key->qinv_sz);
	if (ret)
		goto free_key;

	ctx->crt_g2_mode = true;

	return 0;

free_key:
	offset = hlf_ksz * HPRE_CRT_PRMS;
	memzero_explicit(ctx->rsa.crt_prikey, offset);
	dma_free_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey,
			  ctx->rsa.dma_crt_prikey);
	ctx->rsa.crt_prikey = NULL;
	ctx->crt_g2_mode = false;

	return ret;
}
/* If is_clear_all is set, all resources of the QP are cleaned up as well. */
static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
	unsigned int half_key_sz = ctx->key_sz >> 1;
	struct device *dev = HPRE_DEV(ctx);

	if (is_clear_all)
		hisi_qm_stop_qp(ctx->qp);

	if (ctx->rsa.pubkey) {
		dma_free_coherent(dev, ctx->key_sz << 1,
				  ctx->rsa.pubkey, ctx->rsa.dma_pubkey);
		ctx->rsa.pubkey = NULL;
	}

	if (ctx->rsa.crt_prikey) {
		memzero_explicit(ctx->rsa.crt_prikey,
				 half_key_sz * HPRE_CRT_PRMS);
		dma_free_coherent(dev, half_key_sz * HPRE_CRT_PRMS,
				  ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey);
		ctx->rsa.crt_prikey = NULL;
	}

	if (ctx->rsa.prikey) {
		memzero_explicit(ctx->rsa.prikey, ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey,
				  ctx->rsa.dma_prikey);
		ctx->rsa.prikey = NULL;
	}

	hpre_ctx_clear(ctx, is_clear_all);
}
/*
 * Judge whether the key is in CRT form:
 * CRT: return true, N-CRT: return false.
 */
static bool hpre_is_crt_key(struct rsa_key *key)
{
	u16 len = key->p_sz + key->q_sz + key->dp_sz + key->dq_sz +
		  key->qinv_sz;

#define LEN_OF_NCRT_PARA	5

	/* an N-CRT key leaves the CRT fields empty, so their total stays small */
	return len > LEN_OF_NCRT_PARA;
}
static int hpre_rsa_setkey(struct hpre_ctx *ctx, const void *key,
			   unsigned int keylen, bool private)
{
	struct rsa_key rsa_key;
	int ret;

	hpre_rsa_clear_ctx(ctx, false);

	if (private)
		ret = rsa_parse_priv_key(&rsa_key, key, keylen);
	else
		ret = rsa_parse_pub_key(&rsa_key, key, keylen);
	if (ret < 0)
		return ret;

	ret = hpre_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz, private);
	if (ret <= 0)
		return ret;

	if (private) {
		ret = hpre_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
		if (ret < 0)
			goto free;

		if (hpre_is_crt_key(&rsa_key)) {
			ret = hpre_rsa_setkey_crt(ctx, &rsa_key);
			if (ret < 0)
				goto free;
		}
	}

	ret = hpre_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
	if (ret < 0)
		goto free;

	if ((private && !ctx->rsa.prikey) || !ctx->rsa.pubkey) {
		ret = -EINVAL;
		goto free;
	}

	return 0;

free:
	hpre_rsa_clear_ctx(ctx, false);
	return ret;
}
*tfm
, const void *key
,
1055 unsigned int keylen
)
1057 struct hpre_ctx
*ctx
= akcipher_tfm_ctx(tfm
);
1060 ret
= crypto_akcipher_set_pub_key(ctx
->rsa
.soft_tfm
, key
, keylen
);
1064 return hpre_rsa_setkey(ctx
, key
, keylen
, false);
static int hpre_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
			       unsigned int keylen)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ret = crypto_akcipher_set_priv_key(ctx->rsa.soft_tfm, key, keylen);
	if (ret)
		return ret;

	return hpre_rsa_setkey(ctx, key, keylen, true);
}
static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);

	/* For 512 and 1536 bits key size, use soft tfm instead */
	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ)
		return crypto_akcipher_maxsize(ctx->rsa.soft_tfm);

	return ctx->key_sz;
}
static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0);
	if (IS_ERR(ctx->rsa.soft_tfm)) {
		pr_err("Can not alloc_akcipher!\n");
		return PTR_ERR(ctx->rsa.soft_tfm);
	}

	ret = hpre_ctx_init(ctx);
	if (ret)
		crypto_free_akcipher(ctx->rsa.soft_tfm);

	return ret;
}
static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);

	hpre_rsa_clear_ctx(ctx, true);
	crypto_free_akcipher(ctx->rsa.soft_tfm);
}
static struct akcipher_alg rsa = {
	.sign = hpre_rsa_dec,
	.verify = hpre_rsa_enc,
	.encrypt = hpre_rsa_enc,
	.decrypt = hpre_rsa_dec,
	.set_pub_key = hpre_rsa_setpubkey,
	.set_priv_key = hpre_rsa_setprivkey,
	.max_size = hpre_rsa_max_size,
	.init = hpre_rsa_init_tfm,
	.exit = hpre_rsa_exit_tfm,
	.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "rsa",
		.cra_driver_name = "hpre-rsa",
		.cra_module = THIS_MODULE,
	},
};
#ifdef CONFIG_CRYPTO_DH
static struct kpp_alg dh = {
	.set_secret = hpre_dh_set_secret,
	.generate_public_key = hpre_dh_compute_value,
	.compute_shared_secret = hpre_dh_compute_value,
	.max_size = hpre_dh_max_size,
	.init = hpre_dh_init_tfm,
	.exit = hpre_dh_exit_tfm,
	.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "dh",
		.cra_driver_name = "hpre-dh",
		.cra_module = THIS_MODULE,
	},
};
#endif
int hpre_algs_register(void)
{
	int ret;

	rsa.base.cra_flags = 0;
	ret = crypto_register_akcipher(&rsa);
	if (ret)
		return ret;

#ifdef CONFIG_CRYPTO_DH
	ret = crypto_register_kpp(&dh);
	if (ret)
		crypto_unregister_akcipher(&rsa);
#endif

	return ret;
}
void hpre_algs_unregister(void)
{
	crypto_unregister_akcipher(&rsa);
#ifdef CONFIG_CRYPTO_DH
	crypto_unregister_kpp(&dh);
#endif
}