// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <crypto/akcipher.h>
#include <crypto/dh.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/kpp.h>
#include <crypto/internal/rsa.h>
#include <crypto/kpp.h>
#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <linux/module.h>
#include "hpre.h"

struct hpre_ctx;
#define HPRE_CRYPTO_ALG_PRI	1000
#define HPRE_ALIGN_SZ		64
#define HPRE_BITS_2_BYTES_SHIFT	3
#define HPRE_RSA_512BITS_KSZ	64
#define HPRE_RSA_1536BITS_KSZ	192
#define HPRE_CRT_PRMS		5
#define HPRE_CRT_Q		2
#define HPRE_CRT_P		3
#define HPRE_CRT_INV		4
#define HPRE_DH_G_FLAG		0x02
#define HPRE_TRY_SEND_TIMES	100
#define HPRE_INVLD_REQ_ID	(-1)
#define HPRE_DEV(ctx)		(&((ctx)->qp->qm->pdev->dev))

#define HPRE_SQE_ALG_BITS	5
#define HPRE_SQE_DONE_SHIFT	30
#define HPRE_DH_MAX_P_SZ	512
typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
struct hpre_rsa_ctx {
	/* low address: e--->n */
	char *pubkey;
	dma_addr_t dma_pubkey;

	/* low address: d--->n */
	char *prikey;
	dma_addr_t dma_prikey;

	/* low address: dq->dp->q->p->qinv */
	char *crt_prikey;
	dma_addr_t dma_crt_prikey;

	struct crypto_akcipher *soft_tfm;
};
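/*
 * Sketch of the pubkey/prikey buffer layout implied by hpre_rsa_set_n(),
 * hpre_rsa_set_e() and hpre_rsa_set_d() below (offsets inferred from those
 * memcpy() calls, not taken from the HPRE hardware manual):
 *
 *   pubkey (key_sz << 1 bytes): [ e, right-aligned | n ]
 *   prikey (key_sz << 1 bytes): [ d, right-aligned | n ]
 *                               0 ......... key_sz .. 2*key_sz
 */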
struct hpre_dh_ctx {
	/*
	 * If base is g we compute the public key
	 *	ya = g^xa mod p; [RFC2631 sec 2.1.1]
	 * else if base is the counterpart public key we
	 * compute the shared secret
	 *	ZZ = yb^xa mod p; [RFC2631 sec 2.1.1]
	 */
	char *xa_p; /* low address: d--->n, please refer to Hisilicon HPRE UM */
	dma_addr_t dma_xa_p;

	char *g; /* m */
	dma_addr_t dma_g;
};
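/*
 * A sketch of the xa_p layout, inferred from hpre_dh_set_params() and
 * hpre_dh_set_secret() below (xa is the private key, right-aligned in
 * the low half; the prime p fills the high half):
 *
 *   xa_p (key_sz << 1 bytes): [ xa, right-aligned | p ]
 *                             0 .......... key_sz .. 2*key_sz
 */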
struct hpre_ctx {
	struct hisi_qp *qp;
	struct hpre_asym_request **req_list;
	spinlock_t req_lock;
	unsigned int key_sz;
	bool crt_g2_mode;
	struct idr req_idr;
	union {
		struct hpre_rsa_ctx rsa;
		struct hpre_dh_ctx dh;
	};
};
struct hpre_asym_request {
	char *src;
	char *dst;
	struct hpre_sqe req;
	struct hpre_ctx *ctx;
	union {
		struct akcipher_request *rsa;
		struct kpp_request *dh;
	} areq;
	int err;
	int req_id;
	hpre_cb cb;
};
static DEFINE_MUTEX(hpre_alg_lock);
static unsigned int hpre_active_devs;
static int hpre_alloc_req_id(struct hpre_ctx *ctx)
{
	unsigned long flags;
	int id;

	spin_lock_irqsave(&ctx->req_lock, flags);
	id = idr_alloc(&ctx->req_idr, NULL, 0, QM_Q_DEPTH, GFP_ATOMIC);
	spin_unlock_irqrestore(&ctx->req_lock, flags);

	return id;
}
static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->req_lock, flags);
	idr_remove(&ctx->req_idr, req_id);
	spin_unlock_irqrestore(&ctx->req_lock, flags);
}
static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
{
	struct hpre_ctx *ctx;
	int id;

	ctx = hpre_req->ctx;
	id = hpre_alloc_req_id(ctx);
	if (unlikely(id < 0))
		return -EINVAL;

	ctx->req_list[id] = hpre_req;
	hpre_req->req_id = id;

	return id;
}
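/*
 * Note on the request-id lifecycle (a summary of the code in this file,
 * not documentation from the HPRE manual): the id allocated above is
 * written into the hardware descriptor as sqe->tag by
 * hpre_msg_request_set(), and the completion path (hpre_alg_cb()) reads
 * the tag back from the response to find the matching hpre_asym_request
 * in ctx->req_list.
 */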
static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
{
	struct hpre_ctx *ctx = hpre_req->ctx;
	int id = hpre_req->req_id;

	if (hpre_req->req_id >= 0) {
		hpre_req->req_id = HPRE_INVLD_REQ_ID;
		ctx->req_list[id] = NULL;
		hpre_free_req_id(ctx, id);
	}
}
static struct hisi_qp *hpre_get_qp_and_start(void)
{
	struct hisi_qp *qp;
	struct hpre *hpre;
	int ret;

	/* find the proper hpre device, which is near the current CPU core */
	hpre = hpre_find_device(cpu_to_node(smp_processor_id()));
	if (!hpre) {
		pr_err("Can not find proper hpre device!\n");
		return ERR_PTR(-ENODEV);
	}

	qp = hisi_qm_create_qp(&hpre->qm, 0);
	if (IS_ERR(qp)) {
		pci_err(hpre->qm.pdev, "Can not create qp!\n");
		return ERR_PTR(-ENODEV);
	}

	ret = hisi_qm_start_qp(qp, 0);
	if (ret < 0) {
		hisi_qm_release_qp(qp);
		pci_err(hpre->qm.pdev, "Can not start qp!\n");
		return ERR_PTR(-EINVAL);
	}

	return qp;
}
static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
				  struct scatterlist *data, unsigned int len,
				  int is_src, dma_addr_t *tmp)
{
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = HPRE_DEV(ctx);
	enum dma_data_direction dma_dir;

	if (is_src) {
		hpre_req->src = NULL;
		dma_dir = DMA_TO_DEVICE;
	} else {
		hpre_req->dst = NULL;
		dma_dir = DMA_FROM_DEVICE;
	}
	*tmp = dma_map_single(dev, sg_virt(data), len, dma_dir);
	if (unlikely(dma_mapping_error(dev, *tmp))) {
		dev_err(dev, "dma map data err!\n");
		return -ENOMEM;
	}

	return 0;
}
static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
				struct scatterlist *data, unsigned int len,
				int is_src, dma_addr_t *tmp)
{
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = HPRE_DEV(ctx);
	void *ptr;
	int shift;

	shift = ctx->key_sz - len;
	if (unlikely(shift < 0))
		return -EINVAL;

	ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_KERNEL);
	if (unlikely(!ptr))
		return -ENOMEM;

	if (is_src) {
		scatterwalk_map_and_copy(ptr + shift, data, 0, len, 0);
		hpre_req->src = ptr;
	} else {
		hpre_req->dst = ptr;
	}

	return 0;
}
static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
			     struct scatterlist *data, unsigned int len,
			     int is_src, int is_dh)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	dma_addr_t tmp = 0;
	int ret;

	/* when the data is dh's source, we should format it */
	if ((sg_is_last(data) && len == ctx->key_sz) &&
	    ((is_dh && !is_src) || !is_dh))
		ret = hpre_get_data_dma_addr(hpre_req, data, len, is_src, &tmp);
	else
		ret = hpre_prepare_dma_buf(hpre_req, data, len,
					   is_src, &tmp);
	if (unlikely(ret))
		return ret;

	if (is_src)
		msg->in = cpu_to_le64(tmp);
	else
		msg->out = cpu_to_le64(tmp);

	return 0;
}
static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
				 struct hpre_asym_request *req,
				 struct scatterlist *dst,
				 struct scatterlist *src)
{
	struct device *dev = HPRE_DEV(ctx);
	struct hpre_sqe *sqe = &req->req;
	dma_addr_t tmp;

	tmp = le64_to_cpu(sqe->in);
	if (unlikely(!tmp))
		return;

	if (src) {
		if (req->src)
			dma_free_coherent(dev, ctx->key_sz,
					  req->src, tmp);
		else
			dma_unmap_single(dev, tmp,
					 ctx->key_sz, DMA_TO_DEVICE);
	}

	tmp = le64_to_cpu(sqe->out);
	if (unlikely(!tmp))
		return;

	if (req->dst) {
		if (dst)
			scatterwalk_map_and_copy(req->dst, dst, 0,
						 ctx->key_sz, 1);
		dma_free_coherent(dev, ctx->key_sz, req->dst, tmp);
	} else {
		dma_unmap_single(dev, tmp, ctx->key_sz, DMA_FROM_DEVICE);
	}
}
static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
				void **kreq)
{
	struct hpre_asym_request *req;
	int err, id, done;

#define HPRE_NO_HW_ERR		0
#define HPRE_HW_TASK_DONE	3
#define HREE_HW_ERR_MASK	0x7ff
#define HREE_SQE_DONE_MASK	0x3
	id = (int)le16_to_cpu(sqe->tag);
	req = ctx->req_list[id];
	hpre_rm_req_from_ctx(req);
	*kreq = req;

	err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) &
		HREE_HW_ERR_MASK;

	done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &
		HREE_SQE_DONE_MASK;

	if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE))
		return 0;

	return -EINVAL;
}
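/*
 * Rough sketch of the sqe->dw0 fields decoded above (bit positions derived
 * from the shifts and masks in this file, not from the hardware manual):
 *
 *   bits  4:0   algorithm type (HPRE_ALG_*, below HPRE_SQE_ALG_BITS)
 *   bits 15:5   hardware error code, 0 means no error (HREE_HW_ERR_MASK)
 *   bits 31:30  done flag, 3 means task finished (HPRE_SQE_DONE_SHIFT)
 */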
static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
{
	if (!ctx || !qp || qlen < 0)
		return -EINVAL;

	spin_lock_init(&ctx->req_lock);
	ctx->qp = qp;

	ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);
	if (!ctx->req_list)
		return -ENOMEM;
	ctx->key_sz = 0;
	ctx->crt_g2_mode = false;
	idr_init(&ctx->req_idr);

	return 0;
}
static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
{
	if (is_clear_all) {
		idr_destroy(&ctx->req_idr);
		kfree(ctx->req_list);
		hisi_qm_release_qp(ctx->qp);
	}

	ctx->crt_g2_mode = false;
	ctx->key_sz = 0;
}
static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp)
{
	struct hpre_asym_request *req;
	struct kpp_request *areq;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
	areq = req->areq.dh;
	areq->dst_len = ctx->key_sz;
	hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	kpp_request_complete(areq, ret);
}
static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp)
{
	struct hpre_asym_request *req;
	struct akcipher_request *areq;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
	areq = req->areq.rsa;
	areq->dst_len = ctx->key_sz;
	hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	akcipher_request_complete(areq, ret);
}
static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
{
	struct hpre_ctx *ctx = qp->qp_ctx;
	struct hpre_sqe *sqe = resp;

	ctx->req_list[le16_to_cpu(sqe->tag)]->cb(ctx, resp);
}
static int hpre_ctx_init(struct hpre_ctx *ctx)
{
	struct hisi_qp *qp;

	qp = hpre_get_qp_and_start();
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	qp->qp_ctx = ctx;
	qp->req_cb = hpre_alg_cb;

	return hpre_ctx_set(ctx, qp, QM_Q_DEPTH);
}
static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
{
	struct hpre_asym_request *h_req;
	struct hpre_sqe *msg;
	int req_id;
	void *tmp;

	if (is_rsa) {
		struct akcipher_request *akreq = req;

		if (akreq->dst_len < ctx->key_sz) {
			akreq->dst_len = ctx->key_sz;
			return -EOVERFLOW;
		}

		tmp = akcipher_request_ctx(akreq);
		h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
		h_req->cb = hpre_rsa_cb;
		h_req->areq.rsa = akreq;
		msg = &h_req->req;
		memset(msg, 0, sizeof(*msg));
	} else {
		struct kpp_request *kreq = req;

		if (kreq->dst_len < ctx->key_sz) {
			kreq->dst_len = ctx->key_sz;
			return -EOVERFLOW;
		}

		tmp = kpp_request_ctx(kreq);
		h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
		h_req->cb = hpre_dh_cb;
		h_req->areq.dh = kreq;
		msg = &h_req->req;
		memset(msg, 0, sizeof(*msg));
		msg->key = cpu_to_le64((u64)ctx->dh.dma_xa_p);
	}

	msg->dw0 |= cpu_to_le32(0x1 << HPRE_SQE_DONE_SHIFT);
	msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
	h_req->ctx = ctx;

	req_id = hpre_add_req_to_ctx(h_req);
	if (req_id < 0)
		return -EBUSY;

	msg->tag = cpu_to_le16((u16)req_id);

	return 0;
}
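/*
 * Worked example for the task_len1 encoding above (my reading of the
 * expression, not a statement from the hardware manual): key_sz is in
 * bytes, so a 2048-bit key gives key_sz = 256 and
 * (256 >> HPRE_BITS_2_BYTES_SHIFT) - 1 = (256 >> 3) - 1 = 31.
 */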
#ifdef CONFIG_CRYPTO_DH
static int hpre_dh_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	void *tmp = kpp_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
	struct hpre_sqe *msg = &hpre_req->req;
	int ctr = 0;
	int ret;

	ret = hpre_msg_request_set(ctx, req, false);
	if (unlikely(ret))
		return ret;

	if (req->src) {
		ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1);
		if (unlikely(ret))
			goto clear_all;
	}

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1);
	if (unlikely(ret))
		goto clear_all;

	if (ctx->crt_g2_mode && !req->src)
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2);
	else
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);

	do {
		ret = hisi_qp_send(ctx->qp, msg);
	} while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);

	/* success */
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}
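/*
 * The sizes accepted below appear to correspond to the well-known
 * IKE/MODP group moduli (Oakley groups 1, 2 and 5 from RFC 2409, and
 * groups 14-16 from RFC 3526), matching the macro names.
 */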
static int hpre_is_dh_params_length_valid(unsigned int key_sz)
{
#define _HPRE_DH_GRP1		768
#define _HPRE_DH_GRP2		1024
#define _HPRE_DH_GRP5		1536
#define _HPRE_DH_GRP14		2048
#define _HPRE_DH_GRP15		3072
#define _HPRE_DH_GRP16		4096
	switch (key_sz) {
	case _HPRE_DH_GRP1:
	case _HPRE_DH_GRP2:
	case _HPRE_DH_GRP5:
	case _HPRE_DH_GRP14:
	case _HPRE_DH_GRP15:
	case _HPRE_DH_GRP16:
		return 0;
	}

	return -EINVAL;
}
static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
{
	struct device *dev = HPRE_DEV(ctx);
	unsigned int sz;

	if (params->p_size > HPRE_DH_MAX_P_SZ)
		return -EINVAL;

	if (hpre_is_dh_params_length_valid(params->p_size <<
					   HPRE_BITS_2_BYTES_SHIFT))
		return -EINVAL;

	sz = ctx->key_sz = params->p_size;
	ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1,
					  &ctx->dh.dma_xa_p, GFP_KERNEL);
	if (!ctx->dh.xa_p)
		return -ENOMEM;

	memcpy(ctx->dh.xa_p + sz, params->p, sz);

	/* If g equals 2 don't copy it */
	if (params->g_size == 1 && *(char *)params->g == HPRE_DH_G_FLAG) {
		ctx->crt_g2_mode = true;
		return 0;
	}

	ctx->dh.g = dma_alloc_coherent(dev, sz, &ctx->dh.dma_g, GFP_KERNEL);
	if (!ctx->dh.g) {
		dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
				  ctx->dh.dma_xa_p);
		ctx->dh.xa_p = NULL;
		return -ENOMEM;
	}

	memcpy(ctx->dh.g + (sz - params->g_size), params->g, params->g_size);

	return 0;
}
static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
	struct device *dev = HPRE_DEV(ctx);
	unsigned int sz = ctx->key_sz;

	if (is_clear_all)
		hisi_qm_stop_qp(ctx->qp);

	if (ctx->dh.g) {
		dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g);
		ctx->dh.g = NULL;
	}

	if (ctx->dh.xa_p) {
		memzero_explicit(ctx->dh.xa_p, sz);
		dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
				  ctx->dh.dma_xa_p);
		ctx->dh.xa_p = NULL;
	}

	hpre_ctx_clear(ctx, is_clear_all);
}
static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
			      unsigned int len)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	struct dh params;
	int ret;

	if (crypto_dh_decode_key(buf, len, &params) < 0)
		return -EINVAL;

	/* Free old secret if any */
	hpre_dh_clear_ctx(ctx, false);

	ret = hpre_dh_set_params(ctx, &params);
	if (ret < 0)
		goto err_clear_ctx;

	memcpy(ctx->dh.xa_p + (ctx->key_sz - params.key_size), params.key,
	       params.key_size);

	return 0;

err_clear_ctx:
	hpre_dh_clear_ctx(ctx, false);
	return ret;
}
static unsigned int hpre_dh_max_size(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	return ctx->key_sz;
}
static int hpre_dh_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	return hpre_ctx_init(ctx);
}
static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	hpre_dh_clear_ctx(ctx, true);
}
#endif
static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len)
{
	while (!**ptr && *len) {
		(*ptr)++;
		(*len)--;
	}
}
static bool hpre_rsa_key_size_is_support(unsigned int len)
{
	unsigned int bits = len << HPRE_BITS_2_BYTES_SHIFT;

#define _RSA_1024BITS_KEY_WDTH		1024
#define _RSA_2048BITS_KEY_WDTH		2048
#define _RSA_3072BITS_KEY_WDTH		3072
#define _RSA_4096BITS_KEY_WDTH		4096

	switch (bits) {
	case _RSA_1024BITS_KEY_WDTH:
	case _RSA_2048BITS_KEY_WDTH:
	case _RSA_3072BITS_KEY_WDTH:
	case _RSA_4096BITS_KEY_WDTH:
		return true;
	default:
		return false;
	}
}
static int hpre_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	void *tmp = akcipher_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
	struct hpre_sqe *msg = &hpre_req->req;
	int ctr = 0;
	int ret;

	/* For 512 and 1536 bits key size, use soft tfm instead */
	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
		akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
		ret = crypto_akcipher_encrypt(req);
		akcipher_request_set_tfm(req, tfm);
		return ret;
	}

	if (unlikely(!ctx->rsa.pubkey))
		return -EINVAL;

	ret = hpre_msg_request_set(ctx, req, true);
	if (unlikely(ret))
		return ret;

	msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT);
	msg->key = cpu_to_le64((u64)ctx->rsa.dma_pubkey);

	ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
	if (unlikely(ret))
		goto clear_all;

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
	if (unlikely(ret))
		goto clear_all;

	do {
		ret = hisi_qp_send(ctx->qp, msg);
	} while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);

	/* success */
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}
static int hpre_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	void *tmp = akcipher_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
	struct hpre_sqe *msg = &hpre_req->req;
	int ctr = 0;
	int ret;

	/* For 512 and 1536 bits key size, use soft tfm instead */
	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
		akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
		ret = crypto_akcipher_decrypt(req);
		akcipher_request_set_tfm(req, tfm);
		return ret;
	}

	if (unlikely(!ctx->rsa.prikey))
		return -EINVAL;

	ret = hpre_msg_request_set(ctx, req, true);
	if (unlikely(ret))
		return ret;

	if (ctx->crt_g2_mode) {
		msg->key = cpu_to_le64((u64)ctx->rsa.dma_crt_prikey);
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
				       HPRE_ALG_NC_CRT);
	} else {
		msg->key = cpu_to_le64((u64)ctx->rsa.dma_prikey);
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
				       HPRE_ALG_NC_NCRT);
	}

	ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
	if (unlikely(ret))
		goto clear_all;

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
	if (unlikely(ret))
		goto clear_all;

	do {
		ret = hisi_qp_send(ctx->qp, msg);
	} while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);

	/* success */
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}
static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value,
			  size_t vlen, bool private)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	ctx->key_sz = vlen;

	/* if invalid key size provided, we use software tfm */
	if (!hpre_rsa_key_size_is_support(ctx->key_sz))
		return 0;

	ctx->rsa.pubkey = dma_alloc_coherent(HPRE_DEV(ctx), vlen << 1,
					     &ctx->rsa.dma_pubkey,
					     GFP_KERNEL);
	if (!ctx->rsa.pubkey)
		return -ENOMEM;

	if (private) {
		ctx->rsa.prikey = dma_alloc_coherent(HPRE_DEV(ctx), vlen << 1,
						     &ctx->rsa.dma_prikey,
						     GFP_KERNEL);
		if (!ctx->rsa.prikey) {
			dma_free_coherent(HPRE_DEV(ctx), vlen << 1,
					  ctx->rsa.pubkey,
					  ctx->rsa.dma_pubkey);
			ctx->rsa.pubkey = NULL;
			return -ENOMEM;
		}
		memcpy(ctx->rsa.prikey + vlen, ptr, vlen);
	}
	memcpy(ctx->rsa.pubkey + vlen, ptr, vlen);

	/* Using hardware HPRE to do RSA */
	return 1;
}
static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value,
			  size_t vlen)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		return -EINVAL;

	memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen);

	return 0;
}
static int hpre_rsa_set_d(struct hpre_ctx *ctx, const char *value,
			  size_t vlen)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		return -EINVAL;

	memcpy(ctx->rsa.prikey + ctx->key_sz - vlen, ptr, vlen);

	return 0;
}
static int hpre_crt_para_get(char *para, size_t para_sz,
			     const char *raw, size_t raw_sz)
{
	const char *ptr = raw;
	size_t len = raw_sz;

	hpre_rsa_drop_leading_zeros(&ptr, &len);
	if (!len || len > para_sz)
		return -EINVAL;

	memcpy(para + para_sz - len, ptr, len);

	return 0;
}
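/*
 * Sketch of the crt_prikey buffer filled in below, inferred from the
 * offsets passed to hpre_crt_para_get() (each parameter is right-aligned
 * in a half-key-size slot; the order matches the "dq->dp->q->p->qinv"
 * comment in struct hpre_rsa_ctx):
 *
 *   slot 0: dq   at offset 0
 *   slot 1: dp   at offset hlf_ksz
 *   slot 2: q    at offset hlf_ksz * HPRE_CRT_Q
 *   slot 3: p    at offset hlf_ksz * HPRE_CRT_P
 *   slot 4: qinv at offset hlf_ksz * HPRE_CRT_INV
 */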
static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
{
	unsigned int hlf_ksz = ctx->key_sz >> 1;
	struct device *dev = HPRE_DEV(ctx);
	u64 offset;
	int ret;

	ctx->rsa.crt_prikey = dma_alloc_coherent(dev, hlf_ksz * HPRE_CRT_PRMS,
						 &ctx->rsa.dma_crt_prikey,
						 GFP_KERNEL);
	if (!ctx->rsa.crt_prikey)
		return -ENOMEM;

	ret = hpre_crt_para_get(ctx->rsa.crt_prikey, hlf_ksz,
				rsa_key->dq, rsa_key->dq_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->dp, rsa_key->dp_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_Q;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->q, rsa_key->q_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_P;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->p, rsa_key->p_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_INV;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->qinv, rsa_key->qinv_sz);
	if (ret)
		goto free_key;

	ctx->crt_g2_mode = true;

	return 0;

free_key:
	offset = hlf_ksz * HPRE_CRT_PRMS;
	memzero_explicit(ctx->rsa.crt_prikey, offset);
	dma_free_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey,
			  ctx->rsa.dma_crt_prikey);
	ctx->rsa.crt_prikey = NULL;
	ctx->crt_g2_mode = false;

	return ret;
}
/* If is_clear_all is true, all the resources of the QP are released as well. */
static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
	unsigned int half_key_sz = ctx->key_sz >> 1;
	struct device *dev = HPRE_DEV(ctx);

	if (is_clear_all)
		hisi_qm_stop_qp(ctx->qp);

	if (ctx->rsa.pubkey) {
		dma_free_coherent(dev, ctx->key_sz << 1,
				  ctx->rsa.pubkey, ctx->rsa.dma_pubkey);
		ctx->rsa.pubkey = NULL;
	}

	if (ctx->rsa.crt_prikey) {
		memzero_explicit(ctx->rsa.crt_prikey,
				 half_key_sz * HPRE_CRT_PRMS);
		dma_free_coherent(dev, half_key_sz * HPRE_CRT_PRMS,
				  ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey);
		ctx->rsa.crt_prikey = NULL;
	}

	if (ctx->rsa.prikey) {
		memzero_explicit(ctx->rsa.prikey, ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey,
				  ctx->rsa.dma_prikey);
		ctx->rsa.prikey = NULL;
	}

	hpre_ctx_clear(ctx, is_clear_all);
}
/*
 * Decide whether the key is a CRT key or not:
 * CRT: return true, N-CRT: return false.
 */
static bool hpre_is_crt_key(struct rsa_key *key)
{
	u16 len = key->p_sz + key->q_sz + key->dp_sz + key->dq_sz +
		  key->qinv_sz;

#define LEN_OF_NCRT_PARA	5

	/* N-CRT keys carry less than 5 bytes of CRT parameters in total */
	return len > LEN_OF_NCRT_PARA;
}
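/*
 * Worked example of the heuristic above (my reading of it, not a
 * documented rule): for a non-CRT key the parser leaves p/q/dp/dq/qinv
 * empty, so the summed sizes stay at or near zero and the function
 * returns false; any real CRT component pushes the byte total well past
 * LEN_OF_NCRT_PARA.
 */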
static int hpre_rsa_setkey(struct hpre_ctx *ctx, const void *key,
			   unsigned int keylen, bool private)
{
	struct rsa_key rsa_key;
	int ret;

	hpre_rsa_clear_ctx(ctx, false);

	if (private)
		ret = rsa_parse_priv_key(&rsa_key, key, keylen);
	else
		ret = rsa_parse_pub_key(&rsa_key, key, keylen);
	if (ret < 0)
		return ret;

	ret = hpre_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz, private);
	if (ret <= 0)
		return ret;

	if (private) {
		ret = hpre_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
		if (ret < 0)
			goto free;

		if (hpre_is_crt_key(&rsa_key)) {
			ret = hpre_rsa_setkey_crt(ctx, &rsa_key);
			if (ret < 0)
				goto free;
		}
	}

	ret = hpre_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
	if (ret < 0)
		goto free;

	if ((private && !ctx->rsa.prikey) || !ctx->rsa.pubkey) {
		ret = -EINVAL;
		goto free;
	}

	return 0;

free:
	hpre_rsa_clear_ctx(ctx, false);
	return ret;
}
*tfm
, const void *key
,
1000 struct hpre_ctx
*ctx
= akcipher_tfm_ctx(tfm
);
1003 ret
= crypto_akcipher_set_pub_key(ctx
->rsa
.soft_tfm
, key
, keylen
);
1007 return hpre_rsa_setkey(ctx
, key
, keylen
, false);
static int hpre_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
			       unsigned int keylen)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ret = crypto_akcipher_set_priv_key(ctx->rsa.soft_tfm, key, keylen);
	if (ret)
		return ret;

	return hpre_rsa_setkey(ctx, key, keylen, true);
}
static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);

	/* For 512 and 1536 bits key size, use soft tfm instead */
	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ)
		return crypto_akcipher_maxsize(ctx->rsa.soft_tfm);

	return ctx->key_sz;
}
static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0);
	if (IS_ERR(ctx->rsa.soft_tfm)) {
		pr_err("Can not alloc_akcipher!\n");
		return PTR_ERR(ctx->rsa.soft_tfm);
	}

	ret = hpre_ctx_init(ctx);
	if (ret)
		crypto_free_akcipher(ctx->rsa.soft_tfm);

	return ret;
}
static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);

	hpre_rsa_clear_ctx(ctx, true);
	crypto_free_akcipher(ctx->rsa.soft_tfm);
}
static struct akcipher_alg rsa = {
	.sign = hpre_rsa_dec,
	.verify = hpre_rsa_enc,
	.encrypt = hpre_rsa_enc,
	.decrypt = hpre_rsa_dec,
	.set_pub_key = hpre_rsa_setpubkey,
	.set_priv_key = hpre_rsa_setprivkey,
	.max_size = hpre_rsa_max_size,
	.init = hpre_rsa_init_tfm,
	.exit = hpre_rsa_exit_tfm,
	.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "rsa",
		.cra_driver_name = "hpre-rsa",
		.cra_module = THIS_MODULE,
	},
};
#ifdef CONFIG_CRYPTO_DH
static struct kpp_alg dh = {
	.set_secret = hpre_dh_set_secret,
	.generate_public_key = hpre_dh_compute_value,
	.compute_shared_secret = hpre_dh_compute_value,
	.max_size = hpre_dh_max_size,
	.init = hpre_dh_init_tfm,
	.exit = hpre_dh_exit_tfm,
	.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "dh",
		.cra_driver_name = "hpre-dh",
		.cra_module = THIS_MODULE,
	},
};
#endif
int hpre_algs_register(void)
{
	int ret = 0;

	mutex_lock(&hpre_alg_lock);
	if (++hpre_active_devs == 1) {
		rsa.base.cra_flags = 0;
		ret = crypto_register_akcipher(&rsa);
		if (ret)
			goto unlock;
#ifdef CONFIG_CRYPTO_DH
		ret = crypto_register_kpp(&dh);
		if (ret)
			crypto_unregister_akcipher(&rsa);
#endif
	}

unlock:
	mutex_unlock(&hpre_alg_lock);
	return ret;
}
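/*
 * A minimal usage sketch (kernel crypto API, not part of this driver):
 * once hpre_algs_register() has run, a caller doing
 *
 *	crypto_alloc_akcipher("rsa", 0, 0);
 *
 * is expected to get the "hpre-rsa" implementation, since its
 * cra_priority (HPRE_CRYPTO_ALG_PRI = 1000) is higher than that of the
 * software rsa-generic fallback.
 */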
void hpre_algs_unregister(void)
{
	mutex_lock(&hpre_alg_lock);
	if (--hpre_active_devs == 0) {
		crypto_unregister_akcipher(&rsa);
#ifdef CONFIG_CRYPTO_DH
		crypto_unregister_kpp(&dh);
#endif
	}
	mutex_unlock(&hpre_alg_lock);
}