drivers/crypto/hisilicon/hpre/hpre_crypto.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <crypto/akcipher.h>
#include <crypto/dh.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/kpp.h>
#include <crypto/internal/rsa.h>
#include <crypto/kpp.h>
#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <linux/module.h>
#include "hpre.h"

struct hpre_ctx;

#define HPRE_CRYPTO_ALG_PRI	1000
#define HPRE_ALIGN_SZ		64
#define HPRE_BITS_2_BYTES_SHIFT	3
#define HPRE_RSA_512BITS_KSZ	64
#define HPRE_RSA_1536BITS_KSZ	192
#define HPRE_CRT_PRMS		5
#define HPRE_CRT_Q		2
#define HPRE_CRT_P		3
#define HPRE_CRT_INV		4
#define HPRE_DH_G_FLAG		0x02
#define HPRE_TRY_SEND_TIMES	100
#define HPRE_INVLD_REQ_ID	(-1)
#define HPRE_DEV(ctx)		(&((ctx)->qp->qm->pdev->dev))

#define HPRE_SQE_ALG_BITS	5
#define HPRE_SQE_DONE_SHIFT	30
#define HPRE_DH_MAX_P_SZ	512

typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
struct hpre_rsa_ctx {
	/* low address: e--->n */
	char *pubkey;
	dma_addr_t dma_pubkey;

	/* low address: d--->n */
	char *prikey;
	dma_addr_t dma_prikey;

	/* low address: dq->dp->q->p->qinv */
	char *crt_prikey;
	dma_addr_t dma_crt_prikey;

	struct crypto_akcipher *soft_tfm;
};
struct hpre_dh_ctx {
	/*
	 * If base is g we compute the public key
	 *	ya = g^xa mod p; [RFC2631 sec 2.1.1]
	 * else if base is the counterpart public key we
	 * compute the shared secret
	 *	ZZ = yb^xa mod p; [RFC2631 sec 2.1.1]
	 */
	char *xa_p; /* low address: d--->n, please refer to Hisilicon HPRE UM */
	dma_addr_t dma_xa_p;

	char *g; /* m */
	dma_addr_t dma_g;
};
struct hpre_ctx {
	struct hisi_qp *qp;
	struct hpre_asym_request **req_list;
	spinlock_t req_lock;
	unsigned int key_sz;
	bool crt_g2_mode;
	struct idr req_idr;
	union {
		struct hpre_rsa_ctx rsa;
		struct hpre_dh_ctx dh;
	};
};
struct hpre_asym_request {
	char *src;
	char *dst;
	struct hpre_sqe req;
	struct hpre_ctx *ctx;
	union {
		struct akcipher_request *rsa;
		struct kpp_request *dh;
	} areq;
	int err;
	int req_id;
	hpre_cb cb;
};
static DEFINE_MUTEX(hpre_alg_lock);
static unsigned int hpre_active_devs;
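/*
 * Request IDs index ctx->req_list and are allocated from an IDR under
 * ctx->req_lock; the same ID is carried in the SQE tag so the completion
 * callback can find the originating request.
 */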
static int hpre_alloc_req_id(struct hpre_ctx *ctx)
{
	unsigned long flags;
	int id;

	spin_lock_irqsave(&ctx->req_lock, flags);
	id = idr_alloc(&ctx->req_idr, NULL, 0, QM_Q_DEPTH, GFP_ATOMIC);
	spin_unlock_irqrestore(&ctx->req_lock, flags);

	return id;
}

static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->req_lock, flags);
	idr_remove(&ctx->req_idr, req_id);
	spin_unlock_irqrestore(&ctx->req_lock, flags);
}
static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
{
	struct hpre_ctx *ctx;
	int id;

	ctx = hpre_req->ctx;
	id = hpre_alloc_req_id(ctx);
	if (unlikely(id < 0))
		return -EINVAL;

	ctx->req_list[id] = hpre_req;
	hpre_req->req_id = id;

	return id;
}

static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
{
	struct hpre_ctx *ctx = hpre_req->ctx;
	int id = hpre_req->req_id;

	if (hpre_req->req_id >= 0) {
		hpre_req->req_id = HPRE_INVLD_REQ_ID;
		ctx->req_list[id] = NULL;
		hpre_free_req_id(ctx, id);
	}
}
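/*
 * Pick an HPRE device close to the current CPU's NUMA node, then create and
 * start a QM queue pair on it for submitting SQEs.
 */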
static struct hisi_qp *hpre_get_qp_and_start(void)
{
	struct hisi_qp *qp;
	struct hpre *hpre;
	int ret;

	/* find the proper hpre device, which is near the current CPU core */
	hpre = hpre_find_device(cpu_to_node(smp_processor_id()));
	if (!hpre) {
		pr_err("Can not find proper hpre device!\n");
		return ERR_PTR(-ENODEV);
	}

	qp = hisi_qm_create_qp(&hpre->qm, 0);
	if (IS_ERR(qp)) {
		pci_err(hpre->qm.pdev, "Can not create qp!\n");
		return ERR_PTR(-ENODEV);
	}

	ret = hisi_qm_start_qp(qp, 0);
	if (ret < 0) {
		hisi_qm_release_qp(qp);
		pci_err(hpre->qm.pdev, "Can not start qp!\n");
		return ERR_PTR(-EINVAL);
	}

	return qp;
}
static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
				  struct scatterlist *data, unsigned int len,
				  int is_src, dma_addr_t *tmp)
{
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = HPRE_DEV(ctx);
	enum dma_data_direction dma_dir;

	if (is_src) {
		hpre_req->src = NULL;
		dma_dir = DMA_TO_DEVICE;
	} else {
		hpre_req->dst = NULL;
		dma_dir = DMA_FROM_DEVICE;
	}
	*tmp = dma_map_single(dev, sg_virt(data), len, dma_dir);
	if (unlikely(dma_mapping_error(dev, *tmp))) {
		dev_err(dev, "dma map data err!\n");
		return -ENOMEM;
	}

	return 0;
}
static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
				struct scatterlist *data, unsigned int len,
				int is_src, dma_addr_t *tmp)
{
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = HPRE_DEV(ctx);
	void *ptr;
	int shift;

	shift = ctx->key_sz - len;
	if (unlikely(shift < 0))
		return -EINVAL;

	ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_KERNEL);
	if (unlikely(!ptr))
		return -ENOMEM;

	if (is_src) {
		scatterwalk_map_and_copy(ptr + shift, data, 0, len, 0);
		hpre_req->src = ptr;
	} else {
		hpre_req->dst = ptr;
	}

	return 0;
}
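/*
 * A single-entry scatterlist of exactly key_sz bytes is DMA-mapped in place;
 * anything else (including all DH source data, which must be reformatted) is
 * copied through a key_sz coherent bounce buffer, padded at the front.
 */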
static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
			     struct scatterlist *data, unsigned int len,
			     int is_src, int is_dh)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	dma_addr_t tmp = 0;
	int ret;

	/* when the data is dh's source, we should format it */
	if ((sg_is_last(data) && len == ctx->key_sz) &&
	    ((is_dh && !is_src) || !is_dh))
		ret = hpre_get_data_dma_addr(hpre_req, data, len, is_src, &tmp);
	else
		ret = hpre_prepare_dma_buf(hpre_req, data, len, is_src, &tmp);
	if (unlikely(ret))
		return ret;

	if (is_src)
		msg->in = cpu_to_le64(tmp);
	else
		msg->out = cpu_to_le64(tmp);

	return 0;
}
static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
				 struct hpre_asym_request *req,
				 struct scatterlist *dst,
				 struct scatterlist *src)
{
	struct device *dev = HPRE_DEV(ctx);
	struct hpre_sqe *sqe = &req->req;
	dma_addr_t tmp;

	tmp = le64_to_cpu(sqe->in);
	if (unlikely(!tmp))
		return;

	if (src) {
		if (req->src)
			dma_free_coherent(dev, ctx->key_sz, req->src, tmp);
		else
			dma_unmap_single(dev, tmp, ctx->key_sz, DMA_TO_DEVICE);
	}

	tmp = le64_to_cpu(sqe->out);
	if (unlikely(!tmp))
		return;

	if (req->dst) {
		if (dst)
			scatterwalk_map_and_copy(req->dst, dst, 0,
						 ctx->key_sz, 1);
		dma_free_coherent(dev, ctx->key_sz, req->dst, tmp);
	} else {
		dma_unmap_single(dev, tmp, ctx->key_sz, DMA_FROM_DEVICE);
	}
}
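/*
 * Look up the request by the SQE tag, detach it from the context and
 * translate the hardware done/error fields in dw0 into 0 or -EINVAL.
 */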
static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
				void **kreq)
{
	struct hpre_asym_request *req;
	int err, id, done;

#define HPRE_NO_HW_ERR		0
#define HPRE_HW_TASK_DONE	3
#define HREE_HW_ERR_MASK	0x7ff
#define HREE_SQE_DONE_MASK	0x3
	id = (int)le16_to_cpu(sqe->tag);
	req = ctx->req_list[id];
	hpre_rm_req_from_ctx(req);
	*kreq = req;

	err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) &
		HREE_HW_ERR_MASK;

	done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &
		HREE_SQE_DONE_MASK;

	if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE))
		return 0;

	return -EINVAL;
}
static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
{
	if (!ctx || !qp || qlen < 0)
		return -EINVAL;

	spin_lock_init(&ctx->req_lock);
	ctx->qp = qp;

	ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);
	if (!ctx->req_list)
		return -ENOMEM;
	ctx->key_sz = 0;
	ctx->crt_g2_mode = false;
	idr_init(&ctx->req_idr);

	return 0;
}

static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
{
	if (is_clear_all) {
		idr_destroy(&ctx->req_idr);
		kfree(ctx->req_list);
		hisi_qm_release_qp(ctx->qp);
	}

	ctx->crt_g2_mode = false;
	ctx->key_sz = 0;
}
static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp)
{
	struct hpre_asym_request *req;
	struct kpp_request *areq;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
	areq = req->areq.dh;
	areq->dst_len = ctx->key_sz;
	hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	kpp_request_complete(areq, ret);
}

static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp)
{
	struct hpre_asym_request *req;
	struct akcipher_request *areq;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
	areq = req->areq.rsa;
	areq->dst_len = ctx->key_sz;
	hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	akcipher_request_complete(areq, ret);
}

static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
{
	struct hpre_ctx *ctx = qp->qp_ctx;
	struct hpre_sqe *sqe = resp;

	ctx->req_list[le16_to_cpu(sqe->tag)]->cb(ctx, resp);
}

static int hpre_ctx_init(struct hpre_ctx *ctx)
{
	struct hisi_qp *qp;

	qp = hpre_get_qp_and_start();
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	qp->qp_ctx = ctx;
	qp->req_cb = hpre_alg_cb;

	return hpre_ctx_set(ctx, qp, QM_Q_DEPTH);
}
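/*
 * Fill the parts of the SQE common to RSA and DH (dw0 flag, task length,
 * request tag). For DH the key field is pointed at the xa/p buffer here;
 * the RSA paths pick their key buffer in enc/dec. Returns -EOVERFLOW when
 * the destination buffer is smaller than the key size.
 */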
static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
{
	struct hpre_asym_request *h_req;
	struct hpre_sqe *msg;
	int req_id;
	void *tmp;

	if (is_rsa) {
		struct akcipher_request *akreq = req;

		if (akreq->dst_len < ctx->key_sz) {
			akreq->dst_len = ctx->key_sz;
			return -EOVERFLOW;
		}

		tmp = akcipher_request_ctx(akreq);
		h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
		h_req->cb = hpre_rsa_cb;
		h_req->areq.rsa = akreq;
		msg = &h_req->req;
		memset(msg, 0, sizeof(*msg));
	} else {
		struct kpp_request *kreq = req;

		if (kreq->dst_len < ctx->key_sz) {
			kreq->dst_len = ctx->key_sz;
			return -EOVERFLOW;
		}

		tmp = kpp_request_ctx(kreq);
		h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
		h_req->cb = hpre_dh_cb;
		h_req->areq.dh = kreq;
		msg = &h_req->req;
		memset(msg, 0, sizeof(*msg));
		msg->key = cpu_to_le64((u64)ctx->dh.dma_xa_p);
	}

	msg->dw0 |= cpu_to_le32(0x1 << HPRE_SQE_DONE_SHIFT);
	msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
	h_req->ctx = ctx;

	req_id = hpre_add_req_to_ctx(h_req);
	if (req_id < 0)
		return -EBUSY;

	msg->tag = cpu_to_le16((u16)req_id);

	return 0;
}
#ifdef CONFIG_CRYPTO_DH
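/*
 * One handler serves both KPP operations: generate_public_key arrives with
 * req->src == NULL (the base is g, or g == 2 in crt_g2_mode), while
 * compute_shared_secret passes the peer's public key in req->src. Submission
 * is retried up to HPRE_TRY_SEND_TIMES while the queue reports -EBUSY.
 */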
static int hpre_dh_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	void *tmp = kpp_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
	struct hpre_sqe *msg = &hpre_req->req;
	int ctr = 0;
	int ret;

	ret = hpre_msg_request_set(ctx, req, false);
	if (unlikely(ret))
		return ret;

	if (req->src) {
		ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1);
		if (unlikely(ret))
			goto clear_all;
	}

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1);
	if (unlikely(ret))
		goto clear_all;

	if (ctx->crt_g2_mode && !req->src)
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2);
	else
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);

	do {
		ret = hisi_qp_send(ctx->qp, msg);
	} while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);

	/* success */
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}
static int hpre_is_dh_params_length_valid(unsigned int key_sz)
{
#define _HPRE_DH_GRP1		768
#define _HPRE_DH_GRP2		1024
#define _HPRE_DH_GRP5		1536
#define _HPRE_DH_GRP14		2048
#define _HPRE_DH_GRP15		3072
#define _HPRE_DH_GRP16		4096
	switch (key_sz) {
	case _HPRE_DH_GRP1:
	case _HPRE_DH_GRP2:
	case _HPRE_DH_GRP5:
	case _HPRE_DH_GRP14:
	case _HPRE_DH_GRP15:
	case _HPRE_DH_GRP16:
		return 0;
	}

	return -EINVAL;
}
static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
{
	struct device *dev = HPRE_DEV(ctx);
	unsigned int sz;

	if (params->p_size > HPRE_DH_MAX_P_SZ)
		return -EINVAL;

	if (hpre_is_dh_params_length_valid(params->p_size <<
					   HPRE_BITS_2_BYTES_SHIFT))
		return -EINVAL;

	sz = ctx->key_sz = params->p_size;
	ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1,
					  &ctx->dh.dma_xa_p, GFP_KERNEL);
	if (!ctx->dh.xa_p)
		return -ENOMEM;

	memcpy(ctx->dh.xa_p + sz, params->p, sz);

	/* If g equals 2 don't copy it */
	if (params->g_size == 1 && *(char *)params->g == HPRE_DH_G_FLAG) {
		ctx->crt_g2_mode = true;
		return 0;
	}

	ctx->dh.g = dma_alloc_coherent(dev, sz, &ctx->dh.dma_g, GFP_KERNEL);
	if (!ctx->dh.g) {
		dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
				  ctx->dh.dma_xa_p);
		ctx->dh.xa_p = NULL;
		return -ENOMEM;
	}

	memcpy(ctx->dh.g + (sz - params->g_size), params->g, params->g_size);

	return 0;
}
static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
	struct device *dev = HPRE_DEV(ctx);
	unsigned int sz = ctx->key_sz;

	if (is_clear_all)
		hisi_qm_stop_qp(ctx->qp);

	if (ctx->dh.g) {
		dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g);
		ctx->dh.g = NULL;
	}

	if (ctx->dh.xa_p) {
		memzero_explicit(ctx->dh.xa_p, sz);
		dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
				  ctx->dh.dma_xa_p);
		ctx->dh.xa_p = NULL;
	}

	hpre_ctx_clear(ctx, is_clear_all);
}
static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
			      unsigned int len)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	struct dh params;
	int ret;

	if (crypto_dh_decode_key(buf, len, &params) < 0)
		return -EINVAL;

	/* Free old secret if any */
	hpre_dh_clear_ctx(ctx, false);

	ret = hpre_dh_set_params(ctx, &params);
	if (ret < 0)
		goto err_clear_ctx;

	memcpy(ctx->dh.xa_p + (ctx->key_sz - params.key_size), params.key,
	       params.key_size);

	return 0;

err_clear_ctx:
	hpre_dh_clear_ctx(ctx, false);
	return ret;
}

static unsigned int hpre_dh_max_size(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	return ctx->key_sz;
}

static int hpre_dh_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	return hpre_ctx_init(ctx);
}

static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	hpre_dh_clear_ctx(ctx, true);
}
#endif
static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len)
{
	while (!**ptr && *len) {
		(*ptr)++;
		(*len)--;
	}
}
static bool hpre_rsa_key_size_is_support(unsigned int len)
{
	unsigned int bits = len << HPRE_BITS_2_BYTES_SHIFT;

#define _RSA_1024BITS_KEY_WDTH	1024
#define _RSA_2048BITS_KEY_WDTH	2048
#define _RSA_3072BITS_KEY_WDTH	3072
#define _RSA_4096BITS_KEY_WDTH	4096

	switch (bits) {
	case _RSA_1024BITS_KEY_WDTH:
	case _RSA_2048BITS_KEY_WDTH:
	case _RSA_3072BITS_KEY_WDTH:
	case _RSA_4096BITS_KEY_WDTH:
		return true;
	default:
		return false;
	}
}
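/*
 * RSA public-key (encrypt/verify) path; 512- and 1536-bit keys fall back to
 * the rsa-generic software tfm.
 */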
static int hpre_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	void *tmp = akcipher_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
	struct hpre_sqe *msg = &hpre_req->req;
	int ctr = 0;
	int ret;

	/* For 512 and 1536 bits key size, use soft tfm instead */
	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
		akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
		ret = crypto_akcipher_encrypt(req);
		akcipher_request_set_tfm(req, tfm);
		return ret;
	}

	if (unlikely(!ctx->rsa.pubkey))
		return -EINVAL;

	ret = hpre_msg_request_set(ctx, req, true);
	if (unlikely(ret))
		return ret;

	msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT);
	msg->key = cpu_to_le64((u64)ctx->rsa.dma_pubkey);

	ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
	if (unlikely(ret))
		goto clear_all;

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
	if (unlikely(ret))
		goto clear_all;

	do {
		ret = hisi_qp_send(ctx->qp, msg);
	} while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);

	/* success */
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}
static int hpre_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	void *tmp = akcipher_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
	struct hpre_sqe *msg = &hpre_req->req;
	int ctr = 0;
	int ret;

	/* For 512 and 1536 bits key size, use soft tfm instead */
	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
		akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
		ret = crypto_akcipher_decrypt(req);
		akcipher_request_set_tfm(req, tfm);
		return ret;
	}

	if (unlikely(!ctx->rsa.prikey))
		return -EINVAL;

	ret = hpre_msg_request_set(ctx, req, true);
	if (unlikely(ret))
		return ret;

	if (ctx->crt_g2_mode) {
		msg->key = cpu_to_le64((u64)ctx->rsa.dma_crt_prikey);
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
				       HPRE_ALG_NC_CRT);
	} else {
		msg->key = cpu_to_le64((u64)ctx->rsa.dma_prikey);
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
				       HPRE_ALG_NC_NCRT);
	}

	ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
	if (unlikely(ret))
		goto clear_all;

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
	if (unlikely(ret))
		goto clear_all;

	do {
		ret = hisi_qp_send(ctx->qp, msg);
	} while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);

	/* success */
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}
static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value,
			  size_t vlen, bool private)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	ctx->key_sz = vlen;

	/* if invalid key size provided, we use software tfm */
	if (!hpre_rsa_key_size_is_support(ctx->key_sz))
		return 0;

	ctx->rsa.pubkey = dma_alloc_coherent(HPRE_DEV(ctx), vlen << 1,
					     &ctx->rsa.dma_pubkey,
					     GFP_KERNEL);
	if (!ctx->rsa.pubkey)
		return -ENOMEM;

	if (private) {
		ctx->rsa.prikey = dma_alloc_coherent(HPRE_DEV(ctx), vlen << 1,
						     &ctx->rsa.dma_prikey,
						     GFP_KERNEL);
		if (!ctx->rsa.prikey) {
			dma_free_coherent(HPRE_DEV(ctx), vlen << 1,
					  ctx->rsa.pubkey,
					  ctx->rsa.dma_pubkey);
			ctx->rsa.pubkey = NULL;
			return -ENOMEM;
		}
		memcpy(ctx->rsa.prikey + vlen, ptr, vlen);
	}
	memcpy(ctx->rsa.pubkey + vlen, ptr, vlen);

	/* Using hardware HPRE to do RSA */
	return 1;
}
static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value,
			  size_t vlen)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		return -EINVAL;

	memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen);

	return 0;
}

static int hpre_rsa_set_d(struct hpre_ctx *ctx, const char *value,
			  size_t vlen)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		return -EINVAL;

	memcpy(ctx->rsa.prikey + ctx->key_sz - vlen, ptr, vlen);

	return 0;
}
static int hpre_crt_para_get(char *para, size_t para_sz,
			     const char *raw, size_t raw_sz)
{
	const char *ptr = raw;
	size_t len = raw_sz;

	hpre_rsa_drop_leading_zeros(&ptr, &len);
	if (!len || len > para_sz)
		return -EINVAL;

	memcpy(para + para_sz - len, ptr, len);

	return 0;
}
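/*
 * Pack the CRT private-key material into one coherent buffer in the order
 * dq, dp, q, p, qinv, each component right-aligned within a half-key-size
 * slot.
 */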
static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
{
	unsigned int hlf_ksz = ctx->key_sz >> 1;
	struct device *dev = HPRE_DEV(ctx);
	u64 offset;
	int ret;

	ctx->rsa.crt_prikey = dma_alloc_coherent(dev, hlf_ksz * HPRE_CRT_PRMS,
						 &ctx->rsa.dma_crt_prikey,
						 GFP_KERNEL);
	if (!ctx->rsa.crt_prikey)
		return -ENOMEM;

	ret = hpre_crt_para_get(ctx->rsa.crt_prikey, hlf_ksz,
				rsa_key->dq, rsa_key->dq_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->dp, rsa_key->dp_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_Q;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->q, rsa_key->q_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_P;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->p, rsa_key->p_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_INV;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->qinv, rsa_key->qinv_sz);
	if (ret)
		goto free_key;

	ctx->crt_g2_mode = true;

	return 0;

free_key:
	offset = hlf_ksz * HPRE_CRT_PRMS;
	memzero_explicit(ctx->rsa.crt_prikey, offset);
	dma_free_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey,
			  ctx->rsa.dma_crt_prikey);
	ctx->rsa.crt_prikey = NULL;
	ctx->crt_g2_mode = false;

	return ret;
}
/* If is_clear_all is set, release all the QP resources as well. */
static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
	unsigned int half_key_sz = ctx->key_sz >> 1;
	struct device *dev = HPRE_DEV(ctx);

	if (is_clear_all)
		hisi_qm_stop_qp(ctx->qp);

	if (ctx->rsa.pubkey) {
		dma_free_coherent(dev, ctx->key_sz << 1,
				  ctx->rsa.pubkey, ctx->rsa.dma_pubkey);
		ctx->rsa.pubkey = NULL;
	}

	if (ctx->rsa.crt_prikey) {
		memzero_explicit(ctx->rsa.crt_prikey,
				 half_key_sz * HPRE_CRT_PRMS);
		dma_free_coherent(dev, half_key_sz * HPRE_CRT_PRMS,
				  ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey);
		ctx->rsa.crt_prikey = NULL;
	}

	if (ctx->rsa.prikey) {
		memzero_explicit(ctx->rsa.prikey, ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey,
				  ctx->rsa.dma_prikey);
		ctx->rsa.prikey = NULL;
	}

	hpre_ctx_clear(ctx, is_clear_all);
}
/*
 * Decide whether the key carries CRT parameters:
 * CRT: return true, N-CRT: return false.
 */
static bool hpre_is_crt_key(struct rsa_key *key)
{
	u16 len = key->p_sz + key->q_sz + key->dp_sz + key->dq_sz +
		  key->qinv_sz;

#define LEN_OF_NCRT_PARA	5

	/* N-CRT less than 5 parameters */
	return len > LEN_OF_NCRT_PARA;
}
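/*
 * Parse the raw RSA key and lay out n/e/d (and the CRT components when
 * present) in the coherent buffers the engine expects. For key sizes the
 * driver does not support in hardware, only ctx->key_sz is recorded and no
 * hardware key is built.
 */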
static int hpre_rsa_setkey(struct hpre_ctx *ctx, const void *key,
			   unsigned int keylen, bool private)
{
	struct rsa_key rsa_key;
	int ret;

	hpre_rsa_clear_ctx(ctx, false);

	if (private)
		ret = rsa_parse_priv_key(&rsa_key, key, keylen);
	else
		ret = rsa_parse_pub_key(&rsa_key, key, keylen);
	if (ret < 0)
		return ret;

	ret = hpre_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz, private);
	if (ret <= 0)
		return ret;

	if (private) {
		ret = hpre_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
		if (ret < 0)
			goto free;

		if (hpre_is_crt_key(&rsa_key)) {
			ret = hpre_rsa_setkey_crt(ctx, &rsa_key);
			if (ret < 0)
				goto free;
		}
	}

	ret = hpre_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
	if (ret < 0)
		goto free;

	if ((private && !ctx->rsa.prikey) || !ctx->rsa.pubkey) {
		ret = -EINVAL;
		goto free;
	}

	return 0;

free:
	hpre_rsa_clear_ctx(ctx, false);
	return ret;
}
static int hpre_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
			      unsigned int keylen)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ret = crypto_akcipher_set_pub_key(ctx->rsa.soft_tfm, key, keylen);
	if (ret)
		return ret;

	return hpre_rsa_setkey(ctx, key, keylen, false);
}

static int hpre_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
			       unsigned int keylen)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ret = crypto_akcipher_set_priv_key(ctx->rsa.soft_tfm, key, keylen);
	if (ret)
		return ret;

	return hpre_rsa_setkey(ctx, key, keylen, true);
}
static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);

	/* For 512 and 1536 bits key size, use soft tfm instead */
	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ)
		return crypto_akcipher_maxsize(ctx->rsa.soft_tfm);

	return ctx->key_sz;
}

static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0);
	if (IS_ERR(ctx->rsa.soft_tfm)) {
		pr_err("Can not alloc_akcipher!\n");
		return PTR_ERR(ctx->rsa.soft_tfm);
	}

	ret = hpre_ctx_init(ctx);
	if (ret)
		crypto_free_akcipher(ctx->rsa.soft_tfm);

	return ret;
}

static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);

	hpre_rsa_clear_ctx(ctx, true);
	crypto_free_akcipher(ctx->rsa.soft_tfm);
}
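/*
 * "rsa" here is the raw (unpadded) primitive; .sign and .verify reuse the
 * decrypt/encrypt handlers.
 */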
static struct akcipher_alg rsa = {
	.sign = hpre_rsa_dec,
	.verify = hpre_rsa_enc,
	.encrypt = hpre_rsa_enc,
	.decrypt = hpre_rsa_dec,
	.set_pub_key = hpre_rsa_setpubkey,
	.set_priv_key = hpre_rsa_setprivkey,
	.max_size = hpre_rsa_max_size,
	.init = hpre_rsa_init_tfm,
	.exit = hpre_rsa_exit_tfm,
	.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "rsa",
		.cra_driver_name = "hpre-rsa",
		.cra_module = THIS_MODULE,
	},
};
#ifdef CONFIG_CRYPTO_DH
static struct kpp_alg dh = {
	.set_secret = hpre_dh_set_secret,
	.generate_public_key = hpre_dh_compute_value,
	.compute_shared_secret = hpre_dh_compute_value,
	.max_size = hpre_dh_max_size,
	.init = hpre_dh_init_tfm,
	.exit = hpre_dh_exit_tfm,
	.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "dh",
		.cra_driver_name = "hpre-dh",
		.cra_module = THIS_MODULE,
	},
};
#endif
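/*
 * The algorithms are registered when the first HPRE device comes up and
 * unregistered when the last one goes away; hpre_active_devs is protected
 * by hpre_alg_lock.
 */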
int hpre_algs_register(void)
{
	int ret = 0;

	mutex_lock(&hpre_alg_lock);
	if (++hpre_active_devs == 1) {
		rsa.base.cra_flags = 0;
		ret = crypto_register_akcipher(&rsa);
		if (ret)
			goto unlock;
#ifdef CONFIG_CRYPTO_DH
		ret = crypto_register_kpp(&dh);
		if (ret) {
			crypto_unregister_akcipher(&rsa);
			goto unlock;
		}
#endif
	}

unlock:
	mutex_unlock(&hpre_alg_lock);
	return ret;
}
void hpre_algs_unregister(void)
{
	mutex_lock(&hpre_alg_lock);
	if (--hpre_active_devs == 0) {
		crypto_unregister_akcipher(&rsa);
#ifdef CONFIG_CRYPTO_DH
		crypto_unregister_kpp(&dh);
#endif
	}
	mutex_unlock(&hpre_alg_lock);
}