// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/module.h>
#include <crypto/internal/rsa.h>
#include <crypto/internal/akcipher.h>
#include <crypto/akcipher.h>
#include <crypto/kpp.h>
#include <crypto/internal/kpp.h>
#include <crypto/dh.h>
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <crypto/scatterwalk.h>
#include "icp_qat_fw_pke.h"
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"

static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

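/*
 * Parameter blocks handed to the PKE firmware. Each union overlays a
 * typed view (enc/dec/dec_crt) on the flat in_tab/out_tab array of bus
 * addresses, so a request can fill in the named fields and then DMA map
 * the whole block as a single table of up to eight entries.
 */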
struct qat_rsa_input_params {
	union {
		struct {
			dma_addr_t m;
			dma_addr_t e;
			dma_addr_t n;
		} enc;
		struct {
			dma_addr_t c;
			dma_addr_t d;
			dma_addr_t n;
		} dec;
		struct {
			dma_addr_t c;
			dma_addr_t p;
			dma_addr_t q;
			dma_addr_t dp;
			dma_addr_t dq;
			dma_addr_t qinv;
		} dec_crt;
		u64 in_tab[8];
	};
} __packed __aligned(64);

struct qat_rsa_output_params {
	union {
		struct {
			dma_addr_t c;
		} enc;
		struct {
			dma_addr_t m;
		} dec;
		u64 out_tab[8];
	};
} __packed __aligned(64);

struct qat_rsa_ctx {
	char *n;
	char *e;
	char *d;
	char *p;
	char *q;
	char *dp;
	char *dq;
	char *qinv;
	dma_addr_t dma_n;
	dma_addr_t dma_e;
	dma_addr_t dma_d;
	dma_addr_t dma_p;
	dma_addr_t dma_q;
	dma_addr_t dma_dp;
	dma_addr_t dma_dq;
	dma_addr_t dma_qinv;
	unsigned int key_sz;
	bool crt_mode;
	struct qat_crypto_instance *inst;
} __packed __aligned(64);

struct qat_dh_input_params {
	union {
		struct {
			dma_addr_t b;
			dma_addr_t xa;
			dma_addr_t p;
		} in;
		struct {
			dma_addr_t xa;
			dma_addr_t p;
		} in_g2;
		u64 in_tab[8];
	};
} __packed __aligned(64);

struct qat_dh_output_params {
	union {
		dma_addr_t r;
		u64 out_tab[8];
	};
} __packed __aligned(64);

struct qat_dh_ctx {
	char *g;
	char *xa;
	char *p;
	dma_addr_t dma_g;
	dma_addr_t dma_xa;
	dma_addr_t dma_p;
	unsigned int p_size;
	bool g2;
	struct qat_crypto_instance *inst;
} __packed __aligned(64);

struct qat_asym_request {
	union {
		struct qat_rsa_input_params rsa;
		struct qat_dh_input_params dh;
	} in;
	union {
		struct qat_rsa_output_params rsa;
		struct qat_dh_output_params dh;
	} out;
	dma_addr_t phy_in;
	dma_addr_t phy_out;
	char *src_align;
	char *dst_align;
	struct icp_qat_fw_pke_request req;
	union {
		struct qat_rsa_ctx *rsa;
		struct qat_dh_ctx *dh;
	} ctx;
	union {
		struct akcipher_request *rsa;
		struct kpp_request *dh;
	} areq;
	int err;
	void (*cb)(struct icp_qat_fw_pke_resp *resp);
} __aligned(64);

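/*
 * Completion callback for DH requests: translate the firmware status
 * into an errno, free or unmap the source and destination buffers, copy
 * a bounced result back into the caller's scatterlist, unmap the
 * parameter tables and complete the kpp request.
 */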
static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
{
	struct qat_asym_request *req = (void *)(__force long)resp->opaque;
	struct kpp_request *areq = req->areq.dh;
	struct device *dev = &GET_DEV(req->ctx.dh->inst->accel_dev);
	int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
				resp->pke_resp_hdr.comn_resp_flags);

	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;

	if (areq->src) {
		if (req->src_align)
			dma_free_coherent(dev, req->ctx.dh->p_size,
					  req->src_align, req->in.dh.in.b);
		else
			dma_unmap_single(dev, req->in.dh.in.b,
					 req->ctx.dh->p_size, DMA_TO_DEVICE);
	}

	areq->dst_len = req->ctx.dh->p_size;
	if (req->dst_align) {
		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
					 areq->dst_len, 1);

		dma_free_coherent(dev, req->ctx.dh->p_size, req->dst_align,
				  req->out.dh.r);
	} else {
		dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
				 DMA_FROM_DEVICE);
	}

	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, req->phy_out,
			 sizeof(struct qat_dh_output_params),
			 DMA_TO_DEVICE);

	kpp_request_complete(areq, err);
}

#define PKE_DH_1536 0x390c1a49
#define PKE_DH_G2_1536 0x2e0b1a3e
#define PKE_DH_2048 0x4d0c1a60
#define PKE_DH_G2_2048 0x3e0b1a55
#define PKE_DH_3072 0x510c1a77
#define PKE_DH_G2_3072 0x3a0b1a6c
#define PKE_DH_4096 0x690c1a8e
#define PKE_DH_G2_4096 0x4a0b1a83

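/*
 * Map the modulus length in bytes to the matching firmware function ID
 * above. Only 1536-, 2048-, 3072- and 4096-bit groups have PKE
 * routines; each size also has a dedicated variant for generator g = 2.
 * A return of 0 means the length is unsupported.
 */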
static unsigned long qat_dh_fn_id(unsigned int len, bool g2)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 1536:
		return g2 ? PKE_DH_G2_1536 : PKE_DH_1536;
	case 2048:
		return g2 ? PKE_DH_G2_2048 : PKE_DH_2048;
	case 3072:
		return g2 ? PKE_DH_G2_3072 : PKE_DH_3072;
	case 4096:
		return g2 ? PKE_DH_G2_4096 : PKE_DH_4096;
	default:
		return 0;
	}
}

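/*
 * Backs both kpp operations: with req->src set, the peer's public value
 * is the base and the result is the shared secret; without it, the
 * public key is derived from the stored private value xa, using either
 * g or, in g2 mode, the dedicated base-2 firmware routine.
 */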
static int qat_dh_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_asym_request *qat_req =
			PTR_ALIGN(kpp_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	int ret, ctr = 0;
	int n_input_params = 0;

	if (unlikely(!ctx->xa))
		return -EINVAL;

	if (req->dst_len < ctx->p_size) {
		req->dst_len = ctx->p_size;
		return -EOVERFLOW;
	}
	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);

	msg->pke_hdr.cd_pars.func_id = qat_dh_fn_id(ctx->p_size,
						    !req->src && ctx->g2);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->cb = qat_dh_cb;
	qat_req->ctx.dh = ctx;
	qat_req->areq.dh = req;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

	/*
	 * If no source is provided, use g as the base.
	 */
	if (req->src) {
		qat_req->in.dh.in.xa = ctx->dma_xa;
		qat_req->in.dh.in.p = ctx->dma_p;
		n_input_params = 3;
	} else {
		if (ctx->g2) {
			qat_req->in.dh.in_g2.xa = ctx->dma_xa;
			qat_req->in.dh.in_g2.p = ctx->dma_p;
			n_input_params = 2;
		} else {
			qat_req->in.dh.in.b = ctx->dma_g;
			qat_req->in.dh.in.xa = ctx->dma_xa;
			qat_req->in.dh.in.p = ctx->dma_p;
			n_input_params = 3;
		}
	}

	ret = -ENOMEM;
	if (req->src) {
		/*
		 * src can be any size in the valid range, but the HW expects
		 * it to be the same size as the modulus p, so if it differs
		 * we must allocate a new buffer and copy the src data into
		 * it. Otherwise we just map the user-provided buffer, which
		 * must also be contiguous.
		 */
		if (sg_is_last(req->src) && req->src_len == ctx->p_size) {
			qat_req->src_align = NULL;
			qat_req->in.dh.in.b = dma_map_single(dev,
							     sg_virt(req->src),
							     req->src_len,
							     DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(dev,
						       qat_req->in.dh.in.b)))
				return ret;

		} else {
			int shift = ctx->p_size - req->src_len;

			qat_req->src_align = dma_alloc_coherent(dev,
								ctx->p_size,
								&qat_req->in.dh.in.b,
								GFP_KERNEL);
			if (unlikely(!qat_req->src_align))
				return ret;

			scatterwalk_map_and_copy(qat_req->src_align + shift,
						 req->src, 0, req->src_len, 0);
		}
	}
	/*
	 * dst can be any size in the valid range, but the HW expects it to
	 * be the same size as the modulus p, so if it differs we must
	 * allocate a new buffer and copy the result back to the caller on
	 * completion. Otherwise we just map the user-provided buffer, which
	 * must also be contiguous.
	 */
	if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) {
		qat_req->dst_align = NULL;
		qat_req->out.dh.r = dma_map_single(dev, sg_virt(req->dst),
						   req->dst_len,
						   DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
			goto unmap_src;

	} else {
		qat_req->dst_align = dma_alloc_coherent(dev, ctx->p_size,
							&qat_req->out.dh.r,
							GFP_KERNEL);
		if (unlikely(!qat_req->dst_align))
			goto unmap_src;
	}

	qat_req->in.dh.in_tab[n_input_params] = 0;
	qat_req->out.dh.out_tab[1] = 0;
	/* Mapping in.in.b or in.in_g2.xa is the same */
	qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh.in.b,
					 sizeof(struct qat_dh_input_params),
					 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
		goto unmap_dst;

	qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh.r,
					  sizeof(struct qat_dh_output_params),
					  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap_in_params;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (u64)(__force long)qat_req;
	msg->input_param_count = n_input_params;
	msg->output_param_count = 1;

	do {
		ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg);
	} while (ret == -EBUSY && ctr++ < 100);

	if (!ret)
		return -EINPROGRESS;

	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_dh_output_params),
				 DMA_TO_DEVICE);
unmap_in_params:
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_dh_input_params),
				 DMA_TO_DEVICE);
unmap_dst:
	if (qat_req->dst_align)
		dma_free_coherent(dev, ctx->p_size, qat_req->dst_align,
				  qat_req->out.dh.r);
	else
		if (!dma_mapping_error(dev, qat_req->out.dh.r))
			dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
					 DMA_FROM_DEVICE);
unmap_src:
	if (req->src) {
		if (qat_req->src_align)
			dma_free_coherent(dev, ctx->p_size, qat_req->src_align,
					  qat_req->in.dh.in.b);
		else
			if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
				dma_unmap_single(dev, qat_req->in.dh.in.b,
						 ctx->p_size,
						 DMA_TO_DEVICE);
	}
	return ret;
}

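/* Accept only group sizes for which a firmware function ID exists. */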
static int qat_dh_check_params_length(unsigned int p_len)
{
	switch (p_len) {
	case 1536:
	case 2048:
	case 3072:
	case 4096:
		return 0;
	}
	return -EINVAL;
}

static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);

	if (qat_dh_check_params_length(params->p_size << 3))
		return -EINVAL;

	ctx->p_size = params->p_size;
	ctx->p = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
	if (!ctx->p)
		return -ENOMEM;
	memcpy(ctx->p, params->p, ctx->p_size);

	/* If g equals 2 don't copy it */
	if (params->g_size == 1 && *(char *)params->g == 0x02) {
		ctx->g2 = true;
		return 0;
	}

	ctx->g = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
	if (!ctx->g)
		return -ENOMEM;
	memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
	       params->g_size);

	return 0;
}

static void qat_dh_clear_ctx(struct device *dev, struct qat_dh_ctx *ctx)
{
	if (ctx->g) {
		dma_free_coherent(dev, ctx->p_size, ctx->g, ctx->dma_g);
		ctx->g = NULL;
	}
	if (ctx->xa) {
		dma_free_coherent(dev, ctx->p_size, ctx->xa, ctx->dma_xa);
		ctx->xa = NULL;
	}
	if (ctx->p) {
		dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
		ctx->p = NULL;
	}
	ctx->p_size = 0;
	ctx->g2 = false;
}

static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
			     unsigned int len)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
	struct dh params;
	int ret;

	if (crypto_dh_decode_key(buf, len, &params) < 0)
		return -EINVAL;

	/* Free old secret if any */
	qat_dh_clear_ctx(dev, ctx);

	ret = qat_dh_set_params(ctx, &params);
	if (ret < 0)
		goto err_clear_ctx;

	ctx->xa = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
				     GFP_KERNEL);
	if (!ctx->xa) {
		ret = -ENOMEM;
		goto err_clear_ctx;
	}
	memcpy(ctx->xa + (ctx->p_size - params.key_size), params.key,
	       params.key_size);

	return 0;

err_clear_ctx:
	qat_dh_clear_ctx(dev, ctx);
	return ret;
}

static unsigned int qat_dh_max_size(struct crypto_kpp *tfm)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);

	return ctx->p_size;
}

static int qat_dh_init_tfm(struct crypto_kpp *tfm)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct qat_crypto_instance *inst =
			qat_crypto_get_instance_node(get_current_node());

	if (!inst)
		return -EINVAL;

	ctx->p_size = 0;
	ctx->g2 = false;
	ctx->inst = inst;
	return 0;
}

static void qat_dh_exit_tfm(struct crypto_kpp *tfm)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);

	qat_dh_clear_ctx(dev, ctx);
	qat_crypto_put_instance(ctx->inst);
}

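/*
 * Completion callback for RSA requests; the same unwinding as
 * qat_dh_cb. The enc view of the parameter union is used throughout:
 * enc.m and dec.c (and enc.c and dec.m) share an offset, so one cleanup
 * path serves both directions.
 */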
static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
{
	struct qat_asym_request *req = (void *)(__force long)resp->opaque;
	struct akcipher_request *areq = req->areq.rsa;
	struct device *dev = &GET_DEV(req->ctx.rsa->inst->accel_dev);
	int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
				resp->pke_resp_hdr.comn_resp_flags);

	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;

	if (req->src_align)
		dma_free_coherent(dev, req->ctx.rsa->key_sz, req->src_align,
				  req->in.rsa.enc.m);
	else
		dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
				 DMA_TO_DEVICE);

	areq->dst_len = req->ctx.rsa->key_sz;
	if (req->dst_align) {
		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
					 areq->dst_len, 1);

		dma_free_coherent(dev, req->ctx.rsa->key_sz, req->dst_align,
				  req->out.rsa.enc.c);
	} else {
		dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
				 DMA_FROM_DEVICE);
	}

	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, req->phy_out,
			 sizeof(struct qat_rsa_output_params),
			 DMA_TO_DEVICE);

	akcipher_request_complete(areq, err);
}

void qat_alg_asym_callback(void *_resp)
{
	struct icp_qat_fw_pke_resp *resp = _resp;
	struct qat_asym_request *areq = (void *)(__force long)resp->opaque;

	areq->cb(resp);
}

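/*
 * Firmware function IDs for the RSA service: EP is used by
 * qat_rsa_enc(), DP1 by qat_rsa_dec() in (n, d) mode and DP2 by
 * qat_rsa_dec() in CRT mode, one ID per supported key size.
 */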
#define PKE_RSA_EP_512 0x1c161b21
#define PKE_RSA_EP_1024 0x35111bf7
#define PKE_RSA_EP_1536 0x4d111cdc
#define PKE_RSA_EP_2048 0x6e111dba
#define PKE_RSA_EP_3072 0x7d111ea3
#define PKE_RSA_EP_4096 0xa5101f7e

static unsigned long qat_rsa_enc_fn_id(unsigned int len)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 512:
		return PKE_RSA_EP_512;
	case 1024:
		return PKE_RSA_EP_1024;
	case 1536:
		return PKE_RSA_EP_1536;
	case 2048:
		return PKE_RSA_EP_2048;
	case 3072:
		return PKE_RSA_EP_3072;
	case 4096:
		return PKE_RSA_EP_4096;
	default:
		return 0;
	}
}

#define PKE_RSA_DP1_512 0x1c161b3c
#define PKE_RSA_DP1_1024 0x35111c12
#define PKE_RSA_DP1_1536 0x4d111cf7
#define PKE_RSA_DP1_2048 0x6e111dda
#define PKE_RSA_DP1_3072 0x7d111ebe
#define PKE_RSA_DP1_4096 0xa5101f98

static unsigned long qat_rsa_dec_fn_id(unsigned int len)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 512:
		return PKE_RSA_DP1_512;
	case 1024:
		return PKE_RSA_DP1_1024;
	case 1536:
		return PKE_RSA_DP1_1536;
	case 2048:
		return PKE_RSA_DP1_2048;
	case 3072:
		return PKE_RSA_DP1_3072;
	case 4096:
		return PKE_RSA_DP1_4096;
	default:
		return 0;
	}
}

#define PKE_RSA_DP2_512 0x1c131b57
#define PKE_RSA_DP2_1024 0x26131c2d
#define PKE_RSA_DP2_1536 0x45111d12
#define PKE_RSA_DP2_2048 0x59121dfa
#define PKE_RSA_DP2_3072 0x81121ed9
#define PKE_RSA_DP2_4096 0xb1111fb2

static unsigned long qat_rsa_dec_fn_id_crt(unsigned int len)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 512:
		return PKE_RSA_DP2_512;
	case 1024:
		return PKE_RSA_DP2_1024;
	case 1536:
		return PKE_RSA_DP2_1536;
	case 2048:
		return PKE_RSA_DP2_2048;
	case 3072:
		return PKE_RSA_DP2_3072;
	case 4096:
		return PKE_RSA_DP2_4096;
	default:
		return 0;
	}
}

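/*
 * RSA encrypt: m, e and n are passed as a three-entry input table and
 * the ciphertext comes back as a single output parameter. Both source
 * and destination must be exactly key_sz bytes and physically
 * contiguous; anything else is bounced through a coherent buffer.
 */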
static int qat_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_asym_request *qat_req =
			PTR_ALIGN(akcipher_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	int ret, ctr = 0;

	if (unlikely(!ctx->n || !ctx->e))
		return -EINVAL;

	if (req->dst_len < ctx->key_sz) {
		req->dst_len = ctx->key_sz;
		return -EOVERFLOW;
	}
	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
	msg->pke_hdr.cd_pars.func_id = qat_rsa_enc_fn_id(ctx->key_sz);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->cb = qat_rsa_cb;
	qat_req->ctx.rsa = ctx;
	qat_req->areq.rsa = req;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

	qat_req->in.rsa.enc.e = ctx->dma_e;
	qat_req->in.rsa.enc.n = ctx->dma_n;
	ret = -ENOMEM;

	/*
	 * src can be any size in the valid range, but the HW expects it to
	 * be the same size as the modulus n, so if it differs we must
	 * allocate a new buffer and copy the src data into it. Otherwise we
	 * just map the user-provided buffer, which must also be contiguous.
	 */
	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
		qat_req->src_align = NULL;
		qat_req->in.rsa.enc.m = dma_map_single(dev, sg_virt(req->src),
						       req->src_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
			return ret;

	} else {
		int shift = ctx->key_sz - req->src_len;

		qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
							&qat_req->in.rsa.enc.m,
							GFP_KERNEL);
		if (unlikely(!qat_req->src_align))
			return ret;

		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
					 0, req->src_len, 0);
	}
	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
		qat_req->dst_align = NULL;
		qat_req->out.rsa.enc.c = dma_map_single(dev, sg_virt(req->dst),
							req->dst_len,
							DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
			goto unmap_src;

	} else {
		qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
							&qat_req->out.rsa.enc.c,
							GFP_KERNEL);
		if (unlikely(!qat_req->dst_align))
			goto unmap_src;
	}

	qat_req->in.rsa.in_tab[3] = 0;
	qat_req->out.rsa.out_tab[1] = 0;
	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m,
					 sizeof(struct qat_rsa_input_params),
					 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
		goto unmap_dst;

	qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.enc.c,
					  sizeof(struct qat_rsa_output_params),
					  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap_in_params;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (u64)(__force long)qat_req;
	msg->input_param_count = 3;
	msg->output_param_count = 1;
	do {
		ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg);
	} while (ret == -EBUSY && ctr++ < 100);

	if (!ret)
		return -EINPROGRESS;

	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_rsa_output_params),
				 DMA_TO_DEVICE);
unmap_in_params:
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_rsa_input_params),
				 DMA_TO_DEVICE);
unmap_dst:
	if (qat_req->dst_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
				  qat_req->out.rsa.enc.c);
	else
		if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
			dma_unmap_single(dev, qat_req->out.rsa.enc.c,
					 ctx->key_sz, DMA_FROM_DEVICE);
unmap_src:
	if (qat_req->src_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
				  qat_req->in.rsa.enc.m);
	else
		if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
			dma_unmap_single(dev, qat_req->in.rsa.enc.m,
					 ctx->key_sz, DMA_TO_DEVICE);
	return ret;
}

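/*
 * RSA decrypt: mirrors qat_rsa_enc() but selects either the (n, d)
 * routine with three input parameters or, when the CRT components were
 * accepted at setkey time, the CRT routine with six.
 */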
static int qat_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_asym_request *qat_req =
			PTR_ALIGN(akcipher_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	int ret, ctr = 0;

	if (unlikely(!ctx->n || !ctx->d))
		return -EINVAL;

	if (req->dst_len < ctx->key_sz) {
		req->dst_len = ctx->key_sz;
		return -EOVERFLOW;
	}
	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
	msg->pke_hdr.cd_pars.func_id = ctx->crt_mode ?
		qat_rsa_dec_fn_id_crt(ctx->key_sz) :
		qat_rsa_dec_fn_id(ctx->key_sz);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->cb = qat_rsa_cb;
	qat_req->ctx.rsa = ctx;
	qat_req->areq.rsa = req;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

	if (ctx->crt_mode) {
		qat_req->in.rsa.dec_crt.p = ctx->dma_p;
		qat_req->in.rsa.dec_crt.q = ctx->dma_q;
		qat_req->in.rsa.dec_crt.dp = ctx->dma_dp;
		qat_req->in.rsa.dec_crt.dq = ctx->dma_dq;
		qat_req->in.rsa.dec_crt.qinv = ctx->dma_qinv;
	} else {
		qat_req->in.rsa.dec.d = ctx->dma_d;
		qat_req->in.rsa.dec.n = ctx->dma_n;
	}
	ret = -ENOMEM;

	/*
	 * src can be any size in the valid range, but the HW expects it to
	 * be the same size as the modulus n, so if it differs we must
	 * allocate a new buffer and copy the src data into it. Otherwise we
	 * just map the user-provided buffer, which must also be contiguous.
	 */
	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
		qat_req->src_align = NULL;
		qat_req->in.rsa.dec.c = dma_map_single(dev, sg_virt(req->src),
						       req->src_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
			return ret;

	} else {
		int shift = ctx->key_sz - req->src_len;

		qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
							&qat_req->in.rsa.dec.c,
							GFP_KERNEL);
		if (unlikely(!qat_req->src_align))
			return ret;

		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
					 0, req->src_len, 0);
	}
	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
		qat_req->dst_align = NULL;
		qat_req->out.rsa.dec.m = dma_map_single(dev, sg_virt(req->dst),
							req->dst_len,
							DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
			goto unmap_src;

	} else {
		qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
							&qat_req->out.rsa.dec.m,
							GFP_KERNEL);
		if (unlikely(!qat_req->dst_align))
			goto unmap_src;
	}

	if (ctx->crt_mode)
		qat_req->in.rsa.in_tab[6] = 0;
	else
		qat_req->in.rsa.in_tab[3] = 0;
	qat_req->out.rsa.out_tab[1] = 0;
	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.dec.c,
					 sizeof(struct qat_rsa_input_params),
					 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
		goto unmap_dst;

	qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.dec.m,
					  sizeof(struct qat_rsa_output_params),
					  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap_in_params;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (u64)(__force long)qat_req;
	if (ctx->crt_mode)
		msg->input_param_count = 6;
	else
		msg->input_param_count = 3;

	msg->output_param_count = 1;
	do {
		ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg);
	} while (ret == -EBUSY && ctr++ < 100);

	if (!ret)
		return -EINPROGRESS;

	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_rsa_output_params),
				 DMA_TO_DEVICE);
unmap_in_params:
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_rsa_input_params),
				 DMA_TO_DEVICE);
unmap_dst:
	if (qat_req->dst_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
				  qat_req->out.rsa.dec.m);
	else
		if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
			dma_unmap_single(dev, qat_req->out.rsa.dec.m,
					 ctx->key_sz, DMA_FROM_DEVICE);
unmap_src:
	if (qat_req->src_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
				  qat_req->in.rsa.dec.c);
	else
		if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
			dma_unmap_single(dev, qat_req->in.rsa.dec.c,
					 ctx->key_sz, DMA_TO_DEVICE);
	return ret;
}

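/*
 * The key components arrive as big-endian byte strings that may carry
 * leading zero bytes; these are stripped before sizing and copying.
 * The stripped length of n fixes ctx->key_sz, which every later buffer
 * is sized against.
 */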
static int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value,
			 size_t vlen)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;
	int ret;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	ctx->key_sz = vlen;
	ret = -EINVAL;
	/* invalid key size provided */
	if (!qat_rsa_enc_fn_id(ctx->key_sz))
		goto err;

	ret = -ENOMEM;
	ctx->n = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
	if (!ctx->n)
		goto err;

	memcpy(ctx->n, ptr, ctx->key_sz);
	return 0;
err:
	ctx->key_sz = 0;
	ctx->n = NULL;
	return ret;
}

static int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value,
			 size_t vlen)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
		ctx->e = NULL;
		return -EINVAL;
	}

	ctx->e = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
	if (!ctx->e)
		return -ENOMEM;

	memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
	return 0;
}

static int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value,
			 size_t vlen)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;
	int ret;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	ret = -EINVAL;
	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		goto err;

	ret = -ENOMEM;
	ctx->d = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
	if (!ctx->d)
		goto err;

	memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
	return 0;
err:
	ctx->d = NULL;
	return ret;
}

static void qat_rsa_drop_leading_zeros(const char **ptr, unsigned int *len)
{
	while (!**ptr && *len) {
		(*ptr)++;
		(*len)--;
	}
}

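/*
 * CRT setup is best effort: on any failure the buffers allocated so far
 * are zeroed and freed and crt_mode stays false, so decryption silently
 * falls back to the (n, d) path set up by qat_rsa_set_d().
 */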
static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr;
	unsigned int len;
	unsigned int half_key_sz = ctx->key_sz / 2;

	/* p */
	ptr = rsa_key->p;
	len = rsa_key->p_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto err;
	ctx->p = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
	if (!ctx->p)
		goto err;
	memcpy(ctx->p + (half_key_sz - len), ptr, len);

	/* q */
	ptr = rsa_key->q;
	len = rsa_key->q_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto free_p;
	ctx->q = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
	if (!ctx->q)
		goto free_p;
	memcpy(ctx->q + (half_key_sz - len), ptr, len);

	/* dp */
	ptr = rsa_key->dp;
	len = rsa_key->dp_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto free_q;
	ctx->dp = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dp,
				     GFP_KERNEL);
	if (!ctx->dp)
		goto free_q;
	memcpy(ctx->dp + (half_key_sz - len), ptr, len);

	/* dq */
	ptr = rsa_key->dq;
	len = rsa_key->dq_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto free_dp;
	ctx->dq = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dq,
				     GFP_KERNEL);
	if (!ctx->dq)
		goto free_dp;
	memcpy(ctx->dq + (half_key_sz - len), ptr, len);

	/* qinv */
	ptr = rsa_key->qinv;
	len = rsa_key->qinv_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto free_dq;
	ctx->qinv = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
				       GFP_KERNEL);
	if (!ctx->qinv)
		goto free_dq;
	memcpy(ctx->qinv + (half_key_sz - len), ptr, len);

	ctx->crt_mode = true;
	return;

free_dq:
	memset(ctx->dq, '\0', half_key_sz);
	dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
	ctx->dq = NULL;
free_dp:
	memset(ctx->dp, '\0', half_key_sz);
	dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
	ctx->dp = NULL;
free_q:
	memset(ctx->q, '\0', half_key_sz);
	dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
	ctx->q = NULL;
free_p:
	memset(ctx->p, '\0', half_key_sz);
	dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
	ctx->p = NULL;
err:
	ctx->crt_mode = false;
}

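/*
 * Private material (d and the CRT components) is zeroed before its DMA
 * buffer is freed; the public n and e are simply freed.
 */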
static void qat_rsa_clear_ctx(struct device *dev, struct qat_rsa_ctx *ctx)
{
	unsigned int half_key_sz = ctx->key_sz / 2;

	/* Free the old key if any */
	if (ctx->n)
		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
	if (ctx->e)
		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
	if (ctx->d) {
		memset(ctx->d, '\0', ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
	}
	if (ctx->p) {
		memset(ctx->p, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
	}
	if (ctx->q) {
		memset(ctx->q, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
	}
	if (ctx->dp) {
		memset(ctx->dp, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
	}
	if (ctx->dq) {
		memset(ctx->dq, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
	}
	if (ctx->qinv) {
		memset(ctx->qinv, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->qinv, ctx->dma_qinv);
	}

	ctx->n = NULL;
	ctx->e = NULL;
	ctx->d = NULL;
	ctx->p = NULL;
	ctx->q = NULL;
	ctx->dp = NULL;
	ctx->dq = NULL;
	ctx->qinv = NULL;
	ctx->crt_mode = false;
	ctx->key_sz = 0;
}

static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
			  unsigned int keylen, bool private)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
	struct rsa_key rsa_key;
	int ret;

	qat_rsa_clear_ctx(dev, ctx);

	if (private)
		ret = rsa_parse_priv_key(&rsa_key, key, keylen);
	else
		ret = rsa_parse_pub_key(&rsa_key, key, keylen);
	if (ret < 0)
		goto free;

	ret = qat_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz);
	if (ret < 0)
		goto free;
	ret = qat_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
	if (ret < 0)
		goto free;
	if (private) {
		ret = qat_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
		if (ret < 0)
			goto free;
		qat_rsa_setkey_crt(ctx, &rsa_key);
	}

	if (!ctx->n || !ctx->e) {
		/* invalid key provided */
		ret = -EINVAL;
		goto free;
	}
	if (private && !ctx->d) {
		/* invalid private key provided */
		ret = -EINVAL;
		goto free;
	}

	return 0;
free:
	qat_rsa_clear_ctx(dev, ctx);
	return ret;
}

static int qat_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
			     unsigned int keylen)
{
	return qat_rsa_setkey(tfm, key, keylen, false);
}

static int qat_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
			      unsigned int keylen)
{
	return qat_rsa_setkey(tfm, key, keylen, true);
}

static unsigned int qat_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	return ctx->key_sz;
}

static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst =
			qat_crypto_get_instance_node(get_current_node());

	if (!inst)
		return -EINVAL;

	ctx->key_sz = 0;
	ctx->inst = inst;
	return 0;
}

static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);

	if (ctx->n)
		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
	if (ctx->e)
		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
	if (ctx->d) {
		memset(ctx->d, '\0', ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
	}
	qat_crypto_put_instance(ctx->inst);
	ctx->n = NULL;
	ctx->e = NULL;
	ctx->d = NULL;
}

static struct akcipher_alg rsa = {
	.encrypt = qat_rsa_enc,
	.decrypt = qat_rsa_dec,
	.set_pub_key = qat_rsa_setpubkey,
	.set_priv_key = qat_rsa_setprivkey,
	.max_size = qat_rsa_max_size,
	.init = qat_rsa_init_tfm,
	.exit = qat_rsa_exit_tfm,
	.reqsize = sizeof(struct qat_asym_request) + 64,
	.base = {
		.cra_name = "rsa",
		.cra_driver_name = "qat-rsa",
		.cra_priority = 1000,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct qat_rsa_ctx),
	},
};

static struct kpp_alg dh = {
	.set_secret = qat_dh_set_secret,
	.generate_public_key = qat_dh_compute_value,
	.compute_shared_secret = qat_dh_compute_value,
	.max_size = qat_dh_max_size,
	.init = qat_dh_init_tfm,
	.exit = qat_dh_exit_tfm,
	.reqsize = sizeof(struct qat_asym_request) + 64,
	.base = {
		.cra_name = "dh",
		.cra_driver_name = "qat-dh",
		.cra_priority = 1000,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct qat_dh_ctx),
	},
};

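/*
 * Registration is reference counted across accelerator devices: the
 * first device to come up registers the algorithms and the last one to
 * go away unregisters them, under algs_lock.
 */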
int qat_asym_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++active_devs == 1) {
		rsa.base.cra_flags = 0;
		ret = crypto_register_akcipher(&rsa);
		if (ret)
			goto unlock;
		ret = crypto_register_kpp(&dh);
	}
unlock:
	mutex_unlock(&algs_lock);
	return ret;
}

void qat_asym_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs == 0) {
		crypto_unregister_akcipher(&rsa);
		crypto_unregister_kpp(&dh);
	}
	mutex_unlock(&algs_lock);
}