/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 * Copyright(c) 2014 Intel Corporation.
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 * Copyright(c) 2014 Intel Corporation.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/module.h>
#include <crypto/internal/rsa.h>
#include <crypto/internal/akcipher.h>
#include <crypto/akcipher.h>
#include <crypto/kpp.h>
#include <crypto/internal/kpp.h>
#include <crypto/dh.h>
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <crypto/scatterwalk.h>
#include "icp_qat_fw_pke.h"
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"

static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

struct qat_rsa_input_params {
	union {
		struct {
			dma_addr_t m;
			dma_addr_t e;
			dma_addr_t n;
		} enc;
		struct {
			dma_addr_t c;
			dma_addr_t d;
			dma_addr_t n;
		} dec;
		struct {
			dma_addr_t c;
			dma_addr_t p;
			dma_addr_t q;
			dma_addr_t dp;
			dma_addr_t dq;
			dma_addr_t qinv;
		} dec_crt;
		u64 in_tab[8];
	};
} __packed __aligned(64);

struct qat_rsa_output_params {
	union {
		struct {
			dma_addr_t c;
		} enc;
		struct {
			dma_addr_t m;
		} dec;
		u64 out_tab[8];
	};
} __packed __aligned(64);

struct qat_rsa_ctx {
	char *n;
	char *e;
	char *d;
	char *p;
	char *q;
	char *dp;
	char *dq;
	char *qinv;
	dma_addr_t dma_n;
	dma_addr_t dma_e;
	dma_addr_t dma_d;
	dma_addr_t dma_p;
	dma_addr_t dma_q;
	dma_addr_t dma_dp;
	dma_addr_t dma_dq;
	dma_addr_t dma_qinv;
	unsigned int key_sz;
	bool crt_mode;
	struct qat_crypto_instance *inst;
} __packed __aligned(64);

struct qat_dh_input_params {
	union {
		struct {
			dma_addr_t b;
			dma_addr_t xa;
			dma_addr_t p;
		} in;
		struct {
			dma_addr_t xa;
			dma_addr_t p;
		} in_g2;
		u64 in_tab[8];
	};
} __packed __aligned(64);

struct qat_dh_output_params {
	union {
		dma_addr_t r;
		u64 out_tab[8];
	};
} __packed __aligned(64);

struct qat_dh_ctx {
	char *g;
	char *xa;
	char *p;
	dma_addr_t dma_g;
	dma_addr_t dma_xa;
	dma_addr_t dma_p;
	unsigned int p_size;
	bool g2;
	struct qat_crypto_instance *inst;
} __packed __aligned(64);

struct qat_asym_request {
	union {
		struct qat_rsa_input_params rsa;
		struct qat_dh_input_params dh;
	} in;
	union {
		struct qat_rsa_output_params rsa;
		struct qat_dh_output_params dh;
	} out;
	dma_addr_t phy_in;
	dma_addr_t phy_out;
	char *src_align;
	char *dst_align;
	struct icp_qat_fw_pke_request req;
	union {
		struct qat_rsa_ctx *rsa;
		struct qat_dh_ctx *dh;
	} ctx;
	union {
		struct akcipher_request *rsa;
		struct kpp_request *dh;
	} areq;
	int err;
	void (*cb)(struct icp_qat_fw_pke_resp *resp);
} __aligned(64);

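/*
 * Completion handler for DH requests. Invoked (via qat_alg_asym_callback)
 * when the firmware response arrives: it releases the DMA mappings set up
 * by qat_dh_compute_value(), copies the result back into the caller's dst
 * scatterlist if a bounce buffer was used, and completes the kpp request.
 */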
static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
{
	struct qat_asym_request *req = (void *)(__force long)resp->opaque;
	struct kpp_request *areq = req->areq.dh;
	struct device *dev = &GET_DEV(req->ctx.dh->inst->accel_dev);
	int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
				resp->pke_resp_hdr.comn_resp_flags);

	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;

	if (req->src_align)
		dma_free_coherent(dev, req->ctx.dh->p_size, req->src_align,
				  req->in.dh.in.b);
	else
		dma_unmap_single(dev, req->in.dh.in.b, req->ctx.dh->p_size,
				 DMA_TO_DEVICE);

	areq->dst_len = req->ctx.dh->p_size;
	if (req->dst_align) {
		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
					 areq->dst_len, 1);

		dma_free_coherent(dev, req->ctx.dh->p_size, req->dst_align,
				  req->out.dh.r);
	} else {
		dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
				 DMA_FROM_DEVICE);
	}

	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, req->phy_out,
			 sizeof(struct qat_dh_output_params),
			 DMA_TO_DEVICE);

	kpp_request_complete(areq, err);
}

#define PKE_DH_1536 0x390c1a49
#define PKE_DH_G2_1536 0x2e0b1a3e
#define PKE_DH_2048 0x4d0c1a60
#define PKE_DH_G2_2048 0x3e0b1a55
#define PKE_DH_3072 0x510c1a77
#define PKE_DH_G2_3072 0x3a0b1a6c
#define PKE_DH_4096 0x690c1a8e
#define PKE_DH_G2_4096 0x4a0b1a83

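/*
 * qat_dh_fn_id() maps the modulus length to the firmware PKE function ID
 * above; the G2 variants are used when the generator is 2. A return value
 * of 0 means the size is not supported by the firmware.
 */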
static unsigned long qat_dh_fn_id(unsigned int len, bool g2)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 1536:
		return g2 ? PKE_DH_G2_1536 : PKE_DH_1536;
	case 2048:
		return g2 ? PKE_DH_G2_2048 : PKE_DH_2048;
	case 3072:
		return g2 ? PKE_DH_G2_3072 : PKE_DH_3072;
	case 4096:
		return g2 ? PKE_DH_G2_4096 : PKE_DH_4096;
	default:
		return 0;
	}
}

static inline struct qat_dh_ctx *qat_dh_get_params(struct crypto_kpp *tfm)
{
	return kpp_tfm_ctx(tfm);
}

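/*
 * qat_dh_compute_value() backs both kpp operations: generate_public_key
 * (req->src == NULL, so the base is the generator g) and
 * compute_shared_secret (req->src carries the peer's public value). It
 * builds a flat firmware request, DMA-maps the operands and submits the
 * message on the PKE ring.
 */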
static int qat_dh_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_asym_request *qat_req =
			PTR_ALIGN(kpp_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	int ret, ctr = 0;
	int n_input_params = 0;

	if (unlikely(!ctx->xa))
		return -EINVAL;

	if (req->dst_len < ctx->p_size) {
		req->dst_len = ctx->p_size;
		return -EOVERFLOW;
	}
	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);

	msg->pke_hdr.cd_pars.func_id = qat_dh_fn_id(ctx->p_size,
						    !req->src && ctx->g2);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->cb = qat_dh_cb;
	qat_req->ctx.dh = ctx;
	qat_req->areq.dh = req;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

	/*
	 * If no source is provided use g as base
	 */
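	/*
	 * With no source and g == 2 the G2 firmware routine is selected
	 * above, so the in_g2 layout below carries only xa and p (two input
	 * parameters instead of three).
	 */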
	if (req->src) {
		qat_req->in.dh.in.xa = ctx->dma_xa;
		qat_req->in.dh.in.p = ctx->dma_p;
		n_input_params = 3;
	} else {
		if (ctx->g2) {
			qat_req->in.dh.in_g2.xa = ctx->dma_xa;
			qat_req->in.dh.in_g2.p = ctx->dma_p;
			n_input_params = 2;
		} else {
			qat_req->in.dh.in.b = ctx->dma_g;
			qat_req->in.dh.in.xa = ctx->dma_xa;
			qat_req->in.dh.in.p = ctx->dma_p;
			n_input_params = 3;
		}
	}

	ret = -ENOMEM;
	if (req->src) {
		/*
		 * src can be of any size in valid range, but HW expects it to
		 * be the same as modulo p so in case it is different we need
		 * to allocate a new buf and copy src data.
		 * In other case we just need to map the user provided buffer.
		 * Also need to make sure that it is in contiguous buffer.
		 */
		if (sg_is_last(req->src) && req->src_len == ctx->p_size) {
			qat_req->src_align = NULL;
			qat_req->in.dh.in.b = dma_map_single(dev,
							     sg_virt(req->src),
							     req->src_len,
							     DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(dev,
						       qat_req->in.dh.in.b)))
				return ret;

		} else {
			int shift = ctx->p_size - req->src_len;

			qat_req->src_align = dma_zalloc_coherent(dev,
								 ctx->p_size,
								 &qat_req->in.dh.in.b,
								 GFP_KERNEL);
			if (unlikely(!qat_req->src_align))
				return ret;

			scatterwalk_map_and_copy(qat_req->src_align + shift,
						 req->src, 0, req->src_len, 0);
		}
	}
	/*
	 * dst can be of any size in valid range, but HW expects it to be the
	 * same as modulo m so in case it is different we need to allocate a
	 * new buf and copy src data.
	 * In other case we just need to map the user provided buffer.
	 * Also need to make sure that it is in contiguous buffer.
	 */
	if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) {
		qat_req->dst_align = NULL;
		qat_req->out.dh.r = dma_map_single(dev, sg_virt(req->dst),
						   req->dst_len,
						   DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
			goto unmap_src;

	} else {
		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->p_size,
							 &qat_req->out.dh.r,
							 GFP_KERNEL);
		if (unlikely(!qat_req->dst_align))
			goto unmap_src;
	}

	qat_req->in.dh.in_tab[n_input_params] = 0;
	qat_req->out.dh.out_tab[1] = 0;
	/* Mapping in.in.b or in.in_g2.xa is the same */
	qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh.in.b,
					 sizeof(struct qat_dh_input_params),
					 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
		goto unmap_dst;

	qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh.r,
					  sizeof(struct qat_dh_output_params),
					  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap_in_params;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
	msg->input_param_count = n_input_params;
	msg->output_param_count = 1;

	do {
		ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
	} while (ret == -EBUSY && ctr++ < 100);

	if (!ret)
		return -EINPROGRESS;

	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_dh_output_params),
				 DMA_TO_DEVICE);
unmap_in_params:
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_dh_input_params),
				 DMA_TO_DEVICE);
unmap_dst:
	if (qat_req->dst_align)
		dma_free_coherent(dev, ctx->p_size, qat_req->dst_align,
				  qat_req->out.dh.r);
	else
		if (!dma_mapping_error(dev, qat_req->out.dh.r))
			dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
					 DMA_FROM_DEVICE);
unmap_src:
	if (req->src) {
		if (qat_req->src_align)
			dma_free_coherent(dev, ctx->p_size, qat_req->src_align,
					  qat_req->in.dh.in.b);
		else
			if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
				dma_unmap_single(dev, qat_req->in.dh.in.b,
						 ctx->p_size, DMA_TO_DEVICE);
	}
	return ret;
}

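/*
 * Only 1536/2048/3072/4096-bit groups are supported end to end; the length
 * check below mirrors the sizes that qat_dh_fn_id() can map to a firmware
 * function ID.
 */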
static int qat_dh_check_params_length(unsigned int p_len)
{
	switch (p_len) {
	case 1536:
	case 2048:
	case 3072:
	case 4096:
		return 0;
	}
	return -EINVAL;
}

static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);

	if (unlikely(!params->p || !params->g))
		return -EINVAL;

	if (qat_dh_check_params_length(params->p_size << 3))
		return -EINVAL;

	ctx->p_size = params->p_size;
	ctx->p = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
	if (!ctx->p)
		return -ENOMEM;
	memcpy(ctx->p, params->p, ctx->p_size);

	/* If g equals 2 don't copy it */
	if (params->g_size == 1 && *(char *)params->g == 0x02) {
		ctx->g2 = true;
		return 0;
	}

	ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
	if (!ctx->g) {
		dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
		return -ENOMEM;
	}
	memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
	       params->g_size);

	return 0;
}

static void qat_dh_clear_ctx(struct device *dev, struct qat_dh_ctx *ctx)
{
	if (ctx->g) {
		dma_free_coherent(dev, ctx->p_size, ctx->g, ctx->dma_g);
		ctx->g = NULL;
	}
	if (ctx->xa) {
		dma_free_coherent(dev, ctx->p_size, ctx->xa, ctx->dma_xa);
		ctx->xa = NULL;
	}
	if (ctx->p) {
		dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
		ctx->p = NULL;
	}
	ctx->p_size = 0;
	ctx->g2 = false;
}

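/*
 * set_secret decodes the packed DH key with crypto_dh_decode_key(), installs
 * p and g through qat_dh_set_params(), and then copies the private value xa
 * right-aligned into a p_size DMA-coherent buffer, as the firmware expects
 * all operands padded to the modulus size.
 */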
static int qat_dh_set_secret(struct crypto_kpp *tfm, void *buf,
			     unsigned int len)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
	struct dh params;
	int ret;

	if (crypto_dh_decode_key(buf, len, &params) < 0)
		return -EINVAL;

	/* Free old secret if any */
	qat_dh_clear_ctx(dev, ctx);

	ret = qat_dh_set_params(ctx, &params);
	if (ret < 0)
		return ret;

	ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
				      GFP_KERNEL);
	if (!ctx->xa) {
		qat_dh_clear_ctx(dev, ctx);
		return -ENOMEM;
	}
	memcpy(ctx->xa + (ctx->p_size - params.key_size), params.key,
	       params.key_size);

	return 0;
}

static int qat_dh_max_size(struct crypto_kpp *tfm)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);

	return ctx->p ? ctx->p_size : -EINVAL;
}

static int qat_dh_init_tfm(struct crypto_kpp *tfm)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct qat_crypto_instance *inst =
			qat_crypto_get_instance_node(get_current_node());

	if (!inst)
		return -EINVAL;

	ctx->p_size = 0;
	ctx->g2 = false;
	ctx->inst = inst;
	return 0;
}

static void qat_dh_exit_tfm(struct crypto_kpp *tfm)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);

	qat_dh_clear_ctx(dev, ctx);
	qat_crypto_put_instance(ctx->inst);
}

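/*
 * RSA half of the driver. qat_rsa_cb mirrors qat_dh_cb: it tears down the
 * DMA mappings made by qat_rsa_enc()/qat_rsa_dec(), copies the result out of
 * the bounce buffer if one was needed, and completes the akcipher request.
 */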
static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
{
	struct qat_asym_request *req = (void *)(__force long)resp->opaque;
	struct akcipher_request *areq = req->areq.rsa;
	struct device *dev = &GET_DEV(req->ctx.rsa->inst->accel_dev);
	int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
				resp->pke_resp_hdr.comn_resp_flags);

	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;

	if (req->src_align)
		dma_free_coherent(dev, req->ctx.rsa->key_sz, req->src_align,
				  req->in.rsa.enc.m);
	else
		dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
				 DMA_TO_DEVICE);

	areq->dst_len = req->ctx.rsa->key_sz;
	if (req->dst_align) {
		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
					 areq->dst_len, 1);

		dma_free_coherent(dev, req->ctx.rsa->key_sz, req->dst_align,
				  req->out.rsa.enc.c);
	} else {
		dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
				 DMA_FROM_DEVICE);
	}

	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, req->phy_out,
			 sizeof(struct qat_rsa_output_params),
			 DMA_TO_DEVICE);

	akcipher_request_complete(areq, err);
}

void qat_alg_asym_callback(void *_resp)
{
	struct icp_qat_fw_pke_resp *resp = _resp;
	struct qat_asym_request *areq = (void *)(__force long)resp->opaque;

	areq->cb(resp);
}

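/*
 * qat_alg_asym_callback above is the common response handler wired to the
 * transport layer; it recovers the originating request from the opaque field
 * and dispatches to the per-algorithm callback (qat_rsa_cb or qat_dh_cb).
 */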
#define PKE_RSA_EP_512 0x1c161b21
#define PKE_RSA_EP_1024 0x35111bf7
#define PKE_RSA_EP_1536 0x4d111cdc
#define PKE_RSA_EP_2048 0x6e111dba
#define PKE_RSA_EP_3072 0x7d111ea3
#define PKE_RSA_EP_4096 0xa5101f7e

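/*
 * qat_rsa_enc_fn_id() maps the key size to the firmware ID for the RSA
 * public-key operation (m^e mod n); a return of 0 means unsupported size.
 */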
static unsigned long qat_rsa_enc_fn_id(unsigned int len)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 512:
		return PKE_RSA_EP_512;
	case 1024:
		return PKE_RSA_EP_1024;
	case 1536:
		return PKE_RSA_EP_1536;
	case 2048:
		return PKE_RSA_EP_2048;
	case 3072:
		return PKE_RSA_EP_3072;
	case 4096:
		return PKE_RSA_EP_4096;
	default:
		return 0;
	}
}

#define PKE_RSA_DP1_512 0x1c161b3c
#define PKE_RSA_DP1_1024 0x35111c12
#define PKE_RSA_DP1_1536 0x4d111cf7
#define PKE_RSA_DP1_2048 0x6e111dda
#define PKE_RSA_DP1_3072 0x7d111ebe
#define PKE_RSA_DP1_4096 0xa5101f98

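/*
 * qat_rsa_dec_fn_id() selects the firmware ID for the non-CRT private-key
 * operation (c^d mod n).
 */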
static unsigned long qat_rsa_dec_fn_id(unsigned int len)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 512:
		return PKE_RSA_DP1_512;
	case 1024:
		return PKE_RSA_DP1_1024;
	case 1536:
		return PKE_RSA_DP1_1536;
	case 2048:
		return PKE_RSA_DP1_2048;
	case 3072:
		return PKE_RSA_DP1_3072;
	case 4096:
		return PKE_RSA_DP1_4096;
	default:
		return 0;
	}
}

#define PKE_RSA_DP2_512 0x1c131b57
#define PKE_RSA_DP2_1024 0x26131c2d
#define PKE_RSA_DP2_1536 0x45111d12
#define PKE_RSA_DP2_2048 0x59121dfa
#define PKE_RSA_DP2_3072 0x81121ed9
#define PKE_RSA_DP2_4096 0xb1111fb2

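/*
 * qat_rsa_dec_fn_id_crt() selects the firmware ID for the CRT private-key
 * operation using p, q, dp, dq and qinv.
 */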
static unsigned long qat_rsa_dec_fn_id_crt(unsigned int len)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 512:
		return PKE_RSA_DP2_512;
	case 1024:
		return PKE_RSA_DP2_1024;
	case 1536:
		return PKE_RSA_DP2_1536;
	case 2048:
		return PKE_RSA_DP2_2048;
	case 3072:
		return PKE_RSA_DP2_3072;
	case 4096:
		return PKE_RSA_DP2_4096;
	default:
		return 0;
	}
}

static int qat_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_asym_request *qat_req =
			PTR_ALIGN(akcipher_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	int ret, ctr = 0;

	if (unlikely(!ctx->n || !ctx->e))
		return -EINVAL;

	if (req->dst_len < ctx->key_sz) {
		req->dst_len = ctx->key_sz;
		return -EOVERFLOW;
	}
	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
	msg->pke_hdr.cd_pars.func_id = qat_rsa_enc_fn_id(ctx->key_sz);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->cb = qat_rsa_cb;
	qat_req->ctx.rsa = ctx;
	qat_req->areq.rsa = req;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

	qat_req->in.rsa.enc.e = ctx->dma_e;
	qat_req->in.rsa.enc.n = ctx->dma_n;
	ret = -ENOMEM;

	/*
	 * src can be of any size in valid range, but HW expects it to be the
	 * same as modulo n so in case it is different we need to allocate a
	 * new buf and copy src data.
	 * In other case we just need to map the user provided buffer.
	 * Also need to make sure that it is in contiguous buffer.
	 */
	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
		qat_req->src_align = NULL;
		qat_req->in.rsa.enc.m = dma_map_single(dev, sg_virt(req->src),
						       req->src_len,
						       DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
			return ret;

	} else {
		int shift = ctx->key_sz - req->src_len;

		qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
							 &qat_req->in.rsa.enc.m,
							 GFP_KERNEL);
		if (unlikely(!qat_req->src_align))
			return ret;

		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
					 0, req->src_len, 0);
	}
	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
		qat_req->dst_align = NULL;
		qat_req->out.rsa.enc.c = dma_map_single(dev, sg_virt(req->dst),
							req->dst_len,
							DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
			goto unmap_src;

	} else {
		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
							 &qat_req->out.rsa.enc.c,
							 GFP_KERNEL);
		if (unlikely(!qat_req->dst_align))
			goto unmap_src;
	}

	qat_req->in.rsa.in_tab[3] = 0;
	qat_req->out.rsa.out_tab[1] = 0;
	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m,
					 sizeof(struct qat_rsa_input_params),
					 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
		goto unmap_dst;

	qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.enc.c,
					  sizeof(struct qat_rsa_output_params),
					  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap_in_params;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
	msg->input_param_count = 3;
	msg->output_param_count = 1;

	do {
		ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
	} while (ret == -EBUSY && ctr++ < 100);

	if (!ret)
		return -EINPROGRESS;

	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_rsa_output_params),
				 DMA_TO_DEVICE);
unmap_in_params:
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_rsa_input_params),
				 DMA_TO_DEVICE);
unmap_dst:
	if (qat_req->dst_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
				  qat_req->out.rsa.enc.c);
	else
		if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
			dma_unmap_single(dev, qat_req->out.rsa.enc.c,
					 ctx->key_sz, DMA_FROM_DEVICE);
unmap_src:
	if (qat_req->src_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
				  qat_req->in.rsa.enc.m);
	else
		if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
			dma_unmap_single(dev, qat_req->in.rsa.enc.m,
					 ctx->key_sz, DMA_TO_DEVICE);
	return ret;
}

static int qat_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_asym_request *qat_req =
			PTR_ALIGN(akcipher_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	int ret, ctr = 0;

	if (unlikely(!ctx->n || !ctx->d))
		return -EINVAL;

	if (req->dst_len < ctx->key_sz) {
		req->dst_len = ctx->key_sz;
		return -EOVERFLOW;
	}
	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
	msg->pke_hdr.cd_pars.func_id = ctx->crt_mode ?
		qat_rsa_dec_fn_id_crt(ctx->key_sz) :
		qat_rsa_dec_fn_id(ctx->key_sz);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->cb = qat_rsa_cb;
	qat_req->ctx.rsa = ctx;
	qat_req->areq.rsa = req;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

	if (ctx->crt_mode) {
		qat_req->in.rsa.dec_crt.p = ctx->dma_p;
		qat_req->in.rsa.dec_crt.q = ctx->dma_q;
		qat_req->in.rsa.dec_crt.dp = ctx->dma_dp;
		qat_req->in.rsa.dec_crt.dq = ctx->dma_dq;
		qat_req->in.rsa.dec_crt.qinv = ctx->dma_qinv;
	} else {
		qat_req->in.rsa.dec.d = ctx->dma_d;
		qat_req->in.rsa.dec.n = ctx->dma_n;
	}
	ret = -ENOMEM;

	/*
	 * src can be of any size in valid range, but HW expects it to be the
	 * same as modulo n so in case it is different we need to allocate a
	 * new buf and copy src data.
	 * In other case we just need to map the user provided buffer.
	 * Also need to make sure that it is in contiguous buffer.
	 */
	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
		qat_req->src_align = NULL;
		qat_req->in.rsa.dec.c = dma_map_single(dev, sg_virt(req->src),
						       req->src_len,
						       DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
			return ret;

	} else {
		int shift = ctx->key_sz - req->src_len;

		qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
							 &qat_req->in.rsa.dec.c,
							 GFP_KERNEL);
		if (unlikely(!qat_req->src_align))
			return ret;

		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
					 0, req->src_len, 0);
	}
	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
		qat_req->dst_align = NULL;
		qat_req->out.rsa.dec.m = dma_map_single(dev, sg_virt(req->dst),
							req->dst_len,
							DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
			goto unmap_src;

	} else {
		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
							 &qat_req->out.rsa.dec.m,
							 GFP_KERNEL);
		if (unlikely(!qat_req->dst_align))
			goto unmap_src;
	}

	if (ctx->crt_mode)
		qat_req->in.rsa.in_tab[6] = 0;
	else
		qat_req->in.rsa.in_tab[3] = 0;
	qat_req->out.rsa.out_tab[1] = 0;
	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.dec.c,
					 sizeof(struct qat_rsa_input_params),
					 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
		goto unmap_dst;

	qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.dec.m,
					  sizeof(struct qat_rsa_output_params),
					  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap_in_params;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
	if (ctx->crt_mode)
		msg->input_param_count = 6;
	else
		msg->input_param_count = 3;

	msg->output_param_count = 1;

	do {
		ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
	} while (ret == -EBUSY && ctr++ < 100);

	if (!ret)
		return -EINPROGRESS;

	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_rsa_output_params),
				 DMA_TO_DEVICE);
unmap_in_params:
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_rsa_input_params),
				 DMA_TO_DEVICE);
unmap_dst:
	if (qat_req->dst_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
				  qat_req->out.rsa.dec.m);
	else
		if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
			dma_unmap_single(dev, qat_req->out.rsa.dec.m,
					 ctx->key_sz, DMA_FROM_DEVICE);
unmap_src:
	if (qat_req->src_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
				  qat_req->in.rsa.dec.c);
	else
		if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
			dma_unmap_single(dev, qat_req->in.rsa.dec.c,
					 ctx->key_sz, DMA_TO_DEVICE);
	return ret;
}

int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;
	int ret;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	ctx->key_sz = vlen;
	ret = -EINVAL;
	/* invalid key size provided */
	if (!qat_rsa_enc_fn_id(ctx->key_sz))
		goto err;

	ret = -ENOMEM;
	ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
	if (!ctx->n)
		goto err;

	memcpy(ctx->n, ptr, ctx->key_sz);
	return 0;
err:
	ctx->key_sz = 0;
	ctx->n = NULL;
	return ret;
}

int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
		ctx->e = NULL;
		return -EINVAL;
	}

	ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
	if (!ctx->e)
		return -ENOMEM;

	memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
	return 0;
}

int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;
	int ret;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	ret = -EINVAL;
	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		goto err;

	ret = -ENOMEM;
	ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
	if (!ctx->d)
		goto err;

	memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
	return 0;
err:
	ctx->d = NULL;
	return ret;
}

static void qat_rsa_drop_leading_zeros(const char **ptr, unsigned int *len)
{
	while (!**ptr && *len) {
		(*ptr)++;
		(*len)--;
	}
}

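/*
 * qat_rsa_setkey_crt copies p, q, dp, dq and qinv into half-key-size DMA
 * buffers and enables CRT mode; if any component is missing or an allocation
 * fails it unwinds and leaves crt_mode disabled so decryption falls back to
 * the plain d/n path.
 */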
static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr;
	unsigned int len;
	unsigned int half_key_sz = ctx->key_sz / 2;

	/* p */
	ptr = rsa_key->p;
	len = rsa_key->p_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto err;
	ctx->p = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
	if (!ctx->p)
		goto err;
	memcpy(ctx->p + (half_key_sz - len), ptr, len);

	/* q */
	ptr = rsa_key->q;
	len = rsa_key->q_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto free_p;
	ctx->q = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
	if (!ctx->q)
		goto free_p;
	memcpy(ctx->q + (half_key_sz - len), ptr, len);

	/* dp */
	ptr = rsa_key->dp;
	len = rsa_key->dp_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto free_q;
	ctx->dp = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dp,
				      GFP_KERNEL);
	if (!ctx->dp)
		goto free_q;
	memcpy(ctx->dp + (half_key_sz - len), ptr, len);

	/* dq */
	ptr = rsa_key->dq;
	len = rsa_key->dq_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto free_dp;
	ctx->dq = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dq,
				      GFP_KERNEL);
	if (!ctx->dq)
		goto free_dp;
	memcpy(ctx->dq + (half_key_sz - len), ptr, len);

	/* qinv */
	ptr = rsa_key->qinv;
	len = rsa_key->qinv_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto free_dq;
	ctx->qinv = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
					GFP_KERNEL);
	if (!ctx->qinv)
		goto free_dq;
	memcpy(ctx->qinv + (half_key_sz - len), ptr, len);

	ctx->crt_mode = true;
	return;

free_dq:
	memset(ctx->dq, '\0', half_key_sz);
	dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
	ctx->dq = NULL;
free_dp:
	memset(ctx->dp, '\0', half_key_sz);
	dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
	ctx->dp = NULL;
free_q:
	memset(ctx->q, '\0', half_key_sz);
	dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
	ctx->q = NULL;
free_p:
	memset(ctx->p, '\0', half_key_sz);
	dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
	ctx->p = NULL;
err:
	ctx->crt_mode = false;
}

static void qat_rsa_clear_ctx(struct device *dev, struct qat_rsa_ctx *ctx)
{
	unsigned int half_key_sz = ctx->key_sz / 2;

	/* Free the old key if any */
	if (ctx->n)
		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
	if (ctx->e)
		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
	if (ctx->d) {
		memset(ctx->d, '\0', ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
	}
	if (ctx->p) {
		memset(ctx->p, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
	}
	if (ctx->q) {
		memset(ctx->q, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
	}
	if (ctx->dp) {
		memset(ctx->dp, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
	}
	if (ctx->dq) {
		memset(ctx->dq, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
	}
	if (ctx->qinv) {
		memset(ctx->qinv, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->qinv, ctx->dma_qinv);
	}

	ctx->n = NULL;
	ctx->e = NULL;
	ctx->d = NULL;
	ctx->p = NULL;
	ctx->q = NULL;
	ctx->dp = NULL;
	ctx->dq = NULL;
	ctx->qinv = NULL;
	ctx->crt_mode = false;
	ctx->key_sz = 0;
}

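/*
 * Common setkey path for both public and private keys: clear any previous
 * key material, parse the key, install n/e (and d plus the CRT components
 * for private keys), and validate that the mandatory parts are present.
 */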
static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
			  unsigned int keylen, bool private)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
	struct rsa_key rsa_key;
	int ret;

	qat_rsa_clear_ctx(dev, ctx);

	if (private)
		ret = rsa_parse_priv_key(&rsa_key, key, keylen);
	else
		ret = rsa_parse_pub_key(&rsa_key, key, keylen);
	if (ret < 0)
		goto free;

	ret = qat_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz);
	if (ret < 0)
		goto free;
	ret = qat_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
	if (ret < 0)
		goto free;
	if (private) {
		ret = qat_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
		if (ret < 0)
			goto free;
		qat_rsa_setkey_crt(ctx, &rsa_key);
	}

	if (!ctx->n || !ctx->e) {
		/* invalid key provided */
		ret = -EINVAL;
		goto free;
	}
	if (private && !ctx->d) {
		/* invalid private key provided */
		ret = -EINVAL;
		goto free;
	}

	return 0;
free:
	qat_rsa_clear_ctx(dev, ctx);
	return ret;
}

static int qat_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
			     unsigned int keylen)
{
	return qat_rsa_setkey(tfm, key, keylen, false);
}

static int qat_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
			      unsigned int keylen)
{
	return qat_rsa_setkey(tfm, key, keylen, true);
}

static int qat_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	return (ctx->n) ? ctx->key_sz : -EINVAL;
}

static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst =
			qat_crypto_get_instance_node(get_current_node());

	if (!inst)
		return -EINVAL;

	ctx->key_sz = 0;
	ctx->inst = inst;
	return 0;
}

static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);

	if (ctx->n)
		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
	if (ctx->e)
		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
	if (ctx->d) {
		memset(ctx->d, '\0', ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
	}
	qat_crypto_put_instance(ctx->inst);
	ctx->n = NULL;
	ctx->e = NULL;
	ctx->d = NULL;
}

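/*
 * Both algorithm descriptors below reserve sizeof(struct qat_asym_request)
 * + 64 bytes of request context so that the per-request state can be
 * 64-byte aligned with PTR_ALIGN() in the request handlers.
 */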
static struct akcipher_alg rsa = {
	.encrypt = qat_rsa_enc,
	.decrypt = qat_rsa_dec,
	.sign = qat_rsa_dec,
	.verify = qat_rsa_enc,
	.set_pub_key = qat_rsa_setpubkey,
	.set_priv_key = qat_rsa_setprivkey,
	.max_size = qat_rsa_max_size,
	.init = qat_rsa_init_tfm,
	.exit = qat_rsa_exit_tfm,
	.reqsize = sizeof(struct qat_asym_request) + 64,
	.base = {
		.cra_name = "rsa",
		.cra_driver_name = "qat-rsa",
		.cra_priority = 1000,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct qat_rsa_ctx),
	},
};

static struct kpp_alg dh = {
	.set_secret = qat_dh_set_secret,
	.generate_public_key = qat_dh_compute_value,
	.compute_shared_secret = qat_dh_compute_value,
	.max_size = qat_dh_max_size,
	.init = qat_dh_init_tfm,
	.exit = qat_dh_exit_tfm,
	.reqsize = sizeof(struct qat_asym_request) + 64,
	.base = {
		.cra_name = "dh",
		.cra_driver_name = "qat-dh",
		.cra_priority = 1000,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct qat_dh_ctx),
	},
};

int qat_asym_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++active_devs == 1) {
		rsa.base.cra_flags = 0;
		ret = crypto_register_akcipher(&rsa);
		if (ret)
			goto unlock;
		ret = crypto_register_kpp(&dh);
	}
unlock:
	mutex_unlock(&algs_lock);
	return ret;
}

void qat_asym_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs == 0) {
		crypto_unregister_akcipher(&rsa);
		crypto_unregister_kpp(&dh);
	}
	mutex_unlock(&algs_lock);
}