/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <crypto/internal/rsa.h>
#include <crypto/internal/akcipher.h>
#include <crypto/akcipher.h>
#include <crypto/kpp.h>
#include <crypto/internal/kpp.h>
#include <crypto/dh.h>
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <crypto/scatterwalk.h>
#include "icp_qat_fw_pke.h"
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;
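/*
 * The structures below describe the flat, packed parameter blocks that are
 * DMA-mapped and handed to the PKE (public key engine) firmware. They are
 * 64-byte aligned because the device reads them directly.
 */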
struct qat_rsa_input_params {
        union {
                struct { dma_addr_t m, e, n; } enc;
                struct { dma_addr_t c, d, n; } dec;
                struct { dma_addr_t c, p, q, dp, dq, qinv; } dec_crt;
                u64 in_tab[8];
        };
} __packed __aligned(64);
struct qat_rsa_output_params {
        union {
                struct { dma_addr_t c; } enc;
                struct { dma_addr_t m; } dec;
                u64 out_tab[8];
        };
} __packed __aligned(64);
struct qat_rsa_ctx {
        char *n, *e, *d, *p, *q, *dp, *dq, *qinv;
        dma_addr_t dma_n, dma_e, dma_d, dma_p, dma_q, dma_dp, dma_dq, dma_qinv;
        unsigned int key_sz;
        bool crt_mode;
        struct qat_crypto_instance *inst;
} __packed __aligned(64);
struct qat_dh_input_params {
        union {
                struct { dma_addr_t b, xa, p; } in;
                struct { dma_addr_t xa, p; } in_g2;
                u64 in_tab[8];
        };
} __packed __aligned(64);
struct qat_dh_output_params {
        union {
                dma_addr_t r;
                u64 out_tab[8];
        };
} __packed __aligned(64);
struct qat_dh_ctx {
        char *g, *xa, *p;
        dma_addr_t dma_g, dma_xa, dma_p;
        unsigned int p_size;
        bool g2;
        struct qat_crypto_instance *inst;
} __packed __aligned(64);
struct qat_asym_request {
        union {
                struct qat_rsa_input_params rsa;
                struct qat_dh_input_params dh;
        } in;
        union {
                struct qat_rsa_output_params rsa;
                struct qat_dh_output_params dh;
        } out;
        dma_addr_t phy_in;
        dma_addr_t phy_out;
        char *src_align;
        char *dst_align;
        struct icp_qat_fw_pke_request req;
        union {
                struct qat_rsa_ctx *rsa;
                struct qat_dh_ctx *dh;
        } ctx;
        union {
                struct akcipher_request *rsa;
                struct kpp_request *dh;
        } areq;
        void (*cb)(struct icp_qat_fw_pke_resp *resp);
} __aligned(64);
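/*
 * qat_dh_cb() - completion callback for a Diffie-Hellman request. It checks
 * the firmware status flags, releases the per-request DMA mappings and
 * bounce buffers, copies the result back into the caller's scatterlist when
 * a bounce buffer was used, and completes the kpp request.
 */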
static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
{
        struct qat_asym_request *req = (void *)(__force long)resp->opaque;
        struct kpp_request *areq = req->areq.dh;
        struct device *dev = &GET_DEV(req->ctx.dh->inst->accel_dev);
        int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
                                resp->pke_resp_hdr.comn_resp_flags);

        err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;

        /* Release the source buffer: free a bounce buffer or unmap a
         * directly mapped user buffer. */
        if (areq->src) {
                if (req->src_align)
                        dma_free_coherent(dev, req->ctx.dh->p_size,
                                          req->src_align, req->in.dh.in.b);
                else
                        dma_unmap_single(dev, req->in.dh.in.b,
                                         req->ctx.dh->p_size, DMA_TO_DEVICE);
        }

        areq->dst_len = req->ctx.dh->p_size;
        /* Copy the result back to the caller's scatterlist if a bounce
         * buffer was used, then release the destination mapping. */
        if (req->dst_align) {
                scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
                                         areq->dst_len, 1);

                dma_free_coherent(dev, req->ctx.dh->p_size, req->dst_align,
                                  req->out.dh.r);
        } else {
                dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
                                 DMA_FROM_DEVICE);
        }

        dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
                         DMA_TO_DEVICE);
        dma_unmap_single(dev, req->phy_out,
                         sizeof(struct qat_dh_output_params),
                         DMA_TO_DEVICE);

        kpp_request_complete(areq, err);
}
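/*
 * PKE firmware function IDs for the DH modexp service, one per supported
 * modulus size, with dedicated variants for the common generator g = 2.
 */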
#define PKE_DH_1536 0x390c1a49
#define PKE_DH_G2_1536 0x2e0b1a3e
#define PKE_DH_2048 0x4d0c1a60
#define PKE_DH_G2_2048 0x3e0b1a55
#define PKE_DH_3072 0x510c1a77
#define PKE_DH_G2_3072 0x3a0b1a6c
#define PKE_DH_4096 0x690c1a8e
#define PKE_DH_G2_4096 0x4a0b1a83
static unsigned long qat_dh_fn_id(unsigned int len, bool g2)
{
        unsigned int bitslen = len << 3;

        switch (bitslen) {
        case 1536:
                return g2 ? PKE_DH_G2_1536 : PKE_DH_1536;
        case 2048:
                return g2 ? PKE_DH_G2_2048 : PKE_DH_2048;
        case 3072:
                return g2 ? PKE_DH_G2_3072 : PKE_DH_3072;
        case 4096:
                return g2 ? PKE_DH_G2_4096 : PKE_DH_4096;
        default:
                return 0;
        }
}
static inline struct qat_dh_ctx *qat_dh_get_params(struct crypto_kpp *tfm)
{
        return kpp_tfm_ctx(tfm);
}
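/*
 * qat_dh_compute_value() - build and post a single PKE request. The firmware
 * function ID is chosen from the modulus size, source and destination are
 * either mapped directly (single, full-size scatterlist entry) or staged
 * through DMA-coherent bounce buffers, the input/output parameter tables are
 * mapped, and the message is sent to the instance's pke_tx ring. On success
 * the call returns -EINPROGRESS and qat_dh_cb() performs the cleanup;
 * otherwise all mappings are unwound here.
 */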
static int qat_dh_compute_value(struct kpp_request *req)
{
        struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
        struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
        struct qat_crypto_instance *inst = ctx->inst;
        struct device *dev = &GET_DEV(inst->accel_dev);
        struct qat_asym_request *qat_req =
                        PTR_ALIGN(kpp_request_ctx(req), 64);
        struct icp_qat_fw_pke_request *msg = &qat_req->req;
        int ret, ctr = 0;
        int n_input_params = 0;

        if (unlikely(!ctx->xa))
                return -EINVAL;

        if (req->dst_len < ctx->p_size) {
                req->dst_len = ctx->p_size;
                return -EOVERFLOW;
        }
        memset(msg, '\0', sizeof(*msg));
        ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
                                          ICP_QAT_FW_COMN_REQ_FLAG_SET);

        msg->pke_hdr.cd_pars.func_id = qat_dh_fn_id(ctx->p_size,
                                                    !req->src && ctx->g2);
        if (unlikely(!msg->pke_hdr.cd_pars.func_id))
                return -EINVAL;

        qat_req->cb = qat_dh_cb;
        qat_req->ctx.dh = ctx;
        qat_req->areq.dh = req;
        msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
        msg->pke_hdr.comn_req_flags =
                ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
                                            QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

        /*
         * If no source is provided use g as base
         */
        if (req->src) {
                qat_req->in.dh.in.xa = ctx->dma_xa;
                qat_req->in.dh.in.p = ctx->dma_p;
                n_input_params = 3;
        } else {
                if (ctx->g2) {
                        qat_req->in.dh.in_g2.xa = ctx->dma_xa;
                        qat_req->in.dh.in_g2.p = ctx->dma_p;
                        n_input_params = 2;
                } else {
                        qat_req->in.dh.in.b = ctx->dma_g;
                        qat_req->in.dh.in.xa = ctx->dma_xa;
                        qat_req->in.dh.in.p = ctx->dma_p;
                        n_input_params = 3;
                }
        }

        ret = -ENOMEM;
        if (req->src) {
                /*
                 * src can be of any size in valid range, but HW expects it to
                 * be the same as modulo p so in case it is different we need
                 * to allocate a new buf and copy src data.
                 * In other case we just need to map the user provided buffer.
                 * Also need to make sure that it is in contiguous buffer.
                 */
                if (sg_is_last(req->src) && req->src_len == ctx->p_size) {
                        qat_req->src_align = NULL;
                        qat_req->in.dh.in.b = dma_map_single(dev,
                                                             sg_virt(req->src),
                                                             req->src_len,
                                                             DMA_TO_DEVICE);
                        if (unlikely(dma_mapping_error(dev,
                                                       qat_req->in.dh.in.b)))
                                return ret;

                } else {
                        int shift = ctx->p_size - req->src_len;

                        qat_req->src_align = dma_zalloc_coherent(dev,
                                                                 ctx->p_size,
                                                                 &qat_req->in.dh.in.b,
                                                                 GFP_KERNEL);
                        if (unlikely(!qat_req->src_align))
                                return ret;

                        scatterwalk_map_and_copy(qat_req->src_align + shift,
                                                 req->src, 0, req->src_len, 0);
                }
        }
        /*
         * dst can be of any size in valid range, but HW expects it to be the
         * same as modulo m so in case it is different we need to allocate a
         * new buf and copy src data.
         * In other case we just need to map the user provided buffer.
         * Also need to make sure that it is in contiguous buffer.
         */
        if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) {
                qat_req->dst_align = NULL;
                qat_req->out.dh.r = dma_map_single(dev, sg_virt(req->dst),
                                                   req->dst_len,
                                                   DMA_FROM_DEVICE);

                if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
                        goto unmap_src;

        } else {
                qat_req->dst_align = dma_zalloc_coherent(dev, ctx->p_size,
                                                         &qat_req->out.dh.r,
                                                         GFP_KERNEL);
                if (unlikely(!qat_req->dst_align))
                        goto unmap_src;
        }

        qat_req->in.dh.in_tab[n_input_params] = 0;
        qat_req->out.dh.out_tab[1] = 0;
        /* Mapping in.in.b or in.in_g2.xa is the same */
        qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh.in.b,
                                         sizeof(struct qat_dh_input_params),
                                         DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
                goto unmap_dst;

        qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh.r,
                                          sizeof(struct qat_dh_output_params),
                                          DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
                goto unmap_in_params;

        msg->pke_mid.src_data_addr = qat_req->phy_in;
        msg->pke_mid.dest_data_addr = qat_req->phy_out;
        msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
        msg->input_param_count = n_input_params;
        msg->output_param_count = 1;

        /* Retry a bounded number of times if the transport ring is full */
        do {
                ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
        } while (ret == -EBUSY && ctr++ < 100);

        if (!ret)
                return -EINPROGRESS;

        if (!dma_mapping_error(dev, qat_req->phy_out))
                dma_unmap_single(dev, qat_req->phy_out,
                                 sizeof(struct qat_dh_output_params),
                                 DMA_TO_DEVICE);
unmap_in_params:
        if (!dma_mapping_error(dev, qat_req->phy_in))
                dma_unmap_single(dev, qat_req->phy_in,
                                 sizeof(struct qat_dh_input_params),
                                 DMA_TO_DEVICE);
unmap_dst:
        if (qat_req->dst_align)
                dma_free_coherent(dev, ctx->p_size, qat_req->dst_align,
                                  qat_req->out.dh.r);
        else
                if (!dma_mapping_error(dev, qat_req->out.dh.r))
                        dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
                                         DMA_FROM_DEVICE);
unmap_src:
        if (req->src) {
                if (qat_req->src_align)
                        dma_free_coherent(dev, ctx->p_size, qat_req->src_align,
                                          qat_req->in.dh.in.b);
                else
                        if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
                                dma_unmap_single(dev, qat_req->in.dh.in.b,
                                                 ctx->p_size, DMA_TO_DEVICE);
        }
        return ret;
}
static int qat_dh_check_params_length(unsigned int p_len)
{
        /* Only modulus sizes with firmware support are accepted. */
        switch (p_len) {
        case 1536:
        case 2048:
        case 3072:
        case 4096:
                return 0;
        }
        return -EINVAL;
}
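/*
 * qat_dh_set_params() - copy the DH domain parameters into DMA-coherent
 * buffers: p is copied as-is and g is right-aligned in a p_size buffer.
 * A generator of 2 is not copied at all; it is only flagged via ctx->g2 so
 * that the dedicated G2 firmware functions can be used.
 */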
static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
{
        struct qat_crypto_instance *inst = ctx->inst;
        struct device *dev = &GET_DEV(inst->accel_dev);

        if (qat_dh_check_params_length(params->p_size << 3))
                return -EINVAL;

        ctx->p_size = params->p_size;
        ctx->p = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
        if (!ctx->p)
                return -ENOMEM;
        memcpy(ctx->p, params->p, ctx->p_size);

        /* If g equals 2 don't copy it */
        if (params->g_size == 1 && *(char *)params->g == 0x02) {
                ctx->g2 = true;
                return 0;
        }

        ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
        if (!ctx->g)
                return -ENOMEM;
        memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
               params->g_size);

        return 0;
}
static void qat_dh_clear_ctx(struct device *dev, struct qat_dh_ctx *ctx)
{
        if (ctx->g) {
                dma_free_coherent(dev, ctx->p_size, ctx->g, ctx->dma_g);
                ctx->g = NULL;
        }
        if (ctx->xa) {
                dma_free_coherent(dev, ctx->p_size, ctx->xa, ctx->dma_xa);
                ctx->xa = NULL;
        }
        if (ctx->p) {
                dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
                ctx->p = NULL;
        }
        ctx->p_size = 0;
        ctx->g2 = false;
}
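/*
 * qat_dh_set_secret() - decode the packed DH key, drop any previous state,
 * install the domain parameters and copy the private value xa right-aligned
 * into a p_size DMA-coherent buffer.
 */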
static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
                             unsigned int len)
{
        struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
        struct device *dev = &GET_DEV(ctx->inst->accel_dev);
        struct dh params;
        int ret;

        if (crypto_dh_decode_key(buf, len, &params) < 0)
                return -EINVAL;

        /* Free old secret if any */
        qat_dh_clear_ctx(dev, ctx);

        ret = qat_dh_set_params(ctx, &params);
        if (ret < 0)
                goto err_clear_ctx;

        ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
                                      GFP_KERNEL);
        if (!ctx->xa) {
                ret = -ENOMEM;
                goto err_clear_ctx;
        }
        memcpy(ctx->xa + (ctx->p_size - params.key_size), params.key,
               params.key_size);

        return 0;

err_clear_ctx:
        qat_dh_clear_ctx(dev, ctx);
        return ret;
}
static unsigned int qat_dh_max_size(struct crypto_kpp *tfm)
{
        struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);

        return ctx->p_size;
}
static int qat_dh_init_tfm(struct crypto_kpp *tfm)
{
        struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
        struct qat_crypto_instance *inst =
                        qat_crypto_get_instance_node(get_current_node());

        if (!inst)
                return -EINVAL;

        ctx->p_size = 0;
        ctx->g2 = false;
        ctx->inst = inst;
        return 0;
}
static void qat_dh_exit_tfm(struct crypto_kpp *tfm)
{
        struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
        struct device *dev = &GET_DEV(ctx->inst->accel_dev);

        qat_dh_clear_ctx(dev, ctx);
        qat_crypto_put_instance(ctx->inst);
}
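/*
 * qat_rsa_cb() - RSA counterpart of qat_dh_cb(): check the firmware status,
 * release the per-request DMA mappings and bounce buffers, copy the result
 * back to the caller's scatterlist and complete the akcipher request.
 */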
static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
{
        struct qat_asym_request *req = (void *)(__force long)resp->opaque;
        struct akcipher_request *areq = req->areq.rsa;
        struct device *dev = &GET_DEV(req->ctx.rsa->inst->accel_dev);
        int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
                                resp->pke_resp_hdr.comn_resp_flags);

        err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;

        if (req->src_align)
                dma_free_coherent(dev, req->ctx.rsa->key_sz, req->src_align,
                                  req->in.rsa.enc.m);
        else
                dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
                                 DMA_TO_DEVICE);

        areq->dst_len = req->ctx.rsa->key_sz;
        if (req->dst_align) {
                scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
                                         areq->dst_len, 1);

                dma_free_coherent(dev, req->ctx.rsa->key_sz, req->dst_align,
                                  req->out.rsa.enc.c);
        } else {
                dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
                                 DMA_FROM_DEVICE);
        }

        dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
                         DMA_TO_DEVICE);
        dma_unmap_single(dev, req->phy_out,
                         sizeof(struct qat_rsa_output_params),
                         DMA_TO_DEVICE);

        akcipher_request_complete(areq, err);
}
void qat_alg_asym_callback(void *_resp)
{
        struct icp_qat_fw_pke_resp *resp = _resp;
        struct qat_asym_request *areq = (void *)(__force long)resp->opaque;

        areq->cb(resp);
}
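/*
 * The following groups of constants are PKE firmware function IDs for RSA:
 * EP is the public-key operation (encrypt/verify), DP1 is the private-key
 * operation using (d, n), and DP2 is the private-key operation using the
 * CRT form of the key.
 */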
#define PKE_RSA_EP_512 0x1c161b21
#define PKE_RSA_EP_1024 0x35111bf7
#define PKE_RSA_EP_1536 0x4d111cdc
#define PKE_RSA_EP_2048 0x6e111dba
#define PKE_RSA_EP_3072 0x7d111ea3
#define PKE_RSA_EP_4096 0xa5101f7e
static unsigned long qat_rsa_enc_fn_id(unsigned int len)
{
        unsigned int bitslen = len << 3;

        switch (bitslen) {
        case 512:
                return PKE_RSA_EP_512;
        case 1024:
                return PKE_RSA_EP_1024;
        case 1536:
                return PKE_RSA_EP_1536;
        case 2048:
                return PKE_RSA_EP_2048;
        case 3072:
                return PKE_RSA_EP_3072;
        case 4096:
                return PKE_RSA_EP_4096;
        default:
                return 0;
        }
}
#define PKE_RSA_DP1_512 0x1c161b3c
#define PKE_RSA_DP1_1024 0x35111c12
#define PKE_RSA_DP1_1536 0x4d111cf7
#define PKE_RSA_DP1_2048 0x6e111dda
#define PKE_RSA_DP1_3072 0x7d111ebe
#define PKE_RSA_DP1_4096 0xa5101f98
static unsigned long qat_rsa_dec_fn_id(unsigned int len)
{
        unsigned int bitslen = len << 3;

        switch (bitslen) {
        case 512:
                return PKE_RSA_DP1_512;
        case 1024:
                return PKE_RSA_DP1_1024;
        case 1536:
                return PKE_RSA_DP1_1536;
        case 2048:
                return PKE_RSA_DP1_2048;
        case 3072:
                return PKE_RSA_DP1_3072;
        case 4096:
                return PKE_RSA_DP1_4096;
        default:
                return 0;
        }
}
#define PKE_RSA_DP2_512 0x1c131b57
#define PKE_RSA_DP2_1024 0x26131c2d
#define PKE_RSA_DP2_1536 0x45111d12
#define PKE_RSA_DP2_2048 0x59121dfa
#define PKE_RSA_DP2_3072 0x81121ed9
#define PKE_RSA_DP2_4096 0xb1111fb2
static unsigned long qat_rsa_dec_fn_id_crt(unsigned int len)
{
        unsigned int bitslen = len << 3;

        switch (bitslen) {
        case 512:
                return PKE_RSA_DP2_512;
        case 1024:
                return PKE_RSA_DP2_1024;
        case 1536:
                return PKE_RSA_DP2_1536;
        case 2048:
                return PKE_RSA_DP2_2048;
        case 3072:
                return PKE_RSA_DP2_3072;
        case 4096:
                return PKE_RSA_DP2_4096;
        default:
                return 0;
        }
}
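/*
 * qat_rsa_enc() - post the RSA public-key operation (m^e mod n). The input
 * parameter table carries m, e and n; the single output parameter is the
 * ciphertext c. Buffer handling mirrors qat_dh_compute_value(): map in place
 * when possible, otherwise bounce through a DMA-coherent buffer.
 */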
static int qat_rsa_enc(struct akcipher_request *req)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct qat_crypto_instance *inst = ctx->inst;
        struct device *dev = &GET_DEV(inst->accel_dev);
        struct qat_asym_request *qat_req =
                        PTR_ALIGN(akcipher_request_ctx(req), 64);
        struct icp_qat_fw_pke_request *msg = &qat_req->req;
        int ret, ctr = 0;

        if (unlikely(!ctx->n || !ctx->e))
                return -EINVAL;

        if (req->dst_len < ctx->key_sz) {
                req->dst_len = ctx->key_sz;
                return -EOVERFLOW;
        }
        memset(msg, '\0', sizeof(*msg));
        ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
                                          ICP_QAT_FW_COMN_REQ_FLAG_SET);
        msg->pke_hdr.cd_pars.func_id = qat_rsa_enc_fn_id(ctx->key_sz);
        if (unlikely(!msg->pke_hdr.cd_pars.func_id))
                return -EINVAL;

        qat_req->cb = qat_rsa_cb;
        qat_req->ctx.rsa = ctx;
        qat_req->areq.rsa = req;
        msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
        msg->pke_hdr.comn_req_flags =
                ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
                                            QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

        qat_req->in.rsa.enc.e = ctx->dma_e;
        qat_req->in.rsa.enc.n = ctx->dma_n;
        ret = -ENOMEM;

        /*
         * src can be of any size in valid range, but HW expects it to be the
         * same as modulo n so in case it is different we need to allocate a
         * new buf and copy src data.
         * In other case we just need to map the user provided buffer.
         * Also need to make sure that it is in contiguous buffer.
         */
        if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
                qat_req->src_align = NULL;
                qat_req->in.rsa.enc.m = dma_map_single(dev, sg_virt(req->src),
                                                       req->src_len,
                                                       DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
                        return ret;

        } else {
                int shift = ctx->key_sz - req->src_len;

                qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
                                                         &qat_req->in.rsa.enc.m,
                                                         GFP_KERNEL);
                if (unlikely(!qat_req->src_align))
                        return ret;

                scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
                                         0, req->src_len, 0);
        }
        if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
                qat_req->dst_align = NULL;
                qat_req->out.rsa.enc.c = dma_map_single(dev, sg_virt(req->dst),
                                                        req->dst_len,
                                                        DMA_FROM_DEVICE);

                if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
                        goto unmap_src;

        } else {
                qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
                                                         &qat_req->out.rsa.enc.c,
                                                         GFP_KERNEL);
                if (unlikely(!qat_req->dst_align))
                        goto unmap_src;
        }
        qat_req->in.rsa.in_tab[3] = 0;
        qat_req->out.rsa.out_tab[1] = 0;
        qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m,
                                         sizeof(struct qat_rsa_input_params),
                                         DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
                goto unmap_dst;

        qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.enc.c,
                                          sizeof(struct qat_rsa_output_params),
                                          DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
                goto unmap_in_params;

        msg->pke_mid.src_data_addr = qat_req->phy_in;
        msg->pke_mid.dest_data_addr = qat_req->phy_out;
        msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
        msg->input_param_count = 3;
        msg->output_param_count = 1;
        do {
                ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
        } while (ret == -EBUSY && ctr++ < 100);

        if (!ret)
                return -EINPROGRESS;

        if (!dma_mapping_error(dev, qat_req->phy_out))
                dma_unmap_single(dev, qat_req->phy_out,
                                 sizeof(struct qat_rsa_output_params),
                                 DMA_TO_DEVICE);
unmap_in_params:
        if (!dma_mapping_error(dev, qat_req->phy_in))
                dma_unmap_single(dev, qat_req->phy_in,
                                 sizeof(struct qat_rsa_input_params),
                                 DMA_TO_DEVICE);
unmap_dst:
        if (qat_req->dst_align)
                dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
                                  qat_req->out.rsa.enc.c);
        else
                if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
                        dma_unmap_single(dev, qat_req->out.rsa.enc.c,
                                         ctx->key_sz, DMA_FROM_DEVICE);
unmap_src:
        if (qat_req->src_align)
                dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
                                  qat_req->in.rsa.enc.m);
        else
                if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
                        dma_unmap_single(dev, qat_req->in.rsa.enc.m,
                                         ctx->key_sz, DMA_TO_DEVICE);
        return ret;
}
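/*
 * qat_rsa_dec() - same flow as qat_rsa_enc() for the private-key operation.
 * Depending on ctx->crt_mode the input parameter list is either (c, d, n)
 * or the ciphertext plus the five CRT components, which is also why
 * input_param_count switches between 3 and 6.
 */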
static int qat_rsa_dec(struct akcipher_request *req)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct qat_crypto_instance *inst = ctx->inst;
        struct device *dev = &GET_DEV(inst->accel_dev);
        struct qat_asym_request *qat_req =
                        PTR_ALIGN(akcipher_request_ctx(req), 64);
        struct icp_qat_fw_pke_request *msg = &qat_req->req;
        int ret, ctr = 0;

        if (unlikely(!ctx->n || !ctx->d))
                return -EINVAL;

        if (req->dst_len < ctx->key_sz) {
                req->dst_len = ctx->key_sz;
                return -EOVERFLOW;
        }
        memset(msg, '\0', sizeof(*msg));
        ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
                                          ICP_QAT_FW_COMN_REQ_FLAG_SET);
        msg->pke_hdr.cd_pars.func_id = ctx->crt_mode ?
                qat_rsa_dec_fn_id_crt(ctx->key_sz) :
                qat_rsa_dec_fn_id(ctx->key_sz);
        if (unlikely(!msg->pke_hdr.cd_pars.func_id))
                return -EINVAL;

        qat_req->cb = qat_rsa_cb;
        qat_req->ctx.rsa = ctx;
        qat_req->areq.rsa = req;
        msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
        msg->pke_hdr.comn_req_flags =
                ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
                                            QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

        if (ctx->crt_mode) {
                qat_req->in.rsa.dec_crt.p = ctx->dma_p;
                qat_req->in.rsa.dec_crt.q = ctx->dma_q;
                qat_req->in.rsa.dec_crt.dp = ctx->dma_dp;
                qat_req->in.rsa.dec_crt.dq = ctx->dma_dq;
                qat_req->in.rsa.dec_crt.qinv = ctx->dma_qinv;
        } else {
                qat_req->in.rsa.dec.d = ctx->dma_d;
                qat_req->in.rsa.dec.n = ctx->dma_n;
        }
        ret = -ENOMEM;

        /*
         * src can be of any size in valid range, but HW expects it to be the
         * same as modulo n so in case it is different we need to allocate a
         * new buf and copy src data.
         * In other case we just need to map the user provided buffer.
         * Also need to make sure that it is in contiguous buffer.
         */
        if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
                qat_req->src_align = NULL;
                qat_req->in.rsa.dec.c = dma_map_single(dev, sg_virt(req->src),
                                                       req->dst_len,
                                                       DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
                        return ret;

        } else {
                int shift = ctx->key_sz - req->src_len;

                qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
                                                         &qat_req->in.rsa.dec.c,
                                                         GFP_KERNEL);
                if (unlikely(!qat_req->src_align))
                        return ret;

                scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
                                         0, req->src_len, 0);
        }
        if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
                qat_req->dst_align = NULL;
                qat_req->out.rsa.dec.m = dma_map_single(dev, sg_virt(req->dst),
                                                        req->dst_len,
                                                        DMA_FROM_DEVICE);

                if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
                        goto unmap_src;

        } else {
                qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
                                                         &qat_req->out.rsa.dec.m,
                                                         GFP_KERNEL);
                if (unlikely(!qat_req->dst_align))
                        goto unmap_src;
        }

        if (ctx->crt_mode)
                qat_req->in.rsa.in_tab[6] = 0;
        else
                qat_req->in.rsa.in_tab[3] = 0;
        qat_req->out.rsa.out_tab[1] = 0;
        qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.dec.c,
                                         sizeof(struct qat_rsa_input_params),
                                         DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
                goto unmap_dst;

        qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.dec.m,
                                          sizeof(struct qat_rsa_output_params),
                                          DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
                goto unmap_in_params;

        msg->pke_mid.src_data_addr = qat_req->phy_in;
        msg->pke_mid.dest_data_addr = qat_req->phy_out;
        msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
        if (ctx->crt_mode)
                msg->input_param_count = 6;
        else
                msg->input_param_count = 3;

        msg->output_param_count = 1;
        do {
                ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
        } while (ret == -EBUSY && ctr++ < 100);

        if (!ret)
                return -EINPROGRESS;

        if (!dma_mapping_error(dev, qat_req->phy_out))
                dma_unmap_single(dev, qat_req->phy_out,
                                 sizeof(struct qat_rsa_output_params),
                                 DMA_TO_DEVICE);
unmap_in_params:
        if (!dma_mapping_error(dev, qat_req->phy_in))
                dma_unmap_single(dev, qat_req->phy_in,
                                 sizeof(struct qat_rsa_input_params),
                                 DMA_TO_DEVICE);
unmap_dst:
        if (qat_req->dst_align)
                dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
                                  qat_req->out.rsa.dec.m);
        else
                if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
                        dma_unmap_single(dev, qat_req->out.rsa.dec.m,
                                         ctx->key_sz, DMA_FROM_DEVICE);
unmap_src:
        if (qat_req->src_align)
                dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
                                  qat_req->in.rsa.dec.c);
        else
                if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
                        dma_unmap_single(dev, qat_req->in.rsa.dec.c,
                                         ctx->key_sz, DMA_TO_DEVICE);
        return ret;
}
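/*
 * The qat_rsa_set_n/e/d() helpers are called from qat_rsa_setkey(): each one
 * strips leading zero bytes, validates the length against the modulus size
 * and copies the component into a freshly allocated DMA-coherent buffer.
 * n is copied in full (its stripped length defines ctx->key_sz and thereby
 * the operand size for every later request); e and d are copied
 * right-aligned into key_sz-sized buffers.
 */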
int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
{
        struct qat_crypto_instance *inst = ctx->inst;
        struct device *dev = &GET_DEV(inst->accel_dev);
        const char *ptr = value;
        int ret;

        while (!*ptr && vlen) {
                ptr++;
                vlen--;
        }

        ctx->key_sz = vlen;
        ret = -EINVAL;
        /* invalid key size provided */
        if (!qat_rsa_enc_fn_id(ctx->key_sz))
                goto err;

        ret = -ENOMEM;
        ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
        if (!ctx->n)
                goto err;

        memcpy(ctx->n, ptr, ctx->key_sz);
        return 0;
err:
        ctx->key_sz = 0;
        ctx->n = NULL;
        return ret;
}
int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
{
        struct qat_crypto_instance *inst = ctx->inst;
        struct device *dev = &GET_DEV(inst->accel_dev);
        const char *ptr = value;

        while (!*ptr && vlen) {
                ptr++;
                vlen--;
        }

        if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
                ctx->e = NULL;
                return -EINVAL;
        }

        ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
        if (!ctx->e)
                return -ENOMEM;

        memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
        return 0;
}
int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
{
        struct qat_crypto_instance *inst = ctx->inst;
        struct device *dev = &GET_DEV(inst->accel_dev);
        const char *ptr = value;
        int ret;

        while (!*ptr && vlen) {
                ptr++;
                vlen--;
        }

        ret = -EINVAL;
        if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
                goto err;

        ret = -ENOMEM;
        ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
        if (!ctx->d)
                goto err;

        memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
        return 0;
err:
        ctx->d = NULL;
        return ret;
}
static void qat_rsa_drop_leading_zeros(const char **ptr, unsigned int *len)
{
        while (!**ptr && *len) {
                (*ptr)++;
                (*len)--;
        }
}
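/*
 * qat_rsa_setkey_crt() - try to cache the CRT components (p, q, dp, dq,
 * qinv), each right-aligned in a half-key-size DMA-coherent buffer. Any
 * failure unwinds the components copied so far (zeroing them first) and
 * simply leaves the context in non-CRT mode; it is not treated as an error.
 */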
static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
{
        struct qat_crypto_instance *inst = ctx->inst;
        struct device *dev = &GET_DEV(inst->accel_dev);
        const char *ptr;
        unsigned int len;
        unsigned int half_key_sz = ctx->key_sz / 2;

        /* p */
        ptr = rsa_key->p;
        len = rsa_key->p_sz;
        qat_rsa_drop_leading_zeros(&ptr, &len);
        if (!len)
                goto err;
        ctx->p = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
        if (!ctx->p)
                goto err;
        memcpy(ctx->p + (half_key_sz - len), ptr, len);

        /* q */
        ptr = rsa_key->q;
        len = rsa_key->q_sz;
        qat_rsa_drop_leading_zeros(&ptr, &len);
        if (!len)
                goto free_p;
        ctx->q = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
        if (!ctx->q)
                goto free_p;
        memcpy(ctx->q + (half_key_sz - len), ptr, len);

        /* dp */
        ptr = rsa_key->dp;
        len = rsa_key->dp_sz;
        qat_rsa_drop_leading_zeros(&ptr, &len);
        if (!len)
                goto free_q;
        ctx->dp = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dp,
                                      GFP_KERNEL);
        if (!ctx->dp)
                goto free_q;
        memcpy(ctx->dp + (half_key_sz - len), ptr, len);

        /* dq */
        ptr = rsa_key->dq;
        len = rsa_key->dq_sz;
        qat_rsa_drop_leading_zeros(&ptr, &len);
        if (!len)
                goto free_dp;
        ctx->dq = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dq,
                                      GFP_KERNEL);
        if (!ctx->dq)
                goto free_dp;
        memcpy(ctx->dq + (half_key_sz - len), ptr, len);

        /* qinv */
        ptr = rsa_key->qinv;
        len = rsa_key->qinv_sz;
        qat_rsa_drop_leading_zeros(&ptr, &len);
        if (!len)
                goto free_dq;
        ctx->qinv = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
                                        GFP_KERNEL);
        if (!ctx->qinv)
                goto free_dq;
        memcpy(ctx->qinv + (half_key_sz - len), ptr, len);

        ctx->crt_mode = true;
        return;

free_dq:
        memset(ctx->dq, '\0', half_key_sz);
        dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
        ctx->dq = NULL;
free_dp:
        memset(ctx->dp, '\0', half_key_sz);
        dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
        ctx->dp = NULL;
free_q:
        memset(ctx->q, '\0', half_key_sz);
        dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
        ctx->q = NULL;
free_p:
        memset(ctx->p, '\0', half_key_sz);
        dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
        ctx->p = NULL;
err:
        ctx->crt_mode = false;
}
static void qat_rsa_clear_ctx(struct device *dev, struct qat_rsa_ctx *ctx)
{
        unsigned int half_key_sz = ctx->key_sz / 2;

        /* Free the old key if any */
        if (ctx->n)
                dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
        if (ctx->e)
                dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
        if (ctx->d) {
                memset(ctx->d, '\0', ctx->key_sz);
                dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
        }
        if (ctx->p) {
                memset(ctx->p, '\0', half_key_sz);
                dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
        }
        if (ctx->q) {
                memset(ctx->q, '\0', half_key_sz);
                dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
        }
        if (ctx->dp) {
                memset(ctx->dp, '\0', half_key_sz);
                dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
        }
        if (ctx->dq) {
                memset(ctx->dq, '\0', half_key_sz);
                dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
        }
        if (ctx->qinv) {
                memset(ctx->qinv, '\0', half_key_sz);
                dma_free_coherent(dev, half_key_sz, ctx->qinv, ctx->dma_qinv);
        }

        ctx->n = NULL;
        ctx->e = NULL;
        ctx->d = NULL;
        ctx->p = NULL;
        ctx->q = NULL;
        ctx->dp = NULL;
        ctx->dq = NULL;
        ctx->qinv = NULL;
        ctx->crt_mode = false;
        ctx->key_sz = 0;
}
static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
                          unsigned int keylen, bool private)
{
        struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct device *dev = &GET_DEV(ctx->inst->accel_dev);
        struct rsa_key rsa_key;
        int ret;

        qat_rsa_clear_ctx(dev, ctx);

        if (private)
                ret = rsa_parse_priv_key(&rsa_key, key, keylen);
        else
                ret = rsa_parse_pub_key(&rsa_key, key, keylen);
        if (ret < 0)
                goto free;

        ret = qat_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz);
        if (ret < 0)
                goto free;
        ret = qat_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
        if (ret < 0)
                goto free;
        if (private) {
                ret = qat_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
                if (ret < 0)
                        goto free;
                qat_rsa_setkey_crt(ctx, &rsa_key);
        }

        if (!ctx->n || !ctx->e) {
                /* invalid key provided */
                ret = -EINVAL;
                goto free;
        }
        if (private && !ctx->d) {
                /* invalid private key provided */
                ret = -EINVAL;
                goto free;
        }

        return 0;
free:
        qat_rsa_clear_ctx(dev, ctx);
        return ret;
}
static int qat_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
                             unsigned int keylen)
{
        return qat_rsa_setkey(tfm, key, keylen, false);
}
static int qat_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
                              unsigned int keylen)
{
        return qat_rsa_setkey(tfm, key, keylen, true);
}
static unsigned int qat_rsa_max_size(struct crypto_akcipher *tfm)
{
        struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

        return ctx->key_sz;
}
static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
{
        struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct qat_crypto_instance *inst =
                        qat_crypto_get_instance_node(get_current_node());

        if (!inst)
                return -EINVAL;

        ctx->key_sz = 0;
        ctx->inst = inst;
        return 0;
}
static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
        struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct device *dev = &GET_DEV(ctx->inst->accel_dev);

        if (ctx->n)
                dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
        if (ctx->e)
                dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
        if (ctx->d) {
                memset(ctx->d, '\0', ctx->key_sz);
                dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
        }
        qat_crypto_put_instance(ctx->inst);
        ctx->n = NULL;
        ctx->e = NULL;
        ctx->d = NULL;
}
static struct akcipher_alg rsa = {
        .encrypt = qat_rsa_enc,
        .decrypt = qat_rsa_dec,
        .sign = qat_rsa_dec,
        .verify = qat_rsa_enc,
        .set_pub_key = qat_rsa_setpubkey,
        .set_priv_key = qat_rsa_setprivkey,
        .max_size = qat_rsa_max_size,
        .init = qat_rsa_init_tfm,
        .exit = qat_rsa_exit_tfm,
        .reqsize = sizeof(struct qat_asym_request) + 64,
        .base = {
                .cra_name = "rsa",
                .cra_driver_name = "qat-rsa",
                .cra_priority = 1000,
                .cra_module = THIS_MODULE,
                .cra_ctxsize = sizeof(struct qat_rsa_ctx),
        },
};
static struct kpp_alg dh = {
        .set_secret = qat_dh_set_secret,
        .generate_public_key = qat_dh_compute_value,
        .compute_shared_secret = qat_dh_compute_value,
        .max_size = qat_dh_max_size,
        .init = qat_dh_init_tfm,
        .exit = qat_dh_exit_tfm,
        .reqsize = sizeof(struct qat_asym_request) + 64,
        .base = {
                .cra_name = "dh",
                .cra_driver_name = "qat-dh",
                .cra_priority = 1000,
                .cra_module = THIS_MODULE,
                .cra_ctxsize = sizeof(struct qat_dh_ctx),
        },
};
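/*
 * Illustrative use only, not part of this driver: once registered, the
 * "qat-rsa" and "qat-dh" implementations are reached through the generic
 * crypto API by algorithm name ("rsa", "dh") and are selected automatically
 * when their priority (1000) wins. A caller might do roughly the following,
 * where der_key/der_key_len stand for a caller-supplied BER-encoded RSA key:
 *
 *	struct crypto_akcipher *tfm = crypto_alloc_akcipher("rsa", 0, 0);
 *
 *	if (!IS_ERR(tfm)) {
 *		if (!crypto_akcipher_set_pub_key(tfm, der_key, der_key_len)) {
 *			struct akcipher_request *areq =
 *				akcipher_request_alloc(tfm, GFP_KERNEL);
 *			// set src/dst scatterlists, then
 *			// crypto_akcipher_encrypt(areq)
 *		}
 *		crypto_free_akcipher(tfm);
 *	}
 */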
int qat_asym_algs_register(void)
{
        int ret = 0;

        mutex_lock(&algs_lock);
        if (++active_devs == 1) {
                rsa.base.cra_flags = 0;
                ret = crypto_register_akcipher(&rsa);
                if (ret)
                        goto unlock;
                ret = crypto_register_kpp(&dh);
        }
unlock:
        mutex_unlock(&algs_lock);
        return ret;
}
void qat_asym_algs_unregister(void)
{
        mutex_lock(&algs_lock);
        if (--active_devs == 0) {
                crypto_unregister_akcipher(&rsa);
                crypto_unregister_kpp(&dh);
        }
        mutex_unlock(&algs_lock);
}