/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <crypto/internal/rsa.h>
#include <crypto/internal/akcipher.h>
#include <crypto/akcipher.h>
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <crypto/scatterwalk.h>
#include "qat_rsapubkey-asn1.h"
#include "qat_rsaprivkey-asn1.h"
#include "icp_qat_fw_pke.h"
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;
/*
 * Input/output parameter lists handed to the PKE firmware.  The enc and
 * dec views name the operands of c = m^e mod n and m = c^d mod n; every
 * member is the DMA address of a flat, key_sz-byte big-endian buffer.
 */
struct qat_rsa_input_params {
	union {
		struct {
			dma_addr_t m;
			dma_addr_t e;
			dma_addr_t n;
		} enc;
		struct {
			dma_addr_t c;
			dma_addr_t d;
			dma_addr_t n;
		} dec;
		u64 in_tab[8];
	};
} __packed __aligned(64);

struct qat_rsa_output_params {
	union {
		struct {
			dma_addr_t c;
		} enc;
		struct {
			dma_addr_t m;
		} dec;
		u64 out_tab[8];
	};
} __packed __aligned(64);

/* Per-tfm context: key components kept in DMA-coherent memory. */
struct qat_rsa_ctx {
	char *n;
	char *e;
	char *d;
	dma_addr_t dma_n;
	dma_addr_t dma_e;
	dma_addr_t dma_d;
	unsigned int key_sz;
	struct qat_crypto_instance *inst;
} __packed __aligned(64);

/* Per-request state, carved out of the akcipher request context. */
struct qat_rsa_request {
	struct qat_rsa_input_params in;
	struct qat_rsa_output_params out;
	dma_addr_t phy_in;
	dma_addr_t phy_out;
	char *src_align;
	char *dst_align;
	struct icp_qat_fw_pke_request req;
	struct qat_rsa_ctx *ctx;
} __aligned(64);
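/*
 * A minimal sketch (hypothetical helper, not part of the driver) of why
 * the union layout above works: the firmware consumes the parameters as
 * a flat table of 64-bit DMA addresses, so enc.m/e/n and dec.c/d/n must
 * alias in_tab[0..2] exactly.  Assuming the 64-bit dma_addr_t
 * configuration this driver is built with, the overlay can be checked
 * at compile time:
 */
static inline void qat_rsa_layout_checks(void)
{
	BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(u64));
	BUILD_BUG_ON(offsetof(struct qat_rsa_input_params, enc.m) !=
		     offsetof(struct qat_rsa_input_params, dec.c));
	BUILD_BUG_ON(offsetof(struct qat_rsa_input_params, enc.n) !=
		     2 * sizeof(dma_addr_t));
}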
static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
{
	struct akcipher_request *areq = (void *)(__force long)resp->opaque;
	struct qat_rsa_request *req = PTR_ALIGN(akcipher_request_ctx(areq), 64);
	struct device *dev = &GET_DEV(req->ctx->inst->accel_dev);
	int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
				resp->pke_resp_hdr.comn_resp_flags);

	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;

	if (req->src_align)
		dma_free_coherent(dev, req->ctx->key_sz, req->src_align,
				  req->in.enc.m);
	else
		dma_unmap_single(dev, req->in.enc.m, req->ctx->key_sz,
				 DMA_TO_DEVICE);

	/* Strip leading zeros from the fixed-width firmware output. */
	areq->dst_len = req->ctx->key_sz;
	if (req->dst_align) {
		char *ptr = req->dst_align;

		while (!(*ptr) && areq->dst_len) {
			areq->dst_len--;
			ptr++;
		}

		if (areq->dst_len != req->ctx->key_sz)
			memmove(req->dst_align, ptr, areq->dst_len);

		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
					 areq->dst_len, 1);

		dma_free_coherent(dev, req->ctx->key_sz, req->dst_align,
				  req->out.enc.c);
	} else {
		char *ptr = sg_virt(areq->dst);

		while (!(*ptr) && areq->dst_len) {
			areq->dst_len--;
			ptr++;
		}

		if (sg_virt(areq->dst) != ptr && areq->dst_len)
			memmove(sg_virt(areq->dst), ptr, areq->dst_len);

		dma_unmap_single(dev, req->out.enc.c, req->ctx->key_sz,
				 DMA_FROM_DEVICE);
	}

	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, req->phy_out,
			 sizeof(struct qat_rsa_output_params),
			 DMA_TO_DEVICE);

	akcipher_request_complete(areq, err);
}
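/*
 * A minimal sketch (hypothetical helper, not part of the driver) of the
 * zero-stripping idiom used twice above: the firmware always writes a
 * full key_sz-byte big-endian result, while the crypto API reports only
 * the significant bytes, e.g. a 256-byte buffer 00 00 ... 01 23 is
 * reported as the 2-byte result 01 23.
 */
static inline unsigned int rsa_significant_len(const char *buf,
					       unsigned int len)
{
	while (len && !*buf) {		/* skip leading zero octets */
		buf++;
		len--;
	}
	return len;
}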
void qat_alg_asym_callback(void *_resp)
{
	struct icp_qat_fw_pke_resp *resp = _resp;

	qat_rsa_cb(resp);
}
#define PKE_RSA_EP_512 0x1c161b21
#define PKE_RSA_EP_1024 0x35111bf7
#define PKE_RSA_EP_1536 0x4d111cdc
#define PKE_RSA_EP_2048 0x6e111dba
#define PKE_RSA_EP_3072 0x7d111ea3
#define PKE_RSA_EP_4096 0xa5101f7e

/* Map a key size in bytes to the RSA encrypt (m^e mod n) firmware id. */
static unsigned long qat_rsa_enc_fn_id(unsigned int len)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 512:
		return PKE_RSA_EP_512;
	case 1024:
		return PKE_RSA_EP_1024;
	case 1536:
		return PKE_RSA_EP_1536;
	case 2048:
		return PKE_RSA_EP_2048;
	case 3072:
		return PKE_RSA_EP_3072;
	case 4096:
		return PKE_RSA_EP_4096;
	default:
		return 0;
	}
}
#define PKE_RSA_DP1_512 0x1c161b3c
#define PKE_RSA_DP1_1024 0x35111c12
#define PKE_RSA_DP1_1536 0x4d111cf7
#define PKE_RSA_DP1_2048 0x6e111dda
#define PKE_RSA_DP1_3072 0x7d111ebe
#define PKE_RSA_DP1_4096 0xa5101f98

/* Map a key size in bytes to the RSA decrypt (c^d mod n) firmware id. */
static unsigned long qat_rsa_dec_fn_id(unsigned int len)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 512:
		return PKE_RSA_DP1_512;
	case 1024:
		return PKE_RSA_DP1_1024;
	case 1536:
		return PKE_RSA_DP1_1536;
	case 2048:
		return PKE_RSA_DP1_2048;
	case 3072:
		return PKE_RSA_DP1_3072;
	case 4096:
		return PKE_RSA_DP1_4096;
	default:
		return 0;
	}
}
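/*
 * A minimal sketch (illustration only, not in the driver) of how the
 * lookups above are used: key_sz is in bytes, so a 2048-bit key gives
 * len = 256 and bitslen = 256 << 3 = 2048, selecting the 2048-bit
 * function id; any unsupported size yields 0, which callers reject.
 */
static inline bool qat_rsa_key_sz_supported(unsigned int key_sz)
{
	return qat_rsa_enc_fn_id(key_sz) != 0;
}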
static int qat_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_rsa_request *qat_req =
			PTR_ALIGN(akcipher_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	int ret, ctr = 0;

	if (unlikely(!ctx->n || !ctx->e))
		return -EINVAL;

	if (req->dst_len < ctx->key_sz) {
		req->dst_len = ctx->key_sz;
		return -EOVERFLOW;
	}
	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
	msg->pke_hdr.cd_pars.func_id = qat_rsa_enc_fn_id(ctx->key_sz);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->ctx = ctx;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

	qat_req->in.enc.e = ctx->dma_e;
	qat_req->in.enc.n = ctx->dma_n;
	ret = -ENOMEM;

	/*
	 * src may be any size in the valid range, but the HW expects a
	 * contiguous buffer exactly as wide as the modulus n.  If the
	 * input is shorter, allocate a new buffer and copy the data in
	 * right-justified (zero left-padding); otherwise just DMA-map
	 * the user-provided buffer.
	 */
	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
		qat_req->src_align = NULL;
		qat_req->in.enc.m = dma_map_single(dev, sg_virt(req->src),
						   req->src_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, qat_req->in.enc.m)))
			return ret;

	} else {
		int shift = ctx->key_sz - req->src_len;

		qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
							 &qat_req->in.enc.m,
							 GFP_KERNEL);
		if (unlikely(!qat_req->src_align))
			return ret;

		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
					 0, req->src_len, 0);
	}
	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
		qat_req->dst_align = NULL;
		qat_req->out.enc.c = dma_map_single(dev, sg_virt(req->dst),
						    req->dst_len,
						    DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(dev, qat_req->out.enc.c)))
			goto unmap_src;

	} else {
		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
							 &qat_req->out.enc.c,
							 GFP_KERNEL);
		if (unlikely(!qat_req->dst_align))
			goto unmap_src;

	}

	/* Zero-terminate the flat parameter tables for the firmware. */
	qat_req->in.in_tab[3] = 0;
	qat_req->out.out_tab[1] = 0;
	qat_req->phy_in = dma_map_single(dev, &qat_req->in.enc.m,
					 sizeof(struct qat_rsa_input_params),
					 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
		goto unmap_dst;

	qat_req->phy_out = dma_map_single(dev, &qat_req->out.enc.c,
					  sizeof(struct qat_rsa_output_params),
					  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap_in_params;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (uint64_t)(__force long)req;
	msg->input_param_count = 3;
	msg->output_param_count = 1;
	do {
		ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
	} while (ret == -EBUSY && ctr++ < 100);

	if (!ret)
		return -EINPROGRESS;

	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_rsa_output_params),
				 DMA_TO_DEVICE);
unmap_in_params:
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_rsa_input_params),
				 DMA_TO_DEVICE);
unmap_dst:
	if (qat_req->dst_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
				  qat_req->out.enc.c);
	else
		if (!dma_mapping_error(dev, qat_req->out.enc.c))
			dma_unmap_single(dev, qat_req->out.enc.c, ctx->key_sz,
					 DMA_FROM_DEVICE);
unmap_src:
	if (qat_req->src_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
				  qat_req->in.enc.m);
	else
		if (!dma_mapping_error(dev, qat_req->in.enc.m))
			dma_unmap_single(dev, qat_req->in.enc.m, ctx->key_sz,
					 DMA_TO_DEVICE);
	return ret;
}
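/*
 * A minimal sketch (hypothetical flat-buffer helper, not part of the
 * driver) of the left-padding that qat_rsa_enc()/qat_rsa_dec() perform
 * with scatterwalk_map_and_copy(): the firmware expects the input
 * number right-justified in a big-endian buffer exactly key_sz bytes
 * wide, with the leading bytes zero.
 */
static inline void rsa_input_pad(char *dst, unsigned int key_sz,
				 const char *src, unsigned int src_len)
{
	unsigned int shift = key_sz - src_len;	/* assumes src_len <= key_sz */

	memset(dst, 0, shift);			/* leading zero padding */
	memcpy(dst + shift, src, src_len);	/* right-justified value */
}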
static int qat_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_rsa_request *qat_req =
			PTR_ALIGN(akcipher_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	int ret, ctr = 0;

	if (unlikely(!ctx->n || !ctx->d))
		return -EINVAL;

	if (req->dst_len < ctx->key_sz) {
		req->dst_len = ctx->key_sz;
		return -EOVERFLOW;
	}
	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
	msg->pke_hdr.cd_pars.func_id = qat_rsa_dec_fn_id(ctx->key_sz);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->ctx = ctx;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

	qat_req->in.dec.d = ctx->dma_d;
	qat_req->in.dec.n = ctx->dma_n;
	ret = -ENOMEM;

	/*
	 * See the comment in qat_rsa_enc(): the input must be contiguous
	 * and exactly key_sz bytes wide, left-padded with zeros if short.
	 */
	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
		qat_req->src_align = NULL;
		qat_req->in.dec.c = dma_map_single(dev, sg_virt(req->src),
						   req->src_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, qat_req->in.dec.c)))
			return ret;

	} else {
		int shift = ctx->key_sz - req->src_len;

		qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
							 &qat_req->in.dec.c,
							 GFP_KERNEL);
		if (unlikely(!qat_req->src_align))
			return ret;

		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
					 0, req->src_len, 0);
	}
	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
		qat_req->dst_align = NULL;
		qat_req->out.dec.m = dma_map_single(dev, sg_virt(req->dst),
						    req->dst_len,
						    DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(dev, qat_req->out.dec.m)))
			goto unmap_src;

	} else {
		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
							 &qat_req->out.dec.m,
							 GFP_KERNEL);
		if (unlikely(!qat_req->dst_align))
			goto unmap_src;

	}

	qat_req->in.in_tab[3] = 0;
	qat_req->out.out_tab[1] = 0;
	qat_req->phy_in = dma_map_single(dev, &qat_req->in.dec.c,
					 sizeof(struct qat_rsa_input_params),
					 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
		goto unmap_dst;

	qat_req->phy_out = dma_map_single(dev, &qat_req->out.dec.m,
					  sizeof(struct qat_rsa_output_params),
					  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap_in_params;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (uint64_t)(__force long)req;
	msg->input_param_count = 3;
	msg->output_param_count = 1;
	do {
		ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
	} while (ret == -EBUSY && ctr++ < 100);

	if (!ret)
		return -EINPROGRESS;

	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_rsa_output_params),
				 DMA_TO_DEVICE);
unmap_in_params:
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_rsa_input_params),
				 DMA_TO_DEVICE);
unmap_dst:
	if (qat_req->dst_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
				  qat_req->out.dec.m);
	else
		if (!dma_mapping_error(dev, qat_req->out.dec.m))
			dma_unmap_single(dev, qat_req->out.dec.m, ctx->key_sz,
					 DMA_FROM_DEVICE);
unmap_src:
	if (qat_req->src_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
				  qat_req->in.dec.c);
	else
		if (!dma_mapping_error(dev, qat_req->in.dec.c))
			dma_unmap_single(dev, qat_req->in.dec.c, ctx->key_sz,
					 DMA_TO_DEVICE);
	return ret;
}
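/*
 * A minimal sketch (hypothetical wrapper, assuming the pke_tx ring and
 * adf_send_message() used above) of the bounded-retry idiom both submit
 * paths share: adf_send_message() returns -EBUSY while the transport
 * ring is full, so the send is retried up to 100 times before the
 * request is torn down and the error returned to the caller.
 */
static int qat_send_with_retry(struct adf_etr_ring_data *ring, uint32_t *msg)
{
	int ret, ctr = 0;

	do {
		ret = adf_send_message(ring, msg);	/* -EBUSY: ring full */
	} while (ret == -EBUSY && ctr++ < 100);

	return ret;
}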
int qat_rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
		  const void *value, size_t vlen)
{
	struct qat_rsa_ctx *ctx = context;
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;
	int ret;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	ctx->key_sz = vlen;
	ret = -EINVAL;
	/* In FIPS mode only allow 2048- and 3072-bit keys (256/384 bytes) */
	if (fips_enabled && (ctx->key_sz != 256 && ctx->key_sz != 384)) {
		pr_err("QAT: RSA: key size not allowed in FIPS mode\n");
		goto err;
	}
	/* invalid key size provided */
	if (!qat_rsa_enc_fn_id(ctx->key_sz))
		goto err;

	ret = -ENOMEM;
	ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
	if (!ctx->n)
		goto err;

	memcpy(ctx->n, ptr, ctx->key_sz);
	return 0;
err:
	ctx->key_sz = 0;
	ctx->n = NULL;
	return ret;
}
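/*
 * Illustration only (hypothetical helper): BER/DER integers may carry
 * leading zero octets (e.g. a 0x00 sign byte when the high bit of the
 * modulus is set), which is why each parser above first advances past
 * them before taking vlen as the key size; a 2048-bit modulus encoded
 * in 257 bytes thus yields key_sz = 256.
 */
static inline const char *qat_rsa_skip_zeros(const char *p, size_t *vlen)
{
	while (*vlen && !*p) {
		p++;
		(*vlen)--;
	}
	return p;
}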
int qat_rsa_get_e(void *context, size_t hdrlen, unsigned char tag,
		  const void *value, size_t vlen)
{
	struct qat_rsa_ctx *ctx = context;
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
		ctx->e = NULL;
		return -EINVAL;
	}

	ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
	if (!ctx->e)
		return -ENOMEM;

	/* Keep e right-justified in a key_sz-wide, zero-padded buffer. */
	memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
	return 0;
}
int qat_rsa_get_d(void *context, size_t hdrlen, unsigned char tag,
		  const void *value, size_t vlen)
{
	struct qat_rsa_ctx *ctx = context;
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;
	int ret;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	ret = -EINVAL;
	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		goto err;

	/* In FIPS mode only allow 2048- and 3072-bit keys (256/384 bytes) */
	if (fips_enabled && (vlen != 256 && vlen != 384)) {
		pr_err("QAT: RSA: key size not allowed in FIPS mode\n");
		goto err;
	}

	ret = -ENOMEM;
	ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
	if (!ctx->d)
		goto err;

	memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
	return 0;
err:
	ctx->d = NULL;
	return ret;
}
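/*
 * Illustration only (hypothetical helper): the FIPS checks above work
 * in bytes, so "2K & 3K" means 2048 / 8 = 256 and 3072 / 8 = 384.
 */
static inline bool qat_rsa_fips_key_sz_ok(unsigned int key_sz)
{
	return key_sz == 2048 / 8 || key_sz == 3072 / 8;
}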
static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
			  unsigned int keylen, bool private)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
	int ret;

	/* Free the old key if any */
	if (ctx->n)
		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
	if (ctx->e)
		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
	if (ctx->d) {
		memset(ctx->d, '\0', ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
	}

	ctx->n = NULL;
	ctx->e = NULL;
	ctx->d = NULL;

	if (private)
		ret = asn1_ber_decoder(&qat_rsaprivkey_decoder, ctx, key,
				       keylen);
	else
		ret = asn1_ber_decoder(&qat_rsapubkey_decoder, ctx, key,
				       keylen);
	if (ret < 0)
		goto free;

	if (!ctx->n || !ctx->e) {
		/* invalid key provided */
		ret = -EINVAL;
		goto free;
	}
	if (private && !ctx->d) {
		/* invalid private key provided */
		ret = -EINVAL;
		goto free;
	}

	return 0;
free:
	if (ctx->d) {
		memset(ctx->d, '\0', ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
		ctx->d = NULL;
	}
	if (ctx->e) {
		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
		ctx->e = NULL;
	}
	if (ctx->n) {
		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
		ctx->n = NULL;
		ctx->key_sz = 0;
	}
	return ret;
}
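/*
 * Illustration only (hypothetical helper): the private exponent is
 * cleared before its DMA-coherent buffer is released so that key
 * material does not linger in memory; memzero_explicit() could be used
 * instead of memset() to guarantee the stores are not optimized away.
 */
static inline void qat_rsa_clear_d(struct device *dev,
				   struct qat_rsa_ctx *ctx)
{
	if (!ctx->d)
		return;
	memset(ctx->d, '\0', ctx->key_sz);	/* wipe key material first */
	dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
	ctx->d = NULL;
}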
static int qat_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
			     unsigned int keylen)
{
	return qat_rsa_setkey(tfm, key, keylen, false);
}

static int qat_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
			      unsigned int keylen)
{
	return qat_rsa_setkey(tfm, key, keylen, true);
}
static int qat_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	return (ctx->n) ? ctx->key_sz : -EINVAL;
}
static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst =
			qat_crypto_get_instance_node(get_current_node());

	if (!inst)
		return -EINVAL;

	ctx->key_sz = 0;
	ctx->inst = inst;
	return 0;
}
static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);

	if (ctx->n)
		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
	if (ctx->e)
		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
	if (ctx->d) {
		memset(ctx->d, '\0', ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
	}
	qat_crypto_put_instance(ctx->inst);
	ctx->n = NULL;
	ctx->e = NULL;
	ctx->d = NULL;
}
static struct akcipher_alg rsa = {
	.encrypt = qat_rsa_enc,
	.decrypt = qat_rsa_dec,
	.sign = qat_rsa_dec,
	.verify = qat_rsa_enc,
	.set_pub_key = qat_rsa_setpubkey,
	.set_priv_key = qat_rsa_setprivkey,
	.max_size = qat_rsa_max_size,
	.init = qat_rsa_init_tfm,
	.exit = qat_rsa_exit_tfm,
	.reqsize = sizeof(struct qat_rsa_request) + 64,
	.base = {
		.cra_name = "rsa",
		.cra_driver_name = "qat-rsa",
		.cra_priority = 1000,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct qat_rsa_ctx),
	},
};
int qat_asym_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++active_devs == 1) {
		rsa.base.cra_flags = 0;
		ret = crypto_register_akcipher(&rsa);
	}
	mutex_unlock(&algs_lock);
	return ret;
}

void qat_asym_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs == 0)
		crypto_unregister_akcipher(&rsa);
	mutex_unlock(&algs_lock);
}
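/*
 * Usage sketch (hypothetical callers, illustration only): registration
 * is reference counted under algs_lock, so each accelerator instance
 * calls the pair symmetrically and the "qat-rsa" akcipher is visible to
 * the crypto API only while at least one device is up.
 */
static int example_accel_up(void)
{
	return qat_asym_algs_register();	/* first device registers */
}

static void example_accel_down(void)
{
	qat_asym_algs_unregister();		/* last device unregisters */
}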