// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * caam - Freescale FSL CAAM support for Public Key Cryptography
 *
 * Copyright 2016 Freescale Semiconductor, Inc.
 * Copyright 2018-2019, 2023 NXP
 *
 * There is no Shared Descriptor for PKC so that the Job Descriptor must carry
 * all the desired key parameters, input and output pointers.
 */
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "error.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"
#include <crypto/internal/engine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>

#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB)
#define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F1_PDB)
#define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F2_PDB)
#define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F3_PDB)
#define CAAM_RSA_MAX_INPUT_SIZE	512 /* for a 4096-bit modulus */
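
/*
 * Note on the descriptor lengths above: each RSA job descriptor built by
 * init_rsa_pub_desc()/init_rsa_priv_f*_desc() is expected to consist of a
 * job descriptor header, the protocol data block (PDB) and one protocol
 * OPERATION command, hence "2 * CAAM_CMD_SZ" plus the PDB size.
 */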

/* buffer filled with zeros, used for padding */
static u8 *zero_buffer;

/*
 * variable used to avoid double free of resources in case
 * algorithm registration was unsuccessful
 */
static bool init_done;

struct caam_akcipher_alg {
	struct akcipher_engine_alg akcipher;
	bool registered;
};

static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
			 struct akcipher_request *req)
{
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
			  struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

/* RSA Job Completion handler */
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
	struct akcipher_request *req = context;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct rsa_edesc *edesc;
	int ecode = 0;
	bool has_bklog;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = req_ctx->edesc;
	has_bklog = edesc->bklog;

	rsa_pub_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		akcipher_request_complete(req, ecode);
	else
		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
}

static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
			    void *context)
{
	struct akcipher_request *req = context;
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc;
	int ecode = 0;
	bool has_bklog;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = req_ctx->edesc;
	has_bklog = edesc->bklog;

	switch (key->priv_form) {
	case FORM1:
		rsa_priv_f1_unmap(dev, edesc, req);
		break;
	case FORM2:
		rsa_priv_f2_unmap(dev, edesc, req);
		break;
	case FORM3:
		rsa_priv_f3_unmap(dev, edesc, req);
	}

	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		akcipher_request_complete(req, ecode);
	else
		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
}

/**
 * caam_rsa_count_leading_zeros - Count leading zeros, needed to strip,
 *                                from a given scatterlist
 *
 * @sgl   : scatterlist to count zeros from
 * @nbytes: number of zeros, in bytes, to strip
 * @flags : operation flags
 */
static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
					unsigned int nbytes,
					unsigned int flags)
{
	struct sg_mapping_iter miter;
	int lzeros, ents;
	unsigned int len;
	unsigned int tbytes = nbytes;
	const u8 *buff;

	ents = sg_nents_for_len(sgl, nbytes);
	if (ents < 0)
		return ents;

	sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);

	lzeros = 0;
	len = 0;
	while (nbytes > 0) {
		/* do not strip more than given bytes */
		while (len && !*buff && lzeros < nbytes) {
			lzeros++;
			len--;
			buff++;
		}

		if (len && *buff)
			break;

		if (!sg_miter_next(&miter))
			break;

		buff = miter.addr;
		len = miter.length;

		nbytes -= lzeros;
		lzeros = 0;
	}

	miter.consumed = lzeros;
	sg_miter_stop(&miter);
	nbytes -= lzeros;

	return tbytes - nbytes;
}

static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
					 size_t desclen)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct device *dev = ctx->dev;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_edesc *edesc;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	int src_nents, dst_nents;
	int mapped_src_nents, mapped_dst_nents;
	unsigned int diff_size = 0;
	int lzeros;

	if (req->src_len > key->n_sz) {
		/*
		 * strip leading zeros and
		 * return the number of zeros to skip
		 */
		lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len -
						      key->n_sz, sg_flags);
		if (lzeros < 0)
			return ERR_PTR(lzeros);

		req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src,
						      lzeros);
		req_ctx->fixup_src_len = req->src_len - lzeros;
	} else {
		/*
		 * input src is less than the n key modulus,
		 * so there will be zero padding
		 */
		diff_size = key->n_sz - req->src_len;
		req_ctx->fixup_src = req->src;
		req_ctx->fixup_src_len = req->src_len;
	}

	src_nents = sg_nents_for_len(req_ctx->fixup_src,
				     req_ctx->fixup_src_len);
	dst_nents = sg_nents_for_len(req->dst, req->dst_len);

	mapped_src_nents = dma_map_sg(dev, req_ctx->fixup_src, src_nents,
				      DMA_TO_DEVICE);
	if (unlikely(!mapped_src_nents)) {
		dev_err(dev, "unable to map source\n");
		return ERR_PTR(-ENOMEM);
	}
	mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
				      DMA_FROM_DEVICE);
	if (unlikely(!mapped_dst_nents)) {
		dev_err(dev, "unable to map destination\n");
		goto src_fail;
	}

	if (!diff_size && mapped_src_nents == 1)
		sec4_sg_len = 0; /* no need for an input hw s/g table */
	else
		sec4_sg_len = mapped_src_nents + !!diff_size;
	sec4_sg_index = sec4_sg_len;

	if (mapped_dst_nents > 1)
		sec4_sg_len += pad_sg_nents(mapped_dst_nents);
	else
		sec4_sg_len = pad_sg_nents(sec4_sg_len);

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc, hw desc commands and link tables */
	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes, flags);
	if (!edesc)
		goto dst_fail;

	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
	if (diff_size)
		dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size,
				   0);

	if (sec4_sg_index)
		sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
				   edesc->sec4_sg + !!diff_size, 0);

	if (mapped_dst_nents > 1)
		sg_to_sec4_sg_last(req->dst, req->dst_len,
				   edesc->sec4_sg + sec4_sg_index, 0);

	/* Save nents for later use in Job Descriptor */
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;

	req_ctx->edesc = edesc;

	if (!sec4_sg_bytes)
		return edesc;

	edesc->mapped_src_nents = mapped_src_nents;
	edesc->mapped_dst_nents = mapped_dst_nents;

	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		goto sec4_sg_fail;
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
			     edesc->sec4_sg_bytes, 1);

	return edesc;

sec4_sg_fail:
	kfree(edesc);
dst_fail:
	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
src_fail:
	dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
	return ERR_PTR(-ENOMEM);
}

static int akcipher_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct akcipher_request *req = container_of(areq,
						    struct akcipher_request,
						    base);
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct device *jrdev = ctx->dev;
	u32 *desc = req_ctx->edesc->hw_desc;
	int ret;

	req_ctx->edesc->bklog = true;

	ret = caam_jr_enqueue(jrdev, desc, req_ctx->akcipher_op_done, req);

	if (ret == -ENOSPC && engine->retry_support)
		return ret;

	if (ret != -EINPROGRESS) {
		rsa_pub_unmap(jrdev, req_ctx->edesc, req);
		rsa_io_unmap(jrdev, req_ctx->edesc, req);
		kfree(req_ctx->edesc);
	} else {
		ret = 0;
	}

	return ret;
}

static int set_rsa_pub_pdb(struct akcipher_request *req,
			   struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map RSA modulus memory\n");
		return -ENOMEM;
	}

	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->e_dma)) {
		dev_err(dev, "Unable to map RSA public exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->g_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
	pdb->f_len = req_ctx->fixup_src_len;

	return 0;
}

static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map modulus memory\n");
		return -ENOMEM;
	}

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;

	return 0;
}

static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		return -ENOMEM;
	}

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		goto unmap_d;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_q;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
unmap_d:
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		return -ENOMEM;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dp_dma)) {
		dev_err(dev, "Unable to map RSA exponent dp memory\n");
		goto unmap_q;
	}

	pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dq_dma)) {
		dev_err(dev, "Unable to map RSA exponent dq memory\n");
		goto unmap_dp;
	}

	pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->c_dma)) {
		dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
		goto unmap_dq;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_qinv;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_qinv:
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
unmap_dp:
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

static int akcipher_enqueue_req(struct device *jrdev,
				void (*cbk)(struct device *jrdev, u32 *desc,
					    u32 err, void *context),
				struct akcipher_request *req)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc = req_ctx->edesc;
	u32 *desc = edesc->hw_desc;
	int ret;

	req_ctx->akcipher_op_done = cbk;
	/*
	 * Only the backlog requests are sent to crypto-engine since the others
	 * can be handled by CAAM, if free, especially since JR has up to 1024
	 * entries (more than the 10 entries from crypto-engine).
	 */
	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		ret = crypto_transfer_akcipher_request_to_engine(jrpriv->engine,
								 req);
	else
		ret = caam_jr_enqueue(jrdev, desc, cbk, req);

	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
		switch (key->priv_form) {
		case FORM1:
			rsa_priv_f1_unmap(jrdev, edesc, req);
			break;
		case FORM2:
			rsa_priv_f2_unmap(jrdev, edesc, req);
			break;
		case FORM3:
			rsa_priv_f3_unmap(jrdev, edesc, req);
			break;
		default:
			rsa_pub_unmap(jrdev, edesc, req);
		}
		rsa_io_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int caam_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	if (unlikely(!key->n || !key->e))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(jrdev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Encrypt Protocol Data Block */
	ret = set_rsa_pub_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

	return akcipher_enqueue_req(jrdev, rsa_pub_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
	ret = set_rsa_priv_f1_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
	ret = set_rsa_priv_f2_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
	ret = set_rsa_priv_f3_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	int ret;

	if (unlikely(!key->n || !key->d))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(ctx->dev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	if (key->priv_form == FORM3)
		ret = caam_rsa_dec_priv_f3(req);
	else if (key->priv_form == FORM2)
		ret = caam_rsa_dec_priv_f2(req);
	else
		ret = caam_rsa_dec_priv_f1(req);

	return ret;
}

static void caam_rsa_free_key(struct caam_rsa_key *key)
{
	kfree_sensitive(key->d);
	kfree_sensitive(key->p);
	kfree_sensitive(key->q);
	kfree_sensitive(key->dp);
	kfree_sensitive(key->dq);
	kfree_sensitive(key->qinv);
	kfree_sensitive(key->tmp1);
	kfree_sensitive(key->tmp2);
	kfree(key->e);
	kfree(key->n);
	memset(key, 0, sizeof(*key));
}

static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
{
	while (!**ptr && *nbytes) {
		(*ptr)++;
		(*nbytes)--;
	}
}

/**
 * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
 * dP, dQ and qInv could decode to less than corresponding p, q length, as the
 * BER-encoding requires that the minimum number of bytes be used to encode the
 * integer. dP, dQ, qInv decoded values have to be zero-padded to appropriate
 * length.
 *
 * @ptr   : pointer to {dP, dQ, qInv} CRT member
 * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
 * @dstlen: length in bytes of corresponding p or q prime factor
 */
static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
{
	u8 *dst;

	caam_rsa_drop_leading_zeros(&ptr, &nbytes);
	if (!nbytes)
		return NULL;

	dst = kzalloc(dstlen, GFP_KERNEL);
	if (!dst)
		return NULL;

	memcpy(dst + (dstlen - nbytes), ptr, nbytes);

	return dst;
}

/**
 * caam_read_raw_data - Read a raw byte stream as a positive integer.
 * The function skips buffer's leading zeros, copies the remaining data
 * to a buffer allocated in the GFP_KERNEL zone and returns
 * the address of the new buffer.
 *
 * @buf   : The data to read
 * @nbytes: The amount of data to read
 */
static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
	caam_rsa_drop_leading_zeros(&buf, nbytes);
	if (!*nbytes)
		return NULL;

	return kmemdup(buf, *nbytes, GFP_KERNEL);
}

static int caam_rsa_check_key_length(unsigned int len)
{
	if (len > 4096)
		return -EINVAL;
	return 0;
}

)
943 static int caam_rsa_set_pub_key(struct crypto_akcipher
*tfm
, const void *key
,
946 struct caam_rsa_ctx
*ctx
= akcipher_tfm_ctx_dma(tfm
);
947 struct rsa_key raw_key
= {NULL
};
948 struct caam_rsa_key
*rsa_key
= &ctx
->key
;
951 /* Free the old RSA key if any */
952 caam_rsa_free_key(rsa_key
);
954 ret
= rsa_parse_pub_key(&raw_key
, key
, keylen
);
958 /* Copy key in DMA zone */
959 rsa_key
->e
= kmemdup(raw_key
.e
, raw_key
.e_sz
, GFP_KERNEL
);
964 * Skip leading zeros and copy the positive integer to a buffer
965 * allocated in the GFP_KERNEL zone. The decryption descriptor
966 * expects a positive integer for the RSA modulus and uses its length as
967 * decryption output length.
969 rsa_key
->n
= caam_read_raw_data(raw_key
.n
, &raw_key
.n_sz
);
973 if (caam_rsa_check_key_length(raw_key
.n_sz
<< 3)) {
974 caam_rsa_free_key(rsa_key
);
978 rsa_key
->e_sz
= raw_key
.e_sz
;
979 rsa_key
->n_sz
= raw_key
.n_sz
;
983 caam_rsa_free_key(rsa_key
);
static int caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
				      struct rsa_key *raw_key)
{
	struct caam_rsa_key *rsa_key = &ctx->key;
	size_t p_sz = raw_key->p_sz;
	size_t q_sz = raw_key->q_sz;
	unsigned aligned_size;

	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
	if (!rsa_key->p)
		return -ENOMEM;
	rsa_key->p_sz = p_sz;

	rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
	if (!rsa_key->q)
		goto free_p;
	rsa_key->q_sz = q_sz;

	aligned_size = ALIGN(raw_key->p_sz, dma_get_cache_alignment());
	rsa_key->tmp1 = kzalloc(aligned_size, GFP_KERNEL);
	if (!rsa_key->tmp1)
		goto free_q;

	aligned_size = ALIGN(raw_key->q_sz, dma_get_cache_alignment());
	rsa_key->tmp2 = kzalloc(aligned_size, GFP_KERNEL);
	if (!rsa_key->tmp2)
		goto free_tmp1;

	rsa_key->priv_form = FORM2;

	rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
	if (!rsa_key->dp)
		goto free_tmp2;

	rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
	if (!rsa_key->dq)
		goto free_dp;

	rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
					  q_sz);
	if (!rsa_key->qinv)
		goto free_dq;

	rsa_key->priv_form = FORM3;

	return 0;

free_dq:
	kfree_sensitive(rsa_key->dq);
free_dp:
	kfree_sensitive(rsa_key->dp);
free_tmp2:
	kfree_sensitive(rsa_key->tmp2);
free_tmp1:
	kfree_sensitive(rsa_key->tmp1);
free_q:
	kfree_sensitive(rsa_key->q);
free_p:
	kfree_sensitive(rsa_key->p);
	return -ENOMEM;
}

static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
				 unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_priv_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_KERNEL);
	if (!rsa_key->d)
		goto err;

	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->d_sz = raw_key.d_sz;
	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	ret = caam_rsa_set_priv_key_form(ctx, &raw_key);
	if (ret)
		goto err;

	return 0;

err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);

	return ctx->key.n_sz;
}

/* Per session pkc's driver context creation function */
static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);

	akcipher_set_reqsize(tfm, sizeof(struct caam_rsa_req_ctx));

	ctx->dev = caam_jr_alloc();

	if (IS_ERR(ctx->dev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->dev);
	}

	ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer,
					  CAAM_RSA_MAX_INPUT_SIZE - 1,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, ctx->padding_dma)) {
		dev_err(ctx->dev, "unable to map padding\n");
		caam_jr_free(ctx->dev);
		return -ENOMEM;
	}

	return 0;
}

/* Per session pkc's driver context cleanup function */
static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;

	dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE -
			 1, DMA_TO_DEVICE);
	caam_rsa_free_key(key);
	caam_jr_free(ctx->dev);
}

static struct caam_akcipher_alg caam_rsa = {
	.akcipher.base = {
		.encrypt = caam_rsa_enc,
		.decrypt = caam_rsa_dec,
		.set_pub_key = caam_rsa_set_pub_key,
		.set_priv_key = caam_rsa_set_priv_key,
		.max_size = caam_rsa_max_size,
		.init = caam_rsa_init_tfm,
		.exit = caam_rsa_exit_tfm,
		.base = {
			.cra_name = "rsa",
			.cra_driver_name = "rsa-caam",
			.cra_priority = 3000,
			.cra_module = THIS_MODULE,
			.cra_ctxsize = sizeof(struct caam_rsa_ctx) +
				       CRYPTO_DMA_PADDING,
		},
	},
	.akcipher.op = {
		.do_one_request = akcipher_do_one_req,
	},
};

/* Public Key Cryptography module initialization handler */
int caam_pkc_init(struct device *ctrldev)
{
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	u32 pk_inst, pkha;
	int err;
	init_done = false;

	/* Determine public key hardware accelerator presence. */
	if (priv->era < 10) {
		pk_inst = (rd_reg32(&priv->jr[0]->perfmon.cha_num_ls) &
			   CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
	} else {
		pkha = rd_reg32(&priv->jr[0]->vreg.pkha);
		pk_inst = pkha & CHA_VER_NUM_MASK;

		/*
		 * Newer CAAMs support partially disabled functionality. If this is the
		 * case, the number is non-zero, but this bit is set to indicate that
		 * no encryption or decryption is supported. Only signing and verifying
		 * is supported.
		 */
		if (pkha & CHA_VER_MISC_PKHA_NO_CRYPT)
			pk_inst = 0;
	}

	/* Do not register algorithms if PKHA is not present. */
	if (!pk_inst)
		return 0;

	/* allocate zero buffer, used for padding input */
	zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_KERNEL);
	if (!zero_buffer)
		return -ENOMEM;

	err = crypto_engine_register_akcipher(&caam_rsa.akcipher);

	if (err) {
		kfree(zero_buffer);
		dev_warn(ctrldev, "%s alg registration failed\n",
			 caam_rsa.akcipher.base.base.cra_driver_name);
	} else {
		init_done = true;
		caam_rsa.registered = true;
		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
	}

	return err;
}

void caam_pkc_exit(void)
{
	if (!init_done)
		return;

	if (caam_rsa.registered)
		crypto_engine_unregister_akcipher(&caam_rsa.akcipher);

	kfree(zero_buffer);
}