// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * caam - Freescale FSL CAAM support for Public Key Cryptography
 *
 * Copyright 2016 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
 * all the desired key parameters, input and output pointers.
 */
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "error.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"

#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB)
#define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F1_PDB)
#define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F2_PDB)
#define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F3_PDB)
#define CAAM_RSA_MAX_INPUT_SIZE	512 /* for a 4096-bit modulus */

/* buffer filled with zeros, used for padding */
static u8 *zero_buffer;

/*
 * variable used to avoid double free of resources in case
 * algorithm registration was unsuccessful
 */
static bool init_done;

struct caam_akcipher_alg {
	struct akcipher_alg akcipher;
	bool registered;
};

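/*
 * DMA unmap helpers. The Job Descriptor maps the request I/O scatterlists
 * and the key material referenced by the Protocol Data Block separately,
 * so teardown mirrors that split: one helper for the I/O buffers and one
 * per key form for the PDB entries.
 */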
static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
			 struct akcipher_request *req)
{
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
			  struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

/* RSA Job Completion handler */
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
	struct akcipher_request *req = context;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct rsa_edesc *edesc;
	int ecode = 0;
	bool has_bklog;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = req_ctx->edesc;
	has_bklog = edesc->bklog;

	rsa_pub_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		akcipher_request_complete(req, ecode);
	else
		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
}

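/*
 * Completion handler for private key operations; it additionally dispatches
 * on the private key form (1, 2 or 3) to unmap the corresponding PDB entries.
 */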
static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
			    void *context)
{
	struct akcipher_request *req = context;
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc;
	int ecode = 0;
	bool has_bklog;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = req_ctx->edesc;
	has_bklog = edesc->bklog;

	switch (key->priv_form) {
	case FORM1:
		rsa_priv_f1_unmap(dev, edesc, req);
		break;
	case FORM2:
		rsa_priv_f2_unmap(dev, edesc, req);
		break;
	case FORM3:
		rsa_priv_f3_unmap(dev, edesc, req);
	}

	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		akcipher_request_complete(req, ecode);
	else
		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
}

/**
 * Count the leading zeros that need to be stripped from a given scatterlist
 *
 * @sgl   : scatterlist to count zeros from
 * @nbytes: number of zeros, in bytes, to strip
 * @flags : operation flags
 */
static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
					unsigned int nbytes,
					unsigned int flags)
{
	struct sg_mapping_iter miter;
	int lzeros, ents;
	unsigned int len;
	unsigned int tbytes = nbytes;
	const u8 *buff;

	ents = sg_nents_for_len(sgl, nbytes);
	if (ents < 0)
		return ents;

	sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);

	lzeros = 0;
	len = 0;
	while (nbytes > 0) {
		/* do not strip more than given bytes */
		while (len && !*buff && lzeros < nbytes) {
			lzeros++;
			len--;
			buff++;
		}

		if (len && *buff)
			break;

		sg_miter_next(&miter);
		buff = miter.addr;
		len = miter.length;

		nbytes -= lzeros;
		lzeros = 0;
	}

	miter.consumed = lzeros;
	sg_miter_stop(&miter);
	nbytes -= lzeros;

	return tbytes - nbytes;
}

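/*
 * Allocate the extended descriptor: DMA-map the source and destination
 * scatterlists, build the hardware sec4 S/G table when needed and prepend
 * the zero-padding entry when the input is shorter than the modulus.
 */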
static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
					 size_t desclen)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_edesc *edesc;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	int src_nents, dst_nents;
	int mapped_src_nents, mapped_dst_nents;
	unsigned int diff_size = 0;
	int lzeros;

	if (req->src_len > key->n_sz) {
		/*
		 * strip leading zeros and
		 * return the number of zeros to skip
		 */
		lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len -
						      key->n_sz, sg_flags);
		if (lzeros < 0)
			return ERR_PTR(lzeros);

		req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src,
						      lzeros);
		req_ctx->fixup_src_len = req->src_len - lzeros;
	} else {
		/*
		 * input src is less than n key modulus,
		 * so there will be zero padding
		 */
		diff_size = key->n_sz - req->src_len;
		req_ctx->fixup_src = req->src;
		req_ctx->fixup_src_len = req->src_len;
	}

	src_nents = sg_nents_for_len(req_ctx->fixup_src,
				     req_ctx->fixup_src_len);
	dst_nents = sg_nents_for_len(req->dst, req->dst_len);

	mapped_src_nents = dma_map_sg(dev, req_ctx->fixup_src, src_nents,
				      DMA_TO_DEVICE);
	if (unlikely(!mapped_src_nents)) {
		dev_err(dev, "unable to map source\n");
		return ERR_PTR(-ENOMEM);
	}
	mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
				      DMA_FROM_DEVICE);
	if (unlikely(!mapped_dst_nents)) {
		dev_err(dev, "unable to map destination\n");
		goto src_fail;
	}

	if (!diff_size && mapped_src_nents == 1)
		sec4_sg_len = 0; /* no need for an input hw s/g table */
	else
		sec4_sg_len = mapped_src_nents + !!diff_size;
	sec4_sg_index = sec4_sg_len;

	if (mapped_dst_nents > 1)
		sec4_sg_len += pad_sg_nents(mapped_dst_nents);
	else
		sec4_sg_len = pad_sg_nents(sec4_sg_len);

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc, hw desc commands and link tables */
	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc)
		goto dst_fail;

	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
	if (diff_size)
		dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size,
				   0);

	if (sec4_sg_index)
		sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
				   edesc->sec4_sg + !!diff_size, 0);

	if (mapped_dst_nents > 1)
		sg_to_sec4_sg_last(req->dst, req->dst_len,
				   edesc->sec4_sg + sec4_sg_index, 0);

	/* Save nents for later use in Job Descriptor */
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;

	req_ctx->edesc = edesc;

	if (!sec4_sg_bytes)
		return edesc;

	edesc->mapped_src_nents = mapped_src_nents;
	edesc->mapped_dst_nents = mapped_dst_nents;

	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		goto sec4_sg_fail;
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
			     edesc->sec4_sg_bytes, 1);

	return edesc;

sec4_sg_fail:
	kfree(edesc);
dst_fail:
	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
src_fail:
	dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
	return ERR_PTR(-ENOMEM);
}

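/* crypto-engine callback, used only for requests that carry the backlog flag */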
static int akcipher_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct akcipher_request *req = container_of(areq,
						    struct akcipher_request,
						    base);
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	u32 *desc = req_ctx->edesc->hw_desc;
	int ret;

	req_ctx->edesc->bklog = true;

	ret = caam_jr_enqueue(jrdev, desc, req_ctx->akcipher_op_done, req);

	if (ret != -EINPROGRESS) {
		rsa_pub_unmap(jrdev, req_ctx->edesc, req);
		rsa_io_unmap(jrdev, req_ctx->edesc, req);
		kfree(req_ctx->edesc);
	} else {
		ret = 0;
	}

	return ret;
}

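/*
 * PDB setup helpers: fill the RSA Protocol Data Block with the DMA addresses
 * of the key material and of the input/output data (or of the sec4 S/G table
 * when the data is scattered).
 */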
static int set_rsa_pub_pdb(struct akcipher_request *req,
			   struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map RSA modulus memory\n");
		return -ENOMEM;
	}

	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->e_dma)) {
		dev_err(dev, "Unable to map RSA public exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->g_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
	pdb->f_len = req_ctx->fixup_src_len;

	return 0;
}

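/*
 * Private key forms, as used below: form 1 carries (n, d), form 2 adds the
 * prime factors p and q, and form 3 uses the CRT members dp, dq and qinv.
 */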
static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map modulus memory\n");
		return -ENOMEM;
	}

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;

	return 0;
}

static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		return -ENOMEM;
	}

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		goto unmap_d;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_q;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
unmap_d:
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		return -ENOMEM;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dp_dma)) {
		dev_err(dev, "Unable to map RSA exponent dp memory\n");
		goto unmap_q;
	}

	pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dq_dma)) {
		dev_err(dev, "Unable to map RSA exponent dq memory\n");
		goto unmap_dp;
	}

	pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->c_dma)) {
		dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
		goto unmap_dq;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_qinv;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_qinv:
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
unmap_dp:
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

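/*
 * Common submission path: either hand the request to crypto-engine (backlog
 * case) or enqueue it directly on the CAAM Job Ring, cleaning up the DMA
 * mappings if the enqueue fails.
 */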
static int akcipher_enqueue_req(struct device *jrdev,
				void (*cbk)(struct device *jrdev, u32 *desc,
					    u32 err, void *context),
				struct akcipher_request *req)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc = req_ctx->edesc;
	u32 *desc = edesc->hw_desc;
	int ret;

	req_ctx->akcipher_op_done = cbk;
	/*
	 * Only backlog requests are sent to crypto-engine since the others
	 * can be handled by CAAM, if free, especially since JR has up to 1024
	 * entries (more than the 10 entries from crypto-engine).
	 */
	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		ret = crypto_transfer_akcipher_request_to_engine(jrpriv->engine,
								 req);
	else
		ret = caam_jr_enqueue(jrdev, desc, cbk, req);

	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
		switch (key->priv_form) {
		case FORM1:
			rsa_priv_f1_unmap(jrdev, edesc, req);
			break;
		case FORM2:
			rsa_priv_f2_unmap(jrdev, edesc, req);
			break;
		case FORM3:
			rsa_priv_f3_unmap(jrdev, edesc, req);
			break;
		default:
			rsa_pub_unmap(jrdev, edesc, req);
		}
		rsa_io_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int caam_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	if (unlikely(!key->n || !key->e))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(jrdev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Encrypt Protocol Data Block */
	ret = set_rsa_pub_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

	return akcipher_enqueue_req(jrdev, rsa_pub_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
	ret = set_rsa_priv_f1_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
	ret = set_rsa_priv_f2_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
	ret = set_rsa_priv_f3_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	int ret;

	if (unlikely(!key->n || !key->d))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(ctx->dev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	if (key->priv_form == FORM3)
		ret = caam_rsa_dec_priv_f3(req);
	else if (key->priv_form == FORM2)
		ret = caam_rsa_dec_priv_f2(req);
	else
		ret = caam_rsa_dec_priv_f1(req);

	return ret;
}

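/* Zeroize and free all key material; kfree_sensitive() clears the buffers before freeing */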
static void caam_rsa_free_key(struct caam_rsa_key *key)
{
	kfree_sensitive(key->d);
	kfree_sensitive(key->p);
	kfree_sensitive(key->q);
	kfree_sensitive(key->dp);
	kfree_sensitive(key->dq);
	kfree_sensitive(key->qinv);
	kfree_sensitive(key->tmp1);
	kfree_sensitive(key->tmp2);
	kfree(key->e);
	kfree(key->n);
	memset(key, 0, sizeof(*key));
}

static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
{
	while (!**ptr && *nbytes) {
		(*ptr)++;
		(*nbytes)--;
	}
}

/**
 * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
 * dP, dQ and qInv could decode to less than the corresponding p, q length, as
 * the BER-encoding requires that the minimum number of bytes be used to encode
 * the integer. dP, dQ, qInv decoded values have to be zero-padded to the
 * appropriate length.
 *
 * @ptr   : pointer to {dP, dQ, qInv} CRT member
 * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
 * @dstlen: length in bytes of corresponding p or q prime factor
 */
static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
{
	u8 *dst;

	caam_rsa_drop_leading_zeros(&ptr, &nbytes);
	if (!nbytes)
		return NULL;

	dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
	if (!dst)
		return NULL;

	memcpy(dst + (dstlen - nbytes), ptr, nbytes);

	return dst;
}

/**
 * caam_read_raw_data - Read a raw byte stream as a positive integer.
 * The function skips the buffer's leading zeros, copies the remaining data
 * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
 * the address of the new buffer.
 *
 * @buf   : The data to read
 * @nbytes: The amount of data to read
 */
static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
	caam_rsa_drop_leading_zeros(&buf, nbytes);
	if (!*nbytes)
		return NULL;

	return kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL);
}

static int caam_rsa_check_key_length(unsigned int len)
{
	if (len > 4096)
		return -EINVAL;
	return 0;
}

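/*
 * akcipher setkey handlers: parse the BER-encoded key and copy its members
 * into DMA-capable (GFP_DMA) buffers, stripping leading zeros where the
 * descriptors expect minimal-length integers.
 */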
static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
				unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_pub_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	return 0;
err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
				       struct rsa_key *raw_key)
{
	struct caam_rsa_key *rsa_key = &ctx->key;
	size_t p_sz = raw_key->p_sz;
	size_t q_sz = raw_key->q_sz;

	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
	if (!rsa_key->p)
		return;
	rsa_key->p_sz = p_sz;

	rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
	if (!rsa_key->q)
		goto free_p;
	rsa_key->q_sz = q_sz;

	rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp1)
		goto free_q;

	rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp2)
		goto free_tmp1;

	rsa_key->priv_form = FORM2;

	rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
	if (!rsa_key->dp)
		goto free_tmp2;

	rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
	if (!rsa_key->dq)
		goto free_dp;

	rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
					  p_sz);
	if (!rsa_key->qinv)
		goto free_dq;

	rsa_key->priv_form = FORM3;

	return;

free_dq:
	kfree_sensitive(rsa_key->dq);
free_dp:
	kfree_sensitive(rsa_key->dp);
free_tmp2:
	kfree_sensitive(rsa_key->tmp2);
free_tmp1:
	kfree_sensitive(rsa_key->tmp1);
free_q:
	kfree_sensitive(rsa_key->q);
free_p:
	kfree_sensitive(rsa_key->p);
}

static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
				 unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_priv_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->d)
		goto err;

	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->d_sz = raw_key.d_sz;
	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	caam_rsa_set_priv_key_form(ctx, &raw_key);

	return 0;

err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	return ctx->key.n_sz;
}

/* Per session pkc's driver context creation function */
static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	ctx->dev = caam_jr_alloc();

	if (IS_ERR(ctx->dev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->dev);
	}

	ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer,
					  CAAM_RSA_MAX_INPUT_SIZE - 1,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, ctx->padding_dma)) {
		dev_err(ctx->dev, "unable to map padding\n");
		caam_jr_free(ctx->dev);
		return -ENOMEM;
	}

	ctx->enginectx.op.do_one_request = akcipher_do_one_req;

	return 0;
}

/* Per session pkc's driver context cleanup function */
static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;

	dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE -
			 1, DMA_TO_DEVICE);
	caam_rsa_free_key(key);
	caam_jr_free(ctx->dev);
}

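/* RSA akcipher algorithm registered by this driver ("rsa-caam") */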
static struct caam_akcipher_alg caam_rsa = {
	.akcipher = {
		.encrypt = caam_rsa_enc,
		.decrypt = caam_rsa_dec,
		.set_pub_key = caam_rsa_set_pub_key,
		.set_priv_key = caam_rsa_set_priv_key,
		.max_size = caam_rsa_max_size,
		.init = caam_rsa_init_tfm,
		.exit = caam_rsa_exit_tfm,
		.reqsize = sizeof(struct caam_rsa_req_ctx),
		.base = {
			.cra_name = "rsa",
			.cra_driver_name = "rsa-caam",
			.cra_priority = 3000,
			.cra_module = THIS_MODULE,
			.cra_ctxsize = sizeof(struct caam_rsa_ctx),
		},
	},
};

/* Public Key Cryptography module initialization handler */
int caam_pkc_init(struct device *ctrldev)
{
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	u32 pk_inst;
	int err;
	init_done = false;

	/* Determine public key hardware accelerator presence. */
	if (priv->era < 10)
		pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			   CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
	else
		pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK;

	/* Do not register algorithms if PKHA is not present. */
	if (!pk_inst)
		return 0;

	/* allocate zero buffer, used for padding input */
	zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_DMA |
			      GFP_KERNEL);
	if (!zero_buffer)
		return -ENOMEM;

	err = crypto_register_akcipher(&caam_rsa.akcipher);

	if (err) {
		kfree(zero_buffer);
		dev_warn(ctrldev, "%s alg registration failed\n",
			 caam_rsa.akcipher.base.cra_driver_name);
	} else {
		init_done = true;
		caam_rsa.registered = true;
		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
	}

	return err;
}

void caam_pkc_exit(void)
{
	if (!init_done)
		return;

	if (caam_rsa.registered)
		crypto_unregister_akcipher(&caam_rsa.akcipher);

	kfree(zero_buffer);
}