// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * caam - Freescale FSL CAAM support for Public Key Cryptography
 *
 * Copyright 2016 Freescale Semiconductor, Inc.
 *
 * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
 * all the desired key parameters, input and output pointers.
 */
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "error.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"
#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
#define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f1_pdb))
#define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f2_pdb))
#define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f3_pdb))
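
/*
 * Each job descriptor built here is the Protocol Data Block for the selected
 * RSA key form plus two command words (descriptor header and protocol
 * operation), which is what the 2 * CAAM_CMD_SZ above accounts for.
 */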
static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
			 struct akcipher_request *req)
{
	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
				 DMA_TO_DEVICE);
}
static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
			  struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
}
static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
}
static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}
static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}
/* RSA Job Completion handler */
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_pub_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}
static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f1_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}
static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f2_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}
static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f3_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}
static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
					unsigned int nbytes,
					unsigned int flags)
{
	struct sg_mapping_iter miter;
	int lzeros, ents;
	unsigned int len;
	unsigned int tbytes = nbytes;
	const u8 *buff;

	ents = sg_nents_for_len(sgl, nbytes);
	if (ents < 0)
		return ents;

	sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);

	lzeros = 0;
	len = 0;
	while (nbytes > 0) {
		while (len && !*buff) {
			lzeros++;
			len--;
			buff++;
		}

		if (len && *buff)
			break;

		sg_miter_next(&miter);
		buff = miter.addr;
		len = miter.length;

		nbytes -= lzeros;
		lzeros = 0;
	}

	miter.consumed = lzeros;
	sg_miter_stop(&miter);
	nbytes -= lzeros;

	return tbytes - nbytes;
}
static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
					 size_t desclen)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
	int sgc;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	int src_nents, dst_nents;
	int lzeros;

	lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len, sg_flags);
	if (lzeros < 0)
		return ERR_PTR(lzeros);

	req->src_len -= lzeros;
	req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros);

	src_nents = sg_nents_for_len(req->src, req->src_len);
	dst_nents = sg_nents_for_len(req->dst, req->dst_len);

	if (src_nents > 1)
		sec4_sg_len = src_nents;
	if (dst_nents > 1)
		sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc, hw desc commands and link tables */
	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc)
		return ERR_PTR(-ENOMEM);

	sgc = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
	if (unlikely(!sgc)) {
		dev_err(dev, "unable to map source\n");
		goto src_fail;
	}

	sgc = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
	if (unlikely(!sgc)) {
		dev_err(dev, "unable to map destination\n");
		goto dst_fail;
	}

	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;

	sec4_sg_index = 0;
	if (src_nents > 1) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		sec4_sg_index += src_nents;
	}
	if (dst_nents > 1)
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);

	/* Save nents for later use in Job Descriptor */
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;

	if (!sec4_sg_bytes)
		return edesc;

	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		goto sec4_sg_fail;
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	return edesc;

sec4_sg_fail:
	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
dst_fail:
	dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
src_fail:
	kfree(edesc);
	return ERR_PTR(-ENOMEM);
}
static int set_rsa_pub_pdb(struct akcipher_request *req,
			   struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map RSA modulus memory\n");
		return -ENOMEM;
	}

	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->e_dma)) {
		dev_err(dev, "Unable to map RSA public exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->f_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->g_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
	pdb->f_len = req->src_len;

	return 0;
}
static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map modulus memory\n");
		return -ENOMEM;
	}

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->g_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;

	return 0;
}
static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		return -ENOMEM;
	}

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		goto unmap_d;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_q;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->g_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
unmap_d:
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}
static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		return -ENOMEM;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dp_dma)) {
		dev_err(dev, "Unable to map RSA exponent dp memory\n");
		goto unmap_q;
	}

	pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dq_dma)) {
		dev_err(dev, "Unable to map RSA exponent dq memory\n");
		goto unmap_dp;
	}

	pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->c_dma)) {
		dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
		goto unmap_dq;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_qinv;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->g_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_qinv:
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
unmap_dp:
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}
static int caam_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	if (unlikely(!key->n || !key->e))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(jrdev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Encrypt Protocol Data Block */
	ret = set_rsa_pub_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_pub_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}
static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
	ret = set_rsa_priv_f1_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f1_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f1_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}
static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
	ret = set_rsa_priv_f2_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f2_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}
static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
	ret = set_rsa_priv_f3_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f3_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}
static int caam_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	int ret;

	if (unlikely(!key->n || !key->d))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(ctx->dev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	if (key->priv_form == FORM3)
		ret = caam_rsa_dec_priv_f3(req);
	else if (key->priv_form == FORM2)
		ret = caam_rsa_dec_priv_f2(req);
	else
		ret = caam_rsa_dec_priv_f1(req);

	return ret;
}
static void caam_rsa_free_key(struct caam_rsa_key *key)
{
	kzfree(key->d);
	kzfree(key->p);
	kzfree(key->q);
	kzfree(key->dp);
	kzfree(key->dq);
	kzfree(key->qinv);
	kzfree(key->tmp1);
	kzfree(key->tmp2);
	kfree(key->e);
	kfree(key->n);
	memset(key, 0, sizeof(*key));
}
static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
{
	while (!**ptr && *nbytes) {
		(*ptr)++;
		(*nbytes)--;
	}
}
/**
 * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
 * dP, dQ and qInv could decode to less than corresponding p, q length, as the
 * BER-encoding requires that the minimum number of bytes be used to encode the
 * integer. dP, dQ, qInv decoded values have to be zero-padded to appropriate
 * length.
 *
 * @ptr   : pointer to {dP, dQ, qInv} CRT member
 * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
 * @dstlen: length in bytes of corresponding p or q prime factor
 */
static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
{
	u8 *dst;

	caam_rsa_drop_leading_zeros(&ptr, &nbytes);
	if (!nbytes)
		return NULL;

	dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
	if (!dst)
		return NULL;

	memcpy(dst + (dstlen - nbytes), ptr, nbytes);

	return dst;
}
/**
 * caam_read_raw_data - Read a raw byte stream as a positive integer.
 * The function skips the buffer's leading zeros, copies the remaining data
 * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
 * the address of the new buffer.
 *
 * @buf   : The data to read
 * @nbytes: The amount of data to read
 */
static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
	caam_rsa_drop_leading_zeros(&buf, nbytes);
	if (!*nbytes)
		return NULL;

	return kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL);
}
static int caam_rsa_check_key_length(unsigned int len)
{
	if (len > 4096)
		return -EINVAL;
	return 0;
}
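
/*
 * .set_pub_key handler: parse the BER-encoded key with the generic
 * rsa_helper routines, then copy e and a zero-stripped n into DMA-capable
 * buffers owned by this context.
 */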
static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
				unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_pub_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);

	return 0;
err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}
static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
				       struct rsa_key *raw_key)
{
	struct caam_rsa_key *rsa_key = &ctx->key;
	size_t p_sz = raw_key->p_sz;
	size_t q_sz = raw_key->q_sz;

	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
	if (!rsa_key->p)
		return;
	rsa_key->p_sz = p_sz;

	rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
	if (!rsa_key->q)
		goto free_p;
	rsa_key->q_sz = q_sz;

	rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp1)
		goto free_q;

	rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp2)
		goto free_tmp1;

	rsa_key->priv_form = FORM2;

	rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
	if (!rsa_key->dp)
		goto free_tmp2;

	rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
	if (!rsa_key->dq)
		goto free_dp;

	rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
					  p_sz);
	if (!rsa_key->qinv)
		goto free_dq;

	rsa_key->priv_form = FORM3;

	return;

free_dq:
	kzfree(rsa_key->dq);
free_dp:
	kzfree(rsa_key->dp);
free_tmp2:
	kzfree(rsa_key->tmp2);
free_tmp1:
	kzfree(rsa_key->tmp1);
free_q:
	kzfree(rsa_key->q);
free_p:
	kzfree(rsa_key->p);
}
static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
				 unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_priv_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->d = kzalloc(raw_key.d_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->d)
		goto err;

	rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->d_sz = raw_key.d_sz;
	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
	memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);

	caam_rsa_set_priv_key_form(ctx, &raw_key);

	return 0;

err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}
static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	return ctx->key.n_sz;
}
/* Per session pkc's driver context creation function */
static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	ctx->dev = caam_jr_alloc();

	if (IS_ERR(ctx->dev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->dev);
	}

	return 0;
}
/* Per session pkc's driver context cleanup function */
static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;

	caam_rsa_free_key(key);
	caam_jr_free(ctx->dev);
}
static struct akcipher_alg caam_rsa = {
	.encrypt = caam_rsa_enc,
	.decrypt = caam_rsa_dec,
	.set_pub_key = caam_rsa_set_pub_key,
	.set_priv_key = caam_rsa_set_priv_key,
	.max_size = caam_rsa_max_size,
	.init = caam_rsa_init_tfm,
	.exit = caam_rsa_exit_tfm,
	.reqsize = sizeof(struct caam_rsa_req_ctx),
	.base = {
		.cra_name = "rsa",
		.cra_driver_name = "rsa-caam",
		.cra_priority = 3000,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct caam_rsa_ctx),
	},
};
/* Public Key Cryptography module initialization handler */
static int __init caam_pkc_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	u32 pk_inst;
	int err;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv) {
		err = -ENODEV;
		goto out_put_dev;
	}

	/* Determine public key hardware accelerator presence. */
	if (priv->era < 10)
		pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			   CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
	else
		pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK;

	/* Do not register algorithms if PKHA is not present. */
	if (!pk_inst) {
		err = -ENODEV;
		goto out_put_dev;
	}

	err = crypto_register_akcipher(&caam_rsa);
	if (err)
		dev_warn(ctrldev, "%s alg registration failed\n",
			 caam_rsa.base.cra_driver_name);
	else
		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");

out_put_dev:
	put_device(ctrldev);
	return err;
}
static void __exit caam_pkc_exit(void)
{
	crypto_unregister_akcipher(&caam_rsa);
}
module_init(caam_pkc_init);
module_exit(caam_pkc_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor");