/*
 * caam - Freescale FSL CAAM support for Public Key Cryptography
 *
 * Copyright 2016 Freescale Semiconductor, Inc.
 *
 * There is no Shared Descriptor for PKC so the Job Descriptor must carry
 * all the desired key parameters, input and output pointers.
 */
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "error.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"
#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
#define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f1_pdb))
#define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f2_pdb))
#define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f3_pdb))
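
/* Unmap a request's source/destination scatterlists and its SEC4 S/G table */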
static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
			 struct akcipher_request *req)
{
	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
				 DMA_TO_DEVICE);
}
static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
			  struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
}
static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
}
static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
}
static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
}
/* RSA Job Completion handler */
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_pub_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}
static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f1_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}
static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f2_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}
static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f3_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}
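
/*
 * Allocate the extended descriptor that holds the HW job descriptor, the
 * protocol data block and, when a scatterlist has more than one entry, a
 * SEC4 S/G table, and DMA-map the request's source/destination buffers.
 */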
static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
					 size_t desclen)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	struct rsa_edesc *edesc;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int sgc;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	int src_nents, dst_nents;

	src_nents = sg_nents_for_len(req->src, req->src_len);
	dst_nents = sg_nents_for_len(req->dst, req->dst_len);

	if (src_nents > 1)
		sec4_sg_len = src_nents;
	if (dst_nents > 1)
		sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc, hw desc commands and link tables */
	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc)
		return ERR_PTR(-ENOMEM);

	sgc = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
	if (unlikely(!sgc)) {
		dev_err(dev, "unable to map source\n");
		goto src_fail;
	}

	sgc = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
	if (unlikely(!sgc)) {
		dev_err(dev, "unable to map destination\n");
		goto dst_fail;
	}

	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;

	sec4_sg_index = 0;
	if (src_nents > 1) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		sec4_sg_index += src_nents;
	}
	if (dst_nents > 1)
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);

	/* Save nents for later use in Job Descriptor */
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;

	if (!sec4_sg_bytes)
		return edesc;

	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		goto sec4_sg_fail;
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	return edesc;

sec4_sg_fail:
	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
dst_fail:
	dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
src_fail:
	kfree(edesc);
	return ERR_PTR(-ENOMEM);
}
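
/* Fill the RSA_PUB protocol data block: modulus n, exponent e, input f, output g */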
static int set_rsa_pub_pdb(struct akcipher_request *req,
			   struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map RSA modulus memory\n");
		return -ENOMEM;
	}

	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->e_dma)) {
		dev_err(dev, "Unable to map RSA public exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->f_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->g_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
	pdb->f_len = req->src_len;

	return 0;
}
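
/* Fill the RSA private-key form 1 PDB: modulus n and private exponent d */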
static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map modulus memory\n");
		return -ENOMEM;
	}

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->g_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;

	return 0;
}
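
/* Fill the RSA private-key form 2 PDB: d, p, q plus the tmp1/tmp2 scratch buffers */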
static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		return -ENOMEM;
	}

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		goto unmap_d;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_q;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->g_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
unmap_d:
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}
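
/* Fill the RSA private-key form 3 (CRT) PDB: p, q, dP, dQ, qInv plus scratch buffers */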
static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		return -ENOMEM;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dp_dma)) {
		dev_err(dev, "Unable to map RSA exponent dp memory\n");
		goto unmap_q;
	}

	pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dq_dma)) {
		dev_err(dev, "Unable to map RSA exponent dq memory\n");
		goto unmap_dp;
	}

	pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->c_dma)) {
		dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
		goto unmap_dq;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_qinv;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->g_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
unmap_qinv:
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
unmap_dp:
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}
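
/* RSA public-key operation: build the job descriptor and enqueue it on the Job Ring */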
static int caam_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	if (unlikely(!key->n || !key->e))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(jrdev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Encrypt Protocol Data Block */
	ret = set_rsa_pub_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_pub_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}
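
/* RSA private-key decryption using key form 1 (n, d) */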
static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
	ret = set_rsa_priv_f1_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f1_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f1_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}
static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
	ret = set_rsa_priv_f2_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f2_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}
static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
	ret = set_rsa_priv_f3_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f3_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}
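
/* RSA private-key operation: dispatch to the path matching the stored key form */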
static int caam_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	int ret;

	if (unlikely(!key->n || !key->d))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(ctx->dev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	if (key->priv_form == FORM3)
		ret = caam_rsa_dec_priv_f3(req);
	else if (key->priv_form == FORM2)
		ret = caam_rsa_dec_priv_f2(req);
	else
		ret = caam_rsa_dec_priv_f1(req);

	return ret;
}
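
/* Zeroize and free all key material held in a caam_rsa_key */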
static void caam_rsa_free_key(struct caam_rsa_key *key)
{
	kzfree(key->d);
	kzfree(key->p);
	kzfree(key->q);
	kzfree(key->dp);
	kzfree(key->dq);
	kzfree(key->qinv);
	kzfree(key->tmp1);
	kzfree(key->tmp2);
	kfree(key->e);
	kfree(key->n);
	memset(key, 0, sizeof(*key));
}
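
/* Advance past leading zero bytes so only the significant part of the integer is used */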
static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
{
	while (!**ptr && *nbytes) {
		(*ptr)++;
		(*nbytes)--;
	}
}
/**
 * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
 * dP, dQ and qInv could decode to less than corresponding p, q length, as the
 * BER-encoding requires that the minimum number of bytes be used to encode the
 * integer. dP, dQ, qInv decoded values have to be zero-padded to appropriate
 * length.
 *
 * @ptr   : pointer to {dP, dQ, qInv} CRT member
 * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
 * @dstlen: length in bytes of corresponding p or q prime factor
 */
static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
{
	u8 *dst;

	caam_rsa_drop_leading_zeros(&ptr, &nbytes);
	if (!nbytes)
		return NULL;

	dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
	if (!dst)
		return NULL;

	memcpy(dst + (dstlen - nbytes), ptr, nbytes);

	return dst;
}
/**
 * caam_read_raw_data - Read a raw byte stream as a positive integer.
 * The function skips buffer's leading zeros, copies the remaining data
 * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
 * the address of the new buffer.
 *
 * @buf   : The data to read
 * @nbytes: The amount of data to read
 */
static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
	u8 *val;

	caam_rsa_drop_leading_zeros(&buf, nbytes);
	if (!*nbytes)
		return NULL;

	val = kzalloc(*nbytes, GFP_DMA | GFP_KERNEL);
	if (!val)
		return NULL;

	memcpy(val, buf, *nbytes);

	return val;
}
static int caam_rsa_check_key_length(unsigned int len)
{
	/* CAAM PKHA supports RSA moduli up to 4096 bits */
	if (len > 4096)
		return -EINVAL;
	return 0;
}
static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
				unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_pub_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);

	return 0;
err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}
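
/*
 * Try to upgrade the private key to form 2 (d, p, q) and then to form 3 (CRT).
 * Any allocation failure simply leaves the key at the highest form already
 * reached; form 1 always remains usable.
 */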
static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
				       struct rsa_key *raw_key)
{
	struct caam_rsa_key *rsa_key = &ctx->key;
	size_t p_sz = raw_key->p_sz;
	size_t q_sz = raw_key->q_sz;

	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
	if (!rsa_key->p)
		return;
	rsa_key->p_sz = p_sz;

	rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
	if (!rsa_key->q)
		goto free_p;
	rsa_key->q_sz = q_sz;

	rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp1)
		goto free_q;

	rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp2)
		goto free_tmp1;

	rsa_key->priv_form = FORM2;

	rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
	if (!rsa_key->dp)
		goto free_tmp2;

	rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
	if (!rsa_key->dq)
		goto free_dp;

	rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
					  p_sz);
	if (!rsa_key->qinv)
		goto free_dq;

	rsa_key->priv_form = FORM3;

	return;

free_dq:
	kzfree(rsa_key->dq);
free_dp:
	kzfree(rsa_key->dp);
free_tmp2:
	kzfree(rsa_key->tmp2);
free_tmp1:
	kzfree(rsa_key->tmp1);
free_q:
	kzfree(rsa_key->q);
free_p:
	kzfree(rsa_key->p);
}
static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
				 unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_priv_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->d = kzalloc(raw_key.d_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->d)
		goto err;

	rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->d_sz = raw_key.d_sz;
	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
	memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);

	caam_rsa_set_priv_key_form(ctx, &raw_key);

	return 0;

err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}
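
/* Report the RSA modulus size as the maximum input/output size for this key */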
static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	return ctx->key.n_sz;
}
/* Per session pkc's driver context creation function */
static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	ctx->dev = caam_jr_alloc();

	if (IS_ERR(ctx->dev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->dev);
	}

	return 0;
}
/* Per session pkc's driver context cleanup function */
static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;

	caam_rsa_free_key(key);
	caam_jr_free(ctx->dev);
}
static struct akcipher_alg caam_rsa = {
	.encrypt = caam_rsa_enc,
	.decrypt = caam_rsa_dec,
	.sign = caam_rsa_dec,
	.verify = caam_rsa_enc,
	.set_pub_key = caam_rsa_set_pub_key,
	.set_priv_key = caam_rsa_set_priv_key,
	.max_size = caam_rsa_max_size,
	.init = caam_rsa_init_tfm,
	.exit = caam_rsa_exit_tfm,
	.base = {
		.cra_name = "rsa",
		.cra_driver_name = "rsa-caam",
		.cra_priority = 3000,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct caam_rsa_ctx),
	},
};
/* Public Key Cryptography module initialization handler */
static int __init caam_pkc_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	u32 cha_inst, pk_inst;
	int err;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	/* Determine public key hardware accelerator presence. */
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
	pk_inst = (cha_inst & CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;

	/* Do not register algorithms if PKHA is not present. */
	if (!pk_inst)
		return -ENODEV;

	err = crypto_register_akcipher(&caam_rsa);
	if (err)
		dev_warn(ctrldev, "%s alg registration failed\n",
			 caam_rsa.base.cra_driver_name);
	else
		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");

	return err;
}
static void __exit caam_pkc_exit(void)
{
	crypto_unregister_akcipher(&caam_rsa);
}

module_init(caam_pkc_init);
module_exit(caam_pkc_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor");