/*
 * caam - Freescale FSL CAAM support for Public Key Cryptography
 *
 * Copyright 2016 Freescale Semiconductor, Inc.
 *
 * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
 * all the desired key parameters, input and output pointers.
 */
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "error.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"
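/*
 * The descriptor length macros below size the complete job descriptor for
 * each RSA operation: the 2 * CAAM_CMD_SZ term is presumably the descriptor
 * header plus the PROTOCOL OPERATION command emitted by the init_rsa_*_desc()
 * helpers, with the corresponding protocol data block (PDB) carried in
 * between.
 */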
#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
#define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f1_pdb))
#define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f2_pdb))
#define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f3_pdb))
static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
                         struct akcipher_request *req)
{
        dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
        dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

        if (edesc->sec4_sg_bytes)
                dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
                                 DMA_TO_DEVICE);
}
static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
                          struct akcipher_request *req)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct caam_rsa_key *key = &ctx->key;
        struct rsa_pub_pdb *pdb = &edesc->pdb.pub;

        dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
        dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
}
static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
                              struct akcipher_request *req)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct caam_rsa_key *key = &ctx->key;
        struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;

        dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
        dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
}
static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
                              struct akcipher_request *req)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct caam_rsa_key *key = &ctx->key;
        struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
        size_t p_sz = key->p_sz;
        size_t q_sz = key->q_sz;

        dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
        dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
        dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
        dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
        dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}
static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
                              struct akcipher_request *req)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct caam_rsa_key *key = &ctx->key;
        struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
        size_t p_sz = key->p_sz;
        size_t q_sz = key->q_sz;

        dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
        dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
        dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
        dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
        dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
        dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
        dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}
/* RSA Job Completion handler */
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
        struct akcipher_request *req = context;
        struct rsa_edesc *edesc;

        if (err)
                caam_jr_strstatus(dev, err);

        edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

        rsa_pub_unmap(dev, edesc, req);
        rsa_io_unmap(dev, edesc, req);
        kfree(edesc);

        akcipher_request_complete(req, err);
}
static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
                             void *context)
{
        struct akcipher_request *req = context;
        struct rsa_edesc *edesc;

        if (err)
                caam_jr_strstatus(dev, err);

        edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

        rsa_priv_f1_unmap(dev, edesc, req);
        rsa_io_unmap(dev, edesc, req);
        kfree(edesc);

        akcipher_request_complete(req, err);
}
static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
                             void *context)
{
        struct akcipher_request *req = context;
        struct rsa_edesc *edesc;

        if (err)
                caam_jr_strstatus(dev, err);

        edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

        rsa_priv_f2_unmap(dev, edesc, req);
        rsa_io_unmap(dev, edesc, req);
        kfree(edesc);

        akcipher_request_complete(req, err);
}
static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
                             void *context)
{
        struct akcipher_request *req = context;
        struct rsa_edesc *edesc;

        if (err)
                caam_jr_strstatus(dev, err);

        edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

        rsa_priv_f3_unmap(dev, edesc, req);
        rsa_io_unmap(dev, edesc, req);
        kfree(edesc);

        akcipher_request_complete(req, err);
}
/* Count the leading zero bytes of the scatterlist data, without copying it. */
static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
                                        unsigned int nbytes,
                                        unsigned int flags)
{
        struct sg_mapping_iter miter;
        int lzeros, ents;
        unsigned int len;
        unsigned int tbytes = nbytes;
        const u8 *buff;

        ents = sg_nents_for_len(sgl, nbytes);
        if (ents < 0)
                return ents;

        sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);

        lzeros = 0;
        len = 0;
        while (nbytes > 0) {
                while (len && !*buff) {
                        lzeros++;
                        len--;
                        buff++;
                }

                if (len && *buff)
                        break;

                sg_miter_next(&miter);
                buff = miter.addr;
                len = miter.length;

                nbytes -= lzeros;
                lzeros = 0;
        }

        miter.consumed = lzeros;
        sg_miter_stop(&miter);
        nbytes -= lzeros;

        return tbytes - nbytes;
}
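/*
 * rsa_edesc_alloc() below packs everything needed for one request into a
 * single allocation. A rough sketch of the resulting layout (the exact sizes
 * depend on the requested descriptor length and on whether scatter/gather
 * tables are needed at all):
 *
 *   +------------------+----------------------+-----------------------+
 *   | struct rsa_edesc | hw_desc[] (desclen)  | sec4_sg link table    |
 *   +------------------+----------------------+-----------------------+
 *
 * which is why edesc->sec4_sg is pointed at
 * (void *)edesc + sizeof(*edesc) + desclen further down.
 */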
static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
                                         size_t desclen)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct device *dev = ctx->dev;
        struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
        struct rsa_edesc *edesc;
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC;
        int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
        int sgc;
        int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
        int src_nents, dst_nents;
        int lzeros;

        lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len, sg_flags);
        if (lzeros < 0)
                return ERR_PTR(lzeros);

        req->src_len -= lzeros;
        req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros);

        src_nents = sg_nents_for_len(req->src, req->src_len);
        dst_nents = sg_nents_for_len(req->dst, req->dst_len);

        if (src_nents > 1)
                sec4_sg_len = src_nents;
        if (dst_nents > 1)
                sec4_sg_len += dst_nents;

        sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

        /* allocate space for base edesc, hw desc commands and link tables */
        edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
                        GFP_DMA | flags);
        if (!edesc)
                return ERR_PTR(-ENOMEM);

        sgc = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
        if (unlikely(!sgc)) {
                dev_err(dev, "unable to map source\n");
                goto src_fail;
        }

        sgc = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
        if (unlikely(!sgc)) {
                dev_err(dev, "unable to map destination\n");
                goto dst_fail;
        }

        edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;

        sec4_sg_index = 0;
        if (src_nents > 1) {
                sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
                sec4_sg_index += src_nents;
        }
        if (dst_nents > 1)
                sg_to_sec4_sg_last(req->dst, dst_nents,
                                   edesc->sec4_sg + sec4_sg_index, 0);

        /* Save nents for later use in Job Descriptor */
        edesc->src_nents = src_nents;
        edesc->dst_nents = dst_nents;

        if (!sec4_sg_bytes)
                return edesc;

        edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
                                            sec4_sg_bytes, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
                dev_err(dev, "unable to map S/G table\n");
                goto sec4_sg_fail;
        }

        edesc->sec4_sg_bytes = sec4_sg_bytes;

        return edesc;

sec4_sg_fail:
        dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
dst_fail:
        dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
src_fail:
        kfree(edesc);
        return ERR_PTR(-ENOMEM);
}
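/*
 * The set_rsa_*_pdb() helpers below fill the protocol data block that is
 * carried inside the job descriptor. Note that the "sgf" word does double
 * duty: it combines the scatter/gather flags (RSA_PDB_SGF_* or
 * RSA_PRIV_PDB_SGF_*) with the length fields, e.g. e_sz and n_sz for the
 * public-key PDB or d_sz and n_sz for the private-key forms.
 */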
static int set_rsa_pub_pdb(struct akcipher_request *req,
                           struct rsa_edesc *edesc)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct caam_rsa_key *key = &ctx->key;
        struct device *dev = ctx->dev;
        struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
        int sec4_sg_index = 0;

        pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, pdb->n_dma)) {
                dev_err(dev, "Unable to map RSA modulus memory\n");
                return -ENOMEM;
        }

        pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, pdb->e_dma)) {
                dev_err(dev, "Unable to map RSA public exponent memory\n");
                dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
                return -ENOMEM;
        }

        if (edesc->src_nents > 1) {
                pdb->sgf |= RSA_PDB_SGF_F;
                pdb->f_dma = edesc->sec4_sg_dma;
                sec4_sg_index += edesc->src_nents;
        } else {
                pdb->f_dma = sg_dma_address(req->src);
        }

        if (edesc->dst_nents > 1) {
                pdb->sgf |= RSA_PDB_SGF_G;
                pdb->g_dma = edesc->sec4_sg_dma +
                             sec4_sg_index * sizeof(struct sec4_sg_entry);
        } else {
                pdb->g_dma = sg_dma_address(req->dst);
        }

        pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
        pdb->f_len = req->src_len;

        return 0;
}
static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
                               struct rsa_edesc *edesc)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct caam_rsa_key *key = &ctx->key;
        struct device *dev = ctx->dev;
        struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
        int sec4_sg_index = 0;

        pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, pdb->n_dma)) {
                dev_err(dev, "Unable to map modulus memory\n");
                return -ENOMEM;
        }

        pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, pdb->d_dma)) {
                dev_err(dev, "Unable to map RSA private exponent memory\n");
                dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
                return -ENOMEM;
        }

        if (edesc->src_nents > 1) {
                pdb->sgf |= RSA_PRIV_PDB_SGF_G;
                pdb->g_dma = edesc->sec4_sg_dma;
                sec4_sg_index += edesc->src_nents;
        } else {
                pdb->g_dma = sg_dma_address(req->src);
        }

        if (edesc->dst_nents > 1) {
                pdb->sgf |= RSA_PRIV_PDB_SGF_F;
                pdb->f_dma = edesc->sec4_sg_dma +
                             sec4_sg_index * sizeof(struct sec4_sg_entry);
        } else {
                pdb->f_dma = sg_dma_address(req->dst);
        }

        pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;

        return 0;
}
static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
                               struct rsa_edesc *edesc)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct caam_rsa_key *key = &ctx->key;
        struct device *dev = ctx->dev;
        struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
        int sec4_sg_index = 0;
        size_t p_sz = key->p_sz;
        size_t q_sz = key->q_sz;

        pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, pdb->d_dma)) {
                dev_err(dev, "Unable to map RSA private exponent memory\n");
                return -ENOMEM;
        }

        pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, pdb->p_dma)) {
                dev_err(dev, "Unable to map RSA prime factor p memory\n");
                goto unmap_d;
        }

        pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, pdb->q_dma)) {
                dev_err(dev, "Unable to map RSA prime factor q memory\n");
                goto unmap_p;
        }

        pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, pdb->tmp1_dma)) {
                dev_err(dev, "Unable to map RSA tmp1 memory\n");
                goto unmap_q;
        }

        pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, pdb->tmp2_dma)) {
                dev_err(dev, "Unable to map RSA tmp2 memory\n");
                goto unmap_tmp1;
        }

        if (edesc->src_nents > 1) {
                pdb->sgf |= RSA_PRIV_PDB_SGF_G;
                pdb->g_dma = edesc->sec4_sg_dma;
                sec4_sg_index += edesc->src_nents;
        } else {
                pdb->g_dma = sg_dma_address(req->src);
        }

        if (edesc->dst_nents > 1) {
                pdb->sgf |= RSA_PRIV_PDB_SGF_F;
                pdb->f_dma = edesc->sec4_sg_dma +
                             sec4_sg_index * sizeof(struct sec4_sg_entry);
        } else {
                pdb->f_dma = sg_dma_address(req->dst);
        }

        pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
        pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

        return 0;

unmap_tmp1:
        dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_q:
        dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
        dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
unmap_d:
        dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);

        return -ENOMEM;
}
static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
                               struct rsa_edesc *edesc)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct caam_rsa_key *key = &ctx->key;
        struct device *dev = ctx->dev;
        struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
        int sec4_sg_index = 0;
        size_t p_sz = key->p_sz;
        size_t q_sz = key->q_sz;

        pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, pdb->p_dma)) {
                dev_err(dev, "Unable to map RSA prime factor p memory\n");
                return -ENOMEM;
        }

        pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, pdb->q_dma)) {
                dev_err(dev, "Unable to map RSA prime factor q memory\n");
                goto unmap_p;
        }

        pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, pdb->dp_dma)) {
                dev_err(dev, "Unable to map RSA exponent dp memory\n");
                goto unmap_q;
        }

        pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, pdb->dq_dma)) {
                dev_err(dev, "Unable to map RSA exponent dq memory\n");
                goto unmap_dp;
        }

        pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, pdb->c_dma)) {
                dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
                goto unmap_dq;
        }

        pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, pdb->tmp1_dma)) {
                dev_err(dev, "Unable to map RSA tmp1 memory\n");
                goto unmap_qinv;
        }

        pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, pdb->tmp2_dma)) {
                dev_err(dev, "Unable to map RSA tmp2 memory\n");
                goto unmap_tmp1;
        }

        if (edesc->src_nents > 1) {
                pdb->sgf |= RSA_PRIV_PDB_SGF_G;
                pdb->g_dma = edesc->sec4_sg_dma;
                sec4_sg_index += edesc->src_nents;
        } else {
                pdb->g_dma = sg_dma_address(req->src);
        }

        if (edesc->dst_nents > 1) {
                pdb->sgf |= RSA_PRIV_PDB_SGF_F;
                pdb->f_dma = edesc->sec4_sg_dma +
                             sec4_sg_index * sizeof(struct sec4_sg_entry);
        } else {
                pdb->f_dma = sg_dma_address(req->dst);
        }

        pdb->sgf |= key->n_sz;
        pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

        return 0;

unmap_tmp1:
        dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_qinv:
        dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
        dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
unmap_dp:
        dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
        dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
        dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);

        return -ENOMEM;
}
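/*
 * Request flow, common to encrypt and the three decrypt variants below:
 * allocate an extended descriptor, fill the matching protocol data block,
 * build the job descriptor and enqueue it on the job ring. On completion the
 * rsa_*_done() callback unmaps all DMA memory, frees the extended descriptor
 * and completes the akcipher request; on a setup error the same unmapping is
 * done synchronously before returning.
 */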
static int caam_rsa_enc(struct akcipher_request *req)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct caam_rsa_key *key = &ctx->key;
        struct device *jrdev = ctx->dev;
        struct rsa_edesc *edesc;
        int ret;

        if (unlikely(!key->n || !key->e))
                return -EINVAL;

        if (req->dst_len < key->n_sz) {
                req->dst_len = key->n_sz;
                dev_err(jrdev, "Output buffer length less than parameter n\n");
                return -EOVERFLOW;
        }

        /* Allocate extended descriptor */
        edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);

        /* Set RSA Encrypt Protocol Data Block */
        ret = set_rsa_pub_pdb(req, edesc);
        if (ret)
                goto init_fail;

        /* Initialize Job Descriptor */
        init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

        ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req);
        if (!ret)
                return -EINPROGRESS;

        rsa_pub_unmap(jrdev, edesc, req);

init_fail:
        rsa_io_unmap(jrdev, edesc, req);
        kfree(edesc);
        return ret;
}
static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct device *jrdev = ctx->dev;
        struct rsa_edesc *edesc;
        int ret;

        /* Allocate extended descriptor */
        edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);

        /* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
        ret = set_rsa_priv_f1_pdb(req, edesc);
        if (ret)
                goto init_fail;

        /* Initialize Job Descriptor */
        init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);

        ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f1_done, req);
        if (!ret)
                return -EINPROGRESS;

        rsa_priv_f1_unmap(jrdev, edesc, req);

init_fail:
        rsa_io_unmap(jrdev, edesc, req);
        kfree(edesc);
        return ret;
}
static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct device *jrdev = ctx->dev;
        struct rsa_edesc *edesc;
        int ret;

        /* Allocate extended descriptor */
        edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);

        /* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
        ret = set_rsa_priv_f2_pdb(req, edesc);
        if (ret)
                goto init_fail;

        /* Initialize Job Descriptor */
        init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);

        ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
        if (!ret)
                return -EINPROGRESS;

        rsa_priv_f2_unmap(jrdev, edesc, req);

init_fail:
        rsa_io_unmap(jrdev, edesc, req);
        kfree(edesc);
        return ret;
}
static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct device *jrdev = ctx->dev;
        struct rsa_edesc *edesc;
        int ret;

        /* Allocate extended descriptor */
        edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);

        /* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
        ret = set_rsa_priv_f3_pdb(req, edesc);
        if (ret)
                goto init_fail;

        /* Initialize Job Descriptor */
        init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);

        ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req);
        if (!ret)
                return -EINPROGRESS;

        rsa_priv_f3_unmap(jrdev, edesc, req);

init_fail:
        rsa_io_unmap(jrdev, edesc, req);
        kfree(edesc);
        return ret;
}
static int caam_rsa_dec(struct akcipher_request *req)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct caam_rsa_key *key = &ctx->key;
        int ret;

        if (unlikely(!key->n || !key->d))
                return -EINVAL;

        if (req->dst_len < key->n_sz) {
                req->dst_len = key->n_sz;
                dev_err(ctx->dev, "Output buffer length less than parameter n\n");
                return -EOVERFLOW;
        }

        if (key->priv_form == FORM3)
                ret = caam_rsa_dec_priv_f3(req);
        else if (key->priv_form == FORM2)
                ret = caam_rsa_dec_priv_f2(req);
        else
                ret = caam_rsa_dec_priv_f1(req);

        return ret;
}
static void caam_rsa_free_key(struct caam_rsa_key *key)
{
        kzfree(key->d);
        kzfree(key->p);
        kzfree(key->q);
        kzfree(key->dp);
        kzfree(key->dq);
        kzfree(key->qinv);
        kzfree(key->tmp1);
        kzfree(key->tmp2);
        kfree(key->e);
        kfree(key->n);
        memset(key, 0, sizeof(*key));
}
static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
{
        while (!**ptr && *nbytes) {
                (*ptr)++;
                (*nbytes)--;
        }
}
/**
 * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
 * dP, dQ and qInv could decode to less than corresponding p, q length, as the
 * BER-encoding requires that the minimum number of bytes be used to encode the
 * integer. dP, dQ, qInv decoded values have to be zero-padded to appropriate
 * length.
 *
 * @ptr   : pointer to {dP, dQ, qInv} CRT member
 * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
 * @dstlen: length in bytes of corresponding p or q prime factor
 */
static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
{
        u8 *dst;

        caam_rsa_drop_leading_zeros(&ptr, &nbytes);
        if (!nbytes)
                return NULL;

        dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
        if (!dst)
                return NULL;

        memcpy(dst + (dstlen - nbytes), ptr, nbytes);

        return dst;
}
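/*
 * Example of the zero-padding done by caam_read_rsa_crt() above: a dP value
 * that BER-decodes to 255 bytes for a 256-byte prime p is copied into the
 * last 255 bytes of a zeroed 256-byte buffer, so the hardware always sees
 * CRT members with the same length as the corresponding prime factor.
 */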
/**
 * caam_read_raw_data - Read a raw byte stream as a positive integer.
 * The function skips buffer's leading zeros, copies the remaining data
 * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
 * the address of the new buffer.
 *
 * @buf   : The data to read
 * @nbytes: The amount of data to read
 */
static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
        caam_rsa_drop_leading_zeros(&buf, nbytes);
        if (!*nbytes)
                return NULL;

        return kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL);
}
static int caam_rsa_check_key_length(unsigned int len)
{
        if (len > 4096)
                return -EINVAL;
        return 0;
}
static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
                                unsigned int keylen)
{
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct rsa_key raw_key = {NULL};
        struct caam_rsa_key *rsa_key = &ctx->key;
        int ret;

        /* Free the old RSA key if any */
        caam_rsa_free_key(rsa_key);

        ret = rsa_parse_pub_key(&raw_key, key, keylen);
        if (ret)
                return ret;

        /* Copy key in DMA zone */
        rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
        if (!rsa_key->e)
                goto err;

        /*
         * Skip leading zeros and copy the positive integer to a buffer
         * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
         * expects a positive integer for the RSA modulus and uses its length as
         * decryption output length.
         */
        rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
        if (!rsa_key->n)
                goto err;

        if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
                caam_rsa_free_key(rsa_key);
                return -EINVAL;
        }

        rsa_key->e_sz = raw_key.e_sz;
        rsa_key->n_sz = raw_key.n_sz;

        memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);

        return 0;
err:
        caam_rsa_free_key(rsa_key);
        return -ENOMEM;
}
static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
                                       struct rsa_key *raw_key)
{
        struct caam_rsa_key *rsa_key = &ctx->key;
        size_t p_sz = raw_key->p_sz;
        size_t q_sz = raw_key->q_sz;

        rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
        if (!rsa_key->p)
                return;
        rsa_key->p_sz = p_sz;

        rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
        if (!rsa_key->q)
                goto free_p;
        rsa_key->q_sz = q_sz;

        rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
        if (!rsa_key->tmp1)
                goto free_q;

        rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
        if (!rsa_key->tmp2)
                goto free_tmp1;

        rsa_key->priv_form = FORM2;

        rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
        if (!rsa_key->dp)
                return;

        rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
        if (!rsa_key->dq)
                goto free_dp;

        rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
                                          q_sz);
        if (!rsa_key->qinv)
                goto free_dq;

        rsa_key->priv_form = FORM3;

        return;

free_dq:
        kzfree(rsa_key->dq);
free_dp:
        kzfree(rsa_key->dp);
        return;

free_tmp2:
        kzfree(rsa_key->tmp2);
free_tmp1:
        kzfree(rsa_key->tmp1);
free_q:
        kzfree(rsa_key->q);
free_p:
        kzfree(rsa_key->p);
}
static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
                                 unsigned int keylen)
{
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct rsa_key raw_key = {NULL};
        struct caam_rsa_key *rsa_key = &ctx->key;
        int ret;

        /* Free the old RSA key if any */
        caam_rsa_free_key(rsa_key);

        ret = rsa_parse_priv_key(&raw_key, key, keylen);
        if (ret)
                return ret;

        /* Copy key in DMA zone */
        rsa_key->d = kzalloc(raw_key.d_sz, GFP_DMA | GFP_KERNEL);
        if (!rsa_key->d)
                goto err;

        rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
        if (!rsa_key->e)
                goto err;

        /*
         * Skip leading zeros and copy the positive integer to a buffer
         * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
         * expects a positive integer for the RSA modulus and uses its length as
         * decryption output length.
         */
        rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
        if (!rsa_key->n)
                goto err;

        if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
                caam_rsa_free_key(rsa_key);
                return -EINVAL;
        }

        rsa_key->d_sz = raw_key.d_sz;
        rsa_key->e_sz = raw_key.e_sz;
        rsa_key->n_sz = raw_key.n_sz;

        memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
        memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);

        caam_rsa_set_priv_key_form(ctx, &raw_key);

        return 0;

err:
        caam_rsa_free_key(rsa_key);
        return -ENOMEM;
}
static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
{
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

        return ctx->key.n_sz;
}
/* Per session pkc's driver context creation function */
static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
{
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

        ctx->dev = caam_jr_alloc();

        if (IS_ERR(ctx->dev)) {
                pr_err("Job Ring Device allocation for transform failed\n");
                return PTR_ERR(ctx->dev);
        }

        return 0;
}
/* Per session pkc's driver context cleanup function */
static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct caam_rsa_key *key = &ctx->key;

        caam_rsa_free_key(key);
        caam_jr_free(ctx->dev);
}
static struct akcipher_alg caam_rsa = {
        .encrypt = caam_rsa_enc,
        .decrypt = caam_rsa_dec,
        .sign = caam_rsa_dec,
        .verify = caam_rsa_enc,
        .set_pub_key = caam_rsa_set_pub_key,
        .set_priv_key = caam_rsa_set_priv_key,
        .max_size = caam_rsa_max_size,
        .init = caam_rsa_init_tfm,
        .exit = caam_rsa_exit_tfm,
        .reqsize = sizeof(struct caam_rsa_req_ctx),
        .base = {
                .cra_name = "rsa",
                .cra_driver_name = "rsa-caam",
                .cra_priority = 3000,
                .cra_module = THIS_MODULE,
                .cra_ctxsize = sizeof(struct caam_rsa_ctx),
        },
};
/* Public Key Cryptography module initialization handler */
static int __init caam_pkc_init(void)
{
        struct device_node *dev_node;
        struct platform_device *pdev;
        struct device *ctrldev;
        struct caam_drv_private *priv;
        u32 cha_inst, pk_inst;
        int err;

        dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
        if (!dev_node) {
                dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
                if (!dev_node)
                        return -ENODEV;
        }

        pdev = of_find_device_by_node(dev_node);
        if (!pdev) {
                of_node_put(dev_node);
                return -ENODEV;
        }

        ctrldev = &pdev->dev;
        priv = dev_get_drvdata(ctrldev);
        of_node_put(dev_node);

        /*
         * If priv is NULL, it's probably because the caam driver wasn't
         * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
         */
        if (!priv)
                return -ENODEV;

        /* Determine public key hardware accelerator presence. */
        cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
        pk_inst = (cha_inst & CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;

        /* Do not register algorithms if PKHA is not present. */
        if (!pk_inst)
                return -ENODEV;

        err = crypto_register_akcipher(&caam_rsa);
        if (err)
                dev_warn(ctrldev, "%s alg registration failed\n",
                         caam_rsa.base.cra_driver_name);
        else
                dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");

        return err;
}
static void __exit caam_pkc_exit(void)
{
        crypto_unregister_akcipher(&caam_rsa);
}
module_init(caam_pkc_init);
module_exit(caam_pkc_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor");