drivers/crypto/caam/caampkc.c

/*
 * caam - Freescale FSL CAAM support for Public Key Cryptography
 *
 * Copyright 2016 Freescale Semiconductor, Inc.
 *
 * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
 * all the desired key parameters, input and output pointers.
 */

#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "error.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"

#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
#define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f1_pdb))
#define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f2_pdb))
#define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f3_pdb))

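/*
 * DMA unmap helpers: rsa_io_unmap() undoes the request's source/destination
 * scatterlist mappings and the sec4 S/G table, while the rsa_*_unmap()
 * helpers below undo the key material mappings set up by the matching
 * set_rsa_*_pdb() routine.
 */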
static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
			 struct akcipher_request *req)
{
	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
			  struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
}

/* RSA Job Completion handler */
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_pub_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f1_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f2_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f3_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

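/*
 * Allocate the extended descriptor: the rsa_edesc structure itself, followed
 * in the same allocation by the hardware job descriptor and, when the source
 * or destination scatterlist has more than one entry, a sec4 S/G link table.
 * The source and destination scatterlists are DMA mapped here as well.
 */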
static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
					 size_t desclen)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	struct rsa_edesc *edesc;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int sgc;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	int src_nents, dst_nents;

	src_nents = sg_nents_for_len(req->src, req->src_len);
	dst_nents = sg_nents_for_len(req->dst, req->dst_len);

	if (src_nents > 1)
		sec4_sg_len = src_nents;
	if (dst_nents > 1)
		sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc, hw desc commands and link tables */
	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc)
		return ERR_PTR(-ENOMEM);

	sgc = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
	if (unlikely(!sgc)) {
		dev_err(dev, "unable to map source\n");
		goto src_fail;
	}

	sgc = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
	if (unlikely(!sgc)) {
		dev_err(dev, "unable to map destination\n");
		goto dst_fail;
	}

	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;

	sec4_sg_index = 0;
	if (src_nents > 1) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		sec4_sg_index += src_nents;
	}
	if (dst_nents > 1)
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);

	/* Save nents for later use in Job Descriptor */
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;

	if (!sec4_sg_bytes)
		return edesc;

	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		goto sec4_sg_fail;
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	return edesc;

sec4_sg_fail:
	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
dst_fail:
	dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
src_fail:
	kfree(edesc);
	return ERR_PTR(-ENOMEM);
}

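/* Fill the PDB for an RSA public key (encrypt) operation. */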
static int set_rsa_pub_pdb(struct akcipher_request *req,
			   struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map RSA modulus memory\n");
		return -ENOMEM;
	}

	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->e_dma)) {
		dev_err(dev, "Unable to map RSA public exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->f_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->g_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
	pdb->f_len = req->src_len;

	return 0;
}

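/* Fill the PDB for an RSA private key form 1 (n, d) operation. */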
static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map modulus memory\n");
		return -ENOMEM;
	}

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->g_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;

	return 0;
}

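/* Fill the PDB for an RSA private key form 2 (d, p, q) operation. */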
static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		return -ENOMEM;
	}

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		goto unmap_d;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_q;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->g_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
unmap_d:
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

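/* Fill the PDB for an RSA private key form 3 (CRT) operation. */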
static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		return -ENOMEM;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dp_dma)) {
		dev_err(dev, "Unable to map RSA exponent dp memory\n");
		goto unmap_q;
	}

	pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dq_dma)) {
		dev_err(dev, "Unable to map RSA exponent dq memory\n");
		goto unmap_dp;
	}

	pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->c_dma)) {
		dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
		goto unmap_dq;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_qinv;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->g_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
unmap_qinv:
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
unmap_dp:
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

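/* akcipher .encrypt callback: RSA public key operation on the CAAM. */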
static int caam_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	if (unlikely(!key->n || !key->e))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(jrdev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Encrypt Protocol Data Block */
	ret = set_rsa_pub_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_pub_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
	ret = set_rsa_priv_f1_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f1_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f1_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
	ret = set_rsa_priv_f2_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f2_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
	ret = set_rsa_priv_f3_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f3_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

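/*
 * akcipher .decrypt callback: dispatch to the job descriptor matching the
 * stored private key form (form 3/CRT first, then form 2, then form 1).
 */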
static int caam_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	int ret;

	if (unlikely(!key->n || !key->d))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(ctx->dev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	if (key->priv_form == FORM3)
		ret = caam_rsa_dec_priv_f3(req);
	else if (key->priv_form == FORM2)
		ret = caam_rsa_dec_priv_f2(req);
	else
		ret = caam_rsa_dec_priv_f1(req);

	return ret;
}

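/* Free the RSA key material; the private components are zeroized first. */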
static void caam_rsa_free_key(struct caam_rsa_key *key)
{
	kzfree(key->d);
	kzfree(key->p);
	kzfree(key->q);
	kzfree(key->dp);
	kzfree(key->dq);
	kzfree(key->qinv);
	kzfree(key->tmp1);
	kzfree(key->tmp2);
	kfree(key->e);
	kfree(key->n);
	memset(key, 0, sizeof(*key));
}

static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
{
	while (!**ptr && *nbytes) {
		(*ptr)++;
		(*nbytes)--;
	}
}

/**
 * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
 * dP, dQ and qInv could decode to less than corresponding p, q length, as the
 * BER-encoding requires that the minimum number of bytes be used to encode the
 * integer. dP, dQ, qInv decoded values have to be zero-padded to appropriate
 * length.
 *
 * @ptr   : pointer to {dP, dQ, qInv} CRT member
 * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
 * @dstlen: length in bytes of corresponding p or q prime factor
 */
static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
{
	u8 *dst;

	caam_rsa_drop_leading_zeros(&ptr, &nbytes);
	if (!nbytes)
		return NULL;

	dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
	if (!dst)
		return NULL;

	memcpy(dst + (dstlen - nbytes), ptr, nbytes);

	return dst;
}

/**
 * caam_read_raw_data - Read a raw byte stream as a positive integer.
 * The function skips buffer's leading zeros, copies the remaining data
 * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
 * the address of the new buffer.
 *
 * @buf   : The data to read
 * @nbytes: The amount of data to read
 */
static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
	u8 *val;

	caam_rsa_drop_leading_zeros(&buf, nbytes);
	if (!*nbytes)
		return NULL;

	val = kzalloc(*nbytes, GFP_DMA | GFP_KERNEL);
	if (!val)
		return NULL;

	memcpy(val, buf, *nbytes);

	return val;
}

static int caam_rsa_check_key_length(unsigned int len)
{
	if (len > 4096)
		return -EINVAL;
	return 0;
}

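/*
 * akcipher .set_pub_key callback: parse the encoded public key and copy
 * e and n into GFP_DMA buffers usable by the CAAM.
 */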
static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
				unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_pub_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);

	return 0;
err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

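/*
 * Pick the private key form: stay with form 1 (n, d) if p/q cannot be copied,
 * upgrade to form 2 once p, q and the tmp1/tmp2 scratch buffers are set up,
 * and to form 3 once the CRT members dP, dQ and qInv are set up as well.
 */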
static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
				       struct rsa_key *raw_key)
{
	struct caam_rsa_key *rsa_key = &ctx->key;
	size_t p_sz = raw_key->p_sz;
	size_t q_sz = raw_key->q_sz;

	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
	if (!rsa_key->p)
		return;
	rsa_key->p_sz = p_sz;

	rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
	if (!rsa_key->q)
		goto free_p;
	rsa_key->q_sz = q_sz;

	rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp1)
		goto free_q;

	rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp2)
		goto free_tmp1;

	rsa_key->priv_form = FORM2;

	rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
	if (!rsa_key->dp)
		goto free_tmp2;

	rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
	if (!rsa_key->dq)
		goto free_dp;

	rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
					  q_sz);
	if (!rsa_key->qinv)
		goto free_dq;

	rsa_key->priv_form = FORM3;

	return;

free_dq:
	kzfree(rsa_key->dq);
free_dp:
	kzfree(rsa_key->dp);
free_tmp2:
	kzfree(rsa_key->tmp2);
free_tmp1:
	kzfree(rsa_key->tmp1);
free_q:
	kzfree(rsa_key->q);
free_p:
	kzfree(rsa_key->p);
}

static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
				 unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_priv_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->d = kzalloc(raw_key.d_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->d)
		goto err;

	rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->d_sz = raw_key.d_sz;
	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
	memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);

	caam_rsa_set_priv_key_form(ctx, &raw_key);

	return 0;

err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	return ctx->key.n_sz;
}

/* Per-session PKC driver context creation function */
static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	ctx->dev = caam_jr_alloc();

	if (IS_ERR(ctx->dev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->dev);
	}

	return 0;
}

/* Per-session PKC driver context cleanup function */
static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;

	caam_rsa_free_key(key);
	caam_jr_free(ctx->dev);
}

static struct akcipher_alg caam_rsa = {
	.encrypt = caam_rsa_enc,
	.decrypt = caam_rsa_dec,
	.sign = caam_rsa_dec,
	.verify = caam_rsa_enc,
	.set_pub_key = caam_rsa_set_pub_key,
	.set_priv_key = caam_rsa_set_priv_key,
	.max_size = caam_rsa_max_size,
	.init = caam_rsa_init_tfm,
	.exit = caam_rsa_exit_tfm,
	.base = {
		.cra_name = "rsa",
		.cra_driver_name = "rsa-caam",
		.cra_priority = 3000,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct caam_rsa_ctx),
	},
};

/* Public Key Cryptography module initialization handler */
static int __init caam_pkc_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	u32 cha_inst, pk_inst;
	int err;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	/* Determine public key hardware accelerator presence. */
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
	pk_inst = (cha_inst & CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;

	/* Do not register algorithms if PKHA is not present. */
	if (!pk_inst)
		return -ENODEV;

	err = crypto_register_akcipher(&caam_rsa);
	if (err)
		dev_warn(ctrldev, "%s alg registration failed\n",
			 caam_rsa.base.cra_driver_name);
	else
		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");

	return err;
}

static void __exit caam_pkc_exit(void)
{
	crypto_unregister_akcipher(&caam_rsa);
}

module_init(caam_pkc_init);
module_exit(caam_pkc_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor");