/*
 * caam - Freescale FSL CAAM support for Public Key Cryptography
 *
 * Copyright 2016 Freescale Semiconductor, Inc.
 *
 * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
 * all the desired key parameters, input and output pointers.
 */

#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "error.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"

#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
#define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f1_pdb))

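/*
 * In each length above, 2 * CAAM_CMD_SZ accounts for the job descriptor
 * header and the PROTOCOL operation command; the remainder is the
 * hardware Protocol Data Block for the chosen RSA form.
 */
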
static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
			 struct akcipher_request *req)
{
	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
			  struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
}

/* RSA Job Completion handler */
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	/* desc points at hw_desc[0], so container_of() recovers the edesc */
	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_pub_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f1_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

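/*
 * The extended descriptor is one contiguous allocation:
 * [struct rsa_edesc | hw job descriptor (desclen) | sec4 S/G table],
 * with the S/G table present only when src or dst spans more than one
 * scatterlist entry.
 */
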
static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
					 size_t desclen)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	struct rsa_edesc *edesc;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int sgc;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	int src_nents, dst_nents;

	src_nents = sg_nents_for_len(req->src, req->src_len);
	dst_nents = sg_nents_for_len(req->dst, req->dst_len);

	if (src_nents > 1)
		sec4_sg_len = src_nents;
	if (dst_nents > 1)
		sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc, hw desc commands and link tables */
	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc)
		return ERR_PTR(-ENOMEM);

	sgc = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
	if (unlikely(!sgc)) {
		dev_err(dev, "unable to map source\n");
		goto src_fail;
	}

	sgc = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
	if (unlikely(!sgc)) {
		dev_err(dev, "unable to map destination\n");
		goto dst_fail;
	}

	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;

	sec4_sg_index = 0;
	if (src_nents > 1) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		sec4_sg_index += src_nents;
	}
	if (dst_nents > 1)
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);

	/* Save nents for later use in Job Descriptor */
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;

	if (!sec4_sg_bytes)
		return edesc;

	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		goto sec4_sg_fail;
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	return edesc;

sec4_sg_fail:
	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
dst_fail:
	dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
src_fail:
	kfree(edesc);
	return ERR_PTR(-ENOMEM);
}

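/*
 * Fill the public-key PDB consumed by the PROTOCOL command: f is the
 * input message, g the encrypted output, and the sgf word packs the e
 * and n byte sizes together with the S/G flags.
 */
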
static int set_rsa_pub_pdb(struct akcipher_request *req,
			   struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map RSA modulus memory\n");
		return -ENOMEM;
	}

	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->e_dma)) {
		dev_err(dev, "Unable to map RSA public exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->f_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->g_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
	pdb->f_len = req->src_len;

	return 0;
}

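/*
 * Private Key Form 1 exponentiates with (n, d) directly; here g is the
 * ciphertext input and f the decrypted output, mirroring the public-key
 * PDB layout.
 */
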
static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map modulus memory\n");
		return -ENOMEM;
	}

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->g_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;

	return 0;
}

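/*
 * Encrypt/decrypt entry points: build a job descriptor, enqueue it on
 * the ring, and return -EINPROGRESS; the request then completes
 * asynchronously in the rsa_*_done() callbacks above.
 */
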
static int caam_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	if (unlikely(!key->n || !key->e))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(jrdev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Encrypt Protocol Data Block */
	ret = set_rsa_pub_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_pub_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	if (unlikely(!key->n || !key->d))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(jrdev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
	ret = set_rsa_priv_f1_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f1_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f1_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

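/*
 * Key material lives in driver-owned GFP_DMA buffers so it can be
 * DMA-mapped straight into the PDB; the private exponent is scrubbed
 * before its buffer is freed.
 */
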
static void caam_rsa_free_key(struct caam_rsa_key *key)
{
	kzfree(key->d);
	kfree(key->e);
	kfree(key->n);
	memset(key, 0, sizeof(*key));
}

/**
 * caam_read_raw_data - Read a raw byte stream as a positive integer.
 * The function skips the buffer's leading zeros, copies the remaining data
 * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
 * the address of the new buffer.
 *
 * @buf   : The data to read
 * @nbytes: The amount of data to read
 */
static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
	u8 *val;

	while (!*buf && *nbytes) {
		buf++;
		(*nbytes)--;
	}

	val = kzalloc(*nbytes, GFP_DMA | GFP_KERNEL);
	if (!val)
		return NULL;

	memcpy(val, buf, *nbytes);

	return val;
}

static int caam_rsa_check_key_length(unsigned int len)
{
	/* PKHA supports RSA moduli up to 4096 bits */
	if (len > 4096)
		return -EINVAL;
	return 0;
}

static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
				unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_pub_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption
	 * descriptor expects a positive integer for the RSA modulus and
	 * uses its length as decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);

	return 0;
err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
				 unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_priv_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->d = kzalloc(raw_key.d_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->d)
		goto err;

	rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption
	 * descriptor expects a positive integer for the RSA modulus and
	 * uses its length as decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->d_sz = raw_key.d_sz;
	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
	memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);

	return 0;

err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static int caam_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;

	return (key->n) ? key->n_sz : -EINVAL;
}

/* Per-session PKC driver context creation function */
static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	ctx->dev = caam_jr_alloc();

	if (IS_ERR(ctx->dev)) {
		/* ctx->dev holds an ERR_PTR here, so don't dereference it */
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->dev);
	}

	return 0;
}

/* Per-session PKC driver context cleanup function */
static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;

	caam_rsa_free_key(key);
	caam_jr_free(ctx->dev);
}

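/*
 * Registered under the generic "rsa" name, so callers reach this driver
 * through the akcipher API (e.g. crypto_alloc_akcipher("rsa", 0, 0));
 * the high cra_priority lets "rsa-caam" take precedence over the
 * software rsa implementation.
 */
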
static struct akcipher_alg caam_rsa = {
	.encrypt = caam_rsa_enc,
	.decrypt = caam_rsa_dec,
	.sign = caam_rsa_dec,
	.verify = caam_rsa_enc,
	.set_pub_key = caam_rsa_set_pub_key,
	.set_priv_key = caam_rsa_set_priv_key,
	.max_size = caam_rsa_max_size,
	.init = caam_rsa_init_tfm,
	.exit = caam_rsa_exit_tfm,
	.base = {
		.cra_name = "rsa",
		.cra_driver_name = "rsa-caam",
		.cra_priority = 3000,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct caam_rsa_ctx),
	},
};

/* Public Key Cryptography module initialization handler */
static int __init caam_pkc_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	u32 cha_inst, pk_inst;
	int err;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	/* Determine public key hardware accelerator presence. */
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
	pk_inst = (cha_inst & CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;

	/* Do not register algorithms if PKHA is not present. */
	if (!pk_inst)
		return -ENODEV;

	err = crypto_register_akcipher(&caam_rsa);
	if (err)
		dev_warn(ctrldev, "%s alg registration failed\n",
			 caam_rsa.base.cra_driver_name);
	else
		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");

	return err;
}

static void __exit caam_pkc_exit(void)
{
	crypto_unregister_akcipher(&caam_rsa);
}

module_init(caam_pkc_init);
module_exit(caam_pkc_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor");