/*
 * Freescale FSL CAAM support for crypto API over QI backend.
 * Based on caamalg.c
 *
 * Copyright 2013-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2017 NXP
 */
#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "sg_sw_qm.h"
#include "key_gen.h"
#include "qi.h"
#include "jr.h"
#include "caamalg_desc.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		2000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)

#define DESC_MAX_USED_BYTES		(DESC_QI_AEAD_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
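
/*
 * Note: DESC_MAX_USED_LEN sizes the per-session shared descriptor buffers
 * below for the largest descriptor this driver builds (AEAD givencrypt)
 * plus a worst-case fully inlined key, so one buffer size fits all
 * registered algorithms.
 */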

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
	struct device *qidev;
	spinlock_t lock;	/* Protects multiple init of driver context */
	struct caam_drv_ctx *drv_ctx[NUM_OP];
};

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 ctx1_iv_off = 0;
	u32 *nonce = NULL;
	unsigned int data_len[2];
	u32 inl_mask;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	if (alg->caam.geniv)
		goto skip_enc;

	/* aead_encrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);
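
	/*
	 * desc_inline_query() sets bit 0 of inl_mask if the (split)
	 * authentication key fits inline in the descriptor and bit 1 if the
	 * cipher key does; a key that does not fit is referenced by DMA
	 * address instead. The query is repeated below for the decrypt and
	 * givencrypt descriptors, whose fixed parts differ in length.
	 */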

	cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, is_rfc3686, nonce,
			       ctx1_iv_off, true);

skip_enc:
	/* aead_decrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, alg->caam.geniv,
			       is_rfc3686, nonce, ctx1_iv_off, true);

	if (!alg->caam.geniv)
		goto skip_givenc;

	/* aead_givencrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
				  ivsize, ctx->authsize, is_rfc3686, nonce,
				  ctx1_iv_off, true);

skip_givenc:
	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int aead_setkey(struct crypto_aead *aead, const u8 *key,
		       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

#ifdef DEBUG
	dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
			    keys.enckeylen);
	if (ret)
		goto badkey;

	/* append encryption key to auth split key */
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, DMA_TO_DEVICE);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif
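
	/*
	 * At this point ctx->key holds the padded split authentication key
	 * immediately followed by the raw encryption key:
	 * | split auth key (adata.keylen_pad bytes) | enc key (enckeylen) |
	 */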
	ctx->cdata.keylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret)
		goto badkey;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
	const char *alg_name = crypto_tfm_alg_name(tfm);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 ctx1_iv_off = 0;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
	int ret = 0;

	memcpy(ctx->key, key, keylen);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		keylen -= CTR_RFC3686_NONCE_SIZE;
	}

	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = ctx->key;
	ctx->cdata.key_inline = true;
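
	/*
	 * Unlike the AEAD path, the key is always placed inline in the
	 * shared descriptors here (key_inline above), presumably because
	 * ablkcipher descriptors are short enough that it always fits, so
	 * no desc_inline_query() round trip is needed.
	 */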

	/* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
	cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				     is_rfc3686, ctx1_iv_off);
	cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				     is_rfc3686, ctx1_iv_off);
	cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
					ivsize, is_rfc3686, ctx1_iv_off);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[GIVENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
					  ctx->sh_desc_givenc);
		if (ret) {
			dev_err(jrdev, "driver givenc context update failed\n");
			goto badkey;
		}
	}

	return ret;
badkey:
	crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
				 const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(ablkcipher,
					    CRYPTO_TFM_RES_BAD_KEY_LEN);
		dev_err(jrdev, "key size mismatch\n");
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = ctx->key;
	ctx->cdata.key_inline = true;

	/* xts ablkcipher encrypt, decrypt shared descriptors */
	cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
	cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	return ret;
badkey:
	crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @assoclen_dma: bus physical mapped address of req->assoclen
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table
 */
struct aead_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	dma_addr_t assoclen_dma;
	struct caam_drv_req drv_req;
	struct qm_sg_entry sgt[0];
};

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table
 */
struct ablkcipher_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	struct caam_drv_req drv_req;
	struct qm_sg_entry sgt[0];
};
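
/*
 * Both extended descriptors end in a zero-length sgt[] array: the edesc and
 * its h/w link table are carved out of a single qi_cache_alloc() allocation,
 * with the S/G entries laid out right after the fixed fields.
 */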

static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
					enum optype type)
{
	/*
	 * This function is called on the fast path with values of 'type'
	 * known at compile time. Invalid arguments are not expected and
	 * thus no checks are made.
	 */
	struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
	u32 *desc;

	if (unlikely(!drv_ctx)) {
		spin_lock(&ctx->lock);

		/* Read again to check if some other core init drv_ctx */
		drv_ctx = ctx->drv_ctx[type];
		if (!drv_ctx) {
			int cpu;

			if (type == ENCRYPT)
				desc = ctx->sh_desc_enc;
			else if (type == DECRYPT)
				desc = ctx->sh_desc_dec;
			else /* (type == GIVENCRYPT) */
				desc = ctx->sh_desc_givenc;

			cpu = smp_processor_id();
			drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
			if (likely(!IS_ERR_OR_NULL(drv_ctx)))
				drv_ctx->op_type = type;

			ctx->drv_ctx[type] = drv_ctx;
		}

		spin_unlock(&ctx->lock);
	}

	return drv_ctx;
}

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents, dma_addr_t iv_dma, int ivsize,
		       enum optype op_type, dma_addr_t qm_sg_dma,
		       int qm_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize,
				 op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
							 DMA_TO_DEVICE);
	if (qm_sg_bytes)
		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
		   edesc->qm_sg_dma, edesc->qm_sg_bytes);
	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
		   edesc->qm_sg_dma, edesc->qm_sg_bytes);
}

static void aead_done(struct caam_drv_req *drv_req, u32 status)
{
	struct device *qidev;
	struct aead_edesc *edesc;
	struct aead_request *aead_req = drv_req->app_ctx;
	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
	struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
	int ecode = 0;

	qidev = caam_ctx->qidev;

	if (unlikely(status)) {
		caam_jr_strstatus(qidev, status);
		ecode = -EIO;
	}

	edesc = container_of(drv_req, typeof(*edesc), drv_req);
	aead_unmap(qidev, edesc, aead_req);

	aead_request_complete(aead_req, ecode);
	qi_cache_free(edesc);
}

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t qm_sg_dma, iv_dma = 0;
	int ivsize = 0;
	unsigned int authsize = ctx->authsize;
	int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
	int in_len, out_len;
	struct qm_sg_entry *sg_table, *fd_sgt;
	struct caam_drv_ctx *drv_ctx;
	enum optype op_type = encrypt ? ENCRYPT : DECRYPT;

	drv_ctx = get_drv_ctx(ctx, op_type);
	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
		return (struct aead_edesc *)drv_ctx;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (likely(req->src == req->dst)) {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize : 0));
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : 0));
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen);
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize :
							(-authsize)));
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : (-authsize)));
			qi_cache_free(edesc);
			return ERR_PTR(dst_nents);
		}

		if (src_nents) {
			mapped_src_nents = dma_map_sg(qidev, req->src,
						      src_nents, DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(qidev, "unable to map source\n");
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(qidev, "unable to map destination\n");
			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
		ivsize = crypto_aead_ivsize(aead);
		iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(qidev, iv_dma)) {
			dev_err(qidev, "unable to map IV\n");
			caam_unmap(qidev, req->src, req->dst, src_nents,
				   dst_nents, 0, 0, op_type, 0, 0);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	/*
	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
	 * Input is not contiguous.
	 */
	qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
		     (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
	sg_table = &edesc->sgt[0];
	qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = aead_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	edesc->assoclen_dma = dma_map_single(qidev, &req->assoclen, 4,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
		dev_err(qidev, "unable to map assoclen\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
	qm_sg_index++;
	if (ivsize) {
		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
		qm_sg_index++;
	}
	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
	qm_sg_index += mapped_src_nents;

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 qm_sg_index, 0);

	qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->qm_sg_dma = qm_sg_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	out_len = req->assoclen + req->cryptlen +
		  (encrypt ? ctx->authsize : (-ctx->authsize));
	in_len = 4 + ivsize + req->assoclen + req->cryptlen;

	fd_sgt = &edesc->drv_req.fd_sgt[0];
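
	/*
	 * The compound frame carries two S/G entries: fd_sgt[1] describes the
	 * input frame (assoclen word, optional IV, then the source data) and
	 * fd_sgt[0] the output frame.
	 */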
	dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);

	if (req->dst == req->src) {
		if (mapped_src_nents == 1)
			dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
					 out_len, 0);
		else
			dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
					     (1 + !!ivsize) * sizeof(*sg_table),
					     out_len, 0);
	} else if (mapped_dst_nents == 1) {
		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
				 0);
	} else {
		dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
				     qm_sg_index, out_len, 0);
	}

	return edesc;
}

static inline int aead_crypt(struct aead_request *req, bool encrypt)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ret;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, encrypt);
	if (IS_ERR_OR_NULL(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int aead_encrypt(struct aead_request *req)
{
	return aead_crypt(req, true);
}

static int aead_decrypt(struct aead_request *req)
{
	return aead_crypt(req, false);
}

static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
{
	struct ablkcipher_edesc *edesc;
	struct ablkcipher_request *req = drv_req->app_ctx;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *qidev = caam_ctx->qidev;
#ifdef DEBUG
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
#endif

	edesc = container_of(drv_req, typeof(*edesc), drv_req);

	if (status)
		caam_jr_strstatus(qidev, status);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	dbg_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
		    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		    edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(qidev, edesc, req);
	qi_cache_free(edesc);

	ablkcipher_request_complete(req, status);
}

static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, bool encrypt)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma;
	bool in_contig;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int dst_sg_idx, qm_sg_ents;
	struct qm_sg_entry *sg_table, *fd_sgt;
	struct caam_drv_ctx *drv_ctx;
	enum optype op_type = encrypt ? ENCRYPT : DECRYPT;

	drv_ctx = get_drv_ctx(ctx, op_type);
	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
		return (struct ablkcipher_edesc *)drv_ctx;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (unlikely(src_nents < 0)) {
		dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
			req->nbytes);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->src != req->dst)) {
		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->nbytes);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(qidev, "unable to map destination\n");
			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, iv_dma)) {
		dev_err(qidev, "unable to map IV\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}
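
	/*
	 * If the IV sits immediately before the lone source segment in DMA
	 * address space, the input (IV || data) is already contiguous and no
	 * input S/G table entries are needed.
	 */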
	if (mapped_src_nents == 1 &&
	    iv_dma + ivsize == sg_dma_address(req->src)) {
		in_contig = true;
		qm_sg_ents = 0;
	} else {
		in_contig = false;
		qm_sg_ents = 1 + mapped_src_nents;
	}
	dst_sg_idx = qm_sg_ents;

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
	sg_table = &edesc->sgt[0];
	edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = ablkcipher_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	if (!in_contig) {
		dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
		sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
	}

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 dst_sg_idx, 0);

	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	fd_sgt = &edesc->drv_req.fd_sgt[0];

	if (!in_contig)
		dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
					  ivsize + req->nbytes, 0);
	else
		dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes,
				      0);

	if (req->src == req->dst) {
		if (!in_contig)
			dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
					     sizeof(*sg_table), req->nbytes, 0);
		else
			dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
					 req->nbytes, 0);
	} else if (mapped_dst_nents > 1) {
		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
				     sizeof(*sg_table), req->nbytes, 0);
	} else {
		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
				 req->nbytes, 0);
	}

	return edesc;
}

static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
	struct skcipher_givcrypt_request *creq)
{
	struct ablkcipher_request *req = &creq->creq;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma;
	bool out_contig;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	struct qm_sg_entry *sg_table, *fd_sgt;
	int dst_sg_idx, qm_sg_ents;
	struct caam_drv_ctx *drv_ctx;

	drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
		return (struct ablkcipher_edesc *)drv_ctx;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (unlikely(src_nents < 0)) {
		dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
			req->nbytes);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->src != req->dst)) {
		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->nbytes);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(qidev, "unable to map destination\n");
			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		dst_nents = src_nents;
		mapped_dst_nents = src_nents;
	}

	iv_dma = dma_map_single(qidev, creq->giv, ivsize, DMA_FROM_DEVICE);
	if (dma_mapping_error(qidev, iv_dma)) {
		dev_err(qidev, "unable to map IV\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}
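
	/*
	 * For givencrypt the IV is generated by the accelerator, hence the
	 * DMA_FROM_DEVICE mapping of creq->giv above. The check below treats
	 * the output as contiguous when the IV buffer ends exactly where the
	 * single destination segment begins.
	 */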
	qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
	dst_sg_idx = qm_sg_ents;
	if (mapped_dst_nents == 1 &&
	    iv_dma + ivsize == sg_dma_address(req->dst)) {
		out_contig = true;
	} else {
		out_contig = false;
		qm_sg_ents += 1 + mapped_dst_nents;
	}

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (!edesc) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, GIVENCRYPT, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	sg_table = &edesc->sgt[0];
	edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = ablkcipher_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	if (mapped_src_nents > 1)
		sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);

	if (!out_contig) {
		dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 dst_sg_idx + 1, 0);
	}

	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, GIVENCRYPT, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	fd_sgt = &edesc->drv_req.fd_sgt[0];

	if (mapped_src_nents > 1)
		dma_to_qm_sg_one_ext(&fd_sgt[1], edesc->qm_sg_dma, req->nbytes,
				     0);
	else
		dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
				 req->nbytes, 0);

	if (!out_contig)
		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
				     sizeof(*sg_table), ivsize + req->nbytes,
				     0);
	else
		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
				 ivsize + req->nbytes, 0);

	return edesc;
}

static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	int ret;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, encrypt);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	return ablkcipher_crypt(req, true);
}

static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	return ablkcipher_crypt(req, false);
}

static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
{
	struct ablkcipher_request *req = &creq->creq;
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	int ret;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = ablkcipher_giv_edesc_alloc(creq);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

#define template_ablkcipher	template_u.ablkcipher
struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	u32 type;
	union {
		struct ablkcipher_alg ablkcipher;
	} template_u;
	u32 class1_alg_type;
	u32 class2_alg_type;
};

static struct caam_alg_template driver_algs[] = {
	/* ablkcipher descriptor */
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-caam-qi",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-caam-qi",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-caam-qi",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "ctr(aes)",
		.driver_name = "ctr-aes-caam-qi",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "chainiv",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	},
	{
		.name = "rfc3686(ctr(aes))",
		.driver_name = "rfc3686-ctr-aes-caam-qi",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	},
	{
		.name = "xts(aes)",
		.driver_name = "xts-aes-caam-qi",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = xts_ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
};
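
/*
 * Each authenc() algorithm below is registered twice: once as-is and once
 * wrapped in echainiv() with .caam.geniv set, which makes aead_set_sh_desc()
 * build the IV-generating (givencap) shared descriptor for encryption.
 */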

static struct caam_aead_alg driver_aeads[] = {
	/* single-pass ipsec_esp descriptor */
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
1971 "hmac-sha256-cbc-desi-"
1972 "caam-qi",
1973 .cra_blocksize = DES_BLOCK_SIZE,
1975 .setkey = aead_setkey,
1976 .setauthsize = aead_setauthsize,
1977 .encrypt = aead_encrypt,
1978 .decrypt = aead_decrypt,
1979 .ivsize = DES_BLOCK_SIZE,
1980 .maxauthsize = SHA256_DIGEST_SIZE,
1982 .caam = {
1983 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1984 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1985 OP_ALG_AAI_HMAC_PRECOMP,
1986 .geniv = true,
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
};
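
/*
 * The table above pairs each generic algorithm name (cra_name) with the
 * CAAM class 1 (cipher) and class 2 (authentication) descriptor types used
 * to build its shared descriptors; the echainiv() variants additionally set
 * .geniv so that encryption runs through the IV-generating descriptor path.
 *
 * A minimal sketch of how a kernel caller might reach one of these
 * transforms through the generic crypto API (illustrative only; key,
 * keylen and authsize are placeholders and error handling is trimmed):
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(des))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_aead_setkey(tfm, key, keylen);		-> aead_setkey()
 *	crypto_aead_setauthsize(tfm, authsize);		-> aead_setauthsize()
 *	...
 *	crypto_free_aead(tfm);
 *
 * Because these algs register at CAAM_CRA_PRIORITY, the crypto core
 * prefers them over lower-priority software implementations of the same
 * cra_name.
 */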

struct caam_crypto_alg {
	struct list_head entry;
	struct crypto_alg crypto_alg;
	struct caam_alg_entry caam;
};
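
/*
 * Dynamically allocated wrapper for the (give)ablkcipher templates in
 * driver_algs: it ties a crypto_alg to the CAAM descriptor parameters it
 * was built from and links the pair into alg_list so that module exit can
 * unregister and free it.
 */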

static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
{
	struct caam_drv_private *priv;

	/*
	 * distribute tfms across job rings to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
				      DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
		dev_err(ctx->jrdev, "unable to map key\n");
		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

	priv = dev_get_drvdata(ctx->jrdev->parent);
	ctx->qidev = priv->qidev;

	spin_lock_init(&ctx->lock);
	ctx->drv_ctx[ENCRYPT] = NULL;
	ctx->drv_ctx[DECRYPT] = NULL;
	ctx->drv_ctx[GIVENCRYPT] = NULL;

	return 0;
}
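
/*
 * Lifetime notes for the common init above: every transform gets its own
 * job ring so completions stay in-order per tfm, and the key buffer is
 * DMA-mapped once for the whole session (the setkey paths can then sync
 * the existing mapping rather than remap it). The per-operation driver
 * contexts start out NULL and are expected to be created on first use,
 * which is why ctx->lock is initialized here.
 */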

static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
							crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam);
}

static int caam_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
						      aead);
	struct caam_ctx *ctx = crypto_aead_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam);
}

static void caam_exit_common(struct caam_ctx *ctx)
{
	caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
	caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
	caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);

	dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key),
			 DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}

static void caam_cra_exit(struct crypto_tfm *tfm)
{
	caam_exit_common(crypto_tfm_ctx(tfm));
}

static void caam_aead_exit(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}

static struct list_head alg_list;

static void __exit caam_qi_algapi_exit(void)
{
	struct caam_crypto_alg *t_alg, *n;
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	if (!alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
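
/*
 * The alg_list.next check above distinguishes "init bailed out before
 * INIT_LIST_HEAD() ran" (list head still zeroed, nothing to walk) from a
 * normally initialized, possibly empty, list. The AEADs are handled first
 * and individually, since they live in the static driver_aeads[] table and
 * are flagged one by one via ->registered.
 */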

static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_GIVCIPHER:
		alg->cra_type = &crypto_givcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	}

	t_alg->caam.class1_alg_type = template->class1_alg_type;
	t_alg->caam.class2_alg_type = template->class2_alg_type;

	return t_alg;
}
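
/*
 * Both cases of the switch above copy the same ablkcipher template; only
 * cra_type differs, telling the crypto core whether the transform supplies
 * its own IV generation (givcipher) or leaves it to the caller
 * (ablkcipher).
 */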

static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_aead_init;
	alg->exit = caam_aead_exit;
}
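
/*
 * Unlike the ablkcipher templates, which get wrapped in dynamically
 * allocated caam_crypto_alg objects, the AEADs in driver_aeads[] are
 * static; only the runtime fields (module, priority, context size, flags
 * and the init/exit hooks) need filling in before registration.
 */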

static int __init caam_qi_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;
	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	bool registered = false;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	of_node_put(dev_node);
	if (!pdev)
		return -ENODEV;

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv || !priv->qi_present)
		return -ENODEV;

	INIT_LIST_HEAD(&alg_list);

	/*
	 * Register crypto algorithms the device supports.
	 * First, detect presence and attributes of DES, AES, and MD blocks.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;

	/* If MD is present, limit digest size based on LP256 */
	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
		md_limit = SHA256_DIGEST_SIZE;
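
	/*
	 * Worked example of the limit above: on a low-power CAAM whose MDHA
	 * block is LP256, md_limit drops to SHA256_DIGEST_SIZE (32 bytes),
	 * so the loops below skip every hmac(sha384)/hmac(sha512) entry,
	 * whose maxauthsize values (48 and 64 bytes) exceed the limit.
	 */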

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_crypto_alg *t_alg;
		struct caam_alg_template *alg = driver_algs + i;
		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		t_alg = caam_alg_alloc(alg);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(priv->qidev, "%s alg allocation failed\n",
				 alg->driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			dev_warn(priv->qidev, "%s alg registration failed\n",
				 t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
			continue;
		}

		list_add_tail(&t_alg->entry, &alg_list);
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/*
		 * Check support for AES algorithms not available
		 * on LP devices.
		 */
		if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
		    (alg_aai == OP_ALG_AAI_GCM))
			continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD or MD size is not supported by device.
		 */
		if (c2_alg_sel &&
		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
			continue;

		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->aead.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	if (registered)
		dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");

	return err;
}
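
/*
 * A quick sanity check after loading the module (illustrative): grep
 * /proc/crypto for "caam-qi". Each registered transform appears there
 * with its cra_driver_name and with CAAM_CRA_PRIORITY as its priority.
 */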

module_init(caam_qi_algapi_init);
module_exit(caam_qi_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
MODULE_AUTHOR("Freescale Semiconductor");