/*
 * Freescale FSL CAAM support for crypto API over QI backend.
 * Based on caamalg.c
 *
 * Copyright 2013-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2017 NXP
 */

#include "compat.h"
#include "ctrl.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "error.h"
#include "sg_sw_qm.h"
#include "key_gen.h"
#include "qi.h"
#include "jr.h"
#include "caamalg_desc.h"
/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		2000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)

#define DESC_MAX_USED_BYTES		(DESC_QI_AEAD_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
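
/*
 * DESC_MAX_USED_LEN is the worst-case shared descriptor length: the AEAD
 * givencrypt descriptor with both the split authentication key and the
 * cipher key inlined. The per-session sh_desc_* buffers are sized to it.
 */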

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
	struct device *qidev;
	spinlock_t lock;	/* Protects multiple init of driver context */
	struct caam_drv_ctx *drv_ctx[NUM_OP];
};
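
/*
 * (Re)build the AEAD encrypt / decrypt (and, for IV-generating algorithms,
 * givencrypt) shared descriptors from the current session keys. For each
 * descriptor, desc_inline_query() decides per key whether it fits inline
 * in the descriptor or has to be referenced by its DMA address.
 */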
static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 ctx1_iv_off = 0;
	u32 *nonce = NULL;
	unsigned int data_len[2];
	u32 inl_mask;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	if (alg->caam.geniv)
		goto skip_enc;

	/* aead_encrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, is_rfc3686, nonce,
			       ctx1_iv_off, true, ctrlpriv->era);

skip_enc:
	/* aead_decrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, alg->caam.geniv,
			       is_rfc3686, nonce, ctx1_iv_off, true,
			       ctrlpriv->era);

	if (!alg->caam.geniv)
		goto skip_givenc;

	/* aead_givencrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
				  ivsize, ctx->authsize, is_rfc3686, nonce,
				  ctx1_iv_off, true, ctrlpriv->era);

skip_givenc:
	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}
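
/*
 * aead_setkey() lays the key material out in ctx->key as
 * [ padded split authentication key | encryption key ]. On CAAM era >= 6
 * the split key is derived by the DKP protocol inside the shared descriptor
 * itself; on older parts it is generated here by a gen_split_key() job.
 */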
static int aead_setkey(struct crypto_aead *aead, const u8 *key,
		       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

#ifdef DEBUG
	dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.keylen = keys.authkeylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
			goto badkey;

		memcpy(ctx->key, keys.authkey, keys.authkeylen);
		memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
		       keys.enckeylen);
		dma_sync_single_for_device(jrdev, ctx->key_dma,
					   ctx->adata.keylen_pad +
					   keys.enckeylen, ctx->dir);
		goto skip_split_key;
	}

	ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
			    keys.enckeylen);
	if (ret)
		goto badkey;

	/* postpend encryption key to auth split key */
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, ctx->dir);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif

skip_split_key:
	ctx->cdata.keylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret)
		goto badkey;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
	const char *alg_name = crypto_tfm_alg_name(tfm);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 ctx1_iv_off = 0;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
	int ret = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		keylen -= CTR_RFC3686_NONCE_SIZE;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
	cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				     is_rfc3686, ctx1_iv_off);
	cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				     is_rfc3686, ctx1_iv_off);
	cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
					ivsize, is_rfc3686, ctx1_iv_off);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[GIVENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
					  ctx->sh_desc_givenc);
		if (ret) {
			dev_err(jrdev, "driver givenc context update failed\n");
			goto badkey;
		}
	}

	return ret;
badkey:
	crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
				 const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(ablkcipher,
					    CRYPTO_TFM_RES_BAD_KEY_LEN);
		dev_err(jrdev, "key size mismatch\n");
		return -EINVAL;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* xts ablkcipher encrypt, decrypt shared descriptors */
	cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
	cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	return ret;
badkey:
	crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @assoclen: associated data length, in CAAM endianness
 * @assoclen_dma: bus physical mapped address of req->assoclen
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table
 */
struct aead_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	unsigned int assoclen;
	dma_addr_t assoclen_dma;
	struct caam_drv_req drv_req;
#define CAAM_QI_MAX_AEAD_SG						\
	((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) /	\
	 sizeof(struct qm_sg_entry))
	struct qm_sg_entry sgt[0];
};
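
/*
 * CAAM_QI_MAX_AEAD_SG above (and CAAM_QI_MAX_ABLKCIPHER_SG below) bound the
 * h/w link table to what still fits, together with the edesc header, in a
 * single CAAM_QI_MEMCACHE_SIZE allocation from the QI cache.
 */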

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table
 */
struct ablkcipher_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	struct caam_drv_req drv_req;
#define CAAM_QI_MAX_ABLKCIPHER_SG					    \
	((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
	 sizeof(struct qm_sg_entry))
	struct qm_sg_entry sgt[0];
};

static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
					enum optype type)
{
	/*
	 * This function is called on the fast path with values of 'type'
	 * known at compile time. Invalid arguments are not expected and
	 * thus no checks are made.
	 */
	struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
	u32 *desc;

	if (unlikely(!drv_ctx)) {
		spin_lock(&ctx->lock);

		/* Read again to check if some other core init drv_ctx */
		drv_ctx = ctx->drv_ctx[type];
		if (!drv_ctx) {
			int cpu;

			if (type == ENCRYPT)
				desc = ctx->sh_desc_enc;
			else if (type == DECRYPT)
				desc = ctx->sh_desc_dec;
			else /* (type == GIVENCRYPT) */
				desc = ctx->sh_desc_givenc;

			cpu = smp_processor_id();
			drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
			if (likely(!IS_ERR_OR_NULL(drv_ctx)))
				drv_ctx->op_type = type;

			ctx->drv_ctx[type] = drv_ctx;
		}

		spin_unlock(&ctx->lock);
	}

	return drv_ctx;
}
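
/*
 * caam_unmap() is the common teardown helper for all DMA mappings taken for
 * a request: the src/dst scatterlists, the IV buffer and the QI S/G table.
 */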
static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents, dma_addr_t iv_dma, int ivsize,
		       enum optype op_type, dma_addr_t qm_sg_dma,
		       int qm_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize,
				 op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
							 DMA_TO_DEVICE);
	if (qm_sg_bytes)
		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
		   edesc->qm_sg_dma, edesc->qm_sg_bytes);
	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
		   edesc->qm_sg_dma, edesc->qm_sg_bytes);
}

static void aead_done(struct caam_drv_req *drv_req, u32 status)
{
	struct device *qidev;
	struct aead_edesc *edesc;
	struct aead_request *aead_req = drv_req->app_ctx;
	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
	struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
	int ecode = 0;

	qidev = caam_ctx->qidev;

	if (unlikely(status)) {
		caam_jr_strstatus(qidev, status);
		ecode = -EIO;
	}

	edesc = container_of(drv_req, typeof(*edesc), drv_req);
	aead_unmap(qidev, edesc, aead_req);

	aead_request_complete(aead_req, ecode);
	qi_cache_free(edesc);
}

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t qm_sg_dma, iv_dma = 0;
	int ivsize = 0;
	unsigned int authsize = ctx->authsize;
	int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
	int in_len, out_len;
	struct qm_sg_entry *sg_table, *fd_sgt;
	struct caam_drv_ctx *drv_ctx;
	enum optype op_type = encrypt ? ENCRYPT : DECRYPT;

	drv_ctx = get_drv_ctx(ctx, op_type);
	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
		return (struct aead_edesc *)drv_ctx;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (likely(req->src == req->dst)) {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize : 0));
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : 0));
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen);
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize :
							(-authsize)));
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : (-authsize)));
			qi_cache_free(edesc);
			return ERR_PTR(dst_nents);
		}

		if (src_nents) {
			mapped_src_nents = dma_map_sg(qidev, req->src,
						      src_nents, DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(qidev, "unable to map source\n");
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(qidev, "unable to map destination\n");
			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
		ivsize = crypto_aead_ivsize(aead);
		iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(qidev, iv_dma)) {
			dev_err(qidev, "unable to map IV\n");
			caam_unmap(qidev, req->src, req->dst, src_nents,
				   dst_nents, 0, 0, op_type, 0, 0);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	/*
	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
	 * Input is not contiguous.
	 */
	qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
		     (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
	if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
		dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
			qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}
	sg_table = &edesc->sgt[0];
	qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = aead_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	edesc->assoclen = cpu_to_caam32(req->assoclen);
	edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
		dev_err(qidev, "unable to map assoclen\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
	qm_sg_index++;
	if (ivsize) {
		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
		qm_sg_index++;
	}
	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
	qm_sg_index += mapped_src_nents;

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 qm_sg_index, 0);

	qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->qm_sg_dma = qm_sg_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;
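
	/*
	 * Fill the frame descriptor S/G pair: fd_sgt[1] describes the job
	 * input (assoclen word + optional IV + payload, via the table just
	 * mapped), fd_sgt[0] describes the output.
	 */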
	out_len = req->assoclen + req->cryptlen +
		  (encrypt ? ctx->authsize : (-ctx->authsize));
	in_len = 4 + ivsize + req->assoclen + req->cryptlen;

	fd_sgt = &edesc->drv_req.fd_sgt[0];
	dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);

	if (req->dst == req->src) {
		if (mapped_src_nents == 1)
			dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
					 out_len, 0);
		else
			dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
					     (1 + !!ivsize) * sizeof(*sg_table),
					     out_len, 0);
	} else if (mapped_dst_nents == 1) {
		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
				 0);
	} else {
		dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
				     qm_sg_index, out_len, 0);
	}

	return edesc;
}

static inline int aead_crypt(struct aead_request *req, bool encrypt)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ret;
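
	/* Back off when QMan signals congestion; the caller may retry. */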
	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, encrypt);
	if (IS_ERR_OR_NULL(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int aead_encrypt(struct aead_request *req)
{
	return aead_crypt(req, true);
}

static int aead_decrypt(struct aead_request *req)
{
	return aead_crypt(req, false);
}

static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
{
	struct ablkcipher_edesc *edesc;
	struct ablkcipher_request *req = drv_req->app_ctx;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *qidev = caam_ctx->qidev;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

#ifdef DEBUG
	dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
#endif

	edesc = container_of(drv_req, typeof(*edesc), drv_req);

	if (status)
		caam_jr_strstatus(qidev, status);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(qidev, edesc, req);
	qi_cache_free(edesc);

	/*
	 * The crypto API expects us to set the IV (req->info) to the last
	 * ciphertext block. This is used e.g. by the CTS mode.
	 */
	scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
				 ivsize, 0);

	ablkcipher_request_complete(req, status);
}

static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, bool encrypt)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma;
	bool in_contig;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int dst_sg_idx, qm_sg_ents;
	struct qm_sg_entry *sg_table, *fd_sgt;
	struct caam_drv_ctx *drv_ctx;
	enum optype op_type = encrypt ? ENCRYPT : DECRYPT;

	drv_ctx = get_drv_ctx(ctx, op_type);
	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
		return (struct ablkcipher_edesc *)drv_ctx;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (unlikely(src_nents < 0)) {
		dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
			req->nbytes);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->src != req->dst)) {
		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->nbytes);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(qidev, "unable to map destination\n");
			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, iv_dma)) {
		dev_err(qidev, "unable to map IV\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}
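
	/*
	 * If the IV buffer and a single mapped source segment happen to be
	 * physically contiguous, the input can be handed to the hardware
	 * directly, without an intermediate S/G table.
	 */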
	if (mapped_src_nents == 1 &&
	    iv_dma + ivsize == sg_dma_address(req->src)) {
		in_contig = true;
		qm_sg_ents = 0;
	} else {
		in_contig = false;
		qm_sg_ents = 1 + mapped_src_nents;
	}
	dst_sg_idx = qm_sg_ents;

	qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
	if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
		dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
			qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	sg_table = &edesc->sgt[0];
	edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = ablkcipher_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	if (!in_contig) {
		dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
		sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
	}

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 dst_sg_idx, 0);

	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	fd_sgt = &edesc->drv_req.fd_sgt[0];

	if (!in_contig)
		dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
					  ivsize + req->nbytes, 0);
	else
		dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes,
				      0);

	if (req->src == req->dst) {
		if (!in_contig)
			dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
					     sizeof(*sg_table), req->nbytes, 0);
		else
			dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
					 req->nbytes, 0);
	} else if (mapped_dst_nents > 1) {
		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
				     sizeof(*sg_table), req->nbytes, 0);
	} else {
		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
				 req->nbytes, 0);
	}

	return edesc;
}
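
/*
 * ablkcipher_giv_edesc_alloc() mirrors ablkcipher_edesc_alloc(), except that
 * the IV is generated by the hardware: creq->giv is mapped DMA_FROM_DEVICE
 * and chained in front of the destination instead of the source.
 */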
static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
	struct skcipher_givcrypt_request *creq)
{
	struct ablkcipher_request *req = &creq->creq;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma;
	bool out_contig;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	struct qm_sg_entry *sg_table, *fd_sgt;
	int dst_sg_idx, qm_sg_ents;
	struct caam_drv_ctx *drv_ctx;

	drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
		return (struct ablkcipher_edesc *)drv_ctx;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (unlikely(src_nents < 0)) {
		dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
			req->nbytes);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->src != req->dst)) {
		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->nbytes);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(qidev, "unable to map destination\n");
			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		dst_nents = src_nents;
		mapped_dst_nents = src_nents;
	}

	iv_dma = dma_map_single(qidev, creq->giv, ivsize, DMA_FROM_DEVICE);
	if (dma_mapping_error(qidev, iv_dma)) {
		dev_err(qidev, "unable to map IV\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
	dst_sg_idx = qm_sg_ents;
	if (mapped_dst_nents == 1 &&
	    iv_dma + ivsize == sg_dma_address(req->dst)) {
		out_contig = true;
	} else {
		out_contig = false;
		qm_sg_ents += 1 + mapped_dst_nents;
	}

	if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
		dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
			qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, GIVENCRYPT, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (!edesc) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, GIVENCRYPT, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	sg_table = &edesc->sgt[0];
	edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = ablkcipher_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	if (mapped_src_nents > 1)
		sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);

	if (!out_contig) {
		dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 dst_sg_idx + 1, 0);
	}

	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, GIVENCRYPT, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	fd_sgt = &edesc->drv_req.fd_sgt[0];

	if (mapped_src_nents > 1)
		dma_to_qm_sg_one_ext(&fd_sgt[1], edesc->qm_sg_dma, req->nbytes,
				     0);
	else
		dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
				 req->nbytes, 0);

	if (!out_contig)
		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
				     sizeof(*sg_table), ivsize + req->nbytes,
				     0);
	else
		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
				 ivsize + req->nbytes, 0);

	return edesc;
}

static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	int ret;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, encrypt);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	return ablkcipher_crypt(req, true);
}

static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	return ablkcipher_crypt(req, false);
}

static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
{
	struct ablkcipher_request *req = &creq->creq;
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	int ret;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = ablkcipher_giv_edesc_alloc(creq);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}
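
/*
 * Algorithm templates: the driver_algs table below describes the ablkcipher
 * algorithms; the caam_aead_alg array that follows it describes the AEAD
 * algorithms.
 */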
#define template_ablkcipher	template_u.ablkcipher
struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	u32 type;
	union {
		struct ablkcipher_alg ablkcipher;
	} template_u;
	u32 class1_alg_type;
	u32 class2_alg_type;
};

static struct caam_alg_template driver_algs[] = {
	/* ablkcipher descriptor */
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-caam-qi",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-caam-qi",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-caam-qi",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "ctr(aes)",
		.driver_name = "ctr-aes-caam-qi",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "chainiv",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	},
	{
		.name = "rfc3686(ctr(aes))",
		.driver_name = "rfc3686-ctr-aes-caam-qi",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	},
	{
		.name = "xts(aes)",
		.driver_name = "xts-aes-caam-qi",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = xts_ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
};
1329 static struct caam_aead_alg driver_aeads[] = {
1330 /* single-pass ipsec_esp descriptor */
1332 .aead = {
1333 .base = {
1334 .cra_name = "authenc(hmac(md5),cbc(aes))",
1335 .cra_driver_name = "authenc-hmac-md5-"
1336 "cbc-aes-caam-qi",
1337 .cra_blocksize = AES_BLOCK_SIZE,
1339 .setkey = aead_setkey,
1340 .setauthsize = aead_setauthsize,
1341 .encrypt = aead_encrypt,
1342 .decrypt = aead_decrypt,
1343 .ivsize = AES_BLOCK_SIZE,
1344 .maxauthsize = MD5_DIGEST_SIZE,
1346 .caam = {
1347 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1348 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1349 OP_ALG_AAI_HMAC_PRECOMP,
1353 .aead = {
1354 .base = {
1355 .cra_name = "echainiv(authenc(hmac(md5),"
1356 "cbc(aes)))",
1357 .cra_driver_name = "echainiv-authenc-hmac-md5-"
1358 "cbc-aes-caam-qi",
1359 .cra_blocksize = AES_BLOCK_SIZE,
1361 .setkey = aead_setkey,
1362 .setauthsize = aead_setauthsize,
1363 .encrypt = aead_encrypt,
1364 .decrypt = aead_decrypt,
1365 .ivsize = AES_BLOCK_SIZE,
1366 .maxauthsize = MD5_DIGEST_SIZE,
1368 .caam = {
1369 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1370 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1371 OP_ALG_AAI_HMAC_PRECOMP,
1372 .geniv = true,
1376 .aead = {
1377 .base = {
1378 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1379 .cra_driver_name = "authenc-hmac-sha1-"
1380 "cbc-aes-caam-qi",
1381 .cra_blocksize = AES_BLOCK_SIZE,
1383 .setkey = aead_setkey,
1384 .setauthsize = aead_setauthsize,
1385 .encrypt = aead_encrypt,
1386 .decrypt = aead_decrypt,
1387 .ivsize = AES_BLOCK_SIZE,
1388 .maxauthsize = SHA1_DIGEST_SIZE,
1390 .caam = {
1391 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1392 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1393 OP_ALG_AAI_HMAC_PRECOMP,
1397 .aead = {
1398 .base = {
1399 .cra_name = "echainiv(authenc(hmac(sha1),"
1400 "cbc(aes)))",
1401 .cra_driver_name = "echainiv-authenc-"
1402 "hmac-sha1-cbc-aes-caam-qi",
1403 .cra_blocksize = AES_BLOCK_SIZE,
1405 .setkey = aead_setkey,
1406 .setauthsize = aead_setauthsize,
1407 .encrypt = aead_encrypt,
1408 .decrypt = aead_decrypt,
1409 .ivsize = AES_BLOCK_SIZE,
1410 .maxauthsize = SHA1_DIGEST_SIZE,
1412 .caam = {
1413 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1414 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1415 OP_ALG_AAI_HMAC_PRECOMP,
1416 .geniv = true,
1420 .aead = {
1421 .base = {
1422 .cra_name = "authenc(hmac(sha224),cbc(aes))",
1423 .cra_driver_name = "authenc-hmac-sha224-"
1424 "cbc-aes-caam-qi",
1425 .cra_blocksize = AES_BLOCK_SIZE,
1427 .setkey = aead_setkey,
1428 .setauthsize = aead_setauthsize,
1429 .encrypt = aead_encrypt,
1430 .decrypt = aead_decrypt,
1431 .ivsize = AES_BLOCK_SIZE,
1432 .maxauthsize = SHA224_DIGEST_SIZE,
1434 .caam = {
1435 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1436 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1437 OP_ALG_AAI_HMAC_PRECOMP,
1441 .aead = {
1442 .base = {
1443 .cra_name = "echainiv(authenc(hmac(sha224),"
1444 "cbc(aes)))",
1445 .cra_driver_name = "echainiv-authenc-"
1446 "hmac-sha224-cbc-aes-caam-qi",
1447 .cra_blocksize = AES_BLOCK_SIZE,
1449 .setkey = aead_setkey,
1450 .setauthsize = aead_setauthsize,
1451 .encrypt = aead_encrypt,
1452 .decrypt = aead_decrypt,
1453 .ivsize = AES_BLOCK_SIZE,
1454 .maxauthsize = SHA224_DIGEST_SIZE,
1456 .caam = {
1457 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1458 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1459 OP_ALG_AAI_HMAC_PRECOMP,
1460 .geniv = true,
1464 .aead = {
1465 .base = {
1466 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1467 .cra_driver_name = "authenc-hmac-sha256-"
1468 "cbc-aes-caam-qi",
1469 .cra_blocksize = AES_BLOCK_SIZE,
1471 .setkey = aead_setkey,
1472 .setauthsize = aead_setauthsize,
1473 .encrypt = aead_encrypt,
1474 .decrypt = aead_decrypt,
1475 .ivsize = AES_BLOCK_SIZE,
1476 .maxauthsize = SHA256_DIGEST_SIZE,
1478 .caam = {
1479 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1480 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1481 OP_ALG_AAI_HMAC_PRECOMP,
1485 .aead = {
1486 .base = {
1487 .cra_name = "echainiv(authenc(hmac(sha256),"
1488 "cbc(aes)))",
1489 .cra_driver_name = "echainiv-authenc-"
1490 "hmac-sha256-cbc-aes-"
1491 "caam-qi",
1492 .cra_blocksize = AES_BLOCK_SIZE,
1494 .setkey = aead_setkey,
1495 .setauthsize = aead_setauthsize,
1496 .encrypt = aead_encrypt,
1497 .decrypt = aead_decrypt,
1498 .ivsize = AES_BLOCK_SIZE,
1499 .maxauthsize = SHA256_DIGEST_SIZE,
1501 .caam = {
1502 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1503 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1504 OP_ALG_AAI_HMAC_PRECOMP,
1505 .geniv = true,
1509 .aead = {
1510 .base = {
1511 .cra_name = "authenc(hmac(sha384),cbc(aes))",
1512 .cra_driver_name = "authenc-hmac-sha384-"
1513 "cbc-aes-caam-qi",
1514 .cra_blocksize = AES_BLOCK_SIZE,
1516 .setkey = aead_setkey,
1517 .setauthsize = aead_setauthsize,
1518 .encrypt = aead_encrypt,
1519 .decrypt = aead_decrypt,
1520 .ivsize = AES_BLOCK_SIZE,
1521 .maxauthsize = SHA384_DIGEST_SIZE,
1523 .caam = {
1524 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1525 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1526 OP_ALG_AAI_HMAC_PRECOMP,
1530 .aead = {
1531 .base = {
1532 .cra_name = "echainiv(authenc(hmac(sha384),"
1533 "cbc(aes)))",
1534 .cra_driver_name = "echainiv-authenc-"
1535 "hmac-sha384-cbc-aes-"
1536 "caam-qi",
1537 .cra_blocksize = AES_BLOCK_SIZE,
1539 .setkey = aead_setkey,
1540 .setauthsize = aead_setauthsize,
1541 .encrypt = aead_encrypt,
1542 .decrypt = aead_decrypt,
1543 .ivsize = AES_BLOCK_SIZE,
1544 .maxauthsize = SHA384_DIGEST_SIZE,
1546 .caam = {
1547 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1548 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1549 OP_ALG_AAI_HMAC_PRECOMP,
1550 .geniv = true,
1554 .aead = {
1555 .base = {
1556 .cra_name = "authenc(hmac(sha512),cbc(aes))",
1557 .cra_driver_name = "authenc-hmac-sha512-"
1558 "cbc-aes-caam-qi",
1559 .cra_blocksize = AES_BLOCK_SIZE,
1561 .setkey = aead_setkey,
1562 .setauthsize = aead_setauthsize,
1563 .encrypt = aead_encrypt,
1564 .decrypt = aead_decrypt,
1565 .ivsize = AES_BLOCK_SIZE,
1566 .maxauthsize = SHA512_DIGEST_SIZE,
1568 .caam = {
1569 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1570 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1571 OP_ALG_AAI_HMAC_PRECOMP,
1575 .aead = {
1576 .base = {
1577 .cra_name = "echainiv(authenc(hmac(sha512),"
1578 "cbc(aes)))",
1579 .cra_driver_name = "echainiv-authenc-"
1580 "hmac-sha512-cbc-aes-"
1581 "caam-qi",
1582 .cra_blocksize = AES_BLOCK_SIZE,
1584 .setkey = aead_setkey,
1585 .setauthsize = aead_setauthsize,
1586 .encrypt = aead_encrypt,
1587 .decrypt = aead_decrypt,
1588 .ivsize = AES_BLOCK_SIZE,
1589 .maxauthsize = SHA512_DIGEST_SIZE,
1591 .caam = {
1592 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1593 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1594 OP_ALG_AAI_HMAC_PRECOMP,
1595 .geniv = true,
1599 .aead = {
1600 .base = {
1601 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
1602 .cra_driver_name = "authenc-hmac-md5-"
1603 "cbc-des3_ede-caam-qi",
1604 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1606 .setkey = aead_setkey,
1607 .setauthsize = aead_setauthsize,
1608 .encrypt = aead_encrypt,
1609 .decrypt = aead_decrypt,
1610 .ivsize = DES3_EDE_BLOCK_SIZE,
1611 .maxauthsize = MD5_DIGEST_SIZE,
1613 .caam = {
1614 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1615 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1616 OP_ALG_AAI_HMAC_PRECOMP,
1620 .aead = {
1621 .base = {
1622 .cra_name = "echainiv(authenc(hmac(md5),"
1623 "cbc(des3_ede)))",
1624 .cra_driver_name = "echainiv-authenc-hmac-md5-"
1625 "cbc-des3_ede-caam-qi",
1626 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1628 .setkey = aead_setkey,
1629 .setauthsize = aead_setauthsize,
1630 .encrypt = aead_encrypt,
1631 .decrypt = aead_decrypt,
1632 .ivsize = DES3_EDE_BLOCK_SIZE,
1633 .maxauthsize = MD5_DIGEST_SIZE,
1635 .caam = {
1636 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1637 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1638 OP_ALG_AAI_HMAC_PRECOMP,
1639 .geniv = true,
1643 .aead = {
1644 .base = {
1645 .cra_name = "authenc(hmac(sha1),"
1646 "cbc(des3_ede))",
1647 .cra_driver_name = "authenc-hmac-sha1-"
1648 "cbc-des3_ede-caam-qi",
1649 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1651 .setkey = aead_setkey,
1652 .setauthsize = aead_setauthsize,
1653 .encrypt = aead_encrypt,
1654 .decrypt = aead_decrypt,
1655 .ivsize = DES3_EDE_BLOCK_SIZE,
1656 .maxauthsize = SHA1_DIGEST_SIZE,
1658 .caam = {
1659 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1660 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1661 OP_ALG_AAI_HMAC_PRECOMP,
1665 .aead = {
1666 .base = {
1667 .cra_name = "echainiv(authenc(hmac(sha1),"
1668 "cbc(des3_ede)))",
1669 .cra_driver_name = "echainiv-authenc-"
1670 "hmac-sha1-"
1671 "cbc-des3_ede-caam-qi",
1672 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1674 .setkey = aead_setkey,
1675 .setauthsize = aead_setauthsize,
1676 .encrypt = aead_encrypt,
1677 .decrypt = aead_decrypt,
1678 .ivsize = DES3_EDE_BLOCK_SIZE,
1679 .maxauthsize = SHA1_DIGEST_SIZE,
1681 .caam = {
1682 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1683 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1684 OP_ALG_AAI_HMAC_PRECOMP,
1685 .geniv = true,
1689 .aead = {
1690 .base = {
1691 .cra_name = "authenc(hmac(sha224),"
1692 "cbc(des3_ede))",
1693 .cra_driver_name = "authenc-hmac-sha224-"
1694 "cbc-des3_ede-caam-qi",
1695 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1697 .setkey = aead_setkey,
1698 .setauthsize = aead_setauthsize,
1699 .encrypt = aead_encrypt,
1700 .decrypt = aead_decrypt,
1701 .ivsize = DES3_EDE_BLOCK_SIZE,
1702 .maxauthsize = SHA224_DIGEST_SIZE,
1704 .caam = {
1705 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1706 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1707 OP_ALG_AAI_HMAC_PRECOMP,
1711 .aead = {
1712 .base = {
1713 .cra_name = "echainiv(authenc(hmac(sha224),"
1714 "cbc(des3_ede)))",
1715 .cra_driver_name = "echainiv-authenc-"
1716 "hmac-sha224-"
1717 "cbc-des3_ede-caam-qi",
1718 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1720 .setkey = aead_setkey,
1721 .setauthsize = aead_setauthsize,
1722 .encrypt = aead_encrypt,
1723 .decrypt = aead_decrypt,
1724 .ivsize = DES3_EDE_BLOCK_SIZE,
1725 .maxauthsize = SHA224_DIGEST_SIZE,
1727 .caam = {
1728 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1729 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1730 OP_ALG_AAI_HMAC_PRECOMP,
1731 .geniv = true,
1735 .aead = {
1736 .base = {
1737 .cra_name = "authenc(hmac(sha256),"
1738 "cbc(des3_ede))",
1739 .cra_driver_name = "authenc-hmac-sha256-"
1740 "cbc-des3_ede-caam-qi",
1741 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1743 .setkey = aead_setkey,
1744 .setauthsize = aead_setauthsize,
1745 .encrypt = aead_encrypt,
1746 .decrypt = aead_decrypt,
1747 .ivsize = DES3_EDE_BLOCK_SIZE,
1748 .maxauthsize = SHA256_DIGEST_SIZE,
1750 .caam = {
1751 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1752 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1753 OP_ALG_AAI_HMAC_PRECOMP,
1757 .aead = {
1758 .base = {
1759 .cra_name = "echainiv(authenc(hmac(sha256),"
1760 "cbc(des3_ede)))",
1761 .cra_driver_name = "echainiv-authenc-"
1762 "hmac-sha256-"
1763 "cbc-des3_ede-caam-qi",
1764 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1766 .setkey = aead_setkey,
1767 .setauthsize = aead_setauthsize,
1768 .encrypt = aead_encrypt,
1769 .decrypt = aead_decrypt,
1770 .ivsize = DES3_EDE_BLOCK_SIZE,
1771 .maxauthsize = SHA256_DIGEST_SIZE,
1773 .caam = {
1774 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1775 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1776 OP_ALG_AAI_HMAC_PRECOMP,
1777 .geniv = true,
1781 .aead = {
1782 .base = {
1783 .cra_name = "authenc(hmac(sha384),"
1784 "cbc(des3_ede))",
1785 .cra_driver_name = "authenc-hmac-sha384-"
1786 "cbc-des3_ede-caam-qi",
1787 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1789 .setkey = aead_setkey,
1790 .setauthsize = aead_setauthsize,
1791 .encrypt = aead_encrypt,
1792 .decrypt = aead_decrypt,
1793 .ivsize = DES3_EDE_BLOCK_SIZE,
1794 .maxauthsize = SHA384_DIGEST_SIZE,
1796 .caam = {
1797 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1798 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1799 OP_ALG_AAI_HMAC_PRECOMP,
1803 .aead = {
1804 .base = {
1805 .cra_name = "echainiv(authenc(hmac(sha384),"
1806 "cbc(des3_ede)))",
1807 .cra_driver_name = "echainiv-authenc-"
1808 "hmac-sha384-"
1809 "cbc-des3_ede-caam-qi",
1810 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1812 .setkey = aead_setkey,
1813 .setauthsize = aead_setauthsize,
1814 .encrypt = aead_encrypt,
1815 .decrypt = aead_decrypt,
1816 .ivsize = DES3_EDE_BLOCK_SIZE,
1817 .maxauthsize = SHA384_DIGEST_SIZE,
1819 .caam = {
1820 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1821 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1822 OP_ALG_AAI_HMAC_PRECOMP,
1823 .geniv = true,
1827 .aead = {
1828 .base = {
1829 .cra_name = "authenc(hmac(sha512),"
1830 "cbc(des3_ede))",
1831 .cra_driver_name = "authenc-hmac-sha512-"
1832 "cbc-des3_ede-caam-qi",
1833 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1835 .setkey = aead_setkey,
1836 .setauthsize = aead_setauthsize,
1837 .encrypt = aead_encrypt,
1838 .decrypt = aead_decrypt,
1839 .ivsize = DES3_EDE_BLOCK_SIZE,
1840 .maxauthsize = SHA512_DIGEST_SIZE,
1842 .caam = {
1843 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1844 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1845 OP_ALG_AAI_HMAC_PRECOMP,
1849 .aead = {
1850 .base = {
1851 .cra_name = "echainiv(authenc(hmac(sha512),"
1852 "cbc(des3_ede)))",
1853 .cra_driver_name = "echainiv-authenc-"
1854 "hmac-sha512-"
1855 "cbc-des3_ede-caam-qi",
1856 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1858 .setkey = aead_setkey,
1859 .setauthsize = aead_setauthsize,
1860 .encrypt = aead_encrypt,
1861 .decrypt = aead_decrypt,
1862 .ivsize = DES3_EDE_BLOCK_SIZE,
1863 .maxauthsize = SHA512_DIGEST_SIZE,
1865 .caam = {
1866 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1867 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1868 OP_ALG_AAI_HMAC_PRECOMP,
1869 .geniv = true,
1873 .aead = {
1874 .base = {
				.cra_name = "authenc(hmac(md5),cbc(des))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
};
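
/*
 * Wrapper tying a registered crypto_alg to its CAAM algorithm parameters;
 * instances are linked into alg_list at registration time.
 */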
struct caam_crypto_alg {
	struct list_head entry;
	struct crypto_alg crypto_alg;
	struct caam_alg_entry caam;
};
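
/*
 * Common per-tfm setup: attach a job ring, DMA-map the key buffer and
 * copy the descriptor header templates from the algorithm entry.
 */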
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
			    bool uses_dkp)
{
	struct caam_drv_private *priv;

	/*
	 * distribute tfms across job rings to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	priv = dev_get_drvdata(ctx->jrdev->parent);
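	/*
	 * Era 6+ parts can generate the split key in place via the Derived
	 * Key Protocol (DKP), which writes back into ctx->key, so the
	 * buffer must be mapped bidirectionally in that case.
	 */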
	if (priv->era >= 6 && uses_dkp)
		ctx->dir = DMA_BIDIRECTIONAL;
	else
		ctx->dir = DMA_TO_DEVICE;

	ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
				      ctx->dir);
	if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
		dev_err(ctx->jrdev, "unable to map key\n");
		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

	ctx->qidev = priv->qidev;

	spin_lock_init(&ctx->lock);
	ctx->drv_ctx[ENCRYPT] = NULL;
	ctx->drv_ctx[DECRYPT] = NULL;
	ctx->drv_ctx[GIVENCRYPT] = NULL;

	return 0;
}

static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
							crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam, false);
}
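
/*
 * Only the authenc entries in driver_aeads use aead_setkey (i.e. carry a
 * split key that may be derived via DKP); the setkey comparison below
 * flags exactly those transforms.
 */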
static int caam_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
						      aead);
	struct caam_ctx *ctx = crypto_aead_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam,
				alg->setkey == aead_setkey);
}
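
/*
 * Undo caam_init_common(): release any driver contexts, unmap the key
 * buffer and return the job ring.
 */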
static void caam_exit_common(struct caam_ctx *ctx)
{
	caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
	caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
	caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);

	dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);

	caam_jr_free(ctx->jrdev);
}

static void caam_cra_exit(struct crypto_tfm *tfm)
{
	caam_exit_common(crypto_tfm_ctx(tfm));
}

static void caam_aead_exit(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}

static struct list_head alg_list;

static void __exit caam_qi_algapi_exit(void)
{
	struct caam_crypto_alg *t_alg, *n;
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}
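
	/* alg_list is initialized only if module init probed a QI-capable CAAM */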
	if (!alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
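
/*
 * Build a crypto_alg from an ablkcipher/givcipher template; the caller
 * registers the result and links it into alg_list.
 */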
static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_GIVCIPHER:
		alg->cra_type = &crypto_givcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	}

	t_alg->caam.class1_alg_type = template->class1_alg_type;
	t_alg->caam.class2_alg_type = template->class2_alg_type;

	return t_alg;
}
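
/* Fill in the aead_alg boilerplate shared by every driver_aeads entry */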
static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_aead_init;
	alg->exit = caam_aead_exit;
}
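
/*
 * Module init: locate the CAAM controller, check that the QI backend is
 * present, and register only those algorithms the detected CHAs support.
 */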
static int __init caam_qi_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;
	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	bool registered = false;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	of_node_put(dev_node);
	if (!pdev)
		return -ENODEV;

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv || !priv->qi_present)
		return -ENODEV;

	if (caam_dpaa2) {
		dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
		return -ENODEV;
	}

	INIT_LIST_HEAD(&alg_list);

	/*
	 * Register crypto algorithms the device supports.
	 * First, detect presence and attributes of DES, AES, and MD blocks.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;

	/* If MD is present, limit digest size based on LP256 */
	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
		md_limit = SHA256_DIGEST_SIZE;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_crypto_alg *t_alg;
		struct caam_alg_template *alg = driver_algs + i;
		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		t_alg = caam_alg_alloc(alg);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(priv->qidev, "%s alg allocation failed\n",
				 alg->driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			dev_warn(priv->qidev, "%s alg registration failed\n",
				 t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
			continue;
		}

		list_add_tail(&t_alg->entry, &alg_list);
		registered = true;
	}
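
	/*
	 * AEADs go through the aead_alg interface and are additionally
	 * gated on GCM support (absent on AES-LP parts) and on the digest
	 * sizes the MD block can produce.
	 */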
	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/*
		 * Check support for AES algorithms not available
		 * on LP devices.
		 */
		if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
		    (alg_aai == OP_ALG_AAI_GCM))
			continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD or MD size is not supported by device.
		 */
		if (c2_alg_sel &&
		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
			continue;

		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->aead.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	if (registered)
		dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");

	return err;
}

module_init(caam_qi_algapi_init);
module_exit(caam_qi_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
MODULE_AUTHOR("Freescale Semiconductor");
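
/*
 * Illustrative usage sketch, not part of this driver: once the module is
 * loaded, the algorithms registered above are reached through the generic
 * crypto API. The key/keylen values below are placeholders, and for
 * authenc the key must be packed in the crypto_authenc key format:
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(des))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_aead_setauthsize(tfm, SHA256_DIGEST_SIZE);
 *	crypto_aead_setkey(tfm, key, keylen);
 *	...
 *	crypto_free_aead(tfm);
 *
 * The caam-qi implementation is picked for a given cra_name whenever
 * CAAM_CRA_PRIORITY wins over competing implementations.
 */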