/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16

/* length of descriptors text */
#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(DESC_AEAD_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
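
/*
 * Sizing sketch (assuming CAAM_CMD_SZ == sizeof(u32), i.e. 4 bytes):
 * DESC_AEAD_GIVENC_LEN = (4 + 16 + 7) * 4 = 108 bytes, and
 * CAAM_MAX_KEY_SIZE = 32 + 2 * 64 = 160 bytes, so DESC_MAX_USED_LEN
 * works out to (108 + 160) / 4 = 67 words -- the upper bound used to
 * size the sh_desc_* buffers in struct caam_ctx below.
 */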

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
static struct list_head alg_list;

/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}

/*
 * Wait for completion of class 1 key loading before allowing
 * error propagation
 */
static inline void append_dec_shr_done(u32 *desc)
{
	u32 *jump_cmd;

	jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}

/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * For aead encrypt and decrypt, read iv for both classes
 */
static inline void aead_append_ld_iv(u32 *desc, int ivsize)
{
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | ivsize);
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * If all data, including src (with assoc and iv) or dst (with iv only) are
 * contiguous
 */
#define GIV_SRC_CONTIG		1
#define GIV_DST_CONTIG		(1 << 1)
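
/*
 * Both flags start out set in aead_giv_edesc_alloc(); each is cleared
 * when the corresponding side turns out not to be physically contiguous.
 * init_aead_giv_job() then chooses between direct pointers and the
 * sec4_sg link table based on them.
 */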

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};
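
/*
 * One shared descriptor per operation type (encrypt, decrypt, givencrypt)
 * is prebuilt and DMA-mapped at setkey time; the per-request job
 * descriptors built later only reference them through sh_desc_*_dma, as
 * sketched in the diagram at the top of this file.
 */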

static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline)
{
	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}
}

static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd;
	u32 geniv, moveiv;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;
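
	/*
	 * For example, with hmac(sha1) the padded split key set up in
	 * aead_setkey() below is ALIGN(2 * 20, 16) = 48 bytes, so together
	 * with a 16-byte AES key the whole descriptor stays well under the
	 * 64-word (256-byte) limit and both keys are inlined as immediate
	 * data rather than referenced through key_dma.
	 */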

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);
	aead_append_ld_iv(desc, tfm->ivsize);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	/* aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline);

	/* Only propagate error immediately if shared */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, key_jump_cmd);
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
	set_jump_tgt_here(desc, jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);
	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	aead_append_ld_iv(desc, tfm->ivsize);

	append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
	append_dec_shr_done(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO |
		    MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy generated IV from class 1 context to the output FIFO */
	append_move(desc, MOVE_SRC_CLASS1CTX |
		    MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = seqinlen - (ivsize + cryptlen) */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy iv from class 1 ctx to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload iv */
	append_seq_fifo_load(desc, tfm->ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
			      u32 authkeylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, authkeylen,
			     ctx->alg_op);
}
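
/*
 * The "split key" is the MDHA-precomputed HMAC ipad/opad hash-state pair
 * (hence split_key_len being twice the digest size in aead_setkey()
 * below); KEY_DEST_MDHA_SPLIT in append_key_aead() loads it directly
 * into the class 2 MDHA instead of the raw authentication key.
 */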

static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

	if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
	       keys.authkeylen);
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
	if (ret) {
		goto badkey;
	}

	/* append encryption key to auth split key */
	memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
				      keys.enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len + keys.enckeylen, 1);
#endif

	ctx->enckeylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
				 keys.enckeylen, DMA_TO_DEVICE);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
	struct device *jrdev = ctx->jrdev;
	int ret = 0;
	u32 *key_jump_cmd, *jump_cmd;
	u32 *desc;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_SERIAL);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);

	/* Load iv */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Only propagate error immediately if shared */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, key_jump_cmd);
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
	set_jump_tgt_here(desc, jump_cmd);

	/* load IV */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Choose operation */
	append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	/* Wait for key to load before allowing propagating error */
	append_dec_shr_done(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return ret;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @assoc_chained: if source is chained
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 *	(variable length; must not exceed MAX_CAAM_DESCSIZE)
 */
struct aead_edesc {
	int assoc_nents;
	bool assoc_chained;
	int src_nents;
	bool src_chained;
	int dst_nents;
	bool dst_chained;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};
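
/*
 * Memory layout of an allocation made by aead_edesc_alloc() (and its
 * ablkcipher counterpart): the fixed struct is followed immediately by
 * the job descriptor (hw_desc[]) and then by the sec4_sg link table,
 * which is why a single kmalloc of
 * sizeof(struct aead_edesc) + desc_bytes + sec4_sg_bytes suffices and
 * sec4_sg is simply a pointer into the same buffer.
 */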

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 *	(variable length; must not exceed MAX_CAAM_DESCSIZE)
 */
struct ablkcipher_edesc {
	int src_nents;
	bool src_chained;
	int dst_nents;
	bool dst_chained;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       bool src_chained, int dst_nents, bool dst_chained,
		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
		       int sec4_sg_bytes)
{
	if (dst != src) {
		dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
				     src_chained);
		dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
				     dst_chained);
	} else {
		dma_unmap_sg_chained(dev, src, src_nents ? : 1,
				     DMA_BIDIRECTIONAL, src_chained);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	if (sec4_sg_bytes)
		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
			     DMA_TO_DEVICE, edesc->assoc_chained);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
		   edesc->dst_chained, edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
		   edesc->dst_chained, edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	aead_unmap(jrdev, edesc, req);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen +
		       ctx->authsize + 4, 1);
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}

static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
		       req->cryptlen - ctx->authsize, 1);
#endif

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	aead_unmap(jrdev, edesc, req);

	/*
	 * verify hw auth check passed else return -EBADMSG
	 */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4,
		       ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
		       sizeof(struct iphdr) + req->assoclen +
		       ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
		       ctx->authsize + 36, 1);
	if (!err && edesc->sec4_sg_bytes) {
		struct scatterlist *sg = sg_last(req->src, edesc->src_nents);

		print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
			       sg->length + ctx->authsize + 16, 1);
	}
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

/*
 * Fill in aead job descriptor
 */
static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
			  struct aead_edesc *edesc,
			  struct aead_request *req,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
				 (edesc->src_nents ? : 1);
		in_options = LDST_SGF;
	}

	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
			  in_options);

	if (likely(req->src == req->dst)) {
		if (all_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
				  ((edesc->assoc_nents ? : 1) + 1);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	if (encrypt)
		append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
				   out_options);
	else
		append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
				   out_options);
}
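
/*
 * The sequence lengths above mirror the shared-descriptor math: the input
 * pointer always covers assoclen + ivsize + cryptlen, while the output
 * covers the ciphertext plus the ICV on encrypt (req->cryptlen + authsize)
 * or the plaintext that remains once the ICV is stripped on decrypt
 * (req->cryptlen - authsize).
 */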

/*
 * Fill in aead givencrypt job descriptor
 */
static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
			      struct aead_edesc *edesc,
			      struct aead_request *req,
			      int contig)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (contig & GIV_SRC_CONTIG) {
		src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
			  in_options);

	if (contig & GIV_DST_CONTIG) {
		dst_dma = edesc->iv_dma;
	} else {
		if (likely(req->src == req->dst)) {
			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
				  edesc->assoc_nents;
			out_options = LDST_SGF;
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}

	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
			   out_options);
}

/*
 * Fill in ablkcipher job descriptor
 */
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
				struct ablkcipher_edesc *edesc,
				struct ablkcipher_request *req,
				bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->nbytes, 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (iv_contig) {
		src_dma = edesc->iv_dma;
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);

	if (likely(req->src == req->dst)) {
		if (!edesc->src_nents && iv_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index * sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	bool all_contig = true;
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	int ivsize = crypto_aead_ivsize(aead);
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	unsigned int authsize = ctx->authsize;

	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);

	if (unlikely(req->dst != req->src)) {
		src_nents = sg_count(req->src, req->cryptlen, &src_chained);
		dst_nents = sg_count(req->dst,
				     req->cryptlen +
					(encrypt ? authsize : (-authsize)),
				     &dst_chained);
	} else {
		src_nents = sg_count(req->src,
				     req->cryptlen +
					(encrypt ? authsize : 0),
				     &src_chained);
	}

	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
				 DMA_TO_DEVICE, assoc_chained);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	/* Check if data are contiguous */
	iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
	    iv_dma || src_nents || iv_dma + ivsize !=
	    sg_dma_address(req->src)) {
		all_contig = false;
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		sec4_sg_len = assoc_nents + 1 + src_nents;
	}
	sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	*all_contig_ptr = all_contig;

	sec4_sg_index = 0;
	if (!all_contig) {
		sg_to_sec4_sg(req->assoc,
			      (assoc_nents ? : 1),
			      edesc->sec4_sg +
			      sec4_sg_index, 0);
		sec4_sg_index += assoc_nents ? : 1;
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->src,
				   (src_nents ? : 1),
				   edesc->sec4_sg +
				   sec4_sg_index, 0);
		sec4_sg_index += src_nents ? : 1;
	}
	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	return edesc;
}
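
/*
 * When the request is not fully contiguous, the sec4_sg link table built
 * above holds, in order: the assoc segments, one entry for the IV, the
 * src segments (terminated with the "last" flag), and finally any dst
 * segments -- the same ordering the sec4_sg_index arithmetic in
 * init_aead_job() relies on.
 */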

static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
		      all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);
#endif

	/* Create and submit job descriptor */
	init_aead_job(ctx->sh_desc_dec,
		      ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

/*
 * allocate and map the aead extended descriptor for aead givencrypt
 */
static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
					       *greq, int desc_bytes,
					       u32 *contig_ptr)
{
	struct aead_request *req = &greq->areq;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
	int ivsize = crypto_aead_ivsize(aead);
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;

	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
	src_nents = sg_count(req->src, req->cryptlen, &src_chained);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
				     &dst_chained);

	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
				 DMA_TO_DEVICE, assoc_chained);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	/* Check if data are contiguous */
	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
	    iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
		contig &= ~GIV_SRC_CONTIG;
	if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
		contig &= ~GIV_DST_CONTIG;
	if (unlikely(req->src != req->dst)) {
		dst_nents = dst_nents ? : 1;
		sec4_sg_len += 1;
	}
	if (!(contig & GIV_SRC_CONTIG)) {
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		sec4_sg_len += assoc_nents + 1 + src_nents;
		if (likely(req->src == req->dst))
			contig &= ~GIV_DST_CONTIG;
	}
	sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	*contig_ptr = contig;

	sec4_sg_index = 0;
	if (!(contig & GIV_SRC_CONTIG)) {
		sg_to_sec4_sg(req->assoc, assoc_nents,
			      edesc->sec4_sg +
			      sec4_sg_index, 0);
		sec4_sg_index += assoc_nents;
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg +
				   sec4_sg_index, 0);
		sec4_sg_index += src_nents;
	}
	if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	return edesc;
}

static int aead_givencrypt(struct aead_givcrypt_request *areq)
{
	struct aead_request *req = &areq->areq;
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
				     CAAM_CMD_SZ, &contig);

	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);
#endif

	/* Create and submit job descriptor */
	init_aead_giv_job(ctx->sh_desc_givenc,
			  ctx->sh_desc_givenc_dma, edesc, req, contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

/*
 * allocate and map the ablkcipher extended descriptor for ablkcipher
 */
static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, int desc_bytes,
						       bool *iv_contig_out)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0, sec4_sg_bytes;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool iv_contig = false;
	int sgc;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	bool src_chained = false, dst_chained = false;
	int sec4_sg_index;

	src_nents = sg_count(req->src, req->nbytes, &src_chained);

	if (req->dst != req->src)
		dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	/*
	 * Check if iv can be contiguous with source and destination.
	 * If so, include it. If not, create scatterlist.
	 */
	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
		iv_contig = true;
	else
		src_nents = src_nents ? : 1;
	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			 desc_bytes;

	sec4_sg_index = 0;
	if (!iv_contig) {
		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg + 1, 0);
		sec4_sg_index += 1 + src_nents;
	}

	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	*iv_contig_out = iv_contig;
	return edesc;
}

static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_enc,
			    ctx->sh_desc_enc_dma, edesc, req, iv_contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_dec,
			    ctx->sh_desc_dec_dma, edesc, req, iv_contig);
	desc = edesc->hw_desc;
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

#define template_aead		template_u.aead
#define template_ablkcipher	template_u.ablkcipher
struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	u32 type;
	union {
		struct ablkcipher_alg ablkcipher;
		struct aead_alg aead;
		struct blkcipher_alg blkcipher;
		struct cipher_alg cipher;
		struct compress_alg compress;
		struct rng_alg rng;
	} template_u;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
};
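
/*
 * Each entry below pairs one CBC cipher (class 1) with one HMAC auth
 * algorithm (class 2); all of them reuse the same aead_* callbacks and
 * differ only in names, block/IV/digest sizes and the OP_ALG_* selectors
 * consumed by aead_set_sh_desc() and aead_setkey().
 */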
1648 static struct caam_alg_template driver_algs[] = {
1649 /* single-pass ipsec_esp descriptor */
1651 .name = "authenc(hmac(md5),cbc(aes))",
1652 .driver_name = "authenc-hmac-md5-cbc-aes-caam",
1653 .blocksize = AES_BLOCK_SIZE,
1654 .type = CRYPTO_ALG_TYPE_AEAD,
1655 .template_aead = {
1656 .setkey = aead_setkey,
1657 .setauthsize = aead_setauthsize,
1658 .encrypt = aead_encrypt,
1659 .decrypt = aead_decrypt,
1660 .givencrypt = aead_givencrypt,
1661 .geniv = "<built-in>",
1662 .ivsize = AES_BLOCK_SIZE,
1663 .maxauthsize = MD5_DIGEST_SIZE,
1665 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1666 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
1667 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1670 .name = "authenc(hmac(sha1),cbc(aes))",
1671 .driver_name = "authenc-hmac-sha1-cbc-aes-caam",
1672 .blocksize = AES_BLOCK_SIZE,
1673 .type = CRYPTO_ALG_TYPE_AEAD,
1674 .template_aead = {
1675 .setkey = aead_setkey,
1676 .setauthsize = aead_setauthsize,
1677 .encrypt = aead_encrypt,
1678 .decrypt = aead_decrypt,
1679 .givencrypt = aead_givencrypt,
1680 .geniv = "<built-in>",
1681 .ivsize = AES_BLOCK_SIZE,
1682 .maxauthsize = SHA1_DIGEST_SIZE,
1684 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1685 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1686 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1689 .name = "authenc(hmac(sha224),cbc(aes))",
1690 .driver_name = "authenc-hmac-sha224-cbc-aes-caam",
1691 .blocksize = AES_BLOCK_SIZE,
1692 .type = CRYPTO_ALG_TYPE_AEAD,
1693 .template_aead = {
1694 .setkey = aead_setkey,
1695 .setauthsize = aead_setauthsize,
1696 .encrypt = aead_encrypt,
1697 .decrypt = aead_decrypt,
1698 .givencrypt = aead_givencrypt,
1699 .geniv = "<built-in>",
1700 .ivsize = AES_BLOCK_SIZE,
1701 .maxauthsize = SHA224_DIGEST_SIZE,
1703 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1704 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1705 OP_ALG_AAI_HMAC_PRECOMP,
1706 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1709 .name = "authenc(hmac(sha256),cbc(aes))",
1710 .driver_name = "authenc-hmac-sha256-cbc-aes-caam",
1711 .blocksize = AES_BLOCK_SIZE,
1712 .type = CRYPTO_ALG_TYPE_AEAD,
1713 .template_aead = {
1714 .setkey = aead_setkey,
1715 .setauthsize = aead_setauthsize,
1716 .encrypt = aead_encrypt,
1717 .decrypt = aead_decrypt,
1718 .givencrypt = aead_givencrypt,
1719 .geniv = "<built-in>",
1720 .ivsize = AES_BLOCK_SIZE,
1721 .maxauthsize = SHA256_DIGEST_SIZE,
1723 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1724 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1725 OP_ALG_AAI_HMAC_PRECOMP,
1726 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1729 .name = "authenc(hmac(sha384),cbc(aes))",
1730 .driver_name = "authenc-hmac-sha384-cbc-aes-caam",
1731 .blocksize = AES_BLOCK_SIZE,
1732 .type = CRYPTO_ALG_TYPE_AEAD,
1733 .template_aead = {
1734 .setkey = aead_setkey,
1735 .setauthsize = aead_setauthsize,
1736 .encrypt = aead_encrypt,
1737 .decrypt = aead_decrypt,
1738 .givencrypt = aead_givencrypt,
1739 .geniv = "<built-in>",
1740 .ivsize = AES_BLOCK_SIZE,
1741 .maxauthsize = SHA384_DIGEST_SIZE,
1743 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1744 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1745 OP_ALG_AAI_HMAC_PRECOMP,
1746 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1750 .name = "authenc(hmac(sha512),cbc(aes))",
1751 .driver_name = "authenc-hmac-sha512-cbc-aes-caam",
1752 .blocksize = AES_BLOCK_SIZE,
1753 .type = CRYPTO_ALG_TYPE_AEAD,
1754 .template_aead = {
1755 .setkey = aead_setkey,
1756 .setauthsize = aead_setauthsize,
1757 .encrypt = aead_encrypt,
1758 .decrypt = aead_decrypt,
1759 .givencrypt = aead_givencrypt,
1760 .geniv = "<built-in>",
1761 .ivsize = AES_BLOCK_SIZE,
1762 .maxauthsize = SHA512_DIGEST_SIZE,
1764 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1765 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1766 OP_ALG_AAI_HMAC_PRECOMP,
1767 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	{
		.name = "authenc(hmac(md5),cbc(des3_ede))",
		.driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
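	/* single-DES CBC authenc variants */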
	{
		.name = "authenc(hmac(md5),cbc(des))",
		.driver_name = "authenc-hmac-md5-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des))",
		.driver_name = "authenc-hmac-sha1-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),cbc(des))",
		.driver_name = "authenc-hmac-sha224-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des))",
		.driver_name = "authenc-hmac-sha256-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(des))",
		.driver_name = "authenc-hmac-sha384-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(des))",
		.driver_name = "authenc-hmac-sha512-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	/* ablkcipher descriptor */
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
};
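
/*
 * Runtime wrapper around a registered crypto_alg: keeps the CAAM
 * descriptor header template values alongside the generic alg so
 * caam_cra_init() can seed each new transform's context, and links
 * the instance onto alg_list for unregistration at module exit.
 */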
struct caam_crypto_alg {
	struct list_head entry;
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
	struct crypto_alg crypto_alg;
};
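
/*
 * Per-transform setup: acquire a job ring for this tfm and copy the
 * OP_ALG header templates from the parent caam_crypto_alg into the
 * context, OR'ing in the class designations the descriptors expect.
 */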
static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg =
		 container_of(alg, struct caam_crypto_alg, crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	/* copy descriptor header template value */
	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;

	return 0;
}
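
/*
 * Per-transform teardown: unmap whichever shared descriptors were
 * DMA-mapped for this context, then hand the job ring back.
 */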
static void caam_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_enc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
	if (ctx->sh_desc_dec_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
	if (ctx->sh_desc_givenc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
				 desc_bytes(ctx->sh_desc_givenc),
				 DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}
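
/* Module unload: unregister and free every algorithm on alg_list. */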
static void __exit caam_algapi_exit(void)
{
	struct caam_crypto_alg *t_alg, *n;

	if (!alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
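
/*
 * Instantiate a caam_crypto_alg from one driver_algs template: fill in
 * the generic crypto_alg fields and stash the CAAM operation values for
 * caam_cra_init() to pick up later.
 */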
static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg->cra_type = &crypto_aead_type;
		alg->cra_aead = template->template_aead;
		break;
	}

	t_alg->class1_alg_type = template->class1_alg_type;
	t_alg->class2_alg_type = template->class2_alg_type;
	t_alg->alg_op = template->alg_op;

	return t_alg;
}
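
/*
 * Module load: allocate and register one crypto_alg per driver_algs
 * template. Individual failures are logged and skipped; err carries
 * the most recent failure, if any, back to the module loader.
 */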
static int __init caam_algapi_init(void)
{
	int i = 0, err = 0;

	INIT_LIST_HEAD(&alg_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		/* TODO: check if h/w supports alg */
		struct caam_crypto_alg *t_alg;

		t_alg = caam_alg_alloc(&driver_algs[i]);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				driver_algs[i].driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &alg_list);
	}

	if (!list_empty(&alg_list))
		pr_info("caam algorithms registered in /proc/crypto\n");

	return err;
}
module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
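
/*
 * Usage sketch (illustrative only, not part of this driver): once the
 * module is loaded, the templates above are reachable through the
 * generic crypto API by cra_name. A hypothetical in-kernel caller
 * could bind to the CAAM-backed AEAD like so (request setup and error
 * handling elided):
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_aead_setkey(tfm, key, keylen);
 *	crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
 *	...
 *	crypto_free_aead(tfm);
 *
 * CAAM_CRA_PRIORITY (3000) is what makes this implementation preferred
 * over the generic software authenc template when both are registered.
 */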