/* drivers/crypto/caam/caamalg.c */
/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place.  So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with.  Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 CTR_RFC3686_NONCE_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16

#define AEAD_DESC_JOB_IO_LEN		(DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 4)
#define AUTHENC_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 5)

/* length of descriptors text */
#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)

/* Note: Nonce is counted in enckeylen */
#define DESC_AEAD_CTR_RFC3686_LEN	(4 * CAAM_CMD_SZ)

#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)

#define DESC_GCM_BASE			(3 * CAAM_CMD_SZ)
#define DESC_GCM_ENC_LEN		(DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
#define DESC_GCM_DEC_LEN		(DESC_GCM_BASE + 12 * CAAM_CMD_SZ)

#define DESC_RFC4106_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4106_ENC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
#define DESC_RFC4106_DEC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)

#define DESC_RFC4543_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4543_ENC_LEN		(DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
#define DESC_RFC4543_DEC_LEN		(DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
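
/*
 * Descriptor space budgeting: each *_set_sh_desc() routine below compares
 * its DESC_*_LEN estimate plus the key material against CAAM_DESC_BYTES_MAX.
 * When everything fits, keys are inlined into the shared descriptor as
 * immediate data (append_key_as_imm()); otherwise the descriptor references
 * them by DMA address (append_key()), saving descriptor space at the cost
 * of an extra bus read per job.
 */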

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
static struct list_head alg_list;

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
	bool rfc3686;
	bool geniv;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	/* DK bit is valid only for AES */
	if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
		append_operation(desc, type | OP_ALG_AS_INITFINAL |
				 OP_ALG_DECRYPT);
		return;
	}

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}
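
/*
 * The jump pair above selects the right AES decrypt OPERATION at run time:
 * when the job enters the descriptor with its context already shared
 * (JUMP_COND_SHRD), execution lands on the OP_ALG_AAI_DK variant, which
 * treats the key already sitting in the class 1 key register as a
 * ready-to-use decryption key; otherwise the plain decrypt operation runs
 * and the unconditional jump skips the DK variant.
 */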

/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};
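
/*
 * The three shared descriptors (encrypt, decrypt, givencrypt) and the key
 * material live in the session context and are built and DMA-mapped once
 * at setkey()/setauthsize() time; every subsequent job descriptor merely
 * points at them, matching the JobDesc/ShareDesc picture at the top of
 * this file.
 */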

static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline, bool is_rfc3686)
{
	u32 *nonce;
	unsigned int enckeylen = ctx->enckeylen;

	/*
	 * RFC3686 specific:
	 *	| ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
	 *	| enckeylen = encryption key size + nonce size
	 */
	if (is_rfc3686)
		enckeylen -= CTR_RFC3686_NONCE_SIZE;

	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, enckeylen,
				  enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
			       enckeylen);
		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc,
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}
}

static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline, bool is_rfc3686)
{
	u32 *key_jump_cmd;

	/* Note: Context registers are saved. */
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	set_jump_tgt_here(desc, key_jump_cmd);
}

static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
	u32 *desc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* assoclen + cryptlen = seqinlen */
	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Prepare to read and write cryptlen + assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	/* aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqoutlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Prepare to read and write cryptlen + assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH2 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/*
	 * Insert a NOP here, since we need at least 4 instructions between
	 * code patching the descriptor buffer and the location being patched.
	 */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline;
	u32 geniv, moveiv;
	u32 ctx1_iv_off = 0;
	u32 *desc;
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	/* NULL encryption / decryption */
	if (!ctx->enckeylen)
		return aead_null_set_sh_desc(aead);

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686)
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;

	if (alg->caam.geniv)
		goto skip_enc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     FIFOLDST_VLF);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

skip_enc:
	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Choose operation */
	if (ctr_mode)
		append_operation(desc, ctx->class1_alg_type |
				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
	else
		append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	if (!alg->caam.geniv)
		goto skip_givenc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	if (is_rfc3686)
		goto copy_iv;

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_WAITCOMP |
		    MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
		    (ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

copy_iv:
	/* Copy IV to class 1 context */
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
		    (ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy iv from outfifo to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload iv */
	append_seq_fifo_load(desc, ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

skip_givenc:
	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *zero_payload_jump_cmd,
	    *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if they are loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD | JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* if assoclen + cryptlen is ZERO, skip to ICV write */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* cryptlen = seqinlen - assoclen */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* if cryptlen is ZERO jump to zero-payload commands */
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* jump the zero-payload commands */
	append_jump(desc, JUMP_TEST_ALL | 2);
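
	/*
	 * In the JUMP command above, the low bits hold a relative offset in
	 * descriptor words rather than a target patched in later, so the
	 * "| 2" makes the jump hop over the zero-payload commands that
	 * follow.
	 */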

	/* zero-payload commands */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);

	/* There is no input data */
	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);

	/* write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if they are loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD |
				   JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	/* cryptlen = seqoutlen - assoclen */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* jump to zero-payload command if cryptlen is zero */
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* store encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* zero-payload command */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Skip IV */
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Will read cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* cryptlen = seqoutlen - assoclen */
	append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);

	/* Write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Skip IV */
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Will read cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);

	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* Will write cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read encrypted data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd;
	u32 *read_move_cmd, *write_move_cmd;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* assoclen + cryptlen = seqinlen */
	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Will read assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Will write assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read and write assoclen + cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_AAD);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqoutlen */
	append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Will read assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Will write assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* In-snoop assoclen + cryptlen data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
			      u32 authkeylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, authkeylen,
			     ctx->alg_op);
}
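
/*
 * A "split key" is the pair of HMAC inner/outer pad hashes that the MDHA
 * unit derives from the raw authentication key - hence split_key_len being
 * twice the digest size (see the mdpadlen[] lookup in aead_setkey() below).
 * Precomputing it once per setkey() spares the engine from rehashing the
 * key pads on every request.
 */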

static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

	if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
	       keys.authkeylen);
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
	if (ret) {
		goto badkey;
	}

	/* append encryption key after the auth split key */
	memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
				      keys.enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len + keys.enckeylen, 1);
#endif

	ctx->enckeylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
				 keys.enckeylen, DMA_TO_DEVICE);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
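
/*
 * Resulting layout of ctx->key after aead_setkey(): the MDHA split key,
 * padded out to split_key_pad_len, followed immediately by the encryption
 * key. This matches the offsets append_key_aead() uses when it loads the
 * two keys into the class 2 and class 1 key registers.
 */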

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	ret = gcm_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->enckeylen = keylen - 4;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}

	ret = rfc4106_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->enckeylen = keylen - 4;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}

	ret = rfc4543_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
	const char *alg_name = crypto_tfm_alg_name(tfm);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;
	u32 *key_jump_cmd;
	u32 *desc;
	u32 *nonce;
	u32 geniv;
	u32 ctx1_iv_off = 0;
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = (ctr_mode &&
				 (strstr(alg_name, "rfc3686") != NULL));

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		keylen -= CTR_RFC3686_NONCE_SIZE;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Load nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u32 *)(key + keylen);
		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc, MOVE_WAITCOMP |
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Load iv */
	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));

	/* Load counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Load nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u32 *)(key + keylen);
		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc, MOVE_WAITCOMP |
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}

	set_jump_tgt_here(desc, key_jump_cmd);

	/* load IV */
	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));

	/* Load counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Choose operation */
	if (ctr_mode)
		append_operation(desc, ctx->class1_alg_type |
				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
	else
		append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Load Nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u32 *)(key + keylen);
		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc, MOVE_WAITCOMP |
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_WAITCOMP |
		    MOVE_SRC_INFIFO |
		    MOVE_DEST_CLASS1CTX |
		    (crt->ivsize << MOVE_LEN_SHIFT) |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy generated IV to memory */
	append_seq_store(desc, crt->ivsize,
			 LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
			 (ctx1_iv_off << LDST_OFFSET_SHIFT));

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, (u32)1, LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	if (ctx1_iv_off)
		append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
			    (1 << JUMP_OFFSET_SHIFT));

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return ret;
}

static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
				 const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	u32 *key_jump_cmd, *desc;
	__be64 sector_size = cpu_to_be64(512);
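
	/*
	 * The XTS tweak is derived from a sector index here, with the sector
	 * size hard-coded to 512 bytes; the value is loaded into the class 1
	 * context below so the engine can compute the tweak itself.
	 */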
1716 if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
1717 crypto_ablkcipher_set_flags(ablkcipher,
1718 CRYPTO_TFM_RES_BAD_KEY_LEN);
1719 dev_err(jrdev, "key size mismatch\n");
1720 return -EINVAL;
1723 memcpy(ctx->key, key, keylen);
1724 ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, DMA_TO_DEVICE);
1725 if (dma_mapping_error(jrdev, ctx->key_dma)) {
1726 dev_err(jrdev, "unable to map key i/o memory\n");
1727 return -ENOMEM;
1729 ctx->enckeylen = keylen;
1731 /* xts_ablkcipher_encrypt shared descriptor */
1732 desc = ctx->sh_desc_enc;
1733 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1734 /* Skip if already shared */
1735 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1736 JUMP_COND_SHRD);
1738 /* Load class1 keys only */
1739 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1740 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1742 /* Load sector size with index 40 bytes (0x28) */
1743 append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
1744 LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
1745 append_data(desc, (void *)&sector_size, 8);
1747 set_jump_tgt_here(desc, key_jump_cmd);
1750 * create sequence for loading the sector index
1751 * Upper 8B of IV - will be used as sector index
1752 * Lower 8B of IV - will be discarded
1754 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
1755 LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
1756 append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
1758 /* Load operation */
1759 append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
1760 OP_ALG_ENCRYPT);
1762 /* Perform operation */
1763 ablkcipher_append_src_dst(desc);
1765 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
1766 DMA_TO_DEVICE);
1767 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
1768 dev_err(jrdev, "unable to map shared descriptor\n");
1769 return -ENOMEM;
1771 #ifdef DEBUG
1772 print_hex_dump(KERN_ERR,
1773 "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
1774 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1775 #endif
1777 /* xts_ablkcipher_decrypt shared descriptor */
1778 desc = ctx->sh_desc_dec;
1780 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1781 /* Skip if already shared */
1782 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1783 JUMP_COND_SHRD);
1785 /* Load class1 key only */
1786 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1787 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1789 /* Load sector size with index 40 bytes (0x28) */
1790 append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
1791 LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
1792 append_data(desc, (void *)&sector_size, 8);
1794 set_jump_tgt_here(desc, key_jump_cmd);
1797 * create sequence for loading the sector index
1798 * Upper 8B of IV - will be used as sector index
1799 * Lower 8B of IV - will be discarded
1801 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
1802 LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
1803 append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
1805 /* Load operation */
1806 append_dec_op1(desc, ctx->class1_alg_type);
1808 /* Perform operation */
1809 ablkcipher_append_src_dst(desc);
1811 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
1812 DMA_TO_DEVICE);
1813 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
1814 dma_unmap_single(jrdev, ctx->sh_desc_enc_dma,
1815 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
1816 dev_err(jrdev, "unable to map shared descriptor\n");
1817                 return -ENOMEM;
1818         }
1819 #ifdef DEBUG
1820 print_hex_dump(KERN_ERR,
1821 "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
1822 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1823 #endif
1825         return 0;
1826 }
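/*
 * Editorial sketch (not driver code): packing the XTS sector index into
 * the IV consumed by the descriptors above.  Only the first 8 bytes of
 * the IV are loaded as the sector index (context offset 0x20); the
 * remaining 8 are skipped.  The big-endian order below is an assumption
 * for illustration; the real convention belongs to the IV generator.
 */
static inline void example_xts_iv_pack(u8 iv[AES_BLOCK_SIZE], u64 sector)
{
	*(__be64 *)iv = cpu_to_be64(sector);	/* loaded as sector index */
	memset(iv + 8, 0, 8);			/* discarded via the skip */
}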
1828 /*
1829  * aead_edesc - s/w-extended aead descriptor
1830 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
1831 * @src_nents: number of segments in input scatterlist
1832 * @dst_nents: number of segments in output scatterlist
1833 * @iv_dma: dma address of iv for checking continuity and link table
1834 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
1835 * @sec4_sg_bytes: length of dma mapped sec4_sg space
1836 * @sec4_sg_dma: bus physical mapped address of h/w link table
1837  * @hw_desc: the h/w job descriptor followed by any referenced link tables
1838  */
1839 struct aead_edesc {
1840 int assoc_nents;
1841 int src_nents;
1842 int dst_nents;
1843 dma_addr_t iv_dma;
1844 int sec4_sg_bytes;
1845 dma_addr_t sec4_sg_dma;
1846 struct sec4_sg_entry *sec4_sg;
1847         u32 hw_desc[];
1848 };
1850 /*
1851  * ablkcipher_edesc - s/w-extended ablkcipher descriptor
1852 * @src_nents: number of segments in input scatterlist
1853 * @dst_nents: number of segments in output scatterlist
1854 * @iv_dma: dma address of iv for checking continuity and link table
1855 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
1856 * @sec4_sg_bytes: length of dma mapped sec4_sg space
1857 * @sec4_sg_dma: bus physical mapped address of h/w link table
1858  * @hw_desc: the h/w job descriptor followed by any referenced link tables
1859  */
1860 struct ablkcipher_edesc {
1861 int src_nents;
1862 int dst_nents;
1863 dma_addr_t iv_dma;
1864 int sec4_sg_bytes;
1865 dma_addr_t sec4_sg_dma;
1866 struct sec4_sg_entry *sec4_sg;
1867         u32 hw_desc[0];
1868 };
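/*
 * Editorial sketch: each extended descriptor is one allocation, with the
 * h/w job descriptor and the sec4 S/G table trailing the struct:
 *
 *   kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes)
 *
 * A hypothetical helper locating the S/G table inside that allocation
 * (the alloc functions below compute the same address inline):
 */
static inline struct sec4_sg_entry *
example_ablkcipher_edesc_sg(struct ablkcipher_edesc *edesc, int desc_bytes)
{
	return (struct sec4_sg_entry *)((void *)edesc +
					sizeof(struct ablkcipher_edesc) +
					desc_bytes);
}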
1870 static void caam_unmap(struct device *dev, struct scatterlist *src,
1871 struct scatterlist *dst, int src_nents,
1872 int dst_nents,
1873 dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
1874 int sec4_sg_bytes)
1876 if (dst != src) {
1877 dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE);
1878 dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
1879 } else {
1880 dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL);
1883 if (iv_dma)
1884 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
1885 if (sec4_sg_bytes)
1886 dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
1887                                  DMA_TO_DEVICE);
1888 }
1890 static void aead_unmap(struct device *dev,
1891 struct aead_edesc *edesc,
1892 struct aead_request *req)
1894 caam_unmap(dev, req->src, req->dst,
1895 edesc->src_nents, edesc->dst_nents, 0, 0,
1896                    edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
1897 }
1899 static void ablkcipher_unmap(struct device *dev,
1900 struct ablkcipher_edesc *edesc,
1901 struct ablkcipher_request *req)
1903 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1904 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1906 caam_unmap(dev, req->src, req->dst,
1907 edesc->src_nents, edesc->dst_nents,
1908 edesc->iv_dma, ivsize,
1909                    edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
1910 }
1912 static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
1913 void *context)
1915 struct aead_request *req = context;
1916 struct aead_edesc *edesc;
1918 #ifdef DEBUG
1919 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1920 #endif
1922 edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
1924 if (err)
1925 caam_jr_strstatus(jrdev, err);
1927 aead_unmap(jrdev, edesc, req);
1929 kfree(edesc);
1931         aead_request_complete(req, err);
1932 }
1934 static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
1935 void *context)
1937 struct aead_request *req = context;
1938 struct aead_edesc *edesc;
1940 #ifdef DEBUG
1941 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1942 #endif
1944 edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
1946 if (err)
1947 caam_jr_strstatus(jrdev, err);
1949 aead_unmap(jrdev, edesc, req);
1951         /*
1952          * verify hw auth check passed else return -EBADMSG
1953          */
1954 if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
1955 err = -EBADMSG;
1957 kfree(edesc);
1959         aead_request_complete(req, err);
1960 }
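/*
 * Editorial sketch: how a requester's completion callback would tell the
 * ICV failure mapped above apart from other errors.  Name illustrative.
 */
static void example_aead_complete(struct crypto_async_request *areq, int err)
{
	if (err == -EBADMSG)
		pr_debug("AEAD ICV check failed (tag mismatch)\n");
	else if (err)
		pr_debug("AEAD request failed: %d\n", err);
}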
1962 static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
1963 void *context)
1965 struct ablkcipher_request *req = context;
1966 struct ablkcipher_edesc *edesc;
1967 #ifdef DEBUG
1968 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1969 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1971 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1972 #endif
1974 edesc = (struct ablkcipher_edesc *)((char *)desc -
1975 offsetof(struct ablkcipher_edesc, hw_desc));
1977 if (err)
1978 caam_jr_strstatus(jrdev, err);
1980 #ifdef DEBUG
1981 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
1982 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1983 edesc->src_nents > 1 ? 100 : ivsize, 1);
1984 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
1985 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1986 edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
1987 #endif
1989 ablkcipher_unmap(jrdev, edesc, req);
1990 kfree(edesc);
1992         ablkcipher_request_complete(req, err);
1993 }
1995 static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
1996 void *context)
1998 struct ablkcipher_request *req = context;
1999 struct ablkcipher_edesc *edesc;
2000 #ifdef DEBUG
2001 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2002 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2004 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
2005 #endif
2007 edesc = (struct ablkcipher_edesc *)((char *)desc -
2008 offsetof(struct ablkcipher_edesc, hw_desc));
2009 if (err)
2010 caam_jr_strstatus(jrdev, err);
2012 #ifdef DEBUG
2013 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
2014 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2015 ivsize, 1);
2016 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
2017 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2018 edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
2019 #endif
2021 ablkcipher_unmap(jrdev, edesc, req);
2022 kfree(edesc);
2024         ablkcipher_request_complete(req, err);
2025 }
2027 /*
2028  * Fill in aead job descriptor
2029  */
2030 static void init_aead_job(struct aead_request *req,
2031 struct aead_edesc *edesc,
2032 bool all_contig, bool encrypt)
2034 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2035 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2036 int authsize = ctx->authsize;
2037 u32 *desc = edesc->hw_desc;
2038 u32 out_options, in_options;
2039 dma_addr_t dst_dma, src_dma;
2040 int len, sec4_sg_index = 0;
2041 dma_addr_t ptr;
2042 u32 *sh_desc;
2044 sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
2045 ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
2047 len = desc_len(sh_desc);
2048 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2050 if (all_contig) {
2051 src_dma = sg_dma_address(req->src);
2052 in_options = 0;
2053 } else {
2054 src_dma = edesc->sec4_sg_dma;
2055 sec4_sg_index += edesc->src_nents;
2056 in_options = LDST_SGF;
2059 append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
2060 in_options);
2062 dst_dma = src_dma;
2063 out_options = in_options;
2065 if (unlikely(req->src != req->dst)) {
2066 if (!edesc->dst_nents) {
2067 dst_dma = sg_dma_address(req->dst);
2068 } else {
2069 dst_dma = edesc->sec4_sg_dma +
2070 sec4_sg_index *
2071 sizeof(struct sec4_sg_entry);
2072 out_options = LDST_SGF;
2076 if (encrypt)
2077 append_seq_out_ptr(desc, dst_dma,
2078 req->assoclen + req->cryptlen + authsize,
2079 out_options);
2080 else
2081 append_seq_out_ptr(desc, dst_dma,
2082 req->assoclen + req->cryptlen - authsize,
2083 out_options);
2085 /* REG3 = assoclen */
2086         append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
2087 }
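/*
 * Editorial sketch (illustrative helper, not driver code) of the
 * sequence-pointer length arithmetic above: the input covers AAD plus
 * payload, while the output grows by the ICV on encrypt and shrinks by
 * it on decrypt.
 */
static inline unsigned int example_aead_out_len(struct aead_request *req,
						unsigned int authsize,
						bool encrypt)
{
	if (encrypt)
		return req->assoclen + req->cryptlen + authsize;

	return req->assoclen + req->cryptlen - authsize;
}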
2089 static void init_gcm_job(struct aead_request *req,
2090 struct aead_edesc *edesc,
2091 bool all_contig, bool encrypt)
2093 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2094 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2095 unsigned int ivsize = crypto_aead_ivsize(aead);
2096 u32 *desc = edesc->hw_desc;
2097 bool generic_gcm = (ivsize == 12);
2098 unsigned int last;
2100 init_aead_job(req, edesc, all_contig, encrypt);
2102 /* BUG This should not be specific to generic GCM. */
2103 last = 0;
2104 if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
2105 last = FIFOLD_TYPE_LAST1;
2107 /* Read GCM IV */
2108 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
2109 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
2110 /* Append Salt */
2111 if (!generic_gcm)
2112 append_data(desc, ctx->key + ctx->enckeylen, 4);
2113 /* Append IV */
2114 append_data(desc, req->iv, ivsize);
2115         /* End of blank commands */
2116 }
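/*
 * Editorial sketch: composition of the 12 bytes of GCM IV material fed
 * to the FIFO above.  Generic gcm(aes) supplies a full 96-bit IV per
 * request; rfc4106 prepends the 4-byte salt kept after the AES key (see
 * rfc4106_setkey) to its 8-byte per-packet IV.  Helper name illustrative.
 */
static inline void example_gcm_build_iv(u8 out[12], const u8 *salt,
					const u8 *iv, bool generic_gcm)
{
	if (generic_gcm) {
		memcpy(out, iv, 12);	/* request IV is the whole 96 bits */
	} else {
		memcpy(out, salt, 4);	/* nonce from the end of the key */
		memcpy(out + 4, iv, 8);	/* explicit per-packet IV */
	}
}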
2118 static void init_authenc_job(struct aead_request *req,
2119 struct aead_edesc *edesc,
2120 bool all_contig, bool encrypt)
2122 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2123 struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
2124 struct caam_aead_alg, aead);
2125 unsigned int ivsize = crypto_aead_ivsize(aead);
2126 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2127 const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
2128 OP_ALG_AAI_CTR_MOD128);
2129 const bool is_rfc3686 = alg->caam.rfc3686;
2130 u32 *desc = edesc->hw_desc;
2131 u32 ivoffset = 0;
2133         /*
2134          * AES-CTR needs to load IV in CONTEXT1 reg
2135          * at an offset of 128bits (16bytes)
2136          * CONTEXT1[255:128] = IV
2137          */
2138 if (ctr_mode)
2139 ivoffset = 16;
2141         /*
2142          * RFC3686 specific:
2143          *      CONTEXT1[255:128] = {NONCE, IV, COUNTER}
2144          */
2145 if (is_rfc3686)
2146 ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;
2148 init_aead_job(req, edesc, all_contig, encrypt);
2150 if (ivsize && (is_rfc3686 || !(alg->caam.geniv && encrypt)))
2151 append_load_as_imm(desc, req->iv, ivsize,
2152 LDST_CLASS_1_CCB |
2153 LDST_SRCDST_BYTE_CONTEXT |
2154                                    (ivoffset << LDST_OFFSET_SHIFT));
2155 }
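/*
 * Editorial sketch of the CONTEXT1 IV placement rule used above: CBC
 * loads the IV at offset 0, AES-CTR at offset 16 (CONTEXT1[255:128]),
 * and RFC3686 additionally reserves room in front for the nonce.
 */
static inline u32 example_authenc_iv_offset(bool ctr_mode, bool is_rfc3686)
{
	if (is_rfc3686)
		return 16 + CTR_RFC3686_NONCE_SIZE;

	return ctr_mode ? 16 : 0;
}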
2157 /*
2158  * Fill in ablkcipher job descriptor
2159  */
2160 static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
2161 struct ablkcipher_edesc *edesc,
2162 struct ablkcipher_request *req,
2163 bool iv_contig)
2165 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2166 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2167 u32 *desc = edesc->hw_desc;
2168 u32 out_options = 0, in_options;
2169 dma_addr_t dst_dma, src_dma;
2170 int len, sec4_sg_index = 0;
2172 #ifdef DEBUG
2173 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
2174 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2175 ivsize, 1);
2176 print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
2177 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2178 edesc->src_nents ? 100 : req->nbytes, 1);
2179 #endif
2181 len = desc_len(sh_desc);
2182 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2184 if (iv_contig) {
2185 src_dma = edesc->iv_dma;
2186 in_options = 0;
2187 } else {
2188 src_dma = edesc->sec4_sg_dma;
2189 sec4_sg_index += edesc->src_nents + 1;
2190 in_options = LDST_SGF;
2192 append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
2194 if (likely(req->src == req->dst)) {
2195 if (!edesc->src_nents && iv_contig) {
2196 dst_dma = sg_dma_address(req->src);
2197 } else {
2198 dst_dma = edesc->sec4_sg_dma +
2199 sizeof(struct sec4_sg_entry);
2200 out_options = LDST_SGF;
2202 } else {
2203 if (!edesc->dst_nents) {
2204 dst_dma = sg_dma_address(req->dst);
2205 } else {
2206 dst_dma = edesc->sec4_sg_dma +
2207 sec4_sg_index * sizeof(struct sec4_sg_entry);
2208 out_options = LDST_SGF;
2211         append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
2212 }
2214 /*
2215  * Fill in ablkcipher givencrypt job descriptor
2216  */
2217 static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
2218 struct ablkcipher_edesc *edesc,
2219 struct ablkcipher_request *req,
2220 bool iv_contig)
2222 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2223 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2224 u32 *desc = edesc->hw_desc;
2225 u32 out_options, in_options;
2226 dma_addr_t dst_dma, src_dma;
2227 int len, sec4_sg_index = 0;
2229 #ifdef DEBUG
2230 print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
2231 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2232 ivsize, 1);
2233 print_hex_dump(KERN_ERR, "src @" __stringify(__LINE__) ": ",
2234 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2235 edesc->src_nents ? 100 : req->nbytes, 1);
2236 #endif
2238 len = desc_len(sh_desc);
2239 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2241 if (!edesc->src_nents) {
2242 src_dma = sg_dma_address(req->src);
2243 in_options = 0;
2244 } else {
2245 src_dma = edesc->sec4_sg_dma;
2246 sec4_sg_index += edesc->src_nents;
2247 in_options = LDST_SGF;
2249 append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
2251 if (iv_contig) {
2252 dst_dma = edesc->iv_dma;
2253 out_options = 0;
2254 } else {
2255 dst_dma = edesc->sec4_sg_dma +
2256 sec4_sg_index * sizeof(struct sec4_sg_entry);
2257 out_options = LDST_SGF;
2259         append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
2260 }
2262 /*
2263  * allocate and map the aead extended descriptor
2264  */
2265 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
2266 int desc_bytes, bool *all_contig_ptr,
2267 bool encrypt)
2269 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2270 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2271 struct device *jrdev = ctx->jrdev;
2272 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2273 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2274 int src_nents, dst_nents = 0;
2275 struct aead_edesc *edesc;
2276 int sgc;
2277 bool all_contig = true;
2278 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
2279 unsigned int authsize = ctx->authsize;
2281 if (unlikely(req->dst != req->src)) {
2282 src_nents = sg_count(req->src, req->assoclen + req->cryptlen);
2283 dst_nents = sg_count(req->dst,
2284 req->assoclen + req->cryptlen +
2285 (encrypt ? authsize : (-authsize)));
2286 } else {
2287 src_nents = sg_count(req->src,
2288 req->assoclen + req->cryptlen +
2289 (encrypt ? authsize : 0));
2292 /* Check if data are contiguous. */
2293 all_contig = !src_nents;
2294 if (!all_contig) {
2295 src_nents = src_nents ? : 1;
2296 sec4_sg_len = src_nents;
2299 sec4_sg_len += dst_nents;
2301 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
2303 /* allocate space for base edesc and hw desc commands, link tables */
2304 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
2305 GFP_DMA | flags);
2306 if (!edesc) {
2307 dev_err(jrdev, "could not allocate extended descriptor\n");
2308 return ERR_PTR(-ENOMEM);
2311 if (likely(req->src == req->dst)) {
2312 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2313 DMA_BIDIRECTIONAL);
2314 if (unlikely(!sgc)) {
2315 dev_err(jrdev, "unable to map source\n");
2316 kfree(edesc);
2317 return ERR_PTR(-ENOMEM);
2319 } else {
2320 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2321 DMA_TO_DEVICE);
2322 if (unlikely(!sgc)) {
2323 dev_err(jrdev, "unable to map source\n");
2324 kfree(edesc);
2325 return ERR_PTR(-ENOMEM);
2328 sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
2329 DMA_FROM_DEVICE);
2330 if (unlikely(!sgc)) {
2331 dev_err(jrdev, "unable to map destination\n");
2332 dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
2333 DMA_TO_DEVICE);
2334 kfree(edesc);
2335 return ERR_PTR(-ENOMEM);
2339 edesc->src_nents = src_nents;
2340 edesc->dst_nents = dst_nents;
2341 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
2342 desc_bytes;
2343 *all_contig_ptr = all_contig;
2345 sec4_sg_index = 0;
2346 if (!all_contig) {
2347 sg_to_sec4_sg_last(req->src, src_nents,
2348 edesc->sec4_sg + sec4_sg_index, 0);
2349 sec4_sg_index += src_nents;
2351 if (dst_nents) {
2352 sg_to_sec4_sg_last(req->dst, dst_nents,
2353 edesc->sec4_sg + sec4_sg_index, 0);
2356 if (!sec4_sg_bytes)
2357 return edesc;
2359 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2360 sec4_sg_bytes, DMA_TO_DEVICE);
2361 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2362 dev_err(jrdev, "unable to map S/G table\n");
2363 aead_unmap(jrdev, edesc, req);
2364 kfree(edesc);
2365 return ERR_PTR(-ENOMEM);
2368 edesc->sec4_sg_bytes = sec4_sg_bytes;
2370         return edesc;
2371 }
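/*
 * Editorial note on the contiguity test above: sg_count() (from
 * sg_sw_sec4.h) returns 0 when the buffer fits in a single segment, so
 * "all_contig = !src_nents" means the whole input is reachable through
 * one pointer and no sec4 S/G table is needed.  A minimal sketch of
 * that convention, assuming sg_nents_for_len() semantics:
 */
static inline int example_sg_count(struct scatterlist *sg, int nbytes)
{
	int nents = sg_nents_for_len(sg, nbytes);

	return nents == 1 ? 0 : nents;	/* 0 flags a contiguous buffer */
}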
2373 static int gcm_encrypt(struct aead_request *req)
2375 struct aead_edesc *edesc;
2376 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2377 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2378 struct device *jrdev = ctx->jrdev;
2379 bool all_contig;
2380 u32 *desc;
2381 int ret = 0;
2383 /* allocate extended descriptor */
2384 edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
2385 if (IS_ERR(edesc))
2386 return PTR_ERR(edesc);
2388 /* Create and submit job descriptor */
2389 init_gcm_job(req, edesc, all_contig, true);
2390 #ifdef DEBUG
2391 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2392 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2393 desc_bytes(edesc->hw_desc), 1);
2394 #endif
2396 desc = edesc->hw_desc;
2397 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
2398 if (!ret) {
2399 ret = -EINPROGRESS;
2400 } else {
2401 aead_unmap(jrdev, edesc, req);
2402 kfree(edesc);
2405         return ret;
2406 }
2408 static int ipsec_gcm_encrypt(struct aead_request *req)
2410 if (req->assoclen < 8)
2411 return -EINVAL;
2413         return gcm_encrypt(req);
2414 }
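/*
 * Editorial sketch: the assoclen >= 8 check above reflects the rfc4106
 * IPsec ESP layout, in which the associated data carries at least the
 * SPI and the 32-bit sequence number (extended sequence numbers add
 * four more bytes).  The minimum AAD, as a hypothetical struct:
 */
struct example_esp_aad {
	__be32 spi;		/* Security Parameters Index */
	__be32 seq_no;		/* ESP sequence number */
} __packed;			/* 8 bytes: the minimum rfc4106 assoclen */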
2416 static int aead_encrypt(struct aead_request *req)
2418 struct aead_edesc *edesc;
2419 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2420 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2421 struct device *jrdev = ctx->jrdev;
2422 bool all_contig;
2423 u32 *desc;
2424 int ret = 0;
2426 /* allocate extended descriptor */
2427 edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
2428 &all_contig, true);
2429 if (IS_ERR(edesc))
2430 return PTR_ERR(edesc);
2432 /* Create and submit job descriptor */
2433 init_authenc_job(req, edesc, all_contig, true);
2434 #ifdef DEBUG
2435 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2436 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2437 desc_bytes(edesc->hw_desc), 1);
2438 #endif
2440 desc = edesc->hw_desc;
2441 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
2442 if (!ret) {
2443 ret = -EINPROGRESS;
2444 } else {
2445 aead_unmap(jrdev, edesc, req);
2446 kfree(edesc);
2449         return ret;
2450 }
2452 static int gcm_decrypt(struct aead_request *req)
2454 struct aead_edesc *edesc;
2455 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2456 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2457 struct device *jrdev = ctx->jrdev;
2458 bool all_contig;
2459 u32 *desc;
2460 int ret = 0;
2462 /* allocate extended descriptor */
2463 edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
2464 if (IS_ERR(edesc))
2465 return PTR_ERR(edesc);
2467 /* Create and submit job descriptor*/
2468 init_gcm_job(req, edesc, all_contig, false);
2469 #ifdef DEBUG
2470 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2471 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2472 desc_bytes(edesc->hw_desc), 1);
2473 #endif
2475 desc = edesc->hw_desc;
2476 ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
2477 if (!ret) {
2478 ret = -EINPROGRESS;
2479 } else {
2480 aead_unmap(jrdev, edesc, req);
2481 kfree(edesc);
2484         return ret;
2485 }
2487 static int ipsec_gcm_decrypt(struct aead_request *req)
2489 if (req->assoclen < 8)
2490 return -EINVAL;
2492         return gcm_decrypt(req);
2493 }
2495 static int aead_decrypt(struct aead_request *req)
2497 struct aead_edesc *edesc;
2498 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2499 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2500 struct device *jrdev = ctx->jrdev;
2501 bool all_contig;
2502 u32 *desc;
2503 int ret = 0;
2505 /* allocate extended descriptor */
2506 edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
2507 &all_contig, false);
2508 if (IS_ERR(edesc))
2509 return PTR_ERR(edesc);
2511 #ifdef DEBUG
2512 print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
2513 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2514 req->assoclen + req->cryptlen, 1);
2515 #endif
2517 /* Create and submit job descriptor*/
2518 init_authenc_job(req, edesc, all_contig, false);
2519 #ifdef DEBUG
2520 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2521 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2522 desc_bytes(edesc->hw_desc), 1);
2523 #endif
2525 desc = edesc->hw_desc;
2526 ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
2527 if (!ret) {
2528 ret = -EINPROGRESS;
2529 } else {
2530 aead_unmap(jrdev, edesc, req);
2531 kfree(edesc);
2534         return ret;
2535 }
2537 static int aead_givdecrypt(struct aead_request *req)
2539 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2540 unsigned int ivsize = crypto_aead_ivsize(aead);
2542 if (req->cryptlen < ivsize)
2543 return -EINVAL;
2545 req->cryptlen -= ivsize;
2546 req->assoclen += ivsize;
2548         return aead_decrypt(req);
2549 }
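/*
 * Editorial worked example of the IV folding above: echainiv transmits
 * the IV explicitly in front of the ciphertext, so on decrypt it is
 * moved from the crypto length into the associated data, where it is
 * authenticated but not deciphered.  E.g. for cbc(aes) (ivsize = 16),
 * a request with assoclen = 20 and cryptlen = 96 is passed on to
 * aead_decrypt() with assoclen = 36 and cryptlen = 80.
 */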
2551 /*
2552  * allocate and map the ablkcipher extended descriptor for ablkcipher
2553  */
2554 static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
2555 *req, int desc_bytes,
2556 bool *iv_contig_out)
2558 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2559 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2560 struct device *jrdev = ctx->jrdev;
2561 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2562 CRYPTO_TFM_REQ_MAY_SLEEP)) ?
2563 GFP_KERNEL : GFP_ATOMIC;
2564 int src_nents, dst_nents = 0, sec4_sg_bytes;
2565 struct ablkcipher_edesc *edesc;
2566 dma_addr_t iv_dma = 0;
2567 bool iv_contig = false;
2568 int sgc;
2569 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2570 int sec4_sg_index;
2572 src_nents = sg_count(req->src, req->nbytes);
2574 if (req->dst != req->src)
2575 dst_nents = sg_count(req->dst, req->nbytes);
2577 if (likely(req->src == req->dst)) {
2578 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2579 DMA_BIDIRECTIONAL);
2580 } else {
2581 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2582 DMA_TO_DEVICE);
2583 sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
2584 DMA_FROM_DEVICE);
2587 iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
2588 if (dma_mapping_error(jrdev, iv_dma)) {
2589 dev_err(jrdev, "unable to map IV\n");
2590                 return ERR_PTR(-ENOMEM);
2591         }
2593         /*
2594          * Check if iv can be contiguous with source and destination.
2595          * If so, include it. If not, create scatterlist.
2596          */
2597 if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
2598 iv_contig = true;
2599 else
2600 src_nents = src_nents ? : 1;
2601 sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
2602 sizeof(struct sec4_sg_entry);
2604 /* allocate space for base edesc and hw desc commands, link tables */
2605 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
2606 GFP_DMA | flags);
2607 if (!edesc) {
2608 dev_err(jrdev, "could not allocate extended descriptor\n");
2609 return ERR_PTR(-ENOMEM);
2612 edesc->src_nents = src_nents;
2613 edesc->dst_nents = dst_nents;
2614 edesc->sec4_sg_bytes = sec4_sg_bytes;
2615 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
2616 desc_bytes;
2618 sec4_sg_index = 0;
2619 if (!iv_contig) {
2620 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
2621 sg_to_sec4_sg_last(req->src, src_nents,
2622 edesc->sec4_sg + 1, 0);
2623 sec4_sg_index += 1 + src_nents;
2626 if (dst_nents) {
2627 sg_to_sec4_sg_last(req->dst, dst_nents,
2628 edesc->sec4_sg + sec4_sg_index, 0);
2631 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2632 sec4_sg_bytes, DMA_TO_DEVICE);
2633 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2634 dev_err(jrdev, "unable to map S/G table\n");
2635 return ERR_PTR(-ENOMEM);
2638 edesc->iv_dma = iv_dma;
2640 #ifdef DEBUG
2641 print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
2642 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
2643 sec4_sg_bytes, 1);
2644 #endif
2646 *iv_contig_out = iv_contig;
2647         return edesc;
2648 }
2650 static int ablkcipher_encrypt(struct ablkcipher_request *req)
2652 struct ablkcipher_edesc *edesc;
2653 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2654 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2655 struct device *jrdev = ctx->jrdev;
2656 bool iv_contig;
2657 u32 *desc;
2658 int ret = 0;
2660 /* allocate extended descriptor */
2661 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
2662 CAAM_CMD_SZ, &iv_contig);
2663 if (IS_ERR(edesc))
2664 return PTR_ERR(edesc);
2666 /* Create and submit job descriptor*/
2667 init_ablkcipher_job(ctx->sh_desc_enc,
2668 ctx->sh_desc_enc_dma, edesc, req, iv_contig);
2669 #ifdef DEBUG
2670 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
2671 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2672 desc_bytes(edesc->hw_desc), 1);
2673 #endif
2674 desc = edesc->hw_desc;
2675 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
2677 if (!ret) {
2678 ret = -EINPROGRESS;
2679 } else {
2680 ablkcipher_unmap(jrdev, edesc, req);
2681 kfree(edesc);
2684         return ret;
2685 }
2687 static int ablkcipher_decrypt(struct ablkcipher_request *req)
2689 struct ablkcipher_edesc *edesc;
2690 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2691 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2692 struct device *jrdev = ctx->jrdev;
2693 bool iv_contig;
2694 u32 *desc;
2695 int ret = 0;
2697 /* allocate extended descriptor */
2698 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
2699 CAAM_CMD_SZ, &iv_contig);
2700 if (IS_ERR(edesc))
2701 return PTR_ERR(edesc);
2703 /* Create and submit job descriptor*/
2704 init_ablkcipher_job(ctx->sh_desc_dec,
2705 ctx->sh_desc_dec_dma, edesc, req, iv_contig);
2706 desc = edesc->hw_desc;
2707 #ifdef DEBUG
2708 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
2709 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2710 desc_bytes(edesc->hw_desc), 1);
2711 #endif
2713 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
2714 if (!ret) {
2715 ret = -EINPROGRESS;
2716 } else {
2717 ablkcipher_unmap(jrdev, edesc, req);
2718 kfree(edesc);
2721         return ret;
2722 }
2724 /*
2725  * allocate and map the ablkcipher extended descriptor
2726  * for ablkcipher givencrypt
2727  */
2728 static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
2729 struct skcipher_givcrypt_request *greq,
2730 int desc_bytes,
2731 bool *iv_contig_out)
2733 struct ablkcipher_request *req = &greq->creq;
2734 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2735 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2736 struct device *jrdev = ctx->jrdev;
2737 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2738 CRYPTO_TFM_REQ_MAY_SLEEP)) ?
2739 GFP_KERNEL : GFP_ATOMIC;
2740 int src_nents, dst_nents = 0, sec4_sg_bytes;
2741 struct ablkcipher_edesc *edesc;
2742 dma_addr_t iv_dma = 0;
2743 bool iv_contig = false;
2744 int sgc;
2745 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2746 int sec4_sg_index;
2748 src_nents = sg_count(req->src, req->nbytes);
2750 if (unlikely(req->dst != req->src))
2751 dst_nents = sg_count(req->dst, req->nbytes);
2753 if (likely(req->src == req->dst)) {
2754 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2755 DMA_BIDIRECTIONAL);
2756 } else {
2757 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2758 DMA_TO_DEVICE);
2759 sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
2760                                  DMA_FROM_DEVICE);
2761         }
2763         /*
2764          * Check if iv can be contiguous with source and destination.
2765          * If so, include it. If not, create scatterlist.
2766          */
2767 iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
2768 if (dma_mapping_error(jrdev, iv_dma)) {
2769 dev_err(jrdev, "unable to map IV\n");
2770 return ERR_PTR(-ENOMEM);
2773 if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
2774 iv_contig = true;
2775 else
2776 dst_nents = dst_nents ? : 1;
2777 sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
2778 sizeof(struct sec4_sg_entry);
2780 /* allocate space for base edesc and hw desc commands, link tables */
2781 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
2782 GFP_DMA | flags);
2783 if (!edesc) {
2784 dev_err(jrdev, "could not allocate extended descriptor\n");
2785 return ERR_PTR(-ENOMEM);
2788 edesc->src_nents = src_nents;
2789 edesc->dst_nents = dst_nents;
2790 edesc->sec4_sg_bytes = sec4_sg_bytes;
2791 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
2792 desc_bytes;
2794 sec4_sg_index = 0;
2795 if (src_nents) {
2796 sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
2797 sec4_sg_index += src_nents;
2800 if (!iv_contig) {
2801 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
2802 iv_dma, ivsize, 0);
2803 sec4_sg_index += 1;
2804 sg_to_sec4_sg_last(req->dst, dst_nents,
2805 edesc->sec4_sg + sec4_sg_index, 0);
2808 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2809 sec4_sg_bytes, DMA_TO_DEVICE);
2810 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2811 dev_err(jrdev, "unable to map S/G table\n");
2812 return ERR_PTR(-ENOMEM);
2814 edesc->iv_dma = iv_dma;
2816 #ifdef DEBUG
2817 print_hex_dump(KERN_ERR,
2818 "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
2819 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
2820 sec4_sg_bytes, 1);
2821 #endif
2823 *iv_contig_out = iv_contig;
2824         return edesc;
2825 }
2827 static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
2829 struct ablkcipher_request *req = &creq->creq;
2830 struct ablkcipher_edesc *edesc;
2831 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2832 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2833 struct device *jrdev = ctx->jrdev;
2834 bool iv_contig;
2835 u32 *desc;
2836 int ret = 0;
2838 /* allocate extended descriptor */
2839 edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
2840 CAAM_CMD_SZ, &iv_contig);
2841 if (IS_ERR(edesc))
2842 return PTR_ERR(edesc);
2844 /* Create and submit job descriptor*/
2845 init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
2846 edesc, req, iv_contig);
2847 #ifdef DEBUG
2848 print_hex_dump(KERN_ERR,
2849 "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
2850 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2851 desc_bytes(edesc->hw_desc), 1);
2852 #endif
2853 desc = edesc->hw_desc;
2854 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
2856 if (!ret) {
2857 ret = -EINPROGRESS;
2858 } else {
2859 ablkcipher_unmap(jrdev, edesc, req);
2860 kfree(edesc);
2863         return ret;
2864 }
2866 #define template_aead template_u.aead
2867 #define template_ablkcipher template_u.ablkcipher
2868 struct caam_alg_template {
2869 char name[CRYPTO_MAX_ALG_NAME];
2870 char driver_name[CRYPTO_MAX_ALG_NAME];
2871 unsigned int blocksize;
2872 u32 type;
2873 union {
2874 struct ablkcipher_alg ablkcipher;
2875 } template_u;
2876 u32 class1_alg_type;
2877 u32 class2_alg_type;
2878         u32 alg_op;
2879 };
2881 static struct caam_alg_template driver_algs[] = {
2882 /* ablkcipher descriptor */
2884 .name = "cbc(aes)",
2885 .driver_name = "cbc-aes-caam",
2886 .blocksize = AES_BLOCK_SIZE,
2887 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
2888 .template_ablkcipher = {
2889 .setkey = ablkcipher_setkey,
2890 .encrypt = ablkcipher_encrypt,
2891 .decrypt = ablkcipher_decrypt,
2892 .givencrypt = ablkcipher_givencrypt,
2893 .geniv = "<built-in>",
2894 .min_keysize = AES_MIN_KEY_SIZE,
2895 .max_keysize = AES_MAX_KEY_SIZE,
2896 .ivsize = AES_BLOCK_SIZE,
2898 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2901 .name = "cbc(des3_ede)",
2902 .driver_name = "cbc-3des-caam",
2903 .blocksize = DES3_EDE_BLOCK_SIZE,
2904 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
2905 .template_ablkcipher = {
2906 .setkey = ablkcipher_setkey,
2907 .encrypt = ablkcipher_encrypt,
2908 .decrypt = ablkcipher_decrypt,
2909 .givencrypt = ablkcipher_givencrypt,
2910 .geniv = "<built-in>",
2911 .min_keysize = DES3_EDE_KEY_SIZE,
2912 .max_keysize = DES3_EDE_KEY_SIZE,
2913 .ivsize = DES3_EDE_BLOCK_SIZE,
2915 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2918 .name = "cbc(des)",
2919 .driver_name = "cbc-des-caam",
2920 .blocksize = DES_BLOCK_SIZE,
2921 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
2922 .template_ablkcipher = {
2923 .setkey = ablkcipher_setkey,
2924 .encrypt = ablkcipher_encrypt,
2925 .decrypt = ablkcipher_decrypt,
2926 .givencrypt = ablkcipher_givencrypt,
2927 .geniv = "<built-in>",
2928 .min_keysize = DES_KEY_SIZE,
2929 .max_keysize = DES_KEY_SIZE,
2930 .ivsize = DES_BLOCK_SIZE,
2932 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2935 .name = "ctr(aes)",
2936 .driver_name = "ctr-aes-caam",
2937 .blocksize = 1,
2938 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2939 .template_ablkcipher = {
2940 .setkey = ablkcipher_setkey,
2941 .encrypt = ablkcipher_encrypt,
2942 .decrypt = ablkcipher_decrypt,
2943 .geniv = "chainiv",
2944 .min_keysize = AES_MIN_KEY_SIZE,
2945 .max_keysize = AES_MAX_KEY_SIZE,
2946 .ivsize = AES_BLOCK_SIZE,
2948 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
2951 .name = "rfc3686(ctr(aes))",
2952 .driver_name = "rfc3686-ctr-aes-caam",
2953 .blocksize = 1,
2954 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
2955 .template_ablkcipher = {
2956 .setkey = ablkcipher_setkey,
2957 .encrypt = ablkcipher_encrypt,
2958 .decrypt = ablkcipher_decrypt,
2959 .givencrypt = ablkcipher_givencrypt,
2960 .geniv = "<built-in>",
2961 .min_keysize = AES_MIN_KEY_SIZE +
2962 CTR_RFC3686_NONCE_SIZE,
2963 .max_keysize = AES_MAX_KEY_SIZE +
2964 CTR_RFC3686_NONCE_SIZE,
2965 .ivsize = CTR_RFC3686_IV_SIZE,
2967 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
2970 .name = "xts(aes)",
2971 .driver_name = "xts-aes-caam",
2972 .blocksize = AES_BLOCK_SIZE,
2973 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2974 .template_ablkcipher = {
2975 .setkey = xts_ablkcipher_setkey,
2976 .encrypt = ablkcipher_encrypt,
2977 .decrypt = ablkcipher_decrypt,
2978 .geniv = "eseqiv",
2979 .min_keysize = 2 * AES_MIN_KEY_SIZE,
2980 .max_keysize = 2 * AES_MAX_KEY_SIZE,
2981 .ivsize = AES_BLOCK_SIZE,
2983                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
2984         },
2985 };
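/*
 * Editorial sketch: reaching one of the templates above through the
 * (legacy) ablkcipher API once registration has run.  Error handling
 * trimmed; the caam instance is selected when its priority wins.
 */
static int example_use_cbc_aes(void)
{
	struct crypto_ablkcipher *tfm;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* crypto_ablkcipher_setkey() ends up in ablkcipher_setkey() above */
	crypto_free_ablkcipher(tfm);
	return 0;
}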
2987 static struct caam_aead_alg driver_aeads[] = {
2989 .aead = {
2990 .base = {
2991 .cra_name = "rfc4106(gcm(aes))",
2992 .cra_driver_name = "rfc4106-gcm-aes-caam",
2993 .cra_blocksize = 1,
2995 .setkey = rfc4106_setkey,
2996 .setauthsize = rfc4106_setauthsize,
2997 .encrypt = ipsec_gcm_encrypt,
2998 .decrypt = ipsec_gcm_decrypt,
2999 .ivsize = 8,
3000 .maxauthsize = AES_BLOCK_SIZE,
3002 .caam = {
3003 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3007 .aead = {
3008 .base = {
3009 .cra_name = "rfc4543(gcm(aes))",
3010 .cra_driver_name = "rfc4543-gcm-aes-caam",
3011 .cra_blocksize = 1,
3013 .setkey = rfc4543_setkey,
3014 .setauthsize = rfc4543_setauthsize,
3015 .encrypt = ipsec_gcm_encrypt,
3016 .decrypt = ipsec_gcm_decrypt,
3017 .ivsize = 8,
3018 .maxauthsize = AES_BLOCK_SIZE,
3020 .caam = {
3021 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3024 /* Galois Counter Mode */
3026 .aead = {
3027 .base = {
3028 .cra_name = "gcm(aes)",
3029 .cra_driver_name = "gcm-aes-caam",
3030 .cra_blocksize = 1,
3032 .setkey = gcm_setkey,
3033 .setauthsize = gcm_setauthsize,
3034 .encrypt = gcm_encrypt,
3035 .decrypt = gcm_decrypt,
3036 .ivsize = 12,
3037 .maxauthsize = AES_BLOCK_SIZE,
3039 .caam = {
3040                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3041                 },
3042         },
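/*
 * Editorial sketch: driving the gcm(aes) entry above through the AEAD
 * API.  Assumes src already holds assoclen bytes of AAD followed by
 * cryptlen bytes of plaintext; key setup and error paths are trimmed,
 * and a real caller would also handle -EINPROGRESS/-EBUSY for async
 * completion.
 */
static int example_gcm_encrypt(struct scatterlist *src, u8 *iv,
			       unsigned int assoclen, unsigned int cryptlen)
{
	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	struct aead_request *req;
	int ret;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_aead(tfm);
		return -ENOMEM;
	}

	/* crypto_aead_setkey()/crypto_aead_setauthsize() omitted here */
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, src, src, cryptlen, iv);
	ret = crypto_aead_encrypt(req);	/* lands in gcm_encrypt() above */

	aead_request_free(req);
	crypto_free_aead(tfm);
	return ret;
}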
3043 /* single-pass ipsec_esp descriptor */
3045 .aead = {
3046 .base = {
3047 .cra_name = "authenc(hmac(md5),"
3048 "ecb(cipher_null))",
3049 .cra_driver_name = "authenc-hmac-md5-"
3050 "ecb-cipher_null-caam",
3051 .cra_blocksize = NULL_BLOCK_SIZE,
3053 .setkey = aead_setkey,
3054 .setauthsize = aead_setauthsize,
3055 .encrypt = aead_encrypt,
3056 .decrypt = aead_decrypt,
3057 .ivsize = NULL_IV_SIZE,
3058 .maxauthsize = MD5_DIGEST_SIZE,
3060 .caam = {
3061 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3062 OP_ALG_AAI_HMAC_PRECOMP,
3063 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3067 .aead = {
3068 .base = {
3069 .cra_name = "authenc(hmac(sha1),"
3070 "ecb(cipher_null))",
3071 .cra_driver_name = "authenc-hmac-sha1-"
3072 "ecb-cipher_null-caam",
3073 .cra_blocksize = NULL_BLOCK_SIZE,
3075 .setkey = aead_setkey,
3076 .setauthsize = aead_setauthsize,
3077 .encrypt = aead_encrypt,
3078 .decrypt = aead_decrypt,
3079 .ivsize = NULL_IV_SIZE,
3080 .maxauthsize = SHA1_DIGEST_SIZE,
3082 .caam = {
3083 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3084 OP_ALG_AAI_HMAC_PRECOMP,
3085 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3089 .aead = {
3090 .base = {
3091 .cra_name = "authenc(hmac(sha224),"
3092 "ecb(cipher_null))",
3093 .cra_driver_name = "authenc-hmac-sha224-"
3094 "ecb-cipher_null-caam",
3095 .cra_blocksize = NULL_BLOCK_SIZE,
3097 .setkey = aead_setkey,
3098 .setauthsize = aead_setauthsize,
3099 .encrypt = aead_encrypt,
3100 .decrypt = aead_decrypt,
3101 .ivsize = NULL_IV_SIZE,
3102 .maxauthsize = SHA224_DIGEST_SIZE,
3104 .caam = {
3105 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3106 OP_ALG_AAI_HMAC_PRECOMP,
3107 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3111 .aead = {
3112 .base = {
3113 .cra_name = "authenc(hmac(sha256),"
3114 "ecb(cipher_null))",
3115 .cra_driver_name = "authenc-hmac-sha256-"
3116 "ecb-cipher_null-caam",
3117 .cra_blocksize = NULL_BLOCK_SIZE,
3119 .setkey = aead_setkey,
3120 .setauthsize = aead_setauthsize,
3121 .encrypt = aead_encrypt,
3122 .decrypt = aead_decrypt,
3123 .ivsize = NULL_IV_SIZE,
3124 .maxauthsize = SHA256_DIGEST_SIZE,
3126 .caam = {
3127 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3128 OP_ALG_AAI_HMAC_PRECOMP,
3129 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3133 .aead = {
3134 .base = {
3135 .cra_name = "authenc(hmac(sha384),"
3136 "ecb(cipher_null))",
3137 .cra_driver_name = "authenc-hmac-sha384-"
3138 "ecb-cipher_null-caam",
3139 .cra_blocksize = NULL_BLOCK_SIZE,
3141 .setkey = aead_setkey,
3142 .setauthsize = aead_setauthsize,
3143 .encrypt = aead_encrypt,
3144 .decrypt = aead_decrypt,
3145 .ivsize = NULL_IV_SIZE,
3146 .maxauthsize = SHA384_DIGEST_SIZE,
3148 .caam = {
3149 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3150 OP_ALG_AAI_HMAC_PRECOMP,
3151 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3155 .aead = {
3156 .base = {
3157 .cra_name = "authenc(hmac(sha512),"
3158 "ecb(cipher_null))",
3159 .cra_driver_name = "authenc-hmac-sha512-"
3160 "ecb-cipher_null-caam",
3161 .cra_blocksize = NULL_BLOCK_SIZE,
3163 .setkey = aead_setkey,
3164 .setauthsize = aead_setauthsize,
3165 .encrypt = aead_encrypt,
3166 .decrypt = aead_decrypt,
3167 .ivsize = NULL_IV_SIZE,
3168 .maxauthsize = SHA512_DIGEST_SIZE,
3170 .caam = {
3171 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3172 OP_ALG_AAI_HMAC_PRECOMP,
3173 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3177 .aead = {
3178 .base = {
3179 .cra_name = "authenc(hmac(md5),cbc(aes))",
3180 .cra_driver_name = "authenc-hmac-md5-"
3181 "cbc-aes-caam",
3182 .cra_blocksize = AES_BLOCK_SIZE,
3184 .setkey = aead_setkey,
3185 .setauthsize = aead_setauthsize,
3186 .encrypt = aead_encrypt,
3187 .decrypt = aead_decrypt,
3188 .ivsize = AES_BLOCK_SIZE,
3189 .maxauthsize = MD5_DIGEST_SIZE,
3191 .caam = {
3192 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3193 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3194 OP_ALG_AAI_HMAC_PRECOMP,
3195 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3199 .aead = {
3200 .base = {
3201 .cra_name = "echainiv(authenc(hmac(md5),"
3202 "cbc(aes)))",
3203 .cra_driver_name = "echainiv-authenc-hmac-md5-"
3204 "cbc-aes-caam",
3205 .cra_blocksize = AES_BLOCK_SIZE,
3207 .setkey = aead_setkey,
3208 .setauthsize = aead_setauthsize,
3209 .encrypt = aead_encrypt,
3210 .decrypt = aead_givdecrypt,
3211 .ivsize = AES_BLOCK_SIZE,
3212 .maxauthsize = MD5_DIGEST_SIZE,
3214 .caam = {
3215 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3216 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3217 OP_ALG_AAI_HMAC_PRECOMP,
3218 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3219 .geniv = true,
3223 .aead = {
3224 .base = {
3225 .cra_name = "authenc(hmac(sha1),cbc(aes))",
3226 .cra_driver_name = "authenc-hmac-sha1-"
3227 "cbc-aes-caam",
3228 .cra_blocksize = AES_BLOCK_SIZE,
3230 .setkey = aead_setkey,
3231 .setauthsize = aead_setauthsize,
3232 .encrypt = aead_encrypt,
3233 .decrypt = aead_decrypt,
3234 .ivsize = AES_BLOCK_SIZE,
3235 .maxauthsize = SHA1_DIGEST_SIZE,
3237 .caam = {
3238 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3239 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3240 OP_ALG_AAI_HMAC_PRECOMP,
3241 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3245 .aead = {
3246 .base = {
3247 .cra_name = "echainiv(authenc(hmac(sha1),"
3248 "cbc(aes)))",
3249 .cra_driver_name = "echainiv-authenc-"
3250 "hmac-sha1-cbc-aes-caam",
3251 .cra_blocksize = AES_BLOCK_SIZE,
3253 .setkey = aead_setkey,
3254 .setauthsize = aead_setauthsize,
3255 .encrypt = aead_encrypt,
3256 .decrypt = aead_givdecrypt,
3257 .ivsize = AES_BLOCK_SIZE,
3258 .maxauthsize = SHA1_DIGEST_SIZE,
3260 .caam = {
3261 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3262 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3263 OP_ALG_AAI_HMAC_PRECOMP,
3264 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3265 .geniv = true,
3269 .aead = {
3270 .base = {
3271 .cra_name = "authenc(hmac(sha224),cbc(aes))",
3272 .cra_driver_name = "authenc-hmac-sha224-"
3273 "cbc-aes-caam",
3274 .cra_blocksize = AES_BLOCK_SIZE,
3276 .setkey = aead_setkey,
3277 .setauthsize = aead_setauthsize,
3278 .encrypt = aead_encrypt,
3279 .decrypt = aead_decrypt,
3280 .ivsize = AES_BLOCK_SIZE,
3281 .maxauthsize = SHA224_DIGEST_SIZE,
3283 .caam = {
3284 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3285 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3286 OP_ALG_AAI_HMAC_PRECOMP,
3287 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3291 .aead = {
3292 .base = {
3293 .cra_name = "echainiv(authenc(hmac(sha224),"
3294 "cbc(aes)))",
3295 .cra_driver_name = "echainiv-authenc-"
3296 "hmac-sha224-cbc-aes-caam",
3297 .cra_blocksize = AES_BLOCK_SIZE,
3299 .setkey = aead_setkey,
3300 .setauthsize = aead_setauthsize,
3301 .encrypt = aead_encrypt,
3302 .decrypt = aead_givdecrypt,
3303 .ivsize = AES_BLOCK_SIZE,
3304 .maxauthsize = SHA224_DIGEST_SIZE,
3306 .caam = {
3307 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3308 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3309 OP_ALG_AAI_HMAC_PRECOMP,
3310 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3311 .geniv = true,
3315 .aead = {
3316 .base = {
3317 .cra_name = "authenc(hmac(sha256),cbc(aes))",
3318 .cra_driver_name = "authenc-hmac-sha256-"
3319 "cbc-aes-caam",
3320 .cra_blocksize = AES_BLOCK_SIZE,
3322 .setkey = aead_setkey,
3323 .setauthsize = aead_setauthsize,
3324 .encrypt = aead_encrypt,
3325 .decrypt = aead_decrypt,
3326 .ivsize = AES_BLOCK_SIZE,
3327 .maxauthsize = SHA256_DIGEST_SIZE,
3329 .caam = {
3330 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3331 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3332 OP_ALG_AAI_HMAC_PRECOMP,
3333 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3337 .aead = {
3338 .base = {
3339 .cra_name = "echainiv(authenc(hmac(sha256),"
3340 "cbc(aes)))",
3341 .cra_driver_name = "echainiv-authenc-"
3342 "hmac-sha256-cbc-aes-caam",
3343 .cra_blocksize = AES_BLOCK_SIZE,
3345 .setkey = aead_setkey,
3346 .setauthsize = aead_setauthsize,
3347 .encrypt = aead_encrypt,
3348 .decrypt = aead_givdecrypt,
3349 .ivsize = AES_BLOCK_SIZE,
3350 .maxauthsize = SHA256_DIGEST_SIZE,
3352 .caam = {
3353 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3354 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3355 OP_ALG_AAI_HMAC_PRECOMP,
3356 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3357 .geniv = true,
3361 .aead = {
3362 .base = {
3363 .cra_name = "authenc(hmac(sha384),cbc(aes))",
3364 .cra_driver_name = "authenc-hmac-sha384-"
3365 "cbc-aes-caam",
3366 .cra_blocksize = AES_BLOCK_SIZE,
3368 .setkey = aead_setkey,
3369 .setauthsize = aead_setauthsize,
3370 .encrypt = aead_encrypt,
3371 .decrypt = aead_decrypt,
3372 .ivsize = AES_BLOCK_SIZE,
3373 .maxauthsize = SHA384_DIGEST_SIZE,
3375 .caam = {
3376 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3377 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3378 OP_ALG_AAI_HMAC_PRECOMP,
3379 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3383 .aead = {
3384 .base = {
3385 .cra_name = "echainiv(authenc(hmac(sha384),"
3386 "cbc(aes)))",
3387 .cra_driver_name = "echainiv-authenc-"
3388 "hmac-sha384-cbc-aes-caam",
3389 .cra_blocksize = AES_BLOCK_SIZE,
3391 .setkey = aead_setkey,
3392 .setauthsize = aead_setauthsize,
3393 .encrypt = aead_encrypt,
3394 .decrypt = aead_givdecrypt,
3395 .ivsize = AES_BLOCK_SIZE,
3396 .maxauthsize = SHA384_DIGEST_SIZE,
3398 .caam = {
3399 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3400 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3401 OP_ALG_AAI_HMAC_PRECOMP,
3402 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3403 .geniv = true,
3407 .aead = {
3408 .base = {
3409 .cra_name = "authenc(hmac(sha512),cbc(aes))",
3410 .cra_driver_name = "authenc-hmac-sha512-"
3411 "cbc-aes-caam",
3412 .cra_blocksize = AES_BLOCK_SIZE,
3414 .setkey = aead_setkey,
3415 .setauthsize = aead_setauthsize,
3416 .encrypt = aead_encrypt,
3417 .decrypt = aead_decrypt,
3418 .ivsize = AES_BLOCK_SIZE,
3419 .maxauthsize = SHA512_DIGEST_SIZE,
3421 .caam = {
3422 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3423 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3424 OP_ALG_AAI_HMAC_PRECOMP,
3425 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3429 .aead = {
3430 .base = {
3431 .cra_name = "echainiv(authenc(hmac(sha512),"
3432 "cbc(aes)))",
3433 .cra_driver_name = "echainiv-authenc-"
3434 "hmac-sha512-cbc-aes-caam",
3435 .cra_blocksize = AES_BLOCK_SIZE,
3437 .setkey = aead_setkey,
3438 .setauthsize = aead_setauthsize,
3439 .encrypt = aead_encrypt,
3440 .decrypt = aead_givdecrypt,
3441 .ivsize = AES_BLOCK_SIZE,
3442 .maxauthsize = SHA512_DIGEST_SIZE,
3444 .caam = {
3445 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3446 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3447 OP_ALG_AAI_HMAC_PRECOMP,
3448 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3449 .geniv = true,
3453 .aead = {
3454 .base = {
3455 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
3456 .cra_driver_name = "authenc-hmac-md5-"
3457 "cbc-des3_ede-caam",
3458 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3460 .setkey = aead_setkey,
3461 .setauthsize = aead_setauthsize,
3462 .encrypt = aead_encrypt,
3463 .decrypt = aead_decrypt,
3464 .ivsize = DES3_EDE_BLOCK_SIZE,
3465 .maxauthsize = MD5_DIGEST_SIZE,
3467 .caam = {
3468 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3469 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3470 OP_ALG_AAI_HMAC_PRECOMP,
3471 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3475 .aead = {
3476 .base = {
3477 .cra_name = "echainiv(authenc(hmac(md5),"
3478 "cbc(des3_ede)))",
3479 .cra_driver_name = "echainiv-authenc-hmac-md5-"
3480 "cbc-des3_ede-caam",
3481 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3483 .setkey = aead_setkey,
3484 .setauthsize = aead_setauthsize,
3485 .encrypt = aead_encrypt,
3486 .decrypt = aead_givdecrypt,
3487 .ivsize = DES3_EDE_BLOCK_SIZE,
3488 .maxauthsize = MD5_DIGEST_SIZE,
3490 .caam = {
3491 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3492 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3493 OP_ALG_AAI_HMAC_PRECOMP,
3494 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3495 .geniv = true,
3499 .aead = {
3500 .base = {
3501 .cra_name = "authenc(hmac(sha1),"
3502 "cbc(des3_ede))",
3503 .cra_driver_name = "authenc-hmac-sha1-"
3504 "cbc-des3_ede-caam",
3505 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3507 .setkey = aead_setkey,
3508 .setauthsize = aead_setauthsize,
3509 .encrypt = aead_encrypt,
3510 .decrypt = aead_decrypt,
3511 .ivsize = DES3_EDE_BLOCK_SIZE,
3512 .maxauthsize = SHA1_DIGEST_SIZE,
3514 .caam = {
3515 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3516 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3517 OP_ALG_AAI_HMAC_PRECOMP,
3518 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3522 .aead = {
3523 .base = {
3524 .cra_name = "echainiv(authenc(hmac(sha1),"
3525 "cbc(des3_ede)))",
3526 .cra_driver_name = "echainiv-authenc-"
3527 "hmac-sha1-"
3528 "cbc-des3_ede-caam",
3529 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3531 .setkey = aead_setkey,
3532 .setauthsize = aead_setauthsize,
3533 .encrypt = aead_encrypt,
3534 .decrypt = aead_givdecrypt,
3535 .ivsize = DES3_EDE_BLOCK_SIZE,
3536 .maxauthsize = SHA1_DIGEST_SIZE,
3538 .caam = {
3539 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3540 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3541 OP_ALG_AAI_HMAC_PRECOMP,
3542 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3543 .geniv = true,
3547 .aead = {
3548 .base = {
3549 .cra_name = "authenc(hmac(sha224),"
3550 "cbc(des3_ede))",
3551 .cra_driver_name = "authenc-hmac-sha224-"
3552 "cbc-des3_ede-caam",
3553 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3555 .setkey = aead_setkey,
3556 .setauthsize = aead_setauthsize,
3557 .encrypt = aead_encrypt,
3558 .decrypt = aead_decrypt,
3559 .ivsize = DES3_EDE_BLOCK_SIZE,
3560 .maxauthsize = SHA224_DIGEST_SIZE,
3562 .caam = {
3563 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3564 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3565 OP_ALG_AAI_HMAC_PRECOMP,
3566 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3570 .aead = {
3571 .base = {
3572 .cra_name = "echainiv(authenc(hmac(sha224),"
3573 "cbc(des3_ede)))",
3574 .cra_driver_name = "echainiv-authenc-"
3575 "hmac-sha224-"
3576 "cbc-des3_ede-caam",
3577 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3579 .setkey = aead_setkey,
3580 .setauthsize = aead_setauthsize,
3581 .encrypt = aead_encrypt,
3582 .decrypt = aead_givdecrypt,
3583 .ivsize = DES3_EDE_BLOCK_SIZE,
3584 .maxauthsize = SHA224_DIGEST_SIZE,
3586 .caam = {
3587 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3588 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3589 OP_ALG_AAI_HMAC_PRECOMP,
3590 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3591 .geniv = true,
3595 .aead = {
3596 .base = {
3597 .cra_name = "authenc(hmac(sha256),"
3598 "cbc(des3_ede))",
3599 .cra_driver_name = "authenc-hmac-sha256-"
3600 "cbc-des3_ede-caam",
3601 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3603 .setkey = aead_setkey,
3604 .setauthsize = aead_setauthsize,
3605 .encrypt = aead_encrypt,
3606 .decrypt = aead_decrypt,
3607 .ivsize = DES3_EDE_BLOCK_SIZE,
3608 .maxauthsize = SHA256_DIGEST_SIZE,
3610 .caam = {
3611 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3612 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3613 OP_ALG_AAI_HMAC_PRECOMP,
3614 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3618 .aead = {
3619 .base = {
3620 .cra_name = "echainiv(authenc(hmac(sha256),"
3621 "cbc(des3_ede)))",
3622 .cra_driver_name = "echainiv-authenc-"
3623 "hmac-sha256-"
3624 "cbc-des3_ede-caam",
3625 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3627 .setkey = aead_setkey,
3628 .setauthsize = aead_setauthsize,
3629 .encrypt = aead_encrypt,
3630 .decrypt = aead_givdecrypt,
3631 .ivsize = DES3_EDE_BLOCK_SIZE,
3632 .maxauthsize = SHA256_DIGEST_SIZE,
3634 .caam = {
3635 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3636 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3637 OP_ALG_AAI_HMAC_PRECOMP,
3638 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3639 .geniv = true,
3643 .aead = {
3644 .base = {
3645 .cra_name = "authenc(hmac(sha384),"
3646 "cbc(des3_ede))",
3647 .cra_driver_name = "authenc-hmac-sha384-"
3648 "cbc-des3_ede-caam",
3649 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3651 .setkey = aead_setkey,
3652 .setauthsize = aead_setauthsize,
3653 .encrypt = aead_encrypt,
3654 .decrypt = aead_decrypt,
3655 .ivsize = DES3_EDE_BLOCK_SIZE,
3656 .maxauthsize = SHA384_DIGEST_SIZE,
3658 .caam = {
3659 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3660 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3661 OP_ALG_AAI_HMAC_PRECOMP,
3662 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3666 .aead = {
3667 .base = {
3668 .cra_name = "echainiv(authenc(hmac(sha384),"
3669 "cbc(des3_ede)))",
3670 .cra_driver_name = "echainiv-authenc-"
3671 "hmac-sha384-"
3672 "cbc-des3_ede-caam",
3673 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3675 .setkey = aead_setkey,
3676 .setauthsize = aead_setauthsize,
3677 .encrypt = aead_encrypt,
3678 .decrypt = aead_givdecrypt,
3679 .ivsize = DES3_EDE_BLOCK_SIZE,
3680 .maxauthsize = SHA384_DIGEST_SIZE,
3682 .caam = {
3683 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3684 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3685 OP_ALG_AAI_HMAC_PRECOMP,
3686 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3687 .geniv = true,
3691 .aead = {
3692 .base = {
3693 .cra_name = "authenc(hmac(sha512),"
3694 "cbc(des3_ede))",
3695 .cra_driver_name = "authenc-hmac-sha512-"
3696 "cbc-des3_ede-caam",
3697 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3699 .setkey = aead_setkey,
3700 .setauthsize = aead_setauthsize,
3701 .encrypt = aead_encrypt,
3702 .decrypt = aead_decrypt,
3703 .ivsize = DES3_EDE_BLOCK_SIZE,
3704 .maxauthsize = SHA512_DIGEST_SIZE,
3706 .caam = {
3707 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3708 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3709 OP_ALG_AAI_HMAC_PRECOMP,
3710 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3714 .aead = {
3715 .base = {
3716 .cra_name = "echainiv(authenc(hmac(sha512),"
3717 "cbc(des3_ede)))",
3718 .cra_driver_name = "echainiv-authenc-"
3719 "hmac-sha512-"
3720 "cbc-des3_ede-caam",
3721 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3723 .setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc("
					    "hmac(md5),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-md5-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc("
					    "hmac(sha1),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha1-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc("
					    "hmac(sha224),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha224-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha256),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha256-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha384),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha384-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha512),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha512-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
			.geniv = true,
		},
	},
};

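/*
 * Instances of this wrapper are allocated by caam_alg_alloc() and chained
 * on alg_list so that caam_algapi_exit() can unregister and free them; the
 * embedded caam_alg_entry carries the CAAM descriptor template values for
 * the algorithm.
 */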
struct caam_crypto_alg {
	struct crypto_alg crypto_alg;
	struct list_head entry;
	struct caam_alg_entry caam;
};

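/*
 * Common per-transform setup: grab a job ring for this tfm and cache the
 * OR'd operation-header templates that the shared-descriptor construction
 * paths will use.
 */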
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
{
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	/* copy descriptor header template value */
	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;

	return 0;
}

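/*
 * Both init hooks - the legacy crypto_alg path and the AEAD path - recover
 * their caam_alg_entry from the containing structure and defer to
 * caam_init_common().
 */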
static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg =
		 container_of(alg, struct caam_crypto_alg, crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam);
}

static int caam_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg =
		 container_of(alg, struct caam_aead_alg, aead);
	struct caam_ctx *ctx = crypto_aead_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam);
}

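/*
 * Common per-transform teardown: unmap whichever shared-descriptor and key
 * DMA mappings the setkey/descriptor-build paths established, then release
 * the job ring taken in caam_init_common().
 */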
static void caam_exit_common(struct caam_ctx *ctx)
{
	if (ctx->sh_desc_enc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
	if (ctx->sh_desc_dec_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
	if (ctx->sh_desc_givenc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
				 desc_bytes(ctx->sh_desc_givenc),
				 DMA_TO_DEVICE);
	if (ctx->key_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->key_dma))
		dma_unmap_single(ctx->jrdev, ctx->key_dma,
				 ctx->enckeylen + ctx->split_key_pad_len,
				 DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}

static void caam_cra_exit(struct crypto_tfm *tfm)
{
	caam_exit_common(crypto_tfm_ctx(tfm));
}

static void caam_aead_exit(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}

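/*
 * Module unload: unregister every AEAD that made it through registration,
 * then drain alg_list of the dynamically allocated legacy algorithms.
 */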
static void __exit caam_algapi_exit(void)
{
	struct caam_crypto_alg *t_alg, *n;
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	if (!alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

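/*
 * Build a crypto_alg from a driver_algs template entry. The returned object
 * is freed by the caller on registration failure, or from caam_algapi_exit()
 * once it sits on alg_list.
 */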
static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_GIVCIPHER:
		alg->cra_type = &crypto_givcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	}

	t_alg->caam.class1_alg_type = template->class1_alg_type;
	t_alg->caam.class2_alg_type = template->class2_alg_type;
	t_alg->caam.alg_op = template->alg_op;

	return t_alg;
}

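/*
 * Fill in the boilerplate fields shared by every entry in driver_aeads
 * before registration; the per-algorithm fields come from the static table.
 */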
static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_aead_init;
	alg->exit = caam_aead_exit;
}

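/*
 * Module init: locate the CAAM controller node, read the CHA version and
 * instantiation registers to learn which DES/AES/MD blocks exist, and
 * register only those algorithms the hardware can actually run.
 */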
static int __init caam_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;
	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	bool registered = false;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	INIT_LIST_HEAD(&alg_list);

	/*
	 * Register crypto algorithms the device supports.
	 * First, detect presence and attributes of DES, AES, and MD blocks.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;

	/* If MD is present, limit digest size based on LP256 */
	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
		md_limit = SHA256_DIGEST_SIZE;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_crypto_alg *t_alg;
		struct caam_alg_template *alg = driver_algs + i;
		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		t_alg = caam_alg_alloc(alg);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
			continue;
		}

		list_add_tail(&t_alg->entry, &alg_list);
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/*
		 * Check support for AES algorithms not available
		 * on LP devices.
		 */
		if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
			if (alg_aai == OP_ALG_AAI_GCM)
				continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD or MD size is not supported by device.
		 */
		if (c2_alg_sel &&
		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
			continue;

		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->aead.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	if (registered)
		pr_info("caam algorithms registered in /proc/crypto\n");

	return err;
}

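/*
 * Illustrative usage sketch (editorial note, not part of this driver):
 * once the module has loaded, any kernel consumer can reach these
 * transforms through the generic AEAD API using the cra_name strings from
 * driver_aeads. Key layout (authenc keys are rtattr-wrapped), scatterlist
 * setup and the asynchronous completion callback are elided; everything
 * named below is a standard crypto API entry point, and done_cb/done_ctx
 * are hypothetical caller-supplied completion hooks.
 *
 *	struct crypto_aead *tfm;
 *	struct aead_request *req;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(des))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	crypto_aead_setkey(tfm, key, keylen);
 *	crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
 *
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	aead_request_set_callback(req, 0, done_cb, done_ctx);
 *	aead_request_set_ad(req, assoclen);
 *	aead_request_set_crypt(req, src_sg, dst_sg, cryptlen, iv);
 *	crypto_aead_encrypt(req);
 *
 *	(in a real caller, the request and tfm are released from the
 *	completion path via aead_request_free() and crypto_free_aead())
 */
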
module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");