/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16

/* length of descriptors text */
#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(DESC_AEAD_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
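/*
 * Illustrative use of the sizes above (a sketch, not additional driver
 * code): whether key material can be inlined into a shared descriptor is
 * decided by checking that the descriptor text, the job I/O commands and
 * the keys still fit in the 64-word descriptor buffer, e.g.
 *
 *	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
 *	    ctx->split_key_pad_len + ctx->enckeylen <=
 *	    CAAM_DESC_BYTES_MAX)
 *		keys_fit_inline = true;
 *
 * aead_set_sh_desc() below performs this check before building each of
 * its shared descriptors.
 */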
#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}
/*
 * Wait for completion of class 1 key loading before allowing
 * error propagation
 */
static inline void append_dec_shr_done(u32 *desc)
{
	u32 *jump_cmd;

	jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}
/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}
/*
 * For aead encrypt and decrypt, read iv for both classes
 */
static inline void aead_append_ld_iv(u32 *desc, int ivsize)
{
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | ivsize);
	append_move(desc, MOVE_SRC_CLASS1CTX |
		    MOVE_DEST_CLASS2INFIFO | ivsize);
}
/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}
/*
 * If all data, including src (with assoc and iv) or dst (with iv only) are
 * contiguous
 */
#define GIV_SRC_CONTIG		1
#define GIV_DST_CONTIG		(1 << 1)
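/*
 * Illustrative sketch (not additional driver code): both flags start out
 * set and are cleared as soon as a DMA-level discontinuity is found, as
 * done in aead_giv_edesc_alloc() below:
 *
 *	u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
 *
 *	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
 *	    iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
 *		contig &= ~GIV_SRC_CONTIG;
 *	if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
 *		contig &= ~GIV_DST_CONTIG;
 *
 * A cleared bit forces the corresponding side of the transfer to go
 * through the sec4 link table instead of a single contiguous pointer.
 */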
/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};
static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    bool keys_fit_inline)
{
	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}
}
static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  bool keys_fit_inline)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}
static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd;
	u32 geniv, moveiv;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);
	aead_append_ld_iv(desc, tfm->ivsize);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	/* aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline);

	/* Only propagate error immediately if shared */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, key_jump_cmd);
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
	set_jump_tgt_here(desc, jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);
	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	aead_append_ld_iv(desc, tfm->ivsize);

	append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
	append_dec_shr_done(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO |
		    MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy IV to class 1 context */
	append_move(desc, MOVE_SRC_CLASS1CTX |
		    MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = seqinlen - (ivsize + cryptlen) */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy iv from class 1 ctx to class 2 fifo*/
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload iv */
	append_seq_fifo_load(desc, tfm->ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}
static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
			      u32 authkeylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, authkeylen,
			     ctx->alg_op);
}
static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct rtattr *rta = (void *)key;
	struct crypto_authenc_key_param *param;
	unsigned int authkeylen;
	unsigned int enckeylen;
	int ret = 0;

	param = RTA_DATA(rta);
	enckeylen = be32_to_cpu(param->enckeylen);

	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);

	if (keylen < enckeylen)
		goto badkey;

	authkeylen = keylen - enckeylen;

	if (keylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keylen, enckeylen, authkeylen);
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_aead_key(ctx, key, authkeylen);
	if (ret)
		goto badkey;

	/* postpend encryption key to auth split key */
	memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
				      enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len + enckeylen, 1);
#endif

	ctx->enckeylen = enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
				 enckeylen, DMA_TO_DEVICE);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
	struct device *jrdev = ctx->jrdev;
	int ret = 0;
	u32 *key_jump_cmd, *jump_cmd;
	u32 *desc;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_SERIAL);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);

	/* Load iv */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* For aead, only propagate error immediately if shared */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, key_jump_cmd);
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
	set_jump_tgt_here(desc, jump_cmd);

	/* load IV */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Choose operation */
	append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	/* Wait for key to load before allowing error propagation */
	append_dec_shr_done(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return ret;
}
/*
 * aead_edesc - s/w-extended aead descriptor
 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @assoc_chained: if associated data is chained
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct aead_edesc {
	int assoc_nents;
	bool assoc_chained;
	int src_nents;
	bool src_chained;
	int dst_nents;
	bool dst_chained;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};
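/*
 * Layout note (illustrative sketch, not additional driver code): each
 * extended descriptor is carved out of a single allocation so that the
 * job descriptor and the link table travel with the bookkeeping data,
 * as the *_edesc_alloc() helpers below do:
 *
 *	edesc = kmalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
 *			GFP_DMA | flags);
 *	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desc_bytes;
 *
 * i.e. hw_desc[] occupies desc_bytes immediately after the struct and
 * the sec4 link table follows it.
 */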
/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ablkcipher_edesc {
	int src_nents;
	bool src_chained;
	int dst_nents;
	bool dst_chained;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};
static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       bool src_chained, int dst_nents, bool dst_chained,
		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
		       int sec4_sg_bytes)
{
	if (dst != src) {
		dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
				     src_chained);
		dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
				     dst_chained);
	} else {
		dma_unmap_sg_chained(dev, src, src_nents ? : 1,
				     DMA_BIDIRECTIONAL, src_chained);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	if (sec4_sg_bytes)
		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
				 DMA_TO_DEVICE);
}
static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
			     DMA_TO_DEVICE, edesc->assoc_chained);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
		   edesc->dst_chained, edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}
static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
		   edesc->dst_chained, edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}
static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	aead_unmap(jrdev, edesc, req);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen +
		       ctx->authsize + 4, 1);
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}
static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
		       req->cryptlen - ctx->authsize, 1);
#endif

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	aead_unmap(jrdev, edesc, req);

	/*
	 * verify hw auth check passed else return -EBADMSG
	 */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4,
		       ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
		       sizeof(struct iphdr) + req->assoclen +
		       ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
		       ctx->authsize + 36, 1);
	if (!err && edesc->sec4_sg_bytes) {
		struct scatterlist *sg = sg_last(req->src, edesc->src_nents);

		print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
			       sg->length + ctx->authsize + 16, 1);
	}
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}
static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}
static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}
/*
 * Fill in aead job descriptor
 */
static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
			  struct aead_edesc *edesc,
			  struct aead_request *req,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
				 (edesc->src_nents ? : 1);
		in_options = LDST_SGF;
	}
	if (encrypt)
		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
				  req->cryptlen - authsize, in_options);
	else
		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
				  req->cryptlen, in_options);

	if (likely(req->src == req->dst)) {
		if (all_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
				  ((edesc->assoc_nents ? : 1) + 1);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	if (encrypt)
		append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
	else
		append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
				   out_options);
}
/*
 * Fill in aead givencrypt job descriptor
 */
static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
			      struct aead_edesc *edesc,
			      struct aead_request *req,
			      u32 contig)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
	print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (contig & GIV_SRC_CONTIG) {
		src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
			  req->cryptlen - authsize, in_options);

	if (contig & GIV_DST_CONTIG) {
		dst_dma = edesc->iv_dma;
	} else {
		if (likely(req->src == req->dst)) {
			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
				  edesc->assoc_nents;
			out_options = LDST_SGF;
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}

	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options);
}
/*
 * Fill in ablkcipher job descriptor
 */
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
				struct ablkcipher_edesc *edesc,
				struct ablkcipher_request *req,
				bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->nbytes, 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (iv_contig) {
		src_dma = edesc->iv_dma;
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);

	if (likely(req->src == req->dst)) {
		if (!edesc->src_nents && iv_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index * sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}
/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	bool all_contig = true;
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	int ivsize = crypto_aead_ivsize(aead);
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;

	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
	src_nents = sg_count(req->src, req->cryptlen, &src_chained);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);

	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
				 DMA_TO_DEVICE, assoc_chained);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	/* Check if data are contiguous */
	iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
	    iv_dma || src_nents || iv_dma + ivsize !=
	    sg_dma_address(req->src)) {
		all_contig = false;
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		sec4_sg_len = assoc_nents + 1 + src_nents;
	}
	sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	*all_contig_ptr = all_contig;

	sec4_sg_index = 0;
	if (!all_contig) {
		sg_to_sec4_sg(req->assoc,
			      (assoc_nents ? : 1),
			      edesc->sec4_sg +
			      sec4_sg_index, 0);
		sec4_sg_index += assoc_nents ? : 1;
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->src,
				   (src_nents ? : 1),
				   edesc->sec4_sg +
				   sec4_sg_index, 0);
		sec4_sg_index += src_nents ? : 1;
	}
	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	return edesc;
}
static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	req->cryptlen += ctx->authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
		      all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);
#endif

	/* Create and submit job descriptor*/
	init_aead_job(ctx->sh_desc_dec,
		      ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
/*
 * allocate and map the aead extended descriptor for aead givencrypt
 */
static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
					       *greq, int desc_bytes,
					       u32 *contig_ptr)
{
	struct aead_request *req = &greq->areq;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
	int ivsize = crypto_aead_ivsize(aead);
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;

	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
	src_nents = sg_count(req->src, req->cryptlen, &src_chained);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);

	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
				 DMA_TO_DEVICE, assoc_chained);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	/* Check if data are contiguous */
	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
	    iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
		contig &= ~GIV_SRC_CONTIG;
	if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
		contig &= ~GIV_DST_CONTIG;
	if (unlikely(req->src != req->dst)) {
		dst_nents = dst_nents ? : 1;
		sec4_sg_len += 1;
	}
	if (!(contig & GIV_SRC_CONTIG)) {
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		sec4_sg_len += assoc_nents + 1 + src_nents;
		if (likely(req->src == req->dst))
			contig &= ~GIV_DST_CONTIG;
	}
	sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	*contig_ptr = contig;

	sec4_sg_index = 0;
	if (!(contig & GIV_SRC_CONTIG)) {
		sg_to_sec4_sg(req->assoc, assoc_nents,
			      edesc->sec4_sg +
			      sec4_sg_index, 0);
		sec4_sg_index += assoc_nents;
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg +
				   sec4_sg_index, 0);
		sec4_sg_index += src_nents;
	}
	if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	return edesc;
}
static int aead_givencrypt(struct aead_givcrypt_request *areq)
{
	struct aead_request *req = &areq->areq;
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 contig;
	u32 *desc;
	int ret = 0;

	req->cryptlen += ctx->authsize;

	/* allocate extended descriptor */
	edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
				     CAAM_CMD_SZ, &contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);
#endif

	/* Create and submit job descriptor*/
	init_aead_giv_job(ctx->sh_desc_givenc,
			  ctx->sh_desc_givenc_dma, edesc, req, contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
/*
 * allocate and map the ablkcipher extended descriptor for ablkcipher
 */
static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, int desc_bytes,
						       bool *iv_contig_out)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0, sec4_sg_bytes;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool iv_contig = false;
	int sgc;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	bool src_chained = false, dst_chained = false;
	int sec4_sg_index;

	src_nents = sg_count(req->src, req->nbytes, &src_chained);

	if (req->dst != req->src)
		dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	/*
	 * Check if iv can be contiguous with source and destination.
	 * If so, include it. If not, create scatterlist.
	 */
	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
		iv_contig = true;
	else
		src_nents = src_nents ? : 1;
	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			 desc_bytes;

	sec4_sg_index = 0;
	if (!iv_contig) {
		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg + 1, 0);
		sec4_sg_index += 1 + src_nents;
	}

	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	*iv_contig_out = iv_contig;
	return edesc;
}
static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor*/
	init_ablkcipher_job(ctx->sh_desc_enc,
			    ctx->sh_desc_enc_dma, edesc, req, iv_contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor*/
	init_ablkcipher_job(ctx->sh_desc_dec,
			    ctx->sh_desc_dec_dma, edesc, req, iv_contig);
	desc = edesc->hw_desc;
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
#define template_aead		template_u.aead
#define template_ablkcipher	template_u.ablkcipher
struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	u32 type;
	union {
		struct ablkcipher_alg ablkcipher;
		struct aead_alg aead;
		struct blkcipher_alg blkcipher;
		struct cipher_alg cipher;
		struct compress_alg compress;
	} template_u;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
};
1650 static struct caam_alg_template driver_algs
[] = {
1651 /* single-pass ipsec_esp descriptor */
1653 .name
= "authenc(hmac(md5),cbc(aes))",
1654 .driver_name
= "authenc-hmac-md5-cbc-aes-caam",
1655 .blocksize
= AES_BLOCK_SIZE
,
1656 .type
= CRYPTO_ALG_TYPE_AEAD
,
1658 .setkey
= aead_setkey
,
1659 .setauthsize
= aead_setauthsize
,
1660 .encrypt
= aead_encrypt
,
1661 .decrypt
= aead_decrypt
,
1662 .givencrypt
= aead_givencrypt
,
1663 .geniv
= "<built-in>",
1664 .ivsize
= AES_BLOCK_SIZE
,
1665 .maxauthsize
= MD5_DIGEST_SIZE
,
1667 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
1668 .class2_alg_type
= OP_ALG_ALGSEL_MD5
| OP_ALG_AAI_HMAC_PRECOMP
,
1669 .alg_op
= OP_ALG_ALGSEL_MD5
| OP_ALG_AAI_HMAC
,
1672 .name
= "authenc(hmac(sha1),cbc(aes))",
1673 .driver_name
= "authenc-hmac-sha1-cbc-aes-caam",
1674 .blocksize
= AES_BLOCK_SIZE
,
1675 .type
= CRYPTO_ALG_TYPE_AEAD
,
1677 .setkey
= aead_setkey
,
1678 .setauthsize
= aead_setauthsize
,
1679 .encrypt
= aead_encrypt
,
1680 .decrypt
= aead_decrypt
,
1681 .givencrypt
= aead_givencrypt
,
1682 .geniv
= "<built-in>",
1683 .ivsize
= AES_BLOCK_SIZE
,
1684 .maxauthsize
= SHA1_DIGEST_SIZE
,
1686 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
1687 .class2_alg_type
= OP_ALG_ALGSEL_SHA1
| OP_ALG_AAI_HMAC_PRECOMP
,
1688 .alg_op
= OP_ALG_ALGSEL_SHA1
| OP_ALG_AAI_HMAC
,
1691 .name
= "authenc(hmac(sha224),cbc(aes))",
1692 .driver_name
= "authenc-hmac-sha224-cbc-aes-caam",
1693 .blocksize
= AES_BLOCK_SIZE
,
1694 .type
= CRYPTO_ALG_TYPE_AEAD
,
1696 .setkey
= aead_setkey
,
1697 .setauthsize
= aead_setauthsize
,
1698 .encrypt
= aead_encrypt
,
1699 .decrypt
= aead_decrypt
,
1700 .givencrypt
= aead_givencrypt
,
1701 .geniv
= "<built-in>",
1702 .ivsize
= AES_BLOCK_SIZE
,
1703 .maxauthsize
= SHA224_DIGEST_SIZE
,
1705 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
1706 .class2_alg_type
= OP_ALG_ALGSEL_SHA224
|
1707 OP_ALG_AAI_HMAC_PRECOMP
,
1708 .alg_op
= OP_ALG_ALGSEL_SHA224
| OP_ALG_AAI_HMAC
,
1711 .name
= "authenc(hmac(sha256),cbc(aes))",
1712 .driver_name
= "authenc-hmac-sha256-cbc-aes-caam",
1713 .blocksize
= AES_BLOCK_SIZE
,
1714 .type
= CRYPTO_ALG_TYPE_AEAD
,
1716 .setkey
= aead_setkey
,
1717 .setauthsize
= aead_setauthsize
,
1718 .encrypt
= aead_encrypt
,
1719 .decrypt
= aead_decrypt
,
1720 .givencrypt
= aead_givencrypt
,
1721 .geniv
= "<built-in>",
1722 .ivsize
= AES_BLOCK_SIZE
,
1723 .maxauthsize
= SHA256_DIGEST_SIZE
,
1725 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
1726 .class2_alg_type
= OP_ALG_ALGSEL_SHA256
|
1727 OP_ALG_AAI_HMAC_PRECOMP
,
1728 .alg_op
= OP_ALG_ALGSEL_SHA256
| OP_ALG_AAI_HMAC
,
1731 .name
= "authenc(hmac(sha384),cbc(aes))",
1732 .driver_name
= "authenc-hmac-sha384-cbc-aes-caam",
1733 .blocksize
= AES_BLOCK_SIZE
,
1734 .type
= CRYPTO_ALG_TYPE_AEAD
,
1736 .setkey
= aead_setkey
,
1737 .setauthsize
= aead_setauthsize
,
1738 .encrypt
= aead_encrypt
,
1739 .decrypt
= aead_decrypt
,
1740 .givencrypt
= aead_givencrypt
,
1741 .geniv
= "<built-in>",
1742 .ivsize
= AES_BLOCK_SIZE
,
1743 .maxauthsize
= SHA384_DIGEST_SIZE
,
1745 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
1746 .class2_alg_type
= OP_ALG_ALGSEL_SHA384
|
1747 OP_ALG_AAI_HMAC_PRECOMP
,
1748 .alg_op
= OP_ALG_ALGSEL_SHA384
| OP_ALG_AAI_HMAC
,
1752 .name
= "authenc(hmac(sha512),cbc(aes))",
1753 .driver_name
= "authenc-hmac-sha512-cbc-aes-caam",
1754 .blocksize
= AES_BLOCK_SIZE
,
1755 .type
= CRYPTO_ALG_TYPE_AEAD
,
1757 .setkey
= aead_setkey
,
1758 .setauthsize
= aead_setauthsize
,
1759 .encrypt
= aead_encrypt
,
1760 .decrypt
= aead_decrypt
,
1761 .givencrypt
= aead_givencrypt
,
1762 .geniv
= "<built-in>",
1763 .ivsize
= AES_BLOCK_SIZE
,
1764 .maxauthsize
= SHA512_DIGEST_SIZE
,
1766 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
1767 .class2_alg_type
= OP_ALG_ALGSEL_SHA512
|
1768 OP_ALG_AAI_HMAC_PRECOMP
,
1769 .alg_op
= OP_ALG_ALGSEL_SHA512
| OP_ALG_AAI_HMAC
,
1772 .name
= "authenc(hmac(md5),cbc(des3_ede))",
1773 .driver_name
= "authenc-hmac-md5-cbc-des3_ede-caam",
1774 .blocksize
= DES3_EDE_BLOCK_SIZE
,
1775 .type
= CRYPTO_ALG_TYPE_AEAD
,
1777 .setkey
= aead_setkey
,
1778 .setauthsize
= aead_setauthsize
,
1779 .encrypt
= aead_encrypt
,
1780 .decrypt
= aead_decrypt
,
1781 .givencrypt
= aead_givencrypt
,
1782 .geniv
= "<built-in>",
1783 .ivsize
= DES3_EDE_BLOCK_SIZE
,
1784 .maxauthsize
= MD5_DIGEST_SIZE
,
1786 .class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
1787 .class2_alg_type
= OP_ALG_ALGSEL_MD5
| OP_ALG_AAI_HMAC_PRECOMP
,
1788 .alg_op
= OP_ALG_ALGSEL_MD5
| OP_ALG_AAI_HMAC
,
1791 .name
= "authenc(hmac(sha1),cbc(des3_ede))",
1792 .driver_name
= "authenc-hmac-sha1-cbc-des3_ede-caam",
1793 .blocksize
= DES3_EDE_BLOCK_SIZE
,
1794 .type
= CRYPTO_ALG_TYPE_AEAD
,
1796 .setkey
= aead_setkey
,
1797 .setauthsize
= aead_setauthsize
,
1798 .encrypt
= aead_encrypt
,
1799 .decrypt
= aead_decrypt
,
1800 .givencrypt
= aead_givencrypt
,
1801 .geniv
= "<built-in>",
1802 .ivsize
= DES3_EDE_BLOCK_SIZE
,
1803 .maxauthsize
= SHA1_DIGEST_SIZE
,
1805 .class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
1806 .class2_alg_type
= OP_ALG_ALGSEL_SHA1
| OP_ALG_AAI_HMAC_PRECOMP
,
1807 .alg_op
= OP_ALG_ALGSEL_SHA1
| OP_ALG_AAI_HMAC
,
1810 .name
= "authenc(hmac(sha224),cbc(des3_ede))",
1811 .driver_name
= "authenc-hmac-sha224-cbc-des3_ede-caam",
1812 .blocksize
= DES3_EDE_BLOCK_SIZE
,
1813 .type
= CRYPTO_ALG_TYPE_AEAD
,
1815 .setkey
= aead_setkey
,
1816 .setauthsize
= aead_setauthsize
,
1817 .encrypt
= aead_encrypt
,
1818 .decrypt
= aead_decrypt
,
1819 .givencrypt
= aead_givencrypt
,
1820 .geniv
= "<built-in>",
1821 .ivsize
= DES3_EDE_BLOCK_SIZE
,
1822 .maxauthsize
= SHA224_DIGEST_SIZE
,
1824 .class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
1825 .class2_alg_type
= OP_ALG_ALGSEL_SHA224
|
1826 OP_ALG_AAI_HMAC_PRECOMP
,
1827 .alg_op
= OP_ALG_ALGSEL_SHA224
| OP_ALG_AAI_HMAC
,
1830 .name
= "authenc(hmac(sha256),cbc(des3_ede))",
1831 .driver_name
= "authenc-hmac-sha256-cbc-des3_ede-caam",
1832 .blocksize
= DES3_EDE_BLOCK_SIZE
,
1833 .type
= CRYPTO_ALG_TYPE_AEAD
,
1835 .setkey
= aead_setkey
,
1836 .setauthsize
= aead_setauthsize
,
1837 .encrypt
= aead_encrypt
,
1838 .decrypt
= aead_decrypt
,
1839 .givencrypt
= aead_givencrypt
,
1840 .geniv
= "<built-in>",
1841 .ivsize
= DES3_EDE_BLOCK_SIZE
,
1842 .maxauthsize
= SHA256_DIGEST_SIZE
,
1844 .class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
1845 .class2_alg_type
= OP_ALG_ALGSEL_SHA256
|
1846 OP_ALG_AAI_HMAC_PRECOMP
,
1847 .alg_op
= OP_ALG_ALGSEL_SHA256
| OP_ALG_AAI_HMAC
,
1850 .name
= "authenc(hmac(sha384),cbc(des3_ede))",
1851 .driver_name
= "authenc-hmac-sha384-cbc-des3_ede-caam",
1852 .blocksize
= DES3_EDE_BLOCK_SIZE
,
1853 .type
= CRYPTO_ALG_TYPE_AEAD
,
1855 .setkey
= aead_setkey
,
1856 .setauthsize
= aead_setauthsize
,
1857 .encrypt
= aead_encrypt
,
1858 .decrypt
= aead_decrypt
,
1859 .givencrypt
= aead_givencrypt
,
1860 .geniv
= "<built-in>",
1861 .ivsize
= DES3_EDE_BLOCK_SIZE
,
1862 .maxauthsize
= SHA384_DIGEST_SIZE
,
1864 .class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
1865 .class2_alg_type
= OP_ALG_ALGSEL_SHA384
|
1866 OP_ALG_AAI_HMAC_PRECOMP
,
1867 .alg_op
= OP_ALG_ALGSEL_SHA384
| OP_ALG_AAI_HMAC
,
1870 .name
= "authenc(hmac(sha512),cbc(des3_ede))",
1871 .driver_name
= "authenc-hmac-sha512-cbc-des3_ede-caam",
1872 .blocksize
= DES3_EDE_BLOCK_SIZE
,
1873 .type
= CRYPTO_ALG_TYPE_AEAD
,
1875 .setkey
= aead_setkey
,
1876 .setauthsize
= aead_setauthsize
,
1877 .encrypt
= aead_encrypt
,
1878 .decrypt
= aead_decrypt
,
1879 .givencrypt
= aead_givencrypt
,
1880 .geniv
= "<built-in>",
1881 .ivsize
= DES3_EDE_BLOCK_SIZE
,
1882 .maxauthsize
= SHA512_DIGEST_SIZE
,
1884 .class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
1885 .class2_alg_type
= OP_ALG_ALGSEL_SHA512
|
1886 OP_ALG_AAI_HMAC_PRECOMP
,
1887 .alg_op
= OP_ALG_ALGSEL_SHA512
| OP_ALG_AAI_HMAC
,
1890 .name
= "authenc(hmac(md5),cbc(des))",
1891 .driver_name
= "authenc-hmac-md5-cbc-des-caam",
1892 .blocksize
= DES_BLOCK_SIZE
,
1893 .type
= CRYPTO_ALG_TYPE_AEAD
,
1895 .setkey
= aead_setkey
,
1896 .setauthsize
= aead_setauthsize
,
1897 .encrypt
= aead_encrypt
,
1898 .decrypt
= aead_decrypt
,
1899 .givencrypt
= aead_givencrypt
,
1900 .geniv
= "<built-in>",
1901 .ivsize
= DES_BLOCK_SIZE
,
1902 .maxauthsize
= MD5_DIGEST_SIZE
,
1904 .class1_alg_type
= OP_ALG_ALGSEL_DES
| OP_ALG_AAI_CBC
,
1905 .class2_alg_type
= OP_ALG_ALGSEL_MD5
| OP_ALG_AAI_HMAC_PRECOMP
,
1906 .alg_op
= OP_ALG_ALGSEL_MD5
| OP_ALG_AAI_HMAC
,
1909 .name
= "authenc(hmac(sha1),cbc(des))",
1910 .driver_name
= "authenc-hmac-sha1-cbc-des-caam",
1911 .blocksize
= DES_BLOCK_SIZE
,
1912 .type
= CRYPTO_ALG_TYPE_AEAD
,
1914 .setkey
= aead_setkey
,
1915 .setauthsize
= aead_setauthsize
,
1916 .encrypt
= aead_encrypt
,
1917 .decrypt
= aead_decrypt
,
1918 .givencrypt
= aead_givencrypt
,
1919 .geniv
= "<built-in>",
1920 .ivsize
= DES_BLOCK_SIZE
,
1921 .maxauthsize
= SHA1_DIGEST_SIZE
,
1923 .class1_alg_type
= OP_ALG_ALGSEL_DES
| OP_ALG_AAI_CBC
,
1924 .class2_alg_type
= OP_ALG_ALGSEL_SHA1
| OP_ALG_AAI_HMAC_PRECOMP
,
1925 .alg_op
= OP_ALG_ALGSEL_SHA1
| OP_ALG_AAI_HMAC
,
1928 .name
= "authenc(hmac(sha224),cbc(des))",
1929 .driver_name
= "authenc-hmac-sha224-cbc-des-caam",
1930 .blocksize
= DES_BLOCK_SIZE
,
1931 .type
= CRYPTO_ALG_TYPE_AEAD
,
1933 .setkey
= aead_setkey
,
1934 .setauthsize
= aead_setauthsize
,
1935 .encrypt
= aead_encrypt
,
1936 .decrypt
= aead_decrypt
,
1937 .givencrypt
= aead_givencrypt
,
1938 .geniv
= "<built-in>",
1939 .ivsize
= DES_BLOCK_SIZE
,
1940 .maxauthsize
= SHA224_DIGEST_SIZE
,
1942 .class1_alg_type
= OP_ALG_ALGSEL_DES
| OP_ALG_AAI_CBC
,
1943 .class2_alg_type
= OP_ALG_ALGSEL_SHA224
|
1944 OP_ALG_AAI_HMAC_PRECOMP
,
1945 .alg_op
= OP_ALG_ALGSEL_SHA224
| OP_ALG_AAI_HMAC
,
		.name = "authenc(hmac(sha256),cbc(des))",
		.driver_name = "authenc-hmac-sha256-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(des))",
		.driver_name = "authenc-hmac-sha384-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(des))",
		.driver_name = "authenc-hmac-sha512-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	/* ablkcipher descriptor */
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
};
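
/*
 * Each entry above is consumed by caam_alg_alloc() below, which copies the
 * template into a crypto_alg and registers it with the crypto API.  As an
 * illustration only: the "cbc(aes)" template becomes an algorithm whose
 * cra_name is "cbc(aes)" and cra_driver_name is "cbc-aes-caam", selected
 * ahead of software implementations by CAAM_CRA_PRIORITY.
 */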
struct caam_crypto_alg {
	struct list_head entry;
	struct device *ctrldev;
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
	struct crypto_alg crypto_alg;
};
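
/*
 * One caam_crypto_alg is allocated per driver_algs[] template at module init;
 * the entry field links it into the controller's alg_list so that
 * caam_algapi_exit() can unregister and free every algorithm on removal.
 */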
static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg =
		 container_of(alg, struct caam_crypto_alg, crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
	struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev);
	int tgt_jr = atomic_inc_return(&priv->tfm_count);

	/*
	 * distribute tfms across job rings to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = priv->jrdev[(tgt_jr / 2) % priv->total_jobrs];

	/* copy descriptor header template value */
	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;

	return 0;
}
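
/*
 * Worked example of the job-ring assignment above (assuming, for
 * illustration, a part with priv->total_jobrs == 2): tfm_count values
 * 0 and 1 map to ring 0, 2 and 3 map to ring 1, and so on, so pairs of
 * tfms share a ring while successive allocations still spread across rings.
 */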
static void caam_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_enc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
	if (ctx->sh_desc_dec_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
	if (ctx->sh_desc_givenc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
				 desc_bytes(ctx->sh_desc_givenc),
				 DMA_TO_DEVICE);
}
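
/*
 * Each shared-descriptor DMA handle is checked with dma_mapping_error()
 * before unmapping, since setkey may not have run (or may have failed)
 * for this tfm and the corresponding descriptor would then never have
 * been mapped.
 */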
static void __exit caam_algapi_exit(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	struct caam_crypto_alg *t_alg, *n;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev)
		return;

	ctrldev = &pdev->dev;
	of_node_put(dev_node);
	priv = dev_get_drvdata(ctrldev);

	if (!priv->alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
					      struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
	if (!t_alg) {
		dev_err(ctrldev, "failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg->cra_type = &crypto_aead_type;
		alg->cra_aead = template->template_aead;
		break;
	}

	t_alg->class1_alg_type = template->class1_alg_type;
	t_alg->class2_alg_type = template->class2_alg_type;
	t_alg->alg_op = template->alg_op;
	t_alg->ctrldev = ctrldev;

	return t_alg;
}
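
/*
 * caam_alg_alloc() returns either a ready-to-register caam_crypto_alg or an
 * ERR_PTR(); callers are expected to check the result with IS_ERR(), as
 * caam_algapi_init() does below.
 */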
static int __init caam_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev)
		return -ENODEV;

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	INIT_LIST_HEAD(&priv->alg_list);

	atomic_set(&priv->tfm_count, -1);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		/* TODO: check if h/w supports alg */
		struct caam_crypto_alg *t_alg;

		t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(ctrldev, "%s alg allocation failed\n",
				 driver_algs[i].driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			dev_warn(ctrldev, "%s alg registration failed\n",
				 t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &priv->alg_list);
	}

	if (!list_empty(&priv->alg_list))
		dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(dev_node, "compatible", NULL));

	return err;
}
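
/*
 * Registration is best-effort: a template that fails to allocate or register
 * is skipped (or freed) with a warning and the last error code is returned,
 * so a partially populated alg_list is still usable.
 */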
module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");