/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------       |------------>|  (hashKey)  |
 *       .               |        |--->| (operation) |
 * ---------------       |        |    ---------------
 * | JobDesc #2  |-------|        |
 * | JobDesc #3  |----------------|
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | ShareDesc Pointer |
 * ---------------------
 */
#include "desc_constr.h"

#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16

/* length of descriptors text */
#define DESC_JOB_IO_LEN			(CAAM_CMD_SZ * 3 + CAAM_PTR_SZ * 3)

#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(DESC_AEAD_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
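
/*
 * Illustrative sketch, not part of the original driver: the shared descriptor
 * buffers in struct caam_ctx below are sized in 32-bit command words from the
 * totals above, so a compile-time check along these lines would catch a
 * descriptor layout that can no longer fit the 64-word buffer.
 * CAAM_DESC_BYTES_MAX is assumed to come from the descriptor headers.
 */
#define CAAM_CHECK_DESC_FITS() \
	BUILD_BUG_ON(DESC_MAX_USED_BYTES > CAAM_DESC_BYTES_MAX)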
#ifdef DEBUG
/* for print_hex_dumps with line references */
#define xstr(s) str(s)
#define str(s) #s
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}
/*
 * Wait for completion of class 1 key loading before allowing
 * propagation of error from shared descriptor
 */
static inline void append_dec_shr_done(u32 *desc)
{
	u32 *jump_cmd;

	jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}
/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}
/*
 * For aead encrypt and decrypt, read iv for both classes
 */
static inline void aead_append_ld_iv(u32 *desc, int ivsize)
{
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | ivsize);
	append_move(desc, MOVE_SRC_CLASS1CTX |
		    MOVE_DEST_CLASS2INFIFO | ivsize);
}
/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}
/*
 * If all data, including src (with assoc and iv) or dst (with iv only) are
 * contiguous
 */
#define GIV_SRC_CONTIG		1
#define GIV_DST_CONTIG		(1 << 1)
/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};
static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    bool keys_fit_inline)
{
	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}
}
static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  bool keys_fit_inline)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_WAIT);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}
static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = 0;
	u32 *key_jump_cmd, *jump_cmd;
	u32 geniv, moveiv;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = 1;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

	/* assoclen + cryptlen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);
	aead_append_ld_iv(desc, tfm->ivsize);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	print_hex_dump(KERN_ERR, "aead enc shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = 1;

	desc = ctx->sh_desc_dec;

	/* aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_WAIT);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline);

	/* Only propagate error immediately if shared */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, key_jump_cmd);
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
	set_jump_tgt_here(desc, jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);
	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	aead_append_ld_iv(desc, tfm->ivsize);

	append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
	append_dec_shr_done(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	print_hex_dump(KERN_ERR, "aead dec shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = 1;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO |
		    MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy IV to class 1 context */
	append_move(desc, MOVE_SRC_CLASS1CTX |
		    MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = seqinlen - (ivsize + cryptlen) */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy iv from class 1 ctx to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload iv */
	append_seq_fifo_load(desc, tfm->ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	print_hex_dump(KERN_ERR, "aead givenc shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);

	return 0;
}
static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}
struct split_key_result {
	struct completion completion;
	int err;
};

static void split_key_done(struct device *dev, u32 *desc, u32 err,
			   void *context)
{
	struct split_key_result *res = context;

	dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	res->err = err;

	complete(&res->completion);
}
/*
get a split ipad/opad key

Split key generation-----------------------------------------------

[00] 0xb0810008    jobdesc: stidx=1 share=never len=8
[01] 0x04000014        key: class2->keyreg len=20
[03] 0x84410014  operation: cls2-op sha1 hmac init dec
[04] 0x24940000     fifold: class2 msgdata-last2 len=0 imm
[05] 0xa4000001       jump: class2 local all ->1 [06]
[06] 0x64260028    fifostr: class2 mdsplit-jdk len=40
*/
static u32 gen_split_key(struct caam_ctx *ctx, const u8 *key_in, u32 authkeylen)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t dma_addr_in, dma_addr_out;
	int ret = 0;

	desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);

	init_job_desc(desc, 0);

	dma_addr_in = dma_map_single(jrdev, (void *)key_in, authkeylen,
				     DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, dma_addr_in)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	append_key(desc, dma_addr_in, authkeylen, CLASS_2 |
		   KEY_DEST_CLASS_REG);

	/* Sets MDHA up into an HMAC-INIT */
	append_operation(desc, ctx->alg_op | OP_ALG_DECRYPT |
			 OP_ALG_AS_INIT);

	/*
	 * do a FIFO_LOAD of zero, this will trigger the internal key expansion
	 * into both pads inside MDHA
	 */
	append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB |
				FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);

	/*
	 * FIFO_STORE with the explicit split-key content store
	 */
	dma_addr_out = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
				      DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dma_addr_out)) {
		dev_err(jrdev, "unable to map key output memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	append_fifo_store(desc, dma_addr_out, ctx->split_key_len,
			  LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);

	print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, authkeylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion_interruptible(&result.completion);
		ret = result.err;

		print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
			       ctx->split_key_pad_len, 1);
	}

	dma_unmap_single(jrdev, dma_addr_out, ctx->split_key_pad_len,
			 DMA_FROM_DEVICE);
	dma_unmap_single(jrdev, dma_addr_in, authkeylen, DMA_TO_DEVICE);

	kfree(desc);

	return ret;
}
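
/*
 * Illustrative sketch, not part of the original file: the split key produced
 * above is the MDHA ipad/opad state pair, so its length is twice the internal
 * hash state size taken from mdpadlen[] below, rounded up to a 16-byte
 * boundary exactly as aead_setkey() computes.
 */
static inline unsigned int __maybe_unused example_split_key_pad_len(unsigned int padlen)
{
	return ALIGN(padlen * 2, 16);	/* e.g. sha1: 2 * 20 = 40 -> 48 */
}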
static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct rtattr *rta = (void *)key;
	struct crypto_authenc_key_param *param;
	unsigned int authkeylen;
	unsigned int enckeylen;
	int ret = 0;

	param = RTA_DATA(rta);
	enckeylen = be32_to_cpu(param->enckeylen);

	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);

	if (keylen < enckeylen)
		goto badkey;

	authkeylen = keylen - enckeylen;

	if (keylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keylen, enckeylen, authkeylen);
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ret = gen_split_key(ctx, key, authkeylen);
	if (ret)
		goto badkey;

	/* postpend encryption key to auth split key */
	memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
				      enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}

	print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len + enckeylen, 1);

	ctx->enckeylen = enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
				 enckeylen, DMA_TO_DEVICE);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
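
/*
 * Illustrative sketch, an assumption about the generic authenc() key format
 * rather than code from this driver: aead_setkey() above parses a blob laid
 * out as an rtattr carrying crypto_authenc_key_param (the big-endian
 * encryption key length), followed by the authentication key and then the
 * encryption key.  A caller could pack such a blob roughly like this.
 */
static int __maybe_unused example_pack_authenc_key(u8 *buf, unsigned int buflen,
						   const u8 *authkey,
						   unsigned int authkeylen,
						   const u8 *enckey,
						   unsigned int enckeylen)
{
	struct rtattr *rta = (void *)buf;
	struct crypto_authenc_key_param *param;

	if (buflen < RTA_SPACE(sizeof(*param)) + authkeylen + enckeylen)
		return -EINVAL;

	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enckeylen);

	/* auth key immediately follows the aligned rtattr, enc key after it */
	memcpy(buf + RTA_SPACE(sizeof(*param)), authkey, authkeylen);
	memcpy(buf + RTA_SPACE(sizeof(*param)) + authkeylen, enckey, enckeylen);

	return 0;
}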
static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
	struct device *jrdev = ctx->jrdev;
	u32 *key_jump_cmd, *jump_cmd;
	u32 *desc;

	print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_WAIT);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);

	/* Load iv */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	print_hex_dump(KERN_ERR, "ablkcipher enc shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);

	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_WAIT);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* For aead, only propagate error immediately if shared */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, key_jump_cmd);
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
	set_jump_tgt_here(desc, jump_cmd);

	/* Load iv */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Choose operation */
	append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	/* Wait for key to load before allowing propagating error */
	append_dec_shr_done(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	print_hex_dump(KERN_ERR, "ablkcipher dec shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);

	return 0;
}
struct link_tbl_entry {
	u64 ptr;
	u32 len;
	u8 reserved;
	u8 buf_pool_id;
	u16 offset;
};

/*
 * aead_edesc - s/w-extended aead descriptor
 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
 * @link_tbl_bytes: length of dma mapped link_tbl space
 * @link_tbl_dma: bus physical mapped address of h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct aead_edesc {
	int assoc_nents;
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int link_tbl_bytes;
	dma_addr_t link_tbl_dma;
	struct link_tbl_entry *link_tbl;
	u32 hw_desc[0];
};

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
 * @link_tbl_bytes: length of dma mapped link_tbl space
 * @link_tbl_dma: bus physical mapped address of h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ablkcipher_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int link_tbl_bytes;
	dma_addr_t link_tbl_dma;
	struct link_tbl_entry *link_tbl;
	u32 hw_desc[0];
};
static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents, int dst_nents,
		       dma_addr_t iv_dma, int ivsize, dma_addr_t link_tbl_dma,
		       int link_tbl_bytes)
{
	if (unlikely(dst != src)) {
		dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	if (link_tbl_bytes)
		dma_unmap_single(dev, link_tbl_dma, link_tbl_bytes,
				 DMA_TO_DEVICE);
}
static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	dma_unmap_sg(dev, req->assoc, edesc->assoc_nents, DMA_TO_DEVICE);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->link_tbl_dma,
		   edesc->link_tbl_bytes);
}
static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->link_tbl_dma,
		   edesc->link_tbl_bytes);
}
static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	aead_unmap(jrdev, edesc, req);

	print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen +
		       ctx->authsize + 4, 1);

	kfree(edesc);

	aead_request_complete(req, err);
}
static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

	print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
		       req->cryptlen, 1);

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	aead_unmap(jrdev, edesc, req);

	/*
	 * verify hw auth check passed else return -EBADMSG
	 */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

	print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4,
		       ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
		       sizeof(struct iphdr) + req->assoclen +
		       ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
		       ctx->authsize + 36, 1);
	if (!err && edesc->link_tbl_bytes) {
		struct scatterlist *sg = sg_last(req->src, edesc->src_nents);

		print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
			       sg->length + ctx->authsize + 16, 1);
	}

	kfree(edesc);

	aead_request_complete(req, err);
}
static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}
static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}
static void sg_to_link_tbl_one(struct link_tbl_entry *link_tbl_ptr,
			       dma_addr_t dma, u32 len, u32 offset)
{
	link_tbl_ptr->ptr = dma;
	link_tbl_ptr->len = len;
	link_tbl_ptr->reserved = 0;
	link_tbl_ptr->buf_pool_id = 0;
	link_tbl_ptr->offset = offset;

	print_hex_dump(KERN_ERR, "link_tbl_ptr@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, link_tbl_ptr,
		       sizeof(struct link_tbl_entry), 1);
}

/*
 * convert scatterlist to h/w link table format
 * but does not have final bit; instead, returns last entry
 */
static struct link_tbl_entry *sg_to_link_tbl(struct scatterlist *sg,
					     int sg_count, struct link_tbl_entry
					     *link_tbl_ptr, u32 offset)
{
	while (sg_count) {
		sg_to_link_tbl_one(link_tbl_ptr, sg_dma_address(sg),
				   sg_dma_len(sg), offset);
		link_tbl_ptr++;
		sg = scatterwalk_sg_next(sg);
		sg_count--;
	}
	return link_tbl_ptr - 1;
}

/*
 * convert scatterlist to h/w link table format
 * scatterlist must have been previously dma mapped
 */
static void sg_to_link_tbl_last(struct scatterlist *sg, int sg_count,
				struct link_tbl_entry *link_tbl_ptr, u32 offset)
{
	link_tbl_ptr = sg_to_link_tbl(sg, sg_count, link_tbl_ptr, offset);
	link_tbl_ptr->len |= 0x40000000;
}
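
/*
 * Illustrative sketch, not from the original driver: building the minimal
 * two-part h/w link table used elsewhere in this file - the IV as the first
 * entry, then the (already DMA-mapped) payload scatterlist, with the final
 * bit set on the last entry by sg_to_link_tbl_last().
 */
static void __maybe_unused example_build_iv_payload_tbl(struct link_tbl_entry *tbl,
							dma_addr_t iv_dma, int ivsize,
							struct scatterlist *src,
							int src_nents)
{
	sg_to_link_tbl_one(tbl, iv_dma, ivsize, 0);	  /* entry 0: IV */
	sg_to_link_tbl_last(src, src_nents, tbl + 1, 0); /* entries 1..n: data */
}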
/*
 * Fill in aead job descriptor
 */
static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
			  struct aead_edesc *edesc,
			  struct aead_request *req,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, link_tbl_index = 0;

	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->link_tbl_dma;
		link_tbl_index += (edesc->assoc_nents ? : 1) + 1 +
				  (edesc->src_nents ? : 1);
		in_options = LDST_SGF;
	}
	if (encrypt)
		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
				  req->cryptlen - authsize, in_options);
	else
		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
				  req->cryptlen, in_options);

	if (likely(req->src == req->dst)) {
		if (all_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = src_dma + sizeof(struct link_tbl_entry) *
				  ((edesc->assoc_nents ? : 1) + 1);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->link_tbl_dma +
				  link_tbl_index *
				  sizeof(struct link_tbl_entry);
			out_options = LDST_SGF;
		}
	}
	if (encrypt)
		append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
	else
		append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
				   out_options);
}
1154 static void init_aead_giv_job(u32
*sh_desc
, dma_addr_t ptr
,
1155 struct aead_edesc
*edesc
,
1156 struct aead_request
*req
,
1159 struct crypto_aead
*aead
= crypto_aead_reqtfm(req
);
1160 struct caam_ctx
*ctx
= crypto_aead_ctx(aead
);
1161 int ivsize
= crypto_aead_ivsize(aead
);
1162 int authsize
= ctx
->authsize
;
1163 u32
*desc
= edesc
->hw_desc
;
1164 u32 out_options
= 0, in_options
;
1165 dma_addr_t dst_dma
, src_dma
;
1166 int len
, link_tbl_index
= 0;
1169 debug("assoclen %d cryptlen %d authsize %d\n",
1170 req
->assoclen
, req
->cryptlen
, authsize
);
1171 print_hex_dump(KERN_ERR
, "assoc @"xstr(__LINE__
)": ",
1172 DUMP_PREFIX_ADDRESS
, 16, 4, sg_virt(req
->assoc
),
1174 print_hex_dump(KERN_ERR
, "presciv@"xstr(__LINE__
)": ",
1175 DUMP_PREFIX_ADDRESS
, 16, 4, req
->iv
, ivsize
, 1);
1176 print_hex_dump(KERN_ERR
, "src @"xstr(__LINE__
)": ",
1177 DUMP_PREFIX_ADDRESS
, 16, 4, sg_virt(req
->src
),
1178 edesc
->src_nents
> 1 ? 100 : req
->cryptlen
, 1);
1179 print_hex_dump(KERN_ERR
, "shrdesc@"xstr(__LINE__
)": ",
1180 DUMP_PREFIX_ADDRESS
, 16, 4, sh_desc
,
1181 desc_bytes(sh_desc
), 1);
1184 len
= desc_len(sh_desc
);
1185 init_job_desc_shared(desc
, ptr
, len
, HDR_SHARE_DEFER
| HDR_REVERSE
);
1187 if (contig
& GIV_SRC_CONTIG
) {
1188 src_dma
= sg_dma_address(req
->assoc
);
1191 src_dma
= edesc
->link_tbl_dma
;
1192 link_tbl_index
+= edesc
->assoc_nents
+ 1 + edesc
->src_nents
;
1193 in_options
= LDST_SGF
;
1195 append_seq_in_ptr(desc
, src_dma
, req
->assoclen
+ ivsize
+
1196 req
->cryptlen
- authsize
, in_options
);
1198 if (contig
& GIV_DST_CONTIG
) {
1199 dst_dma
= edesc
->iv_dma
;
1201 if (likely(req
->src
== req
->dst
)) {
1202 dst_dma
= src_dma
+ sizeof(struct link_tbl_entry
) *
1204 out_options
= LDST_SGF
;
1206 dst_dma
= edesc
->link_tbl_dma
+
1208 sizeof(struct link_tbl_entry
);
1209 out_options
= LDST_SGF
;
1213 append_seq_out_ptr(desc
, dst_dma
, ivsize
+ req
->cryptlen
, out_options
);
/*
 * Fill in ablkcipher job descriptor
 */
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
				struct ablkcipher_edesc *edesc,
				struct ablkcipher_request *req,
				bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, link_tbl_index = 0;

	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->nbytes, 1);

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (iv_contig) {
		src_dma = edesc->iv_dma;
		in_options = 0;
	} else {
		src_dma = edesc->link_tbl_dma;
		link_tbl_index += (iv_contig ? 0 : 1) + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);

	if (likely(req->src == req->dst)) {
		if (!edesc->src_nents && iv_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = edesc->link_tbl_dma +
				  sizeof(struct link_tbl_entry);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->link_tbl_dma +
				  link_tbl_index * sizeof(struct link_tbl_entry);
			out_options = LDST_SGF;
		}
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}
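
/*
 * Minimal sketch, an illustrative assumption rather than driver code: every
 * job descriptor built by the init_*_job() helpers above reduces to the
 * layout pictured in the header comment - a header referencing the shared
 * descriptor, followed by SEQ IN/OUT pointers for this particular packet.
 */
static void __maybe_unused example_init_contig_job(u32 *desc, dma_addr_t sh_desc_dma,
						   int sh_desc_len, dma_addr_t src,
						   dma_addr_t dst, u32 nbytes)
{
	init_job_desc_shared(desc, sh_desc_dma, sh_desc_len,
			     HDR_SHARE_DEFER | HDR_REVERSE);
	append_seq_in_ptr(desc, src, nbytes, 0);
	append_seq_out_ptr(desc, dst, nbytes, 0);
}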
/*
 * derive number of elements in scatterlist
 */
static int sg_count(struct scatterlist *sg_list, int nbytes)
{
	struct scatterlist *sg = sg_list;
	int sg_nents = 0;

	while (nbytes > 0) {
		sg_nents++;
		nbytes -= sg->length;
		if (!sg_is_last(sg) && (sg + 1)->length == 0)
			BUG(); /* chaining is not supported */
		sg = scatterwalk_sg_next(sg);
	}

	if (likely(sg_nents == 1))
		return 0;

	return sg_nents;
}
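
/*
 * Usage sketch, not part of the original file: a return value of 0 from
 * sg_count() means "single contiguous segment, no link table needed", which
 * is why the callers below map with "nents ? : 1" and only emit link-table
 * entries when the count is non-zero.
 */
static int __maybe_unused example_map_src(struct device *dev,
					  struct scatterlist *src, int nbytes)
{
	int src_nents = sg_count(src, nbytes);

	/* map at least one entry even when src_nents == 0 */
	return dma_map_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE);
}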
/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	bool all_contig = true;
	int ivsize = crypto_aead_ivsize(aead);
	int link_tbl_index, link_tbl_len = 0, link_tbl_bytes;

	assoc_nents = sg_count(req->assoc, req->assoclen);
	src_nents = sg_count(req->src, req->cryptlen);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->cryptlen);

	sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1,
			 DMA_TO_DEVICE);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_TO_DEVICE);
		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
				 DMA_FROM_DEVICE);
	}

	/* Check if data are contiguous */
	iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
	    iv_dma || src_nents || iv_dma + ivsize !=
	    sg_dma_address(req->src)) {
		all_contig = false;
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		link_tbl_len = assoc_nents + 1 + src_nents;
	}
	link_tbl_len += dst_nents;

	link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			link_tbl_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->link_tbl_bytes = link_tbl_bytes;
	edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) +
			  desc_bytes;
	edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
					     link_tbl_bytes, DMA_TO_DEVICE);
	*all_contig_ptr = all_contig;

	link_tbl_index = 0;
	if (!all_contig) {
		sg_to_link_tbl(req->assoc,
			       (assoc_nents ? : 1),
			       edesc->link_tbl + link_tbl_index, 0);
		link_tbl_index += assoc_nents ? : 1;
		sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
				   iv_dma, ivsize, 0);
		link_tbl_index += 1;
		sg_to_link_tbl_last(req->src,
				    (src_nents ? : 1),
				    edesc->link_tbl + link_tbl_index, 0);
		link_tbl_index += src_nents ? : 1;
	}
	if (dst_nents) {
		sg_to_link_tbl_last(req->dst, dst_nents,
				    edesc->link_tbl + link_tbl_index, 0);
	}

	return edesc;
}
static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	req->cryptlen += ctx->authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
		      all_contig, true);

	print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	print_hex_dump(KERN_ERR, "dec src@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);

	/* Create and submit job descriptor */
	init_aead_job(ctx->sh_desc_dec,
		      ctx->sh_desc_dec_dma, edesc, req, all_contig, false);

	print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
/*
 * allocate and map the aead extended descriptor for aead givencrypt
 */
static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
					       *greq, int desc_bytes,
					       u32 *contig_ptr)
{
	struct aead_request *req = &greq->areq;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
	int ivsize = crypto_aead_ivsize(aead);
	int link_tbl_index, link_tbl_len = 0, link_tbl_bytes;

	assoc_nents = sg_count(req->assoc, req->assoclen);
	src_nents = sg_count(req->src, req->cryptlen);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->cryptlen);

	sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1,
			 DMA_TO_DEVICE);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_TO_DEVICE);
		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
				 DMA_FROM_DEVICE);
	}

	/* Check if data are contiguous */
	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
	    iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
		contig &= ~GIV_SRC_CONTIG;
	if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
		contig &= ~GIV_DST_CONTIG;
	if (unlikely(req->src != req->dst)) {
		dst_nents = dst_nents ? : 1;
		link_tbl_len += 1;
	}
	if (!(contig & GIV_SRC_CONTIG)) {
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		link_tbl_len += assoc_nents + 1 + src_nents;
		if (likely(req->src == req->dst))
			contig &= ~GIV_DST_CONTIG;
	}
	link_tbl_len += dst_nents;

	link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			link_tbl_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->link_tbl_bytes = link_tbl_bytes;
	edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) +
			  desc_bytes;
	edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
					     link_tbl_bytes, DMA_TO_DEVICE);
	*contig_ptr = contig;

	link_tbl_index = 0;
	if (!(contig & GIV_SRC_CONTIG)) {
		sg_to_link_tbl(req->assoc, assoc_nents,
			       edesc->link_tbl + link_tbl_index, 0);
		link_tbl_index += assoc_nents;
		sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
				   iv_dma, ivsize, 0);
		link_tbl_index += 1;
		sg_to_link_tbl_last(req->src, src_nents,
				    edesc->link_tbl + link_tbl_index, 0);
		link_tbl_index += src_nents;
	}
	if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
		sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
				   iv_dma, ivsize, 0);
		link_tbl_index += 1;
		sg_to_link_tbl_last(req->dst, dst_nents,
				    edesc->link_tbl + link_tbl_index, 0);
	}

	return edesc;
}
static int aead_givencrypt(struct aead_givcrypt_request *areq)
{
	struct aead_request *req = &areq->areq;
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 contig;
	u32 *desc;
	int ret = 0;

	req->cryptlen += ctx->authsize;

	/* allocate extended descriptor */
	edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
				     CAAM_CMD_SZ, &contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	print_hex_dump(KERN_ERR, "giv src@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);

	/* Create and submit job descriptor */
	init_aead_giv_job(ctx->sh_desc_givenc,
			  ctx->sh_desc_givenc_dma, edesc, req, contig);

	print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
/*
 * allocate and map the ablkcipher extended descriptor for ablkcipher
 */
static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, int desc_bytes,
						       bool *iv_contig_out)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0, link_tbl_bytes;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool iv_contig = false;
	int sgc;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int link_tbl_index;

	src_nents = sg_count(req->src, req->nbytes);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->nbytes);

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_TO_DEVICE);
		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
				 DMA_FROM_DEVICE);
	}

	/*
	 * Check if iv can be contiguous with source and destination.
	 * If so, include it. If not, create scatterlist.
	 */
	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
		iv_contig = true;

	src_nents = src_nents ? : 1;
	link_tbl_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
			 sizeof(struct link_tbl_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
			link_tbl_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->link_tbl_bytes = link_tbl_bytes;
	edesc->link_tbl = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			  desc_bytes;

	link_tbl_index = 0;
	if (!iv_contig) {
		sg_to_link_tbl_one(edesc->link_tbl, iv_dma, ivsize, 0);
		sg_to_link_tbl_last(req->src, src_nents,
				    edesc->link_tbl + 1, 0);
		link_tbl_index += 1 + src_nents;
	}

	if (unlikely(dst_nents)) {
		sg_to_link_tbl_last(req->dst, dst_nents,
				    edesc->link_tbl + link_tbl_index, 0);
	}

	edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
					     link_tbl_bytes, DMA_TO_DEVICE);
	edesc->iv_dma = iv_dma;

	print_hex_dump(KERN_ERR, "ablkcipher link_tbl@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl,
		       link_tbl_bytes, 1);

	*iv_contig_out = iv_contig;
	return edesc;
}
static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_enc,
			    ctx->sh_desc_enc_dma, edesc, req, iv_contig);

	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_dec,
			    ctx->sh_desc_dec_dma, edesc, req, iv_contig);
	desc = edesc->hw_desc;

	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);

	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
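
/*
 * Usage sketch, an illustrative assumption rather than part of this driver:
 * once "cbc-aes-caam" is registered from the template table below, a kernel
 * consumer reaches it through the generic ablkcipher API along these lines,
 * waiting on a completion just as gen_split_key() does above.  The
 * example_* names are hypothetical.
 */
struct example_crypt_result {
	struct completion completion;
	int err;
};

static void __maybe_unused example_crypt_done(struct crypto_async_request *areq,
					      int err)
{
	struct example_crypt_result *res = areq->data;

	if (err == -EINPROGRESS)
		return;
	res->err = err;
	complete(&res->completion);
}

static int __maybe_unused example_cbc_aes_encrypt(struct scatterlist *src,
						  struct scatterlist *dst,
						  unsigned int nbytes,
						  const u8 *key,
						  unsigned int keylen, u8 *iv)
{
	struct example_crypt_result res;
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	int ret;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out_free_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	init_completion(&res.completion);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					example_crypt_done, &res);
	ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		/* hardware path: wait for the job ring callback */
		wait_for_completion(&res.completion);
		ret = res.err;
	}

	ablkcipher_request_free(req);
out_free_tfm:
	crypto_free_ablkcipher(tfm);
	return ret;
}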
#define template_aead		template_u.aead
#define template_ablkcipher	template_u.ablkcipher
struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	u32 type;
	union {
		struct ablkcipher_alg ablkcipher;
		struct aead_alg aead;
		struct blkcipher_alg blkcipher;
		struct cipher_alg cipher;
		struct compress_alg compress;
	} template_u;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
};
static struct caam_alg_template driver_algs[] = {
	/* single-pass ipsec_esp descriptor */
	{
		.name = "authenc(hmac(md5),cbc(aes))",
		.driver_name = "authenc-hmac-md5-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(aes))",
		.driver_name = "authenc-hmac-sha1-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(aes))",
		.driver_name = "authenc-hmac-sha256-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(aes))",
		.driver_name = "authenc-hmac-sha512-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(md5),cbc(des3_ede))",
		.driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(md5),cbc(des))",
		.driver_name = "authenc-hmac-md5-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des))",
		.driver_name = "authenc-hmac-sha1-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des))",
		.driver_name = "authenc-hmac-sha256-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(des))",
		.driver_name = "authenc-hmac-sha512-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	/* ablkcipher descriptor */
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
};
struct caam_crypto_alg {
	struct list_head entry;
	struct device *ctrldev;
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
	struct crypto_alg crypto_alg;
};

static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg =
		 container_of(alg, struct caam_crypto_alg, crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
	struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev);
	int tgt_jr = atomic_inc_return(&priv->tfm_count);

	/*
	 * distribute tfms across job rings to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = priv->algapi_jr[(tgt_jr / 2) % priv->num_jrs_for_algapi];

	/* copy descriptor header template value */
	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;

	return 0;
}
static void caam_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_enc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
	if (ctx->sh_desc_dec_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
	if (ctx->sh_desc_givenc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
				 desc_bytes(ctx->sh_desc_givenc),
				 DMA_TO_DEVICE);
}
static void __exit caam_algapi_exit(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	struct caam_crypto_alg *t_alg, *n;
	int i, err;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node)
		return;

	pdev = of_find_device_by_node(dev_node);
	if (!pdev)
		return;

	ctrldev = &pdev->dev;
	of_node_put(dev_node);
	priv = dev_get_drvdata(ctrldev);

	if (!priv->alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}

	for (i = 0; i < priv->total_jobrs; i++) {
		err = caam_jr_deregister(priv->algapi_jr[i]);
		if (err < 0)
			break;
	}
	kfree(priv->algapi_jr);
}
static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
					      struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
	if (!t_alg) {
		dev_err(ctrldev, "failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg->cra_type = &crypto_aead_type;
		alg->cra_aead = template->template_aead;
		break;
	}

	t_alg->class1_alg_type = template->class1_alg_type;
	t_alg->class2_alg_type = template->class2_alg_type;
	t_alg->alg_op = template->alg_op;
	t_alg->ctrldev = ctrldev;

	return t_alg;
}
static int __init caam_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev, **jrdev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node)
		return -ENODEV;

	pdev = of_find_device_by_node(dev_node);
	if (!pdev)
		return -ENODEV;

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	INIT_LIST_HEAD(&priv->alg_list);

	jrdev = kmalloc(sizeof(*jrdev) * priv->total_jobrs, GFP_KERNEL);
	if (!jrdev)
		return -ENOMEM;

	for (i = 0; i < priv->total_jobrs; i++) {
		err = caam_jr_register(ctrldev, &jrdev[i]);
		if (err < 0)
			break;
	}
	if (err < 0 && i == 0) {
		dev_err(ctrldev, "algapi error in job ring registration: %d\n",
			err);
		kfree(jrdev);
		return err;
	}

	priv->num_jrs_for_algapi = i;
	priv->algapi_jr = jrdev;
	atomic_set(&priv->tfm_count, -1);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		/* TODO: check if h/w supports alg */
		struct caam_crypto_alg *t_alg;

		t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(ctrldev, "%s alg allocation failed\n",
				 driver_algs[i].driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			dev_warn(ctrldev, "%s alg registration failed\n",
				 t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &priv->alg_list);
			dev_info(ctrldev, "%s\n",
				 t_alg->crypto_alg.cra_driver_name);
		}
	}

	return err;
}
module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");