/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |      |------>|  (operation)|
 * ---------------      |      |       ---------------
 * | JobDesc #2  |------|      |
 * | *(packet 2) |             |
 * ---------------             |
 *       .                     |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | LOAD (to DECO)    |
 * ---------------------
 */
50 #include "desc_constr.h"
57 #define CAAM_CRA_PRIORITY 3000
58 /* max key is sum of AES_MAX_KEY_SIZE, max split key size */
59 #define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
60 SHA512_DIGEST_SIZE * 2)
61 /* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
62 #define CAAM_MAX_IV_LENGTH 16
64 /* length of descriptors text */
65 #define DESC_AEAD_SHARED_TEXT_LEN 4
66 #define DESC_AEAD_ENCRYPT_TEXT_LEN 21
67 #define DESC_AEAD_DECRYPT_TEXT_LEN 24
68 #define DESC_AEAD_GIVENCRYPT_TEXT_LEN 27
71 /* for print_hex_dumps with line references */
72 #define xstr(s) str(s)
74 #define debug(format, arg...) printk(format, arg)
76 #define debug(format, arg...)
85 dma_addr_t shared_desc_phys
;
91 unsigned int enckeylen
;
92 unsigned int split_key_len
;
93 unsigned int split_key_pad_len
;
94 unsigned int authsize
;
97 static int aead_authenc_setauthsize(struct crypto_aead
*authenc
,
98 unsigned int authsize
)
100 struct caam_ctx
*ctx
= crypto_aead_ctx(authenc
);
102 ctx
->authsize
= authsize
;
107 struct split_key_result
{
108 struct completion completion
;
112 static void split_key_done(struct device
*dev
, u32
*desc
, u32 err
,
115 struct split_key_result
*res
= context
;
118 dev_err(dev
, "%s %d: err 0x%x\n", __func__
, __LINE__
, err
);
121 char tmp
[CAAM_ERROR_STR_MAX
];
123 dev_err(dev
, "%08x: %s\n", err
, caam_jr_strstatus(tmp
, err
));
128 complete(&res
->completion
);
132 get a split ipad/opad key
134 Split key generation-----------------------------------------------
136 [00] 0xb0810008 jobdesc: stidx=1 share=never len=8
137 [01] 0x04000014 key: class2->keyreg len=20
139 [03] 0x84410014 operation: cls2-op sha1 hmac init dec
140 [04] 0x24940000 fifold: class2 msgdata-last2 len=0 imm
141 [05] 0xa4000001 jump: class2 local all ->1 [06]
142 [06] 0x64260028 fifostr: class2 mdsplit-jdk len=40
145 static u32
gen_split_key(struct caam_ctx
*ctx
, const u8
*key_in
, u32 authkeylen
)
147 struct device
*jrdev
= ctx
->jrdev
;
149 struct split_key_result result
;
150 dma_addr_t dma_addr_in
, dma_addr_out
;
153 desc
= kmalloc(CAAM_CMD_SZ
* 6 + CAAM_PTR_SZ
* 2, GFP_KERNEL
| GFP_DMA
);
155 init_job_desc(desc
, 0);
157 dma_addr_in
= dma_map_single(jrdev
, (void *)key_in
, authkeylen
,
159 if (dma_mapping_error(jrdev
, dma_addr_in
)) {
160 dev_err(jrdev
, "unable to map key input memory\n");
164 append_key(desc
, dma_addr_in
, authkeylen
, CLASS_2
|
167 /* Sets MDHA up into an HMAC-INIT */
168 append_operation(desc
, ctx
->alg_op
| OP_ALG_DECRYPT
|
172 * do a FIFO_LOAD of zero, this will trigger the internal key expansion
173 into both pads inside MDHA
175 append_fifo_load_as_imm(desc
, NULL
, 0, LDST_CLASS_2_CCB
|
176 FIFOLD_TYPE_MSG
| FIFOLD_TYPE_LAST2
);
179 * FIFO_STORE with the explicit split-key content store
182 dma_addr_out
= dma_map_single(jrdev
, ctx
->key
, ctx
->split_key_pad_len
,
184 if (dma_mapping_error(jrdev
, dma_addr_out
)) {
185 dev_err(jrdev
, "unable to map key output memory\n");
189 append_fifo_store(desc
, dma_addr_out
, ctx
->split_key_len
,
190 LDST_CLASS_2_CCB
| FIFOST_TYPE_SPLIT_KEK
);
193 print_hex_dump(KERN_ERR
, "ctx.key@"xstr(__LINE__
)": ",
194 DUMP_PREFIX_ADDRESS
, 16, 4, key_in
, authkeylen
, 1);
195 print_hex_dump(KERN_ERR
, "jobdesc@"xstr(__LINE__
)": ",
196 DUMP_PREFIX_ADDRESS
, 16, 4, desc
, desc_bytes(desc
), 1);
200 init_completion(&result
.completion
);
202 ret
= caam_jr_enqueue(jrdev
, desc
, split_key_done
, &result
);
205 wait_for_completion_interruptible(&result
.completion
);
208 print_hex_dump(KERN_ERR
, "ctx.key@"xstr(__LINE__
)": ",
209 DUMP_PREFIX_ADDRESS
, 16, 4, ctx
->key
,
210 ctx
->split_key_pad_len
, 1);
214 dma_unmap_single(jrdev
, dma_addr_out
, ctx
->split_key_pad_len
,
216 dma_unmap_single(jrdev
, dma_addr_in
, authkeylen
, DMA_TO_DEVICE
);
223 static int build_sh_desc_ipsec(struct caam_ctx
*ctx
)
225 struct device
*jrdev
= ctx
->jrdev
;
228 bool keys_fit_inline
= 0;
231 * largest Job Descriptor and its Shared Descriptor
232 * must both fit into the 64-word Descriptor h/w Buffer
234 if ((DESC_AEAD_GIVENCRYPT_TEXT_LEN
+
235 DESC_AEAD_SHARED_TEXT_LEN
) * CAAM_CMD_SZ
+
236 ctx
->split_key_pad_len
+ ctx
->enckeylen
<= CAAM_DESC_BYTES_MAX
)
239 /* build shared descriptor for this session */
240 sh_desc
= kmalloc(CAAM_CMD_SZ
* DESC_AEAD_SHARED_TEXT_LEN
+
242 ctx
->split_key_pad_len
+ ctx
->enckeylen
:
243 CAAM_PTR_SZ
* 2, GFP_DMA
| GFP_KERNEL
);
245 dev_err(jrdev
, "could not allocate shared descriptor\n");
249 init_sh_desc(sh_desc
, HDR_SAVECTX
| HDR_SHARE_SERIAL
);
251 jump_cmd
= append_jump(sh_desc
, CLASS_BOTH
| JUMP_TEST_ALL
|
252 JUMP_COND_SHRD
| JUMP_COND_SELF
);
255 * process keys, starting with class 2/authentication.
257 if (keys_fit_inline
) {
258 append_key_as_imm(sh_desc
, ctx
->key
, ctx
->split_key_pad_len
,
260 CLASS_2
| KEY_DEST_MDHA_SPLIT
| KEY_ENC
);
262 append_key_as_imm(sh_desc
, (void *)ctx
->key
+
263 ctx
->split_key_pad_len
, ctx
->enckeylen
,
264 ctx
->enckeylen
, CLASS_1
| KEY_DEST_CLASS_REG
);
266 append_key(sh_desc
, ctx
->key_phys
, ctx
->split_key_len
, CLASS_2
|
267 KEY_DEST_MDHA_SPLIT
| KEY_ENC
);
268 append_key(sh_desc
, ctx
->key_phys
+ ctx
->split_key_pad_len
,
269 ctx
->enckeylen
, CLASS_1
| KEY_DEST_CLASS_REG
);
272 /* update jump cmd now that we are at the jump target */
273 set_jump_tgt_here(sh_desc
, jump_cmd
);
275 ctx
->shared_desc_phys
= dma_map_single(jrdev
, sh_desc
,
278 if (dma_mapping_error(jrdev
, ctx
->shared_desc_phys
)) {
279 dev_err(jrdev
, "unable to map shared descriptor\n");
284 ctx
->sh_desc
= sh_desc
;
289 static int aead_authenc_setkey(struct crypto_aead
*aead
,
290 const u8
*key
, unsigned int keylen
)
292 /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
293 static const u8 mdpadlen
[] = { 16, 20, 32, 32, 64, 64 };
294 struct caam_ctx
*ctx
= crypto_aead_ctx(aead
);
295 struct device
*jrdev
= ctx
->jrdev
;
296 struct rtattr
*rta
= (void *)key
;
297 struct crypto_authenc_key_param
*param
;
298 unsigned int authkeylen
;
299 unsigned int enckeylen
;
302 param
= RTA_DATA(rta
);
303 enckeylen
= be32_to_cpu(param
->enckeylen
);
305 key
+= RTA_ALIGN(rta
->rta_len
);
306 keylen
-= RTA_ALIGN(rta
->rta_len
);
308 if (keylen
< enckeylen
)
311 authkeylen
= keylen
- enckeylen
;
313 if (keylen
> CAAM_MAX_KEY_SIZE
)
316 /* Pick class 2 key length from algorithm submask */
317 ctx
->split_key_len
= mdpadlen
[(ctx
->alg_op
& OP_ALG_ALGSEL_SUBMASK
) >>
318 OP_ALG_ALGSEL_SHIFT
] * 2;
319 ctx
->split_key_pad_len
= ALIGN(ctx
->split_key_len
, 16);
322 printk(KERN_ERR
"keylen %d enckeylen %d authkeylen %d\n",
323 keylen
, enckeylen
, authkeylen
);
324 printk(KERN_ERR
"split_key_len %d split_key_pad_len %d\n",
325 ctx
->split_key_len
, ctx
->split_key_pad_len
);
326 print_hex_dump(KERN_ERR
, "key in @"xstr(__LINE__
)": ",
327 DUMP_PREFIX_ADDRESS
, 16, 4, key
, keylen
, 1);
329 ctx
->key
= kmalloc(ctx
->split_key_pad_len
+ enckeylen
,
330 GFP_KERNEL
| GFP_DMA
);
332 dev_err(jrdev
, "could not allocate key output memory\n");
336 ret
= gen_split_key(ctx
, key
, authkeylen
);
342 /* postpend encryption key to auth split key */
343 memcpy(ctx
->key
+ ctx
->split_key_pad_len
, key
+ authkeylen
, enckeylen
);
345 ctx
->key_phys
= dma_map_single(jrdev
, ctx
->key
, ctx
->split_key_pad_len
+
346 enckeylen
, DMA_TO_DEVICE
);
347 if (dma_mapping_error(jrdev
, ctx
->key_phys
)) {
348 dev_err(jrdev
, "unable to map key i/o memory\n");
353 print_hex_dump(KERN_ERR
, "ctx.key@"xstr(__LINE__
)": ",
354 DUMP_PREFIX_ADDRESS
, 16, 4, ctx
->key
,
355 ctx
->split_key_pad_len
+ enckeylen
, 1);
358 ctx
->enckeylen
= enckeylen
;
360 ret
= build_sh_desc_ipsec(ctx
);
362 dma_unmap_single(jrdev
, ctx
->key_phys
, ctx
->split_key_pad_len
+
363 enckeylen
, DMA_TO_DEVICE
);
369 crypto_aead_set_flags(aead
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
373 struct link_tbl_entry
{
382 * ipsec_esp_edesc - s/w-extended ipsec_esp descriptor
383 * @src_nents: number of segments in input scatterlist
384 * @dst_nents: number of segments in output scatterlist
385 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
386 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
387 * @link_tbl_bytes: length of dma mapped link_tbl space
388 * @link_tbl_dma: bus physical mapped address of h/w link table
389 * @hw_desc: the h/w job descriptor followed by any referenced link tables
391 struct ipsec_esp_edesc
{
396 dma_addr_t link_tbl_dma
;
397 struct link_tbl_entry
*link_tbl
;
401 static void ipsec_esp_unmap(struct device
*dev
,
402 struct ipsec_esp_edesc
*edesc
,
403 struct aead_request
*areq
)
405 dma_unmap_sg(dev
, areq
->assoc
, edesc
->assoc_nents
, DMA_TO_DEVICE
);
407 if (unlikely(areq
->dst
!= areq
->src
)) {
408 dma_unmap_sg(dev
, areq
->src
, edesc
->src_nents
,
410 dma_unmap_sg(dev
, areq
->dst
, edesc
->dst_nents
,
413 dma_unmap_sg(dev
, areq
->src
, edesc
->src_nents
,
417 if (edesc
->link_tbl_bytes
)
418 dma_unmap_single(dev
, edesc
->link_tbl_dma
,
419 edesc
->link_tbl_bytes
,
424 * ipsec_esp descriptor callbacks
426 static void ipsec_esp_encrypt_done(struct device
*jrdev
, u32
*desc
, u32 err
,
429 struct aead_request
*areq
= context
;
430 struct ipsec_esp_edesc
*edesc
;
432 struct crypto_aead
*aead
= crypto_aead_reqtfm(areq
);
433 int ivsize
= crypto_aead_ivsize(aead
);
434 struct caam_ctx
*ctx
= crypto_aead_ctx(aead
);
436 dev_err(jrdev
, "%s %d: err 0x%x\n", __func__
, __LINE__
, err
);
438 edesc
= (struct ipsec_esp_edesc
*)((char *)desc
-
439 offsetof(struct ipsec_esp_edesc
, hw_desc
));
442 char tmp
[CAAM_ERROR_STR_MAX
];
444 dev_err(jrdev
, "%08x: %s\n", err
, caam_jr_strstatus(tmp
, err
));
447 ipsec_esp_unmap(jrdev
, edesc
, areq
);
450 print_hex_dump(KERN_ERR
, "assoc @"xstr(__LINE__
)": ",
451 DUMP_PREFIX_ADDRESS
, 16, 4, sg_virt(areq
->assoc
),
453 print_hex_dump(KERN_ERR
, "dstiv @"xstr(__LINE__
)": ",
454 DUMP_PREFIX_ADDRESS
, 16, 4, sg_virt(areq
->src
) - ivsize
,
455 edesc
->src_nents
? 100 : ivsize
, 1);
456 print_hex_dump(KERN_ERR
, "dst @"xstr(__LINE__
)": ",
457 DUMP_PREFIX_ADDRESS
, 16, 4, sg_virt(areq
->src
),
458 edesc
->src_nents
? 100 : areq
->cryptlen
+
459 ctx
->authsize
+ 4, 1);
464 aead_request_complete(areq
, err
);
467 static void ipsec_esp_decrypt_done(struct device
*jrdev
, u32
*desc
, u32 err
,
470 struct aead_request
*areq
= context
;
471 struct ipsec_esp_edesc
*edesc
;
473 struct crypto_aead
*aead
= crypto_aead_reqtfm(areq
);
474 struct caam_ctx
*ctx
= crypto_aead_ctx(aead
);
476 dev_err(jrdev
, "%s %d: err 0x%x\n", __func__
, __LINE__
, err
);
478 edesc
= (struct ipsec_esp_edesc
*)((char *)desc
-
479 offsetof(struct ipsec_esp_edesc
, hw_desc
));
482 char tmp
[CAAM_ERROR_STR_MAX
];
484 dev_err(jrdev
, "%08x: %s\n", err
, caam_jr_strstatus(tmp
, err
));
487 ipsec_esp_unmap(jrdev
, edesc
, areq
);
490 * verify hw auth check passed else return -EBADMSG
492 if ((err
& JRSTA_CCBERR_ERRID_MASK
) == JRSTA_CCBERR_ERRID_ICVCHK
)
496 print_hex_dump(KERN_ERR
, "iphdrout@"xstr(__LINE__
)": ",
497 DUMP_PREFIX_ADDRESS
, 16, 4,
498 ((char *)sg_virt(areq
->assoc
) - sizeof(struct iphdr
)),
499 sizeof(struct iphdr
) + areq
->assoclen
+
500 ((areq
->cryptlen
> 1500) ? 1500 : areq
->cryptlen
) +
501 ctx
->authsize
+ 36, 1);
502 if (!err
&& edesc
->link_tbl_bytes
) {
503 struct scatterlist
*sg
= sg_last(areq
->src
, edesc
->src_nents
);
504 print_hex_dump(KERN_ERR
, "sglastout@"xstr(__LINE__
)": ",
505 DUMP_PREFIX_ADDRESS
, 16, 4, sg_virt(sg
),
506 sg
->length
+ ctx
->authsize
+ 16, 1);
511 aead_request_complete(areq
, err
);
515 * convert scatterlist to h/w link table format
516 * scatterlist must have been previously dma mapped
518 static void sg_to_link_tbl(struct scatterlist
*sg
, int sg_count
,
519 struct link_tbl_entry
*link_tbl_ptr
, u32 offset
)
522 link_tbl_ptr
->ptr
= sg_dma_address(sg
);
523 link_tbl_ptr
->len
= sg_dma_len(sg
);
524 link_tbl_ptr
->reserved
= 0;
525 link_tbl_ptr
->buf_pool_id
= 0;
526 link_tbl_ptr
->offset
= offset
;
532 /* set Final bit (marks end of link table) */
534 link_tbl_ptr
->len
|= 0x40000000;
538 * fill in and submit ipsec_esp job descriptor
540 static int ipsec_esp(struct ipsec_esp_edesc
*edesc
, struct aead_request
*areq
,
542 void (*callback
) (struct device
*dev
, u32
*desc
,
543 u32 err
, void *context
))
545 struct crypto_aead
*aead
= crypto_aead_reqtfm(areq
);
546 struct caam_ctx
*ctx
= crypto_aead_ctx(aead
);
547 struct device
*jrdev
= ctx
->jrdev
;
548 u32
*desc
= edesc
->hw_desc
, options
;
549 int ret
, sg_count
, assoc_sg_count
;
550 int ivsize
= crypto_aead_ivsize(aead
);
551 int authsize
= ctx
->authsize
;
552 dma_addr_t ptr
, dst_dma
, src_dma
;
554 u32
*sh_desc
= ctx
->sh_desc
;
556 debug("assoclen %d cryptlen %d authsize %d\n",
557 areq
->assoclen
, areq
->cryptlen
, authsize
);
558 print_hex_dump(KERN_ERR
, "assoc @"xstr(__LINE__
)": ",
559 DUMP_PREFIX_ADDRESS
, 16, 4, sg_virt(areq
->assoc
),
561 print_hex_dump(KERN_ERR
, "presciv@"xstr(__LINE__
)": ",
562 DUMP_PREFIX_ADDRESS
, 16, 4, sg_virt(areq
->src
) - ivsize
,
563 edesc
->src_nents
? 100 : ivsize
, 1);
564 print_hex_dump(KERN_ERR
, "src @"xstr(__LINE__
)": ",
565 DUMP_PREFIX_ADDRESS
, 16, 4, sg_virt(areq
->src
),
566 edesc
->src_nents
? 100 : areq
->cryptlen
+ authsize
, 1);
567 print_hex_dump(KERN_ERR
, "shrdesc@"xstr(__LINE__
)": ",
568 DUMP_PREFIX_ADDRESS
, 16, 4, sh_desc
,
569 desc_bytes(sh_desc
), 1);
571 assoc_sg_count
= dma_map_sg(jrdev
, areq
->assoc
, edesc
->assoc_nents
?: 1,
573 if (areq
->src
== areq
->dst
)
574 sg_count
= dma_map_sg(jrdev
, areq
->src
, edesc
->src_nents
? : 1,
577 sg_count
= dma_map_sg(jrdev
, areq
->src
, edesc
->src_nents
? : 1,
580 /* start auth operation */
581 append_operation(desc
, ctx
->class2_alg_type
| OP_ALG_AS_INITFINAL
|
582 (encrypt
? : OP_ALG_ICV_ON
));
584 /* Load FIFO with data for Class 2 CHA */
585 options
= FIFOLD_CLASS_CLASS2
| FIFOLD_TYPE_MSG
;
586 if (!edesc
->assoc_nents
) {
587 ptr
= sg_dma_address(areq
->assoc
);
589 sg_to_link_tbl(areq
->assoc
, edesc
->assoc_nents
,
591 ptr
= edesc
->link_tbl_dma
;
594 append_fifo_load(desc
, ptr
, areq
->assoclen
, options
);
596 /* copy iv from cipher/class1 input context to class2 infifo */
597 append_move(desc
, MOVE_SRC_CLASS1CTX
| MOVE_DEST_CLASS2INFIFO
| ivsize
);
600 u32
*jump_cmd
, *uncond_jump_cmd
;
603 jump_cmd
= append_jump(desc
, JUMP_TEST_ALL
| JUMP_COND_SHRD
);
605 /* start class 1 (cipher) operation, non-shared version */
606 append_operation(desc
, ctx
->class1_alg_type
|
607 OP_ALG_AS_INITFINAL
);
609 uncond_jump_cmd
= append_jump(desc
, 0);
611 set_jump_tgt_here(desc
, jump_cmd
);
613 /* start class 1 (cipher) operation, shared version */
614 append_operation(desc
, ctx
->class1_alg_type
|
615 OP_ALG_AS_INITFINAL
| OP_ALG_AAI_DK
);
616 set_jump_tgt_here(desc
, uncond_jump_cmd
);
618 append_operation(desc
, ctx
->class1_alg_type
|
619 OP_ALG_AS_INITFINAL
| encrypt
);
621 /* load payload & instruct to class2 to snoop class 1 if encrypting */
623 if (!edesc
->src_nents
) {
624 src_dma
= sg_dma_address(areq
->src
);
626 sg_to_link_tbl(areq
->src
, edesc
->src_nents
, edesc
->link_tbl
+
627 edesc
->assoc_nents
, 0);
628 src_dma
= edesc
->link_tbl_dma
+ edesc
->assoc_nents
*
629 sizeof(struct link_tbl_entry
);
632 append_seq_in_ptr(desc
, src_dma
, areq
->cryptlen
+ authsize
, options
);
633 append_seq_fifo_load(desc
, areq
->cryptlen
, FIFOLD_CLASS_BOTH
|
634 FIFOLD_TYPE_LASTBOTH
|
635 (encrypt
? FIFOLD_TYPE_MSG1OUT2
638 /* specify destination */
639 if (areq
->src
== areq
->dst
) {
642 sg_count
= dma_map_sg(jrdev
, areq
->dst
, edesc
->dst_nents
? : 1,
644 if (!edesc
->dst_nents
) {
645 dst_dma
= sg_dma_address(areq
->dst
);
648 sg_to_link_tbl(areq
->dst
, edesc
->dst_nents
,
649 edesc
->link_tbl
+ edesc
->assoc_nents
+
650 edesc
->src_nents
, 0);
651 dst_dma
= edesc
->link_tbl_dma
+ (edesc
->assoc_nents
+
653 sizeof(struct link_tbl_entry
);
657 append_seq_out_ptr(desc
, dst_dma
, areq
->cryptlen
+ authsize
, options
);
658 append_seq_fifo_store(desc
, areq
->cryptlen
, FIFOST_TYPE_MESSAGE_DATA
);
662 append_seq_store(desc
, authsize
, LDST_CLASS_2_CCB
|
663 LDST_SRCDST_BYTE_CONTEXT
);
665 append_seq_fifo_load(desc
, authsize
, FIFOLD_CLASS_CLASS2
|
666 FIFOLD_TYPE_LAST2
| FIFOLD_TYPE_ICV
);
669 debug("job_desc_len %d\n", desc_len(desc
));
670 print_hex_dump(KERN_ERR
, "jobdesc@"xstr(__LINE__
)": ",
671 DUMP_PREFIX_ADDRESS
, 16, 4, desc
, desc_bytes(desc
) , 1);
672 print_hex_dump(KERN_ERR
, "jdlinkt@"xstr(__LINE__
)": ",
673 DUMP_PREFIX_ADDRESS
, 16, 4, edesc
->link_tbl
,
674 edesc
->link_tbl_bytes
, 1);
677 ret
= caam_jr_enqueue(jrdev
, desc
, callback
, areq
);
681 ipsec_esp_unmap(jrdev
, edesc
, areq
);
689 * derive number of elements in scatterlist
691 static int sg_count(struct scatterlist
*sg_list
, int nbytes
, int *chained
)
693 struct scatterlist
*sg
= sg_list
;
699 nbytes
-= sg
->length
;
700 if (!sg_is_last(sg
) && (sg
+ 1)->length
== 0)
702 sg
= scatterwalk_sg_next(sg
);
709 * allocate and map the ipsec_esp extended descriptor
711 static struct ipsec_esp_edesc
*ipsec_esp_edesc_alloc(struct aead_request
*areq
,
714 struct crypto_aead
*aead
= crypto_aead_reqtfm(areq
);
715 struct caam_ctx
*ctx
= crypto_aead_ctx(aead
);
716 struct device
*jrdev
= ctx
->jrdev
;
717 gfp_t flags
= areq
->base
.flags
& CRYPTO_TFM_REQ_MAY_SLEEP
? GFP_KERNEL
:
719 int assoc_nents
, src_nents
, dst_nents
= 0, chained
, link_tbl_bytes
;
720 struct ipsec_esp_edesc
*edesc
;
722 assoc_nents
= sg_count(areq
->assoc
, areq
->assoclen
, &chained
);
724 if (likely(assoc_nents
== 1))
727 src_nents
= sg_count(areq
->src
, areq
->cryptlen
+ ctx
->authsize
,
733 if (unlikely(areq
->dst
!= areq
->src
)) {
734 dst_nents
= sg_count(areq
->dst
, areq
->cryptlen
+ ctx
->authsize
,
741 link_tbl_bytes
= (assoc_nents
+ src_nents
+ dst_nents
) *
742 sizeof(struct link_tbl_entry
);
743 debug("link_tbl_bytes %d\n", link_tbl_bytes
);
745 /* allocate space for base edesc and hw desc commands, link tables */
746 edesc
= kmalloc(sizeof(struct ipsec_esp_edesc
) + desc_bytes
+
747 link_tbl_bytes
, GFP_DMA
| flags
);
749 dev_err(jrdev
, "could not allocate extended descriptor\n");
750 return ERR_PTR(-ENOMEM
);
753 edesc
->assoc_nents
= assoc_nents
;
754 edesc
->src_nents
= src_nents
;
755 edesc
->dst_nents
= dst_nents
;
756 edesc
->link_tbl
= (void *)edesc
+ sizeof(struct ipsec_esp_edesc
) +
758 edesc
->link_tbl_dma
= dma_map_single(jrdev
, edesc
->link_tbl
,
759 link_tbl_bytes
, DMA_TO_DEVICE
);
760 edesc
->link_tbl_bytes
= link_tbl_bytes
;
765 static int aead_authenc_encrypt(struct aead_request
*areq
)
767 struct ipsec_esp_edesc
*edesc
;
768 struct crypto_aead
*aead
= crypto_aead_reqtfm(areq
);
769 struct caam_ctx
*ctx
= crypto_aead_ctx(aead
);
770 struct device
*jrdev
= ctx
->jrdev
;
771 int ivsize
= crypto_aead_ivsize(aead
);
775 /* allocate extended descriptor */
776 edesc
= ipsec_esp_edesc_alloc(areq
, DESC_AEAD_ENCRYPT_TEXT_LEN
*
779 return PTR_ERR(edesc
);
781 desc
= edesc
->hw_desc
;
783 /* insert shared descriptor pointer */
784 init_job_desc_shared(desc
, ctx
->shared_desc_phys
,
785 desc_len(ctx
->sh_desc
), HDR_SHARE_DEFER
);
787 iv_dma
= dma_map_single(jrdev
, areq
->iv
, ivsize
, DMA_TO_DEVICE
);
788 /* check dma error */
790 append_load(desc
, iv_dma
, ivsize
,
791 LDST_CLASS_1_CCB
| LDST_SRCDST_BYTE_CONTEXT
);
793 return ipsec_esp(edesc
, areq
, OP_ALG_ENCRYPT
, ipsec_esp_encrypt_done
);
796 static int aead_authenc_decrypt(struct aead_request
*req
)
798 struct crypto_aead
*aead
= crypto_aead_reqtfm(req
);
799 int ivsize
= crypto_aead_ivsize(aead
);
800 struct caam_ctx
*ctx
= crypto_aead_ctx(aead
);
801 struct device
*jrdev
= ctx
->jrdev
;
802 struct ipsec_esp_edesc
*edesc
;
806 req
->cryptlen
-= ctx
->authsize
;
808 /* allocate extended descriptor */
809 edesc
= ipsec_esp_edesc_alloc(req
, DESC_AEAD_DECRYPT_TEXT_LEN
*
812 return PTR_ERR(edesc
);
814 desc
= edesc
->hw_desc
;
816 /* insert shared descriptor pointer */
817 init_job_desc_shared(desc
, ctx
->shared_desc_phys
,
818 desc_len(ctx
->sh_desc
), HDR_SHARE_DEFER
);
820 iv_dma
= dma_map_single(jrdev
, req
->iv
, ivsize
, DMA_TO_DEVICE
);
821 /* check dma error */
823 append_load(desc
, iv_dma
, ivsize
,
824 LDST_CLASS_1_CCB
| LDST_SRCDST_BYTE_CONTEXT
);
826 return ipsec_esp(edesc
, req
, !OP_ALG_ENCRYPT
, ipsec_esp_decrypt_done
);
829 static int aead_authenc_givencrypt(struct aead_givcrypt_request
*req
)
831 struct aead_request
*areq
= &req
->areq
;
832 struct ipsec_esp_edesc
*edesc
;
833 struct crypto_aead
*aead
= crypto_aead_reqtfm(areq
);
834 struct caam_ctx
*ctx
= crypto_aead_ctx(aead
);
835 struct device
*jrdev
= ctx
->jrdev
;
836 int ivsize
= crypto_aead_ivsize(aead
);
840 iv_dma
= dma_map_single(jrdev
, req
->giv
, ivsize
, DMA_FROM_DEVICE
);
842 debug("%s: giv %p\n", __func__
, req
->giv
);
844 /* allocate extended descriptor */
845 edesc
= ipsec_esp_edesc_alloc(areq
, DESC_AEAD_GIVENCRYPT_TEXT_LEN
*
848 return PTR_ERR(edesc
);
850 desc
= edesc
->hw_desc
;
852 /* insert shared descriptor pointer */
853 init_job_desc_shared(desc
, ctx
->shared_desc_phys
,
854 desc_len(ctx
->sh_desc
), HDR_SHARE_DEFER
);
858 * to DECO, Last, Padding, Random, Message, 16 bytes
860 append_load_imm_u32(desc
, NFIFOENTRY_DEST_DECO
| NFIFOENTRY_LC1
|
861 NFIFOENTRY_STYPE_PAD
| NFIFOENTRY_DTYPE_MSG
|
862 NFIFOENTRY_PTYPE_RND
| ivsize
,
863 LDST_SRCDST_WORD_INFO_FIFO
);
866 * disable info fifo entries since the above serves as the entry
867 * this way, the MOVE command won't generate an entry.
868 * Note that this isn't required in more recent versions of
869 * SEC as a MOVE that doesn't do info FIFO entries is available.
871 append_cmd(desc
, CMD_LOAD
| DISABLE_AUTO_INFO_FIFO
);
873 /* MOVE DECO Alignment -> C1 Context 16 bytes */
874 append_move(desc
, MOVE_SRC_INFIFO
| MOVE_DEST_CLASS1CTX
| ivsize
);
876 /* re-enable info fifo entries */
877 append_cmd(desc
, CMD_LOAD
| ENABLE_AUTO_INFO_FIFO
);
879 /* MOVE C1 Context -> OFIFO 16 bytes */
880 append_move(desc
, MOVE_SRC_CLASS1CTX
| MOVE_DEST_OUTFIFO
| ivsize
);
882 append_fifo_store(desc
, iv_dma
, ivsize
, FIFOST_TYPE_MESSAGE_DATA
);
884 return ipsec_esp(edesc
, areq
, OP_ALG_ENCRYPT
, ipsec_esp_encrypt_done
);
887 struct caam_alg_template
{
888 char name
[CRYPTO_MAX_ALG_NAME
];
889 char driver_name
[CRYPTO_MAX_ALG_NAME
];
890 unsigned int blocksize
;
891 struct aead_alg aead
;
897 static struct caam_alg_template driver_algs
[] = {
898 /* single-pass ipsec_esp descriptor */
900 .name
= "authenc(hmac(sha1),cbc(aes))",
901 .driver_name
= "authenc-hmac-sha1-cbc-aes-caam",
902 .blocksize
= AES_BLOCK_SIZE
,
904 .setkey
= aead_authenc_setkey
,
905 .setauthsize
= aead_authenc_setauthsize
,
906 .encrypt
= aead_authenc_encrypt
,
907 .decrypt
= aead_authenc_decrypt
,
908 .givencrypt
= aead_authenc_givencrypt
,
909 .geniv
= "<built-in>",
910 .ivsize
= AES_BLOCK_SIZE
,
911 .maxauthsize
= SHA1_DIGEST_SIZE
,
913 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
914 .class2_alg_type
= OP_ALG_ALGSEL_SHA1
| OP_ALG_AAI_HMAC_PRECOMP
,
915 .alg_op
= OP_ALG_ALGSEL_SHA1
| OP_ALG_AAI_HMAC
,
918 .name
= "authenc(hmac(sha256),cbc(aes))",
919 .driver_name
= "authenc-hmac-sha256-cbc-aes-caam",
920 .blocksize
= AES_BLOCK_SIZE
,
922 .setkey
= aead_authenc_setkey
,
923 .setauthsize
= aead_authenc_setauthsize
,
924 .encrypt
= aead_authenc_encrypt
,
925 .decrypt
= aead_authenc_decrypt
,
926 .givencrypt
= aead_authenc_givencrypt
,
927 .geniv
= "<built-in>",
928 .ivsize
= AES_BLOCK_SIZE
,
929 .maxauthsize
= SHA256_DIGEST_SIZE
,
931 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
932 .class2_alg_type
= OP_ALG_ALGSEL_SHA256
|
933 OP_ALG_AAI_HMAC_PRECOMP
,
934 .alg_op
= OP_ALG_ALGSEL_SHA256
| OP_ALG_AAI_HMAC
,
937 .name
= "authenc(hmac(sha512),cbc(aes))",
938 .driver_name
= "authenc-hmac-sha512-cbc-aes-caam",
939 .blocksize
= AES_BLOCK_SIZE
,
941 .setkey
= aead_authenc_setkey
,
942 .setauthsize
= aead_authenc_setauthsize
,
943 .encrypt
= aead_authenc_encrypt
,
944 .decrypt
= aead_authenc_decrypt
,
945 .givencrypt
= aead_authenc_givencrypt
,
946 .geniv
= "<built-in>",
947 .ivsize
= AES_BLOCK_SIZE
,
948 .maxauthsize
= SHA512_DIGEST_SIZE
,
950 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
951 .class2_alg_type
= OP_ALG_ALGSEL_SHA512
|
952 OP_ALG_AAI_HMAC_PRECOMP
,
953 .alg_op
= OP_ALG_ALGSEL_SHA512
| OP_ALG_AAI_HMAC
,
956 .name
= "authenc(hmac(sha1),cbc(des3_ede))",
957 .driver_name
= "authenc-hmac-sha1-cbc-des3_ede-caam",
958 .blocksize
= DES3_EDE_BLOCK_SIZE
,
960 .setkey
= aead_authenc_setkey
,
961 .setauthsize
= aead_authenc_setauthsize
,
962 .encrypt
= aead_authenc_encrypt
,
963 .decrypt
= aead_authenc_decrypt
,
964 .givencrypt
= aead_authenc_givencrypt
,
965 .geniv
= "<built-in>",
966 .ivsize
= DES3_EDE_BLOCK_SIZE
,
967 .maxauthsize
= SHA1_DIGEST_SIZE
,
969 .class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
970 .class2_alg_type
= OP_ALG_ALGSEL_SHA1
| OP_ALG_AAI_HMAC_PRECOMP
,
971 .alg_op
= OP_ALG_ALGSEL_SHA1
| OP_ALG_AAI_HMAC
,
974 .name
= "authenc(hmac(sha256),cbc(des3_ede))",
975 .driver_name
= "authenc-hmac-sha256-cbc-des3_ede-caam",
976 .blocksize
= DES3_EDE_BLOCK_SIZE
,
978 .setkey
= aead_authenc_setkey
,
979 .setauthsize
= aead_authenc_setauthsize
,
980 .encrypt
= aead_authenc_encrypt
,
981 .decrypt
= aead_authenc_decrypt
,
982 .givencrypt
= aead_authenc_givencrypt
,
983 .geniv
= "<built-in>",
984 .ivsize
= DES3_EDE_BLOCK_SIZE
,
985 .maxauthsize
= SHA256_DIGEST_SIZE
,
987 .class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
988 .class2_alg_type
= OP_ALG_ALGSEL_SHA256
|
989 OP_ALG_AAI_HMAC_PRECOMP
,
990 .alg_op
= OP_ALG_ALGSEL_SHA256
| OP_ALG_AAI_HMAC
,
993 .name
= "authenc(hmac(sha512),cbc(des3_ede))",
994 .driver_name
= "authenc-hmac-sha512-cbc-des3_ede-caam",
995 .blocksize
= DES3_EDE_BLOCK_SIZE
,
997 .setkey
= aead_authenc_setkey
,
998 .setauthsize
= aead_authenc_setauthsize
,
999 .encrypt
= aead_authenc_encrypt
,
1000 .decrypt
= aead_authenc_decrypt
,
1001 .givencrypt
= aead_authenc_givencrypt
,
1002 .geniv
= "<built-in>",
1003 .ivsize
= DES3_EDE_BLOCK_SIZE
,
1004 .maxauthsize
= SHA512_DIGEST_SIZE
,
1006 .class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
1007 .class2_alg_type
= OP_ALG_ALGSEL_SHA512
|
1008 OP_ALG_AAI_HMAC_PRECOMP
,
1009 .alg_op
= OP_ALG_ALGSEL_SHA512
| OP_ALG_AAI_HMAC
,
1012 .name
= "authenc(hmac(sha1),cbc(des))",
1013 .driver_name
= "authenc-hmac-sha1-cbc-des-caam",
1014 .blocksize
= DES_BLOCK_SIZE
,
1016 .setkey
= aead_authenc_setkey
,
1017 .setauthsize
= aead_authenc_setauthsize
,
1018 .encrypt
= aead_authenc_encrypt
,
1019 .decrypt
= aead_authenc_decrypt
,
1020 .givencrypt
= aead_authenc_givencrypt
,
1021 .geniv
= "<built-in>",
1022 .ivsize
= DES_BLOCK_SIZE
,
1023 .maxauthsize
= SHA1_DIGEST_SIZE
,
1025 .class1_alg_type
= OP_ALG_ALGSEL_DES
| OP_ALG_AAI_CBC
,
1026 .class2_alg_type
= OP_ALG_ALGSEL_SHA1
| OP_ALG_AAI_HMAC_PRECOMP
,
1027 .alg_op
= OP_ALG_ALGSEL_SHA1
| OP_ALG_AAI_HMAC
,
1030 .name
= "authenc(hmac(sha256),cbc(des))",
1031 .driver_name
= "authenc-hmac-sha256-cbc-des-caam",
1032 .blocksize
= DES_BLOCK_SIZE
,
1034 .setkey
= aead_authenc_setkey
,
1035 .setauthsize
= aead_authenc_setauthsize
,
1036 .encrypt
= aead_authenc_encrypt
,
1037 .decrypt
= aead_authenc_decrypt
,
1038 .givencrypt
= aead_authenc_givencrypt
,
1039 .geniv
= "<built-in>",
1040 .ivsize
= DES_BLOCK_SIZE
,
1041 .maxauthsize
= SHA256_DIGEST_SIZE
,
1043 .class1_alg_type
= OP_ALG_ALGSEL_DES
| OP_ALG_AAI_CBC
,
1044 .class2_alg_type
= OP_ALG_ALGSEL_SHA256
|
1045 OP_ALG_AAI_HMAC_PRECOMP
,
1046 .alg_op
= OP_ALG_ALGSEL_SHA256
| OP_ALG_AAI_HMAC
,
1049 .name
= "authenc(hmac(sha512),cbc(des))",
1050 .driver_name
= "authenc-hmac-sha512-cbc-des-caam",
1051 .blocksize
= DES_BLOCK_SIZE
,
1053 .setkey
= aead_authenc_setkey
,
1054 .setauthsize
= aead_authenc_setauthsize
,
1055 .encrypt
= aead_authenc_encrypt
,
1056 .decrypt
= aead_authenc_decrypt
,
1057 .givencrypt
= aead_authenc_givencrypt
,
1058 .geniv
= "<built-in>",
1059 .ivsize
= DES_BLOCK_SIZE
,
1060 .maxauthsize
= SHA512_DIGEST_SIZE
,
1062 .class1_alg_type
= OP_ALG_ALGSEL_DES
| OP_ALG_AAI_CBC
,
1063 .class2_alg_type
= OP_ALG_ALGSEL_SHA512
|
1064 OP_ALG_AAI_HMAC_PRECOMP
,
1065 .alg_op
= OP_ALG_ALGSEL_SHA512
| OP_ALG_AAI_HMAC
,
1069 struct caam_crypto_alg
{
1070 struct list_head entry
;
1071 struct device
*ctrldev
;
1072 int class1_alg_type
;
1073 int class2_alg_type
;
1075 struct crypto_alg crypto_alg
;
1078 static int caam_cra_init(struct crypto_tfm
*tfm
)
1080 struct crypto_alg
*alg
= tfm
->__crt_alg
;
1081 struct caam_crypto_alg
*caam_alg
=
1082 container_of(alg
, struct caam_crypto_alg
, crypto_alg
);
1083 struct caam_ctx
*ctx
= crypto_tfm_ctx(tfm
);
1084 struct caam_drv_private
*priv
= dev_get_drvdata(caam_alg
->ctrldev
);
1085 int tgt_jr
= atomic_inc_return(&priv
->tfm_count
);
1088 * distribute tfms across job rings to ensure in-order
1089 * crypto request processing per tfm
1091 ctx
->jrdev
= priv
->algapi_jr
[(tgt_jr
/ 2) % priv
->num_jrs_for_algapi
];
1093 /* copy descriptor header template value */
1094 ctx
->class1_alg_type
= OP_TYPE_CLASS1_ALG
| caam_alg
->class1_alg_type
;
1095 ctx
->class2_alg_type
= OP_TYPE_CLASS2_ALG
| caam_alg
->class2_alg_type
;
1096 ctx
->alg_op
= OP_TYPE_CLASS2_ALG
| caam_alg
->alg_op
;
1101 static void caam_cra_exit(struct crypto_tfm
*tfm
)
1103 struct caam_ctx
*ctx
= crypto_tfm_ctx(tfm
);
1105 if (!dma_mapping_error(ctx
->jrdev
, ctx
->shared_desc_phys
))
1106 dma_unmap_single(ctx
->jrdev
, ctx
->shared_desc_phys
,
1107 desc_bytes(ctx
->sh_desc
), DMA_TO_DEVICE
);
1108 kfree(ctx
->sh_desc
);
1110 if (!dma_mapping_error(ctx
->jrdev
, ctx
->key_phys
))
1111 dma_unmap_single(ctx
->jrdev
, ctx
->key_phys
,
1112 ctx
->split_key_pad_len
+ ctx
->enckeylen
,
1117 static void __exit
caam_algapi_exit(void)
1120 struct device_node
*dev_node
;
1121 struct platform_device
*pdev
;
1122 struct device
*ctrldev
;
1123 struct caam_drv_private
*priv
;
1124 struct caam_crypto_alg
*t_alg
, *n
;
1127 dev_node
= of_find_compatible_node(NULL
, NULL
, "fsl,sec-v4.0");
1131 pdev
= of_find_device_by_node(dev_node
);
1135 ctrldev
= &pdev
->dev
;
1136 of_node_put(dev_node
);
1137 priv
= dev_get_drvdata(ctrldev
);
1139 if (!priv
->alg_list
.next
)
1142 list_for_each_entry_safe(t_alg
, n
, &priv
->alg_list
, entry
) {
1143 crypto_unregister_alg(&t_alg
->crypto_alg
);
1144 list_del(&t_alg
->entry
);
1148 for (i
= 0; i
< priv
->total_jobrs
; i
++) {
1149 err
= caam_jr_deregister(priv
->algapi_jr
[i
]);
1153 kfree(priv
->algapi_jr
);
1156 static struct caam_crypto_alg
*caam_alg_alloc(struct device
*ctrldev
,
1157 struct caam_alg_template
1160 struct caam_crypto_alg
*t_alg
;
1161 struct crypto_alg
*alg
;
1163 t_alg
= kzalloc(sizeof(struct caam_crypto_alg
), GFP_KERNEL
);
1165 dev_err(ctrldev
, "failed to allocate t_alg\n");
1166 return ERR_PTR(-ENOMEM
);
1169 alg
= &t_alg
->crypto_alg
;
1171 snprintf(alg
->cra_name
, CRYPTO_MAX_ALG_NAME
, "%s", template->name
);
1172 snprintf(alg
->cra_driver_name
, CRYPTO_MAX_ALG_NAME
, "%s",
1173 template->driver_name
);
1174 alg
->cra_module
= THIS_MODULE
;
1175 alg
->cra_init
= caam_cra_init
;
1176 alg
->cra_exit
= caam_cra_exit
;
1177 alg
->cra_priority
= CAAM_CRA_PRIORITY
;
1178 alg
->cra_flags
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_ASYNC
;
1179 alg
->cra_blocksize
= template->blocksize
;
1180 alg
->cra_alignmask
= 0;
1181 alg
->cra_type
= &crypto_aead_type
;
1182 alg
->cra_ctxsize
= sizeof(struct caam_ctx
);
1183 alg
->cra_u
.aead
= template->aead
;
1185 t_alg
->class1_alg_type
= template->class1_alg_type
;
1186 t_alg
->class2_alg_type
= template->class2_alg_type
;
1187 t_alg
->alg_op
= template->alg_op
;
1188 t_alg
->ctrldev
= ctrldev
;
1193 static int __init
caam_algapi_init(void)
1195 struct device_node
*dev_node
;
1196 struct platform_device
*pdev
;
1197 struct device
*ctrldev
, **jrdev
;
1198 struct caam_drv_private
*priv
;
1201 dev_node
= of_find_compatible_node(NULL
, NULL
, "fsl,sec-v4.0");
1205 pdev
= of_find_device_by_node(dev_node
);
1209 ctrldev
= &pdev
->dev
;
1210 priv
= dev_get_drvdata(ctrldev
);
1211 of_node_put(dev_node
);
1213 INIT_LIST_HEAD(&priv
->alg_list
);
1215 jrdev
= kmalloc(sizeof(*jrdev
) * priv
->total_jobrs
, GFP_KERNEL
);
1219 for (i
= 0; i
< priv
->total_jobrs
; i
++) {
1220 err
= caam_jr_register(ctrldev
, &jrdev
[i
]);
1224 if (err
< 0 && i
== 0) {
1225 dev_err(ctrldev
, "algapi error in job ring registration: %d\n",
1231 priv
->num_jrs_for_algapi
= i
;
1232 priv
->algapi_jr
= jrdev
;
1233 atomic_set(&priv
->tfm_count
, -1);
1235 /* register crypto algorithms the device supports */
1236 for (i
= 0; i
< ARRAY_SIZE(driver_algs
); i
++) {
1237 /* TODO: check if h/w supports alg */
1238 struct caam_crypto_alg
*t_alg
;
1240 t_alg
= caam_alg_alloc(ctrldev
, &driver_algs
[i
]);
1241 if (IS_ERR(t_alg
)) {
1242 err
= PTR_ERR(t_alg
);
1243 dev_warn(ctrldev
, "%s alg allocation failed\n",
1244 driver_algs
[i
].driver_name
);
1248 err
= crypto_register_alg(&t_alg
->crypto_alg
);
1250 dev_warn(ctrldev
, "%s alg registration failed\n",
1251 t_alg
->crypto_alg
.cra_driver_name
);
1254 list_add_tail(&t_alg
->entry
, &priv
->alg_list
);
1255 dev_info(ctrldev
, "%s\n",
1256 t_alg
->crypto_alg
.cra_driver_name
);
1263 module_init(caam_algapi_init
);
1264 module_exit(caam_algapi_exit
);
1266 MODULE_LICENSE("GPL");
1267 MODULE_DESCRIPTION("FSL CAAM support for crypto API");
1268 MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");