/*
 *  sep_crypto.c - Crypto interface structures
 *
 *  Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
 *  Contributions(c) 2009-2010 Discretix. All rights reserved.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the Free
 *  Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 *  more details.
 *
 *  You should have received a copy of the GNU General Public License along with
 *  this program; if not, write to the Free Software Foundation, Inc., 59
 *  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 *  CONTACTS:
 *
 *  Mark Allyn		mark.a.allyn@intel.com
 *  Jayant Mangalampalli jayant.mangalampalli@intel.com
 *
 *  CHANGES:
 *
 *  2009.06.26	Initial publish
 *  2010.09.14	Upgrade to Medfield
 *  2011.02.22	Enable Kernel Crypto
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/kdev_t.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>
#include <linux/crypto.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/hash.h>
#include "sep_driver_hw_defs.h"
#include "sep_driver_config.h"
#include "sep_driver_api.h"
#include "sep_dev.h"
#include "sep_crypto.h"

#if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
/* Globals for queuing */
static spinlock_t queue_lock;
static struct crypto_queue sep_queue;

/* Declaration of the dequeuer */
static void sep_dequeuer(void *data);
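
/*
 * Note on the queuing model (a summary of the code below, not vendor
 * documentation): requests are placed on sep_queue under queue_lock,
 * and a work item submitted via sep_submit_work() later runs
 * sep_dequeuer() to pull them off and process them in task context,
 * where sleeping on the device is allowed.
 */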
/**
 * sep_do_callback
 * @work: pointer to work_struct
 * This is what is called by the queue; it is generic so that it
 * can be used by any type of operation as each different callback
 * function can use the data parameter in its own way
 */
static void sep_do_callback(struct work_struct *work)
{
	struct sep_work_struct *sep_work = container_of(work,
		struct sep_work_struct, work);

	if (sep_work != NULL) {
		(sep_work->callback)(sep_work->data);
		kfree(sep_work);
	} else {
		pr_debug("sep crypto: do callback - NULL container\n");
	}
}
/**
 * sep_submit_work
 * @work_queue: pointer to struct_workqueue
 * @funct: pointer to function to execute
 * @data: pointer to data; function will know
 *	  how to use it
 * This is a generic API to submit something to
 * the queue. The callback function will depend
 * on what operation is to be done
 */
static int sep_submit_work(struct workqueue_struct *work_queue,
	void (*funct)(void *),
	void *data)
{
	struct sep_work_struct *sep_work;
	int result;

	sep_work = kmalloc(sizeof(struct sep_work_struct), GFP_ATOMIC);

	if (sep_work == NULL) {
		pr_debug("sep crypto: can't allocate work structure\n");
		return -ENOMEM;
	}

	sep_work->callback = funct;
	sep_work->data = data;
	INIT_WORK(&sep_work->work, sep_do_callback);
	result = queue_work(work_queue, &sep_work->work);
	if (!result) {
		pr_debug("sep_crypto: queue_work failed\n");
		return -EINVAL;
	}
	return 0;
}
/**
 * sep_alloc_sg_buf -
 * @sep: pointer to struct sep_device
 * @size: total size of area
 * @block_size: minimum size of chunks
 *	each page is minimum or modulo this size
 * @returns: pointer to struct scatterlist for new
 *	buffer
 */
static struct scatterlist *sep_alloc_sg_buf(
	struct sep_device *sep,
	size_t size,
	size_t block_size)
{
	u32 nbr_pages;
	u32 ct1;
	void *buf;
	size_t current_size;
	size_t real_page_size;

	struct scatterlist *sg, *sg_temp;

	if (size == 0)
		return NULL;

	dev_dbg(&sep->pdev->dev, "sep alloc sg buf\n");

	current_size = 0;
	nbr_pages = 0;
	real_page_size = PAGE_SIZE - (PAGE_SIZE % block_size);
	/**
	 * The size of each page must be modulo of the operation
	 * block size; increment by the modified page size until
	 * the total size is reached, then you have the number of
	 * pages
	 */
	while (current_size < size) {
		current_size += real_page_size;
		nbr_pages += 1;
	}

	sg = kmalloc((sizeof(struct scatterlist) * nbr_pages), GFP_ATOMIC);
	if (!sg) {
		dev_warn(&sep->pdev->dev, "Cannot allocate page for new sg\n");
		return NULL;
	}

	sg_init_table(sg, nbr_pages);

	current_size = 0;
	sg_temp = sg;
	for (ct1 = 0; ct1 < nbr_pages; ct1 += 1) {
		buf = (void *)get_zeroed_page(GFP_ATOMIC);
		if (!buf) {
			dev_warn(&sep->pdev->dev,
				"Cannot allocate page for new buffer\n");
			kfree(sg);
			return NULL;
		}

		sg_set_buf(sg_temp, buf, real_page_size);
		if ((size - current_size) > real_page_size) {
			sg_temp->length = real_page_size;
			current_size += real_page_size;
		} else {
			sg_temp->length = (size - current_size);
			current_size = size;
		}
		sg_temp = sg_next(sg_temp);
	}
	return sg;
}
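
/*
 * Worked example for sep_alloc_sg_buf() (illustrative numbers): with
 * PAGE_SIZE = 4096 and block_size = 24, real_page_size is
 * 4096 - (4096 % 24) = 4080, so a 10000-byte request needs three
 * entries: two of 4080 bytes and a final entry of 1840 bytes.
 */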
/**
 * sep_free_sg_buf -
 * @sg: pointer to struct scatterlist; points to area to free
 */
static void sep_free_sg_buf(struct scatterlist *sg)
{
	struct scatterlist *sg_temp = sg;

	while (sg_temp) {
		free_page((unsigned long)sg_virt(sg_temp));
		sg_temp = sg_next(sg_temp);
	}
	kfree(sg);
}
/**
 * sep_copy_sg -
 * @sep: pointer to struct sep_device
 * @sg_src: pointer to struct scatterlist for source
 * @sg_dst: pointer to struct scatterlist for destination
 * @size: size (in bytes) of data to copy
 *
 * Copy data from one scatterlist to another; both must
 * be the same size
 */
static void sep_copy_sg(
	struct sep_device *sep,
	struct scatterlist *sg_src,
	struct scatterlist *sg_dst,
	size_t size)
{
	u32 seg_size;
	u32 in_offset, out_offset;

	u32 count = 0;
	struct scatterlist *sg_src_tmp = sg_src;
	struct scatterlist *sg_dst_tmp = sg_dst;
	in_offset = 0;
	out_offset = 0;

	dev_dbg(&sep->pdev->dev, "sep copy sg\n");

	if ((sg_src == NULL) || (sg_dst == NULL) || (size == 0))
		return;

	dev_dbg(&sep->pdev->dev, "sep copy sg not null\n");

	while (count < size) {
		if ((sg_src_tmp->length - in_offset) >
			(sg_dst_tmp->length - out_offset))
			seg_size = sg_dst_tmp->length - out_offset;
		else
			seg_size = sg_src_tmp->length - in_offset;

		if (seg_size > (size - count))
			seg_size = (size - count);

		memcpy(sg_virt(sg_dst_tmp) + out_offset,
			sg_virt(sg_src_tmp) + in_offset,
			seg_size);

		in_offset += seg_size;
		out_offset += seg_size;
		count += seg_size;

		if (in_offset >= sg_src_tmp->length) {
			sg_src_tmp = sg_next(sg_src_tmp);
			in_offset = 0;
		}

		if (out_offset >= sg_dst_tmp->length) {
			sg_dst_tmp = sg_next(sg_dst_tmp);
			out_offset = 0;
		}
	}
}
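
/*
 * sep_copy_sg() walks both lists at once: each iteration copies the
 * largest span that fits in both the current source and destination
 * entries. With a 100-byte source entry against a 60-byte destination
 * entry (illustrative sizes), it copies a 60-byte segment, then a
 * 40-byte segment into the next destination entry.
 */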
/**
 * sep_oddball_pages -
 * @sep: pointer to struct sep_device
 * @sg: pointer to struct scatterlist - buffer to check
 * @data_size: total data size
 * @block_size: minimum block size; must be multiples of this size
 * @to_copy: 1 means do copy, 0 means do not copy
 * @new_sg: pointer to location to put pointer to new sg area
 * @returns: 1 if new scatterlist is needed; 0 if not needed;
 *	error value if operation failed
 *
 * The SEP device requires all pages to be multiples of the
 * minimum block size appropriate for the operation.
 * This function checks all pages; if any are oddball sizes
 * (not multiple of block sizes), it creates a new scatterlist.
 * If the to_copy parameter is set to 1, then a scatter list
 * copy is performed. The pointer to the new scatterlist is
 * put into the address supplied by the new_sg parameter; if
 * no new scatterlist is needed, then a NULL is put into
 * the location at new_sg.
 */
static int sep_oddball_pages(
	struct sep_device *sep,
	struct scatterlist *sg,
	size_t data_size,
	u32 block_size,
	struct scatterlist **new_sg,
	u32 to_copy)
{
	struct scatterlist *sg_temp;
	u32 flag;
	u32 nbr_pages, page_count;

	dev_dbg(&sep->pdev->dev, "sep oddball\n");
	if ((sg == NULL) || (data_size == 0) || (data_size < block_size))
		return 0;

	dev_dbg(&sep->pdev->dev, "sep oddball not null\n");
	flag = 0;
	nbr_pages = 0;
	page_count = 0;
	sg_temp = sg;

	while (sg_temp) {
		nbr_pages += 1;
		sg_temp = sg_next(sg_temp);
	}

	sg_temp = sg;
	while ((sg_temp) && (flag == 0)) {
		page_count += 1;
		if (sg_temp->length % block_size)
			flag = 1;
		else
			sg_temp = sg_next(sg_temp);
	}

	/* Do not process if last (or only) page is oddball */
	if (nbr_pages == page_count)
		flag = 0;

	if (flag) {
		dev_dbg(&sep->pdev->dev, "sep oddball processing\n");
		*new_sg = sep_alloc_sg_buf(sep, data_size, block_size);
		if (*new_sg == NULL) {
			dev_warn(&sep->pdev->dev, "cannot allocate new sg\n");
			return -ENOMEM;
		}

		if (to_copy)
			sep_copy_sg(sep, sg, *new_sg, data_size);

		return 1;
	} else {
		*new_sg = NULL;
		return 0;
	}
}
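
/*
 * Example of the oddball rule (illustrative sizes): for a 16-byte
 * block size, entries of 4080/4080/100 bytes are acceptable because
 * only the last entry is odd, but 4080/100/4080 forces allocation of
 * a new block-aligned scatterlist (and a copy when to_copy is 1).
 */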
/**
 * sep_copy_offset_sg -
 * @sep: pointer to struct sep_device;
 * @sg: pointer to struct scatterlist
 * @offset: offset into scatterlist memory
 * @dst: place to put data
 * @len: length of data
 * @returns: number of bytes copied
 *
 * This copies data from scatterlist buffer
 * offset from beginning - it is needed for
 * handling tail data in hash
 */
static size_t sep_copy_offset_sg(
	struct sep_device *sep,
	struct scatterlist *sg,
	u32 offset,
	void *dst,
	u32 len)
{
	size_t page_start;
	size_t page_end;
	size_t offset_within_page;
	size_t length_within_page;
	size_t length_remaining;
	size_t current_offset;

	/* Find which page is beginning of segment */
	page_start = 0;
	page_end = sg->length;
	while ((sg) && (offset > page_end)) {
		page_start += sg->length;
		sg = sg_next(sg);
		if (sg)
			page_end += sg->length;
	}

	if (sg == NULL)
		return -ENOMEM;

	offset_within_page = offset - page_start;
	if ((sg->length - offset_within_page) >= len) {
		/* All within this page */
		memcpy(dst, sg_virt(sg) + offset_within_page, len);
		return len;
	} else {
		/* Scattered multiple pages */
		current_offset = 0;
		length_remaining = len;
		while ((sg) && (current_offset < len)) {
			length_within_page = sg->length - offset_within_page;
			if (length_within_page >= length_remaining) {
				memcpy(dst+current_offset,
					sg_virt(sg) + offset_within_page,
					length_remaining);
				length_remaining = 0;
				current_offset = len;
			} else {
				memcpy(dst+current_offset,
					sg_virt(sg) + offset_within_page,
					length_within_page);
				length_remaining -= length_within_page;
				current_offset += length_within_page;
				offset_within_page = 0;
				sg = sg_next(sg);
			}
		}

		if (sg == NULL)
			return -ENOMEM;
	}
	return len;
}
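
/*
 * sep_copy_offset_sg() example (illustrative numbers): with entries
 * of 4080 and 4080 bytes, offset 4000 and len 200, the copy starts
 * 4000 bytes into the first entry, takes 80 bytes from it, and the
 * remaining 120 bytes from the start of the second entry.
 */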
/**
 * partial_overlap -
 * @src_ptr: source pointer
 * @dst_ptr: destination pointer
 * @nbytes: number of bytes
 * @returns: 0 for success; -EINVAL for failure
 * We cannot have any partial overlap. Total overlap
 * where src is the same as dst is okay
 */
static int partial_overlap(void *src_ptr, void *dst_ptr, u32 nbytes)
{
	/* Check for partial overlap */
	if (src_ptr != dst_ptr) {
		if (src_ptr < dst_ptr) {
			if ((src_ptr + nbytes) > dst_ptr)
				return -EINVAL;
		} else {
			if ((dst_ptr + nbytes) > src_ptr)
				return -EINVAL;
		}
	}

	return 0;
}
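
/*
 * Overlap examples: src == dst (an in-place operation) is allowed;
 * src = buf, dst = buf + 8 with nbytes = 16 is rejected because the
 * two 16-byte regions partially overlap.
 */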
/* Debug - prints only if DEBUG is defined */
static void sep_dump_ivs(struct ablkcipher_request *req, char *reason)
{
	unsigned char *cptr;
	struct sep_aes_internal_context *aes_internal;
	struct sep_des_internal_context *des_internal;
	int ct1;

	struct this_task_ctx *ta_ctx;
	struct crypto_ablkcipher *tfm;
	struct sep_system_ctx *sctx;

	ta_ctx = ablkcipher_request_ctx(req);
	tfm = crypto_ablkcipher_reqtfm(req);
	sctx = crypto_ablkcipher_ctx(tfm);

	dev_dbg(&ta_ctx->sep_used->pdev->dev, "IV DUMP - %s\n", reason);
	if ((ta_ctx->current_request == DES_CBC) &&
		(ta_ctx->des_opmode == SEP_DES_CBC)) {

		des_internal = (struct sep_des_internal_context *)
			sctx->des_private_ctx.ctx_buf;
		/* print vendor */
		dev_dbg(&ta_ctx->sep_used->pdev->dev,
			"sep - vendor iv for DES\n");
		cptr = (unsigned char *)des_internal->iv_context;
		for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
			dev_dbg(&ta_ctx->sep_used->pdev->dev,
				"%02x\n", *(cptr + ct1));

		/* print walk */
		dev_dbg(&ta_ctx->sep_used->pdev->dev,
			"sep - walk from kernel crypto iv for DES\n");
		cptr = (unsigned char *)ta_ctx->walk.iv;
		for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
			dev_dbg(&ta_ctx->sep_used->pdev->dev,
				"%02x\n", *(cptr + ct1));
	} else if ((ta_ctx->current_request == AES_CBC) &&
		(ta_ctx->aes_opmode == SEP_AES_CBC)) {

		aes_internal = (struct sep_aes_internal_context *)
			sctx->aes_private_ctx.cbuff;
		/* print vendor */
		dev_dbg(&ta_ctx->sep_used->pdev->dev,
			"sep - vendor iv for AES\n");
		cptr = (unsigned char *)aes_internal->aes_ctx_iv;
		for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
			dev_dbg(&ta_ctx->sep_used->pdev->dev,
				"%02x\n", *(cptr + ct1));

		/* print walk */
		dev_dbg(&ta_ctx->sep_used->pdev->dev,
			"sep - walk from kernel crypto iv for AES\n");
		cptr = (unsigned char *)ta_ctx->walk.iv;
		for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
			dev_dbg(&ta_ctx->sep_used->pdev->dev,
				"%02x\n", *(cptr + ct1));
	}
}
/**
 * RFC2451: Weak key check
 * Returns: 1 (weak), 0 (not weak)
 */
static int sep_weak_key(const u8 *key, unsigned int keylen)
{
	static const u8 parity[] = {
	8, 1, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 2, 8,
	0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 3,
	0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
	8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
	0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
	8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
	8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
	0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
	0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
	8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
	8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
	0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
	8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
	0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
	4, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
	8, 5, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 6, 8,
	};

	u32 n, w;

	n  = parity[key[0]]; n <<= 4;
	n |= parity[key[1]]; n <<= 4;
	n |= parity[key[2]]; n <<= 4;
	n |= parity[key[3]]; n <<= 4;
	n |= parity[key[4]]; n <<= 4;
	n |= parity[key[5]]; n <<= 4;
	n |= parity[key[6]]; n <<= 4;
	n |= parity[key[7]];
	w = 0x88888888l;

	/* 1 in 10^10 keys passes this test */
	if (!((n - (w >> 3)) & w)) {
		if (n < 0x41415151) {
			if (n < 0x31312121) {
				if (n < 0x14141515) {
					/* 01 01 01 01 01 01 01 01 */
					if (n == 0x11111111)
						goto weak;
					/* 01 1F 01 1F 01 0E 01 0E */
					if (n == 0x13131212)
						goto weak;
				} else {
					/* 01 E0 01 E0 01 F1 01 F1 */
					if (n == 0x14141515)
						goto weak;
					/* 01 FE 01 FE 01 FE 01 FE */
					if (n == 0x16161616)
						goto weak;
				}
			} else {
				if (n < 0x34342525) {
					/* 1F 01 1F 01 0E 01 0E 01 */
					if (n == 0x31312121)
						goto weak;
					/* 1F 1F 1F 1F 0E 0E 0E 0E (?) */
					if (n == 0x33332222)
						goto weak;
				} else {
					/* 1F E0 1F E0 0E F1 0E F1 */
					if (n == 0x34342525)
						goto weak;
					/* 1F FE 1F FE 0E FE 0E FE */
					if (n == 0x36362626)
						goto weak;
				}
			}
		} else {
			if (n < 0x61616161) {
				if (n < 0x44445555) {
					/* E0 01 E0 01 F1 01 F1 01 */
					if (n == 0x41415151)
						goto weak;
					/* E0 1F E0 1F F1 0E F1 0E */
					if (n == 0x43435252)
						goto weak;
				} else {
					/* E0 E0 E0 E0 F1 F1 F1 F1 (?) */
					if (n == 0x44445555)
						goto weak;
					/* E0 FE E0 FE F1 FE F1 FE */
					if (n == 0x46465656)
						goto weak;
				}
			} else {
				if (n < 0x64646565) {
					/* FE 01 FE 01 FE 01 FE 01 */
					if (n == 0x61616161)
						goto weak;
					/* FE 1F FE 1F FE 0E FE 0E */
					if (n == 0x63636262)
						goto weak;
				} else {
					/* FE E0 FE E0 FE F1 FE F1 */
					if (n == 0x64646565)
						goto weak;
					/* FE FE FE FE FE FE FE FE */
					if (n == 0x66666666)
						goto weak;
				}
			}
		}
	}
	return 0;
weak:
	return 1;
}
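
/*
 * How the check works: each key byte indexes the parity table, which
 * maps the sixteen weak/semi-weak byte values to a 4-bit code (and
 * everything else to 0 or 8); the eight codes are packed into the
 * 32-bit word n. For example, the weak key 01 01 01 01 01 01 01 01
 * packs to n = 0x11111111 and is caught by the first comparison.
 */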
/**
 * sep_sg_nents -
 * @sg: pointer to struct scatterlist
 * @returns: number of entries in the list
 */
static u32 sep_sg_nents(struct scatterlist *sg)
{
	u32 ct1 = 0;

	while (sg) {
		ct1 += 1;
		sg = sg_next(sg);
	}

	return ct1;
}
/**
 * sep_start_msg -
 * @ta_ctx: pointer to struct this_task_ctx
 * @returns: offset to place for the next word in the message
 * Set up pointer in message pool for new message
 */
static u32 sep_start_msg(struct this_task_ctx *ta_ctx)
{
	u32 *word_ptr;

	ta_ctx->msg_len_words = 2;
	ta_ctx->msgptr = ta_ctx->msg;
	memset(ta_ctx->msg, 0, SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
	ta_ctx->msgptr += sizeof(u32) * 2;
	word_ptr = (u32 *)ta_ctx->msgptr;
	*word_ptr = SEP_START_MSG_TOKEN;
	return sizeof(u32) * 2;
}
/**
 * sep_end_msg -
 * @ta_ctx: pointer to struct this_task_ctx
 * @msg_offset: current message offset
 * End message; set length and CRC; and
 * send interrupt to the SEP
 */
static void sep_end_msg(struct this_task_ctx *ta_ctx, u32 msg_offset)
{
	u32 *word_ptr;

	/* Msg size goes into msg after token */
	ta_ctx->msg_len_words = msg_offset / sizeof(u32) + 1;
	word_ptr = (u32 *)ta_ctx->msgptr;
	word_ptr += 1;
	*word_ptr = ta_ctx->msg_len_words;

	/* CRC (currently 0) goes at end of msg */
	word_ptr = (u32 *)(ta_ctx->msgptr + msg_offset);
	*word_ptr = 0;
}
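
/*
 * Resulting outbound message layout, as built by the helpers here:
 *   word 0: SEP_START_MSG_TOKEN
 *   word 1: total length in words (patched in by sep_end_msg)
 *   word 2: opcode (written by sep_make_header)
 *   ...     parameters written by sep_write_msg
 *   last:   CRC word, currently always 0
 */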
/**
 * sep_start_inbound_msg -
 * @ta_ctx: pointer to struct this_task_ctx
 * @msg_offset: offset to place for the next word in the message
 * @returns: 0 for success; error value for failure
 * Set up pointer in message pool for inbound message
 */
static u32 sep_start_inbound_msg(struct this_task_ctx *ta_ctx,
	u32 *msg_offset)
{
	u32 *word_ptr;
	u32 token;
	u32 error = SEP_OK;

	*msg_offset = sizeof(u32) * 2;
	word_ptr = (u32 *)ta_ctx->msgptr;
	token = *word_ptr;
	ta_ctx->msg_len_words = *(word_ptr + 1);

	if (token != SEP_START_MSG_TOKEN) {
		error = SEP_INVALID_START;
		goto end_function;
	}

end_function:
	return error;
}
/**
 * sep_write_msg -
 * @ta_ctx: pointer to struct this_task_ctx
 * @in_addr: pointer to start of parameter
 * @size: size of parameter to copy (in bytes)
 * @max_size: size to move up offset; SEP msg is in word sizes
 * @msg_offset: pointer to current offset (is updated)
 * @byte_array: flag to indicate whether endian must be changed
 * Copies data into the message area from caller
 */
static void sep_write_msg(struct this_task_ctx *ta_ctx, void *in_addr,
	u32 size, u32 max_size, u32 *msg_offset, u32 byte_array)
{
	u32 *word_ptr;
	void *void_ptr;

	void_ptr = ta_ctx->msgptr + *msg_offset;
	word_ptr = (u32 *)void_ptr;
	memcpy(void_ptr, in_addr, size);
	*msg_offset += max_size;

	/* Do we need to manipulate endian? */
	if (byte_array) {
		u32 i;

		for (i = 0; i < ((size + 3) / 4); i += 1)
			*(word_ptr + i) = CHG_ENDIAN(*(word_ptr + i));
	}
}
/**
 * sep_make_header -
 * @ta_ctx: pointer to struct this_task_ctx
 * @msg_offset: pointer to current offset (is updated)
 * @op_code: op code to put into message
 * Puts op code into message and updates offset
 */
static void sep_make_header(struct this_task_ctx *ta_ctx, u32 *msg_offset,
	u32 op_code)
{
	u32 *word_ptr;

	*msg_offset = sep_start_msg(ta_ctx);
	word_ptr = (u32 *)(ta_ctx->msgptr + *msg_offset);
	*word_ptr = op_code;
	*msg_offset += sizeof(u32);
}
/**
 * sep_read_msg -
 * @ta_ctx: pointer to struct this_task_ctx
 * @in_addr: pointer to start of parameter
 * @size: size of parameter to copy (in bytes)
 * @max_size: size to move up offset; SEP msg is in word sizes
 * @msg_offset: pointer to current offset (is updated)
 * @byte_array: flag to indicate whether endian must be changed
 * Copies data out of the message area to caller
 */
static void sep_read_msg(struct this_task_ctx *ta_ctx, void *in_addr,
	u32 size, u32 max_size, u32 *msg_offset, u32 byte_array)
{
	u32 *word_ptr;
	void *void_ptr;

	void_ptr = ta_ctx->msgptr + *msg_offset;
	word_ptr = (u32 *)void_ptr;

	/* Do we need to manipulate endian? */
	if (byte_array) {
		u32 i;

		for (i = 0; i < ((size + 3) / 4); i += 1)
			*(word_ptr + i) = CHG_ENDIAN(*(word_ptr + i));
	}

	memcpy(in_addr, void_ptr, size);
	*msg_offset += max_size;
}
/**
 * sep_verify_op -
 * @ta_ctx: pointer to struct this_task_ctx
 * @op_code: expected op_code
 * @msg_offset: pointer to current offset (is updated)
 * @returns: 0 for success; error for failure
 */
static u32 sep_verify_op(struct this_task_ctx *ta_ctx, u32 op_code,
	u32 *msg_offset)
{
	u32 error;
	u32 in_ary[2];

	struct sep_device *sep = ta_ctx->sep_used;

	dev_dbg(&sep->pdev->dev, "dumping return message\n");
	error = sep_start_inbound_msg(ta_ctx, msg_offset);
	if (error) {
		dev_warn(&sep->pdev->dev,
			"sep_start_inbound_msg error\n");
		return error;
	}

	sep_read_msg(ta_ctx, in_ary, sizeof(u32) * 2, sizeof(u32) * 2,
		msg_offset, 0);

	if (in_ary[0] != op_code) {
		dev_warn(&sep->pdev->dev,
			"sep got back wrong opcode\n");
		dev_warn(&sep->pdev->dev,
			"got back %x; expected %x\n",
			in_ary[0], op_code);
		return SEP_WRONG_OPCODE;
	}

	if (in_ary[1] != SEP_OK) {
		dev_warn(&sep->pdev->dev,
			"sep execution error\n");
		dev_warn(&sep->pdev->dev,
			"got back %x; expected %x\n",
			in_ary[1], SEP_OK);
		return in_ary[1];
	}

	return 0;
}
/**
 * sep_read_context -
 * @ta_ctx: pointer to struct this_task_ctx
 * @msg_offset: point to current place in SEP msg; is updated
 * @dst: pointer to place to put the context
 * @len: size of the context structure (differs for crypto/hash)
 * This function reads the context from the msg area
 * There is a special way the vendor needs to have the maximum
 * length calculated so that the msg_offset is updated properly;
 * it skips over some words in the msg area depending on the size
 */
static void sep_read_context(struct this_task_ctx *ta_ctx, u32 *msg_offset,
	void *dst, u32 len)
{
	u32 max_length = ((len + 3) / sizeof(u32)) * sizeof(u32);

	sep_read_msg(ta_ctx, dst, len, max_length, msg_offset, 0);
}
/**
 * sep_write_context -
 * @ta_ctx: pointer to struct this_task_ctx
 * @msg_offset: point to current place in SEP msg; is updated
 * @src: pointer to the current context
 * @len: size of the context structure (differs for crypto/hash)
 * This function writes the context to the msg area
 * There is a special way the vendor needs to have the maximum
 * length calculated so that the msg_offset is updated properly;
 * it skips over some words in the msg area depending on the size
 */
static void sep_write_context(struct this_task_ctx *ta_ctx, u32 *msg_offset,
	void *src, u32 len)
{
	u32 max_length = ((len + 3) / sizeof(u32)) * sizeof(u32);

	sep_write_msg(ta_ctx, src, len, max_length, msg_offset, 0);
}
/**
 * sep_clear_out -
 * @ta_ctx: pointer to struct this_task_ctx
 * Clear out crypto related values in sep device structure
 * to enable device to be used by anyone; either kernel
 * crypto or userspace app via middleware
 */
static void sep_clear_out(struct this_task_ctx *ta_ctx)
{
	if (ta_ctx->src_sg_hold) {
		sep_free_sg_buf(ta_ctx->src_sg_hold);
		ta_ctx->src_sg_hold = NULL;
	}

	if (ta_ctx->dst_sg_hold) {
		sep_free_sg_buf(ta_ctx->dst_sg_hold);
		ta_ctx->dst_sg_hold = NULL;
	}

	ta_ctx->src_sg = NULL;
	ta_ctx->dst_sg = NULL;

	sep_free_dma_table_data_handler(ta_ctx->sep_used, &ta_ctx->dma_ctx);

	if (ta_ctx->i_own_sep) {
		/**
		 * The following unlocks the sep and makes it available
		 * to any other application
		 * First, null out crypto entries in sep before releasing it
		 */
		ta_ctx->sep_used->current_hash_req = NULL;
		ta_ctx->sep_used->current_cypher_req = NULL;
		ta_ctx->sep_used->current_request = 0;
		ta_ctx->sep_used->current_hash_stage = 0;
		ta_ctx->sep_used->ta_ctx = NULL;
		ta_ctx->sep_used->in_kernel = 0;

		ta_ctx->call_status.status = 0;

		/* Remove anything confidential */
		memset(ta_ctx->sep_used->shared_addr, 0,
			SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);

		sep_queue_status_remove(ta_ctx->sep_used, &ta_ctx->queue_elem);

#ifdef SEP_ENABLE_RUNTIME_PM
		ta_ctx->sep_used->in_use = 0;
		pm_runtime_mark_last_busy(&ta_ctx->sep_used->pdev->dev);
		pm_runtime_put_autosuspend(&ta_ctx->sep_used->pdev->dev);
#endif

		clear_bit(SEP_WORKING_LOCK_BIT,
			&ta_ctx->sep_used->in_use_flags);
		ta_ctx->sep_used->pid_doing_transaction = 0;

		dev_dbg(&ta_ctx->sep_used->pdev->dev,
			"[PID%d] waking up next transaction\n",
			current->pid);

		clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
			&ta_ctx->sep_used->in_use_flags);
		wake_up(&ta_ctx->sep_used->event_transactions);

		ta_ctx->i_own_sep = 0;
	}
}
/**
 * Release crypto infrastructure from EINPROGRESS and
 * clear sep_dev so that SEP is available to anyone
 */
static void sep_crypto_release(struct sep_system_ctx *sctx,
	struct this_task_ctx *ta_ctx, u32 error)
{
	struct ahash_request *hash_req = ta_ctx->current_hash_req;
	struct ablkcipher_request *cypher_req =
		ta_ctx->current_cypher_req;
	struct sep_device *sep = ta_ctx->sep_used;

	sep_clear_out(ta_ctx);

	/**
	 * This may not yet exist depending when we
	 * chose to bail out. If it does exist, set
	 * it to 1
	 */
	if (ta_ctx->are_we_done_yet != NULL)
		*ta_ctx->are_we_done_yet = 1;

	if (cypher_req != NULL) {
		if ((sctx->key_sent == 1) ||
			((error != 0) && (error != -EINPROGRESS))) {
			if (cypher_req->base.complete == NULL) {
				dev_dbg(&sep->pdev->dev,
					"release is null for cypher!");
			} else {
				cypher_req->base.complete(
					&cypher_req->base, error);
			}
		}
	}

	if (hash_req != NULL) {
		if (hash_req->base.complete == NULL) {
			dev_dbg(&sep->pdev->dev,
				"release is null for hash!");
		} else {
			hash_req->base.complete(
				&hash_req->base, error);
		}
	}
}
/**
 * This is where we grab the sep itself and tell it to do something.
 * It will sleep if the sep is currently busy
 * and it will return 0 if sep is now ours; error value if there
 * were problems
 */
static int sep_crypto_take_sep(struct this_task_ctx *ta_ctx)
{
	struct sep_device *sep = ta_ctx->sep_used;
	int result;
	struct sep_msgarea_hdr *my_msg_header;

	my_msg_header = (struct sep_msgarea_hdr *)ta_ctx->msg;

	/* add to status queue */
	ta_ctx->queue_elem = sep_queue_status_add(sep, my_msg_header->opcode,
		ta_ctx->nbytes, current->pid,
		current->comm, sizeof(current->comm));

	if (!ta_ctx->queue_elem) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] updating queue status error\n", current->pid);
		return -EINVAL;
	}

	/* get the device; this can sleep */
	result = sep_wait_transaction(sep);
	if (result)
		return result;

	if (sep_dev->power_save_setup == 1)
		pm_runtime_get_sync(&sep_dev->pdev->dev);

	/* Copy in the message */
	memcpy(sep->shared_addr, ta_ctx->msg,
		SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);

	/* Copy in the dcb information if there is any */
	if (ta_ctx->dcb_region) {
		result = sep_activate_dcb_dmatables_context(sep,
			&ta_ctx->dcb_region, &ta_ctx->dmatables_region,
			ta_ctx->dma_ctx);
		if (result)
			return result;
	}

	/* Mark the device so we know how to finish the job in the tasklet */
	if (ta_ctx->current_hash_req)
		sep->current_hash_req = ta_ctx->current_hash_req;
	else
		sep->current_cypher_req = ta_ctx->current_cypher_req;

	sep->current_request = ta_ctx->current_request;
	sep->current_hash_stage = ta_ctx->current_hash_stage;
	sep->ta_ctx = ta_ctx;
	sep->in_kernel = (u32)1;
	ta_ctx->i_own_sep = 1;

	/* need to set bit first to avoid race condition with interrupt */
	set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET, &ta_ctx->call_status.status);

	result = sep_send_command_handler(sep);

	dev_dbg(&sep->pdev->dev, "[PID%d]: sending command to the sep\n",
		current->pid);

	if (!result)
		dev_dbg(&sep->pdev->dev, "[PID%d]: command sent okay\n",
			current->pid);
	else {
		dev_dbg(&sep->pdev->dev, "[PID%d]: can't send command\n",
			current->pid);
		clear_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
			&ta_ctx->call_status.status);
	}

	return result;
}
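
/*
 * Ownership hand-off summary: sep_crypto_take_sep() queues a status
 * entry, sleeps in sep_wait_transaction() until the device is free,
 * copies the prepared message (and optional DCB/DMA tables) into the
 * shared area, records the request in the sep_device so the tasklet
 * can finish it, and only then sends the command. sep_clear_out()
 * reverses all of this when the request completes or fails.
 */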
/**
 * This function sets things up for a crypto data block process
 * This does all preparation, but does not try to grab the
 * sep
 * @req: pointer to struct ablkcipher_request
 * returns: 0 if all went well, non zero if error
 */
static int sep_crypto_block_data(struct ablkcipher_request *req)
{
	int int_error;
	u32 msg_offset;
	static u32 msg[10];
	void *src_ptr;
	void *dst_ptr;

	static char small_buf[100];
	ssize_t copy_result;
	int result;

	struct scatterlist *new_sg;
	struct this_task_ctx *ta_ctx;
	struct crypto_ablkcipher *tfm;
	struct sep_system_ctx *sctx;

	struct sep_des_internal_context *des_internal;
	struct sep_aes_internal_context *aes_internal;

	ta_ctx = ablkcipher_request_ctx(req);
	tfm = crypto_ablkcipher_reqtfm(req);
	sctx = crypto_ablkcipher_ctx(tfm);

	/* start the walk on scatterlists */
	ablkcipher_walk_init(&ta_ctx->walk, req->src, req->dst, req->nbytes);
	dev_dbg(&ta_ctx->sep_used->pdev->dev, "sep crypto block data size of %x\n",
		req->nbytes);

	int_error = ablkcipher_walk_phys(req, &ta_ctx->walk);
	if (int_error) {
		dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
			int_error);
		return -ENOMEM;
	}

	dev_dbg(&ta_ctx->sep_used->pdev->dev,
		"crypto block: src is %lx dst is %lx\n",
		(unsigned long)req->src, (unsigned long)req->dst);

	/* Make sure all pages are even block */
	int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
		req->nbytes, ta_ctx->walk.blocksize, &new_sg, 1);

	if (int_error < 0) {
		dev_warn(&ta_ctx->sep_used->pdev->dev, "oddball page error\n");
		return -ENOMEM;
	} else if (int_error == 1) {
		ta_ctx->src_sg = new_sg;
		ta_ctx->src_sg_hold = new_sg;
	} else {
		ta_ctx->src_sg = req->src;
		ta_ctx->src_sg_hold = NULL;
	}

	int_error = sep_oddball_pages(ta_ctx->sep_used, req->dst,
		req->nbytes, ta_ctx->walk.blocksize, &new_sg, 0);

	if (int_error < 0) {
		dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
			int_error);
		return -ENOMEM;
	} else if (int_error == 1) {
		ta_ctx->dst_sg = new_sg;
		ta_ctx->dst_sg_hold = new_sg;
	} else {
		ta_ctx->dst_sg = req->dst;
		ta_ctx->dst_sg_hold = NULL;
	}

	/* set nbytes for queue status */
	ta_ctx->nbytes = req->nbytes;

	/* Key already done; this is for data */
	dev_dbg(&ta_ctx->sep_used->pdev->dev, "sending data\n");

	/* check for valid data and proper spacing */
	src_ptr = sg_virt(ta_ctx->src_sg);
	dst_ptr = sg_virt(ta_ctx->dst_sg);

	if (!src_ptr || !dst_ptr ||
		(ta_ctx->current_cypher_req->nbytes %
		crypto_ablkcipher_blocksize(tfm))) {

		dev_warn(&ta_ctx->sep_used->pdev->dev,
			"cipher block size odd\n");
		dev_warn(&ta_ctx->sep_used->pdev->dev,
			"cipher block size is %x\n",
			crypto_ablkcipher_blocksize(tfm));
		dev_warn(&ta_ctx->sep_used->pdev->dev,
			"cipher data size is %x\n",
			ta_ctx->current_cypher_req->nbytes);
		return -EINVAL;
	}

	if (partial_overlap(src_ptr, dst_ptr,
		ta_ctx->current_cypher_req->nbytes)) {
		dev_warn(&ta_ctx->sep_used->pdev->dev,
			"block partial overlap\n");
		return -EINVAL;
	}

	/* Put together the message */
	sep_make_header(ta_ctx, &msg_offset, ta_ctx->block_opcode);

	/* If des, and size is 1 block, put directly in msg */
	if ((ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) &&
		(req->nbytes == crypto_ablkcipher_blocksize(tfm))) {

		dev_dbg(&ta_ctx->sep_used->pdev->dev,
			"writing out one block des\n");

		copy_result = sg_copy_to_buffer(
			ta_ctx->src_sg, sep_sg_nents(ta_ctx->src_sg),
			small_buf, crypto_ablkcipher_blocksize(tfm));

		if (copy_result != crypto_ablkcipher_blocksize(tfm)) {
			dev_warn(&ta_ctx->sep_used->pdev->dev,
				"des block copy failed\n");
			return -ENOMEM;
		}

		/* Put data into message */
		sep_write_msg(ta_ctx, small_buf,
			crypto_ablkcipher_blocksize(tfm),
			crypto_ablkcipher_blocksize(tfm) * 2,
			&msg_offset, 1);

		/* Put size into message */
		sep_write_msg(ta_ctx, &req->nbytes,
			sizeof(u32), sizeof(u32), &msg_offset, 0);
	} else {
		/* Otherwise, fill out dma tables */
		ta_ctx->dcb_input_data.app_in_address = src_ptr;
		ta_ctx->dcb_input_data.data_in_size = req->nbytes;
		ta_ctx->dcb_input_data.app_out_address = dst_ptr;
		ta_ctx->dcb_input_data.block_size =
			crypto_ablkcipher_blocksize(tfm);
		ta_ctx->dcb_input_data.tail_block_size = 0;
		ta_ctx->dcb_input_data.is_applet = 0;
		ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
		ta_ctx->dcb_input_data.dst_sg = ta_ctx->dst_sg;

		result = sep_create_dcb_dmatables_context_kernel(
			ta_ctx->sep_used,
			&ta_ctx->dcb_region,
			&ta_ctx->dmatables_region,
			&ta_ctx->dma_ctx,
			&ta_ctx->dcb_input_data,
			1);
		if (result) {
			dev_warn(&ta_ctx->sep_used->pdev->dev,
				"crypto dma table create failed\n");
			return -EINVAL;
		}

		/* Portion of msg is nulled (no data) */
		msg[0] = (u32)0;
		msg[1] = (u32)0;
		msg[2] = (u32)0;
		msg[3] = (u32)0;
		msg[4] = (u32)0;
		sep_write_msg(ta_ctx, (void *)msg, sizeof(u32) * 5,
			sizeof(u32) * 5, &msg_offset, 0);
	}

	/**
	 * Before we write the message, we need to overwrite the
	 * vendor's IV with the one from our own ablkcipher walk
	 * iv because this is needed for dm-crypt
	 */
	sep_dump_ivs(req, "sending data block to sep\n");
	if ((ta_ctx->current_request == DES_CBC) &&
		(ta_ctx->des_opmode == SEP_DES_CBC)) {

		dev_dbg(&ta_ctx->sep_used->pdev->dev,
			"overwrite vendor iv on DES\n");
		des_internal = (struct sep_des_internal_context *)
			sctx->des_private_ctx.ctx_buf;
		memcpy((void *)des_internal->iv_context,
			ta_ctx->walk.iv, crypto_ablkcipher_ivsize(tfm));
	} else if ((ta_ctx->current_request == AES_CBC) &&
		(ta_ctx->aes_opmode == SEP_AES_CBC)) {

		dev_dbg(&ta_ctx->sep_used->pdev->dev,
			"overwrite vendor iv on AES\n");
		aes_internal = (struct sep_aes_internal_context *)
			sctx->aes_private_ctx.cbuff;
		memcpy((void *)aes_internal->aes_ctx_iv,
			ta_ctx->walk.iv, crypto_ablkcipher_ivsize(tfm));
	}

	/* Write context into message */
	if (ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) {
		sep_write_context(ta_ctx, &msg_offset,
			&sctx->des_private_ctx,
			sizeof(struct sep_des_private_context));
	} else {
		sep_write_context(ta_ctx, &msg_offset,
			&sctx->aes_private_ctx,
			sizeof(struct sep_aes_private_context));
	}

	/* conclude message */
	sep_end_msg(ta_ctx, msg_offset);

	/* Parent (caller) is now ready to tell the sep to go ahead */
	return 0;
}
/**
 * This function sets things up for a crypto key submit process
 * This does all preparation, but does not try to grab the
 * sep
 * @req: pointer to struct ablkcipher_request
 * returns: 0 if all went well, non zero if error
 */
static int sep_crypto_send_key(struct ablkcipher_request *req)
{
	int int_error;
	u32 msg_offset;
	static u32 msg[10];

	u32 max_length;
	struct this_task_ctx *ta_ctx;
	struct crypto_ablkcipher *tfm;
	struct sep_system_ctx *sctx;

	ta_ctx = ablkcipher_request_ctx(req);
	tfm = crypto_ablkcipher_reqtfm(req);
	sctx = crypto_ablkcipher_ctx(tfm);

	dev_dbg(&ta_ctx->sep_used->pdev->dev, "sending key\n");

	/* start the walk on scatterlists */
	ablkcipher_walk_init(&ta_ctx->walk, req->src, req->dst, req->nbytes);
	dev_dbg(&ta_ctx->sep_used->pdev->dev,
		"sep crypto block data size of %x\n", req->nbytes);

	int_error = ablkcipher_walk_phys(req, &ta_ctx->walk);
	if (int_error) {
		dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
			int_error);
		return -ENOMEM;
	}

	/* check iv */
	if ((ta_ctx->current_request == DES_CBC) &&
		(ta_ctx->des_opmode == SEP_DES_CBC)) {
		if (!ta_ctx->walk.iv) {
			dev_warn(&ta_ctx->sep_used->pdev->dev, "no iv found\n");
			return -EINVAL;
		}

		memcpy(ta_ctx->iv, ta_ctx->walk.iv, SEP_DES_IV_SIZE_BYTES);
	}

	if ((ta_ctx->current_request == AES_CBC) &&
		(ta_ctx->aes_opmode == SEP_AES_CBC)) {
		if (!ta_ctx->walk.iv) {
			dev_warn(&ta_ctx->sep_used->pdev->dev, "no iv found\n");
			return -EINVAL;
		}

		memcpy(ta_ctx->iv, ta_ctx->walk.iv, SEP_AES_IV_SIZE_BYTES);
	}

	/* put together message to SEP */
	/* Start with op code */
	sep_make_header(ta_ctx, &msg_offset, ta_ctx->init_opcode);

	/* now deal with IV */
	if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
		if (ta_ctx->des_opmode == SEP_DES_CBC) {
			sep_write_msg(ta_ctx, ta_ctx->iv,
				SEP_DES_IV_SIZE_BYTES, sizeof(u32) * 4,
				&msg_offset, 1);
		} else {
			/* Skip if ECB */
			msg_offset += 4 * sizeof(u32);
		}
	} else {
		max_length = ((SEP_AES_IV_SIZE_BYTES + 3) /
			sizeof(u32)) * sizeof(u32);
		if (ta_ctx->aes_opmode == SEP_AES_CBC) {
			sep_write_msg(ta_ctx, ta_ctx->iv,
				SEP_AES_IV_SIZE_BYTES, max_length,
				&msg_offset, 1);
		} else {
			/* Skip if ECB */
			msg_offset += max_length;
		}
	}

	/* load the key */
	if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
		sep_write_msg(ta_ctx, (void *)&sctx->key.des.key1,
			sizeof(u32) * 8, sizeof(u32) * 8,
			&msg_offset, 1);

		msg[0] = (u32)sctx->des_nbr_keys;
		msg[1] = (u32)ta_ctx->des_encmode;
		msg[2] = (u32)ta_ctx->des_opmode;

		sep_write_msg(ta_ctx, (void *)msg,
			sizeof(u32) * 3, sizeof(u32) * 3,
			&msg_offset, 0);
	} else {
		sep_write_msg(ta_ctx, (void *)&sctx->key.aes,
			sctx->keylen,
			SEP_AES_MAX_KEY_SIZE_BYTES,
			&msg_offset, 1);

		msg[0] = (u32)sctx->aes_key_size;
		msg[1] = (u32)ta_ctx->aes_encmode;
		msg[2] = (u32)ta_ctx->aes_opmode;
		msg[3] = (u32)0; /* Secret key is not used */
		sep_write_msg(ta_ctx, (void *)msg,
			sizeof(u32) * 4, sizeof(u32) * 4,
			&msg_offset, 0);
	}

	/* conclude message */
	sep_end_msg(ta_ctx, msg_offset);

	/* Parent (caller) is now ready to tell the sep to go ahead */
	return 0;
}
/* This needs to be run as a work queue as it can be put asleep */
static void sep_crypto_block(void *data)
{
	unsigned long end_time;

	int result;

	struct ablkcipher_request *req;
	struct this_task_ctx *ta_ctx;
	struct crypto_ablkcipher *tfm;
	struct sep_system_ctx *sctx;
	int are_we_done_yet;

	req = (struct ablkcipher_request *)data;
	ta_ctx = ablkcipher_request_ctx(req);
	tfm = crypto_ablkcipher_reqtfm(req);
	sctx = crypto_ablkcipher_ctx(tfm);

	ta_ctx->are_we_done_yet = &are_we_done_yet;

	pr_debug("sep_crypto_block\n");
	pr_debug("tfm is %p sctx is %p ta_ctx is %p\n",
		tfm, sctx, ta_ctx);
	pr_debug("key_sent is %d\n", sctx->key_sent);

	/* do we need to send the key */
	if (sctx->key_sent == 0) {
		are_we_done_yet = 0;
		result = sep_crypto_send_key(req); /* prep to send key */
		if (result != 0) {
			dev_dbg(&ta_ctx->sep_used->pdev->dev,
				"could not prep key %x\n", result);
			sep_crypto_release(sctx, ta_ctx, result);
			return;
		}

		result = sep_crypto_take_sep(ta_ctx);
		if (result) {
			dev_warn(&ta_ctx->sep_used->pdev->dev,
				"sep_crypto_take_sep for key send failed\n");
			sep_crypto_release(sctx, ta_ctx, result);
			return;
		}

		/* now we sit and wait up to a fixed time for completion */
		end_time = jiffies + (WAIT_TIME * HZ);
		while ((time_before(jiffies, end_time)) &&
			(are_we_done_yet == 0))
			schedule();

		/* Done waiting; still not done yet? */
		if (are_we_done_yet == 0) {
			dev_dbg(&ta_ctx->sep_used->pdev->dev,
				"Send key job never got done\n");
			sep_crypto_release(sctx, ta_ctx, -EINVAL);
			return;
		}

		/* Set the key sent variable so this can be skipped later */
		sctx->key_sent = 1;
	}

	/* Key sent (or maybe not if we did not have to), now send block */
	are_we_done_yet = 0;

	result = sep_crypto_block_data(req);

	if (result != 0) {
		dev_dbg(&ta_ctx->sep_used->pdev->dev,
			"could not prep block %x\n", result);
		sep_crypto_release(sctx, ta_ctx, result);
		return;
	}

	result = sep_crypto_take_sep(ta_ctx);
	if (result) {
		dev_warn(&ta_ctx->sep_used->pdev->dev,
			"sep_crypto_take_sep for block send failed\n");
		sep_crypto_release(sctx, ta_ctx, result);
		return;
	}

	/* now we sit and wait up to a fixed time for completion */
	end_time = jiffies + (WAIT_TIME * HZ);
	while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
		schedule();

	/* Done waiting; still not done yet? */
	if (are_we_done_yet == 0) {
		dev_dbg(&ta_ctx->sep_used->pdev->dev,
			"Send block job never got done\n");
		sep_crypto_release(sctx, ta_ctx, -EINVAL);
		return;
	}

	/* That's it; entire thing done, get out of queue */

	pr_debug("crypto_block leaving\n");
	pr_debug("tfm is %p sctx is %p ta_ctx is %p\n", tfm, sctx, ta_ctx);
}
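
/*
 * Flow summary for sep_crypto_block(): phase one sends the key (only
 * on the first request for this tfm), phase two sends the data block;
 * each phase busy-waits (schedule() in a loop) for up to WAIT_TIME
 * seconds until the interrupt-driven post-op sets are_we_done_yet.
 */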
/**
 * Post operation (after interrupt) for crypto block
 */
static u32 crypto_post_op(struct sep_device *sep)
{
	u32 u32_error;
	u32 msg_offset;

	ssize_t copy_result;
	static char small_buf[100];

	struct ablkcipher_request *req;
	struct this_task_ctx *ta_ctx;
	struct sep_system_ctx *sctx;
	struct crypto_ablkcipher *tfm;

	struct sep_des_internal_context *des_internal;
	struct sep_aes_internal_context *aes_internal;

	if (!sep->current_cypher_req)
		return -EINVAL;

	/* hold req since we need to submit work after clearing sep */
	req = sep->current_cypher_req;

	ta_ctx = ablkcipher_request_ctx(sep->current_cypher_req);
	tfm = crypto_ablkcipher_reqtfm(sep->current_cypher_req);
	sctx = crypto_ablkcipher_ctx(tfm);

	pr_debug("crypto_post op\n");
	pr_debug("key_sent is %d tfm is %p sctx is %p ta_ctx is %p\n",
		sctx->key_sent, tfm, sctx, ta_ctx);

	dev_dbg(&ta_ctx->sep_used->pdev->dev, "crypto post_op\n");
	dev_dbg(&ta_ctx->sep_used->pdev->dev, "crypto post_op message dump\n");

	/* first bring msg from shared area to local area */
	memcpy(ta_ctx->msg, sep->shared_addr,
		SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);

	/* Is this the result of performing init (key to SEP)? */
	if (sctx->key_sent == 0) {

		/* Did SEP do it okay */
		u32_error = sep_verify_op(ta_ctx, ta_ctx->init_opcode,
			&msg_offset);
		if (u32_error) {
			dev_warn(&ta_ctx->sep_used->pdev->dev,
				"aes init error %x\n", u32_error);
			sep_crypto_release(sctx, ta_ctx, u32_error);
			return u32_error;
		}

		/* Read Context */
		if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
			sep_read_context(ta_ctx, &msg_offset,
				&sctx->des_private_ctx,
				sizeof(struct sep_des_private_context));
		} else {
			sep_read_context(ta_ctx, &msg_offset,
				&sctx->aes_private_ctx,
				sizeof(struct sep_aes_private_context));
		}

		sep_dump_ivs(req, "after sending key to sep\n");

		/* key sent went okay; release sep, and set are_we_done_yet */
		sctx->key_sent = 1;
		sep_crypto_release(sctx, ta_ctx, -EINPROGRESS);

	} else {

		/**
		 * This is the result of a block request
		 */
		dev_dbg(&ta_ctx->sep_used->pdev->dev,
			"crypto_post_op block response\n");

		u32_error = sep_verify_op(ta_ctx, ta_ctx->block_opcode,
			&msg_offset);

		if (u32_error) {
			dev_warn(&ta_ctx->sep_used->pdev->dev,
				"sep block error %x\n", u32_error);
			sep_crypto_release(sctx, ta_ctx, u32_error);
			return -EINVAL;
		}

		if (ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) {

			dev_dbg(&ta_ctx->sep_used->pdev->dev,
				"post op for DES\n");

			/* special case for 1 block des */
			if (sep->current_cypher_req->nbytes ==
				crypto_ablkcipher_blocksize(tfm)) {

				sep_read_msg(ta_ctx, small_buf,
					crypto_ablkcipher_blocksize(tfm),
					crypto_ablkcipher_blocksize(tfm) * 2,
					&msg_offset, 1);

				dev_dbg(&ta_ctx->sep_used->pdev->dev,
					"reading in block des\n");

				copy_result = sg_copy_from_buffer(
					ta_ctx->dst_sg,
					sep_sg_nents(ta_ctx->dst_sg),
					small_buf,
					crypto_ablkcipher_blocksize(tfm));

				if (copy_result !=
					crypto_ablkcipher_blocksize(tfm)) {

					dev_warn(&ta_ctx->sep_used->pdev->dev,
						"des block copy failed\n");
					sep_crypto_release(sctx, ta_ctx,
						-ENOMEM);
					return -ENOMEM;
				}
			}

			/* Read Context */
			sep_read_context(ta_ctx, &msg_offset,
				&sctx->des_private_ctx,
				sizeof(struct sep_des_private_context));
		} else {

			dev_dbg(&ta_ctx->sep_used->pdev->dev,
				"post op for AES\n");

			/* Skip the MAC Output */
			msg_offset += (sizeof(u32) * 4);

			/* Read Context */
			sep_read_context(ta_ctx, &msg_offset,
				&sctx->aes_private_ctx,
				sizeof(struct sep_aes_private_context));
		}

		/* Copy to correct sg if this block had oddball pages */
		if (ta_ctx->dst_sg_hold)
			sep_copy_sg(ta_ctx->sep_used,
				ta_ctx->dst_sg,
				ta_ctx->current_cypher_req->dst,
				ta_ctx->current_cypher_req->nbytes);

		/**
		 * Copy the iv's back to the walk.iv
		 * This is required for dm_crypt
		 */
		sep_dump_ivs(req, "got data block from sep\n");
		if ((ta_ctx->current_request == DES_CBC) &&
			(ta_ctx->des_opmode == SEP_DES_CBC)) {

			dev_dbg(&ta_ctx->sep_used->pdev->dev,
				"returning result iv to walk on DES\n");
			des_internal = (struct sep_des_internal_context *)
				sctx->des_private_ctx.ctx_buf;
			memcpy(ta_ctx->walk.iv,
				(void *)des_internal->iv_context,
				crypto_ablkcipher_ivsize(tfm));
		} else if ((ta_ctx->current_request == AES_CBC) &&
			(ta_ctx->aes_opmode == SEP_AES_CBC)) {

			dev_dbg(&ta_ctx->sep_used->pdev->dev,
				"returning result iv to walk on AES\n");
			aes_internal = (struct sep_aes_internal_context *)
				sctx->aes_private_ctx.cbuff;
			memcpy(ta_ctx->walk.iv,
				(void *)aes_internal->aes_ctx_iv,
				crypto_ablkcipher_ivsize(tfm));
		}

		/* finished, release everything */
		sep_crypto_release(sctx, ta_ctx, 0);
	}
	pr_debug("crypto_post_op done\n");
	pr_debug("key_sent is %d tfm is %p sctx is %p ta_ctx is %p\n",
		sctx->key_sent, tfm, sctx, ta_ctx);

	return 0;
}
static u32 hash_init_post_op(struct sep_device *sep)
{
	u32 u32_error;
	u32 msg_offset;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
	struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
	struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);

	dev_dbg(&ta_ctx->sep_used->pdev->dev,
		"hash init post op\n");

	/* first bring msg from shared area to local area */
	memcpy(ta_ctx->msg, sep->shared_addr,
		SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);

	u32_error = sep_verify_op(ta_ctx, SEP_HASH_INIT_OPCODE,
		&msg_offset);

	if (u32_error) {
		dev_warn(&ta_ctx->sep_used->pdev->dev, "hash init error %x\n",
			u32_error);
		sep_crypto_release(sctx, ta_ctx, u32_error);
		return u32_error;
	}

	/* Read Context */
	sep_read_context(ta_ctx, &msg_offset,
		&sctx->hash_private_ctx,
		sizeof(struct sep_hash_private_context));

	/* Signal to crypto infrastructure and clear out */
	dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash init post op done\n");
	sep_crypto_release(sctx, ta_ctx, 0);
	return 0;
}
static u32 hash_update_post_op(struct sep_device *sep)
{
	u32 u32_error;
	u32 msg_offset;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
	struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
	struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);

	dev_dbg(&ta_ctx->sep_used->pdev->dev,
		"hash update post op\n");

	/* first bring msg from shared area to local area */
	memcpy(ta_ctx->msg, sep->shared_addr,
		SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);

	u32_error = sep_verify_op(ta_ctx, SEP_HASH_UPDATE_OPCODE,
		&msg_offset);

	if (u32_error) {
		dev_warn(&ta_ctx->sep_used->pdev->dev, "hash update error %x\n",
			u32_error);
		sep_crypto_release(sctx, ta_ctx, u32_error);
		return u32_error;
	}

	/* Read Context */
	sep_read_context(ta_ctx, &msg_offset,
		&sctx->hash_private_ctx,
		sizeof(struct sep_hash_private_context));

	/**
	 * Following is only for finup; if we just completed the
	 * data portion of finup, we now need to kick off the
	 * finish portion of finup.
	 */

	if (ta_ctx->sep_used->current_hash_stage == HASH_FINUP_DATA) {

		/* first reset stage to HASH_FINUP_FINISH */
		ta_ctx->sep_used->current_hash_stage = HASH_FINUP_FINISH;

		/* now enqueue the finish operation */
		spin_lock_irq(&queue_lock);
		u32_error = crypto_enqueue_request(&sep_queue,
			&ta_ctx->sep_used->current_hash_req->base);
		spin_unlock_irq(&queue_lock);

		if ((u32_error != 0) && (u32_error != -EINPROGRESS)) {
			dev_warn(&ta_ctx->sep_used->pdev->dev,
				"sep cypher post op can't queue\n");
			sep_crypto_release(sctx, ta_ctx, u32_error);
			return u32_error;
		}

		/* schedule the data send */
		u32_error = sep_submit_work(ta_ctx->sep_used->workqueue,
			sep_dequeuer, (void *)&sep_queue);

		if (u32_error) {
			dev_warn(&ta_ctx->sep_used->pdev->dev,
				"can't submit work sep_crypto_block\n");
			sep_crypto_release(sctx, ta_ctx, -EINVAL);
			return -EINVAL;
		}
	}

	/* Signal to crypto infrastructure and clear out */
	dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash update post op done\n");
	sep_crypto_release(sctx, ta_ctx, 0);
	return 0;
}
static u32 hash_final_post_op(struct sep_device *sep)
{
	u32 u32_error;
	u32 msg_offset;
	u32 max_length;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
	struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
	struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);

	dev_dbg(&ta_ctx->sep_used->pdev->dev,
		"hash final post op\n");

	/* first bring msg from shared area to local area */
	memcpy(ta_ctx->msg, sep->shared_addr,
		SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);

	u32_error = sep_verify_op(ta_ctx, SEP_HASH_FINISH_OPCODE,
		&msg_offset);

	if (u32_error) {
		dev_warn(&ta_ctx->sep_used->pdev->dev, "hash finish error %x\n",
			u32_error);
		sep_crypto_release(sctx, ta_ctx, u32_error);
		return u32_error;
	}

	/* Grab the result */
	if (ta_ctx->current_hash_req->result == NULL) {
		/* Oops, null buffer; error out here */
		dev_warn(&ta_ctx->sep_used->pdev->dev,
			"hash finish null buffer\n");
		sep_crypto_release(sctx, ta_ctx, (u32)-ENOMEM);
		return -ENOMEM;
	}

	max_length = (((SEP_HASH_RESULT_SIZE_WORDS * sizeof(u32)) + 3) /
		sizeof(u32)) * sizeof(u32);

	sep_read_msg(ta_ctx,
		ta_ctx->current_hash_req->result,
		crypto_ahash_digestsize(tfm), max_length,
		&msg_offset, 0);

	/* Signal to crypto infrastructure and clear out */
	dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash finish post op done\n");
	sep_crypto_release(sctx, ta_ctx, 0);
	return 0;
}
static u32 hash_digest_post_op(struct sep_device *sep)
{
	u32 u32_error;
	u32 msg_offset;
	u32 max_length;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
	struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
	struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);

	dev_dbg(&ta_ctx->sep_used->pdev->dev,
		"hash digest post op\n");

	/* first bring msg from shared area to local area */
	memcpy(ta_ctx->msg, sep->shared_addr,
		SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);

	u32_error = sep_verify_op(ta_ctx, SEP_HASH_SINGLE_OPCODE,
		&msg_offset);

	if (u32_error) {
		dev_warn(&ta_ctx->sep_used->pdev->dev,
			"hash digest finish error %x\n", u32_error);

		sep_crypto_release(sctx, ta_ctx, u32_error);
		return u32_error;
	}

	/* Grab the result */
	if (ta_ctx->current_hash_req->result == NULL) {
		/* Oops, null buffer; error out here */
		dev_warn(&ta_ctx->sep_used->pdev->dev,
			"hash digest finish null buffer\n");
		sep_crypto_release(sctx, ta_ctx, (u32)-ENOMEM);
		return -ENOMEM;
	}

	max_length = (((SEP_HASH_RESULT_SIZE_WORDS * sizeof(u32)) + 3) /
		sizeof(u32)) * sizeof(u32);

	sep_read_msg(ta_ctx,
		ta_ctx->current_hash_req->result,
		crypto_ahash_digestsize(tfm), max_length,
		&msg_offset, 0);

	/* Signal to crypto infrastructure and clear out */
	dev_dbg(&ta_ctx->sep_used->pdev->dev,
		"hash digest finish post op done\n");

	sep_crypto_release(sctx, ta_ctx, 0);
	return 0;
}
/**
 * The sep_finish function is the function that is scheduled (via tasklet)
 * by the interrupt service routine when the SEP sends an interrupt.
 * This is only called by the interrupt handler as a tasklet.
 */
static void sep_finish(unsigned long data)
{
	struct sep_device *sep_dev;
	int res;

	res = 0;

	if (data == 0) {
		pr_debug("sep_finish called with null data\n");
		return;
	}

	sep_dev = (struct sep_device *)data;
	if (sep_dev == NULL) {
		pr_debug("sep_finish; sep_dev is NULL\n");
		return;
	}

	if (sep_dev->in_kernel == (u32)0) {
		dev_warn(&sep_dev->pdev->dev,
			"sep_finish; not in kernel operation\n");
		return;
	}

	/* Did we really do a sep command prior to this? */
	if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
		&sep_dev->ta_ctx->call_status.status)) {

		dev_warn(&sep_dev->pdev->dev, "[PID%d] sendmsg not called\n",
			current->pid);
		return;
	}

	if (sep_dev->send_ct != sep_dev->reply_ct) {
		dev_warn(&sep_dev->pdev->dev,
			"[PID%d] poll; no message came back\n",
			current->pid);
		return;
	}

	res = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR3_REG_ADDR);

	/* Check for error (In case time ran out) */
	if ((res != 0x0) && (res != 0x8)) {
		dev_warn(&sep_dev->pdev->dev,
			"[PID%d] poll; poll error GPR3 is %x\n",
			current->pid, res);
		return;
	}

	/* What kind of interrupt from sep was this? */
	res = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR2_REG_ADDR);

	dev_dbg(&sep_dev->pdev->dev, "[PID%d] GPR2 at crypto finish is %x\n",
		current->pid, res);

	/* Print request? */
	if ((res >> 30) & 0x1) {
		dev_dbg(&sep_dev->pdev->dev, "[PID%d] sep print req\n",
			current->pid);
		dev_dbg(&sep_dev->pdev->dev, "[PID%d] contents: %s\n",
			current->pid,
			(char *)(sep_dev->shared_addr +
			SEP_DRIVER_PRINTF_OFFSET_IN_BYTES));
		return;
	}

	/* Request for daemon (not currently in POR)? */
	if (res >> 31) {
		dev_dbg(&sep_dev->pdev->dev,
			"[PID%d] sep request; ignoring\n",
			current->pid);
		return;
	}

	/* If we got here, then we have a reply to a sep command */

	dev_dbg(&sep_dev->pdev->dev,
		"[PID%d] sep reply to command; processing request: %x\n",
		current->pid, sep_dev->current_request);

	switch (sep_dev->current_request) {
	case AES_CBC:
	case AES_ECB:
	case DES_CBC:
	case DES_ECB:
		res = crypto_post_op(sep_dev);
		break;
	case SHA1:
	case MD5:
	case SHA224:
	case SHA256:
		switch (sep_dev->current_hash_stage) {
		case HASH_INIT:
			res = hash_init_post_op(sep_dev);
			break;
		case HASH_UPDATE:
		case HASH_FINUP_DATA:
			res = hash_update_post_op(sep_dev);
			break;
		case HASH_FINUP_FINISH:
		case HASH_FINISH:
			res = hash_final_post_op(sep_dev);
			break;
		case HASH_DIGEST:
			res = hash_digest_post_op(sep_dev);
			break;
		default:
			pr_debug("sep - invalid stage for hash finish\n");
		}
		break;
	default:
		pr_debug("sep - invalid request for finish\n");
	}

	if (res)
		pr_debug("sep - finish returned error %x\n", res);
}
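
/*
 * Dispatch summary for sep_finish(): GPR2 distinguishes printf and
 * daemon requests from command replies; replies are routed by
 * sep_dev->current_request to crypto_post_op() for ciphers or to the
 * per-stage hash post-op handlers for SHA1/MD5/SHA224/SHA256.
 */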
static int sep_hash_cra_init(struct crypto_tfm *tfm)
{
	const char *alg_name = crypto_tfm_alg_name(tfm);

	pr_debug("sep_hash_cra_init name is %s\n", alg_name);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
		sizeof(struct this_task_ctx));
	return 0;
}

static void sep_hash_cra_exit(struct crypto_tfm *tfm)
{
	pr_debug("sep_hash_cra_exit\n");
}
static void sep_hash_init(void *data)
{
	u32 msg_offset;
	int result;
	struct ahash_request *req;
	struct crypto_ahash *tfm;
	struct this_task_ctx *ta_ctx;
	struct sep_system_ctx *sctx;
	unsigned long end_time;
	int are_we_done_yet;

	req = (struct ahash_request *)data;
	tfm = crypto_ahash_reqtfm(req);
	sctx = crypto_ahash_ctx(tfm);
	ta_ctx = ahash_request_ctx(req);
	ta_ctx->sep_used = sep_dev;

	ta_ctx->are_we_done_yet = &are_we_done_yet;

	dev_dbg(&ta_ctx->sep_used->pdev->dev,
		"sep_hash_init\n");
	ta_ctx->current_hash_stage = HASH_INIT;
	/* opcode and mode */
	sep_make_header(ta_ctx, &msg_offset, SEP_HASH_INIT_OPCODE);
	sep_write_msg(ta_ctx, &ta_ctx->hash_opmode,
		sizeof(u32), sizeof(u32), &msg_offset, 0);
	sep_end_msg(ta_ctx, msg_offset);

	are_we_done_yet = 0;
	result = sep_crypto_take_sep(ta_ctx);
	if (result) {
		dev_warn(&ta_ctx->sep_used->pdev->dev,
			"sep_hash_init take sep failed\n");
		sep_crypto_release(sctx, ta_ctx, -EINVAL);
		return;
	}

	/* now we sit and wait up to a fixed time for completion */
	end_time = jiffies + (WAIT_TIME * HZ);
	while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
		schedule();

	/* Done waiting; still not done yet? */
	if (are_we_done_yet == 0) {
		dev_dbg(&ta_ctx->sep_used->pdev->dev,
			"hash init never got done\n");
		sep_crypto_release(sctx, ta_ctx, -EINVAL);
		return;
	}
}
2105 static void sep_hash_update(void *data
)
2110 struct sep_hash_internal_context
*int_ctx
;
2114 int are_we_done_yet
;
2117 static char small_buf
[100];
2119 struct scatterlist
*new_sg
;
2120 ssize_t copy_result
;
2121 struct ahash_request
*req
;
2122 struct crypto_ahash
*tfm
;
2123 struct this_task_ctx
*ta_ctx
;
2124 struct sep_system_ctx
*sctx
;
2125 unsigned long end_time
;
2127 req
= (struct ahash_request
*)data
;
2128 tfm
= crypto_ahash_reqtfm(req
);
2129 sctx
= crypto_ahash_ctx(tfm
);
2130 ta_ctx
= ahash_request_ctx(req
);
2131 ta_ctx
->sep_used
= sep_dev
;
2133 ta_ctx
->are_we_done_yet
= &are_we_done_yet
;
2135 /* length for queue status */
2136 ta_ctx
->nbytes
= req
->nbytes
;
	dev_dbg(&ta_ctx->sep_used->pdev->dev,
		"sep_hash_update\n");
	ta_ctx->current_hash_stage = HASH_UPDATE;
	len = req->nbytes;

	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	tail_len = req->nbytes % block_size;
	dev_dbg(&ta_ctx->sep_used->pdev->dev, "length is %x\n", len);
	dev_dbg(&ta_ctx->sep_used->pdev->dev, "block_size is %x\n", block_size);
	dev_dbg(&ta_ctx->sep_used->pdev->dev, "tail len is %x\n", tail_len);

	/* Compute header/tail sizes */
	int_ctx = (struct sep_hash_internal_context *)&sctx->
		hash_private_ctx.internal_context;
	head_len = (block_size - int_ctx->prev_update_bytes) % block_size;
	tail_len = (req->nbytes - head_len) % block_size;

	/* Make sure all pages are an even block */
	int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
		req->nbytes,
		block_size, &new_sg, 1);

	if (int_error < 0) {
		dev_warn(&ta_ctx->sep_used->pdev->dev,
			"oddball pages error in hash update\n");
		sep_crypto_release(sctx, ta_ctx, -ENOMEM);
		return;
	} else if (int_error == 1) {
		ta_ctx->src_sg = new_sg;
		ta_ctx->src_sg_hold = new_sg;
	} else {
		ta_ctx->src_sg = req->src;
		ta_ctx->src_sg_hold = NULL;
	}

	src_ptr = sg_virt(ta_ctx->src_sg);

	if ((!req->nbytes) || (!ta_ctx->src_sg)) {
		/* null data */
		src_ptr = NULL;
	}

	ta_ctx->dcb_input_data.app_in_address = src_ptr;
	ta_ctx->dcb_input_data.data_in_size =
		req->nbytes - (head_len + tail_len);
	ta_ctx->dcb_input_data.app_out_address = NULL;
	ta_ctx->dcb_input_data.block_size = block_size;
	ta_ctx->dcb_input_data.tail_block_size = 0;
	ta_ctx->dcb_input_data.is_applet = 0;
	ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
	ta_ctx->dcb_input_data.dst_sg = NULL;

	int_error = sep_create_dcb_dmatables_context_kernel(
		ta_ctx->sep_used,
		&ta_ctx->dcb_region,
		&ta_ctx->dmatables_region,
		&ta_ctx->dma_ctx,
		&ta_ctx->dcb_input_data,
		1);
	if (int_error) {
		dev_warn(&ta_ctx->sep_used->pdev->dev,
			"hash update dma table create failed\n");
		sep_crypto_release(sctx, ta_ctx, -EINVAL);
		return;
	}

	/* Construct message to SEP */
	sep_make_header(ta_ctx, &msg_offset, SEP_HASH_UPDATE_OPCODE);

	msg[0] = (u32)0;
	msg[1] = (u32)0;
	msg[2] = (u32)0;

	sep_write_msg(ta_ctx, msg, sizeof(u32) * 3, sizeof(u32) * 3,
		&msg_offset, 0);

	/* Handle remainders */

	/* Head */
	sep_write_msg(ta_ctx, &head_len, sizeof(u32),
		sizeof(u32), &msg_offset, 0);

	if (head_len) {
		copy_result = sg_copy_to_buffer(
			ta_ctx->src_sg,
			sep_sg_nents(ta_ctx->src_sg),
			small_buf, head_len);

		if (copy_result != head_len) {
			dev_warn(&ta_ctx->sep_used->pdev->dev,
				"sg head copy failure in hash block\n");
			sep_crypto_release(sctx, ta_ctx, -ENOMEM);
			return;
		}

		sep_write_msg(ta_ctx, small_buf, head_len,
			sizeof(u32) * 32, &msg_offset, 1);
	} else {
		msg_offset += sizeof(u32) * 32;
	}

	/* Tail */
	sep_write_msg(ta_ctx, &tail_len, sizeof(u32),
		sizeof(u32), &msg_offset, 0);

	if (tail_len) {
		copy_result = sep_copy_offset_sg(
			ta_ctx->sep_used,
			ta_ctx->src_sg,
			req->nbytes - tail_len,
			small_buf, tail_len);

		if (copy_result != tail_len) {
			dev_warn(&ta_ctx->sep_used->pdev->dev,
				"sg tail copy failure in hash block\n");
			sep_crypto_release(sctx, ta_ctx, -ENOMEM);
			return;
		}

		sep_write_msg(ta_ctx, small_buf, tail_len,
			sizeof(u32) * 32, &msg_offset, 1);
	} else {
		msg_offset += sizeof(u32) * 32;
	}

	/* Context */
	sep_write_context(ta_ctx, &msg_offset, &sctx->hash_private_ctx,
		sizeof(struct sep_hash_private_context));

	sep_end_msg(ta_ctx, msg_offset);
	are_we_done_yet = 0;
	int_error = sep_crypto_take_sep(ta_ctx);
	if (int_error) {
		dev_warn(&ta_ctx->sep_used->pdev->dev,
			"sep_hash_update take sep failed\n");
		sep_crypto_release(sctx, ta_ctx, -EINVAL);
	}

	/* now we sit and wait up to a fixed time for completion */
	end_time = jiffies + (WAIT_TIME * HZ);
	while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
		schedule();

	/* Done waiting; still not done yet? */
	if (are_we_done_yet == 0) {
		dev_dbg(&ta_ctx->sep_used->pdev->dev,
			"hash update never got done\n");
		sep_crypto_release(sctx, ta_ctx, -EINVAL);
		return;
	}
}
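
/**
 *	sep_hash_final
 *	@data: pointer to the struct ahash_request being finalized
 *	Workqueue callback for the FINISH stage: sends the saved hash
 *	private context back to the SEP with the finish opcode and
 *	waits (up to WAIT_TIME seconds) for the coprocessor to produce
 *	the final digest.
 */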
static void sep_hash_final(void *data)
{
	u32 msg_offset;
	struct ahash_request *req;
	struct crypto_ahash *tfm;
	struct this_task_ctx *ta_ctx;
	struct sep_system_ctx *sctx;
	int result;
	unsigned long end_time;
	int are_we_done_yet;

	req = (struct ahash_request *)data;
	tfm = crypto_ahash_reqtfm(req);
	sctx = crypto_ahash_ctx(tfm);
	ta_ctx = ahash_request_ctx(req);
	ta_ctx->sep_used = sep_dev;

	dev_dbg(&ta_ctx->sep_used->pdev->dev,
		"sep_hash_final\n");
	ta_ctx->current_hash_stage = HASH_FINISH;

	ta_ctx->are_we_done_yet = &are_we_done_yet;

	/* opcode and mode */
	sep_make_header(ta_ctx, &msg_offset, SEP_HASH_FINISH_OPCODE);

	/* Context */
	sep_write_context(ta_ctx, &msg_offset, &sctx->hash_private_ctx,
		sizeof(struct sep_hash_private_context));

	sep_end_msg(ta_ctx, msg_offset);
	are_we_done_yet = 0;
	result = sep_crypto_take_sep(ta_ctx);
	if (result) {
		dev_warn(&ta_ctx->sep_used->pdev->dev,
			"sep_hash_final take sep failed\n");
		sep_crypto_release(sctx, ta_ctx, -EINVAL);
	}

	/* now we sit and wait up to a fixed time for completion */
	end_time = jiffies + (WAIT_TIME * HZ);
	while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
		schedule();

	/* Done waiting; still not done yet? */
	if (are_we_done_yet == 0) {
		dev_dbg(&ta_ctx->sep_used->pdev->dev,
			"hash final job never got done\n");
		sep_crypto_release(sctx, ta_ctx, -EINVAL);
		return;
	}
}
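
/**
 *	sep_hash_digest
 *	@data: pointer to the struct ahash_request to digest
 *	Workqueue callback for a one-shot digest: builds DMA tables
 *	for the block-aligned part of the data, passes any odd tail
 *	inline in the message, and waits for the SEP to hash the
 *	whole request in a single operation.
 */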
static void sep_hash_digest(void *data)
{
	int int_error;
	u32 msg_offset;
	u32 block_size;
	u32 msg[10];
	size_t copy_result;
	int result;
	u32 tail_len;
	int are_we_done_yet;
	void *src_ptr;
	static char small_buf[100];
	struct scatterlist *new_sg;

	struct ahash_request *req;
	struct crypto_ahash *tfm;
	struct this_task_ctx *ta_ctx;
	struct sep_system_ctx *sctx;
	unsigned long end_time;

	req = (struct ahash_request *)data;
	tfm = crypto_ahash_reqtfm(req);
	sctx = crypto_ahash_ctx(tfm);
	ta_ctx = ahash_request_ctx(req);
	ta_ctx->sep_used = sep_dev;

	dev_dbg(&ta_ctx->sep_used->pdev->dev,
		"sep_hash_digest\n");
	ta_ctx->current_hash_stage = HASH_DIGEST;

	ta_ctx->are_we_done_yet = &are_we_done_yet;

	/* length for queue status */
	ta_ctx->nbytes = req->nbytes;

	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	tail_len = req->nbytes % block_size;
	dev_dbg(&ta_ctx->sep_used->pdev->dev, "length is %x\n", req->nbytes);
	dev_dbg(&ta_ctx->sep_used->pdev->dev, "block_size is %x\n", block_size);
	dev_dbg(&ta_ctx->sep_used->pdev->dev, "tail len is %x\n", tail_len);

	/* Make sure all pages are an even block */
	int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
		req->nbytes,
		block_size, &new_sg, 1);

	if (int_error < 0) {
		dev_warn(&ta_ctx->sep_used->pdev->dev,
			"oddball pages error in hash digest\n");
		sep_crypto_release(sctx, ta_ctx, -ENOMEM);
		return;
	} else if (int_error == 1) {
		ta_ctx->src_sg = new_sg;
		ta_ctx->src_sg_hold = new_sg;
	} else {
		ta_ctx->src_sg = req->src;
		ta_ctx->src_sg_hold = NULL;
	}

	src_ptr = sg_virt(ta_ctx->src_sg);

	if ((!req->nbytes) || (!ta_ctx->src_sg)) {
		/* null data */
		src_ptr = NULL;
	}

	ta_ctx->dcb_input_data.app_in_address = src_ptr;
	ta_ctx->dcb_input_data.data_in_size = req->nbytes - tail_len;
	ta_ctx->dcb_input_data.app_out_address = NULL;
	ta_ctx->dcb_input_data.block_size = block_size;
	ta_ctx->dcb_input_data.tail_block_size = 0;
	ta_ctx->dcb_input_data.is_applet = 0;
	ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
	ta_ctx->dcb_input_data.dst_sg = NULL;

	int_error = sep_create_dcb_dmatables_context_kernel(
		ta_ctx->sep_used,
		&ta_ctx->dcb_region,
		&ta_ctx->dmatables_region,
		&ta_ctx->dma_ctx,
		&ta_ctx->dcb_input_data,
		1);
	if (int_error) {
		dev_warn(&ta_ctx->sep_used->pdev->dev,
			"hash digest dma table create failed\n");
		sep_crypto_release(sctx, ta_ctx, -EINVAL);
		return;
	}

	/* Construct message to SEP */
	sep_make_header(ta_ctx, &msg_offset, SEP_HASH_SINGLE_OPCODE);
	sep_write_msg(ta_ctx, &ta_ctx->hash_opmode,
		sizeof(u32), sizeof(u32), &msg_offset, 0);

	msg[0] = (u32)0;
	msg[1] = (u32)0;
	msg[2] = (u32)0;

	sep_write_msg(ta_ctx, msg, sizeof(u32) * 3, sizeof(u32) * 3,
		&msg_offset, 0);

	/* Tail */
	sep_write_msg(ta_ctx, &tail_len, sizeof(u32),
		sizeof(u32), &msg_offset, 0);

	if (tail_len) {
		copy_result = sep_copy_offset_sg(
			ta_ctx->sep_used,
			ta_ctx->src_sg,
			req->nbytes - tail_len,
			small_buf, tail_len);

		if (copy_result != tail_len) {
			dev_warn(&ta_ctx->sep_used->pdev->dev,
				"sg tail copy failure in hash block\n");
			sep_crypto_release(sctx, ta_ctx, -ENOMEM);
			return;
		}

		sep_write_msg(ta_ctx, small_buf, tail_len,
			sizeof(u32) * 32, &msg_offset, 1);
	} else {
		msg_offset += sizeof(u32) * 32;
	}

	sep_end_msg(ta_ctx, msg_offset);

	are_we_done_yet = 0;
	result = sep_crypto_take_sep(ta_ctx);
	if (result) {
		dev_warn(&ta_ctx->sep_used->pdev->dev,
			"sep_hash_digest take sep failed\n");
		sep_crypto_release(sctx, ta_ctx, -EINVAL);
	}

	/* now we sit and wait up to a fixed time for completion */
	end_time = jiffies + (WAIT_TIME * HZ);
	while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
		schedule();

	/* Done waiting; still not done yet? */
	if (are_we_done_yet == 0) {
		dev_dbg(&ta_ctx->sep_used->pdev->dev,
			"hash digest job never got done\n");
		sep_crypto_release(sctx, ta_ctx, -EINVAL);
		return;
	}
}
/**
 *	sep_dequeuer
 *	@data: pointer to the crypto queue to service
 *	This is what is called by each of the API's provided
 *	in the kernel crypto descriptors. It is run in a process
 *	context using the kernel workqueues. Therefore it can
 *	be put to sleep.
 */
static void sep_dequeuer(void *data)
{
	struct crypto_queue *this_queue;
	struct crypto_async_request *async_req;
	struct crypto_async_request *backlog;
	struct ablkcipher_request *cypher_req;
	struct ahash_request *hash_req;
	struct sep_system_ctx *sctx;
	struct crypto_ahash *hash_tfm;
	struct this_task_ctx *ta_ctx;

	this_queue = (struct crypto_queue *)data;

	spin_lock_irq(&queue_lock);
	backlog = crypto_get_backlog(this_queue);
	async_req = crypto_dequeue_request(this_queue);
	spin_unlock_irq(&queue_lock);

	if (!async_req) {
		pr_debug("sep crypto queue is empty\n");
		return;
	}

	if (backlog) {
		pr_debug("sep crypto backlog set\n");
		if (backlog->complete)
			backlog->complete(backlog, -EINPROGRESS);
	}

	if (!async_req->tfm) {
		pr_debug("sep crypto queue null tfm\n");
		return;
	}

	if (!async_req->tfm->__crt_alg) {
		pr_debug("sep crypto queue null __crt_alg\n");
		return;
	}

	if (!async_req->tfm->__crt_alg->cra_type) {
		pr_debug("sep crypto queue null cra_type\n");
		return;
	}

	/* we have stuff in the queue */
	if (async_req->tfm->__crt_alg->cra_type !=
		&crypto_ahash_type) {
		/* This is for a cypher */
		pr_debug("sep crypto queue doing cipher\n");
		cypher_req = container_of(async_req,
			struct ablkcipher_request,
			base);
		if (!cypher_req) {
			pr_debug("sep crypto queue null cypher_req\n");
			return;
		}

		sep_crypto_block((void *)cypher_req);
		return;
	} else {
		/* This is a hash */
		pr_debug("sep crypto queue doing hash\n");
		/*
		 * This is a bit more complex than cipher; we
		 * need to figure out what type of operation
		 * is requested and dispatch it accordingly.
		 */
		hash_req = ahash_request_cast(async_req);
		if (!hash_req) {
			pr_debug("sep crypto queue null hash_req\n");
			return;
		}

		hash_tfm = crypto_ahash_reqtfm(hash_req);
		if (!hash_tfm) {
			pr_debug("sep crypto queue null hash_tfm\n");
			return;
		}

		sctx = crypto_ahash_ctx(hash_tfm);
		if (!sctx) {
			pr_debug("sep crypto queue null sctx\n");
			return;
		}

		ta_ctx = ahash_request_ctx(hash_req);

		if (ta_ctx->current_hash_stage == HASH_INIT) {
			pr_debug("sep crypto queue hash init\n");
			sep_hash_init((void *)hash_req);
			return;
		} else if (ta_ctx->current_hash_stage == HASH_UPDATE) {
			pr_debug("sep crypto queue hash update\n");
			sep_hash_update((void *)hash_req);
			return;
		} else if (ta_ctx->current_hash_stage == HASH_FINISH) {
			pr_debug("sep crypto queue hash final\n");
			sep_hash_final((void *)hash_req);
			return;
		} else if (ta_ctx->current_hash_stage == HASH_DIGEST) {
			pr_debug("sep crypto queue hash digest\n");
			sep_hash_digest((void *)hash_req);
			return;
		} else if (ta_ctx->current_hash_stage == HASH_FINUP_DATA) {
			pr_debug("sep crypto queue hash finup data\n");
			sep_hash_update((void *)hash_req);
			return;
		} else if (ta_ctx->current_hash_stage == HASH_FINUP_FINISH) {
			pr_debug("sep crypto queue hash finup finish\n");
			sep_hash_final((void *)hash_req);
			return;
		} else {
			pr_debug("sep crypto queue hash oops nothing\n");
			return;
		}
	}
}
static int sep_sha1_init(struct ahash_request *req)
{
	int error;
	int error1;
	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);

	pr_debug("sep - doing sha1 init\n");

	/* Clear out task context */
	memset(ta_ctx, 0, sizeof(struct this_task_ctx));

	ta_ctx->sep_used = sep_dev;
	ta_ctx->current_request = SHA1;
	ta_ctx->current_hash_req = req;
	ta_ctx->current_cypher_req = NULL;
	ta_ctx->hash_opmode = SEP_HASH_SHA1;
	ta_ctx->current_hash_stage = HASH_INIT;

	/* lock necessary so that only one entity touches the queues */
	spin_lock_irq(&queue_lock);
	error = crypto_enqueue_request(&sep_queue, &req->base);

	if ((error != 0) && (error != -EINPROGRESS))
		pr_debug(" sep - crypto enqueue failed: %x\n",
			error);
	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
		sep_dequeuer, (void *)&sep_queue);
	if (error1)
		pr_debug(" sep - workqueue submit failed: %x\n",
			error1);
	spin_unlock_irq(&queue_lock);
	/* We return result of crypto enqueue */
	return error;
}
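
/* Queue the SHA-1 UPDATE stage; serviced later by sep_hash_update */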
static int sep_sha1_update(struct ahash_request *req)
{
	int error;
	int error1;
	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);

	pr_debug("sep - doing sha1 update\n");

	ta_ctx->sep_used = sep_dev;
	ta_ctx->current_request = SHA1;
	ta_ctx->current_hash_req = req;
	ta_ctx->current_cypher_req = NULL;
	ta_ctx->hash_opmode = SEP_HASH_SHA1;
	ta_ctx->current_hash_stage = HASH_UPDATE;

	/* lock necessary so that only one entity touches the queues */
	spin_lock_irq(&queue_lock);
	error = crypto_enqueue_request(&sep_queue, &req->base);

	if ((error != 0) && (error != -EINPROGRESS))
		pr_debug(" sep - crypto enqueue failed: %x\n",
			error);
	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
		sep_dequeuer, (void *)&sep_queue);
	if (error1)
		pr_debug(" sep - workqueue submit failed: %x\n",
			error1);
	spin_unlock_irq(&queue_lock);
	/* We return result of crypto enqueue */
	return error;
}
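
/* Queue the SHA-1 FINISH stage; serviced later by sep_hash_final */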
static int sep_sha1_final(struct ahash_request *req)
{
	int error;
	int error1;
	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);

	pr_debug("sep - doing sha1 final\n");

	ta_ctx->sep_used = sep_dev;
	ta_ctx->current_request = SHA1;
	ta_ctx->current_hash_req = req;
	ta_ctx->current_cypher_req = NULL;
	ta_ctx->hash_opmode = SEP_HASH_SHA1;
	ta_ctx->current_hash_stage = HASH_FINISH;

	/* lock necessary so that only one entity touches the queues */
	spin_lock_irq(&queue_lock);
	error = crypto_enqueue_request(&sep_queue, &req->base);

	if ((error != 0) && (error != -EINPROGRESS))
		pr_debug(" sep - crypto enqueue failed: %x\n",
			error);
	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
		sep_dequeuer, (void *)&sep_queue);
	if (error1)
		pr_debug(" sep - workqueue submit failed: %x\n",
			error1);
	spin_unlock_irq(&queue_lock);
	/* We return result of crypto enqueue */
	return error;
}
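
/* Queue a one-shot SHA-1 digest; serviced later by sep_hash_digest */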
static int sep_sha1_digest(struct ahash_request *req)
{
	int error;
	int error1;
	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);

	pr_debug("sep - doing sha1 digest\n");

	/* Clear out task context */
	memset(ta_ctx, 0, sizeof(struct this_task_ctx));

	ta_ctx->sep_used = sep_dev;
	ta_ctx->current_request = SHA1;
	ta_ctx->current_hash_req = req;
	ta_ctx->current_cypher_req = NULL;
	ta_ctx->hash_opmode = SEP_HASH_SHA1;
	ta_ctx->current_hash_stage = HASH_DIGEST;

	/* lock necessary so that only one entity touches the queues */
	spin_lock_irq(&queue_lock);
	error = crypto_enqueue_request(&sep_queue, &req->base);

	if ((error != 0) && (error != -EINPROGRESS))
		pr_debug(" sep - crypto enqueue failed: %x\n",
			error);
	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
		sep_dequeuer, (void *)&sep_queue);
	if (error1)
		pr_debug(" sep - workqueue submit failed: %x\n",
			error1);
	spin_unlock_irq(&queue_lock);
	/* We return result of crypto enqueue */
	return error;
}
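
/* Queue SHA-1 finup; HASH_FINUP_DATA is dispatched to sep_hash_update by the dequeuer */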
static int sep_sha1_finup(struct ahash_request *req)
{
	int error;
	int error1;
	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);

	pr_debug("sep - doing sha1 finup\n");

	ta_ctx->sep_used = sep_dev;
	ta_ctx->current_request = SHA1;
	ta_ctx->current_hash_req = req;
	ta_ctx->current_cypher_req = NULL;
	ta_ctx->hash_opmode = SEP_HASH_SHA1;
	ta_ctx->current_hash_stage = HASH_FINUP_DATA;

	/* lock necessary so that only one entity touches the queues */
	spin_lock_irq(&queue_lock);
	error = crypto_enqueue_request(&sep_queue, &req->base);

	if ((error != 0) && (error != -EINPROGRESS))
		pr_debug(" sep - crypto enqueue failed: %x\n",
			error);
	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
		sep_dequeuer, (void *)&sep_queue);
	if (error1)
		pr_debug(" sep - workqueue submit failed: %x\n",
			error1);
	spin_unlock_irq(&queue_lock);
	/* We return result of crypto enqueue */
	return error;
}
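
/*
 * The MD5, SHA-224 and SHA-256 entry points below are exact clones
 * of the SHA-1 set above, differing only in the request type and
 * SEP hash opmode they record in the task context.
 */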
static int sep_md5_init(struct ahash_request *req)
{
	int error;
	int error1;
	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);

	pr_debug("sep - doing md5 init\n");

	/* Clear out task context */
	memset(ta_ctx, 0, sizeof(struct this_task_ctx));

	ta_ctx->sep_used = sep_dev;
	ta_ctx->current_request = MD5;
	ta_ctx->current_hash_req = req;
	ta_ctx->current_cypher_req = NULL;
	ta_ctx->hash_opmode = SEP_HASH_MD5;
	ta_ctx->current_hash_stage = HASH_INIT;

	/* lock necessary so that only one entity touches the queues */
	spin_lock_irq(&queue_lock);
	error = crypto_enqueue_request(&sep_queue, &req->base);

	if ((error != 0) && (error != -EINPROGRESS))
		pr_debug(" sep - crypto enqueue failed: %x\n",
			error);
	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
		sep_dequeuer, (void *)&sep_queue);
	if (error1)
		pr_debug(" sep - workqueue submit failed: %x\n",
			error1);
	spin_unlock_irq(&queue_lock);
	/* We return result of crypto enqueue */
	return error;
}
static int sep_md5_update(struct ahash_request *req)
{
	int error;
	int error1;
	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);

	pr_debug("sep - doing md5 update\n");

	ta_ctx->sep_used = sep_dev;
	ta_ctx->current_request = MD5;
	ta_ctx->current_hash_req = req;
	ta_ctx->current_cypher_req = NULL;
	ta_ctx->hash_opmode = SEP_HASH_MD5;
	ta_ctx->current_hash_stage = HASH_UPDATE;

	/* lock necessary so that only one entity touches the queues */
	spin_lock_irq(&queue_lock);
	error = crypto_enqueue_request(&sep_queue, &req->base);

	if ((error != 0) && (error != -EINPROGRESS))
		pr_debug(" sep - crypto enqueue failed: %x\n",
			error);
	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
		sep_dequeuer, (void *)&sep_queue);
	if (error1)
		pr_debug(" sep - workqueue submit failed: %x\n",
			error1);
	spin_unlock_irq(&queue_lock);
	/* We return result of crypto enqueue */
	return error;
}
static int sep_md5_final(struct ahash_request *req)
{
	int error;
	int error1;
	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);

	pr_debug("sep - doing md5 final\n");

	ta_ctx->sep_used = sep_dev;
	ta_ctx->current_request = MD5;
	ta_ctx->current_hash_req = req;
	ta_ctx->current_cypher_req = NULL;
	ta_ctx->hash_opmode = SEP_HASH_MD5;
	ta_ctx->current_hash_stage = HASH_FINISH;

	/* lock necessary so that only one entity touches the queues */
	spin_lock_irq(&queue_lock);
	error = crypto_enqueue_request(&sep_queue, &req->base);

	if ((error != 0) && (error != -EINPROGRESS))
		pr_debug(" sep - crypto enqueue failed: %x\n",
			error);
	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
		sep_dequeuer, (void *)&sep_queue);
	if (error1)
		pr_debug(" sep - workqueue submit failed: %x\n",
			error1);
	spin_unlock_irq(&queue_lock);
	/* We return result of crypto enqueue */
	return error;
}
static int sep_md5_digest(struct ahash_request *req)
{
	int error;
	int error1;
	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);

	pr_debug("sep - doing md5 digest\n");

	/* Clear out task context */
	memset(ta_ctx, 0, sizeof(struct this_task_ctx));

	ta_ctx->sep_used = sep_dev;
	ta_ctx->current_request = MD5;
	ta_ctx->current_hash_req = req;
	ta_ctx->current_cypher_req = NULL;
	ta_ctx->hash_opmode = SEP_HASH_MD5;
	ta_ctx->current_hash_stage = HASH_DIGEST;

	/* lock necessary so that only one entity touches the queues */
	spin_lock_irq(&queue_lock);
	error = crypto_enqueue_request(&sep_queue, &req->base);

	if ((error != 0) && (error != -EINPROGRESS))
		pr_debug(" sep - crypto enqueue failed: %x\n",
			error);
	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
		sep_dequeuer, (void *)&sep_queue);
	if (error1)
		pr_debug(" sep - workqueue submit failed: %x\n",
			error1);
	spin_unlock_irq(&queue_lock);
	/* We return result of crypto enqueue */
	return error;
}
static int sep_md5_finup(struct ahash_request *req)
{
	int error;
	int error1;
	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);

	pr_debug("sep - doing md5 finup\n");

	ta_ctx->sep_used = sep_dev;
	ta_ctx->current_request = MD5;
	ta_ctx->current_hash_req = req;
	ta_ctx->current_cypher_req = NULL;
	ta_ctx->hash_opmode = SEP_HASH_MD5;
	ta_ctx->current_hash_stage = HASH_FINUP_DATA;

	/* lock necessary so that only one entity touches the queues */
	spin_lock_irq(&queue_lock);
	error = crypto_enqueue_request(&sep_queue, &req->base);

	if ((error != 0) && (error != -EINPROGRESS))
		pr_debug(" sep - crypto enqueue failed: %x\n",
			error);
	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
		sep_dequeuer, (void *)&sep_queue);
	if (error1)
		pr_debug(" sep - workqueue submit failed: %x\n",
			error1);
	spin_unlock_irq(&queue_lock);
	/* We return result of crypto enqueue */
	return error;
}
static int sep_sha224_init(struct ahash_request *req)
{
	int error;
	int error1;
	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);

	pr_debug("sep - doing sha224 init\n");

	/* Clear out task context */
	memset(ta_ctx, 0, sizeof(struct this_task_ctx));

	ta_ctx->sep_used = sep_dev;
	ta_ctx->current_request = SHA224;
	ta_ctx->current_hash_req = req;
	ta_ctx->current_cypher_req = NULL;
	ta_ctx->hash_opmode = SEP_HASH_SHA224;
	ta_ctx->current_hash_stage = HASH_INIT;

	/* lock necessary so that only one entity touches the queues */
	spin_lock_irq(&queue_lock);
	error = crypto_enqueue_request(&sep_queue, &req->base);

	if ((error != 0) && (error != -EINPROGRESS))
		pr_debug(" sep - crypto enqueue failed: %x\n",
			error);
	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
		sep_dequeuer, (void *)&sep_queue);
	if (error1)
		pr_debug(" sep - workqueue submit failed: %x\n",
			error1);
	spin_unlock_irq(&queue_lock);
	/* We return result of crypto enqueue */
	return error;
}
static int sep_sha224_update(struct ahash_request *req)
{
	int error;
	int error1;
	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);

	pr_debug("sep - doing sha224 update\n");

	ta_ctx->sep_used = sep_dev;
	ta_ctx->current_request = SHA224;
	ta_ctx->current_hash_req = req;
	ta_ctx->current_cypher_req = NULL;
	ta_ctx->hash_opmode = SEP_HASH_SHA224;
	ta_ctx->current_hash_stage = HASH_UPDATE;

	/* lock necessary so that only one entity touches the queues */
	spin_lock_irq(&queue_lock);
	error = crypto_enqueue_request(&sep_queue, &req->base);

	if ((error != 0) && (error != -EINPROGRESS))
		pr_debug(" sep - crypto enqueue failed: %x\n",
			error);
	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
		sep_dequeuer, (void *)&sep_queue);
	if (error1)
		pr_debug(" sep - workqueue submit failed: %x\n",
			error1);
	spin_unlock_irq(&queue_lock);
	/* We return result of crypto enqueue */
	return error;
}
static int sep_sha224_final(struct ahash_request *req)
{
	int error;
	int error1;
	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);

	pr_debug("sep - doing sha224 final\n");

	ta_ctx->sep_used = sep_dev;
	ta_ctx->current_request = SHA224;
	ta_ctx->current_hash_req = req;
	ta_ctx->current_cypher_req = NULL;
	ta_ctx->hash_opmode = SEP_HASH_SHA224;
	ta_ctx->current_hash_stage = HASH_FINISH;

	/* lock necessary so that only one entity touches the queues */
	spin_lock_irq(&queue_lock);
	error = crypto_enqueue_request(&sep_queue, &req->base);

	if ((error != 0) && (error != -EINPROGRESS))
		pr_debug(" sep - crypto enqueue failed: %x\n",
			error);
	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
		sep_dequeuer, (void *)&sep_queue);
	if (error1)
		pr_debug(" sep - workqueue submit failed: %x\n",
			error1);
	spin_unlock_irq(&queue_lock);
	/* We return result of crypto enqueue */
	return error;
}
static int sep_sha224_digest(struct ahash_request *req)
{
	int error;
	int error1;
	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);

	pr_debug("sep - doing sha224 digest\n");

	/* Clear out task context */
	memset(ta_ctx, 0, sizeof(struct this_task_ctx));

	ta_ctx->sep_used = sep_dev;
	ta_ctx->current_request = SHA224;
	ta_ctx->current_hash_req = req;
	ta_ctx->current_cypher_req = NULL;
	ta_ctx->hash_opmode = SEP_HASH_SHA224;
	ta_ctx->current_hash_stage = HASH_DIGEST;

	/* lock necessary so that only one entity touches the queues */
	spin_lock_irq(&queue_lock);
	error = crypto_enqueue_request(&sep_queue, &req->base);

	if ((error != 0) && (error != -EINPROGRESS))
		pr_debug(" sep - crypto enqueue failed: %x\n",
			error);
	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
		sep_dequeuer, (void *)&sep_queue);
	if (error1)
		pr_debug(" sep - workqueue submit failed: %x\n",
			error1);
	spin_unlock_irq(&queue_lock);
	/* We return result of crypto enqueue */
	return error;
}
static int sep_sha224_finup(struct ahash_request *req)
{
	int error;
	int error1;
	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);

	pr_debug("sep - doing sha224 finup\n");

	ta_ctx->sep_used = sep_dev;
	ta_ctx->current_request = SHA224;
	ta_ctx->current_hash_req = req;
	ta_ctx->current_cypher_req = NULL;
	ta_ctx->hash_opmode = SEP_HASH_SHA224;
	ta_ctx->current_hash_stage = HASH_FINUP_DATA;

	/* lock necessary so that only one entity touches the queues */
	spin_lock_irq(&queue_lock);
	error = crypto_enqueue_request(&sep_queue, &req->base);

	if ((error != 0) && (error != -EINPROGRESS))
		pr_debug(" sep - crypto enqueue failed: %x\n",
			error);
	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
		sep_dequeuer, (void *)&sep_queue);
	if (error1)
		pr_debug(" sep - workqueue submit failed: %x\n",
			error1);
	spin_unlock_irq(&queue_lock);
	/* We return result of crypto enqueue */
	return error;
}
static int sep_sha256_init(struct ahash_request *req)
{
	int error;
	int error1;
	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);

	pr_debug("sep - doing sha256 init\n");

	/* Clear out task context */
	memset(ta_ctx, 0, sizeof(struct this_task_ctx));

	ta_ctx->sep_used = sep_dev;
	ta_ctx->current_request = SHA256;
	ta_ctx->current_hash_req = req;
	ta_ctx->current_cypher_req = NULL;
	ta_ctx->hash_opmode = SEP_HASH_SHA256;
	ta_ctx->current_hash_stage = HASH_INIT;

	/* lock necessary so that only one entity touches the queues */
	spin_lock_irq(&queue_lock);
	error = crypto_enqueue_request(&sep_queue, &req->base);

	if ((error != 0) && (error != -EINPROGRESS))
		pr_debug(" sep - crypto enqueue failed: %x\n",
			error);
	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
		sep_dequeuer, (void *)&sep_queue);
	if (error1)
		pr_debug(" sep - workqueue submit failed: %x\n",
			error1);
	spin_unlock_irq(&queue_lock);
	/* We return result of crypto enqueue */
	return error;
}
static int sep_sha256_update(struct ahash_request *req)
{
	int error;
	int error1;
	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);

	pr_debug("sep - doing sha256 update\n");

	ta_ctx->sep_used = sep_dev;
	ta_ctx->current_request = SHA256;
	ta_ctx->current_hash_req = req;
	ta_ctx->current_cypher_req = NULL;
	ta_ctx->hash_opmode = SEP_HASH_SHA256;
	ta_ctx->current_hash_stage = HASH_UPDATE;

	/* lock necessary so that only one entity touches the queues */
	spin_lock_irq(&queue_lock);
	error = crypto_enqueue_request(&sep_queue, &req->base);

	if ((error != 0) && (error != -EINPROGRESS))
		pr_debug(" sep - crypto enqueue failed: %x\n",
			error);
	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
		sep_dequeuer, (void *)&sep_queue);
	if (error1)
		pr_debug(" sep - workqueue submit failed: %x\n",
			error1);
	spin_unlock_irq(&queue_lock);
	/* We return result of crypto enqueue */
	return error;
}
static int sep_sha256_final(struct ahash_request *req)
{
	int error;
	int error1;
	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);

	pr_debug("sep - doing sha256 final\n");

	ta_ctx->sep_used = sep_dev;
	ta_ctx->current_request = SHA256;
	ta_ctx->current_hash_req = req;
	ta_ctx->current_cypher_req = NULL;
	ta_ctx->hash_opmode = SEP_HASH_SHA256;
	ta_ctx->current_hash_stage = HASH_FINISH;

	/* lock necessary so that only one entity touches the queues */
	spin_lock_irq(&queue_lock);
	error = crypto_enqueue_request(&sep_queue, &req->base);

	if ((error != 0) && (error != -EINPROGRESS))
		pr_debug(" sep - crypto enqueue failed: %x\n",
			error);
	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
		sep_dequeuer, (void *)&sep_queue);
	if (error1)
		pr_debug(" sep - workqueue submit failed: %x\n",
			error1);
	spin_unlock_irq(&queue_lock);
	/* We return result of crypto enqueue */
	return error;
}
static int sep_sha256_digest(struct ahash_request *req)
{
	int error;
	int error1;
	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);

	pr_debug("sep - doing sha256 digest\n");

	/* Clear out task context */
	memset(ta_ctx, 0, sizeof(struct this_task_ctx));

	ta_ctx->sep_used = sep_dev;
	ta_ctx->current_request = SHA256;
	ta_ctx->current_hash_req = req;
	ta_ctx->current_cypher_req = NULL;
	ta_ctx->hash_opmode = SEP_HASH_SHA256;
	ta_ctx->current_hash_stage = HASH_DIGEST;

	/* lock necessary so that only one entity touches the queues */
	spin_lock_irq(&queue_lock);
	error = crypto_enqueue_request(&sep_queue, &req->base);

	if ((error != 0) && (error != -EINPROGRESS))
		pr_debug(" sep - crypto enqueue failed: %x\n",
			error);
	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
		sep_dequeuer, (void *)&sep_queue);
	if (error1)
		pr_debug(" sep - workqueue submit failed: %x\n",
			error1);
	spin_unlock_irq(&queue_lock);
	/* We return result of crypto enqueue */
	return error;
}
static int sep_sha256_finup(struct ahash_request *req)
{
	int error;
	int error1;
	struct this_task_ctx *ta_ctx = ahash_request_ctx(req);

	pr_debug("sep - doing sha256 finup\n");

	ta_ctx->sep_used = sep_dev;
	ta_ctx->current_request = SHA256;
	ta_ctx->current_hash_req = req;
	ta_ctx->current_cypher_req = NULL;
	ta_ctx->hash_opmode = SEP_HASH_SHA256;
	ta_ctx->current_hash_stage = HASH_FINUP_DATA;

	/* lock necessary so that only one entity touches the queues */
	spin_lock_irq(&queue_lock);
	error = crypto_enqueue_request(&sep_queue, &req->base);

	if ((error != 0) && (error != -EINPROGRESS))
		pr_debug(" sep - crypto enqueue failed: %x\n",
			error);
	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
		sep_dequeuer, (void *)&sep_queue);
	if (error1)
		pr_debug(" sep - workqueue submit failed: %x\n",
			error1);
	spin_unlock_irq(&queue_lock);
	/* We return result of crypto enqueue */
	return error;
}
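
/**
 *	sep_crypto_init
 *	@tfm: pointer to struct crypto_tfm being initialized
 *	Sets the size of the per-request context for the ablkcipher
 *	algorithms registered below.
 */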
static int sep_crypto_init(struct crypto_tfm *tfm)
{
	const char *alg_name = crypto_tfm_alg_name(tfm);

	if (alg_name == NULL)
		pr_debug("sep_crypto_init alg is NULL\n");
	else
		pr_debug("sep_crypto_init alg is %s\n", alg_name);

	tfm->crt_ablkcipher.reqsize = sizeof(struct this_task_ctx);
	return 0;
}

static void sep_crypto_exit(struct crypto_tfm *tfm)
{
	pr_debug("sep_crypto_exit\n");
}
static int sep_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
	unsigned int keylen)
{
	struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(tfm);

	pr_debug("sep aes setkey\n");

	pr_debug("tfm is %p sctx is %p\n", tfm, sctx);
	switch (keylen) {
	case SEP_AES_KEY_128_SIZE:
		sctx->aes_key_size = AES_128;
		break;
	case SEP_AES_KEY_192_SIZE:
		sctx->aes_key_size = AES_192;
		break;
	case SEP_AES_KEY_256_SIZE:
		sctx->aes_key_size = AES_256;
		break;
	case SEP_AES_KEY_512_SIZE:
		sctx->aes_key_size = AES_512;
		break;
	default:
		pr_debug("invalid sep aes key size %x\n",
			keylen);
		return -EINVAL;
	}

	memset(&sctx->key.aes, 0, sizeof(u32) *
		SEP_AES_MAX_KEY_SIZE_WORDS);
	memcpy(&sctx->key.aes, key, keylen);
	sctx->keylen = keylen;
	/* Indicate to encrypt/decrypt function to send key to SEP */
	sctx->key_sent = 0;

	return 0;
}
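
/*
 * The cipher entry points below follow the same enqueue pattern as
 * the hash entry points; they differ only in the encrypt/decrypt
 * mode, the ECB/CBC opmode and the AES/DES opcodes stored in the
 * task context.
 */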
static int sep_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	int error;
	int error1;
	struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);

	pr_debug("sep - doing aes ecb encrypt\n");

	/* Clear out task context */
	memset(ta_ctx, 0, sizeof(struct this_task_ctx));

	ta_ctx->sep_used = sep_dev;
	ta_ctx->current_request = AES_ECB;
	ta_ctx->current_hash_req = NULL;
	ta_ctx->current_cypher_req = req;
	ta_ctx->aes_encmode = SEP_AES_ENCRYPT;
	ta_ctx->aes_opmode = SEP_AES_ECB;
	ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
	ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;

	/* lock necessary so that only one entity touches the queues */
	spin_lock_irq(&queue_lock);
	error = crypto_enqueue_request(&sep_queue, &req->base);

	if ((error != 0) && (error != -EINPROGRESS))
		pr_debug(" sep - crypto enqueue failed: %x\n",
			error);
	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
		sep_dequeuer, (void *)&sep_queue);
	if (error1)
		pr_debug(" sep - workqueue submit failed: %x\n",
			error1);
	spin_unlock_irq(&queue_lock);
	/* We return result of crypto enqueue */
	return error;
}
static int sep_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	int error;
	int error1;
	struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);

	pr_debug("sep - doing aes ecb decrypt\n");

	/* Clear out task context */
	memset(ta_ctx, 0, sizeof(struct this_task_ctx));

	ta_ctx->sep_used = sep_dev;
	ta_ctx->current_request = AES_ECB;
	ta_ctx->current_hash_req = NULL;
	ta_ctx->current_cypher_req = req;
	ta_ctx->aes_encmode = SEP_AES_DECRYPT;
	ta_ctx->aes_opmode = SEP_AES_ECB;
	ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
	ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;

	/* lock necessary so that only one entity touches the queues */
	spin_lock_irq(&queue_lock);
	error = crypto_enqueue_request(&sep_queue, &req->base);

	if ((error != 0) && (error != -EINPROGRESS))
		pr_debug(" sep - crypto enqueue failed: %x\n",
			error);
	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
		sep_dequeuer, (void *)&sep_queue);
	if (error1)
		pr_debug(" sep - workqueue submit failed: %x\n",
			error1);
	spin_unlock_irq(&queue_lock);
	/* We return result of crypto enqueue */
	return error;
}
static int sep_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	int error;
	int error1;
	struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
	struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));

	pr_debug("sep - doing aes cbc encrypt\n");

	/* Clear out task context */
	memset(ta_ctx, 0, sizeof(struct this_task_ctx));

	pr_debug("tfm is %p sctx is %p and ta_ctx is %p\n",
		crypto_ablkcipher_reqtfm(req), sctx, ta_ctx);

	ta_ctx->sep_used = sep_dev;
	ta_ctx->current_request = AES_CBC;
	ta_ctx->current_hash_req = NULL;
	ta_ctx->current_cypher_req = req;
	ta_ctx->aes_encmode = SEP_AES_ENCRYPT;
	ta_ctx->aes_opmode = SEP_AES_CBC;
	ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
	ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;

	/* lock necessary so that only one entity touches the queues */
	spin_lock_irq(&queue_lock);
	error = crypto_enqueue_request(&sep_queue, &req->base);

	if ((error != 0) && (error != -EINPROGRESS))
		pr_debug(" sep - crypto enqueue failed: %x\n",
			error);
	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
		sep_dequeuer, (void *)&sep_queue);
	if (error1)
		pr_debug(" sep - workqueue submit failed: %x\n",
			error1);
	spin_unlock_irq(&queue_lock);
	/* We return result of crypto enqueue */
	return error;
}
static int sep_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	int error;
	int error1;
	struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
	struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));

	pr_debug("sep - doing aes cbc decrypt\n");

	pr_debug("tfm is %p sctx is %p and ta_ctx is %p\n",
		crypto_ablkcipher_reqtfm(req), sctx, ta_ctx);

	/* Clear out task context */
	memset(ta_ctx, 0, sizeof(struct this_task_ctx));

	ta_ctx->sep_used = sep_dev;
	ta_ctx->current_request = AES_CBC;
	ta_ctx->current_hash_req = NULL;
	ta_ctx->current_cypher_req = req;
	ta_ctx->aes_encmode = SEP_AES_DECRYPT;
	ta_ctx->aes_opmode = SEP_AES_CBC;
	ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
	ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;

	/* lock necessary so that only one entity touches the queues */
	spin_lock_irq(&queue_lock);
	error = crypto_enqueue_request(&sep_queue, &req->base);

	if ((error != 0) && (error != -EINPROGRESS))
		pr_debug(" sep - crypto enqueue failed: %x\n",
			error);
	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
		sep_dequeuer, (void *)&sep_queue);
	if (error1)
		pr_debug(" sep - workqueue submit failed: %x\n",
			error1);
	spin_unlock_irq(&queue_lock);
	/* We return result of crypto enqueue */
	return error;
}
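
/**
 *	sep_des_setkey
 *	@tfm: the ablkcipher transform
 *	@key: key material
 *	@keylen: length of the key in bytes
 *	Accepts one-, two- or three-key DES, rejects weak keys when
 *	the transform asks for that check, and caches the key for the
 *	next encrypt/decrypt call to hand to the SEP.
 */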
static int sep_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
	unsigned int keylen)
{
	struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(tfm);
	struct crypto_tfm *ctfm = crypto_ablkcipher_tfm(tfm);
	u32 *flags = &ctfm->crt_flags;

	pr_debug("sep des setkey\n");

	switch (keylen) {
	case DES_KEY_SIZE:
		sctx->des_nbr_keys = DES_KEY_1;
		break;
	case DES_KEY_SIZE * 2:
		sctx->des_nbr_keys = DES_KEY_2;
		break;
	case DES_KEY_SIZE * 3:
		sctx->des_nbr_keys = DES_KEY_3;
		break;
	default:
		pr_debug("invalid key size %x\n",
			keylen);
		return -EINVAL;
	}

	if ((*flags & CRYPTO_TFM_REQ_WEAK_KEY) &&
		(sep_weak_key(key, keylen))) {

		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
		pr_debug("weak key\n");
		return -EINVAL;
	}

	memset(&sctx->key.des, 0, sizeof(struct sep_des_key));
	memcpy(&sctx->key.des.key1, key, keylen);
	sctx->keylen = keylen;
	/* Indicate to encrypt/decrypt function to send key to SEP */
	sctx->key_sent = 0;

	return 0;
}
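
/* Queue a DES-ECB encrypt; serviced later by sep_crypto_block via the dequeuer */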
static int sep_des_ebc_encrypt(struct ablkcipher_request *req)
{
	int error;
	int error1;
	struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);

	pr_debug("sep - doing des ecb encrypt\n");

	/* Clear out task context */
	memset(ta_ctx, 0, sizeof(struct this_task_ctx));

	ta_ctx->sep_used = sep_dev;
	ta_ctx->current_request = DES_ECB;
	ta_ctx->current_hash_req = NULL;
	ta_ctx->current_cypher_req = req;
	ta_ctx->des_encmode = SEP_DES_ENCRYPT;
	ta_ctx->des_opmode = SEP_DES_ECB;
	ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
	ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;

	/* lock necessary so that only one entity touches the queues */
	spin_lock_irq(&queue_lock);
	error = crypto_enqueue_request(&sep_queue, &req->base);

	if ((error != 0) && (error != -EINPROGRESS))
		pr_debug(" sep - crypto enqueue failed: %x\n",
			error);
	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
		sep_dequeuer, (void *)&sep_queue);
	if (error1)
		pr_debug(" sep - workqueue submit failed: %x\n",
			error1);
	spin_unlock_irq(&queue_lock);
	/* We return result of crypto enqueue */
	return error;
}
static int sep_des_ebc_decrypt(struct ablkcipher_request *req)
{
	int error;
	int error1;
	struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);

	pr_debug("sep - doing des ecb decrypt\n");

	/* Clear out task context */
	memset(ta_ctx, 0, sizeof(struct this_task_ctx));

	ta_ctx->sep_used = sep_dev;
	ta_ctx->current_request = DES_ECB;
	ta_ctx->current_hash_req = NULL;
	ta_ctx->current_cypher_req = req;
	ta_ctx->des_encmode = SEP_DES_DECRYPT;
	ta_ctx->des_opmode = SEP_DES_ECB;
	ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
	ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;

	/* lock necessary so that only one entity touches the queues */
	spin_lock_irq(&queue_lock);
	error = crypto_enqueue_request(&sep_queue, &req->base);

	if ((error != 0) && (error != -EINPROGRESS))
		pr_debug(" sep - crypto enqueue failed: %x\n",
			error);
	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
		sep_dequeuer, (void *)&sep_queue);
	if (error1)
		pr_debug(" sep - workqueue submit failed: %x\n",
			error1);
	spin_unlock_irq(&queue_lock);
	/* We return result of crypto enqueue */
	return error;
}
static int sep_des_cbc_encrypt(struct ablkcipher_request *req)
{
	int error;
	int error1;
	struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);

	pr_debug("sep - doing des cbc encrypt\n");

	/* Clear out task context */
	memset(ta_ctx, 0, sizeof(struct this_task_ctx));

	ta_ctx->sep_used = sep_dev;
	ta_ctx->current_request = DES_CBC;
	ta_ctx->current_hash_req = NULL;
	ta_ctx->current_cypher_req = req;
	ta_ctx->des_encmode = SEP_DES_ENCRYPT;
	ta_ctx->des_opmode = SEP_DES_CBC;
	ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
	ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;

	/* lock necessary so that only one entity touches the queues */
	spin_lock_irq(&queue_lock);
	error = crypto_enqueue_request(&sep_queue, &req->base);

	if ((error != 0) && (error != -EINPROGRESS))
		pr_debug(" sep - crypto enqueue failed: %x\n",
			error);
	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
		sep_dequeuer, (void *)&sep_queue);
	if (error1)
		pr_debug(" sep - workqueue submit failed: %x\n",
			error1);
	spin_unlock_irq(&queue_lock);
	/* We return result of crypto enqueue */
	return error;
}
static int sep_des_cbc_decrypt(struct ablkcipher_request *req)
{
	int error;
	int error1;
	struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);

	pr_debug("sep - doing des cbc decrypt\n");

	/* Clear out task context */
	memset(ta_ctx, 0, sizeof(struct this_task_ctx));

	ta_ctx->sep_used = sep_dev;
	ta_ctx->current_request = DES_CBC;
	ta_ctx->current_hash_req = NULL;
	ta_ctx->current_cypher_req = req;
	ta_ctx->des_encmode = SEP_DES_DECRYPT;
	ta_ctx->des_opmode = SEP_DES_CBC;
	ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
	ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;

	/* lock necessary so that only one entity touches the queues */
	spin_lock_irq(&queue_lock);
	error = crypto_enqueue_request(&sep_queue, &req->base);

	if ((error != 0) && (error != -EINPROGRESS))
		pr_debug(" sep - crypto enqueue failed: %x\n",
			error);
	error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
		sep_dequeuer, (void *)&sep_queue);
	if (error1)
		pr_debug(" sep - workqueue submit failed: %x\n",
			error1);
	spin_unlock_irq(&queue_lock);
	/* We return result of crypto enqueue */
	return error;
}
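
/* Table of hash algorithms exported to the kernel crypto API */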
static struct ahash_alg hash_algs[] = {
{
	.init		= sep_sha1_init,
	.update		= sep_sha1_update,
	.final		= sep_sha1_final,
	.digest		= sep_sha1_digest,
	.finup		= sep_sha1_finup,
	.halg		= {
		.digestsize	= SHA1_DIGEST_SIZE,
		.base		= {
			.cra_name		= "sha1",
			.cra_driver_name	= "sha1-sep",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
							CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct sep_system_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= sep_hash_cra_init,
			.cra_exit		= sep_hash_cra_exit,
		}
	}
},
{
	.init		= sep_md5_init,
	.update		= sep_md5_update,
	.final		= sep_md5_final,
	.digest		= sep_md5_digest,
	.finup		= sep_md5_finup,
	.halg		= {
		.digestsize	= MD5_DIGEST_SIZE,
		.base		= {
			.cra_name		= "md5",
			.cra_driver_name	= "md5-sep",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
							CRYPTO_ALG_ASYNC,
			.cra_blocksize		= MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct sep_system_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= sep_hash_cra_init,
			.cra_exit		= sep_hash_cra_exit,
		}
	}
},
{
	.init		= sep_sha224_init,
	.update		= sep_sha224_update,
	.final		= sep_sha224_final,
	.digest		= sep_sha224_digest,
	.finup		= sep_sha224_finup,
	.halg		= {
		.digestsize	= SHA224_DIGEST_SIZE,
		.base		= {
			.cra_name		= "sha224",
			.cra_driver_name	= "sha224-sep",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
							CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA224_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct sep_system_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= sep_hash_cra_init,
			.cra_exit		= sep_hash_cra_exit,
		}
	}
},
{
	.init		= sep_sha256_init,
	.update		= sep_sha256_update,
	.final		= sep_sha256_final,
	.digest		= sep_sha256_digest,
	.finup		= sep_sha256_finup,
	.halg		= {
		.digestsize	= SHA256_DIGEST_SIZE,
		.base		= {
			.cra_name		= "sha256",
			.cra_driver_name	= "sha256-sep",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
							CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct sep_system_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= sep_hash_cra_init,
			.cra_exit		= sep_hash_cra_exit,
		}
	}
}
};
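
/* Table of block cipher algorithms exported to the kernel crypto API */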
static struct crypto_alg crypto_algs[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-sep",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sep_system_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sep_crypto_init,
	.cra_exit		= sep_crypto_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= sep_aes_setkey,
		.encrypt	= sep_aes_ecb_encrypt,
		.decrypt	= sep_aes_ecb_decrypt,
	}
},
{
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-sep",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sep_system_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sep_crypto_init,
	.cra_exit		= sep_crypto_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= sep_aes_setkey,
		.encrypt	= sep_aes_cbc_encrypt,
		.ivsize		= AES_BLOCK_SIZE,
		.decrypt	= sep_aes_cbc_decrypt,
	}
},
{
	.cra_name		= "ecb(des)",
	.cra_driver_name	= "ecb-des-sep",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= DES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sep_system_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sep_crypto_init,
	.cra_exit		= sep_crypto_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= DES_KEY_SIZE,
		.max_keysize	= DES_KEY_SIZE,
		.setkey		= sep_des_setkey,
		.encrypt	= sep_des_ebc_encrypt,
		.decrypt	= sep_des_ebc_decrypt,
	}
},
{
	.cra_name		= "cbc(des)",
	.cra_driver_name	= "cbc-des-sep",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= DES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sep_system_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sep_crypto_init,
	.cra_exit		= sep_crypto_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= DES_KEY_SIZE,
		.max_keysize	= DES_KEY_SIZE,
		.setkey		= sep_des_setkey,
		.encrypt	= sep_des_cbc_encrypt,
		.ivsize		= DES_BLOCK_SIZE,
		.decrypt	= sep_des_cbc_decrypt,
	}
},
{
	.cra_name		= "ecb(des3_ede)",
	.cra_driver_name	= "ecb-des3-ede-sep",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= DES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sep_system_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sep_crypto_init,
	.cra_exit		= sep_crypto_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= DES3_EDE_KEY_SIZE,
		.max_keysize	= DES3_EDE_KEY_SIZE,
		.setkey		= sep_des_setkey,
		.encrypt	= sep_des_ebc_encrypt,
		.decrypt	= sep_des_ebc_decrypt,
	}
},
{
	.cra_name		= "cbc(des3_ede)",
	.cra_driver_name	= "cbc-des3-ede-sep",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= DES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sep_system_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sep_crypto_init,
	.cra_exit		= sep_crypto_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= DES3_EDE_KEY_SIZE,
		.max_keysize	= DES3_EDE_KEY_SIZE,
		.setkey		= sep_des_setkey,
		.encrypt	= sep_des_cbc_encrypt,
		.ivsize		= DES_BLOCK_SIZE,
		.decrypt	= sep_des_cbc_decrypt,
	}
}
};
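
/**
 *	sep_crypto_setup
 *	Called at driver load: sets up the finish tasklet, the request
 *	queue and its lock, the single-threaded workqueue, and
 *	registers every algorithm in the two tables above.
 */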
int sep_crypto_setup(void)
{
	int err, i, j, k;

	tasklet_init(&sep_dev->finish_tasklet, sep_finish,
		(unsigned long)sep_dev);

	crypto_init_queue(&sep_queue, SEP_QUEUE_LENGTH);

	sep_dev->workqueue = create_singlethread_workqueue(
		"sep_crypto_workqueue");
	if (!sep_dev->workqueue) {
		dev_warn(&sep_dev->pdev->dev, "can't create workqueue\n");
		return -ENOMEM;
	}

	spin_lock_init(&queue_lock);

	err = 0;
	for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
		err = crypto_register_ahash(&hash_algs[i]);
		if (err)
			goto err_algs;
	}

	err = 0;
	for (j = 0; j < ARRAY_SIZE(crypto_algs); j++) {
		err = crypto_register_alg(&crypto_algs[j]);
		if (err)
			goto err_crypto_algs;
	}

	return err;

err_algs:
	for (k = 0; k < i; k++)
		crypto_unregister_ahash(&hash_algs[k]);
	return err;

err_crypto_algs:
	for (k = 0; k < j; k++)
		crypto_unregister_alg(&crypto_algs[k]);
	/* roll back the hash registrations done above as well */
	for (k = 0; k < ARRAY_SIZE(hash_algs); k++)
		crypto_unregister_ahash(&hash_algs[k]);
	return err;
}
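
/**
 *	sep_crypto_takedown
 *	Called at driver unload: unregisters all algorithms and kills
 *	the finish tasklet.
 */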
void sep_crypto_takedown(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
		crypto_unregister_ahash(&hash_algs[i]);
	for (i = 0; i < ARRAY_SIZE(crypto_algs); i++)
		crypto_unregister_alg(&crypto_algs[i]);

	tasklet_kill(&sep_dev->finish_tasklet);
}