1 // SPDX-License-Identifier: GPL-2.0
/*
 * Stream co-processor driver for the ETRAX FS
 *
 * Copyright (C) 2003-2007 Axis Communications AB
 */
8 #include <linux/init.h>
9 #include <linux/sched.h>
10 #include <linux/module.h>
11 #include <linux/slab.h>
12 #include <linux/string.h>
15 #include <linux/spinlock.h>
16 #include <linux/stddef.h>
18 #include <linux/uaccess.h>
20 #include <linux/atomic.h>
22 #include <linux/list.h>
23 #include <linux/interrupt.h>
25 #include <asm/signal.h>
29 #include <hwregs/dma.h>
30 #include <hwregs/reg_map.h>
31 #include <hwregs/reg_rdwr.h>
32 #include <hwregs/intr_vect_defs.h>
34 #include <hwregs/strcop.h>
35 #include <hwregs/strcop_defs.h>
36 #include <cryptocop.h>
41 #define IN_DMA_INST regi_dma9
42 #define OUT_DMA_INST regi_dma8
43 #define DMA_IRQ DMA9_INTR_VECT
47 #define IN_DMA_INST regi_dma3
48 #define OUT_DMA_INST regi_dma2
49 #define DMA_IRQ DMA3_INTR_VECT
52 #define DESCR_ALLOC_PAD (31)
/*
 * Driver wrapper around one hardware DMA data descriptor.  The hw
 * descriptor lives inside dma_descr_buf; dma_descr points into that
 * buffer, re-aligned to a 32-byte boundary by alloc_cdesc().
 */
struct cryptocop_dma_desc {
        char *free_buf; /* If non-null will be kfreed in free_cdesc() */

        dma_descr_data *dma_descr;      /* 32-byte-aligned pointer into dma_descr_buf */

        /* Raw storage, over-allocated by DESCR_ALLOC_PAD so the aligned
         * pointer always fits inside. */
        unsigned char dma_descr_buf[sizeof(dma_descr_data) + DESCR_ALLOC_PAD];

        unsigned int from_pool:1; /* If 1 'allocated' from the descriptor pool. */
        struct cryptocop_dma_desc *next;        /* chain link (free list or descriptor chain) */
/* NOTE(review): closing "};" was lost in this extract. */
/*
 * Driver-internal representation of one operation: the hardware DMA
 * contexts and the in/out descriptor chains built for it.
 */
struct cryptocop_int_operation {
        cryptocop_session_id sid;       /* owning session */

        dma_descr_context ctx_out;      /* hw DMA context, out (towards cryptocop) */
        dma_descr_context ctx_in;       /* hw DMA context, in (from cryptocop) */

        /* DMA descriptors allocated by driver. */
        struct cryptocop_dma_desc *cdesc_out;
        struct cryptocop_dma_desc *cdesc_in;

        /* Strcop config to use. */
        cryptocop_3des_mode tdes_mode;
        cryptocop_csum_type csum_mode;

        /* DMA descrs provided by consumer. */
        dma_descr_data *ddesc_out;
        dma_descr_data *ddesc_in;
/* NOTE(review): an alloc_ptr member is referenced elsewhere in this file
 * ((*int_op)->alloc_ptr) but its declaration and the closing "};" were lost
 * in this extract. */
/*
 * Per-transform bookkeeping used while building the DMA lists for one
 * operation; one instance per unit (cipher, digest, checksum).
 */
struct cryptocop_tfrm_ctx {
        cryptocop_tfrm_id tid;
        unsigned int blocklength;       /* transform block size in bytes */

        unsigned int start_ix;          /* out-stream byte index where this unit starts — TODO confirm exact semantics */

        struct cryptocop_tfrm_cfg *tcfg;        /* consumer-supplied per-operation cfg */
        struct cryptocop_transform_ctx *tctx;   /* session-level transform state */

        unsigned char previous_src;     /* source unit in the previous descriptor cfg */
        unsigned char current_src;      /* source unit in the current descriptor cfg */

        /* Values to use in metadata out. */
        unsigned char hash_conf;
        unsigned char hash_mode;
        unsigned char ciph_conf;
        unsigned char cbcmode;
        unsigned char decrypt;

        unsigned int requires_padding:1;        /* unit needs a pad descriptor at end (hash/csum) */
        unsigned int strict_block_length:1;     /* unit only accepts whole blocks (ciphers) */
        unsigned int active:1;                  /* unit takes part in the current operation */

        /* Pad (input) descriptors to put in the DMA out list when the transform
         * output is put on the DMA in list. */
        struct cryptocop_dma_desc *pad_descs;

        struct cryptocop_tfrm_ctx *prev_src;    /* unit sourced from previously (NULL == DMA out) */
        struct cryptocop_tfrm_ctx *curr_src;    /* unit sourced from currently (NULL == DMA out) */

        unsigned char unit_no;  /* hw source-unit id (src_dma/src_des/src_aes/...) */
/* NOTE(review): members referenced elsewhere in this file (consumed, produced,
 * done) and the closing "};" were lost in this extract. */
/* Per-open-file data: list of sessions created through this fd —
 * presumably chained off file private data; verify against caller. */
struct cryptocop_private {
        cryptocop_session_id sid;
        struct cryptocop_private *next; /* singly-linked list */
/* NOTE(review): closing "};" was lost in this extract. */
/* Per-transform state stored in a session. */
struct cryptocop_transform_ctx {
        struct cryptocop_transform_init init;   /* algorithm, key, mode from setup */
        /* Cached AES decryption key schedule, derived lazily in
         * setup_key_dl_desc() via get_aes_decrypt_key(). */
        unsigned char dec_key[CRYPTOCOP_MAX_KEY_LENGTH];
        unsigned int dec_key_set:1;     /* 1 once dec_key holds a valid schedule */

        struct cryptocop_transform_ctx *next;   /* next transform in the session */
/* NOTE(review): closing "};" was lost in this extract. */
/* One crypto session: its id plus the transforms configured in it. */
struct cryptocop_session {
        cryptocop_session_id sid;       /* unique id, handed out from next_sid */

        struct cryptocop_transform_ctx *tfrm_ctx;       /* list of configured transforms */

        struct cryptocop_session *next; /* link in the global cryptocop_sessions list */
/* NOTE(review): closing "};" was lost in this extract. */
/* Priority levels for jobs sent to the cryptocop.  Checksum operations from
   kernel have highest priority since TCPIP stack processing must not
   be stalled waiting for them. */
/* NOTE(review): the "typedef enum {" opener was lost in this extract. */
        cryptocop_prio_kernel_csum = 0,
        cryptocop_prio_kernel = 1,
        cryptocop_prio_user = 2,
        cryptocop_prio_no_prios = 3     /* sentinel: number of levels, used to size queue arrays */
} cryptocop_queue_priority;
/* One FIFO of pending jobs for a single priority level. */
struct cryptocop_prio_queue {
        struct list_head jobs;          /* queued cryptocop_prio_job entries (via their node) */
        cryptocop_queue_priority prio;  /* which priority level this queue serves */
/* NOTE(review): closing "};" was lost in this extract. */
/* One queued job: the consumer's operation plus its driver-internal form. */
struct cryptocop_prio_job {
        struct list_head node;          /* linkage in a cryptocop_prio_queue.jobs list */
        cryptocop_queue_priority prio;  /* queue the job was submitted to */

        struct cryptocop_operation *oper;       /* consumer-supplied operation */
        struct cryptocop_int_operation *iop;    /* driver-built DMA representation */
/* NOTE(review): closing "};" was lost in this extract. */
/* Completion-callback context for ioctl-issued jobs. */
struct ioctl_job_cb_ctx {
        unsigned int processed:1;       /* set once the job's response was handled — TODO confirm */
/* NOTE(review): closing "};" was lost in this extract. */
/* All live sessions; guarded by cryptocop_sessions_lock. */
static struct cryptocop_session *cryptocop_sessions = NULL;
spinlock_t cryptocop_sessions_lock;

/* Next Session ID to assign. */
static cryptocop_session_id next_sid = 1;

/* Pad for checksum. */
static const char csum_zero_pad[1] = {0x00};

/* Trash buffer for mem2mem operations. */
#define MEM2MEM_DISCARD_BUF_LENGTH  (512)
static unsigned char mem2mem_discard_buf[MEM2MEM_DISCARD_BUF_LENGTH];

/* Descriptor pool. */
/* FIXME Tweak this value. */
#define CRYPTOCOP_DESCRIPTOR_POOL_SIZE   (100)
static struct cryptocop_dma_desc descr_pool[CRYPTOCOP_DESCRIPTOR_POOL_SIZE];
static struct cryptocop_dma_desc *descr_pool_free_list;         /* head of the free list */
static int descr_pool_no_free;                                  /* count of free pool entries */
static spinlock_t descr_pool_lock;                              /* guards the two above */

/* Lock to stop cryptocop to start processing of a new operation. The holder
   of this lock MUST call cryptocop_start_job() after it is unlocked. */
spinlock_t cryptocop_process_lock;

/* One job queue per priority level. */
static struct cryptocop_prio_queue cryptocop_job_queues[cryptocop_prio_no_prios];
static spinlock_t cryptocop_job_queue_lock;
static struct cryptocop_prio_job *cryptocop_running_job = NULL; /* job currently on the hw, if any */
static spinlock_t running_job_lock;

/* The interrupt handler appends completed jobs to this list. The scheduled
 * tasklet removes them upon sending the response to the crypto consumer. */
static struct list_head cryptocop_completed_jobs;
static spinlock_t cryptocop_completed_jobs_lock;

/* Waitqueue for ioctl callers blocking on job completion. */
DECLARE_WAIT_QUEUE_HEAD(cryptocop_ioc_process_wq);
214 /** Local functions. **/
216 static int cryptocop_open(struct inode
*, struct file
*);
218 static int cryptocop_release(struct inode
*, struct file
*);
220 static long cryptocop_ioctl(struct file
*file
,
221 unsigned int cmd
, unsigned long arg
);
223 static void cryptocop_start_job(void);
225 static int cryptocop_job_queue_insert(cryptocop_queue_priority prio
, struct cryptocop_operation
*operation
);
226 static int cryptocop_job_setup(struct cryptocop_prio_job
**pj
, struct cryptocop_operation
*operation
);
228 static int cryptocop_job_queue_init(void);
229 static void cryptocop_job_queue_close(void);
231 static int create_md5_pad(int alloc_flag
, unsigned long long hashed_length
, char **pad
, size_t *pad_length
);
233 static int create_sha1_pad(int alloc_flag
, unsigned long long hashed_length
, char **pad
, size_t *pad_length
);
235 static int transform_ok(struct cryptocop_transform_init
*tinit
);
237 static struct cryptocop_session
*get_session(cryptocop_session_id sid
);
239 static struct cryptocop_transform_ctx
*get_transform_ctx(struct cryptocop_session
*sess
, cryptocop_tfrm_id tid
);
241 static void delete_internal_operation(struct cryptocop_int_operation
*iop
);
243 static void get_aes_decrypt_key(unsigned char *dec_key
, const unsigned char *key
, unsigned int keylength
);
245 static int init_stream_coprocessor(void);
247 static void __exit
exit_stream_coprocessor(void);
252 #define DEBUG_API(s) s
253 static void print_cryptocop_operation(struct cryptocop_operation
*cop
);
254 static void print_dma_descriptors(struct cryptocop_int_operation
*iop
);
255 static void print_strcop_crypto_op(struct strcop_crypto_op
*cop
);
256 static void print_lock_status(void);
257 static void print_user_dma_lists(struct cryptocop_dma_list_operation
*dma_op
);
/* Driver-local assert: panic with the failed expression's text.
 * Wrapped in do { } while (0) WITHOUT a trailing semicolon so that
 * "assert(x);" expands to exactly one statement and stays legal in
 * "if (c) assert(x); else ..." — the original trailing ';' produced a
 * second (empty) statement and broke such uses. */
#define assert(s) do { if (!(s)) panic(#s); } while (0)
266 /* Transform constants. */
267 #define DES_BLOCK_LENGTH (8)
268 #define AES_BLOCK_LENGTH (16)
269 #define MD5_BLOCK_LENGTH (64)
270 #define SHA1_BLOCK_LENGTH (64)
271 #define CSUM_BLOCK_LENGTH (2)
272 #define MD5_STATE_LENGTH (16)
273 #define SHA1_STATE_LENGTH (20)
275 /* The device number. */
276 #define CRYPTOCOP_MAJOR (254)
277 #define CRYPTOCOP_MINOR (0)
/* Character-device entry points for the cryptocop device node. */
const struct file_operations cryptocop_fops = {
        .owner          = THIS_MODULE,
        .open           = cryptocop_open,
        .release        = cryptocop_release,
        .unlocked_ioctl = cryptocop_ioctl,
        .llseek         = noop_llseek,  /* device is not seekable */
/* NOTE(review): closing "};" was lost in this extract. */
/*
 * free_cdesc() - release one driver DMA descriptor wrapper.
 *
 * Frees the attached data buffer (if any), then returns the descriptor
 * to the static pool when it came from the pool.
 */
static void free_cdesc(struct cryptocop_dma_desc *cdesc)
/* NOTE(review): function opening "{" was lost in this extract. */
        DEBUG(printk("free_cdesc: cdesc 0x%p, from_pool=%d\n", cdesc, cdesc->from_pool));
        /* kfree(NULL) is a no-op, so an unset free_buf is harmless. */
        kfree(cdesc->free_buf);

        if (cdesc->from_pool) {
                unsigned long int flags;

                /* irqsave: pool is presumably also touched from irq context — TODO confirm */
                spin_lock_irqsave(&descr_pool_lock, flags);
                /* Push back on the free-list head. */
                cdesc->next = descr_pool_free_list;
                descr_pool_free_list = cdesc;
                ++descr_pool_no_free;
                spin_unlock_irqrestore(&descr_pool_lock, flags);
/* NOTE(review): the else-branch (releasing a kmalloc'd descriptor) and the
 * closing braces were lost in this extract. */
/*
 * alloc_cdesc() - get a driver DMA descriptor wrapper.
 *
 * GFP_ATOMIC callers are served from the static descr_pool; others get
 * a kmalloc'd wrapper.  Either way the embedded hardware descriptor is
 * aligned to 32 bytes and all its control fields reset.
 */
static struct cryptocop_dma_desc *alloc_cdesc(int alloc_flag)
        /* Atomic callers must not sleep, so take from the pool. */
        int use_pool = (alloc_flag & GFP_ATOMIC) ? 1 : 0;
        struct cryptocop_dma_desc *cdesc;

/* NOTE(review): the "if (use_pool) {" line was lost in this extract; the
 * pool-allocation branch follows. */
        unsigned long int flags;
        spin_lock_irqsave(&descr_pool_lock, flags);
        if (!descr_pool_free_list) {
                spin_unlock_irqrestore(&descr_pool_lock, flags);
                DEBUG_API(printk("alloc_cdesc: pool is empty\n"));
                /* NOTE(review): failure return was lost in this extract. */
        cdesc = descr_pool_free_list;
        descr_pool_free_list = descr_pool_free_list->next;
        --descr_pool_no_free;
        spin_unlock_irqrestore(&descr_pool_lock, flags);
        cdesc->from_pool = 1;
/* NOTE(review): "} else {" was lost; kmalloc branch follows. */
        cdesc = kmalloc(sizeof(struct cryptocop_dma_desc), alloc_flag);
        /* NOTE(review): the if (!cdesc) guard around this error path was lost. */
                DEBUG_API(printk("alloc_cdesc: kmalloc\n"));
        cdesc->from_pool = 0;

        /* Align the embedded hw descriptor to 32 bytes (clear low 5 bits);
         * DESCR_ALLOC_PAD (31) guarantees the aligned pointer still lands
         * inside dma_descr_buf. */
        cdesc->dma_descr = (dma_descr_data*)(((unsigned long int)cdesc + offsetof(struct cryptocop_dma_desc, dma_descr_buf) + DESCR_ALLOC_PAD) & ~0x0000001F);

        cdesc->free_buf = NULL;

        /* Reset all hardware descriptor control fields. */
        cdesc->dma_descr->out_eop = 0;
        cdesc->dma_descr->in_eop = 0;
        cdesc->dma_descr->intr = 0;
        cdesc->dma_descr->eol = 0;
        cdesc->dma_descr->wait = 0;
        cdesc->dma_descr->buf = NULL;
        cdesc->dma_descr->after = NULL;

        DEBUG_API(printk("alloc_cdesc: return 0x%p, cdesc->dma_descr=0x%p, from_pool=%d\n", cdesc, cdesc->dma_descr, cdesc->from_pool));
/* NOTE(review): return statement and closing brace were lost in this extract. */
/*
 * setup_descr_chain() - link a chain of driver descriptors' hardware
 * descriptors via physical-address "next" pointers and NUL-terminate
 * the last one.
 */
static void setup_descr_chain(struct cryptocop_dma_desc *cd)
        DEBUG(printk("setup_descr_chain: entering\n"));
/* NOTE(review): the loop and the branch selecting between the two
 * assignments below were lost in this extract. */
        /* Chain to the successor's hw descriptor; the DMA engine wants a
         * physical address, hence virt_to_phys(). */
        cd->dma_descr->next = (dma_descr_data *)virt_to_phys(cd->next->dma_descr);
        /* Last element: terminate the hw chain. */
        cd->dma_descr->next = NULL;
        DEBUG(printk("setup_descr_chain: exit\n"));
/* Create a pad descriptor for the transform.
 * Return -1 for error, 0 if pad created.
 *
 * Hashes (MD5/SHA-1) get an algorithm-specific length pad built from
 * tc->consumed; the checksum unit gets a single zero byte only when an
 * odd byte remains. */
static int create_pad_descriptor(struct cryptocop_tfrm_ctx *tc, struct cryptocop_dma_desc **pad_desc, int alloc_flag)
        struct cryptocop_dma_desc *cdesc = NULL;
/* NOTE(review): declarations of error/pad/plen and part of the meta_out
 * initializer were lost in this extract. */
        struct strcop_meta_out mo = {

        DEBUG(printk("create_pad_descriptor: start.\n"));
        /* Setup pad descriptor. */

        DEBUG(printk("create_pad_descriptor: setting up padding.\n"));
        cdesc = alloc_cdesc(alloc_flag);
        /* NOTE(review): the if (!cdesc) guard around this error path was lost. */
                DEBUG_API(printk("create_pad_descriptor: alloc pad desc\n"));

        /* Build unit-specific pad data and DMA-out metadata. */
        switch (tc->unit_no) {
        /* NOTE(review): "case" label (MD5 unit) lost in this extract. */
                error = create_md5_pad(alloc_flag, tc->consumed, &pad, &plen);
                        DEBUG_API(printk("create_pad_descriptor: create_md5_pad_failed\n"));
                /* Pad buffer is heap memory: let free_cdesc() release it. */
                cdesc->free_buf = pad;
                mo.hashsel = src_dma;
                mo.hashconf = tc->hash_conf;
                mo.hashmode = tc->hash_mode;
        /* NOTE(review): "case" label (SHA-1 unit) lost in this extract. */
                error = create_sha1_pad(alloc_flag, tc->consumed, &pad, &plen);
                        DEBUG_API(printk("create_pad_descriptor: create_sha1_pad_failed\n"));
                cdesc->free_buf = pad;
                mo.hashsel = src_dma;
                mo.hashconf = tc->hash_conf;
                mo.hashmode = tc->hash_mode;
        /* NOTE(review): "case" label (checksum unit) lost in this extract. */
                /* One zero byte of pad only when an odd byte remains. */
                if (tc->consumed % tc->blocklength){
                        pad = (char*)csum_zero_pad;
                /* NOTE(review): else-branch and plen assignments lost. */
                        pad = (char*)cdesc; /* Use any pointer. */
                mo.csumsel = src_dma;

        /* Pad is pushed out with WAIT + EOP so the unit flushes. */
        cdesc->dma_descr->wait = 1;
        cdesc->dma_descr->out_eop = 1; /* Since this is a pad output is pushed.  EOP is ok here since the padded unit is the only one active. */
        cdesc->dma_descr->buf = (char*)virt_to_phys((char*)pad);
        cdesc->dma_descr->after = cdesc->dma_descr->buf + plen;

        cdesc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, mo);
/* NOTE(review): success return lost in this extract; error cleanup follows. */
        if (cdesc) free_cdesc(cdesc);
/* NOTE(review): error return and closing brace lost in this extract. */
/*
 * setup_key_dl_desc() - build the DMA descriptor that downloads the
 * cipher key to the hardware.
 *
 * For AES decryption the decrypt key schedule is derived once per
 * transform and cached in tctx->dec_key; all other cases download the
 * user-supplied key directly.  keylen is in bits, hence /8 below.
 */
static int setup_key_dl_desc(struct cryptocop_tfrm_ctx *tc, struct cryptocop_dma_desc **kd, int alloc_flag)
        struct cryptocop_dma_desc *key_desc = alloc_cdesc(alloc_flag);
        struct strcop_meta_out mo = {0};

        DEBUG(printk("setup_key_dl_desc\n"));

        /* NOTE(review): the if (!key_desc) guard around this error path was lost. */
                DEBUG_API(printk("setup_key_dl_desc: failed descriptor allocation.\n"));

        if ((tc->tctx->init.alg == cryptocop_alg_aes) && (tc->tcfg->flags & CRYPTOCOP_DECRYPT)) {
                /* Precook the AES decrypt key. */
                if (!tc->tctx->dec_key_set){
                        get_aes_decrypt_key(tc->tctx->dec_key, tc->tctx->init.key, tc->tctx->init.keylen);
                        tc->tctx->dec_key_set = 1;
                key_desc->dma_descr->buf = (char*)virt_to_phys(tc->tctx->dec_key);
                key_desc->dma_descr->after = key_desc->dma_descr->buf + tc->tctx->init.keylen/8;
        /* NOTE(review): "} else {" lost; plain-key download path follows. */
                key_desc->dma_descr->buf = (char*)virt_to_phys(tc->tctx->init.key);
                key_desc->dma_descr->after = key_desc->dma_descr->buf + tc->tctx->init.keylen/8;

        /* Setup metadata. */
        /* NOTE(review): the body of this switch (per-keylen cases) was lost
         * in this extract. */
        switch (tc->tctx->init.keylen) {
        mo.ciphsel = mo.hashsel = mo.csumsel = src_none;
        key_desc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, mo);

        /* Key download is a complete output unit: EOP + WAIT, no irq. */
        key_desc->dma_descr->out_eop = 1;
        key_desc->dma_descr->wait = 1;
        key_desc->dma_descr->intr = 0;
/* NOTE(review): *kd assignment, return and closing brace lost in this extract. */
/*
 * setup_cipher_iv_desc() - build the DMA descriptor that feeds the
 * explicit CBC IV (tc->tcfg->iv, one cipher block long) to the cipher.
 */
static int setup_cipher_iv_desc(struct cryptocop_tfrm_ctx *tc, struct cryptocop_dma_desc **id, int alloc_flag)
        struct cryptocop_dma_desc *iv_desc = alloc_cdesc(alloc_flag);
        struct strcop_meta_out mo = {0};

        DEBUG(printk("setup_cipher_iv_desc\n"));

        /* NOTE(review): the if (!iv_desc) guard around this error path was lost. */
                DEBUG_API(printk("setup_cipher_iv_desc: failed CBC IV descriptor allocation.\n"));

        /* One cipher block of IV, addressed physically for the DMA. */
        iv_desc->dma_descr->buf = (char*)virt_to_phys(tc->tcfg->iv);
        iv_desc->dma_descr->after = iv_desc->dma_descr->buf + tc->blocklength;

        /* Setup metadata. */
        mo.hashsel = mo.csumsel = src_none;
        mo.ciphsel = src_dma;
        mo.ciphconf = tc->ciph_conf;
        mo.cbcmode = tc->cbcmode;

        iv_desc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, mo);

        /* IV is a prefix of the cipher input: WAIT but no EOP, no irq. */
        iv_desc->dma_descr->out_eop = 0;
        iv_desc->dma_descr->wait = 1;
        iv_desc->dma_descr->intr = 0;
/* NOTE(review): *id assignment, return and closing brace lost in this extract. */
/* Map the output length of the transform to operation output starting on the inject index.
 *
 * Walks operation->tfrm_op.outdata (an iovec array) to the transform's
 * inject index, then allocates one DMA-in descriptor per iovec chunk
 * until tc->produced bytes are covered. */
static int create_input_descriptors(struct cryptocop_operation *operation, struct cryptocop_tfrm_ctx *tc, struct cryptocop_dma_desc **id, int alloc_flag)
        struct cryptocop_dma_desc head = {0};           /* dummy list head; real chain starts at head.next */
        struct cryptocop_dma_desc *outdesc = &head;
        size_t iov_offset = 0;
/* NOTE(review): declarations of outiov_ix/out_ix/rem_length/dlength/failed
 * appear lost in this extract. */
        struct strcop_meta_in mi = {0};

        size_t out_length = tc->produced;

        assert(out_length != 0);
        /* Result must fit inside the consumer's output buffer. */
        if (((tc->produced + tc->tcfg->inject_ix) > operation->tfrm_op.outlen) || (tc->produced && (operation->tfrm_op.outlen == 0))) {
                DEBUG_API(printk("create_input_descriptors: operation outdata too small\n"));

        /* Traverse the out iovec until the result inject index is reached. */
        while ((outiov_ix < operation->tfrm_op.outcount) && ((out_ix + operation->tfrm_op.outdata[outiov_ix].iov_len) <= tc->tcfg->inject_ix)){
                out_ix += operation->tfrm_op.outdata[outiov_ix].iov_len;
        if (outiov_ix >= operation->tfrm_op.outcount){
                DEBUG_API(printk("create_input_descriptors: operation outdata too small\n"));

        /* Offset of the inject point inside the current iovec element. */
        iov_offset = tc->tcfg->inject_ix - out_ix;
        mi.dmasel = tc->unit_no;

        /* Setup the output descriptors. */
        while ((out_length > 0) && (outiov_ix < operation->tfrm_op.outcount)) {
                outdesc->next = alloc_cdesc(alloc_flag);
                if (!outdesc->next) {
                        DEBUG_API(printk("create_input_descriptors: alloc_cdesc\n"));
                outdesc = outdesc->next;
                /* Clamp to whichever is smaller: bytes left to place or
                 * room left in this iovec element. */
                rem_length = operation->tfrm_op.outdata[outiov_ix].iov_len - iov_offset;
                dlength = (out_length < rem_length) ? out_length : rem_length;

                DEBUG(printk("create_input_descriptors:\n"
                             "outiov_ix=%d, rem_length=%d, dlength=%d\n"
                             "iov_offset=%d, outdata[outiov_ix].iov_len=%d\n"
                             "outcount=%d, outiov_ix=%d\n",
                             outiov_ix, rem_length, dlength, iov_offset, operation->tfrm_op.outdata[outiov_ix].iov_len, operation->tfrm_op.outcount, outiov_ix));

                outdesc->dma_descr->buf = (char*)virt_to_phys(operation->tfrm_op.outdata[outiov_ix].iov_base + iov_offset);
                outdesc->dma_descr->after = outdesc->dma_descr->buf + dlength;
                outdesc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_in, mi);

                out_length -= dlength;
                iov_offset += dlength;
                /* Element exhausted: advance to the next iovec entry. */
                if (iov_offset >= operation->tfrm_op.outdata[outiov_ix].iov_len) {
/* NOTE(review): loop bodies/braces and the out_length!=0 failure branch were
 * lost in this extract; only the diagnostic remains. */
                DEBUG_API(printk("create_input_descriptors: not enough room for output, %d remained\n", out_length));

        /* Set sync in last descriptor. */
        outdesc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_in, mi);
/* NOTE(review): the success return, the error-cleanup loop structure and the
 * closing brace were lost; the two lines below free the partial chain. */
        outdesc = head.next->next;
        free_cdesc(head.next);
/*
 * create_output_descriptors() - append DMA-out descriptors covering
 * desc_len bytes of operation input, advancing the caller's iovec
 * cursor (*iniov_ix / *iniov_offset) and chain tail (*current_out_cdesc).
 */
static int create_output_descriptors(struct cryptocop_operation *operation, int *iniov_ix, int *iniov_offset, size_t desc_len, struct cryptocop_dma_desc **current_out_cdesc, struct strcop_meta_out *meta_out, int alloc_flag)
        while (desc_len != 0) {
                struct cryptocop_dma_desc *cdesc;
                int rem_length = operation->tfrm_op.indata[*iniov_ix].iov_len - *iniov_offset;
                /* Take the smaller of what's requested and what this iovec
                 * element still holds. */
                int dlength = (desc_len < rem_length) ? desc_len : rem_length;

                cdesc = alloc_cdesc(alloc_flag);
                /* NOTE(review): the if (!cdesc) guard around this error path was lost. */
                        DEBUG_API(printk("create_output_descriptors: alloc_cdesc\n"));

                /* Append to the out chain and advance the tail pointer. */
                (*current_out_cdesc)->next = cdesc;
                (*current_out_cdesc) = cdesc;

                /* Descriptor references caller memory; nothing to kfree. */
                cdesc->free_buf = NULL;

                cdesc->dma_descr->buf = (char*)virt_to_phys(operation->tfrm_op.indata[*iniov_ix].iov_base + *iniov_offset);
                cdesc->dma_descr->after = cdesc->dma_descr->buf + dlength;

                assert(desc_len >= dlength);
/* NOTE(review): the "desc_len -= dlength;" line appears lost in this extract. */
                *iniov_offset += dlength;
                /* Element exhausted: step to the next indata iovec entry. */
                if (*iniov_offset >= operation->tfrm_op.indata[*iniov_ix].iov_len) {
                        if (*iniov_ix > operation->tfrm_op.incount) {
                                DEBUG_API(printk("create_output_descriptors: not enough indata in operation."));

                cdesc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, (*meta_out));
        } /* while (desc_len != 0) */
        /* Last DMA descriptor gets a 'wait' bit to signal expected change in metadata. */
        (*current_out_cdesc)->dma_descr->wait = 1; /* This will set extraneous WAIT in some situations, e.g. when padding hashes and checksums. */
/* NOTE(review): return statement and closing brace lost in this extract. */
/*
 * append_input_descriptors() - when a transform's output is to be
 * collected, move its pad descriptors onto the DMA-out list and append
 * its result descriptors (built by create_input_descriptors()) onto
 * the DMA-in list.  Both chain-tail pointers are advanced in place.
 */
static int append_input_descriptors(struct cryptocop_operation *operation, struct cryptocop_dma_desc **current_in_cdesc, struct cryptocop_dma_desc **current_out_cdesc, struct cryptocop_tfrm_ctx *tc, int alloc_flag)
        DEBUG(printk("append_input_descriptors, tc=0x%p, unit_no=%d\n", tc, tc->unit_no));
/* NOTE(review): the surrounding "if (tc->tcfg) { ... }" style guard and local
 * declarations (failed) appear lost in this extract. */
                struct cryptocop_dma_desc *idescs = NULL;
                DEBUG(printk("append_input_descriptors: pushing output, consumed %d produced %d bytes.\n", tc->consumed, tc->produced));

                DEBUG(printk("append_input_descriptors: append pad descriptors to DMA out list.\n"));
                /* Splice the whole pad_descs chain onto the DMA out list. */
                while (tc->pad_descs) {
                        DEBUG(printk("append descriptor 0x%p\n", tc->pad_descs));
                        (*current_out_cdesc)->next = tc->pad_descs;
                        tc->pad_descs = tc->pad_descs->next;
                        (*current_out_cdesc) = (*current_out_cdesc)->next;

                /* Setup and append output descriptors to DMA in list. */
                if (tc->unit_no == src_dma){
                        /* mem2mem.  Setup DMA in descriptors to discard all input prior to the requested mem2mem data. */
                        struct strcop_meta_in mi = {.sync = 0, .dmasel = src_dma};
                        unsigned int start_ix = tc->start_ix;
/* NOTE(review): the loop draining start_ix in buffer-sized chunks was lost;
 * one iteration's body follows. */
                                unsigned int desclen = start_ix < MEM2MEM_DISCARD_BUF_LENGTH ? start_ix : MEM2MEM_DISCARD_BUF_LENGTH;

                                (*current_in_cdesc)->next = alloc_cdesc(alloc_flag);
                                if (!(*current_in_cdesc)->next){
                                        DEBUG_API(printk("append_input_descriptors: alloc_cdesc mem2mem discard failed\n"));
                                (*current_in_cdesc) = (*current_in_cdesc)->next;
                                /* Discarded bytes land in the shared trash buffer. */
                                (*current_in_cdesc)->dma_descr->buf = (char*)virt_to_phys(mem2mem_discard_buf);
                                (*current_in_cdesc)->dma_descr->after = (*current_in_cdesc)->dma_descr->buf + desclen;
                                (*current_in_cdesc)->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_in, mi);
/* NOTE(review): loop close, mi.sync update and intervening lines lost; final
 * metadata write of the mem2mem path follows. */
                        (*current_in_cdesc)->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_in, mi);

                failed = create_input_descriptors(operation, tc, &idescs, alloc_flag);
                /* NOTE(review): the if (failed) guard around this error path was lost. */
                        DEBUG_API(printk("append_input_descriptors: output descriptor setup failed\n"));
                DEBUG(printk("append_input_descriptors: append output descriptors to DMA in list.\n"));
                /* Splice the freshly built result descriptors onto DMA in. */
/* NOTE(review): the "while (idescs)" loop header was lost; its body follows. */
                        DEBUG(printk("append descriptor 0x%p\n", idescs));
                        (*current_in_cdesc)->next = idescs;
                        idescs = idescs->next;
                        (*current_in_cdesc) = (*current_in_cdesc)->next;
/* NOTE(review): remaining bookkeeping, return and closing brace lost in this
 * extract. */
709 static int cryptocop_setup_dma_list(struct cryptocop_operation
*operation
, struct cryptocop_int_operation
**int_op
, int alloc_flag
)
711 struct cryptocop_session
*sess
;
712 struct cryptocop_transform_ctx
*tctx
;
714 struct cryptocop_tfrm_ctx digest_ctx
= {
715 .previous_src
= src_none
,
716 .current_src
= src_none
,
718 .requires_padding
= 1,
719 .strict_block_length
= 0,
733 struct cryptocop_tfrm_ctx cipher_ctx
= {
734 .previous_src
= src_none
,
735 .current_src
= src_none
,
737 .requires_padding
= 0,
738 .strict_block_length
= 1,
752 struct cryptocop_tfrm_ctx csum_ctx
= {
753 .previous_src
= src_none
,
754 .current_src
= src_none
,
757 .requires_padding
= 1,
758 .strict_block_length
= 0,
772 .unit_no
= src_csum
};
773 struct cryptocop_tfrm_cfg
*tcfg
= operation
->tfrm_op
.tfrm_cfg
;
775 unsigned int indata_ix
= 0;
777 /* iovec accounting. */
779 int iniov_offset
= 0;
781 /* Operation descriptor cfg traversal pointer. */
782 struct cryptocop_desc
*odsc
;
785 /* List heads for allocated descriptors. */
786 struct cryptocop_dma_desc out_cdesc_head
= {0};
787 struct cryptocop_dma_desc in_cdesc_head
= {0};
789 struct cryptocop_dma_desc
*current_out_cdesc
= &out_cdesc_head
;
790 struct cryptocop_dma_desc
*current_in_cdesc
= &in_cdesc_head
;
792 struct cryptocop_tfrm_ctx
*output_tc
= NULL
;
795 assert(operation
!= NULL
);
796 assert(int_op
!= NULL
);
798 DEBUG(printk("cryptocop_setup_dma_list: start\n"));
799 DEBUG(print_cryptocop_operation(operation
));
801 sess
= get_session(operation
->sid
);
803 DEBUG_API(printk("cryptocop_setup_dma_list: no session found for operation.\n"));
807 iop_alloc_ptr
= kmalloc(DESCR_ALLOC_PAD
+ sizeof(struct cryptocop_int_operation
), alloc_flag
);
808 if (!iop_alloc_ptr
) {
809 DEBUG_API(printk("cryptocop_setup_dma_list: kmalloc cryptocop_int_operation\n"));
813 (*int_op
) = (struct cryptocop_int_operation
*)(((unsigned long int)(iop_alloc_ptr
+ DESCR_ALLOC_PAD
+ offsetof(struct cryptocop_int_operation
, ctx_out
)) & ~0x0000001F) - offsetof(struct cryptocop_int_operation
, ctx_out
));
814 DEBUG(memset((*int_op
), 0xff, sizeof(struct cryptocop_int_operation
)));
815 (*int_op
)->alloc_ptr
= iop_alloc_ptr
;
816 DEBUG(printk("cryptocop_setup_dma_list: *int_op=0x%p, alloc_ptr=0x%p\n", *int_op
, (*int_op
)->alloc_ptr
));
818 (*int_op
)->sid
= operation
->sid
;
819 (*int_op
)->cdesc_out
= NULL
;
820 (*int_op
)->cdesc_in
= NULL
;
821 (*int_op
)->tdes_mode
= cryptocop_3des_ede
;
822 (*int_op
)->csum_mode
= cryptocop_csum_le
;
823 (*int_op
)->ddesc_out
= NULL
;
824 (*int_op
)->ddesc_in
= NULL
;
826 /* Scan operation->tfrm_op.tfrm_cfg for bad configuration and set up the local contexts. */
828 DEBUG_API(printk("cryptocop_setup_dma_list: no configured transforms in operation.\n"));
833 tctx
= get_transform_ctx(sess
, tcfg
->tid
);
835 DEBUG_API(printk("cryptocop_setup_dma_list: no transform id %d in session.\n", tcfg
->tid
));
839 if (tcfg
->inject_ix
> operation
->tfrm_op
.outlen
){
840 DEBUG_API(printk("cryptocop_setup_dma_list: transform id %d inject_ix (%d) > operation->tfrm_op.outlen(%d)", tcfg
->tid
, tcfg
->inject_ix
, operation
->tfrm_op
.outlen
));
844 switch (tctx
->init
.alg
){
845 case cryptocop_alg_mem2mem
:
846 if (cipher_ctx
.tcfg
!= NULL
){
847 DEBUG_API(printk("cryptocop_setup_dma_list: multiple ciphers in operation.\n"));
851 /* mem2mem is handled as a NULL cipher. */
852 cipher_ctx
.cbcmode
= 0;
853 cipher_ctx
.decrypt
= 0;
854 cipher_ctx
.blocklength
= 1;
855 cipher_ctx
.ciph_conf
= 0;
856 cipher_ctx
.unit_no
= src_dma
;
857 cipher_ctx
.tcfg
= tcfg
;
858 cipher_ctx
.tctx
= tctx
;
860 case cryptocop_alg_des
:
861 case cryptocop_alg_3des
:
862 case cryptocop_alg_aes
:
864 if (cipher_ctx
.tcfg
!= NULL
){
865 DEBUG_API(printk("cryptocop_setup_dma_list: multiple ciphers in operation.\n"));
869 cipher_ctx
.tcfg
= tcfg
;
870 cipher_ctx
.tctx
= tctx
;
871 if (cipher_ctx
.tcfg
->flags
& CRYPTOCOP_DECRYPT
){
872 cipher_ctx
.decrypt
= 1;
874 switch (tctx
->init
.cipher_mode
) {
875 case cryptocop_cipher_mode_ecb
:
876 cipher_ctx
.cbcmode
= 0;
878 case cryptocop_cipher_mode_cbc
:
879 cipher_ctx
.cbcmode
= 1;
882 DEBUG_API(printk("cryptocop_setup_dma_list: cipher_ctx, bad cipher mode==%d\n", tctx
->init
.cipher_mode
));
886 DEBUG(printk("cryptocop_setup_dma_list: cipher_ctx, set CBC mode==%d\n", cipher_ctx
.cbcmode
));
887 switch (tctx
->init
.alg
){
888 case cryptocop_alg_des
:
889 cipher_ctx
.ciph_conf
= 0;
890 cipher_ctx
.unit_no
= src_des
;
891 cipher_ctx
.blocklength
= DES_BLOCK_LENGTH
;
893 case cryptocop_alg_3des
:
894 cipher_ctx
.ciph_conf
= 1;
895 cipher_ctx
.unit_no
= src_des
;
896 cipher_ctx
.blocklength
= DES_BLOCK_LENGTH
;
898 case cryptocop_alg_aes
:
899 cipher_ctx
.ciph_conf
= 2;
900 cipher_ctx
.unit_no
= src_aes
;
901 cipher_ctx
.blocklength
= AES_BLOCK_LENGTH
;
904 panic("cryptocop_setup_dma_list: impossible algorithm %d\n", tctx
->init
.alg
);
906 (*int_op
)->tdes_mode
= tctx
->init
.tdes_mode
;
908 case cryptocop_alg_md5
:
909 case cryptocop_alg_sha1
:
911 if (digest_ctx
.tcfg
!= NULL
){
912 DEBUG_API(printk("cryptocop_setup_dma_list: multiple digests in operation.\n"));
916 digest_ctx
.tcfg
= tcfg
;
917 digest_ctx
.tctx
= tctx
;
918 digest_ctx
.hash_mode
= 0; /* Don't use explicit IV in this API. */
919 switch (tctx
->init
.alg
){
920 case cryptocop_alg_md5
:
921 digest_ctx
.blocklength
= MD5_BLOCK_LENGTH
;
922 digest_ctx
.unit_no
= src_md5
;
923 digest_ctx
.hash_conf
= 1; /* 1 => MD-5 */
925 case cryptocop_alg_sha1
:
926 digest_ctx
.blocklength
= SHA1_BLOCK_LENGTH
;
927 digest_ctx
.unit_no
= src_sha1
;
928 digest_ctx
.hash_conf
= 0; /* 0 => SHA-1 */
931 panic("cryptocop_setup_dma_list: impossible digest algorithm\n");
934 case cryptocop_alg_csum
:
936 if (csum_ctx
.tcfg
!= NULL
){
937 DEBUG_API(printk("cryptocop_setup_dma_list: multiple checksums in operation.\n"));
941 (*int_op
)->csum_mode
= tctx
->init
.csum_mode
;
942 csum_ctx
.tcfg
= tcfg
;
943 csum_ctx
.tctx
= tctx
;
947 DEBUG_API(printk("cryptocop_setup_dma_list: invalid algorithm %d specified in tfrm %d.\n", tctx
->init
.alg
, tcfg
->tid
));
953 /* Download key if a cipher is used. */
954 if (cipher_ctx
.tcfg
&& (cipher_ctx
.tctx
->init
.alg
!= cryptocop_alg_mem2mem
)){
955 struct cryptocop_dma_desc
*key_desc
= NULL
;
957 failed
= setup_key_dl_desc(&cipher_ctx
, &key_desc
, alloc_flag
);
959 DEBUG_API(printk("cryptocop_setup_dma_list: setup key dl\n"));
962 current_out_cdesc
->next
= key_desc
;
963 current_out_cdesc
= key_desc
;
964 indata_ix
+= (unsigned int)(key_desc
->dma_descr
->after
- key_desc
->dma_descr
->buf
);
966 /* Download explicit IV if a cipher is used and CBC mode and explicit IV selected. */
967 if ((cipher_ctx
.tctx
->init
.cipher_mode
== cryptocop_cipher_mode_cbc
) && (cipher_ctx
.tcfg
->flags
& CRYPTOCOP_EXPLICIT_IV
)) {
968 struct cryptocop_dma_desc
*iv_desc
= NULL
;
970 DEBUG(printk("cryptocop_setup_dma_list: setup cipher CBC IV descriptor.\n"));
972 failed
= setup_cipher_iv_desc(&cipher_ctx
, &iv_desc
, alloc_flag
);
974 DEBUG_API(printk("cryptocop_setup_dma_list: CBC IV descriptor.\n"));
977 current_out_cdesc
->next
= iv_desc
;
978 current_out_cdesc
= iv_desc
;
979 indata_ix
+= (unsigned int)(iv_desc
->dma_descr
->after
- iv_desc
->dma_descr
->buf
);
983 /* Process descriptors. */
984 odsc
= operation
->tfrm_op
.desc
;
986 struct cryptocop_desc_cfg
*dcfg
= odsc
->cfg
;
987 struct strcop_meta_out meta_out
= {0};
988 size_t desc_len
= odsc
->length
;
989 int active_count
, eop_needed_count
;
993 DEBUG(printk("cryptocop_setup_dma_list: parsing an operation descriptor\n"));
996 struct cryptocop_tfrm_ctx
*tc
= NULL
;
998 DEBUG(printk("cryptocop_setup_dma_list: parsing an operation descriptor configuration.\n"));
999 /* Get the local context for the transform and mark it as the output unit if it produces output. */
1000 if (digest_ctx
.tcfg
&& (digest_ctx
.tcfg
->tid
== dcfg
->tid
)){
1002 } else if (cipher_ctx
.tcfg
&& (cipher_ctx
.tcfg
->tid
== dcfg
->tid
)){
1004 } else if (csum_ctx
.tcfg
&& (csum_ctx
.tcfg
->tid
== dcfg
->tid
)){
1008 DEBUG_API(printk("cryptocop_setup_dma_list: invalid transform %d specified in descriptor.\n", dcfg
->tid
));
1013 DEBUG_API(printk("cryptocop_setup_dma_list: completed transform %d reused.\n", dcfg
->tid
));
1018 tc
->start_ix
= indata_ix
;
1022 tc
->previous_src
= tc
->current_src
;
1023 tc
->prev_src
= tc
->curr_src
;
1024 /* Map source unit id to DMA source config. */
1026 case cryptocop_source_dma
:
1027 tc
->current_src
= src_dma
;
1029 case cryptocop_source_des
:
1030 tc
->current_src
= src_des
;
1032 case cryptocop_source_3des
:
1033 tc
->current_src
= src_des
;
1035 case cryptocop_source_aes
:
1036 tc
->current_src
= src_aes
;
1038 case cryptocop_source_md5
:
1039 case cryptocop_source_sha1
:
1040 case cryptocop_source_csum
:
1041 case cryptocop_source_none
:
1043 /* We do not allow using accumulating style units (SHA-1, MD5, checksum) as sources to other units.
1045 DEBUG_API(printk("cryptocop_setup_dma_list: bad unit source configured %d.\n", dcfg
->src
));
1049 if (tc
->current_src
!= src_dma
) {
1050 /* Find the unit we are sourcing from. */
1051 if (digest_ctx
.unit_no
== tc
->current_src
){
1052 tc
->curr_src
= &digest_ctx
;
1053 } else if (cipher_ctx
.unit_no
== tc
->current_src
){
1054 tc
->curr_src
= &cipher_ctx
;
1055 } else if (csum_ctx
.unit_no
== tc
->current_src
){
1056 tc
->curr_src
= &csum_ctx
;
1058 if ((tc
->curr_src
== tc
) && (tc
->unit_no
!= src_dma
)){
1059 DEBUG_API(printk("cryptocop_setup_dma_list: unit %d configured to source from itself.\n", tc
->unit_no
));
1064 tc
->curr_src
= NULL
;
1067 /* Detect source switch. */
1068 DEBUG(printk("cryptocop_setup_dma_list: tc->active=%d tc->unit_no=%d tc->current_src=%d tc->previous_src=%d, tc->curr_src=0x%p, tc->prev_srv=0x%p\n", tc
->active
, tc
->unit_no
, tc
->current_src
, tc
->previous_src
, tc
->curr_src
, tc
->prev_src
));
1069 if (tc
->active
&& (tc
->current_src
!= tc
->previous_src
)) {
1070 /* Only allow source switch when both the old source unit and the new one have
1071 * no pending data to process (i.e. the consumed length must be a multiple of the
1072 * transform blocklength). */
1073 /* Note: if the src == NULL we are actually sourcing from DMA out. */
1074 if (((tc
->prev_src
!= NULL
) && (tc
->prev_src
->consumed
% tc
->prev_src
->blocklength
)) ||
1075 ((tc
->curr_src
!= NULL
) && (tc
->curr_src
->consumed
% tc
->curr_src
->blocklength
)))
1077 DEBUG_API(printk("cryptocop_setup_dma_list: can only disconnect from or connect to a unit on a multiple of the blocklength, old: cons=%d, prod=%d, block=%d, new: cons=%d prod=%d, block=%d.\n", tc
->prev_src
? tc
->prev_src
->consumed
: INT_MIN
, tc
->prev_src
? tc
->prev_src
->produced
: INT_MIN
, tc
->prev_src
? tc
->prev_src
->blocklength
: INT_MIN
, tc
->curr_src
? tc
->curr_src
->consumed
: INT_MIN
, tc
->curr_src
? tc
->curr_src
->produced
: INT_MIN
, tc
->curr_src
? tc
->curr_src
->blocklength
: INT_MIN
));
1082 /* Detect unit deactivation. */
1084 /* Length check of this is handled below. */
1088 } /* while (dcfg) */
1089 DEBUG(printk("cryptocop_setup_dma_list: parsing operation descriptor configuration complete.\n"));
1091 if (cipher_ctx
.active
&& (cipher_ctx
.curr_src
!= NULL
) && !cipher_ctx
.curr_src
->active
){
1092 DEBUG_API(printk("cryptocop_setup_dma_list: cipher source from inactive unit %d\n", cipher_ctx
.curr_src
->unit_no
));
1096 if (digest_ctx
.active
&& (digest_ctx
.curr_src
!= NULL
) && !digest_ctx
.curr_src
->active
){
1097 DEBUG_API(printk("cryptocop_setup_dma_list: digest source from inactive unit %d\n", digest_ctx
.curr_src
->unit_no
));
1101 if (csum_ctx
.active
&& (csum_ctx
.curr_src
!= NULL
) && !csum_ctx
.curr_src
->active
){
1102 DEBUG_API(printk("cryptocop_setup_dma_list: cipher source from inactive unit %d\n", csum_ctx
.curr_src
->unit_no
));
1107 /* Update consumed and produced lengths.
1109 The consumed length accounting here is actually cheating. If a unit source from DMA (or any
1110 other unit that process data in blocks of one octet) it is correct, but if it source from a
1111 block processing unit, i.e. a cipher, it will be temporarily incorrect at some times. However
1112 since it is only allowed--by the HW--to change source to or from a block processing unit at times where that
1113 unit has processed an exact multiple of its block length the end result will be correct.
1114 Beware that if the source change restriction change this code will need to be (much) reworked.
1116 DEBUG(printk("cryptocop_setup_dma_list: desc->length=%d, desc_len=%d.\n", odsc
->length
, desc_len
));
1118 if (csum_ctx
.active
) {
1119 csum_ctx
.consumed
+= desc_len
;
1120 if (csum_ctx
.done
) {
1121 csum_ctx
.produced
= 2;
1123 DEBUG(printk("cryptocop_setup_dma_list: csum_ctx producing: consumed=%d, produced=%d, blocklength=%d.\n", csum_ctx
.consumed
, csum_ctx
.produced
, csum_ctx
.blocklength
));
1125 if (digest_ctx
.active
) {
1126 digest_ctx
.consumed
+= desc_len
;
1127 if (digest_ctx
.done
) {
1128 if (digest_ctx
.unit_no
== src_md5
) {
1129 digest_ctx
.produced
= MD5_STATE_LENGTH
;
1131 digest_ctx
.produced
= SHA1_STATE_LENGTH
;
1134 DEBUG(printk("cryptocop_setup_dma_list: digest_ctx producing: consumed=%d, produced=%d, blocklength=%d.\n", digest_ctx
.consumed
, digest_ctx
.produced
, digest_ctx
.blocklength
));
1136 if (cipher_ctx
.active
) {
1137 /* Ciphers are allowed only to source from DMA out. That is filtered above. */
1138 assert(cipher_ctx
.current_src
== src_dma
);
1139 cipher_ctx
.consumed
+= desc_len
;
1140 cipher_ctx
.produced
= cipher_ctx
.blocklength
* (cipher_ctx
.consumed
/ cipher_ctx
.blocklength
);
1141 if (cipher_ctx
.cbcmode
&& !(cipher_ctx
.tcfg
->flags
& CRYPTOCOP_EXPLICIT_IV
) && cipher_ctx
.produced
){
1142 cipher_ctx
.produced
-= cipher_ctx
.blocklength
; /* Compensate for CBC iv. */
1144 DEBUG(printk("cryptocop_setup_dma_list: cipher_ctx producing: consumed=%d, produced=%d, blocklength=%d.\n", cipher_ctx
.consumed
, cipher_ctx
.produced
, cipher_ctx
.blocklength
));
1147 /* Setup the DMA out descriptors. */
1148 /* Configure the metadata. */
1150 eop_needed_count
= 0;
1151 if (cipher_ctx
.active
) {
1153 if (cipher_ctx
.unit_no
== src_dma
){
1155 meta_out
.ciphsel
= src_none
;
1157 meta_out
.ciphsel
= cipher_ctx
.current_src
;
1159 meta_out
.ciphconf
= cipher_ctx
.ciph_conf
;
1160 meta_out
.cbcmode
= cipher_ctx
.cbcmode
;
1161 meta_out
.decrypt
= cipher_ctx
.decrypt
;
1162 DEBUG(printk("set ciphsel=%d ciphconf=%d cbcmode=%d decrypt=%d\n", meta_out
.ciphsel
, meta_out
.ciphconf
, meta_out
.cbcmode
, meta_out
.decrypt
));
1163 if (cipher_ctx
.done
) ++eop_needed_count
;
1165 meta_out
.ciphsel
= src_none
;
1168 if (digest_ctx
.active
) {
1170 meta_out
.hashsel
= digest_ctx
.current_src
;
1171 meta_out
.hashconf
= digest_ctx
.hash_conf
;
1172 meta_out
.hashmode
= 0; /* Explicit mode is not used here. */
1173 DEBUG(printk("set hashsel=%d hashconf=%d hashmode=%d\n", meta_out
.hashsel
, meta_out
.hashconf
, meta_out
.hashmode
));
1174 if (digest_ctx
.done
) {
1175 assert(digest_ctx
.pad_descs
== NULL
);
1176 failed
= create_pad_descriptor(&digest_ctx
, &digest_ctx
.pad_descs
, alloc_flag
);
1178 DEBUG_API(printk("cryptocop_setup_dma_list: failed digest pad creation.\n"));
1183 meta_out
.hashsel
= src_none
;
1186 if (csum_ctx
.active
) {
1188 meta_out
.csumsel
= csum_ctx
.current_src
;
1189 if (csum_ctx
.done
) {
1190 assert(csum_ctx
.pad_descs
== NULL
);
1191 failed
= create_pad_descriptor(&csum_ctx
, &csum_ctx
.pad_descs
, alloc_flag
);
1193 DEBUG_API(printk("cryptocop_setup_dma_list: failed csum pad creation.\n"));
1198 meta_out
.csumsel
= src_none
;
1200 DEBUG(printk("cryptocop_setup_dma_list: %d eop needed, %d active units\n", eop_needed_count
, active_count
));
1201 /* Setup DMA out descriptors for the indata. */
1202 failed
= create_output_descriptors(operation
, &iniov_ix
, &iniov_offset
, desc_len
, ¤t_out_cdesc
, &meta_out
, alloc_flag
);
1204 DEBUG_API(printk("cryptocop_setup_dma_list: create_output_descriptors %d\n", failed
));
1207 /* Setup out EOP. If there are active units that are not done here they cannot get an EOP
1208 * so we ust setup a zero length descriptor to DMA to signal EOP only to done units.
1209 * If there is a pad descriptor EOP for the padded unit will be EOPed by it.
1211 assert(active_count
>= eop_needed_count
);
1212 assert((eop_needed_count
== 0) || (eop_needed_count
== 1));
1213 if (eop_needed_count
) {
1214 /* This means that the bulk operation (cipher/m2m) is terminated. */
1215 if (active_count
> 1) {
1216 /* Use zero length EOP descriptor. */
1217 struct cryptocop_dma_desc
*ed
= alloc_cdesc(alloc_flag
);
1218 struct strcop_meta_out ed_mo
= {0};
1220 DEBUG_API(printk("cryptocop_setup_dma_list: alloc EOP descriptor for cipher\n"));
1225 assert(cipher_ctx
.active
&& cipher_ctx
.done
);
1227 if (cipher_ctx
.unit_no
== src_dma
){
1229 ed_mo
.ciphsel
= src_none
;
1231 ed_mo
.ciphsel
= cipher_ctx
.current_src
;
1233 ed_mo
.ciphconf
= cipher_ctx
.ciph_conf
;
1234 ed_mo
.cbcmode
= cipher_ctx
.cbcmode
;
1235 ed_mo
.decrypt
= cipher_ctx
.decrypt
;
1237 ed
->free_buf
= NULL
;
1238 ed
->dma_descr
->wait
= 1;
1239 ed
->dma_descr
->out_eop
= 1;
1241 ed
->dma_descr
->buf
= (char*)virt_to_phys(&ed
); /* Use any valid physical address for zero length descriptor. */
1242 ed
->dma_descr
->after
= ed
->dma_descr
->buf
;
1243 ed
->dma_descr
->md
= REG_TYPE_CONV(unsigned short int, struct strcop_meta_out
, ed_mo
);
1244 current_out_cdesc
->next
= ed
;
1245 current_out_cdesc
= ed
;
1247 /* Set EOP in the current out descriptor since the only active module is
1248 * the one needing the EOP. */
1250 current_out_cdesc
->dma_descr
->out_eop
= 1;
1254 if (cipher_ctx
.done
&& cipher_ctx
.active
) cipher_ctx
.active
= 0;
1255 if (digest_ctx
.done
&& digest_ctx
.active
) digest_ctx
.active
= 0;
1256 if (csum_ctx
.done
&& csum_ctx
.active
) csum_ctx
.active
= 0;
1257 indata_ix
+= odsc
->length
;
1259 } /* while (odsc) */ /* Process descriptors. */
1260 DEBUG(printk("cryptocop_setup_dma_list: done parsing operation descriptors\n"));
1261 if (cipher_ctx
.tcfg
&& (cipher_ctx
.active
|| !cipher_ctx
.done
)){
1262 DEBUG_API(printk("cryptocop_setup_dma_list: cipher operation not terminated.\n"));
1266 if (digest_ctx
.tcfg
&& (digest_ctx
.active
|| !digest_ctx
.done
)){
1267 DEBUG_API(printk("cryptocop_setup_dma_list: digest operation not terminated.\n"));
1271 if (csum_ctx
.tcfg
&& (csum_ctx
.active
|| !csum_ctx
.done
)){
1272 DEBUG_API(printk("cryptocop_setup_dma_list: csum operation not terminated.\n"));
1277 failed
= append_input_descriptors(operation
, ¤t_in_cdesc
, ¤t_out_cdesc
, &cipher_ctx
, alloc_flag
);
1279 DEBUG_API(printk("cryptocop_setup_dma_list: append_input_descriptors cipher_ctx %d\n", failed
));
1282 failed
= append_input_descriptors(operation
, ¤t_in_cdesc
, ¤t_out_cdesc
, &digest_ctx
, alloc_flag
);
1284 DEBUG_API(printk("cryptocop_setup_dma_list: append_input_descriptors cipher_ctx %d\n", failed
));
1287 failed
= append_input_descriptors(operation
, ¤t_in_cdesc
, ¤t_out_cdesc
, &csum_ctx
, alloc_flag
);
1289 DEBUG_API(printk("cryptocop_setup_dma_list: append_input_descriptors cipher_ctx %d\n", failed
));
1293 DEBUG(printk("cryptocop_setup_dma_list: int_op=0x%p, *int_op=0x%p\n", int_op
, *int_op
));
1294 (*int_op
)->cdesc_out
= out_cdesc_head
.next
;
1295 (*int_op
)->cdesc_in
= in_cdesc_head
.next
;
1296 DEBUG(printk("cryptocop_setup_dma_list: out_cdesc_head=0x%p in_cdesc_head=0x%p\n", (*int_op
)->cdesc_out
, (*int_op
)->cdesc_in
));
1298 setup_descr_chain(out_cdesc_head
.next
);
1299 setup_descr_chain(in_cdesc_head
.next
);
1301 /* Last but not least: mark the last DMA in descriptor for a INTR and EOL and the the
1302 * last DMA out descriptor for EOL.
1304 current_in_cdesc
->dma_descr
->intr
= 1;
1305 current_in_cdesc
->dma_descr
->eol
= 1;
1306 current_out_cdesc
->dma_descr
->eol
= 1;
1308 /* Setup DMA contexts. */
1309 (*int_op
)->ctx_out
.next
= NULL
;
1310 (*int_op
)->ctx_out
.eol
= 1;
1311 (*int_op
)->ctx_out
.intr
= 0;
1312 (*int_op
)->ctx_out
.store_mode
= 0;
1313 (*int_op
)->ctx_out
.en
= 0;
1314 (*int_op
)->ctx_out
.dis
= 0;
1315 (*int_op
)->ctx_out
.md0
= 0;
1316 (*int_op
)->ctx_out
.md1
= 0;
1317 (*int_op
)->ctx_out
.md2
= 0;
1318 (*int_op
)->ctx_out
.md3
= 0;
1319 (*int_op
)->ctx_out
.md4
= 0;
1320 (*int_op
)->ctx_out
.saved_data
= (dma_descr_data
*)virt_to_phys((*int_op
)->cdesc_out
->dma_descr
);
1321 (*int_op
)->ctx_out
.saved_data_buf
= (*int_op
)->cdesc_out
->dma_descr
->buf
; /* Already physical address. */
1323 (*int_op
)->ctx_in
.next
= NULL
;
1324 (*int_op
)->ctx_in
.eol
= 1;
1325 (*int_op
)->ctx_in
.intr
= 0;
1326 (*int_op
)->ctx_in
.store_mode
= 0;
1327 (*int_op
)->ctx_in
.en
= 0;
1328 (*int_op
)->ctx_in
.dis
= 0;
1329 (*int_op
)->ctx_in
.md0
= 0;
1330 (*int_op
)->ctx_in
.md1
= 0;
1331 (*int_op
)->ctx_in
.md2
= 0;
1332 (*int_op
)->ctx_in
.md3
= 0;
1333 (*int_op
)->ctx_in
.md4
= 0;
1335 (*int_op
)->ctx_in
.saved_data
= (dma_descr_data
*)virt_to_phys((*int_op
)->cdesc_in
->dma_descr
);
1336 (*int_op
)->ctx_in
.saved_data_buf
= (*int_op
)->cdesc_in
->dma_descr
->buf
; /* Already physical address. */
1338 DEBUG(printk("cryptocop_setup_dma_list: done\n"));
1343 /* Free all allocated resources. */
1344 struct cryptocop_dma_desc
*tmp_cdesc
;
1345 while (digest_ctx
.pad_descs
){
1346 tmp_cdesc
= digest_ctx
.pad_descs
->next
;
1347 free_cdesc(digest_ctx
.pad_descs
);
1348 digest_ctx
.pad_descs
= tmp_cdesc
;
1350 while (csum_ctx
.pad_descs
){
1351 tmp_cdesc
= csum_ctx
.pad_descs
->next
;
1352 free_cdesc(csum_ctx
.pad_descs
);
1353 csum_ctx
.pad_descs
= tmp_cdesc
;
1355 assert(cipher_ctx
.pad_descs
== NULL
); /* The ciphers are never padded. */
1357 if (*int_op
!= NULL
) delete_internal_operation(*int_op
);
1359 DEBUG_API(printk("cryptocop_setup_dma_list: done with error %d\n", failed
));
1364 static void delete_internal_operation(struct cryptocop_int_operation
*iop
)
1366 void *ptr
= iop
->alloc_ptr
;
1367 struct cryptocop_dma_desc
*cd
= iop
->cdesc_out
;
1368 struct cryptocop_dma_desc
*next
;
1370 DEBUG(printk("delete_internal_operation: iop=0x%p, alloc_ptr=0x%p\n", iop
, ptr
));
1386 #define MD5_MIN_PAD_LENGTH (9)
1387 #define MD5_PAD_LENGTH_FIELD_LENGTH (8)
1389 static int create_md5_pad(int alloc_flag
, unsigned long long hashed_length
, char **pad
, size_t *pad_length
)
1391 size_t padlen
= MD5_BLOCK_LENGTH
- (hashed_length
% MD5_BLOCK_LENGTH
);
1394 unsigned long long int bit_length
= hashed_length
<< 3;
1396 if (padlen
< MD5_MIN_PAD_LENGTH
) padlen
+= MD5_BLOCK_LENGTH
;
1398 p
= kzalloc(padlen
, alloc_flag
);
1399 if (!p
) return -ENOMEM
;
1403 DEBUG(printk("create_md5_pad: hashed_length=%lld bits == %lld bytes\n", bit_length
, hashed_length
));
1405 i
= padlen
- MD5_PAD_LENGTH_FIELD_LENGTH
;
1406 while (bit_length
!= 0){
1407 p
[i
++] = bit_length
% 0x100;
1412 *pad_length
= padlen
;
1417 #define SHA1_MIN_PAD_LENGTH (9)
1418 #define SHA1_PAD_LENGTH_FIELD_LENGTH (8)
1420 static int create_sha1_pad(int alloc_flag
, unsigned long long hashed_length
, char **pad
, size_t *pad_length
)
1422 size_t padlen
= SHA1_BLOCK_LENGTH
- (hashed_length
% SHA1_BLOCK_LENGTH
);
1425 unsigned long long int bit_length
= hashed_length
<< 3;
1427 if (padlen
< SHA1_MIN_PAD_LENGTH
) padlen
+= SHA1_BLOCK_LENGTH
;
1429 p
= kzalloc(padlen
, alloc_flag
);
1430 if (!p
) return -ENOMEM
;
1434 DEBUG(printk("create_sha1_pad: hashed_length=%lld bits == %lld bytes\n", bit_length
, hashed_length
));
1437 while (bit_length
!= 0){
1438 p
[i
--] = bit_length
% 0x100;
1443 *pad_length
= padlen
;
1449 static int transform_ok(struct cryptocop_transform_init
*tinit
)
1451 switch (tinit
->alg
){
1452 case cryptocop_alg_csum
:
1453 switch (tinit
->csum_mode
){
1454 case cryptocop_csum_le
:
1455 case cryptocop_csum_be
:
1458 DEBUG_API(printk("transform_ok: Bad mode set for csum transform\n"));
1461 case cryptocop_alg_mem2mem
:
1462 case cryptocop_alg_md5
:
1463 case cryptocop_alg_sha1
:
1464 if (tinit
->keylen
!= 0) {
1465 DEBUG_API(printk("transform_ok: non-zero keylength, %d, for a digest/csum algorithm\n", tinit
->keylen
));
1466 return -EINVAL
; /* This check is a bit strict. */
1469 case cryptocop_alg_des
:
1470 if (tinit
->keylen
!= 64) {
1471 DEBUG_API(printk("transform_ok: keylen %d invalid for DES\n", tinit
->keylen
));
1475 case cryptocop_alg_3des
:
1476 if (tinit
->keylen
!= 192) {
1477 DEBUG_API(printk("transform_ok: keylen %d invalid for 3DES\n", tinit
->keylen
));
1481 case cryptocop_alg_aes
:
1482 if (tinit
->keylen
!= 128 && tinit
->keylen
!= 192 && tinit
->keylen
!= 256) {
1483 DEBUG_API(printk("transform_ok: keylen %d invalid for AES\n", tinit
->keylen
));
1487 case cryptocop_no_alg
:
1489 DEBUG_API(printk("transform_ok: no such algorithm %d\n", tinit
->alg
));
1493 switch (tinit
->alg
){
1494 case cryptocop_alg_des
:
1495 case cryptocop_alg_3des
:
1496 case cryptocop_alg_aes
:
1497 if (tinit
->cipher_mode
!= cryptocop_cipher_mode_ecb
&& tinit
->cipher_mode
!= cryptocop_cipher_mode_cbc
) return -EINVAL
;
1505 int cryptocop_new_session(cryptocop_session_id
*sid
, struct cryptocop_transform_init
*tinit
, int alloc_flag
)
1507 struct cryptocop_session
*sess
;
1508 struct cryptocop_transform_init
*tfrm_in
= tinit
;
1509 struct cryptocop_transform_init
*tmp_in
;
1512 unsigned long int flags
;
1514 init_stream_coprocessor(); /* For safety if we are called early */
1519 if ((err
= transform_ok(tfrm_in
))) {
1520 DEBUG_API(printk("cryptocop_new_session, bad transform\n"));
1523 tfrm_in
= tfrm_in
->next
;
1525 if (0 == no_tfrms
) {
1526 DEBUG_API(printk("cryptocop_new_session, no transforms specified\n"));
1530 sess
= kmalloc(sizeof(struct cryptocop_session
), alloc_flag
);
1532 DEBUG_API(printk("cryptocop_new_session, kmalloc cryptocop_session\n"));
1536 sess
->tfrm_ctx
= kmalloc(no_tfrms
* sizeof(struct cryptocop_transform_ctx
), alloc_flag
);
1537 if (!sess
->tfrm_ctx
) {
1538 DEBUG_API(printk("cryptocop_new_session, kmalloc cryptocop_transform_ctx\n"));
1544 for (i
= 0; i
< no_tfrms
; i
++){
1545 tmp_in
= tfrm_in
->next
;
1547 if (tmp_in
->tid
== tfrm_in
->tid
) {
1548 DEBUG_API(printk("cryptocop_new_session, duplicate transform ids\n"));
1549 kfree(sess
->tfrm_ctx
);
1553 tmp_in
= tmp_in
->next
;
1555 memcpy(&sess
->tfrm_ctx
[i
].init
, tfrm_in
, sizeof(struct cryptocop_transform_init
));
1556 sess
->tfrm_ctx
[i
].dec_key_set
= 0;
1557 sess
->tfrm_ctx
[i
].next
= &sess
->tfrm_ctx
[i
] + 1;
1559 tfrm_in
= tfrm_in
->next
;
1561 sess
->tfrm_ctx
[i
-1].next
= NULL
;
1563 spin_lock_irqsave(&cryptocop_sessions_lock
, flags
);
1564 sess
->sid
= next_sid
;
1566 /* TODO If we are really paranoid we should do duplicate check to handle sid wraparound.
1567 * OTOH 2^64 is a really large number of session. */
1568 if (next_sid
== 0) next_sid
= 1;
1570 /* Prepend to session list. */
1571 sess
->next
= cryptocop_sessions
;
1572 cryptocop_sessions
= sess
;
1573 spin_unlock_irqrestore(&cryptocop_sessions_lock
, flags
);
1579 int cryptocop_free_session(cryptocop_session_id sid
)
1581 struct cryptocop_transform_ctx
*tc
;
1582 struct cryptocop_session
*sess
= NULL
;
1583 struct cryptocop_session
*psess
= NULL
;
1584 unsigned long int flags
;
1586 LIST_HEAD(remove_list
);
1587 struct list_head
*node
, *tmp
;
1588 struct cryptocop_prio_job
*pj
;
1590 DEBUG(printk("cryptocop_free_session: sid=%lld\n", sid
));
1592 spin_lock_irqsave(&cryptocop_sessions_lock
, flags
);
1593 sess
= cryptocop_sessions
;
1594 while (sess
&& sess
->sid
!= sid
){
1600 psess
->next
= sess
->next
;
1602 cryptocop_sessions
= sess
->next
;
1605 spin_unlock_irqrestore(&cryptocop_sessions_lock
, flags
);
1607 if (!sess
) return -EINVAL
;
1609 /* Remove queued jobs. */
1610 spin_lock_irqsave(&cryptocop_job_queue_lock
, flags
);
1612 for (i
= 0; i
< cryptocop_prio_no_prios
; i
++){
1613 if (!list_empty(&(cryptocop_job_queues
[i
].jobs
))){
1614 list_for_each_safe(node
, tmp
, &(cryptocop_job_queues
[i
].jobs
)) {
1615 pj
= list_entry(node
, struct cryptocop_prio_job
, node
);
1616 if (pj
->oper
->sid
== sid
) {
1617 list_move_tail(node
, &remove_list
);
1622 spin_unlock_irqrestore(&cryptocop_job_queue_lock
, flags
);
1624 list_for_each_safe(node
, tmp
, &remove_list
) {
1626 pj
= list_entry(node
, struct cryptocop_prio_job
, node
);
1627 pj
->oper
->operation_status
= -EAGAIN
; /* EAGAIN is not ideal for job/session terminated but it's the best choice I know of. */
1628 DEBUG(printk("cryptocop_free_session: pj=0x%p, pj->oper=0x%p, pj->iop=0x%p\n", pj
, pj
->oper
, pj
->iop
));
1629 pj
->oper
->cb(pj
->oper
, pj
->oper
->cb_data
);
1630 delete_internal_operation(pj
->iop
);
1634 tc
= sess
->tfrm_ctx
;
1635 /* Erase keying data. */
1637 DEBUG(printk("cryptocop_free_session: memset keys, tfrm id=%d\n", tc
->init
.tid
));
1638 memset(tc
->init
.key
, 0xff, CRYPTOCOP_MAX_KEY_LENGTH
);
1639 memset(tc
->dec_key
, 0xff, CRYPTOCOP_MAX_KEY_LENGTH
);
1642 kfree(sess
->tfrm_ctx
);
1648 static struct cryptocop_session
*get_session(cryptocop_session_id sid
)
1650 struct cryptocop_session
*sess
;
1651 unsigned long int flags
;
1653 spin_lock_irqsave(&cryptocop_sessions_lock
, flags
);
1654 sess
= cryptocop_sessions
;
1655 while (sess
&& (sess
->sid
!= sid
)){
1658 spin_unlock_irqrestore(&cryptocop_sessions_lock
, flags
);
1663 static struct cryptocop_transform_ctx
*get_transform_ctx(struct cryptocop_session
*sess
, cryptocop_tfrm_id tid
)
1665 struct cryptocop_transform_ctx
*tc
= sess
->tfrm_ctx
;
1667 DEBUG(printk("get_transform_ctx, sess=0x%p, tid=%d\n", sess
, tid
));
1668 assert(sess
!= NULL
);
1669 while (tc
&& tc
->init
.tid
!= tid
){
1670 DEBUG(printk("tc=0x%p, tc->next=0x%p\n", tc
, tc
->next
));
1673 DEBUG(printk("get_transform_ctx, returning tc=0x%p\n", tc
));
1679 /* The AES s-transform matrix (s-box). */
1680 static const u8 aes_sbox
[256] = {
1681 99, 124, 119, 123, 242, 107, 111, 197, 48, 1, 103, 43, 254, 215, 171, 118,
1682 202, 130, 201, 125, 250, 89, 71, 240, 173, 212, 162, 175, 156, 164, 114, 192,
1683 183, 253, 147, 38, 54, 63, 247, 204, 52, 165, 229, 241, 113, 216, 49, 21,
1684 4, 199, 35, 195, 24, 150, 5, 154, 7, 18, 128, 226, 235, 39, 178, 117,
1685 9, 131, 44, 26, 27, 110, 90, 160, 82, 59, 214, 179, 41, 227, 47, 132,
1686 83, 209, 0, 237, 32, 252, 177, 91, 106, 203, 190, 57, 74, 76, 88, 207,
1687 208, 239, 170, 251, 67, 77, 51, 133, 69, 249, 2, 127, 80, 60, 159, 168,
1688 81, 163, 64, 143, 146, 157, 56, 245, 188, 182, 218, 33, 16, 255, 243, 210,
1689 205, 12, 19, 236, 95, 151, 68, 23, 196, 167, 126, 61, 100, 93, 25, 115,
1690 96, 129, 79, 220, 34, 42, 144, 136, 70, 238, 184, 20, 222, 94, 11, 219,
1691 224, 50, 58, 10, 73, 6, 36, 92, 194, 211, 172, 98, 145, 149, 228, 121,
1692 231, 200, 55, 109, 141, 213, 78, 169, 108, 86, 244, 234, 101, 122, 174, 8,
1693 186, 120, 37, 46, 28, 166, 180, 198, 232, 221, 116, 31, 75, 189, 139, 138,
1694 112, 62, 181, 102, 72, 3, 246, 14, 97, 53, 87, 185, 134, 193, 29, 158,
1695 225, 248, 152, 17, 105, 217, 142, 148, 155, 30, 135, 233, 206, 85, 40, 223,
1696 140, 161, 137, 13, 191, 230, 66, 104, 65, 153, 45, 15, 176, 84, 187, 22
1699 /* AES has a 32 bit word round constants for each round in the
1700 * key schedule. round_constant[i] is really Rcon[i+1] in FIPS187.
1702 static u32 round_constant
[11] = {
1703 0x01000000, 0x02000000, 0x04000000, 0x08000000,
1704 0x10000000, 0x20000000, 0x40000000, 0x80000000,
1705 0x1B000000, 0x36000000, 0x6C000000
1708 /* Apply the s-box to each of the four occtets in w. */
1709 static u32
aes_ks_subword(const u32 w
)
1713 *(u32
*)(&bytes
[0]) = w
;
1714 bytes
[0] = aes_sbox
[bytes
[0]];
1715 bytes
[1] = aes_sbox
[bytes
[1]];
1716 bytes
[2] = aes_sbox
[bytes
[2]];
1717 bytes
[3] = aes_sbox
[bytes
[3]];
1718 return *(u32
*)(&bytes
[0]);
1721 /* The encrypt (forward) Rijndael key schedule algorithm pseudo code:
1722 * (Note that AES words are 32 bit long)
1724 * KeyExpansion(byte key[4*Nk], word w[Nb*(Nr+1)], Nk){
1728 * w[i] = word(key[4*i, 4*i + 1, 4*i + 2, 4*i + 3])
1733 * while (i < (Nb * (Nr + 1))) {
1735 * if ((i mod Nk) == 0) {
1736 * temp = SubWord(RotWord(temp)) xor Rcon[i/Nk]
1738 * else if ((Nk > 6) && ((i mod Nk) == 4)) {
1739 * temp = SubWord(temp)
1741 * w[i] = w[i - Nk] xor temp
1743 * RotWord(t) does a 8 bit cyclic shift left on a 32 bit word.
1744 * SubWord(t) applies the AES s-box individually to each octet
1747 * For AES Nk can have the values 4, 6, and 8 (corresponding to
1748 * values for Nr of 10, 12, and 14). Nb is always 4.
1750 * To construct w[i], w[i - 1] and w[i - Nk] must be
1751 * available. Consequently we must keep a state of the last Nk words
1752 * to be able to create the last round keys.
1754 static void get_aes_decrypt_key(unsigned char *dec_key
, const unsigned char *key
, unsigned int keylength
)
1757 u32 w_ring
[8]; /* nk is max 8, use elements 0..(nk - 1) as a ringbuffer */
1776 panic("stream co-processor: bad aes key length in get_aes_decrypt_key\n");
1779 /* Need to do host byte order correction here since key is byte oriented and the
1780 * kx algorithm is word (u32) oriented. */
1781 for (i
= 0; i
< nk
; i
+=1) {
1782 w_ring
[i
] = be32_to_cpu(*(u32
*)&key
[4*i
]);
1787 while (i
< (4 * (nr
+ 2))) {
1788 temp
= w_ring
[w_last_ix
];
1791 temp
= (temp
<< 8) | (temp
>> 24);
1792 temp
= aes_ks_subword(temp
);
1793 temp
^= round_constant
[i
/nk
- 1];
1794 } else if ((nk
> 6) && ((i
% nk
) == 4)) {
1795 temp
= aes_ks_subword(temp
);
1797 w_last_ix
= (w_last_ix
+ 1) % nk
; /* This is the same as (i-Nk) mod Nk */
1798 temp
^= w_ring
[w_last_ix
];
1799 w_ring
[w_last_ix
] = temp
;
1801 /* We need the round keys for round Nr+1 and Nr+2 (round key
1802 * Nr+2 is the round key beyond the last one used when
1803 * encrypting). Rounds are numbered starting from 0, Nr=10
1804 * implies 11 rounds are used in encryption/decryption.
1806 if (i
>= (4 * nr
)) {
1807 /* Need to do host byte order correction here, the key
1808 * is byte oriented. */
1809 *(u32
*)dec_key
= cpu_to_be32(temp
);
1817 /**** Job/operation management. ****/
1819 int cryptocop_job_queue_insert_csum(struct cryptocop_operation
*operation
)
1821 return cryptocop_job_queue_insert(cryptocop_prio_kernel_csum
, operation
);
1824 int cryptocop_job_queue_insert_crypto(struct cryptocop_operation
*operation
)
1826 return cryptocop_job_queue_insert(cryptocop_prio_kernel
, operation
);
1829 int cryptocop_job_queue_insert_user_job(struct cryptocop_operation
*operation
)
1831 return cryptocop_job_queue_insert(cryptocop_prio_user
, operation
);
1834 static int cryptocop_job_queue_insert(cryptocop_queue_priority prio
, struct cryptocop_operation
*operation
)
1837 struct cryptocop_prio_job
*pj
= NULL
;
1838 unsigned long int flags
;
1840 DEBUG(printk("cryptocop_job_queue_insert(%d, 0x%p)\n", prio
, operation
));
1842 if (!operation
|| !operation
->cb
){
1843 DEBUG_API(printk("cryptocop_job_queue_insert oper=0x%p, NULL operation or callback\n", operation
));
1847 if ((ret
= cryptocop_job_setup(&pj
, operation
)) != 0){
1848 DEBUG_API(printk("cryptocop_job_queue_insert: job setup failed\n"));
1853 spin_lock_irqsave(&cryptocop_job_queue_lock
, flags
);
1854 list_add_tail(&pj
->node
, &cryptocop_job_queues
[prio
].jobs
);
1855 spin_unlock_irqrestore(&cryptocop_job_queue_lock
, flags
);
1857 /* Make sure a job is running */
1858 cryptocop_start_job();
1862 static void cryptocop_do_tasklet(unsigned long unused
);
1863 DECLARE_TASKLET (cryptocop_tasklet
, cryptocop_do_tasklet
, 0);
1865 static void cryptocop_do_tasklet(unsigned long unused
)
1867 struct list_head
*node
;
1868 struct cryptocop_prio_job
*pj
= NULL
;
1869 unsigned long flags
;
1871 DEBUG(printk("cryptocop_do_tasklet: entering\n"));
1874 spin_lock_irqsave(&cryptocop_completed_jobs_lock
, flags
);
1875 if (!list_empty(&cryptocop_completed_jobs
)){
1876 node
= cryptocop_completed_jobs
.next
;
1878 pj
= list_entry(node
, struct cryptocop_prio_job
, node
);
1882 spin_unlock_irqrestore(&cryptocop_completed_jobs_lock
, flags
);
1884 assert(pj
->oper
!= NULL
);
1886 /* Notify consumer of operation completeness. */
1887 DEBUG(printk("cryptocop_do_tasklet: callback 0x%p, data 0x%p\n", pj
->oper
->cb
, pj
->oper
->cb_data
));
1889 pj
->oper
->operation_status
= 0; /* Job is completed. */
1890 pj
->oper
->cb(pj
->oper
, pj
->oper
->cb_data
);
1891 delete_internal_operation(pj
->iop
);
1894 } while (pj
!= NULL
);
1896 DEBUG(printk("cryptocop_do_tasklet: exiting\n"));
1900 dma_done_interrupt(int irq
, void *dev_id
)
1902 struct cryptocop_prio_job
*done_job
;
1903 reg_dma_rw_ack_intr ack_intr
= {
1907 REG_WR(dma
, IN_DMA_INST
, rw_ack_intr
, ack_intr
);
1909 DEBUG(printk("cryptocop DMA done\n"));
1911 spin_lock(&running_job_lock
);
1912 if (cryptocop_running_job
== NULL
){
1913 printk("stream co-processor got interrupt when not busy\n");
1914 spin_unlock(&running_job_lock
);
1917 done_job
= cryptocop_running_job
;
1918 cryptocop_running_job
= NULL
;
1919 spin_unlock(&running_job_lock
);
1921 /* Start processing a job. */
1922 if (!spin_trylock(&cryptocop_process_lock
)){
1923 DEBUG(printk("cryptocop irq handler, not starting a job\n"));
1925 cryptocop_start_job();
1926 spin_unlock(&cryptocop_process_lock
);
1929 done_job
->oper
->operation_status
= 0; /* Job is completed. */
1930 if (done_job
->oper
->fast_callback
){
1931 /* This operation wants callback from interrupt. */
1932 done_job
->oper
->cb(done_job
->oper
, done_job
->oper
->cb_data
);
1933 delete_internal_operation(done_job
->iop
);
1936 spin_lock(&cryptocop_completed_jobs_lock
);
1937 list_add_tail(&(done_job
->node
), &cryptocop_completed_jobs
);
1938 spin_unlock(&cryptocop_completed_jobs_lock
);
1939 tasklet_schedule(&cryptocop_tasklet
);
1942 DEBUG(printk("cryptocop leave irq handler\n"));
1947 /* Setup interrupts and DMA channels. */
1948 static int init_cryptocop(void)
1950 unsigned long flags
;
1951 reg_dma_rw_cfg dma_cfg
= {.en
= 1};
1952 reg_dma_rw_intr_mask intr_mask_in
= {.data
= regk_dma_yes
}; /* Only want descriptor interrupts from the DMA in channel. */
1953 reg_dma_rw_ack_intr ack_intr
= {.data
= 1,.in_eop
= 1 };
1954 reg_strcop_rw_cfg strcop_cfg
= {
1955 .ipend
= regk_strcop_little
,
1956 .td1
= regk_strcop_e
,
1957 .td2
= regk_strcop_d
,
1958 .td3
= regk_strcop_e
,
1963 if (request_irq(DMA_IRQ
, dma_done_interrupt
, 0,
1964 "stream co-processor DMA", NULL
))
1965 panic("request_irq stream co-processor irq dma9");
1967 (void)crisv32_request_dma(OUT_DMA
, "strcop", DMA_PANIC_ON_ERROR
,
1969 (void)crisv32_request_dma(IN_DMA
, "strcop", DMA_PANIC_ON_ERROR
,
1972 local_irq_save(flags
);
1974 /* Reset and enable the cryptocop. */
1976 REG_WR(strcop
, regi_strcop
, rw_cfg
, strcop_cfg
);
1978 REG_WR(strcop
, regi_strcop
, rw_cfg
, strcop_cfg
);
1981 REG_WR(dma
, IN_DMA_INST
, rw_cfg
, dma_cfg
); /* input DMA */
1982 REG_WR(dma
, OUT_DMA_INST
, rw_cfg
, dma_cfg
); /* output DMA */
1984 /* Set up wordsize = 4 for DMAs. */
1985 DMA_WR_CMD(OUT_DMA_INST
, regk_dma_set_w_size4
);
1986 DMA_WR_CMD(IN_DMA_INST
, regk_dma_set_w_size4
);
1988 /* Enable interrupts. */
1989 REG_WR(dma
, IN_DMA_INST
, rw_intr_mask
, intr_mask_in
);
1991 /* Clear intr ack. */
1992 REG_WR(dma
, IN_DMA_INST
, rw_ack_intr
, ack_intr
);
1994 local_irq_restore(flags
);
1999 /* Free used cryptocop hw resources (interrupt and DMA channels). */
2000 static void release_cryptocop(void)
2002 unsigned long flags
;
2003 reg_dma_rw_cfg dma_cfg
= {.en
= 0};
2004 reg_dma_rw_intr_mask intr_mask_in
= {0};
2005 reg_dma_rw_ack_intr ack_intr
= {.data
= 1,.in_eop
= 1 };
2007 local_irq_save(flags
);
2009 /* Clear intr ack. */
2010 REG_WR(dma
, IN_DMA_INST
, rw_ack_intr
, ack_intr
);
2013 REG_WR(dma
, IN_DMA_INST
, rw_cfg
, dma_cfg
); /* input DMA */
2014 REG_WR(dma
, OUT_DMA_INST
, rw_cfg
, dma_cfg
); /* output DMA */
2016 /* Disable interrupts. */
2017 REG_WR(dma
, IN_DMA_INST
, rw_intr_mask
, intr_mask_in
);
2019 local_irq_restore(flags
);
2021 free_irq(DMA_IRQ
, NULL
);
2023 (void)crisv32_free_dma(OUT_DMA
);
2024 (void)crisv32_free_dma(IN_DMA
);
2028 /* Init job queue. */
2029 static int cryptocop_job_queue_init(void)
2033 INIT_LIST_HEAD(&cryptocop_completed_jobs
);
2035 for (i
= 0; i
< cryptocop_prio_no_prios
; i
++){
2036 cryptocop_job_queues
[i
].prio
= (cryptocop_queue_priority
)i
;
2037 INIT_LIST_HEAD(&cryptocop_job_queues
[i
].jobs
);
2043 static void cryptocop_job_queue_close(void)
2045 struct list_head
*node
, *tmp
;
2046 struct cryptocop_prio_job
*pj
= NULL
;
2047 unsigned long int process_flags
, flags
;
2050 /* FIXME: This is as yet untested code. */
2052 /* Stop strcop from getting an operation to process while we are closing the
2054 spin_lock_irqsave(&cryptocop_process_lock
, process_flags
);
2056 /* Empty the job queue. */
2057 for (i
= 0; i
< cryptocop_prio_no_prios
; i
++){
2058 if (!list_empty(&(cryptocop_job_queues
[i
].jobs
))){
2059 list_for_each_safe(node
, tmp
, &(cryptocop_job_queues
[i
].jobs
)) {
2060 pj
= list_entry(node
, struct cryptocop_prio_job
, node
);
2063 /* Call callback to notify consumer of job removal. */
2064 DEBUG(printk("cryptocop_job_queue_close: callback 0x%p, data 0x%p\n", pj
->oper
->cb
, pj
->oper
->cb_data
));
2065 pj
->oper
->operation_status
= -EINTR
; /* Job is terminated without completion. */
2066 pj
->oper
->cb(pj
->oper
, pj
->oper
->cb_data
);
2068 delete_internal_operation(pj
->iop
);
2073 spin_unlock_irqrestore(&cryptocop_process_lock
, process_flags
);
2075 /* Remove the running job, if any. */
2076 spin_lock_irqsave(&running_job_lock
, flags
);
2077 if (cryptocop_running_job
){
2078 reg_strcop_rw_cfg rw_cfg
;
2079 reg_dma_rw_cfg dma_out_cfg
, dma_in_cfg
;
2082 dma_out_cfg
= REG_RD(dma
, OUT_DMA_INST
, rw_cfg
);
2083 dma_out_cfg
.en
= regk_dma_no
;
2084 REG_WR(dma
, OUT_DMA_INST
, rw_cfg
, dma_out_cfg
);
2086 dma_in_cfg
= REG_RD(dma
, IN_DMA_INST
, rw_cfg
);
2087 dma_in_cfg
.en
= regk_dma_no
;
2088 REG_WR(dma
, IN_DMA_INST
, rw_cfg
, dma_in_cfg
);
2090 /* Disable the cryptocop. */
2091 rw_cfg
= REG_RD(strcop
, regi_strcop
, rw_cfg
);
2093 REG_WR(strcop
, regi_strcop
, rw_cfg
, rw_cfg
);
2095 pj
= cryptocop_running_job
;
2096 cryptocop_running_job
= NULL
;
2098 /* Call callback to notify consumer of job removal. */
2099 DEBUG(printk("cryptocop_job_queue_close: callback 0x%p, data 0x%p\n", pj
->oper
->cb
, pj
->oper
->cb_data
));
2100 pj
->oper
->operation_status
= -EINTR
; /* Job is terminated without completion. */
2101 pj
->oper
->cb(pj
->oper
, pj
->oper
->cb_data
);
2103 delete_internal_operation(pj
->iop
);
2106 spin_unlock_irqrestore(&running_job_lock
, flags
);
2108 /* Remove completed jobs, if any. */
2109 spin_lock_irqsave(&cryptocop_completed_jobs_lock
, flags
);
2111 list_for_each_safe(node
, tmp
, &cryptocop_completed_jobs
) {
2112 pj
= list_entry(node
, struct cryptocop_prio_job
, node
);
2114 /* Call callback to notify consumer of job removal. */
2115 DEBUG(printk("cryptocop_job_queue_close: callback 0x%p, data 0x%p\n", pj
->oper
->cb
, pj
->oper
->cb_data
));
2116 pj
->oper
->operation_status
= -EINTR
; /* Job is terminated without completion. */
2117 pj
->oper
->cb(pj
->oper
, pj
->oper
->cb_data
);
2119 delete_internal_operation(pj
->iop
);
2122 spin_unlock_irqrestore(&cryptocop_completed_jobs_lock
, flags
);
2126 static void cryptocop_start_job(void)
2129 struct cryptocop_prio_job
*pj
;
2130 unsigned long int flags
;
2131 unsigned long int running_job_flags
;
2132 reg_strcop_rw_cfg rw_cfg
= {.en
= 1, .ignore_sync
= 0};
2134 DEBUG(printk("cryptocop_start_job: entering\n"));
2136 spin_lock_irqsave(&running_job_lock
, running_job_flags
);
2137 if (cryptocop_running_job
!= NULL
){
2138 /* Already running. */
2139 DEBUG(printk("cryptocop_start_job: already running, exit\n"));
2140 spin_unlock_irqrestore(&running_job_lock
, running_job_flags
);
2143 spin_lock_irqsave(&cryptocop_job_queue_lock
, flags
);
2145 /* Check the queues in priority order. */
2146 for (i
= cryptocop_prio_kernel_csum
; (i
< cryptocop_prio_no_prios
) && list_empty(&cryptocop_job_queues
[i
].jobs
); i
++);
2147 if (i
== cryptocop_prio_no_prios
) {
2148 spin_unlock_irqrestore(&cryptocop_job_queue_lock
, flags
);
2149 spin_unlock_irqrestore(&running_job_lock
, running_job_flags
);
2150 DEBUG(printk("cryptocop_start_job: no jobs to run\n"));
2151 return; /* No jobs to run */
2153 DEBUG(printk("starting job for prio %d\n", i
));
2155 /* TODO: Do not starve lower priority jobs. Let in a lower
2156 * prio job for every N-th processed higher prio job or some
2157 * other scheduling policy. This could reasonably be
2158 * tweakable since the optimal balance would depend on the
2159 * type of load on the system. */
2161 /* Pull the DMA lists from the job and start the DMA client. */
2162 pj
= list_entry(cryptocop_job_queues
[i
].jobs
.next
, struct cryptocop_prio_job
, node
);
2163 list_del(&pj
->node
);
2164 spin_unlock_irqrestore(&cryptocop_job_queue_lock
, flags
);
2165 cryptocop_running_job
= pj
;
2167 /* Set config register (3DES and CSUM modes). */
2168 switch (pj
->iop
->tdes_mode
){
2169 case cryptocop_3des_eee
:
2170 rw_cfg
.td1
= regk_strcop_e
;
2171 rw_cfg
.td2
= regk_strcop_e
;
2172 rw_cfg
.td3
= regk_strcop_e
;
2174 case cryptocop_3des_eed
:
2175 rw_cfg
.td1
= regk_strcop_e
;
2176 rw_cfg
.td2
= regk_strcop_e
;
2177 rw_cfg
.td3
= regk_strcop_d
;
2179 case cryptocop_3des_ede
:
2180 rw_cfg
.td1
= regk_strcop_e
;
2181 rw_cfg
.td2
= regk_strcop_d
;
2182 rw_cfg
.td3
= regk_strcop_e
;
2184 case cryptocop_3des_edd
:
2185 rw_cfg
.td1
= regk_strcop_e
;
2186 rw_cfg
.td2
= regk_strcop_d
;
2187 rw_cfg
.td3
= regk_strcop_d
;
2189 case cryptocop_3des_dee
:
2190 rw_cfg
.td1
= regk_strcop_d
;
2191 rw_cfg
.td2
= regk_strcop_e
;
2192 rw_cfg
.td3
= regk_strcop_e
;
2194 case cryptocop_3des_ded
:
2195 rw_cfg
.td1
= regk_strcop_d
;
2196 rw_cfg
.td2
= regk_strcop_e
;
2197 rw_cfg
.td3
= regk_strcop_d
;
2199 case cryptocop_3des_dde
:
2200 rw_cfg
.td1
= regk_strcop_d
;
2201 rw_cfg
.td2
= regk_strcop_d
;
2202 rw_cfg
.td3
= regk_strcop_e
;
2204 case cryptocop_3des_ddd
:
2205 rw_cfg
.td1
= regk_strcop_d
;
2206 rw_cfg
.td2
= regk_strcop_d
;
2207 rw_cfg
.td3
= regk_strcop_d
;
2210 DEBUG(printk("cryptocop_setup_dma_list: bad 3DES mode\n"));
2212 switch (pj
->iop
->csum_mode
){
2213 case cryptocop_csum_le
:
2214 rw_cfg
.ipend
= regk_strcop_little
;
2216 case cryptocop_csum_be
:
2217 rw_cfg
.ipend
= regk_strcop_big
;
2220 DEBUG(printk("cryptocop_setup_dma_list: bad checksum mode\n"));
2222 REG_WR(strcop
, regi_strcop
, rw_cfg
, rw_cfg
);
2224 DEBUG(printk("cryptocop_start_job: starting DMA, new cryptocop_running_job=0x%p\n"
2225 "ctx_in: 0x%p, phys: 0x%p\n"
2226 "ctx_out: 0x%p, phys: 0x%p\n",
2228 &pj
->iop
->ctx_in
, (char*)virt_to_phys(&pj
->iop
->ctx_in
),
2229 &pj
->iop
->ctx_out
, (char*)virt_to_phys(&pj
->iop
->ctx_out
)));
2231 /* Start input DMA. */
2232 flush_dma_context(&pj
->iop
->ctx_in
);
2233 DMA_START_CONTEXT(IN_DMA_INST
, virt_to_phys(&pj
->iop
->ctx_in
));
2235 /* Start output DMA. */
2236 DMA_START_CONTEXT(OUT_DMA_INST
, virt_to_phys(&pj
->iop
->ctx_out
));
2238 spin_unlock_irqrestore(&running_job_lock
, running_job_flags
);
2239 DEBUG(printk("cryptocop_start_job: exiting\n"));
2243 static int cryptocop_job_setup(struct cryptocop_prio_job
**pj
, struct cryptocop_operation
*operation
)
2246 int alloc_flag
= operation
->in_interrupt
? GFP_ATOMIC
: GFP_KERNEL
;
2247 void *iop_alloc_ptr
= NULL
;
2249 *pj
= kmalloc(sizeof (struct cryptocop_prio_job
), alloc_flag
);
2250 if (!*pj
) return -ENOMEM
;
2252 DEBUG(printk("cryptocop_job_setup: operation=0x%p\n", operation
));
2254 (*pj
)->oper
= operation
;
2255 DEBUG(printk("cryptocop_job_setup, cb=0x%p cb_data=0x%p\n", (*pj
)->oper
->cb
, (*pj
)->oper
->cb_data
));
2257 if (operation
->use_dmalists
) {
2258 DEBUG(print_user_dma_lists(&operation
->list_op
));
2259 if (!operation
->list_op
.inlist
|| !operation
->list_op
.outlist
|| !operation
->list_op
.out_data_buf
|| !operation
->list_op
.in_data_buf
){
2260 DEBUG_API(printk("cryptocop_job_setup: bad indata (use_dmalists)\n"));
2264 iop_alloc_ptr
= kmalloc(DESCR_ALLOC_PAD
+ sizeof(struct cryptocop_int_operation
), alloc_flag
);
2265 if (!iop_alloc_ptr
) {
2266 DEBUG_API(printk("cryptocop_job_setup: kmalloc cryptocop_int_operation\n"));
2270 (*pj
)->iop
= (struct cryptocop_int_operation
*)(((unsigned long int)(iop_alloc_ptr
+ DESCR_ALLOC_PAD
+ offsetof(struct cryptocop_int_operation
, ctx_out
)) & ~0x0000001F) - offsetof(struct cryptocop_int_operation
, ctx_out
));
2271 DEBUG(memset((*pj
)->iop
, 0xff, sizeof(struct cryptocop_int_operation
)));
2272 (*pj
)->iop
->alloc_ptr
= iop_alloc_ptr
;
2273 (*pj
)->iop
->sid
= operation
->sid
;
2274 (*pj
)->iop
->cdesc_out
= NULL
;
2275 (*pj
)->iop
->cdesc_in
= NULL
;
2276 (*pj
)->iop
->tdes_mode
= operation
->list_op
.tdes_mode
;
2277 (*pj
)->iop
->csum_mode
= operation
->list_op
.csum_mode
;
2278 (*pj
)->iop
->ddesc_out
= operation
->list_op
.outlist
;
2279 (*pj
)->iop
->ddesc_in
= operation
->list_op
.inlist
;
2281 /* Setup DMA contexts. */
2282 (*pj
)->iop
->ctx_out
.next
= NULL
;
2283 (*pj
)->iop
->ctx_out
.eol
= 1;
2284 (*pj
)->iop
->ctx_out
.saved_data
= operation
->list_op
.outlist
;
2285 (*pj
)->iop
->ctx_out
.saved_data_buf
= operation
->list_op
.out_data_buf
;
2287 (*pj
)->iop
->ctx_in
.next
= NULL
;
2288 (*pj
)->iop
->ctx_in
.eol
= 1;
2289 (*pj
)->iop
->ctx_in
.saved_data
= operation
->list_op
.inlist
;
2290 (*pj
)->iop
->ctx_in
.saved_data_buf
= operation
->list_op
.in_data_buf
;
2292 if ((err
= cryptocop_setup_dma_list(operation
, &(*pj
)->iop
, alloc_flag
))) {
2293 DEBUG_API(printk("cryptocop_job_setup: cryptocop_setup_dma_list failed %d\n", err
));
2298 DEBUG(print_dma_descriptors((*pj
)->iop
));
2300 DEBUG(printk("cryptocop_job_setup, DMA list setup successful\n"));
2305 static int cryptocop_open(struct inode
*inode
, struct file
*filp
)
2307 int p
= iminor(inode
);
2309 if (p
!= CRYPTOCOP_MINOR
) return -EINVAL
;
2311 filp
->private_data
= NULL
;
2316 static int cryptocop_release(struct inode
*inode
, struct file
*filp
)
2318 struct cryptocop_private
*dev
= filp
->private_data
;
2319 struct cryptocop_private
*dev_next
;
2322 dev_next
= dev
->next
;
2323 if (dev
->sid
!= CRYPTOCOP_SESSION_ID_NONE
) {
2324 (void)cryptocop_free_session(dev
->sid
);
2334 static int cryptocop_ioctl_close_session(struct inode
*inode
, struct file
*filp
,
2335 unsigned int cmd
, unsigned long arg
)
2337 struct cryptocop_private
*dev
= filp
->private_data
;
2338 struct cryptocop_private
*prev_dev
= NULL
;
2339 struct strcop_session_op
*sess_op
= (struct strcop_session_op
*)arg
;
2340 struct strcop_session_op sop
;
2343 DEBUG(printk("cryptocop_ioctl_close_session\n"));
2345 if (!access_ok(VERIFY_READ
, sess_op
, sizeof(struct strcop_session_op
)))
2347 err
= copy_from_user(&sop
, sess_op
, sizeof(struct strcop_session_op
));
2348 if (err
) return -EFAULT
;
2350 while (dev
&& (dev
->sid
!= sop
.ses_id
)) {
2356 prev_dev
->next
= dev
->next
;
2358 filp
->private_data
= dev
->next
;
2360 err
= cryptocop_free_session(dev
->sid
);
2361 if (err
) return -EFAULT
;
2363 DEBUG_API(printk("cryptocop_ioctl_close_session: session %lld not found\n", sop
.ses_id
));
2370 static void ioctl_process_job_callback(struct cryptocop_operation
*op
, void*cb_data
)
2372 struct ioctl_job_cb_ctx
*jc
= (struct ioctl_job_cb_ctx
*)cb_data
;
2374 DEBUG(printk("ioctl_process_job_callback: op=0x%p, cb_data=0x%p\n", op
, cb_data
));
2377 wake_up(&cryptocop_ioc_process_wq
);
2381 #define CRYPTOCOP_IOCTL_CIPHER_TID (1)
2382 #define CRYPTOCOP_IOCTL_DIGEST_TID (2)
2383 #define CRYPTOCOP_IOCTL_CSUM_TID (3)
2385 static size_t first_cfg_change_ix(struct strcop_crypto_op
*crp_op
)
2389 if (crp_op
->do_cipher
) ch_ix
= crp_op
->cipher_start
;
2390 if (crp_op
->do_digest
&& (crp_op
->digest_start
< ch_ix
)) ch_ix
= crp_op
->digest_start
;
2391 if (crp_op
->do_csum
&& (crp_op
->csum_start
< ch_ix
)) ch_ix
= crp_op
->csum_start
;
2393 DEBUG(printk("first_cfg_change_ix: ix=%d\n", ch_ix
));
2398 static size_t next_cfg_change_ix(struct strcop_crypto_op
*crp_op
, size_t ix
)
2400 size_t ch_ix
= INT_MAX
;
2403 if (crp_op
->do_cipher
&& ((crp_op
->cipher_start
+ crp_op
->cipher_len
) > ix
)){
2404 if (crp_op
->cipher_start
> ix
) {
2405 ch_ix
= crp_op
->cipher_start
;
2407 ch_ix
= crp_op
->cipher_start
+ crp_op
->cipher_len
;
2410 if (crp_op
->do_digest
&& ((crp_op
->digest_start
+ crp_op
->digest_len
) > ix
)){
2411 if (crp_op
->digest_start
> ix
) {
2412 tmp_ix
= crp_op
->digest_start
;
2414 tmp_ix
= crp_op
->digest_start
+ crp_op
->digest_len
;
2416 if (tmp_ix
< ch_ix
) ch_ix
= tmp_ix
;
2418 if (crp_op
->do_csum
&& ((crp_op
->csum_start
+ crp_op
->csum_len
) > ix
)){
2419 if (crp_op
->csum_start
> ix
) {
2420 tmp_ix
= crp_op
->csum_start
;
2422 tmp_ix
= crp_op
->csum_start
+ crp_op
->csum_len
;
2424 if (tmp_ix
< ch_ix
) ch_ix
= tmp_ix
;
2426 if (ch_ix
== INT_MAX
) ch_ix
= ix
;
2427 DEBUG(printk("next_cfg_change_ix prev ix=%d, next ix=%d\n", ix
, ch_ix
));
2432 /* Map map_length bytes from the pages starting on *pageix and *pageoffset to iovecs starting on *iovix.
2433 * Return -1 for ok, 0 for fail. */
2434 static int map_pages_to_iovec(struct iovec
*iov
, int iovlen
, int *iovix
, struct page
**pages
, int nopages
, int *pageix
, int *pageoffset
, int map_length
)
2438 assert(iov
!= NULL
);
2439 assert(iovix
!= NULL
);
2440 assert(pages
!= NULL
);
2441 assert(pageix
!= NULL
);
2442 assert(pageoffset
!= NULL
);
2444 DEBUG(printk("map_pages_to_iovec, map_length=%d, iovlen=%d, *iovix=%d, nopages=%d, *pageix=%d, *pageoffset=%d\n", map_length
, iovlen
, *iovix
, nopages
, *pageix
, *pageoffset
));
2446 while (map_length
> 0){
2447 DEBUG(printk("map_pages_to_iovec, map_length=%d, iovlen=%d, *iovix=%d, nopages=%d, *pageix=%d, *pageoffset=%d\n", map_length
, iovlen
, *iovix
, nopages
, *pageix
, *pageoffset
));
2448 if (*iovix
>= iovlen
){
2449 DEBUG_API(printk("map_page_to_iovec: *iovix=%d >= iovlen=%d\n", *iovix
, iovlen
));
2452 if (*pageix
>= nopages
){
2453 DEBUG_API(printk("map_page_to_iovec: *pageix=%d >= nopages=%d\n", *pageix
, nopages
));
2456 iov
[*iovix
].iov_base
= (unsigned char*)page_address(pages
[*pageix
]) + *pageoffset
;
2457 tmplen
= PAGE_SIZE
- *pageoffset
;
2458 if (tmplen
< map_length
){
2462 tmplen
= map_length
;
2463 (*pageoffset
) += map_length
;
2465 DEBUG(printk("mapping %d bytes from page %d (or %d) to iovec %d\n", tmplen
, *pageix
, *pageix
-1, *iovix
));
2466 iov
[*iovix
].iov_len
= tmplen
;
2467 map_length
-= tmplen
;
2470 DEBUG(printk("map_page_to_iovec, exit, *iovix=%d\n", *iovix
));
2476 static int cryptocop_ioctl_process(struct inode
*inode
, struct file
*filp
, unsigned int cmd
, unsigned long arg
)
2479 struct cryptocop_private
*dev
= filp
->private_data
;
2480 struct strcop_crypto_op
*crp_oper
= (struct strcop_crypto_op
*)arg
;
2481 struct strcop_crypto_op oper
= {0};
2483 struct cryptocop_operation
*cop
= NULL
;
2485 struct ioctl_job_cb_ctx
*jc
= NULL
;
2487 struct page
**inpages
= NULL
;
2488 struct page
**outpages
= NULL
;
2492 struct cryptocop_desc descs
[5]; /* Max 5 descriptors are needed, there are three transforms that
2493 * can get connected/disconnected on different places in the indata. */
2494 struct cryptocop_desc_cfg dcfgs
[5*3];
2497 struct cryptocop_tfrm_cfg ciph_tcfg
= {0};
2498 struct cryptocop_tfrm_cfg digest_tcfg
= {0};
2499 struct cryptocop_tfrm_cfg csum_tcfg
= {0};
2501 unsigned char *digest_result
= NULL
;
2502 int digest_length
= 0;
2504 unsigned char csum_result
[CSUM_BLOCK_LENGTH
];
2505 struct cryptocop_session
*sess
;
2515 int cipher_active
, digest_active
, csum_active
;
2516 int end_digest
, end_csum
;
2517 int digest_done
= 0;
2518 int cipher_done
= 0;
2521 DEBUG(printk("cryptocop_ioctl_process\n"));
2523 if (!access_ok(VERIFY_WRITE
, crp_oper
, sizeof(struct strcop_crypto_op
))){
2524 DEBUG_API(printk("cryptocop_ioctl_process: !access_ok crp_oper!\n"));
2527 if (copy_from_user(&oper
, crp_oper
, sizeof(struct strcop_crypto_op
))) {
2528 DEBUG_API(printk("cryptocop_ioctl_process: copy_from_user\n"));
2531 DEBUG(print_strcop_crypto_op(&oper
));
2533 while (dev
&& dev
->sid
!= oper
.ses_id
) dev
= dev
->next
;
2535 DEBUG_API(printk("cryptocop_ioctl_process: session %lld not found\n", oper
.ses_id
));
2539 /* Check buffers. */
2540 if (((oper
.indata
+ oper
.inlen
) < oper
.indata
) || ((oper
.cipher_outdata
+ oper
.cipher_outlen
) < oper
.cipher_outdata
)){
2541 DEBUG_API(printk("cryptocop_ioctl_process: user buffers wrapped around, bad user!\n"));
2545 if (!access_ok(VERIFY_WRITE
, oper
.cipher_outdata
, oper
.cipher_outlen
)){
2546 DEBUG_API(printk("cryptocop_ioctl_process: !access_ok out data!\n"));
2549 if (!access_ok(VERIFY_READ
, oper
.indata
, oper
.inlen
)){
2550 DEBUG_API(printk("cryptocop_ioctl_process: !access_ok in data!\n"));
2554 cop
= kmalloc(sizeof(struct cryptocop_operation
), GFP_KERNEL
);
2556 DEBUG_API(printk("cryptocop_ioctl_process: kmalloc\n"));
2559 jc
= kmalloc(sizeof(struct ioctl_job_cb_ctx
), GFP_KERNEL
);
2561 DEBUG_API(printk("cryptocop_ioctl_process: kmalloc\n"));
2568 cop
->cb
= ioctl_process_job_callback
;
2569 cop
->operation_status
= 0;
2570 cop
->use_dmalists
= 0;
2571 cop
->in_interrupt
= 0;
2572 cop
->fast_callback
= 0;
2573 cop
->tfrm_op
.tfrm_cfg
= NULL
;
2574 cop
->tfrm_op
.desc
= NULL
;
2575 cop
->tfrm_op
.indata
= NULL
;
2576 cop
->tfrm_op
.incount
= 0;
2577 cop
->tfrm_op
.inlen
= 0;
2578 cop
->tfrm_op
.outdata
= NULL
;
2579 cop
->tfrm_op
.outcount
= 0;
2580 cop
->tfrm_op
.outlen
= 0;
2582 sess
= get_session(oper
.ses_id
);
2584 DEBUG_API(printk("cryptocop_ioctl_process: bad session id.\n"));
2590 if (oper
.do_cipher
) {
2591 unsigned int cipher_outlen
= 0;
2592 struct cryptocop_transform_ctx
*tc
= get_transform_ctx(sess
, CRYPTOCOP_IOCTL_CIPHER_TID
);
2594 DEBUG_API(printk("cryptocop_ioctl_process: no cipher transform in session.\n"));
2598 ciph_tcfg
.tid
= CRYPTOCOP_IOCTL_CIPHER_TID
;
2599 ciph_tcfg
.inject_ix
= 0;
2600 ciph_tcfg
.flags
= 0;
2601 if ((oper
.cipher_start
< 0) || (oper
.cipher_len
<= 0) || (oper
.cipher_start
> oper
.inlen
) || ((oper
.cipher_start
+ oper
.cipher_len
) > oper
.inlen
)){
2602 DEBUG_API(printk("cryptocop_ioctl_process: bad cipher length\n"));
2607 cblocklen
= tc
->init
.alg
== cryptocop_alg_aes
? AES_BLOCK_LENGTH
: DES_BLOCK_LENGTH
;
2608 if (oper
.cipher_len
% cblocklen
) {
2611 DEBUG_API(printk("cryptocop_ioctl_process: cipher inlength not multiple of block length.\n"));
2614 cipher_outlen
= oper
.cipher_len
;
2615 if (tc
->init
.cipher_mode
== cryptocop_cipher_mode_cbc
){
2616 if (oper
.cipher_explicit
) {
2617 ciph_tcfg
.flags
|= CRYPTOCOP_EXPLICIT_IV
;
2618 memcpy(ciph_tcfg
.iv
, oper
.cipher_iv
, cblocklen
);
2620 cipher_outlen
= oper
.cipher_len
- cblocklen
;
2623 if (oper
.cipher_explicit
){
2626 DEBUG_API(printk("cryptocop_ioctl_process: explicit_iv when not CBC mode\n"));
2630 if (oper
.cipher_outlen
!= cipher_outlen
) {
2633 DEBUG_API(printk("cryptocop_ioctl_process: cipher_outlen incorrect, should be %d not %d.\n", cipher_outlen
, oper
.cipher_outlen
));
2638 ciph_tcfg
.flags
|= CRYPTOCOP_DECRYPT
;
2640 ciph_tcfg
.flags
|= CRYPTOCOP_ENCRYPT
;
2642 ciph_tcfg
.next
= cop
->tfrm_op
.tfrm_cfg
;
2643 cop
->tfrm_op
.tfrm_cfg
= &ciph_tcfg
;
2645 if (oper
.do_digest
){
2646 struct cryptocop_transform_ctx
*tc
= get_transform_ctx(sess
, CRYPTOCOP_IOCTL_DIGEST_TID
);
2648 DEBUG_API(printk("cryptocop_ioctl_process: no digest transform in session.\n"));
2652 digest_length
= tc
->init
.alg
== cryptocop_alg_md5
? 16 : 20;
2653 digest_result
= kmalloc(digest_length
, GFP_KERNEL
);
2654 if (!digest_result
) {
2655 DEBUG_API(printk("cryptocop_ioctl_process: kmalloc digest_result\n"));
2659 DEBUG(memset(digest_result
, 0xff, digest_length
));
2661 digest_tcfg
.tid
= CRYPTOCOP_IOCTL_DIGEST_TID
;
2662 digest_tcfg
.inject_ix
= 0;
2663 ciph_tcfg
.inject_ix
+= digest_length
;
2664 if ((oper
.digest_start
< 0) || (oper
.digest_len
<= 0) || (oper
.digest_start
> oper
.inlen
) || ((oper
.digest_start
+ oper
.digest_len
) > oper
.inlen
)){
2665 DEBUG_API(printk("cryptocop_ioctl_process: bad digest length\n"));
2670 digest_tcfg
.next
= cop
->tfrm_op
.tfrm_cfg
;
2671 cop
->tfrm_op
.tfrm_cfg
= &digest_tcfg
;
2674 csum_tcfg
.tid
= CRYPTOCOP_IOCTL_CSUM_TID
;
2675 csum_tcfg
.inject_ix
= digest_length
;
2676 ciph_tcfg
.inject_ix
+= 2;
2678 if ((oper
.csum_start
< 0) || (oper
.csum_len
<= 0) || (oper
.csum_start
> oper
.inlen
) || ((oper
.csum_start
+ oper
.csum_len
) > oper
.inlen
)){
2679 DEBUG_API(printk("cryptocop_ioctl_process: bad csum length\n"));
2685 csum_tcfg
.next
= cop
->tfrm_op
.tfrm_cfg
;
2686 cop
->tfrm_op
.tfrm_cfg
= &csum_tcfg
;
2689 prev_ix
= first_cfg_change_ix(&oper
);
2690 if (prev_ix
> oper
.inlen
) {
2691 DEBUG_API(printk("cryptocop_ioctl_process: length mismatch\n"));
2692 nooutpages
= noinpages
= 0;
2696 DEBUG(printk("cryptocop_ioctl_process: inlen=%d, cipher_outlen=%d\n", oper
.inlen
, oper
.cipher_outlen
));
2698 /* Map user pages for in and out data of the operation. */
2699 noinpages
= (((unsigned long int)(oper
.indata
+ prev_ix
) & ~PAGE_MASK
) + oper
.inlen
- 1 - prev_ix
+ ~PAGE_MASK
) >> PAGE_SHIFT
;
2700 DEBUG(printk("cryptocop_ioctl_process: noinpages=%d\n", noinpages
));
2701 inpages
= kmalloc(noinpages
* sizeof(struct page
*), GFP_KERNEL
);
2703 DEBUG_API(printk("cryptocop_ioctl_process: kmalloc inpages\n"));
2704 nooutpages
= noinpages
= 0;
2708 if (oper
.do_cipher
){
2709 nooutpages
= (((unsigned long int)oper
.cipher_outdata
& ~PAGE_MASK
) + oper
.cipher_outlen
- 1 + ~PAGE_MASK
) >> PAGE_SHIFT
;
2710 DEBUG(printk("cryptocop_ioctl_process: nooutpages=%d\n", nooutpages
));
2711 outpages
= kmalloc(nooutpages
* sizeof(struct page
*), GFP_KERNEL
);
2713 DEBUG_API(printk("cryptocop_ioctl_process: kmalloc outpages\n"));
2714 nooutpages
= noinpages
= 0;
2720 err
= get_user_pages_fast((unsigned long)(oper
.indata
+ prev_ix
),
2722 false, /* read access only for in data */
2726 nooutpages
= noinpages
= 0;
2727 DEBUG_API(printk("cryptocop_ioctl_process: get_user_pages indata\n"));
2731 if (oper
.do_cipher
) {
2732 err
= get_user_pages_fast((unsigned long)oper
.cipher_outdata
,
2734 true, /* write access for out data */
2738 DEBUG_API(printk("cryptocop_ioctl_process: get_user_pages outdata\n"));
2744 /* Add 6 to nooutpages to make room for possibly inserted buffers for storing digest and
2745 * csum output and splits when units are (dis-)connected. */
2746 cop
->tfrm_op
.indata
= kmalloc((noinpages
) * sizeof(struct iovec
), GFP_KERNEL
);
2747 cop
->tfrm_op
.outdata
= kmalloc((6 + nooutpages
) * sizeof(struct iovec
), GFP_KERNEL
);
2748 if (!cop
->tfrm_op
.indata
|| !cop
->tfrm_op
.outdata
) {
2749 DEBUG_API(printk("cryptocop_ioctl_process: kmalloc iovecs\n"));
2754 cop
->tfrm_op
.inlen
= oper
.inlen
- prev_ix
;
2755 cop
->tfrm_op
.outlen
= 0;
2756 if (oper
.do_cipher
) cop
->tfrm_op
.outlen
+= oper
.cipher_outlen
;
2757 if (oper
.do_digest
) cop
->tfrm_op
.outlen
+= digest_length
;
2758 if (oper
.do_csum
) cop
->tfrm_op
.outlen
+= 2;
2760 /* Setup the in iovecs. */
2761 cop
->tfrm_op
.incount
= noinpages
;
2763 size_t tmplen
= cop
->tfrm_op
.inlen
;
2765 cop
->tfrm_op
.indata
[0].iov_len
= PAGE_SIZE
- ((unsigned long int)(oper
.indata
+ prev_ix
) & ~PAGE_MASK
);
2766 cop
->tfrm_op
.indata
[0].iov_base
= (unsigned char*)page_address(inpages
[0]) + ((unsigned long int)(oper
.indata
+ prev_ix
) & ~PAGE_MASK
);
2767 tmplen
-= cop
->tfrm_op
.indata
[0].iov_len
;
2768 for (i
= 1; i
<noinpages
; i
++){
2769 cop
->tfrm_op
.indata
[i
].iov_len
= tmplen
< PAGE_SIZE
? tmplen
: PAGE_SIZE
;
2770 cop
->tfrm_op
.indata
[i
].iov_base
= (unsigned char*)page_address(inpages
[i
]);
2771 tmplen
-= PAGE_SIZE
;
2774 cop
->tfrm_op
.indata
[0].iov_len
= oper
.inlen
- prev_ix
;
2775 cop
->tfrm_op
.indata
[0].iov_base
= (unsigned char*)page_address(inpages
[0]) + ((unsigned long int)(oper
.indata
+ prev_ix
) & ~PAGE_MASK
);
2778 iovlen
= nooutpages
+ 6;
2779 pageoffset
= oper
.do_cipher
? ((unsigned long int)oper
.cipher_outdata
& ~PAGE_MASK
) : 0;
2781 next_ix
= next_cfg_change_ix(&oper
, prev_ix
);
2782 if (prev_ix
== next_ix
){
2783 DEBUG_API(printk("cryptocop_ioctl_process: length configuration broken.\n"));
2784 err
= -EINVAL
; /* This should be impossible barring bugs. */
2787 while (prev_ix
!= next_ix
){
2788 end_digest
= end_csum
= cipher_active
= digest_active
= csum_active
= 0;
2789 descs
[desc_ix
].cfg
= NULL
;
2790 descs
[desc_ix
].length
= next_ix
- prev_ix
;
2792 if (oper
.do_cipher
&& (oper
.cipher_start
< next_ix
) && (prev_ix
< (oper
.cipher_start
+ oper
.cipher_len
))) {
2793 dcfgs
[dcfg_ix
].tid
= CRYPTOCOP_IOCTL_CIPHER_TID
;
2794 dcfgs
[dcfg_ix
].src
= cryptocop_source_dma
;
2797 if (next_ix
== (oper
.cipher_start
+ oper
.cipher_len
)){
2799 dcfgs
[dcfg_ix
].last
= 1;
2801 dcfgs
[dcfg_ix
].last
= 0;
2803 dcfgs
[dcfg_ix
].next
= descs
[desc_ix
].cfg
;
2804 descs
[desc_ix
].cfg
= &dcfgs
[dcfg_ix
];
2807 if (oper
.do_digest
&& (oper
.digest_start
< next_ix
) && (prev_ix
< (oper
.digest_start
+ oper
.digest_len
))) {
2809 dcfgs
[dcfg_ix
].tid
= CRYPTOCOP_IOCTL_DIGEST_TID
;
2810 dcfgs
[dcfg_ix
].src
= cryptocop_source_dma
;
2811 if (next_ix
== (oper
.digest_start
+ oper
.digest_len
)){
2812 assert(!digest_done
);
2814 dcfgs
[dcfg_ix
].last
= 1;
2816 dcfgs
[dcfg_ix
].last
= 0;
2818 dcfgs
[dcfg_ix
].next
= descs
[desc_ix
].cfg
;
2819 descs
[desc_ix
].cfg
= &dcfgs
[dcfg_ix
];
2822 if (oper
.do_csum
&& (oper
.csum_start
< next_ix
) && (prev_ix
< (oper
.csum_start
+ oper
.csum_len
))){
2824 dcfgs
[dcfg_ix
].tid
= CRYPTOCOP_IOCTL_CSUM_TID
;
2825 dcfgs
[dcfg_ix
].src
= cryptocop_source_dma
;
2826 if (next_ix
== (oper
.csum_start
+ oper
.csum_len
)){
2828 dcfgs
[dcfg_ix
].last
= 1;
2830 dcfgs
[dcfg_ix
].last
= 0;
2832 dcfgs
[dcfg_ix
].next
= descs
[desc_ix
].cfg
;
2833 descs
[desc_ix
].cfg
= &dcfgs
[dcfg_ix
];
2836 if (!descs
[desc_ix
].cfg
){
2837 DEBUG_API(printk("cryptocop_ioctl_process: data segment %d (%d to %d) had no active transforms\n", desc_ix
, prev_ix
, next_ix
));
2841 descs
[desc_ix
].next
= &(descs
[desc_ix
]) + 1;
2844 next_ix
= next_cfg_change_ix(&oper
, prev_ix
);
2847 descs
[desc_ix
-1].next
= NULL
;
2849 descs
[0].next
= NULL
;
2851 if (oper
.do_digest
) {
2852 DEBUG(printk("cryptocop_ioctl_process: mapping %d byte digest output to iovec %d\n", digest_length
, iovix
));
2853 /* Add outdata iovec, length == <length of type of digest> */
2854 cop
->tfrm_op
.outdata
[iovix
].iov_base
= digest_result
;
2855 cop
->tfrm_op
.outdata
[iovix
].iov_len
= digest_length
;
2859 /* Add outdata iovec, length == 2, the length of csum. */
2860 DEBUG(printk("cryptocop_ioctl_process: mapping 2 byte csum output to iovec %d\n", iovix
));
2861 /* Add outdata iovec, length == <length of type of digest> */
2862 cop
->tfrm_op
.outdata
[iovix
].iov_base
= csum_result
;
2863 cop
->tfrm_op
.outdata
[iovix
].iov_len
= 2;
2866 if (oper
.do_cipher
) {
2867 if (!map_pages_to_iovec(cop
->tfrm_op
.outdata
, iovlen
, &iovix
, outpages
, nooutpages
, &pageix
, &pageoffset
, oper
.cipher_outlen
)){
2868 DEBUG_API(printk("cryptocop_ioctl_process: failed to map pages to iovec.\n"));
2869 err
= -ENOSYS
; /* This should be impossible barring bugs. */
2873 DEBUG(printk("cryptocop_ioctl_process: setting cop->tfrm_op.outcount %d\n", iovix
));
2874 cop
->tfrm_op
.outcount
= iovix
;
2875 assert(iovix
<= (nooutpages
+ 6));
2877 cop
->sid
= oper
.ses_id
;
2878 cop
->tfrm_op
.desc
= &descs
[0];
2880 DEBUG(printk("cryptocop_ioctl_process: inserting job, cb_data=0x%p\n", cop
->cb_data
));
2882 if ((err
= cryptocop_job_queue_insert_user_job(cop
)) != 0) {
2883 DEBUG_API(printk("cryptocop_ioctl_process: insert job %d\n", err
));
2888 DEBUG(printk("cryptocop_ioctl_process: begin wait for result\n"));
2890 wait_event(cryptocop_ioc_process_wq
, (jc
->processed
!= 0));
2891 DEBUG(printk("cryptocop_ioctl_process: end wait for result\n"));
2892 if (!jc
->processed
){
2893 printk(KERN_WARNING
"cryptocop_ioctl_process: job not processed at completion\n");
2898 /* Job process done. Cipher output should already be correct in job so no post processing of outdata. */
2899 DEBUG(printk("cryptocop_ioctl_process: operation_status = %d\n", cop
->operation_status
));
2900 if (cop
->operation_status
== 0){
2901 if (oper
.do_digest
){
2902 DEBUG(printk("cryptocop_ioctl_process: copy %d bytes digest to user\n", digest_length
));
2903 err
= copy_to_user((unsigned char*)crp_oper
+ offsetof(struct strcop_crypto_op
, digest
), digest_result
, digest_length
);
2905 DEBUG_API(printk("cryptocop_ioctl_process: copy_to_user, digest length %d, err %d\n", digest_length
, err
));
2911 DEBUG(printk("cryptocop_ioctl_process: copy 2 bytes checksum to user\n"));
2912 err
= copy_to_user((unsigned char*)crp_oper
+ offsetof(struct strcop_crypto_op
, csum
), csum_result
, 2);
2914 DEBUG_API(printk("cryptocop_ioctl_process: copy_to_user, csum, err %d\n", err
));
2921 DEBUG(printk("cryptocop_ioctl_process: returning err = operation_status = %d\n", cop
->operation_status
));
2922 err
= cop
->operation_status
;
2926 /* Release page caches. */
2927 for (i
= 0; i
< noinpages
; i
++){
2928 put_page(inpages
[i
]);
2930 for (i
= 0; i
< nooutpages
; i
++){
2932 /* Mark output pages dirty. */
2933 spdl_err
= set_page_dirty_lock(outpages
[i
]);
2934 DEBUG(if (spdl_err
< 0)printk("cryptocop_ioctl_process: set_page_dirty_lock returned %d\n", spdl_err
));
2936 for (i
= 0; i
< nooutpages
; i
++){
2937 put_page(outpages
[i
]);
2940 kfree(digest_result
);
2944 kfree(cop
->tfrm_op
.indata
);
2945 kfree(cop
->tfrm_op
.outdata
);
2950 DEBUG(print_lock_status());
2956 static int cryptocop_ioctl_create_session(struct inode
*inode
, struct file
*filp
, unsigned int cmd
, unsigned long arg
)
2958 cryptocop_session_id sid
;
2960 struct cryptocop_private
*dev
;
2961 struct strcop_session_op
*sess_op
= (struct strcop_session_op
*)arg
;
2962 struct strcop_session_op sop
;
2963 struct cryptocop_transform_init
*tis
= NULL
;
2964 struct cryptocop_transform_init ti_cipher
= {0};
2965 struct cryptocop_transform_init ti_digest
= {0};
2966 struct cryptocop_transform_init ti_csum
= {0};
2968 if (!access_ok(VERIFY_WRITE
, sess_op
, sizeof(struct strcop_session_op
)))
2970 err
= copy_from_user(&sop
, sess_op
, sizeof(struct strcop_session_op
));
2971 if (err
) return -EFAULT
;
2972 if (sop
.cipher
!= cryptocop_cipher_none
) {
2973 if (!access_ok(VERIFY_READ
, sop
.key
, sop
.keylen
)) return -EFAULT
;
2975 DEBUG(printk("cryptocop_ioctl_create_session, sess_op:\n"));
2977 DEBUG(printk("\tcipher:%d\n"
2978 "\tcipher_mode:%d\n"
2986 if (sop
.cipher
!= cryptocop_cipher_none
){
2987 /* Init the cipher. */
2988 switch (sop
.cipher
){
2989 case cryptocop_cipher_des
:
2990 ti_cipher
.alg
= cryptocop_alg_des
;
2992 case cryptocop_cipher_3des
:
2993 ti_cipher
.alg
= cryptocop_alg_3des
;
2995 case cryptocop_cipher_aes
:
2996 ti_cipher
.alg
= cryptocop_alg_aes
;
2999 DEBUG_API(printk("create session, bad cipher algorithm %d\n", sop
.cipher
));
3002 DEBUG(printk("setting cipher transform %d\n", ti_cipher
.alg
));
3003 copy_from_user(ti_cipher
.key
, sop
.key
, sop
.keylen
/8);
3004 ti_cipher
.keylen
= sop
.keylen
;
3006 case cryptocop_cipher_mode_cbc
:
3007 case cryptocop_cipher_mode_ecb
:
3008 ti_cipher
.cipher_mode
= sop
.cmode
;
3011 DEBUG_API(printk("create session, bad cipher mode %d\n", sop
.cmode
));
3014 DEBUG(printk("cryptocop_ioctl_create_session: setting CBC mode %d\n", ti_cipher
.cipher_mode
));
3015 switch (sop
.des3_mode
){
3016 case cryptocop_3des_eee
:
3017 case cryptocop_3des_eed
:
3018 case cryptocop_3des_ede
:
3019 case cryptocop_3des_edd
:
3020 case cryptocop_3des_dee
:
3021 case cryptocop_3des_ded
:
3022 case cryptocop_3des_dde
:
3023 case cryptocop_3des_ddd
:
3024 ti_cipher
.tdes_mode
= sop
.des3_mode
;
3027 DEBUG_API(printk("create session, bad 3DES mode %d\n", sop
.des3_mode
));
3030 ti_cipher
.tid
= CRYPTOCOP_IOCTL_CIPHER_TID
;
3031 ti_cipher
.next
= tis
;
3033 } /* if (sop.cipher != cryptocop_cipher_none) */
3034 if (sop
.digest
!= cryptocop_digest_none
){
3035 DEBUG(printk("setting digest transform\n"));
3036 switch (sop
.digest
){
3037 case cryptocop_digest_md5
:
3038 ti_digest
.alg
= cryptocop_alg_md5
;
3040 case cryptocop_digest_sha1
:
3041 ti_digest
.alg
= cryptocop_alg_sha1
;
3044 DEBUG_API(printk("create session, bad digest algorithm %d\n", sop
.digest
));
3047 ti_digest
.tid
= CRYPTOCOP_IOCTL_DIGEST_TID
;
3048 ti_digest
.next
= tis
;
3050 } /* if (sop.digest != cryptocop_digest_none) */
3051 if (sop
.csum
!= cryptocop_csum_none
){
3052 DEBUG(printk("setting csum transform\n"));
3054 case cryptocop_csum_le
:
3055 case cryptocop_csum_be
:
3056 ti_csum
.csum_mode
= sop
.csum
;
3059 DEBUG_API(printk("create session, bad checksum algorithm %d\n", sop
.csum
));
3062 ti_csum
.alg
= cryptocop_alg_csum
;
3063 ti_csum
.tid
= CRYPTOCOP_IOCTL_CSUM_TID
;
3066 } /* (sop.csum != cryptocop_csum_none) */
3067 dev
= kmalloc(sizeof(struct cryptocop_private
), GFP_KERNEL
);
3069 DEBUG_API(printk("create session, alloc dev\n"));
3073 err
= cryptocop_new_session(&sid
, tis
, GFP_KERNEL
);
3074 DEBUG({ if (err
) printk("create session, cryptocop_new_session %d\n", err
);});
3080 sess_op
->ses_id
= sid
;
3082 dev
->next
= filp
->private_data
;
3083 filp
->private_data
= dev
;
3088 static long cryptocop_ioctl_unlocked(struct inode
*inode
,
3089 struct file
*filp
, unsigned int cmd
, unsigned long arg
)
3092 if (_IOC_TYPE(cmd
) != ETRAXCRYPTOCOP_IOCTYPE
) {
3093 DEBUG_API(printk("cryptocop_ioctl: wrong type\n"));
3096 if (_IOC_NR(cmd
) > CRYPTOCOP_IO_MAXNR
){
3099 /* Access check of the argument. Some commands, e.g. create session and process op,
3100 needs additional checks. Those are handled in the command handling functions. */
3101 if (_IOC_DIR(cmd
) & _IOC_READ
)
3102 err
= !access_ok(VERIFY_WRITE
, (void *)arg
, _IOC_SIZE(cmd
));
3103 else if (_IOC_DIR(cmd
) & _IOC_WRITE
)
3104 err
= !access_ok(VERIFY_READ
, (void *)arg
, _IOC_SIZE(cmd
));
3105 if (err
) return -EFAULT
;
3108 case CRYPTOCOP_IO_CREATE_SESSION
:
3109 return cryptocop_ioctl_create_session(inode
, filp
, cmd
, arg
);
3110 case CRYPTOCOP_IO_CLOSE_SESSION
:
3111 return cryptocop_ioctl_close_session(inode
, filp
, cmd
, arg
);
3112 case CRYPTOCOP_IO_PROCESS_OP
:
3113 return cryptocop_ioctl_process(inode
, filp
, cmd
, arg
);
3115 DEBUG_API(printk("cryptocop_ioctl: unknown command\n"));
3122 cryptocop_ioctl(struct file
*filp
, unsigned int cmd
, unsigned long arg
)
3126 mutex_lock(&cryptocop_mutex
);
3127 ret
= cryptocop_ioctl_unlocked(file_inode(filp
), filp
, cmd
, arg
);
3128 mutex_unlock(&cryptocop_mutex
);
3135 static void print_dma_descriptors(struct cryptocop_int_operation
*iop
)
3137 struct cryptocop_dma_desc
*cdesc_out
= iop
->cdesc_out
;
3138 struct cryptocop_dma_desc
*cdesc_in
= iop
->cdesc_in
;
3141 printk("print_dma_descriptors start\n");
3144 printk("\tsid: 0x%llx\n", iop
->sid
);
3146 printk("\tcdesc_out: 0x%p\n", iop
->cdesc_out
);
3147 printk("\tcdesc_in: 0x%p\n", iop
->cdesc_in
);
3148 printk("\tddesc_out: 0x%p\n", iop
->ddesc_out
);
3149 printk("\tddesc_in: 0x%p\n", iop
->ddesc_in
);
3151 printk("\niop->ctx_out: 0x%p phys: 0x%p\n", &iop
->ctx_out
, (char*)virt_to_phys(&iop
->ctx_out
));
3152 printk("\tnext: 0x%p\n"
3153 "\tsaved_data: 0x%p\n"
3154 "\tsaved_data_buf: 0x%p\n",
3156 iop
->ctx_out
.saved_data
,
3157 iop
->ctx_out
.saved_data_buf
);
3159 printk("\niop->ctx_in: 0x%p phys: 0x%p\n", &iop
->ctx_in
, (char*)virt_to_phys(&iop
->ctx_in
));
3160 printk("\tnext: 0x%p\n"
3161 "\tsaved_data: 0x%p\n"
3162 "\tsaved_data_buf: 0x%p\n",
3164 iop
->ctx_in
.saved_data
,
3165 iop
->ctx_in
.saved_data_buf
);
3170 printk("cdesc_out %d, desc=0x%p\n", i
, cdesc_out
->dma_descr
);
3171 printk("\n\tvirt_to_phys(desc): 0x%p\n", (char*)virt_to_phys(cdesc_out
->dma_descr
));
3172 td
= cdesc_out
->dma_descr
;
3173 printk("\n\tbuf: 0x%p\n"
3192 cdesc_out
= cdesc_out
->next
;
3198 printk("cdesc_in %d, desc=0x%p\n", i
, cdesc_in
->dma_descr
);
3199 printk("\n\tvirt_to_phys(desc): 0x%p\n", (char*)virt_to_phys(cdesc_in
->dma_descr
));
3200 td
= cdesc_in
->dma_descr
;
3201 printk("\n\tbuf: 0x%p\n"
3220 cdesc_in
= cdesc_in
->next
;
3224 printk("print_dma_descriptors end\n");
/*
 * Debug helper: dump a user-facing strcop_crypto_op (the ioctl request
 * structure) to the kernel log.
 *
 * NOTE(review): most format-string lines and arguments of the printk
 * calls below were elided by extraction; only the surviving fragments are
 * shown, annotated in place.
 */
static void print_strcop_crypto_op(struct strcop_crypto_op *cop)
	printk("print_strcop_crypto_op, 0x%p\n", cop);

	/* Cipher section.  NOTE(review): the early format lines and the
	 * arguments between "cipher_explicit=%d" and cop->cipher_explicit
	 * were elided — this call is incomplete as shown. */
	printk("indata=0x%p\n"
	       "cipher_explicit=%d\n"
	       cop->cipher_explicit,
	       cop->cipher_outdata,
	       cop->cipher_outlen);

	/* Digest section (format continuation and arguments elided). */
	printk("do_digest=%d\n"
	/* Checksum section (format continuation and arguments elided). */
	printk("do_csum=%d\n"
/*
 * Debug helper: dump a cryptocop_operation to the kernel log — either its
 * user-supplied DMA lists (via print_user_dma_lists()) or its transform
 * operation (configs, descriptors, and in/out iovecs).
 *
 * NOTE(review): extraction elided braces, several printk format
 * continuations/arguments, and the bodies of the tfrm_cfg/desc/desc_cfg
 * walks; visible statements are annotated as-is.
 */
static void print_cryptocop_operation(struct cryptocop_operation *cop)
	struct cryptocop_desc *d;		/* descriptor list cursor */
	struct cryptocop_tfrm_cfg *tc;		/* transform-config list cursor */
	struct cryptocop_desc_cfg *dc;		/* per-descriptor config cursor */

	printk("print_cryptocop_operation, cop=0x%p\n\n", cop);
	printk("sid: %lld\n", cop->sid);
	/* Status and callback flags.  NOTE(review): intermediate format
	 * lines/arguments between these fields were elided. */
	printk("operation_status=%d\n"
	       "fast_callback=%d\n",
	       cop->operation_status,
	       cop->fast_callback);

	if (cop->use_dmalists){
		/* Consumer supplied raw DMA lists — dump those instead. */
		print_user_dma_lists(&cop->list_op);
		/* Transform-operation summary.  NOTE(review): the format
		 * string's interior lines were elided; else-branch brace
		 * placement lost in extraction — confirm this printk belongs
		 * to the non-dmalist path as the field accesses suggest. */
		printk("cop->tfrm_op\n"
		       cop->tfrm_op.tfrm_cfg,
		       cop->tfrm_op.indata,
		       cop->tfrm_op.incount,
		       cop->tfrm_op.outdata,
		       cop->tfrm_op.outcount,
		       cop->tfrm_op.outlen);

		/* Walk the transform-config list (loop header elided). */
		tc = cop->tfrm_op.tfrm_cfg;
		printk("tfrm_cfg, 0x%p\n"
		/* Walk the descriptor list (loop header and body elided). */
		d = cop->tfrm_op.desc;
		printk("\n======================desc, 0x%p\n"
		printk("=========desc_cfg, 0x%p\n"

		/* Dump every input iovec. */
		printk("\n====iniov\n");
		for (i = 0; i < cop->tfrm_op.incount; i++){
			/* NOTE(review): format continuation elided. */
			printk("indata[%d]\n"
			       cop->tfrm_op.indata[i].iov_base,
			       cop->tfrm_op.indata[i].iov_len);

		/* Dump every output iovec. */
		printk("\n====outiov\n");
		for (i = 0; i < cop->tfrm_op.outcount; i++){
			/* NOTE(review): format continuation elided. */
			printk("outdata[%d]\n"
			       cop->tfrm_op.outdata[i].iov_base,
			       cop->tfrm_op.outdata[i].iov_len);

	printk("------------end print_cryptocop_operation\n");
/*
 * Debug helper: dump the consumer-supplied (physical-address) DMA lists of
 * a cryptocop_dma_list_operation, translating each physical pointer back
 * to a kernel virtual address with phys_to_virt() before dereferencing.
 *
 * NOTE(review): extraction elided the local declarations of `dd` and `i`,
 * loop-counter updates, printk continuation strings, and braces; the
 * visible statements are annotated as-is.
 */
static void print_user_dma_lists(struct cryptocop_dma_list_operation *dma_op)
	printk("print_user_dma_lists, dma_op=0x%p\n", dma_op);

	/* Data buffers are stored as physical addresses; print both views. */
	printk("out_data_buf = 0x%p, phys_to_virt(out_data_buf) = 0x%p\n", dma_op->out_data_buf, phys_to_virt((unsigned long int)dma_op->out_data_buf));
	printk("in_data_buf = 0x%p, phys_to_virt(in_data_buf) = 0x%p\n", dma_op->in_data_buf, phys_to_virt((unsigned long int)dma_op->in_data_buf));

	/* Walk the out list: each ->next is a physical address, so it is
	 * translated before the next iteration dereferences it. */
	printk("##############outlist\n");
	dd = phys_to_virt((unsigned long int)dma_op->outlist);
	while (dd != NULL) {
		printk("#%d phys_to_virt(desc) 0x%p\n", i, dd);
		/* NOTE(review): continuation strings/arguments elided. */
		printk("\n\tbuf: 0x%p\n"
		dd = phys_to_virt((unsigned long int)dd->next);

	/* Walk the in list the same way. */
	printk("##############inlist\n");
	dd = phys_to_virt((unsigned long int)dma_op->inlist);
	while (dd != NULL) {
		printk("#%d phys_to_virt(desc) 0x%p\n", i, dd);
		/* NOTE(review): continuation strings/arguments elided. */
		printk("\n\tbuf: 0x%p\n"
		dd = phys_to_virt((unsigned long int)dd->next);
3443 static void print_lock_status(void)
3445 printk("**********************print_lock_status\n");
3446 printk("cryptocop_completed_jobs_lock %d\n", spin_is_locked(&cryptocop_completed_jobs_lock
));
3447 printk("cryptocop_job_queue_lock %d\n", spin_is_locked(&cryptocop_job_queue_lock
));
3448 printk("descr_pool_lock %d\n", spin_is_locked(&descr_pool_lock
));
3449 printk("cryptocop_sessions_lock %d\n", spin_is_locked(cryptocop_sessions_lock
));
3450 printk("running_job_lock %d\n", spin_is_locked(running_job_lock
));
3451 printk("cryptocop_process_lock %d\n", spin_is_locked(cryptocop_process_lock
));
/* Device name passed to register_chrdev()/unregister_chrdev() in the
 * module init/teardown paths below. */
static const char cryptocop_name[] = "ETRAX FS stream co-processor";
/*
 * Module init: register the character device, bring up the co-processor
 * and its job queue, seed the DMA-descriptor pool free list, and
 * initialise all driver spinlocks and global state.
 *
 * NOTE(review): extraction elided braces, the declarations of `i`/`err`,
 * the guard that checks/sets `initialized`, the error-check `if`s after
 * each setup call, and the `return` statements; visible statements are
 * annotated as-is, including the unwind calls of the elided error paths.
 */
static int init_stream_coprocessor(void)
	/* Guard against double initialisation (check elided in extraction). */
	static int initialized = 0;

	printk("ETRAX FS stream co-processor driver v0.01, (c) 2003 Axis Communications AB\n");

	/* Register the char device; failure path prints and (elided) bails. */
	err = register_chrdev(CRYPTOCOP_MAJOR, cryptocop_name, &cryptocop_fops);
	printk(KERN_ERR "stream co-processor: could not get major number.\n");

	/* Bring up the co-processor hardware; on failure (elided check)
	 * unwind the chrdev registration. */
	err = init_cryptocop();
	(void)unregister_chrdev(CRYPTOCOP_MAJOR, cryptocop_name);

	/* Set up the job queue; on failure (elided check) unwind both the
	 * co-processor and the chrdev registration. */
	err = cryptocop_job_queue_init();
	release_cryptocop();
	(void)unregister_chrdev(CRYPTOCOP_MAJOR, cryptocop_name);

	/* Init the descriptor pool: chain all but the last entry into a
	 * singly linked free list, terminate it, and publish head/count. */
	for (i = 0; i < CRYPTOCOP_DESCRIPTOR_POOL_SIZE - 1; i++) {
		descr_pool[i].from_pool = 1;
		descr_pool[i].next = &descr_pool[i + 1];
	/* Last pool entry terminates the free list. */
	descr_pool[i].from_pool = 1;
	descr_pool[i].next = NULL;
	descr_pool_free_list = &descr_pool[0];
	descr_pool_no_free = CRYPTOCOP_DESCRIPTOR_POOL_SIZE;

	/* Initialise every driver spinlock before any job can run. */
	spin_lock_init(&cryptocop_completed_jobs_lock);
	spin_lock_init(&cryptocop_job_queue_lock);
	spin_lock_init(&descr_pool_lock);
	spin_lock_init(&cryptocop_sessions_lock);
	spin_lock_init(&running_job_lock);
	spin_lock_init(&cryptocop_process_lock);

	/* No sessions and no running job yet. */
	cryptocop_sessions = NULL;
	cryptocop_running_job = NULL;

	printk("stream co-processor: init done.\n");
/*
 * Module teardown: release the co-processor hardware and close the job
 * queue.  NOTE(review): braces elided in extraction; the chrdev
 * unregistration is not visible here — confirm against the original file.
 */
static void __exit exit_stream_coprocessor(void)
	release_cryptocop();
	cryptocop_job_queue_close();

module_init(init_stream_coprocessor);
module_exit(exit_stream_coprocessor);