/*
 *  linux/drivers/s390/crypto/z90main.c
 *
 *  Copyright (C)  2001, 2004 IBM Corporation
 *  Author(s): Robert Burroughs (burrough@us.ibm.com)
 *             Eric Rossman (edrossma@us.ibm.com)
 *
 *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <asm/uaccess.h>       // copy_(from|to)_user
#include <linux/compat.h>
#include <linux/compiler.h>
#include <linux/delay.h>       // mdelay
#include <linux/init.h>
#include <linux/interrupt.h>   // for tasklets
#include <linux/ioctl32.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kobject_uevent.h>
#include <linux/proc_fs.h>
#include <linux/syscalls.h>
#include <linux/version.h>
#include "z90crypt.h"
#include "z90common.h"

#ifndef Z90CRYPT_USE_HOTPLUG
#include <linux/miscdevice.h>
#endif
#define VERSION_CODE(vers, rel, seq) (((vers)<<16) | ((rel)<<8) | (seq))
#if LINUX_VERSION_CODE < VERSION_CODE(2,4,0) /* version < 2.4 */
#  error "This kernel is too old: not supported"
#endif
#if LINUX_VERSION_CODE > VERSION_CODE(2,7,0) /* version > 2.6 */
#  error "This kernel is too recent: not supported by this file"
#endif

#define VERSION_Z90MAIN_C "$Revision: 1.57 $"
static char z90main_version[] __initdata =
	"z90main.o (" VERSION_Z90MAIN_C "/"
	VERSION_Z90COMMON_H "/" VERSION_Z90CRYPT_H ")";

extern char z90hardware_version[];
/**
 * Defaults that may be modified.
 */
#ifndef Z90CRYPT_USE_HOTPLUG
/**
 * You can specify a different minor at compile time.
 */
#ifndef Z90CRYPT_MINOR
#define Z90CRYPT_MINOR	MISC_DYNAMIC_MINOR
#endif
#else
/**
 * You can specify a different major at compile time.
 */
#ifndef Z90CRYPT_MAJOR
#define Z90CRYPT_MAJOR	0
#endif
#endif

/**
 * You can specify a different domain at compile time or on the insmod
 * command line.
 */
#ifndef DOMAIN_INDEX
#define DOMAIN_INDEX	-1
#endif

/**
 * This is the name under which the device is registered in /proc/modules.
 */
#define REG_NAME	"z90crypt"

/**
 * Cleanup should run every CLEANUPTIME seconds and should clean up requests
 * older than CLEANUPTIME seconds in the past.
 */
#define CLEANUPTIME 20

/**
 * Config should run every CONFIGTIME seconds.
 */
#define CONFIGTIME 30

/**
 * The first execution of the config task should take place
 * immediately after initialization.
 */
#ifndef INITIAL_CONFIGTIME
#define INITIAL_CONFIGTIME 1
#endif

/**
 * Reader should run every READERTIME milliseconds.
 * With the 100Hz patch for s390, z90crypt can lock the system solid while
 * under heavy load. We'll try to avoid that.
 */
#define READERTIME 10

/**
 * turn long device array index into device pointer
 */
#define LONG2DEVPTR(ndx) (z90crypt.device_p[(ndx)])

/**
 * turn short device array index into long device array index
 */
#define SHRT2LONG(ndx) (z90crypt.overall_device_x.device_index[(ndx)])

/**
 * turn short device array index into device pointer
 */
#define SHRT2DEVPTR(ndx) LONG2DEVPTR(SHRT2LONG(ndx))
/**
 * Status for a work-element
 */
#define STAT_DEFAULT	0x00 // request has not been processed

#define STAT_ROUTED	0x80 // bit 7: requests get routed to specific device
			     //	       else, device is determined each write
#define STAT_FAILED	0x40 // bit 6: this bit is set if the request failed
			     //	       before being sent to the hardware.
#define STAT_WRITTEN	0x30 // bits 5-4: work to be done, not sent to device
//			0x20 // UNUSED state
#define STAT_READPEND	0x10 // bits 5-4: work done, we're returning data now
#define STAT_NOWORK	0x00 // bits off: no work on any queue
#define STAT_RDWRMASK	0x30 // mask for bits 5-4

/**
 * Macros to check the status RDWRMASK
 */
#define CHK_RDWRMASK(statbyte) ((statbyte) & STAT_RDWRMASK)
#define SET_RDWRMASK(statbyte, newval) \
	{(statbyte) &= ~STAT_RDWRMASK; (statbyte) |= newval;}
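
/*
 * Illustrative sketch (not part of the original driver, compiled out): how
 * the RDWRMASK macros above are typically applied to a work element's
 * status byte.  The local variable name is hypothetical.
 */
#if 0
static void rdwrmask_example(void)
{
	unsigned char we_status = STAT_DEFAULT;	   /* 0x00: no work anywhere */

	SET_RDWRMASK(we_status, STAT_WRITTEN);	   /* queued, not yet read back */
	if (CHK_RDWRMASK(we_status) == STAT_WRITTEN)
		SET_RDWRMASK(we_status, STAT_READPEND); /* work done, data pending */

	SET_RDWRMASK(we_status, STAT_NOWORK);	   /* response delivered */
}
#endif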
/**
 * Audit Trail.  Progress of a Work element
 * audit[0]: Unless noted otherwise, these bits are all set by the process
 */
#define FP_COPYFROM	0x80 // Caller's buffer has been copied to work element
#define FP_BUFFREQ	0x40 // Low Level buffer requested
#define FP_BUFFGOT	0x20 // Low Level buffer obtained
#define FP_SENT		0x10 // Work element sent to a crypto device
			     // (may be set by process or by reader task)
#define FP_PENDING	0x08 // Work element placed on pending queue
			     // (may be set by process or by reader task)
#define FP_REQUEST	0x04 // Work element placed on request queue
#define FP_ASLEEP	0x02 // Work element about to sleep
#define FP_AWAKE	0x01 // Work element has been awakened

/**
 * audit[1]: These bits are set by the reader task and/or the cleanup task
 */
#define FP_NOTPENDING	  0x80 // Work element removed from pending queue
#define FP_AWAKENING	  0x40 // Caller about to be awakened
#define FP_TIMEDOUT	  0x20 // Caller timed out
#define FP_RESPSIZESET	  0x10 // Response size copied to work element
#define FP_RESPADDRCOPIED 0x08 // Response address copied to work element
#define FP_RESPBUFFCOPIED 0x04 // Response buffer copied to work element
#define FP_REMREQUEST	  0x02 // Work element removed from request queue
#define FP_SIGNALED	  0x01 // Work element was awakened by a signal
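
/*
 * Illustrative sketch (not part of the original driver, compiled out): the
 * audit[0] value accumulated by a request that completes normally.  The
 * ordering of the bits is inferred from where this file sets them
 * (z90crypt_prepare, z90crypt_send, z90crypt_rsa); the helper itself is
 * hypothetical and exists only to show the bit arithmetic.
 */
#if 0
static unsigned char audit0_after_successful_request(void)
{
	unsigned char audit0 = 0x00;

	audit0 |= FP_COPYFROM;	/* user request copied into the work element */
	audit0 |= FP_BUFFREQ;	/* low level buffer requested ...	     */
	audit0 |= FP_BUFFGOT;	/* ... and obtained			     */
	audit0 |= FP_SENT;	/* handed to a crypto device		     */
	audit0 |= FP_PENDING;	/* parked on the pending queue		     */
	audit0 |= FP_ASLEEP;	/* caller went to sleep on the wait queue    */
	audit0 |= FP_AWAKE;	/* caller woken up by the reader task	     */

	return audit0;		/* 0xFB for this path (FP_REQUEST not set)   */
}
#endif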
/**
 * state of the file handle in private_data.status
 */
#define STAT_OPEN   0
#define STAT_CLOSED 1

/**
 * PID() expands to the process ID of the current process
 */
#define PID() (current->pid)
/**
 * Selected Constants.  The number of APs and the number of devices
 */
#ifndef Z90CRYPT_NUM_APS
#define Z90CRYPT_NUM_APS 64
#endif
#ifndef Z90CRYPT_NUM_DEVS
#define Z90CRYPT_NUM_DEVS Z90CRYPT_NUM_APS
#endif

/**
 * Buffer size for receiving responses. The maximum Response Size
 * is actually the maximum request size, since in an error condition
 * the request itself may be returned unchanged.
 */
#define MAX_RESPONSE_SIZE 0x0000077C
/**
 * A count and status-byte mask
 */
struct status {
	int	      st_count;		    // # of enabled devices
	int	      disabled_count;	    // # of disabled devices
	int	      user_disabled_count;  // # of devices disabled via proc fs
	unsigned char st_mask[Z90CRYPT_NUM_APS]; // current status mask
};
/**
 * The array of device indexes is a mechanism for fast indexing into
 * a long (and sparse) array.  For instance, if APs 3, 9 and 47 are
 * installed, z90CDeviceIndex[0] is 3, z90CDeviceIndex[1] is 9, and
 * z90CDeviceIndex[2] is 47.
 */
struct device_x {
	int device_index[Z90CRYPT_NUM_DEVS];
};
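
/*
 * Illustrative sketch (not part of the original driver, compiled out): how
 * the short (dense) index maps to the long (sparse) AP index through the
 * SHRT2LONG/SHRT2DEVPTR macros defined earlier.  The values follow the
 * example in the comment above and are assumptions for illustration only.
 */
#if 0
static void device_index_example(void)
{
	/* With APs 3, 9 and 47 installed: */
	int long_index = SHRT2LONG(1);	/* -> 9, the second installed AP  */
	void *dev = SHRT2DEVPTR(1);	/* same device as LONG2DEVPTR(9)  */

	(void) long_index;
	(void) dev;
}
#endif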
/**
 * All devices are arranged in a single array: 64 APs
 */
struct device {
	int		 dev_type;	    // PCICA, PCICC, PCIXCC_MCL2,
					    // PCIXCC_MCL3, CEX2C
	enum devstat	 dev_stat;	    // current device status
	int		 dev_self_x;	    // Index in array
	int		 disabled;	    // Set when device is in error
	int		 user_disabled;	    // Set when device is disabled by user
	int		 dev_q_depth;	    // q depth
	unsigned char *	 dev_resp_p;	    // Response buffer address
	int		 dev_resp_l;	    // Response Buffer length
	int		 dev_caller_count;  // Number of callers
	int		 dev_total_req_cnt; // # requests for device since load
	struct list_head dev_caller_list;   // List of callers
};
/**
 * There's a struct status and a struct device_x for each device type.
 */
struct hdware_block {
	struct status	hdware_mask;
	struct status	type_mask[Z90CRYPT_NUM_TYPES];
	struct device_x type_x_addr[Z90CRYPT_NUM_TYPES];
	unsigned char	device_type_array[Z90CRYPT_NUM_APS];
};
/**
 * z90crypt is the topmost data structure in the hierarchy.
 */
struct z90crypt {
	int		     max_count;		// Nr of possible crypto devices
	struct status	     mask;
	int		     q_depth_array[Z90CRYPT_NUM_DEVS];
	int		     dev_type_array[Z90CRYPT_NUM_DEVS];
	struct device_x	     overall_device_x;	// array device indexes
	struct device *	     device_p[Z90CRYPT_NUM_DEVS];
	int		     terminating;
	int		     domain_established;// TRUE:  domain has been found
	int		     cdx;		// Crypto Domain Index
	int		     len;		// Length of this data structure
	struct hdware_block *hdware_info;
};
/**
 * An array of these structures is pointed to from dev_caller
 * The length of the array depends on the device type. For APs,
 * there is one.
 *
 * The caller buffer is allocated to the user at OPEN. At WRITE,
 * it contains the request; at READ, the response. The function
 * send_to_crypto_device converts the request to device-dependent
 * form and uses the caller's OPEN-allocated buffer for the response.
 */
struct caller {
	int		 caller_buf_l;		 // length of original request
	unsigned char *	 caller_buf_p;		 // Original request on WRITE
	int		 caller_dev_dep_req_l;	 // len device dependent request
	unsigned char *	 caller_dev_dep_req_p;	 // Device dependent form
	unsigned char	 caller_id[8];		 // caller-supplied message id
	struct list_head caller_liste;
	unsigned char	 caller_dev_dep_req[MAX_RESPONSE_SIZE];
};
/**
 * Function prototypes from z90hardware.c
 */
enum hdstat query_online(int, int, int, int *, int *);
enum devstat reset_device(int, int, int);
enum devstat send_to_AP(int, int, int, unsigned char *);
enum devstat receive_from_AP(int, int, int, unsigned char *, unsigned char *);
int convert_request(unsigned char *, int, short, int, int, int *,
		    unsigned char *);
int convert_response(unsigned char *, unsigned char *, int *, unsigned char *);
/**
 * Low level function prototypes
 */
static int create_z90crypt(int *);
static int refresh_z90crypt(int *);
static int find_crypto_devices(struct status *);
static int create_crypto_device(int);
static int destroy_crypto_device(int);
static void destroy_z90crypt(void);
static int refresh_index_array(struct status *, struct device_x *);
static int probe_device_type(struct device *);
static int probe_PCIXCC_type(struct device *);
/**
 * proc fs definitions
 */
static struct proc_dir_entry *z90crypt_entry;
/**
 * work_element.opener points back to this structure
 */
struct priv_data {
	pid_t	      opener_pid;
	unsigned char status;	   // 0: open  1: closed
};
/**
 * A work element is allocated for each request
 */
struct work_element {
	struct priv_data *priv_data;
	int		  devindex;	  // index of device processing this w_e
					  // (If request did not specify device,
					  //  -1 until placed onto a queue)
	int		  devtype;
	struct list_head  liste;	  // used for requestq and pendingq
	char		  buffer[128];	  // local copy of user request
	int		  buff_size;	  // size of the buffer for the request
	char		  resp_buff[RESPBUFFSIZE];
	int		  resp_buff_size;
	char __user *	  resp_addr;	  // address of response in user space
	unsigned int	  funccode;	  // function code of request
	wait_queue_head_t waitq;
	unsigned long	  requestsent;	  // time at which the request was sent
	atomic_t	  alarmrung;	  // wake-up signal
	unsigned char	  caller_id[8];	  // pid + counter, for this w_e
	unsigned char	  status[1];	  // bits to mark status of the request
	unsigned char	  audit[3];	  // record of work element's progress
	unsigned char *	  requestptr;	  // address of request buffer
	int		  retcode;	  // return code of request
};
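
/*
 * Illustrative sketch (not part of the original driver, compiled out): the
 * intended single-page layout behind allocate_work_element() and
 * get_crypto_request_buffer() later in this file.  One get_zeroed_page()
 * allocation holds the work_element, the caller structure and the staging
 * copy of the user's input data, in that order.  The offsets below are
 * assumptions drawn from the pointer arithmetic used in those functions.
 */
#if 0
static void caller_layout_example(struct work_element *we_p)
{
	/* struct caller lives directly behind the work element ...	   */
	struct caller *caller_p = (struct caller *)
		((unsigned char *)we_p + sizeof(struct work_element));

	/* ... and the staging buffer for the user's input follows it.	   */
	unsigned char *temp_buffer = (unsigned char *)we_p +
		sizeof(struct work_element) + sizeof(struct caller);

	(void) caller_p;
	(void) temp_buffer;
}
#endif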
/**
 * High level function prototypes
 */
static int z90crypt_open(struct inode *, struct file *);
static int z90crypt_release(struct inode *, struct file *);
static ssize_t z90crypt_read(struct file *, char __user *, size_t, loff_t *);
static ssize_t z90crypt_write(struct file *, const char __user *,
			      size_t, loff_t *);
static long z90crypt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
static long z90crypt_compat_ioctl(struct file *, unsigned int, unsigned long);

static void z90crypt_reader_task(unsigned long);
static void z90crypt_schedule_reader_task(unsigned long);
static void z90crypt_config_task(unsigned long);
static void z90crypt_cleanup_task(unsigned long);

static int z90crypt_status(char *, char **, off_t, int, int *, void *);
static int z90crypt_status_write(struct file *, const char __user *,
				 unsigned long, void *);
#ifdef Z90CRYPT_USE_HOTPLUG
#define Z90CRYPT_HOTPLUG_ADD	 1
#define Z90CRYPT_HOTPLUG_REMOVE	 2

static void z90crypt_hotplug_event(int, int, int);
#endif
/**
 * Storage allocated at initialization and used throughout the life of
 * this module.
 */
#ifdef Z90CRYPT_USE_HOTPLUG
static int z90crypt_major = Z90CRYPT_MAJOR;
#endif

static int domain = DOMAIN_INDEX;
static struct z90crypt z90crypt;
static int quiesce_z90crypt;
static spinlock_t queuespinlock;
static struct list_head request_list;
static int requestq_count;
static struct list_head pending_list;
static int pendingq_count;

static struct tasklet_struct reader_tasklet;
static struct timer_list reader_timer;
static struct timer_list config_timer;
static struct timer_list cleanup_timer;
static atomic_t total_open;
static atomic_t z90crypt_step;
static struct file_operations z90crypt_fops = {
	.owner		= THIS_MODULE,
	.read		= z90crypt_read,
	.write		= z90crypt_write,
	.unlocked_ioctl	= z90crypt_unlocked_ioctl,
	.compat_ioctl	= z90crypt_compat_ioctl,
	.open		= z90crypt_open,
	.release	= z90crypt_release
};

#ifndef Z90CRYPT_USE_HOTPLUG
static struct miscdevice z90crypt_misc_device = {
	.minor	    = Z90CRYPT_MINOR,
	.name	    = DEV_NAME,
	.fops	    = &z90crypt_fops,
	.devfs_name = DEV_NAME
};
#endif
/**
 * Documentation values.
 */
MODULE_AUTHOR("zSeries Linux Crypto Team: Robert H. Burroughs, Eric D. Rossman "
	      "and Jochen Roehrig");
MODULE_DESCRIPTION("zSeries Linux Cryptographic Coprocessor device driver, "
		   "Copyright 2001, 2004 IBM Corporation");
MODULE_LICENSE("GPL");
module_param(domain, int, 0);
MODULE_PARM_DESC(domain, "domain index for device");
/**
 * ioctl32 conversion routines
 */
struct ica_rsa_modexpo_32 { // For 32-bit callers
	compat_uptr_t	inputdata;
	unsigned int	inputdatalength;
	compat_uptr_t	outputdata;
	unsigned int	outputdatalength;
	compat_uptr_t	b_key;
	compat_uptr_t	n_modulus;
};
static long
trans_modexpo32(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ica_rsa_modexpo_32 __user *mex32u = compat_ptr(arg);
	struct ica_rsa_modexpo_32  mex32k;
	struct ica_rsa_modexpo __user *mex64;
	long ret = 0;
	unsigned int i;

	if (!access_ok(VERIFY_WRITE, mex32u, sizeof(struct ica_rsa_modexpo_32)))
		return -EFAULT;
	mex64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo));
	if (!access_ok(VERIFY_WRITE, mex64, sizeof(struct ica_rsa_modexpo)))
		return -EFAULT;
	if (copy_from_user(&mex32k, mex32u, sizeof(struct ica_rsa_modexpo_32)))
		return -EFAULT;
	if (__put_user(compat_ptr(mex32k.inputdata), &mex64->inputdata)   ||
	    __put_user(mex32k.inputdatalength, &mex64->inputdatalength)   ||
	    __put_user(compat_ptr(mex32k.outputdata), &mex64->outputdata) ||
	    __put_user(mex32k.outputdatalength, &mex64->outputdatalength) ||
	    __put_user(compat_ptr(mex32k.b_key), &mex64->b_key)	   ||
	    __put_user(compat_ptr(mex32k.n_modulus), &mex64->n_modulus))
		return -EFAULT;
	ret = z90crypt_unlocked_ioctl(filp, cmd, (unsigned long)mex64);
	if (!ret)
		if (__get_user(i, &mex64->outputdatalength) ||
		    __put_user(i, &mex32u->outputdatalength))
			ret = -EFAULT;
	return ret;
}
struct ica_rsa_modexpo_crt_32 { // For 32-bit callers
	compat_uptr_t	inputdata;
	unsigned int	inputdatalength;
	compat_uptr_t	outputdata;
	unsigned int	outputdatalength;
	compat_uptr_t	bp_key;
	compat_uptr_t	bq_key;
	compat_uptr_t	np_prime;
	compat_uptr_t	nq_prime;
	compat_uptr_t	u_mult_inv;
};
static long
trans_modexpo_crt32(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ica_rsa_modexpo_crt_32 __user *crt32u = compat_ptr(arg);
	struct ica_rsa_modexpo_crt_32  crt32k;
	struct ica_rsa_modexpo_crt __user *crt64;
	long ret = 0;
	unsigned int i;

	if (!access_ok(VERIFY_WRITE, crt32u,
		       sizeof(struct ica_rsa_modexpo_crt_32)))
		return -EFAULT;
	crt64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo_crt));
	if (!access_ok(VERIFY_WRITE, crt64, sizeof(struct ica_rsa_modexpo_crt)))
		return -EFAULT;
	if (copy_from_user(&crt32k, crt32u,
			   sizeof(struct ica_rsa_modexpo_crt_32)))
		return -EFAULT;
	if (__put_user(compat_ptr(crt32k.inputdata), &crt64->inputdata)   ||
	    __put_user(crt32k.inputdatalength, &crt64->inputdatalength)   ||
	    __put_user(compat_ptr(crt32k.outputdata), &crt64->outputdata) ||
	    __put_user(crt32k.outputdatalength, &crt64->outputdatalength) ||
	    __put_user(compat_ptr(crt32k.bp_key), &crt64->bp_key)	   ||
	    __put_user(compat_ptr(crt32k.bq_key), &crt64->bq_key)	   ||
	    __put_user(compat_ptr(crt32k.np_prime), &crt64->np_prime)	   ||
	    __put_user(compat_ptr(crt32k.nq_prime), &crt64->nq_prime)	   ||
	    __put_user(compat_ptr(crt32k.u_mult_inv), &crt64->u_mult_inv))
		return -EFAULT;
	ret = z90crypt_unlocked_ioctl(filp, cmd, (unsigned long)crt64);
	if (!ret)
		if (__get_user(i, &crt64->outputdatalength) ||
		    __put_user(i, &crt32u->outputdatalength))
			ret = -EFAULT;
	return ret;
}
static long
z90crypt_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case Z90STAT_TOTALCOUNT:
	case Z90STAT_PCICACOUNT:
	case Z90STAT_PCICCCOUNT:
	case Z90STAT_PCIXCCCOUNT:
	case Z90STAT_PCIXCCMCL2COUNT:
	case Z90STAT_PCIXCCMCL3COUNT:
	case Z90STAT_CEX2CCOUNT:
	case Z90STAT_REQUESTQ_COUNT:
	case Z90STAT_PENDINGQ_COUNT:
	case Z90STAT_TOTALOPEN_COUNT:
	case Z90STAT_DOMAIN_INDEX:
	case Z90STAT_STATUS_MASK:
	case Z90STAT_QDEPTH_MASK:
	case Z90STAT_PERDEV_REQCNT:
		return z90crypt_unlocked_ioctl(filp, cmd, arg);
	case ICARSAMODEXPO:
		return trans_modexpo32(filp, cmd, arg);
	case ICARSACRT:
		return trans_modexpo_crt32(filp, cmd, arg);
	default:
		return -ENOIOCTLCMD;
	}
}
/**
 * The module initialization code.
 */
static int __init
z90crypt_init_module(void)
{
	int result, nresult;
	struct proc_dir_entry *entry;

	PDEBUG("PID %d\n", PID());

	if ((domain < -1) || (domain > 15)) {
		PRINTKW("Invalid param: domain = %d.  Not loading.\n", domain);
		return -EINVAL;
	}

#ifndef Z90CRYPT_USE_HOTPLUG
	/* Register as misc device with given minor (or get a dynamic one). */
	result = misc_register(&z90crypt_misc_device);
	if (result < 0) {
		PRINTKW(KERN_ERR "misc_register (minor %d) failed with %d\n",
			z90crypt_misc_device.minor, result);
		return result;
	}
#else
	/* Register the major (or get a dynamic one). */
	result = register_chrdev(z90crypt_major, REG_NAME, &z90crypt_fops);
	if (result < 0) {
		PRINTKW("register_chrdev (major %d) failed with %d.\n",
			z90crypt_major, result);
		return result;
	}

	if (z90crypt_major == 0)
		z90crypt_major = result;
#endif

	PDEBUG("Registered " DEV_NAME " with result %d\n", result);

	result = create_z90crypt(&domain);
	if (result != 0) {
		PRINTKW("create_z90crypt (domain index %d) failed with %d.\n",
			domain, result);
		goto init_module_cleanup;
	}

	if (result == 0) {
		PRINTKN("Version %d.%d.%d loaded, built on %s %s\n",
			z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT,
			__DATE__, __TIME__);
		PRINTKN("%s\n", z90main_version);
		PRINTKN("%s\n", z90hardware_version);
		PDEBUG("create_z90crypt (domain index %d) successful.\n",
		       domain);
	} else
		PRINTK("No devices at startup\n");

#ifdef Z90CRYPT_USE_HOTPLUG
	/* generate hotplug event for device node generation */
	z90crypt_hotplug_event(z90crypt_major, 0, Z90CRYPT_HOTPLUG_ADD);
#endif

	/* Initialize globals. */
	spin_lock_init(&queuespinlock);

	INIT_LIST_HEAD(&pending_list);
	pendingq_count = 0;

	INIT_LIST_HEAD(&request_list);
	requestq_count = 0;

	quiesce_z90crypt = 0;

	atomic_set(&total_open, 0);
	atomic_set(&z90crypt_step, 0);

	/* Set up the cleanup task. */
	init_timer(&cleanup_timer);
	cleanup_timer.function = z90crypt_cleanup_task;
	cleanup_timer.data = 0;
	cleanup_timer.expires = jiffies + (CLEANUPTIME * HZ);
	add_timer(&cleanup_timer);

	/* Set up the proc file system */
	entry = create_proc_entry("driver/z90crypt", 0644, 0);
	if (entry) {
		entry->read_proc = z90crypt_status;
		entry->write_proc = z90crypt_status_write;
	} else
		PRINTK("Couldn't create z90crypt proc entry\n");
	z90crypt_entry = entry;

	/* Set up the configuration task. */
	init_timer(&config_timer);
	config_timer.function = z90crypt_config_task;
	config_timer.data = 0;
	config_timer.expires = jiffies + (INITIAL_CONFIGTIME * HZ);
	add_timer(&config_timer);

	/* Set up the reader task */
	tasklet_init(&reader_tasklet, z90crypt_reader_task, 0);
	init_timer(&reader_timer);
	reader_timer.function = z90crypt_schedule_reader_task;
	reader_timer.data = 0;
	reader_timer.expires = jiffies + (READERTIME * HZ / 1000);
	add_timer(&reader_timer);

	return 0; // success

init_module_cleanup:
#ifndef Z90CRYPT_USE_HOTPLUG
	if ((nresult = misc_deregister(&z90crypt_misc_device)))
		PRINTK("misc_deregister failed with %d.\n", nresult);
	else
		PDEBUG("misc_deregister successful.\n");
#else
	if ((nresult = unregister_chrdev(z90crypt_major, REG_NAME)))
		PRINTK("unregister_chrdev failed with %d.\n", nresult);
	else
		PDEBUG("unregister_chrdev successful.\n");
#endif

	return result; // failure
}
/**
 * The module termination code
 */
static void __exit
z90crypt_cleanup_module(void)
{
	int nresult;

	PDEBUG("PID %d\n", PID());

	remove_proc_entry("driver/z90crypt", 0);

#ifndef Z90CRYPT_USE_HOTPLUG
	if ((nresult = misc_deregister(&z90crypt_misc_device)))
		PRINTK("misc_deregister failed with %d.\n", nresult);
	else
		PDEBUG("misc_deregister successful.\n");
#else
	z90crypt_hotplug_event(z90crypt_major, 0, Z90CRYPT_HOTPLUG_REMOVE);
	if ((nresult = unregister_chrdev(z90crypt_major, REG_NAME)))
		PRINTK("unregister_chrdev failed with %d.\n", nresult);
	else
		PDEBUG("unregister_chrdev successful.\n");
#endif

	/* Remove the tasks */
	tasklet_kill(&reader_tasklet);
	del_timer(&reader_timer);
	del_timer(&config_timer);
	del_timer(&cleanup_timer);

	destroy_z90crypt();

	PRINTKN("Unloaded.\n");
}
/**
 * Functions running under a process id, among them:
 *	z90crypt_unlocked_ioctl
 *	z90crypt_status_write
 *	z90crypt_process_results
 */
static int
z90crypt_open(struct inode *inode, struct file *filp)
{
	struct priv_data *private_data_p;

	if (quiesce_z90crypt)
		return -EQUIESCE;

	private_data_p = kmalloc(sizeof(struct priv_data), GFP_KERNEL);
	if (!private_data_p) {
		PRINTK("Memory allocate failed\n");
		return -ENOMEM;
	}

	memset((void *)private_data_p, 0, sizeof(struct priv_data));
	private_data_p->status = STAT_OPEN;
	private_data_p->opener_pid = PID();
	filp->private_data = private_data_p;
	atomic_inc(&total_open);

	return 0;
}
static int
z90crypt_release(struct inode *inode, struct file *filp)
{
	struct priv_data *private_data_p = filp->private_data;

	PDEBUG("PID %d (filp %p)\n", PID(), filp);

	private_data_p->status = STAT_CLOSED;
	memset(private_data_p, 0, sizeof(struct priv_data));
	kfree(private_data_p);
	atomic_dec(&total_open);

	return 0;
}
/**
 * There are two read functions, of which compile options will choose one.
 *
 * without USE_GET_RANDOM_BYTES
 *   => read() always returns -EPERM;
 * with USE_GET_RANDOM_BYTES
 *   => read() uses the get_random_bytes() kernel function
 */
#ifndef USE_GET_RANDOM_BYTES
/**
 * z90crypt_read will not be supported beyond z90crypt 1.3.1
 */
static ssize_t
z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
{
	PDEBUG("filp %p (PID %d)\n", filp, PID());
	return -EPERM;
}
#else // we want to use get_random_bytes
/**
 * read() just returns a string of random bytes.  Since we have no way
 * to generate these cryptographically, we just execute get_random_bytes
 * for the length specified.
 */
#include <linux/random.h>
static ssize_t
z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
{
	unsigned char *temp_buff;

	PDEBUG("filp %p (PID %d)\n", filp, PID());

	if (quiesce_z90crypt)
		return -EQUIESCE;
	if (count < 0) {
		PRINTK("Requested random byte count negative: %ld\n", count);
		return -EINVAL;
	}
	if (count > RESPBUFFSIZE) {
		PDEBUG("count[%d] > RESPBUFFSIZE", count);
		return -EINVAL;
	}

	temp_buff = kmalloc(RESPBUFFSIZE, GFP_KERNEL);
	if (!temp_buff) {
		PRINTK("Memory allocate failed\n");
		return -ENOMEM;
	}
	get_random_bytes(temp_buff, count);

	if (copy_to_user(buf, temp_buff, count) != 0) {
		kfree(temp_buff);
		return -EFAULT;
	}
	kfree(temp_buff);
	return count;
}
#endif
/**
 * Write is not allowed
 */
static ssize_t
z90crypt_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos)
{
	PDEBUG("filp %p (PID %d)\n", filp, PID());
	return -EPERM;
}
/**
 * New status functions
 */
static inline int
get_status_totalcount(void)
{
	return z90crypt.hdware_info->hdware_mask.st_count;
}

static inline int
get_status_PCICAcount(void)
{
	return z90crypt.hdware_info->type_mask[PCICA].st_count;
}

static inline int
get_status_PCICCcount(void)
{
	return z90crypt.hdware_info->type_mask[PCICC].st_count;
}

static inline int
get_status_PCIXCCcount(void)
{
	return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count +
	       z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count;
}

static inline int
get_status_PCIXCCMCL2count(void)
{
	return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count;
}

static inline int
get_status_PCIXCCMCL3count(void)
{
	return z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count;
}

static inline int
get_status_CEX2Ccount(void)
{
	return z90crypt.hdware_info->type_mask[CEX2C].st_count;
}

static inline int
get_status_requestq_count(void)
{
	return requestq_count;
}

static inline int
get_status_pendingq_count(void)
{
	return pendingq_count;
}

static inline int
get_status_totalopen_count(void)
{
	return atomic_read(&total_open);
}

static inline int
get_status_domain_index(void)
{
	return z90crypt.cdx;
}
static inline unsigned char *
get_status_status_mask(unsigned char status[Z90CRYPT_NUM_APS])
{
	int i, ix;

	memcpy(status, z90crypt.hdware_info->device_type_array,
	       Z90CRYPT_NUM_APS);

	for (i = 0; i < get_status_totalcount(); i++) {
		ix = SHRT2LONG(i);
		if (LONG2DEVPTR(ix)->user_disabled)
			status[ix] = 0x0d; // marked as user-disabled
	}

	return status;
}

static inline unsigned char *
get_status_qdepth_mask(unsigned char qdepth[Z90CRYPT_NUM_APS])
{
	int i, ix;

	memset(qdepth, 0, Z90CRYPT_NUM_APS);

	for (i = 0; i < get_status_totalcount(); i++) {
		ix = SHRT2LONG(i);
		qdepth[ix] = LONG2DEVPTR(ix)->dev_caller_count;
	}

	return qdepth;
}

static inline unsigned int *
get_status_perdevice_reqcnt(unsigned int reqcnt[Z90CRYPT_NUM_APS])
{
	int i, ix;

	memset(reqcnt, 0, Z90CRYPT_NUM_APS * sizeof(int));

	for (i = 0; i < get_status_totalcount(); i++) {
		ix = SHRT2LONG(i);
		reqcnt[ix] = LONG2DEVPTR(ix)->dev_total_req_cnt;
	}

	return reqcnt;
}
static inline void
init_work_element(struct work_element *we_p,
		  struct priv_data *priv_data, pid_t pid)
{
	int step;

	we_p->requestptr = (unsigned char *)we_p + sizeof(struct work_element);
	/* Come up with a unique id for this caller. */
	step = atomic_inc_return(&z90crypt_step);
	memcpy(we_p->caller_id+0, (void *) &pid, sizeof(pid));
	memcpy(we_p->caller_id+4, (void *) &step, sizeof(step));

	we_p->priv_data = priv_data;
	we_p->status[0] = STAT_DEFAULT;
	we_p->audit[0] = 0x00;
	we_p->audit[1] = 0x00;
	we_p->audit[2] = 0x00;
	we_p->resp_buff_size = 0;
	we_p->devindex = -1;

	atomic_set(&we_p->alarmrung, 0);
	init_waitqueue_head(&we_p->waitq);
	INIT_LIST_HEAD(&(we_p->liste));
}

static inline int
allocate_work_element(struct work_element **we_pp,
		      struct priv_data *priv_data_p, pid_t pid)
{
	struct work_element *we_p;

	we_p = (struct work_element *) get_zeroed_page(GFP_KERNEL);
	if (!we_p)
		return -ENOMEM;
	init_work_element(we_p, priv_data_p, pid);
	*we_pp = we_p;
	return 0;
}
static inline void
remove_device(struct device *device_p)
{
	if (!device_p || (device_p->disabled != 0))
		return;
	device_p->disabled = 1;
	z90crypt.hdware_info->type_mask[device_p->dev_type].disabled_count++;
	z90crypt.hdware_info->hdware_mask.disabled_count++;
}
/**
 * Bitlength limits for each card
 *
 * There are new MCLs which allow more bitlengths. See the table for details.
 * The MCL must be applied and the newer bitlengths enabled for these to work.
 *
 * Card Type     Old limit     New limit
 * PCICC         512-1024      512-2048
 * PCIXCC_MCL2   512-2048      no change (applying this MCL == card is MCL3+)
 * PCIXCC_MCL3   512-2048      128-2048
 * CEX2C         512-2048      128-2048
 *
 * ext_bitlens (extended bitlengths) is a global, since you should not apply an
 * MCL to just one card in a machine. We assume, at first, that all cards have
 * these capabilities.
 */
int ext_bitlens = 1; // This is global
#define PCIXCC_MIN_MOD_SIZE	 16	//  128 bits
#define OLD_PCIXCC_MIN_MOD_SIZE	 64	//  512 bits
#define PCICC_MIN_MOD_SIZE	 64	//  512 bits
#define OLD_PCICC_MAX_MOD_SIZE	128	// 1024 bits
#define MAX_MOD_SIZE		256	// 2048 bits
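
/*
 * Illustrative sketch (not part of the original driver, compiled out): how a
 * modulus byte length relates to the limits above when checking whether a
 * request fits the PCIXCC_MCL3 window.  The helper name and its 0/1 return
 * convention are hypothetical; select_device_type() below holds the
 * authoritative routing logic.
 */
#if 0
static int bytelength_fits_pcixcc_mcl3(int bytelength)
{
	/* The 2048-bit ceiling applies to every card type. */
	if (bytelength > MAX_MOD_SIZE)
		return 0;
	/* With the MCL applied (ext_bitlens), 128-bit moduli are accepted;
	 * otherwise the old 512-bit floor still holds. */
	if (ext_bitlens)
		return bytelength >= PCIXCC_MIN_MOD_SIZE;
	return bytelength >= OLD_PCIXCC_MIN_MOD_SIZE;
}
#endif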
1077 select_device_type(int *dev_type_p
, int bytelength
)
1079 static int count
= 0;
1080 int PCICA_avail
, PCIXCC_MCL3_avail
, CEX2C_avail
, index_to_use
;
1081 struct status
*stat
;
1082 if ((*dev_type_p
!= PCICC
) && (*dev_type_p
!= PCICA
) &&
1083 (*dev_type_p
!= PCIXCC_MCL2
) && (*dev_type_p
!= PCIXCC_MCL3
) &&
1084 (*dev_type_p
!= CEX2C
) && (*dev_type_p
!= ANYDEV
))
1086 if (*dev_type_p
!= ANYDEV
) {
1087 stat
= &z90crypt
.hdware_info
->type_mask
[*dev_type_p
];
1088 if (stat
->st_count
>
1089 (stat
->disabled_count
+ stat
->user_disabled_count
))
1094 /* Assumption: PCICA, PCIXCC_MCL3, and CEX2C are all similar in speed */
1095 stat
= &z90crypt
.hdware_info
->type_mask
[PCICA
];
1096 PCICA_avail
= stat
->st_count
-
1097 (stat
->disabled_count
+ stat
->user_disabled_count
);
1098 stat
= &z90crypt
.hdware_info
->type_mask
[PCIXCC_MCL3
];
1099 PCIXCC_MCL3_avail
= stat
->st_count
-
1100 (stat
->disabled_count
+ stat
->user_disabled_count
);
1101 stat
= &z90crypt
.hdware_info
->type_mask
[CEX2C
];
1102 CEX2C_avail
= stat
->st_count
-
1103 (stat
->disabled_count
+ stat
->user_disabled_count
);
1104 if (PCICA_avail
|| PCIXCC_MCL3_avail
|| CEX2C_avail
) {
1106 * bitlength is a factor, PCICA is the most capable, even with
1109 if ((bytelength
< PCIXCC_MIN_MOD_SIZE
) ||
1110 (!ext_bitlens
&& (bytelength
< OLD_PCIXCC_MIN_MOD_SIZE
))) {
1114 *dev_type_p
= PCICA
;
1119 index_to_use
= count
% (PCICA_avail
+ PCIXCC_MCL3_avail
+
1121 if (index_to_use
< PCICA_avail
)
1122 *dev_type_p
= PCICA
;
1123 else if (index_to_use
< (PCICA_avail
+ PCIXCC_MCL3_avail
))
1124 *dev_type_p
= PCIXCC_MCL3
;
1126 *dev_type_p
= CEX2C
;
1131 /* Less than OLD_PCIXCC_MIN_MOD_SIZE cannot go to a PCIXCC_MCL2 */
1132 if (bytelength
< OLD_PCIXCC_MIN_MOD_SIZE
)
1134 stat
= &z90crypt
.hdware_info
->type_mask
[PCIXCC_MCL2
];
1135 if (stat
->st_count
>
1136 (stat
->disabled_count
+ stat
->user_disabled_count
)) {
1137 *dev_type_p
= PCIXCC_MCL2
;
1142 * Less than PCICC_MIN_MOD_SIZE or more than OLD_PCICC_MAX_MOD_SIZE
1143 * (if we don't have the MCL applied and the newer bitlengths enabled)
1144 * cannot go to a PCICC
1146 if ((bytelength
< PCICC_MIN_MOD_SIZE
) ||
1147 (!ext_bitlens
&& (bytelength
> OLD_PCICC_MAX_MOD_SIZE
))) {
1150 stat
= &z90crypt
.hdware_info
->type_mask
[PCICC
];
1151 if (stat
->st_count
>
1152 (stat
->disabled_count
+ stat
->user_disabled_count
)) {
1153 *dev_type_p
= PCICC
;
1161 * Try the selected number, then the selected type (can be ANYDEV)
1164 select_device(int *dev_type_p
, int *device_nr_p
, int bytelength
)
1166 int i
, indx
, devTp
, low_count
, low_indx
;
1167 struct device_x
*index_p
;
1168 struct device
*dev_ptr
;
1170 PDEBUG("device type = %d, index = %d\n", *dev_type_p
, *device_nr_p
);
1171 if ((*device_nr_p
>= 0) && (*device_nr_p
< Z90CRYPT_NUM_DEVS
)) {
1172 PDEBUG("trying index = %d\n", *device_nr_p
);
1173 dev_ptr
= z90crypt
.device_p
[*device_nr_p
];
1176 (dev_ptr
->dev_stat
!= DEV_GONE
) &&
1177 (dev_ptr
->disabled
== 0) &&
1178 (dev_ptr
->user_disabled
== 0)) {
1179 PDEBUG("selected by number, index = %d\n",
1181 *dev_type_p
= dev_ptr
->dev_type
;
1182 return *device_nr_p
;
1186 PDEBUG("trying type = %d\n", *dev_type_p
);
1187 devTp
= *dev_type_p
;
1188 if (select_device_type(&devTp
, bytelength
) == -1) {
1189 PDEBUG("failed to select by type\n");
1192 PDEBUG("selected type = %d\n", devTp
);
1193 index_p
= &z90crypt
.hdware_info
->type_x_addr
[devTp
];
1194 low_count
= 0x0000FFFF;
1196 for (i
= 0; i
< z90crypt
.hdware_info
->type_mask
[devTp
].st_count
; i
++) {
1197 indx
= index_p
->device_index
[i
];
1198 dev_ptr
= z90crypt
.device_p
[indx
];
1200 (dev_ptr
->dev_stat
!= DEV_GONE
) &&
1201 (dev_ptr
->disabled
== 0) &&
1202 (dev_ptr
->user_disabled
== 0) &&
1203 (devTp
== dev_ptr
->dev_type
) &&
1204 (low_count
> dev_ptr
->dev_caller_count
)) {
1205 low_count
= dev_ptr
->dev_caller_count
;
1209 *device_nr_p
= low_indx
;
1214 send_to_crypto_device(struct work_element
*we_p
)
1216 struct caller
*caller_p
;
1217 struct device
*device_p
;
1219 int bytelen
= ((struct ica_rsa_modexpo
*)we_p
->buffer
)->inputdatalength
;
1221 if (!we_p
->requestptr
)
1222 return SEN_FATAL_ERROR
;
1223 caller_p
= (struct caller
*)we_p
->requestptr
;
1224 dev_nr
= we_p
->devindex
;
1225 if (select_device(&we_p
->devtype
, &dev_nr
, bytelen
) == -1) {
1226 if (z90crypt
.hdware_info
->hdware_mask
.st_count
!= 0)
1229 return SEN_NOT_AVAIL
;
1231 we_p
->devindex
= dev_nr
;
1232 device_p
= z90crypt
.device_p
[dev_nr
];
1234 return SEN_NOT_AVAIL
;
1235 if (device_p
->dev_type
!= we_p
->devtype
)
1237 if (device_p
->dev_caller_count
>= device_p
->dev_q_depth
)
1238 return SEN_QUEUE_FULL
;
1239 PDEBUG("device number prior to send: %d\n", dev_nr
);
1240 switch (send_to_AP(dev_nr
, z90crypt
.cdx
,
1241 caller_p
->caller_dev_dep_req_l
,
1242 caller_p
->caller_dev_dep_req_p
)) {
1243 case DEV_SEN_EXCEPTION
:
1244 PRINTKC("Exception during send to device %d\n", dev_nr
);
1245 z90crypt
.terminating
= 1;
1246 return SEN_FATAL_ERROR
;
1248 PRINTK("Device %d not available\n", dev_nr
);
1249 remove_device(device_p
);
1250 return SEN_NOT_AVAIL
;
1252 return SEN_NOT_AVAIL
;
1254 return SEN_FATAL_ERROR
;
1255 case DEV_BAD_MESSAGE
:
1256 return SEN_USER_ERROR
;
1257 case DEV_QUEUE_FULL
:
1258 return SEN_QUEUE_FULL
;
1263 list_add_tail(&(caller_p
->caller_liste
), &(device_p
->dev_caller_list
));
1264 device_p
->dev_caller_count
++;
1269 * Send puts the user's work on one of two queues:
1270 * the pending queue if the send was successful
1271 * the request queue if the send failed because device full or busy
1274 z90crypt_send(struct work_element
*we_p
, const char *buf
)
1278 PDEBUG("PID %d\n", PID());
1280 if (CHK_RDWRMASK(we_p
->status
[0]) != STAT_NOWORK
) {
1281 PDEBUG("PID %d tried to send more work but has outstanding "
1285 we_p
->devindex
= -1; // Reset device number
1286 spin_lock_irq(&queuespinlock
);
1287 rv
= send_to_crypto_device(we_p
);
1290 we_p
->requestsent
= jiffies
;
1291 we_p
->audit
[0] |= FP_SENT
;
1292 list_add_tail(&we_p
->liste
, &pending_list
);
1294 we_p
->audit
[0] |= FP_PENDING
;
1297 case SEN_QUEUE_FULL
:
1299 we_p
->devindex
= -1; // any device will do
1300 we_p
->requestsent
= jiffies
;
1301 list_add_tail(&we_p
->liste
, &request_list
);
1303 we_p
->audit
[0] |= FP_REQUEST
;
1309 PRINTK("*** No devices available.\n");
1310 rv
= we_p
->retcode
= -ENODEV
;
1311 we_p
->status
[0] |= STAT_FAILED
;
1313 case REC_OPERAND_INV
:
1314 case REC_OPERAND_SIZE
:
1316 case REC_INVALID_PAD
:
1317 rv
= we_p
->retcode
= -EINVAL
;
1318 we_p
->status
[0] |= STAT_FAILED
;
1322 we_p
->status
[0] |= STAT_FAILED
;
1325 if (rv
!= -ERESTARTSYS
)
1326 SET_RDWRMASK(we_p
->status
[0], STAT_WRITTEN
);
1327 spin_unlock_irq(&queuespinlock
);
1329 tasklet_schedule(&reader_tasklet
);
1334 * process_results copies the user's work from kernel space.
1337 z90crypt_process_results(struct work_element
*we_p
, char __user
*buf
)
1341 PDEBUG("we_p %p (PID %d)\n", we_p
, PID());
1343 LONG2DEVPTR(we_p
->devindex
)->dev_total_req_cnt
++;
1344 SET_RDWRMASK(we_p
->status
[0], STAT_READPEND
);
1347 if (!we_p
->buffer
) {
1348 PRINTK("we_p %p PID %d in STAT_READPEND: buffer NULL.\n",
1354 if ((rv
= copy_to_user(buf
, we_p
->buffer
, we_p
->buff_size
))) {
1355 PDEBUG("copy_to_user failed: rv = %d\n", rv
);
1362 if (we_p
->resp_buff_size
1363 && copy_to_user(we_p
->resp_addr
, we_p
->resp_buff
,
1364 we_p
->resp_buff_size
))
1367 SET_RDWRMASK(we_p
->status
[0], STAT_NOWORK
);
static unsigned char NULL_psmid[8] =
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};

/**
 * Used in device configuration functions
 */
#define MAX_RESET 90

/**
 * This is used only for PCICC support
 */
static int
is_PKCS11_padded(unsigned char *buffer, int length)
{
	int i;

	if ((buffer[0] != 0x00) || (buffer[1] != 0x01))
		return 0;
	for (i = 2; i < length; i++)
		if (buffer[i] != 0xFF)
			break;
	if ((i < 10) || (i == length))
		return 0;
	if (buffer[i] != 0x00)
		return 0;
	return 1;
}

/**
 * This is used only for PCICC support
 */
static int
is_PKCS12_padded(unsigned char *buffer, int length)
{
	int i;

	if ((buffer[0] != 0x00) || (buffer[1] != 0x02))
		return 0;
	for (i = 2; i < length; i++)
		if (buffer[i] == 0x00)
			break;
	if ((i < 10) || (i == length))
		return 0;
	if (buffer[i] != 0x00)
		return 0;
	return 1;
}
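
/*
 * Illustrative sketch (not part of the original driver, compiled out): the
 * two PKCS #1 block formats the predicates above recognize.  The example
 * buffers and their lengths are assumptions for illustration only.
 *
 *   block type 01 (is_PKCS11_padded):  00 01 FF FF ... FF 00 <data>
 *   block type 02 (is_PKCS12_padded):  00 02 <nonzero pad..> 00 <data>
 *
 * Both checks insist on at least 8 pad bytes (i >= 10) and on the 0x00
 * separator being present before the end of the buffer.
 */
#if 0
static void pkcs_padding_example(void)
{
	unsigned char blocktype01[16] = {
		0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0x00, 'p', 'l', 'a', 'i', 'n' };
	unsigned char blocktype02[16] = {
		0x00, 0x02, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66,
		0x77, 0x88, 0x00, 'p', 'l', 'a', 'i', 'n' };

	/* both of these evaluate to 1 */
	is_PKCS11_padded(blocktype01, sizeof(blocktype01));
	is_PKCS12_padded(blocktype02, sizeof(blocktype02));
}
#endif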
1418 * builds struct caller and converts message from generic format to
1419 * device-dependent format
1420 * func is ICARSAMODEXPO or ICARSACRT
1421 * function is PCI_FUNC_KEY_ENCRYPT or PCI_FUNC_KEY_DECRYPT
1424 build_caller(struct work_element
*we_p
, short function
)
1427 struct caller
*caller_p
= (struct caller
*)we_p
->requestptr
;
1429 if ((we_p
->devtype
!= PCICC
) && (we_p
->devtype
!= PCICA
) &&
1430 (we_p
->devtype
!= PCIXCC_MCL2
) && (we_p
->devtype
!= PCIXCC_MCL3
) &&
1431 (we_p
->devtype
!= CEX2C
))
1432 return SEN_NOT_AVAIL
;
1434 memcpy(caller_p
->caller_id
, we_p
->caller_id
,
1435 sizeof(caller_p
->caller_id
));
1436 caller_p
->caller_dev_dep_req_p
= caller_p
->caller_dev_dep_req
;
1437 caller_p
->caller_dev_dep_req_l
= MAX_RESPONSE_SIZE
;
1438 caller_p
->caller_buf_p
= we_p
->buffer
;
1439 INIT_LIST_HEAD(&(caller_p
->caller_liste
));
1441 rv
= convert_request(we_p
->buffer
, we_p
->funccode
, function
,
1442 z90crypt
.cdx
, we_p
->devtype
,
1443 &caller_p
->caller_dev_dep_req_l
,
1444 caller_p
->caller_dev_dep_req_p
);
1446 if (rv
== SEN_NOT_AVAIL
)
1447 PDEBUG("request can't be processed on hdwr avail\n");
1449 PRINTK("Error from convert_request: %d\n", rv
);
1452 memcpy(&(caller_p
->caller_dev_dep_req_p
[4]), we_p
->caller_id
,8);
static void
unbuild_caller(struct device *device_p, struct caller *caller_p)
{
	if (caller_p->caller_liste.next && caller_p->caller_liste.prev)
		if (!list_empty(&caller_p->caller_liste)) {
			list_del_init(&caller_p->caller_liste);
			device_p->dev_caller_count--;
		}
	memset(caller_p->caller_id, 0, sizeof(caller_p->caller_id));
}
1470 get_crypto_request_buffer(struct work_element
*we_p
)
1472 struct ica_rsa_modexpo
*mex_p
;
1473 struct ica_rsa_modexpo_crt
*crt_p
;
1474 unsigned char *temp_buffer
;
1478 mex_p
= (struct ica_rsa_modexpo
*) we_p
->buffer
;
1479 crt_p
= (struct ica_rsa_modexpo_crt
*) we_p
->buffer
;
1481 PDEBUG("device type input = %d\n", we_p
->devtype
);
1483 if (z90crypt
.terminating
)
1484 return REC_NO_RESPONSE
;
1485 if (memcmp(we_p
->caller_id
, NULL_psmid
, 8) == 0) {
1486 PRINTK("psmid zeroes\n");
1487 return SEN_FATAL_ERROR
;
1489 if (!we_p
->buffer
) {
1490 PRINTK("buffer pointer NULL\n");
1491 return SEN_USER_ERROR
;
1493 if (!we_p
->requestptr
) {
1494 PRINTK("caller pointer NULL\n");
1495 return SEN_USER_ERROR
;
1498 if ((we_p
->devtype
!= PCICA
) && (we_p
->devtype
!= PCICC
) &&
1499 (we_p
->devtype
!= PCIXCC_MCL2
) && (we_p
->devtype
!= PCIXCC_MCL3
) &&
1500 (we_p
->devtype
!= CEX2C
) && (we_p
->devtype
!= ANYDEV
)) {
1501 PRINTK("invalid device type\n");
1502 return SEN_USER_ERROR
;
1505 if ((mex_p
->inputdatalength
< 1) ||
1506 (mex_p
->inputdatalength
> MAX_MOD_SIZE
)) {
1507 PRINTK("inputdatalength[%d] is not valid\n",
1508 mex_p
->inputdatalength
);
1509 return SEN_USER_ERROR
;
1512 if (mex_p
->outputdatalength
< mex_p
->inputdatalength
) {
1513 PRINTK("outputdatalength[%d] < inputdatalength[%d]\n",
1514 mex_p
->outputdatalength
, mex_p
->inputdatalength
);
1515 return SEN_USER_ERROR
;
1518 if (!mex_p
->inputdata
|| !mex_p
->outputdata
) {
1519 PRINTK("inputdata[%p] or outputdata[%p] is NULL\n",
1520 mex_p
->outputdata
, mex_p
->inputdata
);
1521 return SEN_USER_ERROR
;
1525 * As long as outputdatalength is big enough, we can set the
1526 * outputdatalength equal to the inputdatalength, since that is the
1527 * number of bytes we will copy in any case
1529 mex_p
->outputdatalength
= mex_p
->inputdatalength
;
1532 switch (we_p
->funccode
) {
1534 if (!mex_p
->b_key
|| !mex_p
->n_modulus
)
1535 rv
= SEN_USER_ERROR
;
1538 if (!IS_EVEN(crt_p
->inputdatalength
)) {
1539 PRINTK("inputdatalength[%d] is odd, CRT form\n",
1540 crt_p
->inputdatalength
);
1541 rv
= SEN_USER_ERROR
;
1544 if (!crt_p
->bp_key
||
1548 !crt_p
->u_mult_inv
) {
1549 PRINTK("CRT form, bad data: %p/%p/%p/%p/%p\n",
1550 crt_p
->bp_key
, crt_p
->bq_key
,
1551 crt_p
->np_prime
, crt_p
->nq_prime
,
1553 rv
= SEN_USER_ERROR
;
1557 PRINTK("bad func = %d\n", we_p
->funccode
);
1558 rv
= SEN_USER_ERROR
;
1564 if (select_device_type(&we_p
->devtype
, mex_p
->inputdatalength
) < 0)
1565 return SEN_NOT_AVAIL
;
1567 temp_buffer
= (unsigned char *)we_p
+ sizeof(struct work_element
) +
1568 sizeof(struct caller
);
1569 if (copy_from_user(temp_buffer
, mex_p
->inputdata
,
1570 mex_p
->inputdatalength
) != 0)
1571 return SEN_RELEASED
;
1573 function
= PCI_FUNC_KEY_ENCRYPT
;
1574 switch (we_p
->devtype
) {
1575 /* PCICA does everything with a simple RSA mod-expo operation */
1577 function
= PCI_FUNC_KEY_ENCRYPT
;
1580 * PCIXCC_MCL2 does all Mod-Expo form with a simple RSA mod-expo
1581 * operation, and all CRT forms with a PKCS-1.2 format decrypt.
1582 * PCIXCC_MCL3 and CEX2C do all Mod-Expo and CRT forms with a simple RSA
1583 * mod-expo operation
1586 if (we_p
->funccode
== ICARSAMODEXPO
)
1587 function
= PCI_FUNC_KEY_ENCRYPT
;
1589 function
= PCI_FUNC_KEY_DECRYPT
;
1593 if (we_p
->funccode
== ICARSAMODEXPO
)
1594 function
= PCI_FUNC_KEY_ENCRYPT
;
1596 function
= PCI_FUNC_KEY_DECRYPT
;
1599 * PCICC does everything as a PKCS-1.2 format request
1602 /* PCICC cannot handle input that is is PKCS#1.1 padded */
1603 if (is_PKCS11_padded(temp_buffer
, mex_p
->inputdatalength
)) {
1604 return SEN_NOT_AVAIL
;
1606 if (we_p
->funccode
== ICARSAMODEXPO
) {
1607 if (is_PKCS12_padded(temp_buffer
,
1608 mex_p
->inputdatalength
))
1609 function
= PCI_FUNC_KEY_ENCRYPT
;
1611 function
= PCI_FUNC_KEY_DECRYPT
;
1613 /* all CRT forms are decrypts */
1614 function
= PCI_FUNC_KEY_DECRYPT
;
1617 PDEBUG("function: %04x\n", function
);
1618 rv
= build_caller(we_p
, function
);
1619 PDEBUG("rv from build_caller = %d\n", rv
);
1624 z90crypt_prepare(struct work_element
*we_p
, unsigned int funccode
,
1625 const char __user
*buffer
)
1629 we_p
->devindex
= -1;
1630 if (funccode
== ICARSAMODEXPO
)
1631 we_p
->buff_size
= sizeof(struct ica_rsa_modexpo
);
1633 we_p
->buff_size
= sizeof(struct ica_rsa_modexpo_crt
);
1635 if (copy_from_user(we_p
->buffer
, buffer
, we_p
->buff_size
))
1638 we_p
->audit
[0] |= FP_COPYFROM
;
1639 SET_RDWRMASK(we_p
->status
[0], STAT_WRITTEN
);
1640 we_p
->funccode
= funccode
;
1642 we_p
->audit
[0] |= FP_BUFFREQ
;
1643 rv
= get_crypto_request_buffer(we_p
);
1646 we_p
->audit
[0] |= FP_BUFFGOT
;
1648 case SEN_USER_ERROR
:
1651 case SEN_QUEUE_FULL
:
1657 case REC_NO_RESPONSE
:
1665 PRINTK("rv = %d\n", rv
);
1669 if (CHK_RDWRMASK(we_p
->status
[0]) == STAT_WRITTEN
)
1670 SET_RDWRMASK(we_p
->status
[0], STAT_DEFAULT
);
static void
purge_work_element(struct work_element *we_p)
{
	struct list_head *lptr;

	spin_lock_irq(&queuespinlock);
	list_for_each(lptr, &request_list) {
		if (lptr == &we_p->liste) {
			list_del_init(lptr);
			requestq_count--;
			break;
		}
	}
	list_for_each(lptr, &pending_list) {
		if (lptr == &we_p->liste) {
			list_del_init(lptr);
			pendingq_count--;
			break;
		}
	}
	spin_unlock_irq(&queuespinlock);
}
1698 * Build the request and send it.
1701 z90crypt_rsa(struct priv_data
*private_data_p
, pid_t pid
,
1702 unsigned int cmd
, unsigned long arg
)
1704 struct work_element
*we_p
;
1707 if ((rv
= allocate_work_element(&we_p
, private_data_p
, pid
))) {
1708 PDEBUG("PID %d: allocate_work_element returned ENOMEM\n", pid
);
1711 if ((rv
= z90crypt_prepare(we_p
, cmd
, (const char __user
*)arg
)))
1712 PDEBUG("PID %d: rv = %d from z90crypt_prepare\n", pid
, rv
);
1714 if ((rv
= z90crypt_send(we_p
, (const char *)arg
)))
1715 PDEBUG("PID %d: rv %d from z90crypt_send.\n", pid
, rv
);
1717 we_p
->audit
[0] |= FP_ASLEEP
;
1718 wait_event(we_p
->waitq
, atomic_read(&we_p
->alarmrung
));
1719 we_p
->audit
[0] |= FP_AWAKE
;
1723 rv
= z90crypt_process_results(we_p
, (char __user
*)arg
);
1725 if ((we_p
->status
[0] & STAT_FAILED
)) {
1728 * EINVAL *after* receive is almost always a padding error or
1729 * length error issued by a coprocessor (not an accelerator).
1730 * We convert this return value to -EGETBUFF which should
1731 * trigger a fallback to software.
1734 if (we_p
->devtype
!= PCICA
)
1738 if (z90crypt
.mask
.st_count
> 0)
1739 rv
= -ERESTARTSYS
; // retry with another
1741 rv
= -ENODEV
; // no cards left
1742 /* fall through to clean up request queue */
1745 switch (CHK_RDWRMASK(we_p
->status
[0])) {
1747 purge_work_element(we_p
);
1756 we_p
->status
[0] ^= STAT_FAILED
;
1760 free_page((long)we_p
);
1765 * This function is a little long, but it's really just one large switch
1769 z90crypt_unlocked_ioctl(struct file
*filp
, unsigned int cmd
, unsigned long arg
)
1771 struct priv_data
*private_data_p
= filp
->private_data
;
1772 unsigned char *status
;
1773 unsigned char *qdepth
;
1774 unsigned int *reqcnt
;
1775 struct ica_z90_status
*pstat
;
1776 int ret
, i
, loopLim
, tempstat
;
1777 static int deprecated_msg_count1
= 0;
1778 static int deprecated_msg_count2
= 0;
1780 PDEBUG("filp %p (PID %d), cmd 0x%08X\n", filp
, PID(), cmd
);
1781 PDEBUG("cmd 0x%08X: dir %s, size 0x%04X, type 0x%02X, nr 0x%02X\n",
1783 !_IOC_DIR(cmd
) ? "NO"
1784 : ((_IOC_DIR(cmd
) == (_IOC_READ
|_IOC_WRITE
)) ? "RW"
1785 : ((_IOC_DIR(cmd
) == _IOC_READ
) ? "RD"
1787 _IOC_SIZE(cmd
), _IOC_TYPE(cmd
), _IOC_NR(cmd
));
1789 if (_IOC_TYPE(cmd
) != Z90_IOCTL_MAGIC
) {
1790 PRINTK("cmd 0x%08X contains bad magic\n", cmd
);
1798 if (quiesce_z90crypt
) {
1802 ret
= -ENODEV
; // Default if no devices
1803 loopLim
= z90crypt
.hdware_info
->hdware_mask
.st_count
-
1804 (z90crypt
.hdware_info
->hdware_mask
.disabled_count
+
1805 z90crypt
.hdware_info
->hdware_mask
.user_disabled_count
);
1806 for (i
= 0; i
< loopLim
; i
++) {
1807 ret
= z90crypt_rsa(private_data_p
, PID(), cmd
, arg
);
1808 if (ret
!= -ERESTARTSYS
)
1811 if (ret
== -ERESTARTSYS
)
1815 case Z90STAT_TOTALCOUNT
:
1816 tempstat
= get_status_totalcount();
1817 if (copy_to_user((int __user
*)arg
, &tempstat
,sizeof(int)) != 0)
1821 case Z90STAT_PCICACOUNT
:
1822 tempstat
= get_status_PCICAcount();
1823 if (copy_to_user((int __user
*)arg
, &tempstat
, sizeof(int)) != 0)
1827 case Z90STAT_PCICCCOUNT
:
1828 tempstat
= get_status_PCICCcount();
1829 if (copy_to_user((int __user
*)arg
, &tempstat
, sizeof(int)) != 0)
1833 case Z90STAT_PCIXCCMCL2COUNT
:
1834 tempstat
= get_status_PCIXCCMCL2count();
1835 if (copy_to_user((int __user
*)arg
, &tempstat
, sizeof(int)) != 0)
1839 case Z90STAT_PCIXCCMCL3COUNT
:
1840 tempstat
= get_status_PCIXCCMCL3count();
1841 if (copy_to_user((int __user
*)arg
, &tempstat
, sizeof(int)) != 0)
1845 case Z90STAT_CEX2CCOUNT
:
1846 tempstat
= get_status_CEX2Ccount();
1847 if (copy_to_user((int __user
*)arg
, &tempstat
, sizeof(int)) != 0)
1851 case Z90STAT_REQUESTQ_COUNT
:
1852 tempstat
= get_status_requestq_count();
1853 if (copy_to_user((int __user
*)arg
, &tempstat
, sizeof(int)) != 0)
1857 case Z90STAT_PENDINGQ_COUNT
:
1858 tempstat
= get_status_pendingq_count();
1859 if (copy_to_user((int __user
*)arg
, &tempstat
, sizeof(int)) != 0)
1863 case Z90STAT_TOTALOPEN_COUNT
:
1864 tempstat
= get_status_totalopen_count();
1865 if (copy_to_user((int __user
*)arg
, &tempstat
, sizeof(int)) != 0)
1869 case Z90STAT_DOMAIN_INDEX
:
1870 tempstat
= get_status_domain_index();
1871 if (copy_to_user((int __user
*)arg
, &tempstat
, sizeof(int)) != 0)
1875 case Z90STAT_STATUS_MASK
:
1876 status
= kmalloc(Z90CRYPT_NUM_APS
, GFP_KERNEL
);
1878 PRINTK("kmalloc for status failed!\n");
1882 get_status_status_mask(status
);
1883 if (copy_to_user((char __user
*) arg
, status
, Z90CRYPT_NUM_APS
)
1889 case Z90STAT_QDEPTH_MASK
:
1890 qdepth
= kmalloc(Z90CRYPT_NUM_APS
, GFP_KERNEL
);
1892 PRINTK("kmalloc for qdepth failed!\n");
1896 get_status_qdepth_mask(qdepth
);
1897 if (copy_to_user((char __user
*) arg
, qdepth
, Z90CRYPT_NUM_APS
) != 0)
1902 case Z90STAT_PERDEV_REQCNT
:
1903 reqcnt
= kmalloc(sizeof(int) * Z90CRYPT_NUM_APS
, GFP_KERNEL
);
1905 PRINTK("kmalloc for reqcnt failed!\n");
1909 get_status_perdevice_reqcnt(reqcnt
);
1910 if (copy_to_user((char __user
*) arg
, reqcnt
,
1911 Z90CRYPT_NUM_APS
* sizeof(int)) != 0)
1916 /* THIS IS DEPRECATED. USE THE NEW STATUS CALLS */
1918 if (deprecated_msg_count1
< 20) {
1919 PRINTK("deprecated call to ioctl (ICAZ90STATUS)!\n");
1920 deprecated_msg_count1
++;
1921 if (deprecated_msg_count1
== 20)
1922 PRINTK("No longer issuing messages related to "
1923 "deprecated call to ICAZ90STATUS.\n");
1926 pstat
= kmalloc(sizeof(struct ica_z90_status
), GFP_KERNEL
);
1928 PRINTK("kmalloc for pstat failed!\n");
1933 pstat
->totalcount
= get_status_totalcount();
1934 pstat
->leedslitecount
= get_status_PCICAcount();
1935 pstat
->leeds2count
= get_status_PCICCcount();
1936 pstat
->requestqWaitCount
= get_status_requestq_count();
1937 pstat
->pendingqWaitCount
= get_status_pendingq_count();
1938 pstat
->totalOpenCount
= get_status_totalopen_count();
1939 pstat
->cryptoDomain
= get_status_domain_index();
1940 get_status_status_mask(pstat
->status
);
1941 get_status_qdepth_mask(pstat
->qdepth
);
1943 if (copy_to_user((struct ica_z90_status __user
*) arg
, pstat
,
1944 sizeof(struct ica_z90_status
)) != 0)
1949 /* THIS IS DEPRECATED. USE THE NEW STATUS CALLS */
1950 case Z90STAT_PCIXCCCOUNT
:
1951 if (deprecated_msg_count2
< 20) {
1952 PRINTK("deprecated ioctl (Z90STAT_PCIXCCCOUNT)!\n");
1953 deprecated_msg_count2
++;
1954 if (deprecated_msg_count2
== 20)
1955 PRINTK("No longer issuing messages about depre"
1956 "cated ioctl Z90STAT_PCIXCCCOUNT.\n");
1959 tempstat
= get_status_PCIXCCcount();
1960 if (copy_to_user((int *)arg
, &tempstat
, sizeof(int)) != 0)
1965 if (current
->euid
!= 0) {
1966 PRINTK("QUIESCE fails: euid %d\n",
1970 PRINTK("QUIESCE device from PID %d\n", PID());
1971 quiesce_z90crypt
= 1;
1976 /* user passed an invalid IOCTL number */
1977 PDEBUG("cmd 0x%08X contains invalid ioctl code\n", cmd
);
1986 sprintcl(unsigned char *outaddr
, unsigned char *addr
, unsigned int len
)
1991 for (i
= 0; i
< len
; i
++)
1992 hl
+= sprintf(outaddr
+hl
, "%01x", (unsigned int) addr
[i
]);
1993 hl
+= sprintf(outaddr
+hl
, " ");
1999 sprintrw(unsigned char *outaddr
, unsigned char *addr
, unsigned int len
)
2003 hl
= sprintf(outaddr
, " ");
2005 for (c
= 0; c
< (len
/ 16); c
++) {
2006 hl
+= sprintcl(outaddr
+hl
, addr
+inl
, 16);
2012 hl
+= sprintcl(outaddr
+hl
, addr
+inl
, cx
);
2016 hl
+= sprintf(outaddr
+hl
, "\n");
2022 sprinthx(unsigned char *title
, unsigned char *outaddr
,
2023 unsigned char *addr
, unsigned int len
)
2027 hl
= sprintf(outaddr
, "\n%s\n", title
);
2029 for (r
= 0; r
< (len
/ 64); r
++) {
2030 hl
+= sprintrw(outaddr
+hl
, addr
+inl
, 64);
2035 hl
+= sprintrw(outaddr
+hl
, addr
+inl
, rx
);
2039 hl
+= sprintf(outaddr
+hl
, "\n");
2045 sprinthx4(unsigned char *title
, unsigned char *outaddr
,
2046 unsigned int *array
, unsigned int len
)
2050 hl
= sprintf(outaddr
, "\n%s\n", title
);
2052 for (r
= 0; r
< len
; r
++) {
2054 hl
+= sprintf(outaddr
+hl
, " ");
2055 hl
+= sprintf(outaddr
+hl
, "%08X ", array
[r
]);
2057 hl
+= sprintf(outaddr
+hl
, "\n");
2060 hl
+= sprintf(outaddr
+hl
, "\n");
2066 z90crypt_status(char *resp_buff
, char **start
, off_t offset
,
2067 int count
, int *eof
, void *data
)
2069 unsigned char *workarea
;
2072 /* resp_buff is a page. Use the right half for a work area */
2073 workarea
= resp_buff
+2000;
2075 len
+= sprintf(resp_buff
+len
, "\nz90crypt version: %d.%d.%d\n",
2076 z90crypt_VERSION
, z90crypt_RELEASE
, z90crypt_VARIANT
);
2077 len
+= sprintf(resp_buff
+len
, "Cryptographic domain: %d\n",
2078 get_status_domain_index());
2079 len
+= sprintf(resp_buff
+len
, "Total device count: %d\n",
2080 get_status_totalcount());
2081 len
+= sprintf(resp_buff
+len
, "PCICA count: %d\n",
2082 get_status_PCICAcount());
2083 len
+= sprintf(resp_buff
+len
, "PCICC count: %d\n",
2084 get_status_PCICCcount());
2085 len
+= sprintf(resp_buff
+len
, "PCIXCC MCL2 count: %d\n",
2086 get_status_PCIXCCMCL2count());
2087 len
+= sprintf(resp_buff
+len
, "PCIXCC MCL3 count: %d\n",
2088 get_status_PCIXCCMCL3count());
2089 len
+= sprintf(resp_buff
+len
, "CEX2C count: %d\n",
2090 get_status_CEX2Ccount());
2091 len
+= sprintf(resp_buff
+len
, "requestq count: %d\n",
2092 get_status_requestq_count());
2093 len
+= sprintf(resp_buff
+len
, "pendingq count: %d\n",
2094 get_status_pendingq_count());
2095 len
+= sprintf(resp_buff
+len
, "Total open handles: %d\n\n",
2096 get_status_totalopen_count());
2098 "Online devices: 1: PCICA, 2: PCICC, 3: PCIXCC (MCL2), "
2099 "4: PCIXCC (MCL3), 5: CEX2C",
2101 get_status_status_mask(workarea
),
2103 len
+= sprinthx("Waiting work element counts",
2105 get_status_qdepth_mask(workarea
),
2108 "Per-device successfully completed request counts",
2110 get_status_perdevice_reqcnt((unsigned int *)workarea
),
2113 memset(workarea
, 0, Z90CRYPT_NUM_APS
* sizeof(unsigned int));
static inline void
disable_card(int card_index)
{
	struct device *devp;

	devp = LONG2DEVPTR(card_index);
	if (!devp || devp->user_disabled)
		return;
	devp->user_disabled = 1;
	z90crypt.hdware_info->hdware_mask.user_disabled_count++;
	if (devp->dev_type == -1)
		return;
	z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count++;
}

static inline void
enable_card(int card_index)
{
	struct device *devp;

	devp = LONG2DEVPTR(card_index);
	if (!devp || !devp->user_disabled)
		return;
	devp->user_disabled = 0;
	z90crypt.hdware_info->hdware_mask.user_disabled_count--;
	if (devp->dev_type == -1)
		return;
	z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count--;
}
2148 scan_char(unsigned char *bf
, unsigned int len
,
2149 unsigned int *offs
, unsigned int *p_eof
, unsigned char c
)
2151 unsigned int i
, found
;
2154 for (i
= 0; i
< len
; i
++) {
2159 if (bf
[i
] == '\0') {
2163 if (bf
[i
] == '\n') {
2172 scan_string(unsigned char *bf
, unsigned int len
,
2173 unsigned int *offs
, unsigned int *p_eof
, unsigned char *s
)
2175 unsigned int temp_len
, temp_offs
, found
, eof
;
2177 temp_len
= temp_offs
= found
= eof
= 0;
2178 while (!eof
&& !found
) {
2179 found
= scan_char(bf
+temp_len
, len
-temp_len
,
2180 &temp_offs
, &eof
, *s
);
2182 temp_len
+= temp_offs
;
2189 if (len
>= temp_offs
+strlen(s
)) {
2190 found
= !strncmp(bf
+temp_len
-1, s
, strlen(s
));
2192 *offs
= temp_len
+strlen(s
)-1;
2206 z90crypt_status_write(struct file
*file
, const char __user
*buffer
,
2207 unsigned long count
, void *data
)
2209 int i
, j
, len
, offs
, found
, eof
;
2210 unsigned char *lbuf
;
2211 unsigned int local_count
;
2213 #define LBUFSIZE 600
2214 lbuf
= kmalloc(LBUFSIZE
, GFP_KERNEL
);
2216 PRINTK("kmalloc failed!\n");
2223 local_count
= UMIN((unsigned int)count
, LBUFSIZE
-1);
2225 if (copy_from_user(lbuf
, buffer
, local_count
) != 0) {
2230 lbuf
[local_count
-1] = '\0';
2236 found
= scan_string(lbuf
+len
, local_count
-len
, &offs
, &eof
,
2249 found
= scan_char(lbuf
+len
, local_count
-len
, &offs
, &eof
, '\n');
2251 if (!found
|| eof
) {
2258 for (i
= 0; i
< 80; i
++) {
2259 switch (*(lbuf
+len
+i
)) {
/**
 * Functions that run under a timer, with no process id
 *
 * The task functions:
 *     z90crypt_reader_task
 *	   helper_handle_work_element
 *     z90crypt_config_task
 *     z90crypt_cleanup_task
 *
 *     z90crypt_schedule_reader_timer
 *     z90crypt_schedule_reader_task
 *     z90crypt_schedule_config_task
 *     z90crypt_schedule_cleanup_task
 */
2312 receive_from_crypto_device(int index, unsigned char *psmid, int *buff_len_p,
2313                            unsigned char *buff, unsigned char __user **dest_p_p)
2316     struct device *dev_ptr;
2317     struct caller *caller_p;
2318     struct ica_rsa_modexpo *icaMsg_p;
2319     struct list_head *ptr, *tptr;
2321     memcpy(psmid, NULL_psmid, sizeof(NULL_psmid));
2323     if (z90crypt.terminating)
2324         return REC_FATAL_ERROR;
2327     dev_ptr = z90crypt.device_p[index];
2330     if (!dev_ptr || dev_ptr->disabled) {
2331         rv = REC_NO_WORK; // a disabled device can't return work
2334     if (dev_ptr->dev_self_x != index) {
2335         PRINTKC("Corrupt dev ptr\n");
2336         z90crypt.terminating = 1;
2337         rv = REC_FATAL_ERROR;
2340     if (!dev_ptr->dev_resp_l || !dev_ptr->dev_resp_p) {
2341         dv = DEV_REC_EXCEPTION;
2342         PRINTK("dev_resp_l = %d, dev_resp_p = %p\n",
2343                dev_ptr->dev_resp_l, dev_ptr->dev_resp_p);
2345         PDEBUG("Dequeue called for device %d\n", index);
2346         dv = receive_from_AP(index, z90crypt.cdx,
2347                              dev_ptr->dev_resp_l,
2348                              dev_ptr->dev_resp_p, psmid);
2351     case DEV_REC_EXCEPTION:
2352         rv = REC_FATAL_ERROR;
2353         z90crypt.terminating = 1;
2354         PRINTKC("Exception in receive from device %d\n",
2366     case DEV_BAD_MESSAGE:
2368     case REC_HARDWAR_ERR:
2370         rv = REC_NO_RESPONSE;
2375     if (dev_ptr->dev_caller_count <= 0) {
2380     list_for_each_safe(ptr, tptr, &dev_ptr->dev_caller_list) {
2381         caller_p = list_entry(ptr, struct caller, caller_liste);
2382         if (!memcmp(caller_p->caller_id, psmid,
2383                     sizeof(caller_p->caller_id))) {
2384             if (!list_empty(&caller_p->caller_liste)) {
2386                 dev_ptr->dev_caller_count--;
2393         PRINTKW("Unable to locate PSMID %02X%02X%02X%02X%02X"
2394                 "%02X%02X%02X in device list\n",
2395                 psmid[0], psmid[1], psmid[2], psmid[3],
2396                 psmid[4], psmid[5], psmid[6], psmid[7]);
2401     PDEBUG("caller_p after successful receive: %p\n", caller_p);
2402     rv = convert_response(dev_ptr->dev_resp_p,
2403                           caller_p->caller_buf_p, buff_len_p, buff);
2407     case REC_OPERAND_INV:
2408     case REC_OPERAND_SIZE:
2410     case REC_INVALID_PAD:
2411         PDEBUG("device %d: 'user error' %d\n", index, rv);
2413     case WRONG_DEVICE_TYPE:
2414     case REC_HARDWAR_ERR:
2415     case REC_BAD_MESSAGE:
2416         PRINTKW("device %d: hardware error %d\n", index, rv);
2417         rv = REC_NO_RESPONSE;
2420         PDEBUG("device %d: rv = %d\n", index, rv);
2427     PDEBUG("Successful receive from device %d\n", index);
2428     icaMsg_p = (struct ica_rsa_modexpo *)caller_p->caller_buf_p;
2429     *dest_p_p = icaMsg_p->outputdata;
2430     if (*buff_len_p == 0)
2431         PRINTK("Zero *buff_len_p\n");
2433     case REC_NO_RESPONSE:
2434         PRINTKW("Removing device %d from availability\n", index);
2435         remove_device(dev_ptr);
2440     unbuild_caller(dev_ptr, caller_p);

2446 helper_send_work(int index)
2448     struct work_element *rq_p;
2451     if (list_empty(&request_list))
2454     rq_p = list_entry(request_list.next, struct work_element, liste);
2455     list_del_init(&rq_p->liste);
2456     rq_p->audit[1] |= FP_REMREQUEST;
2457     if (rq_p->devtype == SHRT2DEVPTR(index)->dev_type) {
2458         rq_p->devindex = SHRT2LONG(index);
2459         rv = send_to_crypto_device(rq_p);
2461         rq_p->requestsent = jiffies;
2462         rq_p->audit[0] |= FP_SENT;
2463         list_add_tail(&rq_p->liste, &pending_list);
2465         rq_p->audit[0] |= FP_PENDING;
2468     case REC_OPERAND_INV:
2469     case REC_OPERAND_SIZE:
2471     case REC_INVALID_PAD:
2472         rq_p->retcode = -EINVAL;
2476     case REC_NO_RESPONSE:
2478         if (z90crypt.mask.st_count > 1)
2482             rq_p->retcode = -ENODEV;
2485         rq_p->status[0] |= STAT_FAILED;
2486         rq_p->audit[1] |= FP_AWAKENING;
2487         atomic_set(&rq_p->alarmrung, 1);
2488         wake_up(&rq_p->waitq);
2491     if (z90crypt.mask.st_count > 1)
2492         rq_p->retcode = -ERESTARTSYS;
2494         rq_p->retcode = -ENODEV;
2495     rq_p->status[0] |= STAT_FAILED;
2496     rq_p->audit[1] |= FP_AWAKENING;
2497     atomic_set(&rq_p->alarmrung, 1);
2498     wake_up(&rq_p->waitq);

2503 helper_handle_work_element(int index, unsigned char psmid[8], int rc,
2504                            int buff_len, unsigned char *buff,
2505                            unsigned char __user *resp_addr)
2507     struct work_element *pq_p;
2508     struct list_head *lptr, *tptr;
2511     list_for_each_safe(lptr, tptr, &pending_list) {
2512         pq_p = list_entry(lptr, struct work_element, liste);
2513         if (!memcmp(pq_p->caller_id, psmid, sizeof(pq_p->caller_id))) {
2514             list_del_init(lptr);
2516             pq_p->audit[1] |= FP_NOTPENDING;
2523         PRINTK("device %d has work but no caller exists on pending Q\n",
2530     pq_p->resp_buff_size = buff_len;
2531     pq_p->audit[1] |= FP_RESPSIZESET;
2533     pq_p->resp_addr = resp_addr;
2534     pq_p->audit[1] |= FP_RESPADDRCOPIED;
2535     memcpy(pq_p->resp_buff, buff, buff_len);
2536     pq_p->audit[1] |= FP_RESPBUFFCOPIED;
2539     case REC_OPERAND_INV:
2540     case REC_OPERAND_SIZE:
2542     case REC_INVALID_PAD:
2543         PDEBUG("-EINVAL after application error %d\n", rc);
2544         pq_p->retcode = -EINVAL;
2545         pq_p->status[0] |= STAT_FAILED;
2548         pq_p->retcode = -ERESTARTSYS;
2549         pq_p->status[0] |= STAT_FAILED;
2551     case REC_NO_RESPONSE:
2553         if (z90crypt.mask.st_count > 1)
2554             pq_p->retcode = -ERESTARTSYS;
2556             pq_p->retcode = -ENODEV;
2557         pq_p->status[0] |= STAT_FAILED;
2560     if ((pq_p->status[0] != STAT_FAILED) || (pq_p->retcode != -ERELEASED)) {
2561         pq_p->audit[1] |= FP_AWAKENING;
2562         atomic_set(&pq_p->alarmrung, 1);
2563         wake_up(&pq_p->waitq);
2568 * return TRUE if the work element should be removed from the queue
2571 helper_receive_rc(int index, int *rc_p)
2575     case REC_OPERAND_INV:
2576     case REC_OPERAND_SIZE:
2578     case REC_INVALID_PAD:
2586     case REC_FATAL_ERROR:
2589     case REC_NO_RESPONSE:
2593         PRINTK("rc %d, device %d converted to REC_NO_RESPONSE\n",
2594                *rc_p, SHRT2LONG(index));
2595         *rc_p = REC_NO_RESPONSE;

2602 z90crypt_schedule_reader_timer(void)
2604     if (timer_pending(&reader_timer))
2606     if (mod_timer(&reader_timer, jiffies+(READERTIME*HZ/1000)) != 0)
2607         PRINTK("Timer pending while modifying reader timer\n");

2611 z90crypt_reader_task(unsigned long ptr)
2613     int workavail, index, rc, buff_len;
2614     unsigned char psmid[8];
2615     unsigned char __user *resp_addr;
2616     static unsigned char buff[1024];
2619 * we use workavail = 2 to ensure 2 passes with nothing dequeued before
2620 * exiting the loop. If (pendingq_count+requestq_count) == 0 after the
2621 * loop, there is no work remaining on the queues.
2629     spin_lock_irq(&queuespinlock);
2630     memset(buff, 0x00, sizeof(buff));
2632     /* Dequeue once from each device in round robin. */
2633     for (index = 0; index < z90crypt.mask.st_count; index++) {
2634         PDEBUG("About to receive.\n");
2635         rc = receive_from_crypto_device(SHRT2LONG(index),
2640         PDEBUG("Dequeued: rc = %d.\n", rc);
2642         if (helper_receive_rc(index, &rc)) {
2643             if (rc != REC_NO_RESPONSE) {
2644                 helper_send_work(index);
2648             helper_handle_work_element(index, psmid, rc,
2653         if (rc == REC_FATAL_ERROR)
2654             PRINTKW("REC_FATAL_ERROR from device %d!\n",
2657     spin_unlock_irq(&queuespinlock);
2660     if (pendingq_count + requestq_count)
2661         z90crypt_schedule_reader_timer();
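/*
 * Illustrative sketch (not part of the driver): the "two empty passes before
 * exit" loop shape that the workavail comment above describes. The demo_*
 * names are hypothetical; the real task walks the device index and dequeues
 * through receive_from_crypto_device().
 */
static int demo_dequeue_once(void)
{
    return 0;                   /* stub: pretend nothing was waiting */
}

static void demo_reader_loop(void)
{
    int workavail = 2;          /* leave only after 2 passes that find nothing */

    while (workavail) {
        workavail--;
        if (demo_dequeue_once() > 0)
            workavail = 2;      /* work found: grant two fresh passes */
    }
}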
2665 z90crypt_schedule_config_task(unsigned int expiration)
2667     if (timer_pending(&config_timer))
2669     if (mod_timer(&config_timer, jiffies+(expiration*HZ)) != 0)
2670         PRINTK("Timer pending while modifying config timer\n");

2674 z90crypt_config_task(unsigned long ptr)
2678     PDEBUG("jiffies %ld\n", jiffies);
2680     if ((rc = refresh_z90crypt(&z90crypt.cdx)))
2681         PRINTK("Error %d detected in refresh_z90crypt.\n", rc);
2682     /* If return was fatal, don't bother reconfiguring */
2683     if ((rc != TSQ_FATAL_ERROR) && (rc != RSQ_FATAL_ERROR))
2684         z90crypt_schedule_config_task(CONFIGTIME);

2688 z90crypt_schedule_cleanup_task(void)
2690     if (timer_pending(&cleanup_timer))
2692     if (mod_timer(&cleanup_timer, jiffies+(CLEANUPTIME*HZ)) != 0)
2693         PRINTK("Timer pending while modifying cleanup timer\n");

2697 helper_drain_queues(void)
2699     struct work_element *pq_p;
2700     struct list_head *lptr, *tptr;
2702     list_for_each_safe(lptr, tptr, &pending_list) {
2703         pq_p = list_entry(lptr, struct work_element, liste);
2704         pq_p->retcode = -ENODEV;
2705         pq_p->status[0] |= STAT_FAILED;
2706         unbuild_caller(LONG2DEVPTR(pq_p->devindex),
2707                        (struct caller *)pq_p->requestptr);
2708         list_del_init(lptr);
2710         pq_p->audit[1] |= FP_NOTPENDING;
2711         pq_p->audit[1] |= FP_AWAKENING;
2712         atomic_set(&pq_p->alarmrung, 1);
2713         wake_up(&pq_p->waitq);
2716     list_for_each_safe(lptr, tptr, &request_list) {
2717         pq_p = list_entry(lptr, struct work_element, liste);
2718         pq_p->retcode = -ENODEV;
2719         pq_p->status[0] |= STAT_FAILED;
2720         list_del_init(lptr);
2722         pq_p->audit[1] |= FP_REMREQUEST;
2723         pq_p->audit[1] |= FP_AWAKENING;
2724         atomic_set(&pq_p->alarmrung, 1);
2725         wake_up(&pq_p->waitq);

2730 helper_timeout_requests(void)
2732     struct work_element *pq_p;
2733     struct list_head *lptr, *tptr;
2736     timelimit = jiffies - (CLEANUPTIME * HZ);
2737 /* The list is in strict chronological order */
2738     list_for_each_safe(lptr, tptr, &pending_list) {
2739         pq_p = list_entry(lptr, struct work_element, liste);
2740         if (pq_p->requestsent >= timelimit)
2742         PRINTKW("Purging(PQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n",
2743                 ((struct caller *)pq_p->requestptr)->caller_id[0],
2744                 ((struct caller *)pq_p->requestptr)->caller_id[1],
2745                 ((struct caller *)pq_p->requestptr)->caller_id[2],
2746                 ((struct caller *)pq_p->requestptr)->caller_id[3],
2747                 ((struct caller *)pq_p->requestptr)->caller_id[4],
2748                 ((struct caller *)pq_p->requestptr)->caller_id[5],
2749                 ((struct caller *)pq_p->requestptr)->caller_id[6],
2750                 ((struct caller *)pq_p->requestptr)->caller_id[7]);
2751         pq_p->retcode = -ETIMEOUT;
2752         pq_p->status[0] |= STAT_FAILED;
2753         /* get this off any caller queue it may be on */
2754         unbuild_caller(LONG2DEVPTR(pq_p->devindex),
2755                        (struct caller *) pq_p->requestptr);
2756         list_del_init(lptr);
2758         pq_p->audit[1] |= FP_TIMEDOUT;
2759         pq_p->audit[1] |= FP_NOTPENDING;
2760         pq_p->audit[1] |= FP_AWAKENING;
2761         atomic_set(&pq_p->alarmrung, 1);
2762         wake_up(&pq_p->waitq);
2766      * If pending count is zero, items left on the request queue may
2767      * never be processed.
2769     if (pendingq_count <= 0) {
2770         list_for_each_safe(lptr, tptr, &request_list) {
2771             pq_p = list_entry(lptr, struct work_element, liste);
2772             if (pq_p->requestsent >= timelimit)
2774             PRINTKW("Purging(RQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n",
2775                     ((struct caller *)pq_p->requestptr)->caller_id[0],
2776                     ((struct caller *)pq_p->requestptr)->caller_id[1],
2777                     ((struct caller *)pq_p->requestptr)->caller_id[2],
2778                     ((struct caller *)pq_p->requestptr)->caller_id[3],
2779                     ((struct caller *)pq_p->requestptr)->caller_id[4],
2780                     ((struct caller *)pq_p->requestptr)->caller_id[5],
2781                     ((struct caller *)pq_p->requestptr)->caller_id[6],
2782                     ((struct caller *)pq_p->requestptr)->caller_id[7]);
2783             pq_p->retcode = -ETIMEOUT;
2784             pq_p->status[0] |= STAT_FAILED;
2785             list_del_init(lptr);
2787             pq_p->audit[1] |= FP_TIMEDOUT;
2788             pq_p->audit[1] |= FP_REMREQUEST;
2789             pq_p->audit[1] |= FP_AWAKENING;
2790             atomic_set(&pq_p->alarmrung, 1);
2791             wake_up(&pq_p->waitq);
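/*
 * Illustrative sketch (not part of the driver): because the pending list is
 * kept in submission order (see the "strict chronological order" comment
 * above), the purge can stop at the first entry newer than the cut-off
 * instead of scanning the whole list. The demo_* names are hypothetical.
 */
struct demo_req {
    unsigned long sent;         /* jiffies value recorded at send time */
    struct demo_req *next;
};

static void demo_purge_old(struct demo_req *head, unsigned long timelimit)
{
    struct demo_req *p;

    for (p = head; p; p = p->next) {
        if (p->sent >= timelimit)
            break;              /* this entry and everything after it is recent */
        /* p is older than the cut-off: fail it and unlink it here */
    }
}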
2797 z90crypt_cleanup_task(unsigned long ptr)
2799     PDEBUG("jiffies %ld\n", jiffies);
2800     spin_lock_irq(&queuespinlock);
2801     if (z90crypt.mask.st_count <= 0) // no devices!
2802         helper_drain_queues();
2804         helper_timeout_requests();
2805     spin_unlock_irq(&queuespinlock);
2806     z90crypt_schedule_cleanup_task();

2810 z90crypt_schedule_reader_task(unsigned long ptr)
2812     tasklet_schedule(&reader_tasklet);
2816 * Lowlevel Functions:
2818 * create_z90crypt: creates and initializes basic data structures
2819 * refresh_z90crypt: re-initializes basic data structures
2820 * find_crypto_devices: returns a count and mask of hardware status
2821 * create_crypto_device: builds the descriptor for a device
2822 * destroy_crypto_device: unallocates the descriptor for a device
2823 * destroy_z90crypt: drains all work, unallocates structs
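/*
 * How the lowlevel functions below fit together (summary of the code in this
 * file, not additional driver logic): create_z90crypt() zeroes the global
 * z90crypt structure and allocates the hardware block; refresh_z90crypt(),
 * driven by z90crypt_config_task(), probes the crypto domain once, asks
 * find_crypto_devices() for the current status mask and then calls
 * create_crypto_device() or destroy_crypto_device() for each index whose
 * state changed; destroy_z90crypt() releases every device descriptor and the
 * hardware block again.
 */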
2827 * build the z90crypt root structure using the given domain index
2830 create_z90crypt(int *cdx_p)
2832     struct hdware_block *hdware_blk_p;
2834     memset(&z90crypt, 0x00, sizeof(struct z90crypt));
2835     z90crypt.domain_established = 0;
2836     z90crypt.len = sizeof(struct z90crypt);
2837     z90crypt.max_count = Z90CRYPT_NUM_DEVS;
2838     z90crypt.cdx = *cdx_p;
2840     hdware_blk_p = (struct hdware_block *)
2841         kmalloc(sizeof(struct hdware_block), GFP_ATOMIC);
2842     if (!hdware_blk_p) {
2843         PDEBUG("kmalloc for hardware block failed\n");
2846     memset(hdware_blk_p, 0x00, sizeof(struct hdware_block));
2847     z90crypt.hdware_info = hdware_blk_p;

2853 helper_scan_devices(int cdx_array[16], int *cdx_p, int *correct_cdx_found)
2855     enum hdstat hd_stat;
2856     int q_depth, dev_type;
2857     int indx, chkdom, numdomains;
2859     q_depth = dev_type = numdomains = 0;
2860     for (chkdom = 0; chkdom <= 15; cdx_array[chkdom++] = -1);
2861     for (indx = 0; indx < z90crypt.max_count; indx++) {
2862         hd_stat = HD_NOT_THERE;
2864         for (chkdom = 0; chkdom <= 15; chkdom++) {
2865             hd_stat = query_online(indx, chkdom, MAX_RESET,
2866                                    &q_depth, &dev_type);
2867             if (hd_stat == HD_TSQ_EXCEPTION) {
2868                 z90crypt.terminating = 1;
2869                 PRINTKC("exception taken!\n");
2872             if (hd_stat == HD_ONLINE) {
2873                 cdx_array[numdomains++] = chkdom;
2874                 if (*cdx_p == chkdom) {
2875                     *correct_cdx_found = 1;
2880         if ((*correct_cdx_found == 1) || (numdomains != 0))
2882         if (z90crypt.terminating)

2889 probe_crypto_domain(int *cdx_p)
2892     char cdx_array_text[53], temp[5];
2893     int correct_cdx_found, numdomains;
2895     correct_cdx_found = 0;
2896     numdomains = helper_scan_devices(cdx_array, cdx_p, &correct_cdx_found);
2898     if (z90crypt.terminating)
2899         return TSQ_FATAL_ERROR;
2901     if (correct_cdx_found)
2904     if (numdomains == 0) {
2905         PRINTKW("Unable to find crypto domain: No devices found\n");
2906         return Z90C_NO_DEVICES;
2909     if (numdomains == 1) {
2911         *cdx_p = cdx_array[0];
2914         PRINTKW("incorrect domain: specified = %d, found = %d\n",
2915                 *cdx_p, cdx_array[0]);
2916         return Z90C_INCORRECT_DOMAIN;
2920     sprintf(cdx_array_text, "%d", cdx_array[numdomains]);
2921     while (numdomains) {
2923         sprintf(temp, ", %d", cdx_array[numdomains]);
2924         strcat(cdx_array_text, temp);
2927     PRINTKW("ambiguous domain detected: specified = %d, found array = %s\n",
2928             *cdx_p, cdx_array_text);
2929     return Z90C_AMBIGUOUS_DOMAIN;

2933 refresh_z90crypt(int *cdx_p)
2936     static struct status local_mask;
2937     struct device *devPtr;
2938     unsigned char oldStat, newStat;
2939     int return_unchanged;
2941     if (z90crypt.len != sizeof(z90crypt))
2943     if (z90crypt.terminating)
2944         return TSQ_FATAL_ERROR;
2946     if (!z90crypt.hdware_info->hdware_mask.st_count &&
2947         !z90crypt.domain_established) {
2948         rv = probe_crypto_domain(cdx_p);
2949         if (z90crypt.terminating)
2950             return TSQ_FATAL_ERROR;
2951         if (rv == Z90C_NO_DEVICES)
2952             return 0; // try later
2955         z90crypt.cdx = *cdx_p;
2956         z90crypt.domain_established = 1;
2958     rv = find_crypto_devices(&local_mask);
2960         PRINTK("find crypto devices returned %d\n", rv);
2963     if (!memcmp(&local_mask, &z90crypt.hdware_info->hdware_mask,
2964                 sizeof(struct status))) {
2965         return_unchanged = 1;
2966         for (i = 0; i < Z90CRYPT_NUM_TYPES; i++) {
2968 * Check for disabled cards. If any device is marked
2969 * disabled, destroy it.
2972                  j < z90crypt.hdware_info->type_mask[i].st_count;
2974                 indx = z90crypt.hdware_info->type_x_addr[i].
2976                 devPtr = z90crypt.device_p[indx];
2977                 if (devPtr && devPtr->disabled) {
2978                     local_mask.st_mask[indx] = HD_NOT_THERE;
2979                     return_unchanged = 0;
2983         if (return_unchanged == 1)
2987     spin_lock_irq(&queuespinlock);
2988     for (i = 0; i < z90crypt.max_count; i++) {
2989         oldStat = z90crypt.hdware_info->hdware_mask.st_mask[i];
2990         newStat = local_mask.st_mask[i];
2991         if ((oldStat == HD_ONLINE) && (newStat != HD_ONLINE))
2992             destroy_crypto_device(i);
2993         else if ((oldStat != HD_ONLINE) && (newStat == HD_ONLINE)) {
2994             rv = create_crypto_device(i);
2995             if (rv >= REC_FATAL_ERROR)
2998                 local_mask.st_mask[i] = HD_NOT_THERE;
2999                 local_mask.st_count--;
3003     memcpy(z90crypt.hdware_info->hdware_mask.st_mask, local_mask.st_mask,
3004            sizeof(local_mask.st_mask));
3005     z90crypt.hdware_info->hdware_mask.st_count = local_mask.st_count;
3006     z90crypt.hdware_info->hdware_mask.disabled_count =
3007         local_mask.disabled_count;
3008     refresh_index_array(&z90crypt.mask, &z90crypt.overall_device_x);
3009     for (i = 0; i < Z90CRYPT_NUM_TYPES; i++)
3010         refresh_index_array(&(z90crypt.hdware_info->type_mask[i]),
3011                             &(z90crypt.hdware_info->type_x_addr[i]));
3012     spin_unlock_irq(&queuespinlock);
3018 find_crypto_devices(struct status *deviceMask)
3020     int i, q_depth, dev_type;
3021     enum hdstat hd_stat;
3023     deviceMask->st_count = 0;
3024     deviceMask->disabled_count = 0;
3025     deviceMask->user_disabled_count = 0;
3027     for (i = 0; i < z90crypt.max_count; i++) {
3028         hd_stat = query_online(i, z90crypt.cdx, MAX_RESET, &q_depth,
3030         if (hd_stat == HD_TSQ_EXCEPTION) {
3031             z90crypt.terminating = 1;
3032             PRINTKC("Exception during probe for crypto devices\n");
3033             return TSQ_FATAL_ERROR;
3035         deviceMask->st_mask[i] = hd_stat;
3036         if (hd_stat == HD_ONLINE) {
3037             PDEBUG("Got an online crypto!: %d\n", i);
3038             PDEBUG("Got a queue depth of %d\n", q_depth);
3039             PDEBUG("Got a device type of %d\n", dev_type);
3041                 return TSQ_FATAL_ERROR;
3042             deviceMask->st_count++;
3043             z90crypt.q_depth_array[i] = q_depth;
3044             z90crypt.dev_type_array[i] = dev_type;
3052 refresh_index_array(struct status *status_str, struct device_x *index_array)
3060         stat = status_str->st_mask[++i];
3061         if (stat == DEV_ONLINE)
3062             index_array->device_index[count++] = i;
3063     } while ((i < Z90CRYPT_NUM_DEVS) && (count < status_str->st_count));
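/*
 * Illustrative sketch (not part of the driver): refresh_index_array() above
 * rebuilds the compact "short index" from the per-device status mask, so a
 * short index i maps back to a long device index via device_index[i]. The
 * demo_* names and the fixed sizes below are hypothetical.
 */
#define DEMO_NUM_DEVS 64
#define DEMO_ONLINE   0x01

static int demo_refresh_index(const unsigned char st_mask[DEMO_NUM_DEVS],
                              int device_index[DEMO_NUM_DEVS])
{
    int i, count = 0;

    for (i = 0; i < DEMO_NUM_DEVS; i++)
        if (st_mask[i] == DEMO_ONLINE)
            device_index[count++] = i;      /* short -> long mapping */
    return count;                           /* new online count */
}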
3069 create_crypto_device(int index)
3071     int rv, devstat, total_size;
3072     struct device *dev_ptr;
3073     struct status *type_str_p;
3076     dev_ptr = z90crypt.device_p[index];
3078         total_size = sizeof(struct device) +
3079                      z90crypt.q_depth_array[index] * sizeof(int);
3081         dev_ptr = (struct device *) kmalloc(total_size, GFP_ATOMIC);
3083             PRINTK("kmalloc device %d failed\n", index);
3086         memset(dev_ptr, 0, total_size);
3087         dev_ptr->dev_resp_p = kmalloc(MAX_RESPONSE_SIZE, GFP_ATOMIC);
3088         if (!dev_ptr->dev_resp_p) {
3090             PRINTK("kmalloc device %d rec buffer failed\n", index);
3093         dev_ptr->dev_resp_l = MAX_RESPONSE_SIZE;
3094         INIT_LIST_HEAD(&(dev_ptr->dev_caller_list));
3097     devstat = reset_device(index, z90crypt.cdx, MAX_RESET);
3098     if (devstat == DEV_RSQ_EXCEPTION) {
3099         PRINTK("exception during reset device %d\n", index);
3100         kfree(dev_ptr->dev_resp_p);
3102         return RSQ_FATAL_ERROR;
3104     if (devstat == DEV_ONLINE) {
3105         dev_ptr->dev_self_x = index;
3106         dev_ptr->dev_type = z90crypt.dev_type_array[index];
3107         if (dev_ptr->dev_type == NILDEV) {
3108             rv = probe_device_type(dev_ptr);
3110                 PRINTK("rv = %d from probe_device_type %d\n",
3112                 kfree(dev_ptr->dev_resp_p);
3117         if (dev_ptr->dev_type == PCIXCC_UNK) {
3118             rv = probe_PCIXCC_type(dev_ptr);
3120                 PRINTK("rv = %d from probe_PCIXCC_type %d\n",
3122                 kfree(dev_ptr->dev_resp_p);
3127         deviceType = dev_ptr->dev_type;
3128         z90crypt.dev_type_array[index] = deviceType;
3129         if (deviceType == PCICA)
3130             z90crypt.hdware_info->device_type_array[index] = 1;
3131         else if (deviceType == PCICC)
3132             z90crypt.hdware_info->device_type_array[index] = 2;
3133         else if (deviceType == PCIXCC_MCL2)
3134             z90crypt.hdware_info->device_type_array[index] = 3;
3135         else if (deviceType == PCIXCC_MCL3)
3136             z90crypt.hdware_info->device_type_array[index] = 4;
3137         else if (deviceType == CEX2C)
3138             z90crypt.hdware_info->device_type_array[index] = 5;
3140             z90crypt.hdware_info->device_type_array[index] = -1;
3144 * 'q_depth' returned by the hardware is one less than
3147     dev_ptr->dev_q_depth = z90crypt.q_depth_array[index];
3148     dev_ptr->dev_type = z90crypt.dev_type_array[index];
3149     dev_ptr->dev_stat = devstat;
3150     dev_ptr->disabled = 0;
3151     z90crypt.device_p[index] = dev_ptr;
3153     if (devstat == DEV_ONLINE) {
3154         if (z90crypt.mask.st_mask[index] != DEV_ONLINE) {
3155             z90crypt.mask.st_mask[index] = DEV_ONLINE;
3156             z90crypt.mask.st_count++;
3158         deviceType = dev_ptr->dev_type;
3159         type_str_p = &z90crypt.hdware_info->type_mask[deviceType];
3160         if (type_str_p->st_mask[index] != DEV_ONLINE) {
3161             type_str_p->st_mask[index] = DEV_ONLINE;
3162             type_str_p->st_count++;

3170 destroy_crypto_device(int index)
3172     struct device *dev_ptr;
3173     int t, disabledFlag;
3175     dev_ptr = z90crypt.device_p[index];
3177 /* remember device type; get rid of device struct */
3179     disabledFlag = dev_ptr->disabled;
3180     t = dev_ptr->dev_type;
3181     if (dev_ptr->dev_resp_p)
3182         kfree(dev_ptr->dev_resp_p);
3188     z90crypt.device_p[index] = 0;
3190 /* if the type is valid, remove the device from the type_mask */
3191     if ((t != -1) && z90crypt.hdware_info->type_mask[t].st_mask[index]) {
3192         z90crypt.hdware_info->type_mask[t].st_mask[index] = 0x00;
3193         z90crypt.hdware_info->type_mask[t].st_count--;
3194         if (disabledFlag == 1)
3195             z90crypt.hdware_info->type_mask[t].disabled_count--;
3197     if (z90crypt.mask.st_mask[index] != DEV_GONE) {
3198         z90crypt.mask.st_mask[index] = DEV_GONE;
3199         z90crypt.mask.st_count--;
3201     z90crypt.hdware_info->device_type_array[index] = 0;

3207 destroy_z90crypt(void)
3210     for (i = 0; i < z90crypt.max_count; i++)
3211         if (z90crypt.device_p[i])
3212             destroy_crypto_device(i);
3213     if (z90crypt.hdware_info)
3214         kfree((void *)z90crypt.hdware_info);
3215     memset((void *)&z90crypt, 0, sizeof(z90crypt));

3218 static unsigned char static_testmsg[384] = {
3219 0x00,0x00,0x00,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x00,0x06,0x00,0x00,
3220 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x58,
3221 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x43,0x43,
3222 0x41,0x2d,0x41,0x50,0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,0x00,0x00,0x00,0x00,
3223 0x50,0x4b,0x00,0x00,0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3224 0x00,0x00,0x00,0x00,0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3225 0x00,0x00,0x00,0x00,0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x54,0x32,
3226 0x01,0x00,0xa0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3227 0xb8,0x05,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3228 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3229 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3230 0x00,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3231 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,0x49,0x43,0x53,0x46,
3232 0x20,0x20,0x20,0x20,0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,0x2d,0x31,0x2e,0x32,
3233 0x37,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
3234 0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
3235 0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
3236 0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,0x88,0x1e,0x00,0x00,
3237 0x57,0x00,0x00,0x00,0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,0x03,0x02,0x00,0x00,
3238 0x40,0x01,0x00,0x01,0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,0xf6,0xd2,0x7b,0x58,
3239 0x4b,0xf9,0x28,0x68,0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,0x63,0x42,0xef,0xf8,
3240 0xfd,0xa4,0xf8,0xb0,0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,0x53,0x8c,0x6f,0x4e,
3241 0x72,0x8f,0x6c,0x04,0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,0xf7,0xdd,0xfd,0x4f,
3242 0x11,0x36,0x95,0x5d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
3246 probe_device_type(struct device *devPtr)
3248     int rv, dv, i, index, length;
3249     unsigned char psmid[8];
3250     static unsigned char loc_testmsg[sizeof(static_testmsg)];
3252     index = devPtr->dev_self_x;
3255     memcpy(loc_testmsg, static_testmsg, sizeof(static_testmsg));
3256     length = sizeof(static_testmsg) - 24;
3257 /* the -24 allows for the header */
3258     dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg);
3260     PDEBUG("dv returned by send during probe: %d\n", dv);
3261     if (dv == DEV_SEN_EXCEPTION) {
3262         rv = SEN_FATAL_ERROR;
3263         PRINTKC("exception in send to AP %d\n", index);
3266     PDEBUG("return value from send_to_AP: %d\n", rv);
3269         PDEBUG("dev %d not available\n", index);
3279         rv = SEN_FATAL_ERROR;
3281     case DEV_BAD_MESSAGE:
3282         rv = SEN_USER_ERROR;
3284     case DEV_QUEUE_FULL:
3285         rv = SEN_QUEUE_FULL;
3288         PRINTK("unknown dv=%d for dev %d\n", dv, index);
3297     for (i = 0; i < 6; i++) {
3299         dv = receive_from_AP(index, z90crypt.cdx,
3301                              devPtr->dev_resp_p, psmid);
3302         PDEBUG("dv returned by DQ = %d\n", dv);
3303         if (dv == DEV_REC_EXCEPTION) {
3304             rv = REC_FATAL_ERROR;
3305             PRINTKC("exception in dequeue %d\n",
3319     case DEV_BAD_MESSAGE:
3322         rv = REC_NO_RESPONSE;
3325     if ((rv != 0) && (rv != REC_NO_WORK))
3332     rv = (devPtr->dev_resp_p[0] == 0x00) &&
3333          (devPtr->dev_resp_p[1] == 0x86);
3335         devPtr->dev_type = PCICC;
3337         devPtr->dev_type = PCICA;
3340 /* In a general error case, the card is not marked online */
3344 static unsigned char MCL3_testmsg[] = {
3345 0x00,0x00,0x00,0x00,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,
3346 0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3347 0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3348 0x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3349 0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x00,0x00,0x00,0x01,0xC4,0x00,0x00,0x00,0x00,
3350 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,0x00,0x00,0x00,0x00,
3351 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xDC,0x02,0x00,0x00,0x00,0x54,0x32,
3352 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE8,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,
3353 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3354 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3355 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3356 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3357 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3358 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3359 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3360 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3361 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3362 0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3363 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3364 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
3365 0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x0A,0x4D,0x52,0x50,0x20,0x20,0x20,0x20,0x20,
3366 0x00,0x42,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,
3367 0x0E,0x0F,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0xAA,0xBB,0xCC,0xDD,
3368 0xEE,0xFF,0xFF,0xEE,0xDD,0xCC,0xBB,0xAA,0x99,0x88,0x77,0x66,0x55,0x44,0x33,0x22,
3369 0x11,0x00,0x01,0x23,0x45,0x67,0x89,0xAB,0xCD,0xEF,0xFE,0xDC,0xBA,0x98,0x76,0x54,
3370 0x32,0x10,0x00,0x9A,0x00,0x98,0x00,0x00,0x1E,0x00,0x00,0x94,0x00,0x00,0x00,0x00,
3371 0x04,0x00,0x00,0x8C,0x00,0x00,0x00,0x40,0x02,0x00,0x00,0x40,0xBA,0xE8,0x23,0x3C,
3372 0x75,0xF3,0x91,0x61,0xD6,0x73,0x39,0xCF,0x7B,0x6D,0x8E,0x61,0x97,0x63,0x9E,0xD9,
3373 0x60,0x55,0xD6,0xC7,0xEF,0xF8,0x1E,0x63,0x95,0x17,0xCC,0x28,0x45,0x60,0x11,0xC5,
3374 0xC4,0x4E,0x66,0xC6,0xE6,0xC3,0xDE,0x8A,0x19,0x30,0xCF,0x0E,0xD7,0xAA,0xDB,0x01,
3375 0xD8,0x00,0xBB,0x8F,0x39,0x9F,0x64,0x28,0xF5,0x7A,0x77,0x49,0xCC,0x6B,0xA3,0x91,
3376 0x97,0x70,0xE7,0x60,0x1E,0x39,0xE1,0xE5,0x33,0xE1,0x15,0x63,0x69,0x08,0x80,0x4C,
3377 0x67,0xC4,0x41,0x8F,0x48,0xDF,0x26,0x98,0xF1,0xD5,0x8D,0x88,0xD9,0x6A,0xA4,0x96,
3378 0xC5,0x84,0xD9,0x30,0x49,0x67,0x7D,0x19,0xB1,0xB3,0x45,0x4D,0xB2,0x53,0x9A,0x47,
3379 0x3C,0x7C,0x55,0xBF,0xCC,0x85,0x00,0x36,0xF1,0x3D,0x93,0x53
3383 probe_PCIXCC_type(struct device *devPtr)
3385     int rv, dv, i, index, length;
3386     unsigned char psmid[8];
3387     static unsigned char loc_testmsg[548];
3388     struct CPRBX *cprbx_p;
3390     index = devPtr->dev_self_x;
3393     memcpy(loc_testmsg, MCL3_testmsg, sizeof(MCL3_testmsg));
3394     length = sizeof(MCL3_testmsg) - 0x0C;
3395     dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg);
3397     PDEBUG("dv returned = %d\n", dv);
3398     if (dv == DEV_SEN_EXCEPTION) {
3399         rv = SEN_FATAL_ERROR;
3400         PRINTKC("exception in send to AP %d\n", index);
3403     PDEBUG("return value from send_to_AP: %d\n", rv);
3406         PDEBUG("dev %d not available\n", index);
3416         rv = SEN_FATAL_ERROR;
3418     case DEV_BAD_MESSAGE:
3419         rv = SEN_USER_ERROR;
3421     case DEV_QUEUE_FULL:
3422         rv = SEN_QUEUE_FULL;
3425         PRINTK("unknown dv=%d for dev %d\n", dv, index);
3434     for (i = 0; i < 6; i++) {
3436         dv = receive_from_AP(index, z90crypt.cdx,
3438                              devPtr->dev_resp_p, psmid);
3439         PDEBUG("dv returned by DQ = %d\n", dv);
3440         if (dv == DEV_REC_EXCEPTION) {
3441             rv = REC_FATAL_ERROR;
3442             PRINTKC("exception in dequeue %d\n",
3456     case DEV_BAD_MESSAGE:
3459         rv = REC_NO_RESPONSE;
3462     if ((rv != 0) && (rv != REC_NO_WORK))
3469     cprbx_p = (struct CPRBX *) (devPtr->dev_resp_p + 48);
3470     if ((cprbx_p->ccp_rtcode == 8) && (cprbx_p->ccp_rscode == 33)) {
3471         devPtr->dev_type = PCIXCC_MCL2;
3472         PDEBUG("device %d is MCL2\n", index);
3474         devPtr->dev_type = PCIXCC_MCL3;
3475         PDEBUG("device %d is MCL3\n", index);
3478 /* In a general error case, the card is not marked online */
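/*
 * Both probe functions above use the same technique: copy a canned test
 * request, push it to the adapter with send_to_AP(), poll receive_from_AP()
 * a few times for the reply, and classify the card from that reply.
 * probe_device_type() looks at the first response bytes (0x00 0x86 marks the
 * card as PCICC, otherwise PCICA); probe_PCIXCC_type() inspects the CPRBX
 * return and reason codes (8/33 marks PCIXCC_MCL2, otherwise PCIXCC_MCL3).
 */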
3482 #ifdef Z90CRYPT_USE_HOTPLUG
3484 z90crypt_hotplug_event(int dev_major, int dev_minor, int action)
3486 #ifdef CONFIG_HOTPLUG
3492     sprintf(major, "MAJOR=%d", dev_major);
3493     sprintf(minor, "MINOR=%d", dev_minor);
3495     argv[0] = hotplug_path;
3496     argv[1] = "z90crypt";
3500     envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
3503     case Z90CRYPT_HOTPLUG_ADD:
3504         envp[2] = "ACTION=add";
3506     case Z90CRYPT_HOTPLUG_REMOVE:
3507         envp[2] = "ACTION=remove";
3517     call_usermodehelper(argv[0], argv, envp, 0);

3522 module_init(z90crypt_init_module);
3523 module_exit(z90crypt_cleanup_module);