/*
 *  zcrypt 2.1.0
 *
 *  Copyright IBM Corp. 2001, 2012
 *  Author(s): Robert Burroughs
 *	       Eric Rossman (edrossma@us.ibm.com)
 *	       Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
 *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
 *				  Ralph Wuerthner <rwuerthn@de.ibm.com>
 *  MSGTYPE restruct:		  Holger Dengler <hd@linux.vnet.ibm.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <asm/uaccess.h>
#include <linux/hw_random.h>
#include <linux/debugfs.h>
#include <asm/debug.h>

#include "zcrypt_debug.h"
#include "zcrypt_api.h"

/*
 * Module description.
 */
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
		   "Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");
static DEFINE_SPINLOCK(zcrypt_device_lock);
static LIST_HEAD(zcrypt_device_list);
static int zcrypt_device_count = 0;
static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0);

atomic_t zcrypt_rescan_req = ATOMIC_INIT(0);
EXPORT_SYMBOL(zcrypt_rescan_req);

static int zcrypt_rng_device_add(void);
static void zcrypt_rng_device_remove(void);

static DEFINE_SPINLOCK(zcrypt_ops_list_lock);
static LIST_HEAD(zcrypt_ops_list);

static debug_info_t *zcrypt_dbf_common;
static debug_info_t *zcrypt_dbf_devices;
static struct dentry *debugfs_root;
/*
 * Device attributes common for all crypto devices.
 */
static ssize_t zcrypt_type_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct zcrypt_device *zdev = to_ap_dev(dev)->private;
	return snprintf(buf, PAGE_SIZE, "%s\n", zdev->type_string);
}

static DEVICE_ATTR(type, 0444, zcrypt_type_show, NULL);

static ssize_t zcrypt_online_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct zcrypt_device *zdev = to_ap_dev(dev)->private;
	return snprintf(buf, PAGE_SIZE, "%d\n", zdev->online);
}

static ssize_t zcrypt_online_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct zcrypt_device *zdev = to_ap_dev(dev)->private;
	int online;

	if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
		return -EINVAL;
	zdev->online = online;
	ZCRYPT_DBF_DEV(DBF_INFO, zdev, "dev%04xo%dman", zdev->ap_dev->qid,
		       zdev->online);
	if (!online)
		ap_flush_queue(zdev->ap_dev);
	return count;
}

static DEVICE_ATTR(online, 0644, zcrypt_online_show, zcrypt_online_store);
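
/*
 * A minimal user-space sketch of driving the "online" attribute defined
 * above.  The sysfs path is an assumption here -- it depends on how the
 * AP bus names the device (for instance ".../card23/online"); only the
 * "0"/"1" payload parsed by zcrypt_online_store() is given by this file.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static int set_card_online(const char *attr_path, int online)
 *	{
 *		const char *val = online ? "1" : "0";
 *		int fd = open(attr_path, O_WRONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		if (write(fd, val, strlen(val)) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		return close(fd);
 *	}
 *
 * Writing "0" also flushes the device's AP queue, as implemented in
 * zcrypt_online_store().
 */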
static struct attribute *zcrypt_device_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_online.attr,
	NULL,
};

static struct attribute_group zcrypt_device_attr_group = {
	.attrs = zcrypt_device_attrs,
};

/*
 * Process a rescan of the transport layer.
 *
 * Returns 1, if the rescan has been processed, otherwise 0.
 */
static inline int zcrypt_process_rescan(void)
{
	if (atomic_read(&zcrypt_rescan_req)) {
		atomic_set(&zcrypt_rescan_req, 0);
		ap_bus_force_rescan();
		/* Count the rescan exactly once; the same increment
		 * also feeds the debug feature message. */
		ZCRYPT_DBF_COMMON(DBF_INFO, "rescan%07d",
				  atomic_inc_return(&zcrypt_rescan_count));
		return 1;
	}
	return 0;
}
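
/*
 * All request paths below use the same retry protocol around this helper
 * (shown here once as a sketch; the ioctl handlers further down contain
 * the real instances): spin on -EAGAIN, and on -ENODEV give the AP bus
 * one forced rescan before trying again.
 *
 *	do {
 *		rc = zcrypt_rsa_modexpo(&mex);
 *	} while (rc == -EAGAIN);
 *	if (rc == -ENODEV && zcrypt_process_rescan())
 *		do {
 *			rc = zcrypt_rsa_modexpo(&mex);
 *		} while (rc == -EAGAIN);
 */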
/*
 * __zcrypt_increase_preference(): Increase preference of a crypto device.
 * @zdev: Pointer to the crypto device
 *
 * Move the device towards the head of the device list.
 * Must be called while holding the zcrypt device list lock.
 * Note: cards with a speed_rating of 0 are kept at the end of the list.
 */
static void __zcrypt_increase_preference(struct zcrypt_device *zdev)
{
	struct zcrypt_device *tmp;
	struct list_head *l;

	if (zdev->speed_rating == 0)
		return;
	for (l = zdev->list.prev; l != &zcrypt_device_list; l = l->prev) {
		tmp = list_entry(l, struct zcrypt_device, list);
		if ((tmp->request_count + 1) * tmp->speed_rating <=
		    (zdev->request_count + 1) * zdev->speed_rating &&
		    tmp->speed_rating != 0)
			break;
	}
	if (l == zdev->list.prev)
		return;
	/* Move zdev behind l */
	list_move(&zdev->list, l);
}

/*
 * __zcrypt_decrease_preference(): Decrease preference of a crypto device.
 * @zdev: Pointer to the crypto device
 *
 * Move the device towards the tail of the device list.
 * Must be called while holding the zcrypt device list lock.
 * Note: cards with a speed_rating of 0 are kept at the end of the list.
 */
static void __zcrypt_decrease_preference(struct zcrypt_device *zdev)
{
	struct zcrypt_device *tmp;
	struct list_head *l;

	if (zdev->speed_rating == 0)
		return;
	for (l = zdev->list.next; l != &zcrypt_device_list; l = l->next) {
		tmp = list_entry(l, struct zcrypt_device, list);
		if ((tmp->request_count + 1) * tmp->speed_rating >
		    (zdev->request_count + 1) * zdev->speed_rating ||
		    tmp->speed_rating == 0)
			break;
	}
	if (l == zdev->list.next)
		return;
	/* Move zdev before l */
	list_move_tail(&zdev->list, l);
}
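
/*
 * The ordering key used by the two helpers above is
 * (request_count + 1) * speed_rating: devices with a smaller product sort
 * nearer the head of the list and are tried first, which treats
 * speed_rating as a cost per request (a rating of 0 means "unrated" and
 * pins the card to the tail).  A worked example with made-up numbers:
 * a card with speed_rating 2 and 3 requests in flight scores
 * (3 + 1) * 2 = 8, and a card with speed_rating 4 and 1 request in
 * flight scores (1 + 1) * 4 = 8 as well, so the two are equally
 * attractive; one more in-flight request on either card tips the
 * balance towards the other.
 */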
static void zcrypt_device_release(struct kref *kref)
{
	struct zcrypt_device *zdev =
		container_of(kref, struct zcrypt_device, refcount);
	zcrypt_device_free(zdev);
}

void zcrypt_device_get(struct zcrypt_device *zdev)
{
	kref_get(&zdev->refcount);
}
EXPORT_SYMBOL(zcrypt_device_get);

int zcrypt_device_put(struct zcrypt_device *zdev)
{
	return kref_put(&zdev->refcount, zcrypt_device_release);
}
EXPORT_SYMBOL(zcrypt_device_put);

struct zcrypt_device *zcrypt_device_alloc(size_t max_response_size)
{
	struct zcrypt_device *zdev;

	zdev = kzalloc(sizeof(struct zcrypt_device), GFP_KERNEL);
	if (!zdev)
		return NULL;
	zdev->reply.message = kmalloc(max_response_size, GFP_KERNEL);
	if (!zdev->reply.message)
		goto out_free;
	zdev->reply.length = max_response_size;
	spin_lock_init(&zdev->lock);
	INIT_LIST_HEAD(&zdev->list);
	zdev->dbf_area = zcrypt_dbf_devices;
	return zdev;

out_free:
	kfree(zdev);
	return NULL;
}
EXPORT_SYMBOL(zcrypt_device_alloc);

void zcrypt_device_free(struct zcrypt_device *zdev)
{
	kfree(zdev->reply.message);
	kfree(zdev);
}
EXPORT_SYMBOL(zcrypt_device_free);

/*
 * zcrypt_device_register(): Register a crypto device.
 * @zdev: Pointer to a crypto device
 *
 * Register a crypto device. Returns 0 if successful.
 */
int zcrypt_device_register(struct zcrypt_device *zdev)
{
	int rc;

	if (!zdev->ops)
		return -ENODEV;
	rc = sysfs_create_group(&zdev->ap_dev->device.kobj,
				&zcrypt_device_attr_group);
	if (rc)
		goto out;
	get_device(&zdev->ap_dev->device);
	kref_init(&zdev->refcount);
	spin_lock_bh(&zcrypt_device_lock);
	zdev->online = 1;	/* New devices are online by default. */
	ZCRYPT_DBF_DEV(DBF_INFO, zdev, "dev%04xo%dreg", zdev->ap_dev->qid,
		       zdev->online);
	list_add_tail(&zdev->list, &zcrypt_device_list);
	__zcrypt_increase_preference(zdev);
	zcrypt_device_count++;
	spin_unlock_bh(&zcrypt_device_lock);
	if (zdev->ops->rng) {
		rc = zcrypt_rng_device_add();
		if (rc)
			goto out_unregister;
	}
	return 0;

out_unregister:
	spin_lock_bh(&zcrypt_device_lock);
	zcrypt_device_count--;
	list_del_init(&zdev->list);
	spin_unlock_bh(&zcrypt_device_lock);
	sysfs_remove_group(&zdev->ap_dev->device.kobj,
			   &zcrypt_device_attr_group);
	put_device(&zdev->ap_dev->device);
	zcrypt_device_put(zdev);
out:
	return rc;
}
EXPORT_SYMBOL(zcrypt_device_register);

/*
 * zcrypt_device_unregister(): Unregister a crypto device.
 * @zdev: Pointer to a crypto device
 *
 * Unregister a crypto device.
 */
void zcrypt_device_unregister(struct zcrypt_device *zdev)
{
	if (zdev->ops->rng)
		zcrypt_rng_device_remove();
	spin_lock_bh(&zcrypt_device_lock);
	zcrypt_device_count--;
	list_del_init(&zdev->list);
	spin_unlock_bh(&zcrypt_device_lock);
	sysfs_remove_group(&zdev->ap_dev->device.kobj,
			   &zcrypt_device_attr_group);
	put_device(&zdev->ap_dev->device);
	zcrypt_device_put(zdev);
}
EXPORT_SYMBOL(zcrypt_device_unregister);
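
/*
 * Sketch of how a backend card driver would use the lifecycle API above.
 * This is illustrative only: "example_probe", "example_ops" and the field
 * values are placeholders, not an actual zcrypt backend.  The ops table
 * would come from the msgtype registry (see zcrypt_msgtype_request()
 * below).
 *
 *	static int example_probe(struct ap_device *ap_dev)
 *	{
 *		struct zcrypt_device *zdev;
 *		int rc;
 *
 *		zdev = zcrypt_device_alloc(4096);
 *		if (!zdev)
 *			return -ENOMEM;
 *		zdev->ap_dev = ap_dev;
 *		zdev->ops = example_ops;
 *		zdev->user_space_type = ZCRYPT_CEX2A;
 *		zdev->type_string = "EXAMPLE";
 *		zdev->min_mod_size = 128;
 *		zdev->max_mod_size = 256;
 *		zdev->speed_rating = 100;
 *		ap_dev->private = zdev;
 *		rc = zcrypt_device_register(zdev);
 *		if (rc) {
 *			ap_dev->private = NULL;
 *			zcrypt_device_free(zdev);
 *		}
 *		return rc;
 *	}
 *
 * Teardown goes through zcrypt_device_unregister(), whose final
 * zcrypt_device_put() releases the device once the last reference drops.
 */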
void zcrypt_msgtype_register(struct zcrypt_ops *zops)
{
	if (zops->owner) {
		spin_lock_bh(&zcrypt_ops_list_lock);
		list_add_tail(&zops->list, &zcrypt_ops_list);
		spin_unlock_bh(&zcrypt_ops_list_lock);
	}
}
EXPORT_SYMBOL(zcrypt_msgtype_register);

void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
{
	spin_lock_bh(&zcrypt_ops_list_lock);
	list_del_init(&zops->list);
	spin_unlock_bh(&zcrypt_ops_list_lock);
}
EXPORT_SYMBOL(zcrypt_msgtype_unregister);

static inline
struct zcrypt_ops *__ops_lookup(unsigned char *name, int variant)
{
	struct zcrypt_ops *zops;
	int found = 0;

	spin_lock_bh(&zcrypt_ops_list_lock);
	list_for_each_entry(zops, &zcrypt_ops_list, list) {
		if ((zops->variant == variant) &&
		    (!strncmp(zops->owner->name, name, MODULE_NAME_LEN))) {
			found = 1;
			break;
		}
	}
	spin_unlock_bh(&zcrypt_ops_list_lock);

	if (!found)
		return NULL;
	return zops;
}

struct zcrypt_ops *zcrypt_msgtype_request(unsigned char *name, int variant)
{
	struct zcrypt_ops *zops = NULL;

	zops = __ops_lookup(name, variant);
	if (!zops) {
		/* Use an explicit format string; "name" is not a literal. */
		request_module("%s", name);
		zops = __ops_lookup(name, variant);
	}
	if ((!zops) || (!try_module_get(zops->owner)))
		return NULL;
	return zops;
}
EXPORT_SYMBOL(zcrypt_msgtype_request);

void zcrypt_msgtype_release(struct zcrypt_ops *zops)
{
	if (zops)
		module_put(zops->owner);
}
EXPORT_SYMBOL(zcrypt_msgtype_release);
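
/*
 * Pairing for the registry above, sketched: a backend resolves its
 * message type once by name and variant and pins the providing module
 * until it no longer needs the ops.  MSGTYPE50_NAME and
 * MSGTYPE50_VARIANT_DEFAULT stand in for the kind of constants a msgtype
 * header defines; the exact names here are illustrative.
 *
 *	struct zcrypt_ops *zops;
 *
 *	zops = zcrypt_msgtype_request(MSGTYPE50_NAME,
 *				      MSGTYPE50_VARIANT_DEFAULT);
 *	if (!zops)
 *		return -ENODEV;
 *	zdev->ops = zops;
 *	...
 *	zcrypt_msgtype_release(zops);
 */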
/*
 * zcrypt_read(): Not supported beyond zcrypt 1.3.1.
 *
 * This function is not supported beyond zcrypt 1.3.1.
 */
static ssize_t zcrypt_read(struct file *filp, char __user *buf,
			   size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/*
 * zcrypt_write(): Not allowed.
 *
 * Write is not allowed.
 */
static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/*
 * zcrypt_open(): Count number of users.
 *
 * Device open function to count number of users.
 */
static int zcrypt_open(struct inode *inode, struct file *filp)
{
	atomic_inc(&zcrypt_open_count);
	return nonseekable_open(inode, filp);
}

/*
 * zcrypt_release(): Count number of users.
 *
 * Device close function to count number of users.
 */
static int zcrypt_release(struct inode *inode, struct file *filp)
{
	atomic_dec(&zcrypt_open_count);
	return 0;
}
/*
 * zcrypt ioctls.
 */
static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
{
	struct zcrypt_device *zdev;
	int rc;

	if (mex->outputdatalength < mex->inputdatalength)
		return -EINVAL;
	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case.
	 */
	mex->outputdatalength = mex->inputdatalength;

	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		if (!zdev->online ||
		    !zdev->ops->rsa_modexpo ||
		    zdev->min_mod_size > mex->inputdatalength ||
		    zdev->max_mod_size < mex->inputdatalength)
			continue;
		zcrypt_device_get(zdev);
		get_device(&zdev->ap_dev->device);
		zdev->request_count++;
		__zcrypt_decrease_preference(zdev);
		if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
			spin_unlock_bh(&zcrypt_device_lock);
			rc = zdev->ops->rsa_modexpo(zdev, mex);
			spin_lock_bh(&zcrypt_device_lock);
			module_put(zdev->ap_dev->drv->driver.owner);
		} else
			rc = -EAGAIN;
		zdev->request_count--;
		__zcrypt_increase_preference(zdev);
		put_device(&zdev->ap_dev->device);
		zcrypt_device_put(zdev);
		spin_unlock_bh(&zcrypt_device_lock);
		return rc;
	}
	spin_unlock_bh(&zcrypt_device_lock);
	return -ENODEV;
}
static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
{
	struct zcrypt_device *zdev;
	unsigned long long z1, z2, z3;
	int rc, copied;

	if (crt->outputdatalength < crt->inputdatalength ||
	    (crt->inputdatalength & 1))
		return -EINVAL;
	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case.
	 */
	crt->outputdatalength = crt->inputdatalength;

	copied = 0;
restart:
	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		if (!zdev->online ||
		    !zdev->ops->rsa_modexpo_crt ||
		    zdev->min_mod_size > crt->inputdatalength ||
		    zdev->max_mod_size < crt->inputdatalength)
			continue;
		if (zdev->short_crt && crt->inputdatalength > 240) {
			/*
			 * Check inputdata for leading zeros for cards
			 * that can't handle np_prime, bp_key, or
			 * u_mult_inv > 128 bytes.
			 */
			if (copied == 0) {
				unsigned int len;

				spin_unlock_bh(&zcrypt_device_lock);
				/* len is max 256 / 2 - 120 = 8
				 * For bigger device just assume len of leading
				 * 0s is 8 as stated in the requirements for
				 * ica_rsa_modexpo_crt struct in zcrypt.h.
				 */
				if (crt->inputdatalength <= 256)
					len = crt->inputdatalength / 2 - 120;
				else
					len = 8;
				if (len > sizeof(z1))
					return -EFAULT;
				z1 = z2 = z3 = 0;
				if (copy_from_user(&z1, crt->np_prime, len) ||
				    copy_from_user(&z2, crt->bp_key, len) ||
				    copy_from_user(&z3, crt->u_mult_inv, len))
					return -EFAULT;
				/* Note: do NOT zero z1/z2/z3 again here;
				 * the leading-zero test below depends on
				 * the bytes just copied in. */
				copied = 1;
				/*
				 * We have to restart device lookup -
				 * the device list may have changed by now.
				 */
				goto restart;
			}
			if (z1 != 0ULL || z2 != 0ULL || z3 != 0ULL)
				/* The device can't handle this request. */
				continue;
		}
		zcrypt_device_get(zdev);
		get_device(&zdev->ap_dev->device);
		zdev->request_count++;
		__zcrypt_decrease_preference(zdev);
		if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
			spin_unlock_bh(&zcrypt_device_lock);
			rc = zdev->ops->rsa_modexpo_crt(zdev, crt);
			spin_lock_bh(&zcrypt_device_lock);
			module_put(zdev->ap_dev->drv->driver.owner);
		} else
			rc = -EAGAIN;
		zdev->request_count--;
		__zcrypt_increase_preference(zdev);
		put_device(&zdev->ap_dev->device);
		zcrypt_device_put(zdev);
		spin_unlock_bh(&zcrypt_device_lock);
		return rc;
	}
	spin_unlock_bh(&zcrypt_device_lock);
	return -ENODEV;
}
static long zcrypt_send_cprb(struct ica_xcRB *xcRB)
{
	struct zcrypt_device *zdev;
	int rc;

	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		if (!zdev->online || !zdev->ops->send_cprb ||
		    (xcRB->user_defined != AUTOSELECT &&
		     AP_QID_DEVICE(zdev->ap_dev->qid) != xcRB->user_defined))
			continue;
		zcrypt_device_get(zdev);
		get_device(&zdev->ap_dev->device);
		zdev->request_count++;
		__zcrypt_decrease_preference(zdev);
		if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
			spin_unlock_bh(&zcrypt_device_lock);
			rc = zdev->ops->send_cprb(zdev, xcRB);
			spin_lock_bh(&zcrypt_device_lock);
			module_put(zdev->ap_dev->drv->driver.owner);
		} else
			rc = -EAGAIN;
		zdev->request_count--;
		__zcrypt_increase_preference(zdev);
		put_device(&zdev->ap_dev->device);
		zcrypt_device_put(zdev);
		spin_unlock_bh(&zcrypt_device_lock);
		return rc;
	}
	spin_unlock_bh(&zcrypt_device_lock);
	return -ENODEV;
}
static long zcrypt_rng(char *buffer)
{
	struct zcrypt_device *zdev;
	int rc;

	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		if (!zdev->online || !zdev->ops->rng)
			continue;
		zcrypt_device_get(zdev);
		get_device(&zdev->ap_dev->device);
		zdev->request_count++;
		__zcrypt_decrease_preference(zdev);
		if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
			spin_unlock_bh(&zcrypt_device_lock);
			rc = zdev->ops->rng(zdev, buffer);
			spin_lock_bh(&zcrypt_device_lock);
			module_put(zdev->ap_dev->drv->driver.owner);
		} else
			rc = -EAGAIN;
		zdev->request_count--;
		__zcrypt_increase_preference(zdev);
		put_device(&zdev->ap_dev->device);
		zcrypt_device_put(zdev);
		spin_unlock_bh(&zcrypt_device_lock);
		return rc;
	}
	spin_unlock_bh(&zcrypt_device_lock);
	return -ENODEV;
}
static void zcrypt_status_mask(char status[AP_DEVICES])
{
	struct zcrypt_device *zdev;

	memset(status, 0, sizeof(char) * AP_DEVICES);
	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list)
		status[AP_QID_DEVICE(zdev->ap_dev->qid)] =
			zdev->online ? zdev->user_space_type : 0x0d;
	spin_unlock_bh(&zcrypt_device_lock);
}

static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES])
{
	struct zcrypt_device *zdev;

	memset(qdepth, 0, sizeof(char) * AP_DEVICES);
	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		spin_lock(&zdev->ap_dev->lock);
		qdepth[AP_QID_DEVICE(zdev->ap_dev->qid)] =
			zdev->ap_dev->pendingq_count +
			zdev->ap_dev->requestq_count;
		spin_unlock(&zdev->ap_dev->lock);
	}
	spin_unlock_bh(&zcrypt_device_lock);
}

static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES])
{
	struct zcrypt_device *zdev;

	memset(reqcnt, 0, sizeof(int) * AP_DEVICES);
	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		spin_lock(&zdev->ap_dev->lock);
		reqcnt[AP_QID_DEVICE(zdev->ap_dev->qid)] =
			zdev->ap_dev->total_request_count;
		spin_unlock(&zdev->ap_dev->lock);
	}
	spin_unlock_bh(&zcrypt_device_lock);
}

static int zcrypt_pendingq_count(void)
{
	struct zcrypt_device *zdev;
	int pendingq_count = 0;

	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		spin_lock(&zdev->ap_dev->lock);
		pendingq_count += zdev->ap_dev->pendingq_count;
		spin_unlock(&zdev->ap_dev->lock);
	}
	spin_unlock_bh(&zcrypt_device_lock);
	return pendingq_count;
}

static int zcrypt_requestq_count(void)
{
	struct zcrypt_device *zdev;
	int requestq_count = 0;

	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		spin_lock(&zdev->ap_dev->lock);
		requestq_count += zdev->ap_dev->requestq_count;
		spin_unlock(&zdev->ap_dev->lock);
	}
	spin_unlock_bh(&zcrypt_device_lock);
	return requestq_count;
}

static int zcrypt_count_type(int type)
{
	struct zcrypt_device *zdev;
	int device_count = 0;

	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list)
		if (zdev->user_space_type == type)
			device_count++;
	spin_unlock_bh(&zcrypt_device_lock);
	return device_count;
}
/*
 * zcrypt_ica_status(): Old, deprecated combi status call.
 *
 * Old, deprecated combi status call.
 */
static long zcrypt_ica_status(struct file *filp, unsigned long arg)
{
	struct ica_z90_status *pstat;
	int ret;

	pstat = kzalloc(sizeof(*pstat), GFP_KERNEL);
	if (!pstat)
		return -ENOMEM;
	pstat->totalcount = zcrypt_device_count;
	pstat->leedslitecount = zcrypt_count_type(ZCRYPT_PCICA);
	pstat->leeds2count = zcrypt_count_type(ZCRYPT_PCICC);
	pstat->requestqWaitCount = zcrypt_requestq_count();
	pstat->pendingqWaitCount = zcrypt_pendingq_count();
	pstat->totalOpenCount = atomic_read(&zcrypt_open_count);
	pstat->cryptoDomain = ap_domain_index;
	zcrypt_status_mask(pstat->status);
	zcrypt_qdepth_mask(pstat->qdepth);
	ret = 0;
	if (copy_to_user((void __user *) arg, pstat, sizeof(*pstat)))
		ret = -EFAULT;
	kfree(pstat);
	return ret;
}
static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
				  unsigned long arg)
{
	int rc;

	switch (cmd) {
	case ICARSAMODEXPO: {
		struct ica_rsa_modexpo __user *umex = (void __user *) arg;
		struct ica_rsa_modexpo mex;

		if (copy_from_user(&mex, umex, sizeof(mex)))
			return -EFAULT;
		do {
			rc = zcrypt_rsa_modexpo(&mex);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_rsa_modexpo(&mex);
			} while (rc == -EAGAIN);
		if (rc)
			return rc;
		return put_user(mex.outputdatalength, &umex->outputdatalength);
	}
	case ICARSACRT: {
		struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg;
		struct ica_rsa_modexpo_crt crt;

		if (copy_from_user(&crt, ucrt, sizeof(crt)))
			return -EFAULT;
		do {
			rc = zcrypt_rsa_crt(&crt);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_rsa_crt(&crt);
			} while (rc == -EAGAIN);
		if (rc)
			return rc;
		return put_user(crt.outputdatalength, &ucrt->outputdatalength);
	}
	case ZSECSENDCPRB: {
		struct ica_xcRB __user *uxcRB = (void __user *) arg;
		struct ica_xcRB xcRB;

		if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB)))
			return -EFAULT;
		do {
			rc = zcrypt_send_cprb(&xcRB);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_send_cprb(&xcRB);
			} while (rc == -EAGAIN);
		if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
			return -EFAULT;
		return rc;
	}
	case Z90STAT_STATUS_MASK: {
		char status[AP_DEVICES];

		zcrypt_status_mask(status);
		if (copy_to_user((char __user *) arg, status,
				 sizeof(char) * AP_DEVICES))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_QDEPTH_MASK: {
		char qdepth[AP_DEVICES];

		zcrypt_qdepth_mask(qdepth);
		if (copy_to_user((char __user *) arg, qdepth,
				 sizeof(char) * AP_DEVICES))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_PERDEV_REQCNT: {
		int reqcnt[AP_DEVICES];

		zcrypt_perdev_reqcnt(reqcnt);
		if (copy_to_user((int __user *) arg, reqcnt,
				 sizeof(int) * AP_DEVICES))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_REQUESTQ_COUNT:
		return put_user(zcrypt_requestq_count(), (int __user *) arg);
	case Z90STAT_PENDINGQ_COUNT:
		return put_user(zcrypt_pendingq_count(), (int __user *) arg);
	case Z90STAT_TOTALOPEN_COUNT:
		return put_user(atomic_read(&zcrypt_open_count),
				(int __user *) arg);
	case Z90STAT_DOMAIN_INDEX:
		return put_user(ap_domain_index, (int __user *) arg);
	/*
	 * Deprecated ioctls. Don't add another device count ioctl,
	 * you can count them yourself in the user space with the
	 * output of the Z90STAT_STATUS_MASK ioctl.
	 */
	case ICAZ90STATUS:
		return zcrypt_ica_status(filp, arg);
	case Z90STAT_TOTALCOUNT:
		return put_user(zcrypt_device_count, (int __user *) arg);
	case Z90STAT_PCICACOUNT:
		return put_user(zcrypt_count_type(ZCRYPT_PCICA),
				(int __user *) arg);
	case Z90STAT_PCICCCOUNT:
		return put_user(zcrypt_count_type(ZCRYPT_PCICC),
				(int __user *) arg);
	case Z90STAT_PCIXCCMCL2COUNT:
		return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2),
				(int __user *) arg);
	case Z90STAT_PCIXCCMCL3COUNT:
		return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL3),
				(int __user *) arg);
	case Z90STAT_PCIXCCCOUNT:
		return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2) +
				zcrypt_count_type(ZCRYPT_PCIXCC_MCL3),
				(int __user *) arg);
	case Z90STAT_CEX2CCOUNT:
		return put_user(zcrypt_count_type(ZCRYPT_CEX2C),
				(int __user *) arg);
	case Z90STAT_CEX2ACOUNT:
		return put_user(zcrypt_count_type(ZCRYPT_CEX2A),
				(int __user *) arg);
	default:
		/* unknown ioctl number */
		return -ENOIOCTLCMD;
	}
}
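
/*
 * A user-space sketch of the ICARSAMODEXPO path handled above.  It uses
 * the UAPI definitions from <asm/zcrypt.h> (struct ica_rsa_modexpo and
 * the ioctl number); buffer contents and sizes are placeholders -- a
 * real caller supplies a modulus-sized input, the public key parts and
 * an output buffer of at least inputdatalength bytes.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <asm/zcrypt.h>
 *
 *	static int modexpo(char *in, char *out, char *b_key,
 *			   char *n_modulus, unsigned int len)
 *	{
 *		struct ica_rsa_modexpo mex = {
 *			.inputdata = in,
 *			.inputdatalength = len,
 *			.outputdata = out,
 *			.outputdatalength = len,
 *			.b_key = b_key,
 *			.n_modulus = n_modulus,
 *		};
 *		int fd, rc;
 *
 *		fd = open("/dev/z90crypt", O_RDWR);
 *		if (fd < 0)
 *			return -1;
 *		rc = ioctl(fd, ICARSAMODEXPO, &mex);
 *		close(fd);
 *		return rc;
 *	}
 *
 * On success the driver copies the final outputdatalength back to user
 * space, as done at the end of the ICARSAMODEXPO case above.
 */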
#ifdef CONFIG_COMPAT
/*
 * ioctl32 conversion routines
 */
struct compat_ica_rsa_modexpo {
	compat_uptr_t	inputdata;
	unsigned int	inputdatalength;
	compat_uptr_t	outputdata;
	unsigned int	outputdatalength;
	compat_uptr_t	b_key;
	compat_uptr_t	n_modulus;
};

static long trans_modexpo32(struct file *filp, unsigned int cmd,
			    unsigned long arg)
{
	struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo mex32;
	struct ica_rsa_modexpo mex64;
	long rc;

	if (copy_from_user(&mex32, umex32, sizeof(mex32)))
		return -EFAULT;
	mex64.inputdata = compat_ptr(mex32.inputdata);
	mex64.inputdatalength = mex32.inputdatalength;
	mex64.outputdata = compat_ptr(mex32.outputdata);
	mex64.outputdatalength = mex32.outputdatalength;
	mex64.b_key = compat_ptr(mex32.b_key);
	mex64.n_modulus = compat_ptr(mex32.n_modulus);
	do {
		rc = zcrypt_rsa_modexpo(&mex64);
	} while (rc == -EAGAIN);
	/* on failure: retry once again after a requested rescan */
	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
		do {
			rc = zcrypt_rsa_modexpo(&mex64);
		} while (rc == -EAGAIN);
	if (rc)
		return rc;
	return put_user(mex64.outputdatalength,
			&umex32->outputdatalength);
}

struct compat_ica_rsa_modexpo_crt {
	compat_uptr_t	inputdata;
	unsigned int	inputdatalength;
	compat_uptr_t	outputdata;
	unsigned int	outputdatalength;
	compat_uptr_t	bp_key;
	compat_uptr_t	bq_key;
	compat_uptr_t	np_prime;
	compat_uptr_t	nq_prime;
	compat_uptr_t	u_mult_inv;
};

static long trans_modexpo_crt32(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo_crt crt32;
	struct ica_rsa_modexpo_crt crt64;
	long rc;

	if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
		return -EFAULT;
	crt64.inputdata = compat_ptr(crt32.inputdata);
	crt64.inputdatalength = crt32.inputdatalength;
	crt64.outputdata = compat_ptr(crt32.outputdata);
	crt64.outputdatalength = crt32.outputdatalength;
	crt64.bp_key = compat_ptr(crt32.bp_key);
	crt64.bq_key = compat_ptr(crt32.bq_key);
	crt64.np_prime = compat_ptr(crt32.np_prime);
	crt64.nq_prime = compat_ptr(crt32.nq_prime);
	crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
	do {
		rc = zcrypt_rsa_crt(&crt64);
	} while (rc == -EAGAIN);
	/* on failure: retry once again after a requested rescan */
	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
		do {
			rc = zcrypt_rsa_crt(&crt64);
		} while (rc == -EAGAIN);
	if (rc)
		return rc;
	return put_user(crt64.outputdatalength,
			&ucrt32->outputdatalength);
}

struct compat_ica_xcRB {
	unsigned short	agent_ID;
	unsigned int	user_defined;
	unsigned short	request_ID;
	unsigned int	request_control_blk_length;
	unsigned char	padding1[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	request_control_blk_addr;
	unsigned int	request_data_length;
	char		padding2[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	request_data_address;
	unsigned int	reply_control_blk_length;
	char		padding3[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	reply_control_blk_addr;
	unsigned int	reply_data_length;
	char		padding4[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	reply_data_addr;
	unsigned short	priority_window;
	unsigned int	status;
} __attribute__((packed));

static long trans_xcRB32(struct file *filp, unsigned int cmd,
			 unsigned long arg)
{
	struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg);
	struct compat_ica_xcRB xcRB32;
	struct ica_xcRB xcRB64;
	long rc;

	if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32)))
		return -EFAULT;
	xcRB64.agent_ID = xcRB32.agent_ID;
	xcRB64.user_defined = xcRB32.user_defined;
	xcRB64.request_ID = xcRB32.request_ID;
	xcRB64.request_control_blk_length =
		xcRB32.request_control_blk_length;
	xcRB64.request_control_blk_addr =
		compat_ptr(xcRB32.request_control_blk_addr);
	xcRB64.request_data_length =
		xcRB32.request_data_length;
	xcRB64.request_data_address =
		compat_ptr(xcRB32.request_data_address);
	xcRB64.reply_control_blk_length =
		xcRB32.reply_control_blk_length;
	xcRB64.reply_control_blk_addr =
		compat_ptr(xcRB32.reply_control_blk_addr);
	xcRB64.reply_data_length = xcRB32.reply_data_length;
	xcRB64.reply_data_addr =
		compat_ptr(xcRB32.reply_data_addr);
	xcRB64.priority_window = xcRB32.priority_window;
	xcRB64.status = xcRB32.status;
	do {
		rc = zcrypt_send_cprb(&xcRB64);
	} while (rc == -EAGAIN);
	/* on failure: retry once again after a requested rescan */
	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
		do {
			rc = zcrypt_send_cprb(&xcRB64);
		} while (rc == -EAGAIN);
	xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length;
	xcRB32.reply_data_length = xcRB64.reply_data_length;
	xcRB32.status = xcRB64.status;
	if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32)))
		return -EFAULT;
	return rc;
}

static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	if (cmd == ICARSAMODEXPO)
		return trans_modexpo32(filp, cmd, arg);
	if (cmd == ICARSACRT)
		return trans_modexpo_crt32(filp, cmd, arg);
	if (cmd == ZSECSENDCPRB)
		return trans_xcRB32(filp, cmd, arg);
	return zcrypt_unlocked_ioctl(filp, cmd, arg);
}
#endif
/*
 * Misc device file operations.
 */
static const struct file_operations zcrypt_fops = {
	.owner		= THIS_MODULE,
	.read		= zcrypt_read,
	.write		= zcrypt_write,
	.unlocked_ioctl	= zcrypt_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= zcrypt_compat_ioctl,
#endif
	.open		= zcrypt_open,
	.release	= zcrypt_release,
	.llseek		= no_llseek,
};

/*
 * Misc device.
 */
static struct miscdevice zcrypt_misc_device = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "z90crypt",
	.fops	= &zcrypt_fops,
};
/*
 * Deprecated /proc entry support.
 */
static struct proc_dir_entry *zcrypt_entry;

static void sprintcl(struct seq_file *m, unsigned char *addr, unsigned int len)
{
	int i;

	for (i = 0; i < len; i++)
		seq_printf(m, "%01x", (unsigned int) addr[i]);
	seq_putc(m, ' ');
}

static void sprintrw(struct seq_file *m, unsigned char *addr, unsigned int len)
{
	int inl, c, cx;

	seq_printf(m, " ");
	inl = 0;
	for (c = 0; c < (len / 16); c++) {
		sprintcl(m, addr+inl, 16);
		inl += 16;
	}
	cx = len % 16;
	if (cx) {
		sprintcl(m, addr+inl, cx);
		inl += cx;
	}
	seq_putc(m, '\n');
}

static void sprinthx(unsigned char *title, struct seq_file *m,
		     unsigned char *addr, unsigned int len)
{
	int inl, r, rx;

	seq_printf(m, "\n%s\n", title);
	inl = 0;
	for (r = 0; r < (len / 64); r++) {
		sprintrw(m, addr+inl, 64);
		inl += 64;
	}
	rx = len % 64;
	if (rx) {
		sprintrw(m, addr+inl, rx);
		inl += rx;
	}
	seq_putc(m, '\n');
}

static void sprinthx4(unsigned char *title, struct seq_file *m,
		      unsigned int *array, unsigned int len)
{
	int r;

	seq_printf(m, "\n%s\n", title);
	for (r = 0; r < len; r++) {
		if ((r % 8) == 0)
			seq_printf(m, " ");
		seq_printf(m, "%08X ", array[r]);
		if ((r % 8) == 7)
			seq_putc(m, '\n');
	}
	seq_putc(m, '\n');
}
static int zcrypt_proc_show(struct seq_file *m, void *v)
{
	char workarea[sizeof(int) * AP_DEVICES];

	seq_printf(m, "\nzcrypt version: %d.%d.%d\n",
		   ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT);
	seq_printf(m, "Cryptographic domain: %d\n", ap_domain_index);
	seq_printf(m, "Total device count: %d\n", zcrypt_device_count);
	seq_printf(m, "PCICA count: %d\n", zcrypt_count_type(ZCRYPT_PCICA));
	seq_printf(m, "PCICC count: %d\n", zcrypt_count_type(ZCRYPT_PCICC));
	seq_printf(m, "PCIXCC MCL2 count: %d\n",
		   zcrypt_count_type(ZCRYPT_PCIXCC_MCL2));
	seq_printf(m, "PCIXCC MCL3 count: %d\n",
		   zcrypt_count_type(ZCRYPT_PCIXCC_MCL3));
	seq_printf(m, "CEX2C count: %d\n", zcrypt_count_type(ZCRYPT_CEX2C));
	seq_printf(m, "CEX2A count: %d\n", zcrypt_count_type(ZCRYPT_CEX2A));
	seq_printf(m, "CEX3C count: %d\n", zcrypt_count_type(ZCRYPT_CEX3C));
	seq_printf(m, "CEX3A count: %d\n", zcrypt_count_type(ZCRYPT_CEX3A));
	seq_printf(m, "requestq count: %d\n", zcrypt_requestq_count());
	seq_printf(m, "pendingq count: %d\n", zcrypt_pendingq_count());
	seq_printf(m, "Total open handles: %d\n\n",
		   atomic_read(&zcrypt_open_count));
	zcrypt_status_mask(workarea);
	sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
		 "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A 7=CEX3C 8=CEX3A",
		 m, workarea, AP_DEVICES);
	zcrypt_qdepth_mask(workarea);
	sprinthx("Waiting work element counts", m, workarea, AP_DEVICES);
	zcrypt_perdev_reqcnt((int *) workarea);
	sprinthx4("Per-device successfully completed request counts",
		  m, (unsigned int *) workarea, AP_DEVICES);
	return 0;
}

static int zcrypt_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, zcrypt_proc_show, NULL);
}
static void zcrypt_disable_card(int index)
{
	struct zcrypt_device *zdev;

	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list)
		if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) {
			zdev->online = 0;
			ap_flush_queue(zdev->ap_dev);
			break;
		}
	spin_unlock_bh(&zcrypt_device_lock);
}

static void zcrypt_enable_card(int index)
{
	struct zcrypt_device *zdev;

	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list)
		if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) {
			zdev->online = 1;
			break;
		}
	spin_unlock_bh(&zcrypt_device_lock);
}
static ssize_t zcrypt_proc_write(struct file *file, const char __user *buffer,
				 size_t count, loff_t *pos)
{
	unsigned char *lbuf, *ptr;
	size_t local_count;
	int j;

	if (count <= 0)
		return 0;

#define LBUFSIZE 1200UL
	lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
	if (!lbuf)
		return 0;

	local_count = min(LBUFSIZE - 1, count);
	if (copy_from_user(lbuf, buffer, local_count) != 0) {
		kfree(lbuf);
		return -EFAULT;
	}
	lbuf[local_count] = '\0';

	ptr = strstr(lbuf, "Online devices");
	if (!ptr)
		goto out;
	ptr = strstr(ptr, "\n");
	if (!ptr)
		goto out;
	ptr++;

	if (strstr(ptr, "Waiting work element counts") == NULL)
		goto out;

	for (j = 0; j < 64 && *ptr; ptr++) {
		/*
		 * '0' for no device, '1' for PCICA, '2' for PCICC,
		 * '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3,
		 * '5' for CEX2C, '6' for CEX2A,
		 * '7' for CEX3C and '8' for CEX3A
		 */
		if (*ptr >= '0' && *ptr <= '8')
			j++;
		else if (*ptr == 'd' || *ptr == 'D')
			zcrypt_disable_card(j++);
		else if (*ptr == 'e' || *ptr == 'E')
			zcrypt_enable_card(j++);
		else if (*ptr != ' ' && *ptr != '\t')
			break;
	}
out:
	kfree(lbuf);
	return count;
}
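
/*
 * Driving the deprecated /proc toggle interface above -- a user-space
 * sketch.  The parser looks for the "Online devices" block that
 * zcrypt_proc_show() itself emits, so the usual approach is to read the
 * file, flip the character for the target card to 'd' (disable) or 'e'
 * (enable), and write the text back.  Buffer sizing and error handling
 * are deliberately minimal here.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	char buf[4096];
 *	int fd = open("/proc/driver/z90crypt", O_RDWR);
 *	ssize_t n = read(fd, buf, sizeof(buf) - 1);
 *	buf[n > 0 ? n : 0] = '\0';
 *	... locate the digit for card N in the "Online devices" block
 *	    and overwrite it with 'd' or 'e' ...
 *	write(fd, buf, n);
 *	close(fd);
 *
 * The sysfs "online" attribute earlier in this file is the
 * non-deprecated way to achieve the same effect.
 */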
static const struct file_operations zcrypt_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= zcrypt_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= zcrypt_proc_write,
};

static int zcrypt_rng_device_count;
static u32 *zcrypt_rng_buffer;
static int zcrypt_rng_buffer_index;
static DEFINE_MUTEX(zcrypt_rng_mutex);

static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
{
	int rc;

	/*
	 * We don't need locking here because the RNG API guarantees serialized
	 * read method calls.
	 */
	if (zcrypt_rng_buffer_index == 0) {
		rc = zcrypt_rng((char *) zcrypt_rng_buffer);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			rc = zcrypt_rng((char *) zcrypt_rng_buffer);
		if (rc < 0)
			return -EIO;
		zcrypt_rng_buffer_index = rc / sizeof *data;
	}
	*data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
	return sizeof *data;
}
static struct hwrng zcrypt_rng_dev = {
	.name		= "zcrypt",
	.data_read	= zcrypt_rng_data_read,
};

static int zcrypt_rng_device_add(void)
{
	int rc = 0;

	mutex_lock(&zcrypt_rng_mutex);
	if (zcrypt_rng_device_count == 0) {
		zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL);
		if (!zcrypt_rng_buffer) {
			rc = -ENOMEM;
			goto out;
		}
		zcrypt_rng_buffer_index = 0;
		rc = hwrng_register(&zcrypt_rng_dev);
		if (rc)
			goto out_free;
		zcrypt_rng_device_count = 1;
	} else
		zcrypt_rng_device_count++;
	mutex_unlock(&zcrypt_rng_mutex);
	return 0;

out_free:
	free_page((unsigned long) zcrypt_rng_buffer);
out:
	mutex_unlock(&zcrypt_rng_mutex);
	return rc;
}

static void zcrypt_rng_device_remove(void)
{
	mutex_lock(&zcrypt_rng_mutex);
	zcrypt_rng_device_count--;
	if (zcrypt_rng_device_count == 0) {
		hwrng_unregister(&zcrypt_rng_dev);
		free_page((unsigned long) zcrypt_rng_buffer);
	}
	mutex_unlock(&zcrypt_rng_mutex);
}
int __init zcrypt_debug_init(void)
{
	debugfs_root = debugfs_create_dir("zcrypt", NULL);

	zcrypt_dbf_common = debug_register("zcrypt_common", 1, 1, 16);
	debug_register_view(zcrypt_dbf_common, &debug_hex_ascii_view);
	debug_set_level(zcrypt_dbf_common, DBF_ERR);

	zcrypt_dbf_devices = debug_register("zcrypt_devices", 1, 1, 16);
	debug_register_view(zcrypt_dbf_devices, &debug_hex_ascii_view);
	debug_set_level(zcrypt_dbf_devices, DBF_ERR);

	return 0;
}

void zcrypt_debug_exit(void)
{
	debugfs_remove(debugfs_root);
	if (zcrypt_dbf_common)
		debug_unregister(zcrypt_dbf_common);
	if (zcrypt_dbf_devices)
		debug_unregister(zcrypt_dbf_devices);
}
/*
 * zcrypt_api_init(): Module initialization.
 *
 * The module initialization code.
 */
int __init zcrypt_api_init(void)
{
	int rc;

	rc = zcrypt_debug_init();
	if (rc)
		goto out;

	atomic_set(&zcrypt_rescan_req, 0);

	/* Register the request sprayer. */
	rc = misc_register(&zcrypt_misc_device);
	if (rc < 0)
		goto out;

	/* Set up the proc file system */
	zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL,
				   &zcrypt_proc_fops);
	if (!zcrypt_entry) {
		rc = -ENOMEM;
		goto out_misc;
	}

	return 0;

out_misc:
	misc_deregister(&zcrypt_misc_device);
out:
	return rc;
}

/*
 * zcrypt_api_exit(): Module termination.
 *
 * The module termination code.
 */
void zcrypt_api_exit(void)
{
	remove_proc_entry("driver/z90crypt", NULL);
	misc_deregister(&zcrypt_misc_device);
	zcrypt_debug_exit();
}

module_init(zcrypt_api_init);
module_exit(zcrypt_api_exit);