/*
 * drivers/s390/char/vmlogrdr.c
 * character device driver for reading z/VM system service records
 *
 *
 * Copyright IBM Corp. 2004, 2009
 * character device driver for reading z/VM system service records,
 * Version 1.0
 * Author(s): Xenia Tkatschow <xenia@us.ibm.com>
 *	      Stefan Weinhuber <wein@de.ibm.com>
 *
 */

#define KMSG_COMPONENT "vmlogrdr"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <asm/cpcmd.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <net/iucv/iucv.h>
#include <linux/kmod.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/smp_lock.h>
#include <linux/string.h>

MODULE_AUTHOR
	("(C) 2004 IBM Corporation by Xenia Tkatschow (xenia@us.ibm.com)\n"
	 "	    Stefan Weinhuber (wein@de.ibm.com)");
MODULE_DESCRIPTION ("Character device driver for reading z/VM "
		    "system service records.");
MODULE_LICENSE("GPL");

/*
 * The size of the buffer for iucv data transfer is one page,
 * but in addition to the data we read from iucv we also
 * place an integer and some characters into that buffer,
 * so the maximum size for record data is a little less than
 * one page.
 */
#define NET_BUFFER_SIZE	(PAGE_SIZE - sizeof(int) - sizeof(FENCE))

/*
 * The elements that are concurrently accessed by bottom halves are
 * connection_established, iucv_path_severed, local_interrupt_buffer
 * and receive_ready. The first three can be protected by
 * priv_lock.  receive_ready is atomic, so it can be incremented and
 * decremented without holding a lock.
 * The variable dev_in_use needs to be protected by the lock, since
 * it's a flag used by open to make sure that the device is opened only
 * by one user at the same time.
 */
struct vmlogrdr_priv_t {
	char system_service[8];
	char internal_name[8];
	char recording_name[8];
	struct iucv_path *path;
	int connection_established;
	int iucv_path_severed;
	struct iucv_message local_interrupt_buffer;
	atomic_t receive_ready;
	int minor_num;
	char * buffer;
	char * current_position;
	int remaining;
	ulong residual_length;
	int buffer_free;
	int dev_in_use; /* 1: already opened, 0: not opened*/
	spinlock_t priv_lock;
	struct device  *device;
	struct device  *class_device;
	int autorecording;
	int autopurge;
};

/*
 * File operation structure for vmlogrdr devices
 */
static int vmlogrdr_open(struct inode *, struct file *);
static int vmlogrdr_release(struct inode *, struct file *);
static ssize_t vmlogrdr_read (struct file *filp, char __user *data,
			      size_t count, loff_t * ppos);

static const struct file_operations vmlogrdr_fops = {
	.owner   = THIS_MODULE,
	.open    = vmlogrdr_open,
	.release = vmlogrdr_release,
	.read    = vmlogrdr_read,
};

static void vmlogrdr_iucv_path_complete(struct iucv_path *, u8 ipuser[16]);
static void vmlogrdr_iucv_path_severed(struct iucv_path *, u8 ipuser[16]);
static void vmlogrdr_iucv_message_pending(struct iucv_path *,
					  struct iucv_message *);

static struct iucv_handler vmlogrdr_iucv_handler = {
	.path_complete	 = vmlogrdr_iucv_path_complete,
	.path_severed	 = vmlogrdr_iucv_path_severed,
	.message_pending = vmlogrdr_iucv_message_pending,
};

static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue);
static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);

/*
 * pointer to system service private structure
 * minor number 0 --> logrec
 * minor number 1 --> account
 * minor number 2 --> symptom
 */
static struct vmlogrdr_priv_t sys_ser[] = {
	{ .system_service = "*LOGREC ",
	  .internal_name  = "logrec",
	  .recording_name = "EREP",
	  .minor_num      = 0,
	  .buffer_free    = 1,
	  .priv_lock	  = __SPIN_LOCK_UNLOCKED(sys_ser[0].priv_lock),
	  .autorecording  = 1,
	  .autopurge      = 1,
	},
	{ .system_service = "*ACCOUNT",
	  .internal_name  = "account",
	  .recording_name = "ACCOUNT",
	  .minor_num      = 1,
	  .buffer_free    = 1,
	  .priv_lock	  = __SPIN_LOCK_UNLOCKED(sys_ser[1].priv_lock),
	  .autorecording  = 1,
	  .autopurge      = 1,
	},
	{ .system_service = "*SYMPTOM",
	  .internal_name  = "symptom",
	  .recording_name = "SYMPTOM",
	  .minor_num      = 2,
	  .buffer_free    = 1,
	  .priv_lock	  = __SPIN_LOCK_UNLOCKED(sys_ser[2].priv_lock),
	  .autorecording  = 1,
	  .autopurge      = 1,
	}
};

#define MAXMINOR  (sizeof(sys_ser)/sizeof(struct vmlogrdr_priv_t))

static char FENCE[] = {"EOR"};
static int vmlogrdr_major = 0;
static struct cdev  *vmlogrdr_cdev = NULL;
static int recording_class_AB;
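
/*
 * IUCV callback: the path to the *LOGREC / *ACCOUNT / *SYMPTOM service
 * has been established. Mark the connection as up and wake up the
 * opener waiting on conn_wait_queue.
 */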
static void vmlogrdr_iucv_path_complete(struct iucv_path *path, u8 ipuser[16])
{
	struct vmlogrdr_priv_t * logptr = path->private;

	spin_lock(&logptr->priv_lock);
	logptr->connection_established = 1;
	spin_unlock(&logptr->priv_lock);
	wake_up(&conn_wait_queue);
}
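

/*
 * IUCV callback: the peer severed the path. Tear the path down, record
 * the severed state under priv_lock and wake up both the opener and any
 * sleeping reader.
 */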
static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
{
	struct vmlogrdr_priv_t * logptr = path->private;
	u8 reason = (u8) ipuser[8];

	pr_err("vmlogrdr: connection severed with reason %i\n", reason);

	iucv_path_sever(path, NULL);
	kfree(path);
	logptr->path = NULL;

	spin_lock(&logptr->priv_lock);
	logptr->connection_established = 0;
	logptr->iucv_path_severed = 1;
	spin_unlock(&logptr->priv_lock);

	wake_up(&conn_wait_queue);
	/* just in case we're sleeping waiting for a record */
	wake_up_interruptible(&read_wait_queue);
}
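

/*
 * IUCV callback: a record is pending on the path. Save the message
 * descriptor, bump receive_ready and wake up a reader blocked in
 * vmlogrdr_read().
 */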
static void vmlogrdr_iucv_message_pending(struct iucv_path *path,
					  struct iucv_message *msg)
{
	struct vmlogrdr_priv_t * logptr = path->private;

	/*
	 * This function is the bottom half so it should be quick.
	 * Copy the external interrupt data into our local eib and increment
	 * the usage count
	 */
	spin_lock(&logptr->priv_lock);
	memcpy(&logptr->local_interrupt_buffer, msg, sizeof(*msg));
	atomic_inc(&logptr->receive_ready);
	spin_unlock(&logptr->priv_lock);
	wake_up_interruptible(&read_wait_queue);
}
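

/*
 * Ask CP whether this guest may issue the RECORDING command with
 * privilege class A or B (return 1) or not (return 0). The result
 * decides whether the " QID * " option is appended to RECORDING
 * commands later on.
 */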
static int vmlogrdr_get_recording_class_AB(void)
{
	char cp_command[]="QUERY COMMAND RECORDING ";
	char cp_response[80];
	char *tail;
	int len,i;

	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
	len = strnlen(cp_response,sizeof(cp_response));
	// now the parsing
	tail=strnchr(cp_response,len,'=');
	if (!tail)
		return 0;
	tail++;
	if (!strncmp("ANY",tail,3))
		return 1;
	if (!strncmp("NONE",tail,4))
		return 0;
	/*
	 * expect comma separated list of classes here, if one of them
	 * is A or B return 1 otherwise 0
	 */
	for (i=tail-cp_response; i<len; i++)
		if ( cp_response[i]=='A' || cp_response[i]=='B' )
			return 1;
	return 0;
}
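

/*
 * Switch recording for the given system service on (action == 1) or off
 * via the CP RECORDING command; optionally purge queued records first.
 * Returns 0 if CP answered with 'Command complete', -EIO otherwise.
 */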
static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
			      int action, int purge)
{

	char cp_command[80];
	char cp_response[160];
	char *onoff, *qid_string;

	memset(cp_command, 0x00, sizeof(cp_command));
	memset(cp_response, 0x00, sizeof(cp_response));

	onoff = ((action == 1) ? "ON" : "OFF");
	qid_string = ((recording_class_AB == 1) ? " QID * " : "");

	/*
	 * The recording command needs to be called with option QID
	 * for guests that have privilege classes A or B.
	 * Purging has to be done as a separate step, because recording
	 * can't be switched on as long as records are on the queue.
	 * Doing both at the same time doesn't work.
	 */
	if (purge) {
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE %s",
			 logptr->recording_name,
			 qid_string);

		cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
	}

	memset(cp_command, 0x00, sizeof(cp_command));
	memset(cp_response, 0x00, sizeof(cp_response));
	snprintf(cp_command, sizeof(cp_command), "RECORDING %s %s %s",
		 logptr->recording_name,
		 onoff,
		 qid_string);

	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
	/* The recording command will usually answer with 'Command complete'
	 * on success, but when the specific service was never connected
	 * before then there might be an additional informational message
	 * 'HCPCRC8072I Recording entry not found' before the
	 * 'Command complete'. So I use strstr rather than strncmp.
	 */
	if (strstr(cp_response,"Command complete"))
		return 0;
	else
		return -EIO;

}
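

/*
 * open() for one of the vmlogrdr devices: allow only a single (blocking)
 * opener per device, optionally start recording, and connect an IUCV
 * path to the system service before returning.
 */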
static int vmlogrdr_open (struct inode *inode, struct file *filp)
{
	int dev_num = 0;
	struct vmlogrdr_priv_t * logptr = NULL;
	int connect_rc = 0;
	int ret;

	dev_num = iminor(inode);
	if (dev_num >= MAXMINOR)
		return -ENODEV;
	logptr = &sys_ser[dev_num];

	/*
	 * only allow for blocking reads to be open
	 */
	if (filp->f_flags & O_NONBLOCK)
		return -ENOSYS;

	/* Be sure this device hasn't already been opened */
	lock_kernel();
	spin_lock_bh(&logptr->priv_lock);
	if (logptr->dev_in_use) {
		spin_unlock_bh(&logptr->priv_lock);
		unlock_kernel();
		return -EBUSY;
	}
	logptr->dev_in_use = 1;
	logptr->connection_established = 0;
	logptr->iucv_path_severed = 0;
	atomic_set(&logptr->receive_ready, 0);
	logptr->buffer_free = 1;
	spin_unlock_bh(&logptr->priv_lock);

	/* set the file options */
	filp->private_data = logptr;
	filp->f_op = &vmlogrdr_fops;

	/* start recording for this service*/
	if (logptr->autorecording) {
		ret = vmlogrdr_recording(logptr,1,logptr->autopurge);
		if (ret)
			pr_warning("vmlogrdr: failed to start "
				   "recording automatically\n");
	}

	/* create connection to the system service */
	logptr->path = iucv_path_alloc(10, 0, GFP_KERNEL);
	if (!logptr->path)
		goto out_dev;
	connect_rc = iucv_path_connect(logptr->path, &vmlogrdr_iucv_handler,
				       logptr->system_service, NULL, NULL,
				       logptr);
	if (connect_rc) {
		pr_err("vmlogrdr: iucv connection to %s "
		       "failed with rc %i \n",
		       logptr->system_service, connect_rc);
		goto out_path;
	}

	/* We've issued the connect and now we must wait for a
	 * ConnectionComplete or ConnectionSevered Interrupt
	 * before we can continue to process.
	 */
	wait_event(conn_wait_queue, (logptr->connection_established)
		   || (logptr->iucv_path_severed));
	if (logptr->iucv_path_severed)
		goto out_record;
	ret = nonseekable_open(inode, filp);
	unlock_kernel();
	return ret;

out_record:
	if (logptr->autorecording)
		vmlogrdr_recording(logptr,0,logptr->autopurge);
out_path:
	kfree(logptr->path);	/* kfree(NULL) is ok. */
	logptr->path = NULL;
out_dev:
	logptr->dev_in_use = 0;
	unlock_kernel();
	return -EIO;
}
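

/*
 * release(): sever and free the IUCV path, optionally stop recording,
 * and mark the device as no longer in use.
 */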
static int vmlogrdr_release (struct inode *inode, struct file *filp)
{
	int ret;

	struct vmlogrdr_priv_t * logptr = filp->private_data;

	iucv_path_sever(logptr->path, NULL);
	kfree(logptr->path);
	logptr->path = NULL;
	if (logptr->autorecording) {
		ret = vmlogrdr_recording(logptr,0,logptr->autopurge);
		if (ret)
			pr_warning("vmlogrdr: failed to stop "
				   "recording automatically\n");
	}
	logptr->dev_in_use = 0;

	return 0;
}
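

/*
 * Fetch the next chunk of record data from IUCV into priv->buffer.
 * The first 4 bytes of a new record hold its total length (including
 * the trailing FENCE string); a record larger than the buffer is
 * received in several pieces via priv->residual_length.
 */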
static int vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv)
{
	int rc, *temp;
	/* we need to keep track of two data sizes here:
	 * The number of bytes we need to receive from iucv and
	 * the total number of bytes we actually write into the buffer.
	 */
	int user_data_count, iucv_data_count;
	char * buffer;

	if (atomic_read(&priv->receive_ready)) {
		spin_lock_bh(&priv->priv_lock);
		if (priv->residual_length){
			/* receive second half of a record */
			iucv_data_count = priv->residual_length;
			user_data_count = 0;
			buffer = priv->buffer;
		} else {
			/* receive a new record:
			 * We need to return the total length of the record
			 * + size of FENCE in the first 4 bytes of the buffer.
			 */
			iucv_data_count = priv->local_interrupt_buffer.length;
			user_data_count = sizeof(int);
			temp = (int*)priv->buffer;
			*temp= iucv_data_count + sizeof(FENCE);
			buffer = priv->buffer + sizeof(int);
		}
		/*
		 * If the record is bigger than our buffer, we receive only
		 * a part of it. We can get the rest later.
		 */
		if (iucv_data_count > NET_BUFFER_SIZE)
			iucv_data_count = NET_BUFFER_SIZE;
		rc = iucv_message_receive(priv->path,
					  &priv->local_interrupt_buffer,
					  0, buffer, iucv_data_count,
					  &priv->residual_length);
		spin_unlock_bh(&priv->priv_lock);
		/* An rc of 5 indicates that the record was bigger than
		 * the buffer, which is OK for us. A 9 indicates that the
		 * record was purged before we could receive it.
		 */
		if (rc == 5)
			rc = 0;
		if (rc == 9)
			atomic_set(&priv->receive_ready, 0);
	} else {
		rc = 1;
	}
	if (!rc) {
		priv->buffer_free = 0;
		user_data_count += iucv_data_count;
		priv->current_position = priv->buffer;
		if (priv->residual_length == 0){
			/* the whole record has been captured,
			 * now add the fence */
			atomic_dec(&priv->receive_ready);
			buffer = priv->buffer + user_data_count;
			memcpy(buffer, FENCE, sizeof(FENCE));
			user_data_count += sizeof(FENCE);
		}
		priv->remaining = user_data_count;
	}

	return rc;
}
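

/*
 * read(): refill the buffer if it is free (sleeping until a record is
 * ready), then copy at most one record chunk to user space.
 */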
static ssize_t vmlogrdr_read(struct file *filp, char __user *data,
			     size_t count, loff_t * ppos)
{
	int rc;
	struct vmlogrdr_priv_t * priv = filp->private_data;

	while (priv->buffer_free) {
		rc = vmlogrdr_receive_data(priv);
		if (rc) {
			rc = wait_event_interruptible(read_wait_queue,
					atomic_read(&priv->receive_ready));
			if (rc)
				return rc;
		}
	}
	/* copy only up to end of record */
	if (count > priv->remaining)
		count = priv->remaining;

	if (copy_to_user(data, priv->current_position, count))
		return -EFAULT;

	*ppos += count;
	priv->current_position += count;
	priv->remaining -= count;

	/* if all data has been transferred, set buffer free */
	if (priv->remaining == 0)
		priv->buffer_free = 1;

	return count;
}
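

/*
 * sysfs attribute "autopurge" (0/1): whether queued records are purged
 * when recording is started/stopped automatically on open/release.
 */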
static ssize_t vmlogrdr_autopurge_store(struct device * dev,
					struct device_attribute *attr,
					const char * buf, size_t count)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	ssize_t ret = count;

	switch (buf[0]) {
	case '0':
		priv->autopurge=0;
		break;
	case '1':
		priv->autopurge=1;
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}


static ssize_t vmlogrdr_autopurge_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	return sprintf(buf, "%u\n", priv->autopurge);
}


static DEVICE_ATTR(autopurge, 0644, vmlogrdr_autopurge_show,
		   vmlogrdr_autopurge_store);
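

/*
 * sysfs attribute "purge": writing '1' purges all queued records for
 * this service via the CP RECORDING PURGE command.
 */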
static ssize_t vmlogrdr_purge_store(struct device * dev,
				    struct device_attribute *attr,
				    const char * buf, size_t count)
{

	char cp_command[80];
	char cp_response[80];
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);

	if (buf[0] != '1')
		return -EINVAL;

	memset(cp_command, 0x00, sizeof(cp_command));
	memset(cp_response, 0x00, sizeof(cp_response));

	/*
	 * The recording command needs to be called with option QID
	 * for guests that have privilege classes A or B.
	 * Other guests will not recognize the command and we have to
	 * issue the same command without the QID parameter.
	 */
	if (recording_class_AB)
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE QID * ",
			 priv->recording_name);
	else
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE ",
			 priv->recording_name);

	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);

	return count;
}


static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store);


static ssize_t vmlogrdr_autorecording_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	ssize_t ret = count;

	switch (buf[0]) {
	case '0':
		priv->autorecording=0;
		break;
	case '1':
		priv->autorecording=1;
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}


static ssize_t vmlogrdr_autorecording_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	return sprintf(buf, "%u\n", priv->autorecording);
}


static DEVICE_ATTR(autorecording, 0644, vmlogrdr_autorecording_show,
		   vmlogrdr_autorecording_store);


static ssize_t vmlogrdr_recording_store(struct device * dev,
					struct device_attribute *attr,
					const char * buf, size_t count)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	ssize_t ret;

	switch (buf[0]) {
	case '0':
		ret = vmlogrdr_recording(priv,0,0);
		break;
	case '1':
		ret = vmlogrdr_recording(priv,1,0);
		break;
	default:
		ret = -EINVAL;
	}
	if (ret)
		return ret;
	else
		return count;

}


static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store);
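

/*
 * driver attribute "recording_status": returns the raw response of the
 * CP QUERY RECORDING command.
 */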
static ssize_t vmlogrdr_recording_status_show(struct device_driver *driver,
					      char *buf)
{

	char cp_command[] = "QUERY RECORDING ";
	int len;

	cpcmd(cp_command, buf, 4096, NULL);
	len = strlen(buf);
	return len;
}


static DRIVER_ATTR(recording_status, 0444, vmlogrdr_recording_status_show,
		   NULL);

static struct attribute *vmlogrdr_attrs[] = {
	&dev_attr_autopurge.attr,
	&dev_attr_purge.attr,
	&dev_attr_autorecording.attr,
	&dev_attr_recording.attr,
	NULL,
};
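
/* refuse to suspend while one of the vmlogrdr devices is open */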
static int vmlogrdr_pm_prepare(struct device *dev)
{
	int rc;
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);

	rc = 0;
	if (priv) {
		spin_lock_bh(&priv->priv_lock);
		if (priv->dev_in_use)
			rc = -EBUSY;
		spin_unlock_bh(&priv->priv_lock);
	}
	if (rc)
		pr_err("vmlogrdr: device %s is busy. Refuse to suspend.\n",
		       dev_name(dev));
	return rc;
}


static struct dev_pm_ops vmlogrdr_pm_ops = {
	.prepare = vmlogrdr_pm_prepare,
};

static struct attribute_group vmlogrdr_attr_group = {
	.attrs = vmlogrdr_attrs,
};

static struct class *vmlogrdr_class;
static struct device_driver vmlogrdr_driver = {
	.name = "vmlogrdr",
	.bus  = &iucv_bus,
	.pm = &vmlogrdr_pm_ops,
};
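

/*
 * Register the IUCV handler, the driver on the iucv bus, the
 * recording_status driver attribute and the vmlogrdr device class.
 */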
static int vmlogrdr_register_driver(void)
{
	int ret;

	/* Register with iucv driver */
	ret = iucv_register(&vmlogrdr_iucv_handler, 1);
	if (ret)
		goto out;

	ret = driver_register(&vmlogrdr_driver);
	if (ret)
		goto out_iucv;

	ret = driver_create_file(&vmlogrdr_driver,
				 &driver_attr_recording_status);
	if (ret)
		goto out_driver;

	vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr");
	if (IS_ERR(vmlogrdr_class)) {
		ret = PTR_ERR(vmlogrdr_class);
		vmlogrdr_class = NULL;
		goto out_attr;
	}
	return 0;

out_attr:
	driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status);
out_driver:
	driver_unregister(&vmlogrdr_driver);
out_iucv:
	iucv_unregister(&vmlogrdr_iucv_handler, 1);
out:
	return ret;
}


static void vmlogrdr_unregister_driver(void)
{
	class_destroy(vmlogrdr_class);
	vmlogrdr_class = NULL;
	driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status);
	driver_unregister(&vmlogrdr_driver);
	iucv_unregister(&vmlogrdr_iucv_handler, 1);
}
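

/*
 * Create the iucv bus device, its sysfs attribute group and the class
 * device (character device node) for one system service.
 */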
static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
{
	struct device *dev;
	int ret;

	dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (dev) {
		dev_set_name(dev, priv->internal_name);
		dev->bus = &iucv_bus;
		dev->parent = iucv_root;
		dev->driver = &vmlogrdr_driver;
		dev_set_drvdata(dev, priv);
		/*
		 * The release function could be called after the
		 * module has been unloaded. Its _only_ task is to
		 * free the struct. Therefore, we specify kfree()
		 * directly here. (Probably a little bit obfuscating
		 * but legitimate ...).
		 */
		dev->release = (void (*)(struct device *))kfree;
	} else
		return -ENOMEM;
	ret = device_register(dev);
	if (ret)
		return ret;

	ret = sysfs_create_group(&dev->kobj, &vmlogrdr_attr_group);
	if (ret) {
		device_unregister(dev);
		return ret;
	}
	priv->class_device = device_create(vmlogrdr_class, dev,
					   MKDEV(vmlogrdr_major,
						 priv->minor_num),
					   priv, "%s", dev_name(dev));
	if (IS_ERR(priv->class_device)) {
		ret = PTR_ERR(priv->class_device);
		priv->class_device=NULL;
		sysfs_remove_group(&dev->kobj, &vmlogrdr_attr_group);
		device_unregister(dev);
		return ret;
	}
	priv->device = dev;
	return 0;
}


static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv)
{
	device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num));
	if (priv->device != NULL) {
		sysfs_remove_group(&priv->device->kobj, &vmlogrdr_attr_group);
		device_unregister(priv->device);
		priv->device=NULL;
	}
	return 0;
}
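

/*
 * Allocate and add the single character device that covers all
 * MAXMINOR minors.
 */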
static int vmlogrdr_register_cdev(dev_t dev)
{
	int rc = 0;

	vmlogrdr_cdev = cdev_alloc();
	if (!vmlogrdr_cdev) {
		return -ENOMEM;
	}
	vmlogrdr_cdev->owner = THIS_MODULE;
	vmlogrdr_cdev->ops = &vmlogrdr_fops;
	vmlogrdr_cdev->dev = dev;
	rc = cdev_add(vmlogrdr_cdev, vmlogrdr_cdev->dev, MAXMINOR);
	if (!rc)
		return 0;

	// cleanup: cdev is not fully registered, no cdev_del here!
	kobject_put(&vmlogrdr_cdev->kobj);
	vmlogrdr_cdev=NULL;
	return rc;
}
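

/*
 * Undo everything vmlogrdr_init() has set up; also used on the error
 * paths of the init function.
 */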
static void vmlogrdr_cleanup(void)
{
	int i;

	if (vmlogrdr_cdev) {
		cdev_del(vmlogrdr_cdev);
		vmlogrdr_cdev=NULL;
	}
	for (i=0; i < MAXMINOR; ++i ) {
		vmlogrdr_unregister_device(&sys_ser[i]);
		free_page((unsigned long)sys_ser[i].buffer);
	}
	vmlogrdr_unregister_driver();
	if (vmlogrdr_major) {
		unregister_chrdev_region(MKDEV(vmlogrdr_major, 0), MAXMINOR);
		vmlogrdr_major=0;
	}
}
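

/*
 * Module init: register driver, devices and the character device;
 * the driver only loads when running as a z/VM guest.
 */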
static int __init vmlogrdr_init(void)
{
	int rc;
	int i;
	dev_t dev;

	if (! MACHINE_IS_VM) {
		pr_err("not running under VM, driver not loaded.\n");
		return -ENODEV;
	}

	recording_class_AB = vmlogrdr_get_recording_class_AB();

	rc = alloc_chrdev_region(&dev, 0, MAXMINOR, "vmlogrdr");
	if (rc)
		return rc;
	vmlogrdr_major = MAJOR(dev);

	rc=vmlogrdr_register_driver();
	if (rc)
		goto cleanup;

	for (i=0; i < MAXMINOR; ++i ) {
		sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL);
		if (!sys_ser[i].buffer) {
			rc = -ENOMEM;
			break;
		}
		sys_ser[i].current_position = sys_ser[i].buffer;
		rc=vmlogrdr_register_device(&sys_ser[i]);
		if (rc)
			break;
	}
	if (rc)
		goto cleanup;

	rc = vmlogrdr_register_cdev(dev);
	if (rc)
		goto cleanup;
	return 0;

cleanup:
	vmlogrdr_cleanup();
	return rc;
}


static void __exit vmlogrdr_exit(void)
{
	vmlogrdr_cleanup();
	return;
}


module_init(vmlogrdr_init);
module_exit(vmlogrdr_exit);