drivers/s390/char/vmlogrdr.c

/*
 * character device driver for reading z/VM system service records
 *
 * Copyright IBM Corp. 2004, 2009
 * character device driver for reading z/VM system service records,
 * Version 1.0
 * Author(s): Xenia Tkatschow <xenia@us.ibm.com>
 *            Stefan Weinhuber <wein@de.ibm.com>
 *
 */

#define KMSG_COMPONENT "vmlogrdr"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <asm/uaccess.h>
#include <asm/cpcmd.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <net/iucv/iucv.h>
#include <linux/kmod.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/string.h>

MODULE_AUTHOR
        ("(C) 2004 IBM Corporation by Xenia Tkatschow (xenia@us.ibm.com)\n"
         "          Stefan Weinhuber (wein@de.ibm.com)");
MODULE_DESCRIPTION ("Character device driver for reading z/VM "
                    "system service records.");
MODULE_LICENSE("GPL");

/*
 * The size of the buffer for iucv data transfer is one page,
 * but in addition to the data we read from iucv we also
 * place an integer and some characters into that buffer,
 * so the maximum size for record data is a little less than
 * one page.
 */
#define NET_BUFFER_SIZE (PAGE_SIZE - sizeof(int) - sizeof(FENCE))

/*
 * The elements that are concurrently accessed by bottom halves are
 * connection_established, iucv_path_severed, local_interrupt_buffer
 * and receive_ready. The first three can be protected by
 * priv_lock.  receive_ready is atomic, so it can be incremented and
 * decremented without holding a lock.
 * The variable dev_in_use needs to be protected by the lock, since
 * it's a flag used by open to make sure that the device is opened only
 * by one user at the same time.
 */
struct vmlogrdr_priv_t {
        char system_service[8];
        char internal_name[8];
        char recording_name[8];
        struct iucv_path *path;
        int connection_established;
        int iucv_path_severed;
        struct iucv_message local_interrupt_buffer;
        atomic_t receive_ready;
        int minor_num;
        char *buffer;
        char *current_position;
        int remaining;
        ulong residual_length;
        int buffer_free;
        int dev_in_use; /* 1: already opened, 0: not opened */
        spinlock_t priv_lock;
        struct device *device;
        struct device *class_device;
        int autorecording;
        int autopurge;
};

/*
 * File operation structure for vmlogrdr devices
 */
static int vmlogrdr_open(struct inode *, struct file *);
static int vmlogrdr_release(struct inode *, struct file *);
static ssize_t vmlogrdr_read (struct file *filp, char __user *data,
                              size_t count, loff_t *ppos);

static const struct file_operations vmlogrdr_fops = {
        .owner   = THIS_MODULE,
        .open    = vmlogrdr_open,
        .release = vmlogrdr_release,
        .read    = vmlogrdr_read,
        .llseek  = no_llseek,
};

static void vmlogrdr_iucv_path_complete(struct iucv_path *, u8 *ipuser);
static void vmlogrdr_iucv_path_severed(struct iucv_path *, u8 *ipuser);
static void vmlogrdr_iucv_message_pending(struct iucv_path *,
                                          struct iucv_message *);


static struct iucv_handler vmlogrdr_iucv_handler = {
        .path_complete   = vmlogrdr_iucv_path_complete,
        .path_severed    = vmlogrdr_iucv_path_severed,
        .message_pending = vmlogrdr_iucv_message_pending,
};


static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue);
static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);

/*
 * pointer to system service private structure
 * minor number 0 --> logrec
 * minor number 1 --> account
 * minor number 2 --> symptom
 */
static struct vmlogrdr_priv_t sys_ser[] = {
        { .system_service = "*LOGREC ",
          .internal_name  = "logrec",
          .recording_name = "EREP",
          .minor_num      = 0,
          .buffer_free    = 1,
          .priv_lock      = __SPIN_LOCK_UNLOCKED(sys_ser[0].priv_lock),
          .autorecording  = 1,
          .autopurge      = 1,
        },
        { .system_service = "*ACCOUNT",
          .internal_name  = "account",
          .recording_name = "ACCOUNT",
          .minor_num      = 1,
          .buffer_free    = 1,
          .priv_lock      = __SPIN_LOCK_UNLOCKED(sys_ser[1].priv_lock),
          .autorecording  = 1,
          .autopurge      = 1,
        },
        { .system_service = "*SYMPTOM",
          .internal_name  = "symptom",
          .recording_name = "SYMPTOM",
          .minor_num      = 2,
          .buffer_free    = 1,
          .priv_lock      = __SPIN_LOCK_UNLOCKED(sys_ser[2].priv_lock),
          .autorecording  = 1,
          .autopurge      = 1,
        }
};

#define MAXMINOR  (sizeof(sys_ser)/sizeof(struct vmlogrdr_priv_t))

static char FENCE[] = {"EOR"};
static int vmlogrdr_major = 0;
static struct cdev *vmlogrdr_cdev = NULL;
static int recording_class_AB;

static void vmlogrdr_iucv_path_complete(struct iucv_path *path, u8 *ipuser)
{
        struct vmlogrdr_priv_t *logptr = path->private;

        spin_lock(&logptr->priv_lock);
        logptr->connection_established = 1;
        spin_unlock(&logptr->priv_lock);
        wake_up(&conn_wait_queue);
}

static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 *ipuser)
{
        struct vmlogrdr_priv_t *logptr = path->private;
        u8 reason = (u8) ipuser[8];

        pr_err("vmlogrdr: connection severed with reason %i\n", reason);

        iucv_path_sever(path, NULL);
        kfree(path);
        logptr->path = NULL;

        spin_lock(&logptr->priv_lock);
        logptr->connection_established = 0;
        logptr->iucv_path_severed = 1;
        spin_unlock(&logptr->priv_lock);

        wake_up(&conn_wait_queue);
        /* just in case we're sleeping waiting for a record */
        wake_up_interruptible(&read_wait_queue);
}

static void vmlogrdr_iucv_message_pending(struct iucv_path *path,
                                          struct iucv_message *msg)
{
        struct vmlogrdr_priv_t *logptr = path->private;

        /*
         * This function is the bottom half so it should be quick.
         * Copy the external interrupt data into our local eib and increment
         * the usage count.
         */
        spin_lock(&logptr->priv_lock);
        memcpy(&logptr->local_interrupt_buffer, msg, sizeof(*msg));
        atomic_inc(&logptr->receive_ready);
        spin_unlock(&logptr->priv_lock);
        wake_up_interruptible(&read_wait_queue);
}

static int vmlogrdr_get_recording_class_AB(void)
{
        static const char cp_command[] = "QUERY COMMAND RECORDING ";
        char cp_response[80];
        char *tail;
        int len, i;

        cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
        len = strnlen(cp_response, sizeof(cp_response));
        /* now the parsing */
        tail = strnchr(cp_response, len, '=');
        if (!tail)
                return 0;
        tail++;
        if (!strncmp("ANY", tail, 3))
                return 1;
        if (!strncmp("NONE", tail, 4))
                return 0;
        /*
         * expect comma separated list of classes here, if one of them
         * is A or B return 1 otherwise 0
         */
        for (i = tail - cp_response; i < len; i++)
                if (cp_response[i] == 'A' || cp_response[i] == 'B')
                        return 1;
        return 0;
}

static int vmlogrdr_recording(struct vmlogrdr_priv_t *logptr,
                              int action, int purge)
{

        char cp_command[80];
        char cp_response[160];
        char *onoff, *qid_string;
        int rc;

        onoff = ((action == 1) ? "ON" : "OFF");
        qid_string = ((recording_class_AB == 1) ? " QID * " : "");

        /*
         * The recording command needs to be called with option QID
         * for guests that have privilege classes A or B.
         * Purging has to be done as a separate step, because recording
         * can't be switched on as long as records are on the queue.
         * Doing both at the same time doesn't work.
         */
        if (purge && (action == 1)) {
                memset(cp_command, 0x00, sizeof(cp_command));
                memset(cp_response, 0x00, sizeof(cp_response));
                snprintf(cp_command, sizeof(cp_command),
                         "RECORDING %s PURGE %s",
                         logptr->recording_name,
                         qid_string);
                cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
        }

        memset(cp_command, 0x00, sizeof(cp_command));
        memset(cp_response, 0x00, sizeof(cp_response));
        snprintf(cp_command, sizeof(cp_command), "RECORDING %s %s %s",
                 logptr->recording_name,
                 onoff,
                 qid_string);
        cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
        /* The recording command will usually answer with 'Command complete'
         * on success, but when the specific service was never connected
         * before then there might be an additional informational message
         * 'HCPCRC8072I Recording entry not found' before the
         * 'Command complete'. So I use strstr rather than strncmp.
         */
        if (strstr(cp_response, "Command complete"))
                rc = 0;
        else
                rc = -EIO;
        /*
         * If we turn recording off, we have to purge any remaining records
         * afterwards, as a large number of queued records may impact z/VM
         * performance.
         */
        if (purge && (action == 0)) {
                memset(cp_command, 0x00, sizeof(cp_command));
                memset(cp_response, 0x00, sizeof(cp_response));
                snprintf(cp_command, sizeof(cp_command),
                         "RECORDING %s PURGE %s",
                         logptr->recording_name,
                         qid_string);
                cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
        }

        return rc;
}

static int vmlogrdr_open (struct inode *inode, struct file *filp)
{
        int dev_num = 0;
        struct vmlogrdr_priv_t *logptr = NULL;
        int connect_rc = 0;
        int ret;

        dev_num = iminor(inode);
        if (dev_num >= MAXMINOR)
                return -ENODEV;
        logptr = &sys_ser[dev_num];

        /*
         * only allow the device to be opened for blocking reads
         */
        if (filp->f_flags & O_NONBLOCK)
                return -EOPNOTSUPP;

        /* Be sure this device hasn't already been opened */
        spin_lock_bh(&logptr->priv_lock);
        if (logptr->dev_in_use) {
                spin_unlock_bh(&logptr->priv_lock);
                return -EBUSY;
        }
        logptr->dev_in_use = 1;
        logptr->connection_established = 0;
        logptr->iucv_path_severed = 0;
        atomic_set(&logptr->receive_ready, 0);
        logptr->buffer_free = 1;
        spin_unlock_bh(&logptr->priv_lock);

        /* set the file options */
        filp->private_data = logptr;

        /* start recording for this service */
        if (logptr->autorecording) {
                ret = vmlogrdr_recording(logptr, 1, logptr->autopurge);
                if (ret)
                        pr_warn("vmlogrdr: failed to start recording automatically\n");
        }

        /* create connection to the system service */
        logptr->path = iucv_path_alloc(10, 0, GFP_KERNEL);
        if (!logptr->path)
                goto out_dev;
        connect_rc = iucv_path_connect(logptr->path, &vmlogrdr_iucv_handler,
                                       logptr->system_service, NULL, NULL,
                                       logptr);
        if (connect_rc) {
                pr_err("vmlogrdr: iucv connection to %s "
                       "failed with rc %i\n",
                       logptr->system_service, connect_rc);
                goto out_path;
        }

        /* We've issued the connect and now we must wait for a
         * ConnectionComplete or ConnectionSevered Interrupt
         * before we can continue to process.
         */
        wait_event(conn_wait_queue, (logptr->connection_established)
                   || (logptr->iucv_path_severed));
        if (logptr->iucv_path_severed)
                goto out_record;
        nonseekable_open(inode, filp);
        return 0;

out_record:
        if (logptr->autorecording)
                vmlogrdr_recording(logptr, 0, logptr->autopurge);
out_path:
        kfree(logptr->path);    /* kfree(NULL) is ok. */
        logptr->path = NULL;
out_dev:
        logptr->dev_in_use = 0;
        return -EIO;
}

static int vmlogrdr_release (struct inode *inode, struct file *filp)
{
        int ret;

        struct vmlogrdr_priv_t *logptr = filp->private_data;

        iucv_path_sever(logptr->path, NULL);
        kfree(logptr->path);
        logptr->path = NULL;
        if (logptr->autorecording) {
                ret = vmlogrdr_recording(logptr, 0, logptr->autopurge);
                if (ret)
                        pr_warn("vmlogrdr: failed to stop recording automatically\n");
        }
        logptr->dev_in_use = 0;

        return 0;
}

static int vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv)
{
        int rc, *temp;
        /* we need to keep track of two data sizes here:
         * The number of bytes we need to receive from iucv and
         * the total number of bytes we actually write into the buffer.
         */
        int user_data_count, iucv_data_count;
        char *buffer;

        if (atomic_read(&priv->receive_ready)) {
                spin_lock_bh(&priv->priv_lock);
                if (priv->residual_length) {
                        /* receive second half of a record */
                        iucv_data_count = priv->residual_length;
                        user_data_count = 0;
                        buffer = priv->buffer;
                } else {
                        /* receive a new record:
                         * We need to return the total length of the record
                         * + size of FENCE in the first 4 bytes of the buffer.
                         */
                        iucv_data_count = priv->local_interrupt_buffer.length;
                        user_data_count = sizeof(int);
                        temp = (int *)priv->buffer;
                        *temp = iucv_data_count + sizeof(FENCE);
                        buffer = priv->buffer + sizeof(int);
                }
                /*
                 * If the record is bigger than our buffer, we receive only
                 * a part of it. We can get the rest later.
                 */
                if (iucv_data_count > NET_BUFFER_SIZE)
                        iucv_data_count = NET_BUFFER_SIZE;
                rc = iucv_message_receive(priv->path,
                                          &priv->local_interrupt_buffer,
                                          0, buffer, iucv_data_count,
                                          &priv->residual_length);
                spin_unlock_bh(&priv->priv_lock);
                /* An rc of 5 indicates that the record was bigger than
                 * the buffer, which is OK for us. A 9 indicates that the
                 * record was purged before we could receive it.
                 */
                if (rc == 5)
                        rc = 0;
                if (rc == 9)
                        atomic_set(&priv->receive_ready, 0);
        } else {
                rc = 1;
        }
        if (!rc) {
                priv->buffer_free = 0;
                user_data_count += iucv_data_count;
                priv->current_position = priv->buffer;
                if (priv->residual_length == 0) {
                        /* the whole record has been captured,
                         * now add the fence */
                        atomic_dec(&priv->receive_ready);
                        buffer = priv->buffer + user_data_count;
                        memcpy(buffer, FENCE, sizeof(FENCE));
                        user_data_count += sizeof(FENCE);
                }
                priv->remaining = user_data_count;
        }

        return rc;
}

static ssize_t vmlogrdr_read(struct file *filp, char __user *data,
                             size_t count, loff_t *ppos)
{
        int rc;
        struct vmlogrdr_priv_t *priv = filp->private_data;

        while (priv->buffer_free) {
                rc = vmlogrdr_receive_data(priv);
                if (rc) {
                        rc = wait_event_interruptible(read_wait_queue,
                                        atomic_read(&priv->receive_ready));
                        if (rc)
                                return rc;
                }
        }
        /* copy only up to end of record */
        if (count > priv->remaining)
                count = priv->remaining;

        if (copy_to_user(data, priv->current_position, count))
                return -EFAULT;

        *ppos += count;
        priv->current_position += count;
        priv->remaining -= count;

        /* if all data has been transferred, set buffer free */
        if (priv->remaining == 0)
                priv->buffer_free = 1;

        return count;
}
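
/*
 * Illustrative userspace sketch (not part of this driver): as set up in
 * vmlogrdr_receive_data() and handed out by vmlogrdr_read() above, each
 * record starts with an int holding "record length + sizeof(FENCE)",
 * followed by the record data and the "EOR" fence.  The device node path
 * below and the assumption that one large read returns a whole record are
 * simplifications for illustration only.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[4096];
 *		ssize_t n;
 *		int fd = open("/dev/vmlogrdr_account", O_RDONLY);  // assumed node name
 *
 *		if (fd < 0)
 *			return 1;
 *		// blocking read; the driver rejects O_NONBLOCK opens
 *		while ((n = read(fd, buf, sizeof(buf))) > 0) {
 *			int total;  // record length + fence, as announced by the driver
 *
 *			memcpy(&total, buf, sizeof(total));
 *			printf("read %zd bytes, record announces %d\n", n, total);
 *		}
 *		close(fd);
 *		return 0;
 *	}
 */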

static ssize_t vmlogrdr_autopurge_store(struct device *dev,
                                        struct device_attribute *attr,
                                        const char *buf, size_t count)
{
        struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
        ssize_t ret = count;

        switch (buf[0]) {
        case '0':
                priv->autopurge = 0;
                break;
        case '1':
                priv->autopurge = 1;
                break;
        default:
                ret = -EINVAL;
        }
        return ret;
}


static ssize_t vmlogrdr_autopurge_show(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
        return sprintf(buf, "%u\n", priv->autopurge);
}


static DEVICE_ATTR(autopurge, 0644, vmlogrdr_autopurge_show,
                   vmlogrdr_autopurge_store);

static ssize_t vmlogrdr_purge_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf, size_t count)
{

        char cp_command[80];
        char cp_response[80];
        struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);

        if (buf[0] != '1')
                return -EINVAL;

        memset(cp_command, 0x00, sizeof(cp_command));
        memset(cp_response, 0x00, sizeof(cp_response));

        /*
         * The recording command needs to be called with option QID
         * for guests that have privilege classes A or B.
         * Other guests will not recognize the command and we have to
         * issue the same command without the QID parameter.
         */

        if (recording_class_AB)
                snprintf(cp_command, sizeof(cp_command),
                         "RECORDING %s PURGE QID * ",
                         priv->recording_name);
        else
                snprintf(cp_command, sizeof(cp_command),
                         "RECORDING %s PURGE ",
                         priv->recording_name);

        cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);

        return count;
}


static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store);

static ssize_t vmlogrdr_autorecording_store(struct device *dev,
                                            struct device_attribute *attr,
                                            const char *buf, size_t count)
{
        struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
        ssize_t ret = count;

        switch (buf[0]) {
        case '0':
                priv->autorecording = 0;
                break;
        case '1':
                priv->autorecording = 1;
                break;
        default:
                ret = -EINVAL;
        }
        return ret;
}


static ssize_t vmlogrdr_autorecording_show(struct device *dev,
                                           struct device_attribute *attr,
                                           char *buf)
{
        struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
        return sprintf(buf, "%u\n", priv->autorecording);
}


static DEVICE_ATTR(autorecording, 0644, vmlogrdr_autorecording_show,
                   vmlogrdr_autorecording_store);

static ssize_t vmlogrdr_recording_store(struct device *dev,
                                        struct device_attribute *attr,
                                        const char *buf, size_t count)
{
        struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
        ssize_t ret;

        switch (buf[0]) {
        case '0':
                ret = vmlogrdr_recording(priv, 0, 0);
                break;
        case '1':
                ret = vmlogrdr_recording(priv, 1, 0);
                break;
        default:
                ret = -EINVAL;
        }
        if (ret)
                return ret;
        else
                return count;
}


static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store);

static ssize_t vmlogrdr_recording_status_show(struct device_driver *driver,
                                              char *buf)
{

        static const char cp_command[] = "QUERY RECORDING ";
        int len;

        cpcmd(cp_command, buf, 4096, NULL);
        len = strlen(buf);
        return len;
}
static DRIVER_ATTR(recording_status, 0444, vmlogrdr_recording_status_show,
                   NULL);
static struct attribute *vmlogrdr_drv_attrs[] = {
        &driver_attr_recording_status.attr,
        NULL,
};
static struct attribute_group vmlogrdr_drv_attr_group = {
        .attrs = vmlogrdr_drv_attrs,
};
static const struct attribute_group *vmlogrdr_drv_attr_groups[] = {
        &vmlogrdr_drv_attr_group,
        NULL,
};

static struct attribute *vmlogrdr_attrs[] = {
        &dev_attr_autopurge.attr,
        &dev_attr_purge.attr,
        &dev_attr_autorecording.attr,
        &dev_attr_recording.attr,
        NULL,
};
static struct attribute_group vmlogrdr_attr_group = {
        .attrs = vmlogrdr_attrs,
};
static const struct attribute_group *vmlogrdr_attr_groups[] = {
        &vmlogrdr_attr_group,
        NULL,
};
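
/*
 * Minimal userspace sketch showing how the store functions behind these
 * attributes are driven.  The sysfs path is an assumption (devices are
 * registered on the iucv bus with names "logrec", "account" and "symptom",
 * so they are expected under /sys/bus/iucv/devices/): writing '1' to the
 * "purge" attribute invokes vmlogrdr_purge_store(), which issues the
 * RECORDING ... PURGE CP command.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		// assumed sysfs location of the "account" device
 *		int fd = open("/sys/bus/iucv/devices/account/purge", O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		if (write(fd, "1", 1) != 1) {
 *			close(fd);
 *			return 1;
 *		}
 *		close(fd);
 *		return 0;
 *	}
 */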

static int vmlogrdr_pm_prepare(struct device *dev)
{
        int rc;
        struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);

        rc = 0;
        if (priv) {
                spin_lock_bh(&priv->priv_lock);
                if (priv->dev_in_use)
                        rc = -EBUSY;
                spin_unlock_bh(&priv->priv_lock);
        }
        if (rc)
                pr_err("vmlogrdr: device %s is busy. Refuse to suspend.\n",
                       dev_name(dev));
        return rc;
}


static const struct dev_pm_ops vmlogrdr_pm_ops = {
        .prepare = vmlogrdr_pm_prepare,
};


static struct class *vmlogrdr_class;
static struct device_driver vmlogrdr_driver = {
        .name   = "vmlogrdr",
        .bus    = &iucv_bus,
        .pm     = &vmlogrdr_pm_ops,
        .groups = vmlogrdr_drv_attr_groups,
};

static int vmlogrdr_register_driver(void)
{
        int ret;

        /* Register with iucv driver */
        ret = iucv_register(&vmlogrdr_iucv_handler, 1);
        if (ret)
                goto out;

        ret = driver_register(&vmlogrdr_driver);
        if (ret)
                goto out_iucv;

        vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr");
        if (IS_ERR(vmlogrdr_class)) {
                ret = PTR_ERR(vmlogrdr_class);
                vmlogrdr_class = NULL;
                goto out_driver;
        }
        return 0;

out_driver:
        driver_unregister(&vmlogrdr_driver);
out_iucv:
        iucv_unregister(&vmlogrdr_iucv_handler, 1);
out:
        return ret;
}


static void vmlogrdr_unregister_driver(void)
{
        class_destroy(vmlogrdr_class);
        vmlogrdr_class = NULL;
        driver_unregister(&vmlogrdr_driver);
        iucv_unregister(&vmlogrdr_iucv_handler, 1);
}

static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
{
        struct device *dev;
        int ret;

        dev = kzalloc(sizeof(struct device), GFP_KERNEL);
        if (dev) {
                dev_set_name(dev, "%s", priv->internal_name);
                dev->bus = &iucv_bus;
                dev->parent = iucv_root;
                dev->driver = &vmlogrdr_driver;
                dev->groups = vmlogrdr_attr_groups;
                dev_set_drvdata(dev, priv);
                /*
                 * The release function could be called after the
                 * module has been unloaded. Its _only_ task is to
                 * free the struct. Therefore, we specify kfree()
                 * directly here. (Probably a little bit obfuscating
                 * but legitimate ...).
                 */
                dev->release = (void (*)(struct device *))kfree;
        } else
                return -ENOMEM;
        ret = device_register(dev);
        if (ret) {
                put_device(dev);
                return ret;
        }

        priv->class_device = device_create(vmlogrdr_class, dev,
                                           MKDEV(vmlogrdr_major,
                                                 priv->minor_num),
                                           priv, "%s", dev_name(dev));
        if (IS_ERR(priv->class_device)) {
                ret = PTR_ERR(priv->class_device);
                priv->class_device = NULL;
                device_unregister(dev);
                return ret;
        }
        priv->device = dev;
        return 0;
}


static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv)
{
        device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num));
        if (priv->device != NULL) {
                device_unregister(priv->device);
                priv->device = NULL;
        }
        return 0;
}

static int vmlogrdr_register_cdev(dev_t dev)
{
        int rc = 0;
        vmlogrdr_cdev = cdev_alloc();
        if (!vmlogrdr_cdev) {
                return -ENOMEM;
        }
        vmlogrdr_cdev->owner = THIS_MODULE;
        vmlogrdr_cdev->ops = &vmlogrdr_fops;
        vmlogrdr_cdev->dev = dev;
        rc = cdev_add(vmlogrdr_cdev, vmlogrdr_cdev->dev, MAXMINOR);
        if (!rc)
                return 0;

        /* cleanup: cdev is not fully registered, no cdev_del here! */
        kobject_put(&vmlogrdr_cdev->kobj);
        vmlogrdr_cdev = NULL;
        return rc;
}

static void vmlogrdr_cleanup(void)
{
        int i;

        if (vmlogrdr_cdev) {
                cdev_del(vmlogrdr_cdev);
                vmlogrdr_cdev = NULL;
        }
        for (i = 0; i < MAXMINOR; ++i) {
                vmlogrdr_unregister_device(&sys_ser[i]);
                free_page((unsigned long)sys_ser[i].buffer);
        }
        vmlogrdr_unregister_driver();
        if (vmlogrdr_major) {
                unregister_chrdev_region(MKDEV(vmlogrdr_major, 0), MAXMINOR);
                vmlogrdr_major = 0;
        }
}

static int __init vmlogrdr_init(void)
{
        int rc;
        int i;
        dev_t dev;

        if (!MACHINE_IS_VM) {
                pr_err("not running under VM, driver not loaded.\n");
                return -ENODEV;
        }

        recording_class_AB = vmlogrdr_get_recording_class_AB();

        rc = alloc_chrdev_region(&dev, 0, MAXMINOR, "vmlogrdr");
        if (rc)
                return rc;
        vmlogrdr_major = MAJOR(dev);

        rc = vmlogrdr_register_driver();
        if (rc)
                goto cleanup;

        for (i = 0; i < MAXMINOR; ++i) {
                sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL);
                if (!sys_ser[i].buffer) {
                        rc = -ENOMEM;
                        break;
                }
                sys_ser[i].current_position = sys_ser[i].buffer;
                rc = vmlogrdr_register_device(&sys_ser[i]);
                if (rc)
                        break;
        }
        if (rc)
                goto cleanup;

        rc = vmlogrdr_register_cdev(dev);
        if (rc)
                goto cleanup;
        return 0;

cleanup:
        vmlogrdr_cleanup();
        return rc;
}


static void __exit vmlogrdr_exit(void)
{
        vmlogrdr_cleanup();
        return;
}


module_init(vmlogrdr_init);
module_exit(vmlogrdr_exit);