/*
 * character device driver for reading z/VM system service records
 *
 * Copyright IBM Corp. 2004, 2009
 *
 * Author(s): Xenia Tkatschow <xenia@us.ibm.com>
 *            Stefan Weinhuber <wein@de.ibm.com>
 */
#define KMSG_COMPONENT "vmlogrdr"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <asm/uaccess.h>
#include <asm/cpcmd.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <net/iucv/iucv.h>
#include <linux/kmod.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/string.h>
MODULE_AUTHOR
        ("(C) 2004 IBM Corporation by Xenia Tkatschow (xenia@us.ibm.com)\n"
         "                            Stefan Weinhuber (wein@de.ibm.com)");
MODULE_DESCRIPTION ("Character device driver for reading z/VM "
                    "system service records.");
MODULE_LICENSE("GPL");
/*
 * The size of the buffer for iucv data transfer is one page,
 * but in addition to the data we read from iucv we also
 * place an integer and some characters into that buffer,
 * so the maximum size for record data is a little less than
 * one page.
 */
#define NET_BUFFER_SIZE (PAGE_SIZE - sizeof(int) - sizeof(FENCE))
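
/*
 * Illustrative buffer layout for a complete record, a sketch derived from
 * vmlogrdr_receive_data() below rather than from separate documentation:
 *
 *   [ int: record length + sizeof(FENCE) | record data ... | "EOR" fence ]
 */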
/*
 * The elements that are concurrently accessed by bottom halves are
 * connection_established, iucv_path_severed, local_interrupt_buffer
 * and receive_ready. The first three can be protected by
 * priv_lock. receive_ready is atomic, so it can be incremented and
 * decremented without holding a lock.
 * The variable dev_in_use needs to be protected by the lock, since
 * it's a flag used by open to make sure that the device is opened only
 * by one user at the same time.
 */
struct vmlogrdr_priv_t {
        char system_service[8];
        char internal_name[8];
        char recording_name[8];
        struct iucv_path *path;
        int connection_established;
        int iucv_path_severed;
        struct iucv_message local_interrupt_buffer;
        atomic_t receive_ready;
        int minor_num;
        char *buffer;
        char *current_position;
        int remaining;
        ulong residual_length;
        int buffer_free;
        int dev_in_use; /* 1: already opened, 0: not opened */
        spinlock_t priv_lock;
        struct device *device;
        struct device *class_device;
        int autorecording;
        int autopurge;
};
/*
 * File operation structure for vmlogrdr devices
 */
static int vmlogrdr_open(struct inode *, struct file *);
static int vmlogrdr_release(struct inode *, struct file *);
static ssize_t vmlogrdr_read(struct file *filp, char __user *data,
                             size_t count, loff_t *ppos);

static const struct file_operations vmlogrdr_fops = {
        .owner   = THIS_MODULE,
        .open    = vmlogrdr_open,
        .release = vmlogrdr_release,
        .read    = vmlogrdr_read,
};
static void vmlogrdr_iucv_path_complete(struct iucv_path *, u8 *ipuser);
static void vmlogrdr_iucv_path_severed(struct iucv_path *, u8 *ipuser);
static void vmlogrdr_iucv_message_pending(struct iucv_path *,
                                          struct iucv_message *);

static struct iucv_handler vmlogrdr_iucv_handler = {
        .path_complete   = vmlogrdr_iucv_path_complete,
        .path_severed    = vmlogrdr_iucv_path_severed,
        .message_pending = vmlogrdr_iucv_message_pending,
};
static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue);
static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);
/*
 * pointer to system service private structure
 * minor number 0 --> logrec
 * minor number 1 --> account
 * minor number 2 --> symptom
 */
static struct vmlogrdr_priv_t sys_ser[] = {
        { .system_service = "*LOGREC ",
          .internal_name  = "logrec",
          .recording_name = "EREP",
          .minor_num      = 0,
          .priv_lock      = __SPIN_LOCK_UNLOCKED(sys_ser[0].priv_lock),
        },
        { .system_service = "*ACCOUNT",
          .internal_name  = "account",
          .recording_name = "ACCOUNT",
          .minor_num      = 1,
          .priv_lock      = __SPIN_LOCK_UNLOCKED(sys_ser[1].priv_lock),
        },
        { .system_service = "*SYMPTOM",
          .internal_name  = "symptom",
          .recording_name = "SYMPTOM",
          .minor_num      = 2,
          .priv_lock      = __SPIN_LOCK_UNLOCKED(sys_ser[2].priv_lock),
        },
};

#define MAXMINOR ARRAY_SIZE(sys_ser)
static char FENCE[] = "EOR";
static int vmlogrdr_major = 0;
static struct cdev *vmlogrdr_cdev = NULL;
static int recording_class_AB;
static void vmlogrdr_iucv_path_complete(struct iucv_path *path, u8 *ipuser)
{
        struct vmlogrdr_priv_t *logptr = path->private;

        spin_lock(&logptr->priv_lock);
        logptr->connection_established = 1;
        spin_unlock(&logptr->priv_lock);
        wake_up(&conn_wait_queue);
}
static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 *ipuser)
{
        struct vmlogrdr_priv_t *logptr = path->private;
        u8 reason = (u8) ipuser[8];

        pr_err("connection severed with reason %i\n", reason);

        iucv_path_sever(path, NULL);

        spin_lock(&logptr->priv_lock);
        logptr->connection_established = 0;
        logptr->iucv_path_severed = 1;
        spin_unlock(&logptr->priv_lock);

        wake_up(&conn_wait_queue);
        /* just in case we're sleeping waiting for a record */
        wake_up_interruptible(&read_wait_queue);
}
static void vmlogrdr_iucv_message_pending(struct iucv_path *path,
                                          struct iucv_message *msg)
{
        struct vmlogrdr_priv_t *logptr = path->private;

        /*
         * This function is the bottom half so it should be quick.
         * Copy the external interrupt data into our local eib and
         * increment receive_ready.
         */
        spin_lock(&logptr->priv_lock);
        memcpy(&logptr->local_interrupt_buffer, msg, sizeof(*msg));
        atomic_inc(&logptr->receive_ready);
        spin_unlock(&logptr->priv_lock);
        wake_up_interruptible(&read_wait_queue);
}
static int vmlogrdr_get_recording_class_AB(void)
{
        static const char cp_command[] = "QUERY COMMAND RECORDING ";
        char cp_response[80];
        char *tail;
        int len, i;

        cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
        len = strnlen(cp_response, sizeof(cp_response));
        /* parse the response for the list of privilege classes */
        tail = strnchr(cp_response, len, '=');
        if (!tail)
                return 0;
        tail++;
        if (!strncmp("ANY", tail, 3))
                return 1;
        if (!strncmp("NONE", tail, 4))
                return 0;
        /*
         * expect a comma separated list of classes here, if one of them
         * is A or B return 1 otherwise 0
         */
        for (i = tail - cp_response; i < len; i++)
                if (cp_response[i] == 'A' || cp_response[i] == 'B')
                        return 1;
        return 0;
}
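
/*
 * The result is cached in recording_class_AB at module init time: a value
 * of 1 indicates that the guest holds privilege class A or B, so the CP
 * RECORDING commands issued below are given the " QID * " option; with 0
 * the option is omitted.
 */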
static int vmlogrdr_recording(struct vmlogrdr_priv_t *logptr,
                              int action, int purge)
{
        char cp_command[80];
        char cp_response[160];
        char *onoff, *qid_string;
        int rc;

        onoff = ((action == 1) ? "ON" : "OFF");
        qid_string = ((recording_class_AB == 1) ? " QID * " : "");

        /*
         * The recording command needs to be called with option QID
         * for guests that have privilege classes A or B.
         * Purging has to be done as a separate step, because recording
         * can't be switched on as long as records are on the queue.
         * Doing both at the same time doesn't work.
         */
        if (purge && (action == 1)) {
                memset(cp_command, 0x00, sizeof(cp_command));
                memset(cp_response, 0x00, sizeof(cp_response));
                snprintf(cp_command, sizeof(cp_command),
                         "RECORDING %s PURGE %s",
                         logptr->recording_name,
                         qid_string);
                cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
        }

        memset(cp_command, 0x00, sizeof(cp_command));
        memset(cp_response, 0x00, sizeof(cp_response));
        snprintf(cp_command, sizeof(cp_command), "RECORDING %s %s %s",
                 logptr->recording_name,
                 onoff,
                 qid_string);
        cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
        /* The recording command will usually answer with 'Command complete'
         * on success, but when the specific service was never connected
         * before then there might be an additional informational message
         * 'HCPCRC8072I Recording entry not found' before the
         * 'Command complete'. So use strstr rather than strncmp.
         */
        if (strstr(cp_response, "Command complete"))
                rc = 0;
        else
                rc = -EIO;
        /*
         * If we turn recording off, we have to purge any remaining records
         * afterwards, as a large number of queued records may impact z/VM
         * performance.
         */
        if (purge && (action == 0)) {
                memset(cp_command, 0x00, sizeof(cp_command));
                memset(cp_response, 0x00, sizeof(cp_response));
                snprintf(cp_command, sizeof(cp_command),
                         "RECORDING %s PURGE %s",
                         logptr->recording_name,
                         qid_string);
                cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
        }

        return rc;
}
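
/*
 * For illustration (derived from the format strings above, not from a CP
 * trace): switching the *LOGREC service on with purging on a class A/B
 * guest issues the CP commands
 *
 *      RECORDING EREP PURGE  QID *
 *      RECORDING EREP ON  QID *
 *
 * while a guest without class A or B gets the same commands without the
 * QID option.
 */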
static int vmlogrdr_open(struct inode *inode, struct file *filp)
{
        int dev_num = 0;
        struct vmlogrdr_priv_t *logptr = NULL;
        int connect_rc = 0;
        int ret;

        dev_num = iminor(inode);
        if (dev_num >= MAXMINOR)
                return -ENODEV;
        logptr = &sys_ser[dev_num];

        /*
         * only allow blocking reads to be opened
         */
        if (filp->f_flags & O_NONBLOCK)
                return -EOPNOTSUPP;

        /* Be sure this device hasn't already been opened */
        spin_lock_bh(&logptr->priv_lock);
        if (logptr->dev_in_use) {
                spin_unlock_bh(&logptr->priv_lock);
                return -EBUSY;
        }
        logptr->dev_in_use = 1;
        logptr->connection_established = 0;
        logptr->iucv_path_severed = 0;
        atomic_set(&logptr->receive_ready, 0);
        logptr->buffer_free = 1;
        spin_unlock_bh(&logptr->priv_lock);

        /* set the file options */
        filp->private_data = logptr;

        /* start recording for this service */
        if (logptr->autorecording) {
                ret = vmlogrdr_recording(logptr, 1, logptr->autopurge);
                if (ret)
                        pr_warn("failed to start recording automatically\n");
        }

        /* create connection to the system service */
        logptr->path = iucv_path_alloc(10, 0, GFP_KERNEL);
        if (!logptr->path)
                goto out_dev;
        connect_rc = iucv_path_connect(logptr->path, &vmlogrdr_iucv_handler,
                                       logptr->system_service, NULL, NULL,
                                       logptr);
        if (connect_rc) {
                pr_err("iucv connection to %s failed with rc %i\n",
                       logptr->system_service, connect_rc);
                goto out_path;
        }

        /* We've issued the connect and now we must wait for a
         * ConnectionComplete or ConnectionSevered Interrupt
         * before we can continue to process.
         */
        wait_event(conn_wait_queue, (logptr->connection_established)
                   || (logptr->iucv_path_severed));
        if (logptr->iucv_path_severed)
                goto out_record;
        nonseekable_open(inode, filp);
        return 0;

out_record:
        if (logptr->autorecording)
                vmlogrdr_recording(logptr, 0, logptr->autopurge);
out_path:
        kfree(logptr->path);    /* kfree(NULL) is ok. */
        logptr->path = NULL;
out_dev:
        logptr->dev_in_use = 0;
        return -EIO;
}
static int vmlogrdr_release(struct inode *inode, struct file *filp)
{
        int ret;
        struct vmlogrdr_priv_t *logptr = filp->private_data;

        iucv_path_sever(logptr->path, NULL);
        kfree(logptr->path);
        logptr->path = NULL;
        if (logptr->autorecording) {
                ret = vmlogrdr_recording(logptr, 0, logptr->autopurge);
                if (ret)
                        pr_warn("failed to stop recording automatically\n");
        }
        logptr->dev_in_use = 0;

        return 0;
}
static int vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv)
{
        int rc, *temp;
        /* we need to keep track of two data sizes here:
         * The number of bytes we need to receive from iucv and
         * the total number of bytes we actually write into the buffer.
         */
        int user_data_count, iucv_data_count;
        char *buffer;

        if (atomic_read(&priv->receive_ready)) {
                spin_lock_bh(&priv->priv_lock);
                if (priv->residual_length) {
                        /* receive second half of a record */
                        iucv_data_count = priv->residual_length;
                        user_data_count = 0;
                        buffer = priv->buffer;
                } else {
                        /* receive a new record:
                         * We need to return the total length of the record
                         * + size of FENCE in the first 4 bytes of the buffer.
                         */
                        iucv_data_count = priv->local_interrupt_buffer.length;
                        user_data_count = sizeof(int);
                        temp = (int *)priv->buffer;
                        *temp = iucv_data_count + sizeof(FENCE);
                        buffer = priv->buffer + sizeof(int);
                }
                /*
                 * If the record is bigger than our buffer, we receive only
                 * a part of it. We can get the rest later.
                 */
                if (iucv_data_count > NET_BUFFER_SIZE)
                        iucv_data_count = NET_BUFFER_SIZE;
                rc = iucv_message_receive(priv->path,
                                          &priv->local_interrupt_buffer,
                                          0, buffer, iucv_data_count,
                                          &priv->residual_length);
                spin_unlock_bh(&priv->priv_lock);
                /* An rc of 5 indicates that the record was bigger than
                 * the buffer, which is OK for us. A 9 indicates that the
                 * record was purged before we could receive it.
                 */
                if (rc == 5)
                        rc = 0;
                if (rc == 9)
                        atomic_set(&priv->receive_ready, 0);
        } else {
                rc = 1;
        }
        if (!rc) {
                priv->buffer_free = 0;
                user_data_count += iucv_data_count;
                priv->current_position = priv->buffer;
                if (priv->residual_length == 0) {
                        /* the whole record has been captured,
                         * now add the fence */
                        atomic_dec(&priv->receive_ready);
                        buffer = priv->buffer + user_data_count;
                        memcpy(buffer, FENCE, sizeof(FENCE));
                        user_data_count += sizeof(FENCE);
                }
                priv->remaining = user_data_count;
        }

        return rc;
}
static ssize_t vmlogrdr_read(struct file *filp, char __user *data,
                             size_t count, loff_t *ppos)
{
        int rc;
        struct vmlogrdr_priv_t *priv = filp->private_data;

        while (priv->buffer_free) {
                rc = vmlogrdr_receive_data(priv);
                if (rc) {
                        rc = wait_event_interruptible(read_wait_queue,
                                        atomic_read(&priv->receive_ready));
                        if (rc)
                                return rc;
                }
        }
        /* copy only up to end of record */
        if (count > priv->remaining)
                count = priv->remaining;

        if (copy_to_user(data, priv->current_position, count))
                return -EFAULT;

        *ppos += count;
        priv->current_position += count;
        priv->remaining -= count;

        /* if all data has been transferred, set buffer free */
        if (priv->remaining == 0)
                priv->buffer_free = 1;

        return count;
}
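
/*
 * Read-side framing, summarized from vmlogrdr_receive_data() and
 * vmlogrdr_read() above: each record handed to user space starts with a
 * 4-byte length word in host byte order (record size plus sizeof("EOR"))
 * and ends with the "EOR" fence, so a reader can delimit records by first
 * reading the length word and then that many bytes, possibly across
 * several read() calls.
 */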
static ssize_t vmlogrdr_autopurge_store(struct device *dev,
                                        struct device_attribute *attr,
                                        const char *buf, size_t count)
{
        struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
        ssize_t ret = count;

        switch (buf[0]) {
        case '0':
                priv->autopurge = 0;
                break;
        case '1':
                priv->autopurge = 1;
                break;
        default:
                ret = -EINVAL;
        }
        return ret;
}
static ssize_t vmlogrdr_autopurge_show(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
        return sprintf(buf, "%u\n", priv->autopurge);
}


static DEVICE_ATTR(autopurge, 0644, vmlogrdr_autopurge_show,
                   vmlogrdr_autopurge_store);
static ssize_t vmlogrdr_purge_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf, size_t count)
{
        char cp_command[80];
        char cp_response[80];
        struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);

        if (buf[0] != '1')
                return -EINVAL;

        memset(cp_command, 0x00, sizeof(cp_command));
        memset(cp_response, 0x00, sizeof(cp_response));

        /*
         * The recording command needs to be called with option QID
         * for guests that have privilege classes A or B.
         * Other guests will not recognize the command and we have to
         * issue the same command without the QID parameter.
         */
        if (recording_class_AB)
                snprintf(cp_command, sizeof(cp_command),
                         "RECORDING %s PURGE QID * ",
                         priv->recording_name);
        else
                snprintf(cp_command, sizeof(cp_command),
                         "RECORDING %s PURGE ",
                         priv->recording_name);

        cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);

        return count;
}


static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store);
static ssize_t vmlogrdr_autorecording_store(struct device *dev,
                                            struct device_attribute *attr,
                                            const char *buf, size_t count)
{
        struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
        ssize_t ret = count;

        switch (buf[0]) {
        case '0':
                priv->autorecording = 0;
                break;
        case '1':
                priv->autorecording = 1;
                break;
        default:
                ret = -EINVAL;
        }
        return ret;
}


static ssize_t vmlogrdr_autorecording_show(struct device *dev,
                                           struct device_attribute *attr,
                                           char *buf)
{
        struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
        return sprintf(buf, "%u\n", priv->autorecording);
}


static DEVICE_ATTR(autorecording, 0644, vmlogrdr_autorecording_show,
                   vmlogrdr_autorecording_store);
static ssize_t vmlogrdr_recording_store(struct device *dev,
                                        struct device_attribute *attr,
                                        const char *buf, size_t count)
{
        struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
        ssize_t ret;

        switch (buf[0]) {
        case '0':
                ret = vmlogrdr_recording(priv, 0, 0);
                break;
        case '1':
                ret = vmlogrdr_recording(priv, 1, 0);
                break;
        default:
                ret = -EINVAL;
        }
        if (ret)
                return ret;
        else
                return count;
}


static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store);
static ssize_t vmlogrdr_recording_status_show(struct device_driver *driver,
                                              char *buf)
{
        static const char cp_command[] = "QUERY RECORDING ";

        cpcmd(cp_command, buf, 4096, NULL);
        return strlen(buf);
}


static DRIVER_ATTR(recording_status, 0444, vmlogrdr_recording_status_show,
                   NULL);
static struct attribute *vmlogrdr_drv_attrs[] = {
        &driver_attr_recording_status.attr,
        NULL,
};
static struct attribute_group vmlogrdr_drv_attr_group = {
        .attrs = vmlogrdr_drv_attrs,
};
static const struct attribute_group *vmlogrdr_drv_attr_groups[] = {
        &vmlogrdr_drv_attr_group,
        NULL,
};

static struct attribute *vmlogrdr_attrs[] = {
        &dev_attr_autopurge.attr,
        &dev_attr_purge.attr,
        &dev_attr_autorecording.attr,
        &dev_attr_recording.attr,
        NULL,
};
static struct attribute_group vmlogrdr_attr_group = {
        .attrs = vmlogrdr_attrs,
};
static const struct attribute_group *vmlogrdr_attr_groups[] = {
        &vmlogrdr_attr_group,
        NULL,
};
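
/*
 * Sysfs usage sketch (the exact paths are an assumption based on the iucv
 * bus registration below, not something this file spells out): with the
 * devices named after internal_name, the per-device attributes typically
 * appear as e.g.
 *
 *      /sys/bus/iucv/devices/logrec/autopurge
 *      /sys/bus/iucv/devices/logrec/purge
 *      /sys/bus/iucv/devices/logrec/autorecording
 *      /sys/bus/iucv/devices/logrec/recording
 *
 * and writing '1' to purge or recording triggers the corresponding CP
 * RECORDING command.
 */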
static int vmlogrdr_pm_prepare(struct device *dev)
{
        int rc;
        struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);

        rc = 0;
        if (priv) {
                spin_lock_bh(&priv->priv_lock);
                if (priv->dev_in_use)
                        rc = -EBUSY;
                spin_unlock_bh(&priv->priv_lock);
        }
        if (rc)
                pr_err("device %s is busy. Refuse to suspend.\n",
                       dev_name(dev));
        return rc;
}


static const struct dev_pm_ops vmlogrdr_pm_ops = {
        .prepare = vmlogrdr_pm_prepare,
};
static struct class *vmlogrdr_class;
static struct device_driver vmlogrdr_driver = {
        .name   = "vmlogrdr",
        .bus    = &iucv_bus,
        .pm     = &vmlogrdr_pm_ops,
        .groups = vmlogrdr_drv_attr_groups,
};
static int vmlogrdr_register_driver(void)
{
        int ret;

        /* Register with iucv driver */
        ret = iucv_register(&vmlogrdr_iucv_handler, 1);
        if (ret)
                goto out;

        ret = driver_register(&vmlogrdr_driver);
        if (ret)
                goto out_iucv;

        vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr");
        if (IS_ERR(vmlogrdr_class)) {
                ret = PTR_ERR(vmlogrdr_class);
                vmlogrdr_class = NULL;
                goto out_driver;
        }
        return 0;

out_driver:
        driver_unregister(&vmlogrdr_driver);
out_iucv:
        iucv_unregister(&vmlogrdr_iucv_handler, 1);
out:
        return ret;
}
static void vmlogrdr_unregister_driver(void)
{
        class_destroy(vmlogrdr_class);
        vmlogrdr_class = NULL;
        driver_unregister(&vmlogrdr_driver);
        iucv_unregister(&vmlogrdr_iucv_handler, 1);
}
static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
{
        struct device *dev;
        int ret;

        dev = kzalloc(sizeof(struct device), GFP_KERNEL);
        if (!dev)
                return -ENOMEM;
        dev_set_name(dev, "%s", priv->internal_name);
        dev->bus = &iucv_bus;
        dev->parent = iucv_root;
        dev->driver = &vmlogrdr_driver;
        dev->groups = vmlogrdr_attr_groups;
        dev_set_drvdata(dev, priv);
        /*
         * The release function could be called after the
         * module has been unloaded. Its _only_ task is to
         * free the struct. Therefore, we specify kfree()
         * directly here. (Probably a little bit obfuscating.)
         */
        dev->release = (void (*)(struct device *))kfree;

        ret = device_register(dev);
        if (ret) {
                put_device(dev);
                return ret;
        }

        priv->class_device = device_create(vmlogrdr_class, dev,
                                           MKDEV(vmlogrdr_major,
                                                 priv->minor_num),
                                           priv, "%s", dev_name(dev));
        if (IS_ERR(priv->class_device)) {
                ret = PTR_ERR(priv->class_device);
                priv->class_device = NULL;
                device_unregister(dev);
                return ret;
        }
        priv->device = dev;
        return 0;
}
static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv)
{
        device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num));
        priv->class_device = NULL;
        if (priv->device != NULL) {
                device_unregister(priv->device);
                priv->device = NULL;
        }
        return 0;
}
static int vmlogrdr_register_cdev(dev_t dev)
{
        int rc;

        vmlogrdr_cdev = cdev_alloc();
        if (!vmlogrdr_cdev)
                return -ENOMEM;
        vmlogrdr_cdev->owner = THIS_MODULE;
        vmlogrdr_cdev->ops = &vmlogrdr_fops;
        vmlogrdr_cdev->dev = dev;
        rc = cdev_add(vmlogrdr_cdev, vmlogrdr_cdev->dev, MAXMINOR);
        if (!rc)
                return 0;

        /* cleanup: cdev is not fully registered, no cdev_del here! */
        kobject_put(&vmlogrdr_cdev->kobj);
        vmlogrdr_cdev = NULL;
        return rc;
}
static void vmlogrdr_cleanup(void)
{
        int i;

        if (vmlogrdr_cdev) {
                cdev_del(vmlogrdr_cdev);
                vmlogrdr_cdev = NULL;
        }
        for (i = 0; i < MAXMINOR; ++i) {
                vmlogrdr_unregister_device(&sys_ser[i]);
                free_page((unsigned long)sys_ser[i].buffer);
        }
        vmlogrdr_unregister_driver();
        if (vmlogrdr_major) {
                unregister_chrdev_region(MKDEV(vmlogrdr_major, 0), MAXMINOR);
                vmlogrdr_major = 0;
        }
}
static int __init vmlogrdr_init(void)
{
        int rc;
        int i;
        dev_t dev;

        if (!MACHINE_IS_VM) {
                pr_err("not running under VM, driver not loaded.\n");
                return -ENODEV;
        }

        recording_class_AB = vmlogrdr_get_recording_class_AB();

        rc = alloc_chrdev_region(&dev, 0, MAXMINOR, "vmlogrdr");
        if (rc)
                return rc;
        vmlogrdr_major = MAJOR(dev);

        rc = vmlogrdr_register_driver();
        if (rc)
                goto cleanup;

        for (i = 0; i < MAXMINOR; ++i) {
                sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL);
                if (!sys_ser[i].buffer) {
                        rc = -ENOMEM;
                        break;
                }
                sys_ser[i].current_position = sys_ser[i].buffer;
                rc = vmlogrdr_register_device(&sys_ser[i]);
                if (rc)
                        break;
        }
        if (rc)
                goto cleanup;

        rc = vmlogrdr_register_cdev(dev);
        if (rc)
                goto cleanup;
        return 0;

cleanup:
        vmlogrdr_cleanup();
        return rc;
}
static void __exit vmlogrdr_exit(void)
{
        vmlogrdr_cleanup();
}


module_init(vmlogrdr_init);
module_exit(vmlogrdr_exit);