2 * character device driver for reading z/VM system service records
5 * Copyright IBM Corp. 2004, 2009
6 * character device driver for reading z/VM system service records,
8 * Author(s): Xenia Tkatschow <xenia@us.ibm.com>
9 * Stefan Weinhuber <wein@de.ibm.com>
13 #define KMSG_COMPONENT "vmlogrdr"
14 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
16 #include <linux/module.h>
17 #include <linux/init.h>
18 #include <linux/slab.h>
19 #include <linux/errno.h>
20 #include <linux/types.h>
21 #include <linux/interrupt.h>
22 #include <linux/spinlock.h>
23 #include <linux/atomic.h>
24 #include <asm/uaccess.h>
25 #include <asm/cpcmd.h>
26 #include <asm/debug.h>
27 #include <asm/ebcdic.h>
28 #include <net/iucv/iucv.h>
29 #include <linux/kmod.h>
30 #include <linux/cdev.h>
31 #include <linux/device.h>
32 #include <linux/string.h>
35 ("(C) 2004 IBM Corporation by Xenia Tkatschow (xenia@us.ibm.com)\n"
36 " Stefan Weinhuber (wein@de.ibm.com)");
37 MODULE_DESCRIPTION ("Character device driver for reading z/VM "
38 "system service records.");
39 MODULE_LICENSE("GPL");
43 * The size of the buffer for iucv data transfer is one page,
44 * but in addition to the data we read from iucv we also
45 * place an integer and some characters into that buffer,
46 * so the maximum size for record data is a little less then
49 #define NET_BUFFER_SIZE (PAGE_SIZE - sizeof(int) - sizeof(FENCE))
52 * The elements that are concurrently accessed by bottom halves are
53 * connection_established, iucv_path_severed, local_interrupt_buffer
54 * and receive_ready. The first three can be protected by
55 * priv_lock. receive_ready is atomic, so it can be incremented and
56 * decremented without holding a lock.
57 * The variable dev_in_use needs to be protected by the lock, since
58 * it's a flag used by open to make sure that the device is opened only
59 * by one user at the same time.
/*
 * Per-minor state for one z/VM system service reader.
 * NOTE(review): this extract is truncated — fields referenced elsewhere in
 * the file (priv_lock, buffer, buffer_free, remaining, autorecording,
 * autopurge, minor_num) are not visible here; confirm against the full file.
 */
61 struct vmlogrdr_priv_t
{
/* IUCV system service name (e.g. "*LOGREC "), 8 chars, blank padded */
62 char system_service
[8];
/* name used for the device node / sysfs entry (e.g. "logrec") */
63 char internal_name
[8];
/* name used in CP RECORDING commands (e.g. "EREP") */
64 char recording_name
[8];
/* IUCV path to the service; allocated in vmlogrdr_open() */
65 struct iucv_path
*path
;
/* set to 1 by the path_complete callback; guarded by priv_lock */
66 int connection_established
;
/* set to 1 by the path_severed callback; guarded by priv_lock */
67 int iucv_path_severed
;
/* copy of the last pending IUCV message header (filled in bottom half) */
68 struct iucv_message local_interrupt_buffer
;
/* count of messages ready to receive; atomic, no lock needed */
69 atomic_t receive_ready
;
/* read cursor into the record buffer */
72 char * current_position
;
/* bytes of the current record still to be fetched from IUCV */
74 ulong residual_length
;
76 int dev_in_use
; /* 1: already opened, 0: not opened*/
/* device registered on the iucv bus */
78 struct device
*device
;
/* class device backing the /dev node */
79 struct device
*class_device
;
86 * File operation structure for vmlogrdr devices
/* forward declarations for the file_operations table below */
88 static int vmlogrdr_open(struct inode
*, struct file
*);
89 static int vmlogrdr_release(struct inode
*, struct file
*);
90 static ssize_t
vmlogrdr_read (struct file
*filp
, char __user
*data
,
91 size_t count
, loff_t
* ppos
);
/*
 * File operations for the vmlogrdr character devices (read-only driver:
 * only open/release/read are provided).
 * NOTE(review): extract is missing the closing of this initializer.
 */
93 static const struct file_operations vmlogrdr_fops
= {
95 .open
= vmlogrdr_open
,
96 .release
= vmlogrdr_release
,
97 .read
= vmlogrdr_read
,
/* forward declarations for the IUCV callback handlers registered below */
102 static void vmlogrdr_iucv_path_complete(struct iucv_path
*, u8
*ipuser
);
103 static void vmlogrdr_iucv_path_severed(struct iucv_path
*, u8
*ipuser
);
104 static void vmlogrdr_iucv_message_pending(struct iucv_path
*,
105 struct iucv_message
*);
/*
 * IUCV callbacks; these run in bottom-half context (see the locking
 * comment at the top of the file).
 * NOTE(review): extract is missing the closing of this initializer.
 */
108 static struct iucv_handler vmlogrdr_iucv_handler
= {
109 .path_complete
= vmlogrdr_iucv_path_complete
,
110 .path_severed
= vmlogrdr_iucv_path_severed
,
111 .message_pending
= vmlogrdr_iucv_message_pending
,
/* woken by the path_complete / path_severed callbacks (open sleeps here) */
115 static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue
);
/* woken when a new message is pending or the path is severed */
116 static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue
);
119 * pointer to system service private structure
120 * minor number 0 --> logrec
121 * minor number 1 --> account
122 * minor number 2 --> symptom
/*
 * Static per-minor state: minor 0 = logrec, 1 = account, 2 = symptom
 * (see the comment above). Each entry gets its own statically
 * initialized spinlock.
 * NOTE(review): extract is missing several initializer lines per entry
 * (e.g. autorecording/autopurge/minor_num) and the array terminator.
 */
125 static struct vmlogrdr_priv_t sys_ser
[] = {
126 { .system_service
= "*LOGREC ",
127 .internal_name
= "logrec",
128 .recording_name
= "EREP",
131 .priv_lock
= __SPIN_LOCK_UNLOCKED(sys_ser
[0].priv_lock
),
135 { .system_service
= "*ACCOUNT",
136 .internal_name
= "account",
137 .recording_name
= "ACCOUNT",
140 .priv_lock
= __SPIN_LOCK_UNLOCKED(sys_ser
[1].priv_lock
),
144 { .system_service
= "*SYMPTOM",
145 .internal_name
= "symptom",
146 .recording_name
= "SYMPTOM",
149 .priv_lock
= __SPIN_LOCK_UNLOCKED(sys_ser
[2].priv_lock
),
155 #define MAXMINOR (sizeof(sys_ser)/sizeof(struct vmlogrdr_priv_t))
/* marker appended after each complete record handed to user space */
157 static char FENCE
[] = {"EOR"};
/* dynamically allocated char major; set in vmlogrdr_init() */
158 static int vmlogrdr_major
= 0;
159 static struct cdev
*vmlogrdr_cdev
= NULL
;
/* nonzero if guest has privilege class A or B (enables the QID option) */
160 static int recording_class_AB
;
/*
 * IUCV path_complete callback: the connection to the system service is
 * established. Flag it under priv_lock and wake the opener sleeping on
 * conn_wait_queue.
 */
163 static void vmlogrdr_iucv_path_complete(struct iucv_path
*path
, u8
*ipuser
)
165 struct vmlogrdr_priv_t
* logptr
= path
->private;
167 spin_lock(&logptr
->priv_lock
);
168 logptr
->connection_established
= 1;
169 spin_unlock(&logptr
->priv_lock
);
170 wake_up(&conn_wait_queue
);
/*
 * IUCV path_severed callback: the peer dropped the connection. Report the
 * reason byte (from ipuser[8]), sever our end, update the flags under
 * priv_lock and wake both waiters.
 * NOTE(review): with pr_fmt defined as KMSG_COMPONENT ": " at the top of
 * the file, the explicit "vmlogrdr: " in this pr_err duplicates the prefix.
 */
174 static void vmlogrdr_iucv_path_severed(struct iucv_path
*path
, u8
*ipuser
)
176 struct vmlogrdr_priv_t
* logptr
= path
->private;
177 u8 reason
= (u8
) ipuser
[8];
179 pr_err("vmlogrdr: connection severed with reason %i\n", reason
);
/* sever our side of the path as well */
181 iucv_path_sever(path
, NULL
);
185 spin_lock(&logptr
->priv_lock
);
186 logptr
->connection_established
= 0;
187 logptr
->iucv_path_severed
= 1;
188 spin_unlock(&logptr
->priv_lock
);
/* wake the opener waiting for the connection outcome */
190 wake_up(&conn_wait_queue
);
191 /* just in case we're sleeping waiting for a record */
192 wake_up_interruptible(&read_wait_queue
);
/*
 * IUCV message_pending callback: a new record is available. Stash the
 * message header in local_interrupt_buffer, bump receive_ready and wake
 * any reader.
 */
196 static void vmlogrdr_iucv_message_pending(struct iucv_path
*path
,
197 struct iucv_message
*msg
)
199 struct vmlogrdr_priv_t
* logptr
= path
->private;
202 * This function is the bottom half so it should be quick.
203 * Copy the external interrupt data into our local eib and increment
206 spin_lock(&logptr
->priv_lock
);
207 memcpy(&logptr
->local_interrupt_buffer
, msg
, sizeof(*msg
));
208 atomic_inc(&logptr
->receive_ready
);
209 spin_unlock(&logptr
->priv_lock
);
210 wake_up_interruptible(&read_wait_queue
);
/*
 * Query CP whether this guest may use the RECORDING command with the QID
 * option (privilege class A or B). Parses the response of
 * "QUERY COMMAND RECORDING": returns 1 for "ANY" or a class list
 * containing A or B, 0 for "NONE" or otherwise.
 * NOTE(review): extract is missing several lines of this function
 * (declarations of len/tail/i, the early returns and final return).
 */
214 static int vmlogrdr_get_recording_class_AB(void)
216 static const char cp_command
[] = "QUERY COMMAND RECORDING ";
217 char cp_response
[80];
221 cpcmd(cp_command
, cp_response
, sizeof(cp_response
), NULL
);
222 len
= strnlen(cp_response
,sizeof(cp_response
));
/* the class list follows the '=' in the response */
224 tail
=strnchr(cp_response
,len
,'=');
228 if (!strncmp("ANY",tail
,3))
230 if (!strncmp("NONE",tail
,4))
233 * expect comma separated list of classes here, if one of them
234 * is A or B return 1 otherwise 0
236 for (i
=tail
-cp_response
; i
<len
; i
++)
237 if ( cp_response
[i
]=='A' || cp_response
[i
]=='B' )
/*
 * Turn CP recording for this service on (action == 1) or off (action == 0)
 * via the CP RECORDING command, optionally purging queued records.
 * Purge-before-on and purge-after-off are issued as separate commands
 * (see the comment below). Success is detected by "Command complete" in
 * the CP response.
 * NOTE(review): extract is missing lines here (the cp_command declaration,
 * qid_string arguments to the snprintf calls, and the return paths).
 */
243 static int vmlogrdr_recording(struct vmlogrdr_priv_t
* logptr
,
244 int action
, int purge
)
248 char cp_response
[160];
249 char *onoff
, *qid_string
;
252 onoff
= ((action
== 1) ? "ON" : "OFF");
253 qid_string
= ((recording_class_AB
== 1) ? " QID * " : "");
256 * The recording commands needs to be called with option QID
257 * for guests that have previlege classes A or B.
258 * Purging has to be done as separate step, because recording
259 * can't be switched on as long as records are on the queue.
260 * Doing both at the same time doesn't work.
262 if (purge
&& (action
== 1)) {
263 memset(cp_command
, 0x00, sizeof(cp_command
));
264 memset(cp_response
, 0x00, sizeof(cp_response
));
265 snprintf(cp_command
, sizeof(cp_command
),
266 "RECORDING %s PURGE %s",
267 logptr
->recording_name
,
269 cpcmd(cp_command
, cp_response
, sizeof(cp_response
), NULL
);
/* now issue the actual RECORDING ON/OFF command */
272 memset(cp_command
, 0x00, sizeof(cp_command
));
273 memset(cp_response
, 0x00, sizeof(cp_response
));
274 snprintf(cp_command
, sizeof(cp_command
), "RECORDING %s %s %s",
275 logptr
->recording_name
,
278 cpcmd(cp_command
, cp_response
, sizeof(cp_response
), NULL
);
279 /* The recording command will usually answer with 'Command complete'
280 * on success, but when the specific service was never connected
281 * before then there might be an additional informational message
282 * 'HCPCRC8072I Recording entry not found' before the
283 * 'Command complete'. So I use strstr rather then the strncmp.
285 if (strstr(cp_response
,"Command complete"))
290 * If we turn recording off, we have to purge any remaining records
291 * afterwards, as a large number of queued records may impact z/VM
294 if (purge
&& (action
== 0)) {
295 memset(cp_command
, 0x00, sizeof(cp_command
));
296 memset(cp_response
, 0x00, sizeof(cp_response
));
297 snprintf(cp_command
, sizeof(cp_command
),
298 "RECORDING %s PURGE %s",
299 logptr
->recording_name
,
301 cpcmd(cp_command
, cp_response
, sizeof(cp_response
), NULL
);
/*
 * open() handler: claim the minor exclusively (dev_in_use under
 * priv_lock), optionally start CP recording, allocate and connect the
 * IUCV path, then block on conn_wait_queue until the path_complete or
 * path_severed callback fires. Non-blocking opens are rejected.
 * On failure the tail undoes autorecording, frees the path and clears
 * dev_in_use.
 * NOTE(review): extract is missing lines here (local declarations of
 * dev_num/ret/connect_rc, several error returns and closing braces).
 * NOTE(review): pr_warning() is the deprecated spelling of pr_warn(),
 * and the "vmlogrdr: " text duplicates the pr_fmt prefix.
 */
308 static int vmlogrdr_open (struct inode
*inode
, struct file
*filp
)
311 struct vmlogrdr_priv_t
* logptr
= NULL
;
/* map the minor number to its sys_ser[] entry */
315 dev_num
= iminor(inode
);
316 if (dev_num
>= MAXMINOR
)
318 logptr
= &sys_ser
[dev_num
];
321 * only allow for blocking reads to be open
323 if (filp
->f_flags
& O_NONBLOCK
)
326 /* Besure this device hasn't already been opened */
327 spin_lock_bh(&logptr
->priv_lock
);
328 if (logptr
->dev_in_use
) {
329 spin_unlock_bh(&logptr
->priv_lock
);
/* claim the device and reset per-open state */
332 logptr
->dev_in_use
= 1;
333 logptr
->connection_established
= 0;
334 logptr
->iucv_path_severed
= 0;
335 atomic_set(&logptr
->receive_ready
, 0);
336 logptr
->buffer_free
= 1;
337 spin_unlock_bh(&logptr
->priv_lock
);
339 /* set the file options */
340 filp
->private_data
= logptr
;
342 /* start recording for this service*/
343 if (logptr
->autorecording
) {
344 ret
= vmlogrdr_recording(logptr
,1,logptr
->autopurge
);
346 pr_warning("vmlogrdr: failed to start "
347 "recording automatically\n");
350 /* create connection to the system service */
351 logptr
->path
= iucv_path_alloc(10, 0, GFP_KERNEL
);
354 connect_rc
= iucv_path_connect(logptr
->path
, &vmlogrdr_iucv_handler
,
355 logptr
->system_service
, NULL
, NULL
,
358 pr_err("vmlogrdr: iucv connection to %s "
359 "failed with rc %i \n",
360 logptr
->system_service
, connect_rc
);
364 /* We've issued the connect and now we must wait for a
365 * ConnectionComplete or ConnectinSevered Interrupt
366 * before we can continue to process.
368 wait_event(conn_wait_queue
, (logptr
->connection_established
)
369 || (logptr
->iucv_path_severed
));
370 if (logptr
->iucv_path_severed
)
372 nonseekable_open(inode
, filp
);
/* error path: undo autorecording, free the never-connected path */
376 if (logptr
->autorecording
)
377 vmlogrdr_recording(logptr
,0,logptr
->autopurge
);
379 kfree(logptr
->path
); /* kfree(NULL) is ok. */
382 logptr
->dev_in_use
= 0;
/*
 * release() handler: sever the IUCV path, optionally stop CP recording,
 * and release the exclusive-open claim (dev_in_use = 0).
 * NOTE(review): extract is missing lines here (ret declaration, path
 * kfree/NULLing and the return); pr_warning() is deprecated (pr_warn()).
 */
387 static int vmlogrdr_release (struct inode
*inode
, struct file
*filp
)
391 struct vmlogrdr_priv_t
* logptr
= filp
->private_data
;
393 iucv_path_sever(logptr
->path
, NULL
);
396 if (logptr
->autorecording
) {
397 ret
= vmlogrdr_recording(logptr
,0,logptr
->autopurge
);
399 pr_warning("vmlogrdr: failed to stop "
400 "recording automatically\n");
402 logptr
->dev_in_use
= 0;
/*
 * Pull the next chunk of the pending record from IUCV into priv->buffer.
 * For a fresh record the total length + sizeof(FENCE) is written into the
 * first int of the buffer; if residual_length is set we are fetching the
 * remainder of a record that exceeded NET_BUFFER_SIZE. When the record is
 * complete the FENCE marker is appended and receive_ready decremented.
 * Sets remaining/current_position/buffer_free for vmlogrdr_read().
 * NOTE(review): extract is missing lines (declarations of rc/buffer/temp,
 * the else branches, rc checking after iucv_message_receive, returns).
 */
408 static int vmlogrdr_receive_data(struct vmlogrdr_priv_t
*priv
)
411 /* we need to keep track of two data sizes here:
412 * The number of bytes we need to receive from iucv and
413 * the total number of bytes we actually write into the buffer.
415 int user_data_count
, iucv_data_count
;
418 if (atomic_read(&priv
->receive_ready
)) {
419 spin_lock_bh(&priv
->priv_lock
);
420 if (priv
->residual_length
){
421 /* receive second half of a record */
422 iucv_data_count
= priv
->residual_length
;
424 buffer
= priv
->buffer
;
426 /* receive a new record:
427 * We need to return the total length of the record
428 * + size of FENCE in the first 4 bytes of the buffer.
430 iucv_data_count
= priv
->local_interrupt_buffer
.length
;
431 user_data_count
= sizeof(int);
432 temp
= (int*)priv
->buffer
;
433 *temp
= iucv_data_count
+ sizeof(FENCE
);
434 buffer
= priv
->buffer
+ sizeof(int);
437 * If the record is bigger than our buffer, we receive only
438 * a part of it. We can get the rest later.
440 if (iucv_data_count
> NET_BUFFER_SIZE
)
441 iucv_data_count
= NET_BUFFER_SIZE
;
442 rc
= iucv_message_receive(priv
->path
,
443 &priv
->local_interrupt_buffer
,
444 0, buffer
, iucv_data_count
,
445 &priv
->residual_length
);
446 spin_unlock_bh(&priv
->priv_lock
);
447 /* An rc of 5 indicates that the record was bigger than
448 * the buffer, which is OK for us. A 9 indicates that the
449 * record was purged befor we could receive it.
454 atomic_set(&priv
->receive_ready
, 0);
/* buffer now holds data: publish it for the reader */
459 priv
->buffer_free
= 0;
460 user_data_count
+= iucv_data_count
;
461 priv
->current_position
= priv
->buffer
;
462 if (priv
->residual_length
== 0){
463 /* the whole record has been captured,
464 * now add the fence */
465 atomic_dec(&priv
->receive_ready
);
466 buffer
= priv
->buffer
+ user_data_count
;
467 memcpy(buffer
, FENCE
, sizeof(FENCE
));
468 user_data_count
+= sizeof(FENCE
);
470 priv
->remaining
= user_data_count
;
/*
 * read() handler: while the buffer is empty, try to receive data and
 * otherwise sleep interruptibly on read_wait_queue until a message is
 * ready. Then copy at most one record's remaining bytes to user space,
 * advancing current_position and freeing the buffer when the record has
 * been fully consumed.
 * NOTE(review): extract is missing lines (rc declaration, error returns
 * for receive/wait/copy_to_user failures and the final return of count).
 */
477 static ssize_t
vmlogrdr_read(struct file
*filp
, char __user
*data
,
478 size_t count
, loff_t
* ppos
)
481 struct vmlogrdr_priv_t
* priv
= filp
->private_data
;
483 while (priv
->buffer_free
) {
484 rc
= vmlogrdr_receive_data(priv
);
/* nothing there yet: sleep until the bottom half signals a message */
486 rc
= wait_event_interruptible(read_wait_queue
,
487 atomic_read(&priv
->receive_ready
));
492 /* copy only up to end of record */
493 if (count
> priv
->remaining
)
494 count
= priv
->remaining
;
496 if (copy_to_user(data
, priv
->current_position
, count
))
500 priv
->current_position
+= count
;
501 priv
->remaining
-= count
;
503 /* if all data has been transferred, set buffer free */
504 if (priv
->remaining
== 0)
505 priv
->buffer_free
= 1;
/*
 * sysfs "autopurge" attribute: store parses the user value (body missing
 * from this extract), show reports the current autopurge flag.
 */
510 static ssize_t
vmlogrdr_autopurge_store(struct device
* dev
,
511 struct device_attribute
*attr
,
512 const char * buf
, size_t count
)
514 struct vmlogrdr_priv_t
*priv
= dev_get_drvdata(dev
);
531 static ssize_t
vmlogrdr_autopurge_show(struct device
*dev
,
532 struct device_attribute
*attr
,
535 struct vmlogrdr_priv_t
*priv
= dev_get_drvdata(dev
);
536 return sprintf(buf
, "%u\n", priv
->autopurge
);
540 static DEVICE_ATTR(autopurge
, 0644, vmlogrdr_autopurge_show
,
541 vmlogrdr_autopurge_store
);
/*
 * sysfs "purge" attribute (write-only): issue a one-shot CP
 * "RECORDING ... PURGE" for this service, with QID only when the guest
 * has class A/B (see comment below).
 * NOTE(review): extract is missing lines (cp_command declaration, input
 * validation, the else keyword and the return of count).
 */
544 static ssize_t
vmlogrdr_purge_store(struct device
* dev
,
545 struct device_attribute
*attr
,
546 const char * buf
, size_t count
)
550 char cp_response
[80];
551 struct vmlogrdr_priv_t
*priv
= dev_get_drvdata(dev
);
556 memset(cp_command
, 0x00, sizeof(cp_command
));
557 memset(cp_response
, 0x00, sizeof(cp_response
));
560 * The recording command needs to be called with option QID
561 * for guests that have previlege classes A or B.
562 * Other guests will not recognize the command and we have to
563 * issue the same command without the QID parameter.
566 if (recording_class_AB
)
567 snprintf(cp_command
, sizeof(cp_command
),
568 "RECORDING %s PURGE QID * ",
569 priv
->recording_name
);
571 snprintf(cp_command
, sizeof(cp_command
),
572 "RECORDING %s PURGE ",
573 priv
->recording_name
);
575 cpcmd(cp_command
, cp_response
, sizeof(cp_response
), NULL
);
581 static DEVICE_ATTR(purge
, 0200, NULL
, vmlogrdr_purge_store
);
/*
 * sysfs "autorecording" attribute: store sets the flag to 0 or 1
 * (input parsing lines missing from this extract), show reports it.
 */
584 static ssize_t
vmlogrdr_autorecording_store(struct device
*dev
,
585 struct device_attribute
*attr
,
586 const char *buf
, size_t count
)
588 struct vmlogrdr_priv_t
*priv
= dev_get_drvdata(dev
);
593 priv
->autorecording
=0;
596 priv
->autorecording
=1;
605 static ssize_t
vmlogrdr_autorecording_show(struct device
*dev
,
606 struct device_attribute
*attr
,
609 struct vmlogrdr_priv_t
*priv
= dev_get_drvdata(dev
);
610 return sprintf(buf
, "%u\n", priv
->autorecording
);
614 static DEVICE_ATTR(autorecording
, 0644, vmlogrdr_autorecording_show
,
615 vmlogrdr_autorecording_store
);
/*
 * sysfs "recording" attribute (write-only): switch CP recording off (0)
 * or on (1) immediately, without purging.
 * NOTE(review): extract is missing the input-parsing branches and return.
 */
618 static ssize_t
vmlogrdr_recording_store(struct device
* dev
,
619 struct device_attribute
*attr
,
620 const char * buf
, size_t count
)
622 struct vmlogrdr_priv_t
*priv
= dev_get_drvdata(dev
);
627 ret
= vmlogrdr_recording(priv
,0,0);
630 ret
= vmlogrdr_recording(priv
,1,0);
643 static DEVICE_ATTR(recording
, 0200, NULL
, vmlogrdr_recording_store
);
/*
 * Driver-level sysfs attribute "recording_status": dump the raw CP
 * "QUERY RECORDING" response into the sysfs buffer (4096 bytes).
 * NOTE(review): extract is missing the buf parameter line and the return.
 */
646 static ssize_t
vmlogrdr_recording_status_show(struct device_driver
*driver
,
650 static const char cp_command
[] = "QUERY RECORDING ";
653 cpcmd(cp_command
, buf
, 4096, NULL
);
657 static DRIVER_ATTR(recording_status
, 0444, vmlogrdr_recording_status_show
,
/*
 * sysfs wiring: the driver-level group carries recording_status, the
 * per-device group carries autopurge/purge/autorecording/recording.
 * NOTE(review): extract is missing the NULL terminators and closing
 * braces of these arrays/initializers.
 */
659 static struct attribute
*vmlogrdr_drv_attrs
[] = {
660 &driver_attr_recording_status
.attr
,
663 static struct attribute_group vmlogrdr_drv_attr_group
= {
664 .attrs
= vmlogrdr_drv_attrs
,
666 static const struct attribute_group
*vmlogrdr_drv_attr_groups
[] = {
667 &vmlogrdr_drv_attr_group
,
671 static struct attribute
*vmlogrdr_attrs
[] = {
672 &dev_attr_autopurge
.attr
,
673 &dev_attr_purge
.attr
,
674 &dev_attr_autorecording
.attr
,
675 &dev_attr_recording
.attr
,
678 static struct attribute_group vmlogrdr_attr_group
= {
679 .attrs
= vmlogrdr_attrs
,
681 static const struct attribute_group
*vmlogrdr_attr_groups
[] = {
682 &vmlogrdr_attr_group
,
/*
 * PM prepare callback: refuse to suspend while the device is open
 * (dev_in_use checked under priv_lock).
 * NOTE(review): extract is missing the rc handling, dev_name argument
 * and return; the "vmlogrdr: " text duplicates the pr_fmt prefix.
 */
686 static int vmlogrdr_pm_prepare(struct device
*dev
)
689 struct vmlogrdr_priv_t
*priv
= dev_get_drvdata(dev
);
693 spin_lock_bh(&priv
->priv_lock
);
694 if (priv
->dev_in_use
)
696 spin_unlock_bh(&priv
->priv_lock
);
699 pr_err("vmlogrdr: device %s is busy. Refuse to suspend.\n",
/* PM ops: only a prepare hook (reject suspend while in use) */
705 static const struct dev_pm_ops vmlogrdr_pm_ops
= {
706 .prepare
= vmlogrdr_pm_prepare
,
/* device class backing /sys/class/vmlogrdr and the /dev nodes */
709 static struct class *vmlogrdr_class
;
/* driver object registered on the iucv bus (name/bus lines missing here) */
710 static struct device_driver vmlogrdr_driver
= {
713 .pm
= &vmlogrdr_pm_ops
,
714 .groups
= vmlogrdr_drv_attr_groups
,
/*
 * Register the IUCV handler, the bus driver and the device class, in
 * that order; the tail lines visible here are the unwind path executed
 * in reverse order on failure.
 * NOTE(review): extract is missing the error checks/gotos and returns.
 */
717 static int vmlogrdr_register_driver(void)
721 /* Register with iucv driver */
722 ret
= iucv_register(&vmlogrdr_iucv_handler
, 1);
726 ret
= driver_register(&vmlogrdr_driver
);
730 vmlogrdr_class
= class_create(THIS_MODULE
, "vmlogrdr");
731 if (IS_ERR(vmlogrdr_class
)) {
732 ret
= PTR_ERR(vmlogrdr_class
);
733 vmlogrdr_class
= NULL
;
/* unwind: undo driver and handler registration */
739 driver_unregister(&vmlogrdr_driver
);
741 iucv_unregister(&vmlogrdr_iucv_handler
, 1);
/* Tear down class, bus driver and IUCV handler (reverse of register). */
747 static void vmlogrdr_unregister_driver(void)
749 class_destroy(vmlogrdr_class
);
750 vmlogrdr_class
= NULL
;
751 driver_unregister(&vmlogrdr_driver
);
752 iucv_unregister(&vmlogrdr_iucv_handler
, 1);
/*
 * Allocate and register the struct device on the iucv bus for one minor,
 * then create the class device (the /dev node). On class-device failure
 * the bus device is unregistered again.
 * NOTE(review): extract is missing lines (dev/ret declarations, NULL
 * check after kzalloc, priv->device assignment, minor_num argument to
 * MKDEV, and the returns).
 */
756 static int vmlogrdr_register_device(struct vmlogrdr_priv_t
*priv
)
761 dev
= kzalloc(sizeof(struct device
), GFP_KERNEL
);
763 dev_set_name(dev
, "%s", priv
->internal_name
);
764 dev
->bus
= &iucv_bus
;
765 dev
->parent
= iucv_root
;
766 dev
->driver
= &vmlogrdr_driver
;
767 dev
->groups
= vmlogrdr_attr_groups
;
768 dev_set_drvdata(dev
, priv
);
770 * The release function could be called after the
771 * module has been unloaded. It's _only_ task is to
772 * free the struct. Therefore, we specify kfree()
773 * directly here. (Probably a little bit obfuscating
776 dev
->release
= (void (*)(struct device
*))kfree
;
779 ret
= device_register(dev
);
785 priv
->class_device
= device_create(vmlogrdr_class
, dev
,
786 MKDEV(vmlogrdr_major
,
788 priv
, "%s", dev_name(dev
));
789 if (IS_ERR(priv
->class_device
)) {
790 ret
= PTR_ERR(priv
->class_device
);
791 priv
->class_device
=NULL
;
/* roll back the bus device on class-device failure */
792 device_unregister(dev
);
/*
 * Destroy the class device and unregister the bus device for one minor.
 * NOTE(review): extract is missing the class_device/device NULLing and
 * the return.
 */
800 static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t
*priv
)
802 device_destroy(vmlogrdr_class
, MKDEV(vmlogrdr_major
, priv
->minor_num
));
803 if (priv
->device
!= NULL
) {
804 device_unregister(priv
->device
);
/*
 * Allocate and add the char device covering all MAXMINOR minors.
 * On cdev_add() failure the kobject is dropped instead of cdev_del()
 * (see the original comment) since registration never completed.
 * NOTE(review): extract is missing rc declaration, success/error returns
 * and the vmlogrdr_cdev = NULL reset.
 */
811 static int vmlogrdr_register_cdev(dev_t dev
)
814 vmlogrdr_cdev
= cdev_alloc();
815 if (!vmlogrdr_cdev
) {
818 vmlogrdr_cdev
->owner
= THIS_MODULE
;
819 vmlogrdr_cdev
->ops
= &vmlogrdr_fops
;
820 vmlogrdr_cdev
->dev
= dev
;
821 rc
= cdev_add(vmlogrdr_cdev
, vmlogrdr_cdev
->dev
, MAXMINOR
);
825 // cleanup: cdev is not fully registered, no cdev_del here!
826 kobject_put(&vmlogrdr_cdev
->kobj
);
/*
 * Common teardown used by module exit (and init failure): remove the
 * cdev, unregister each minor's device and free its page buffer, drop
 * driver/class/handler, and release the chrdev region if allocated.
 * NOTE(review): extract is missing the i declaration, the guard around
 * cdev_del and the vmlogrdr_major = 0 reset.
 */
832 static void vmlogrdr_cleanup(void)
837 cdev_del(vmlogrdr_cdev
);
840 for (i
=0; i
< MAXMINOR
; ++i
) {
841 vmlogrdr_unregister_device(&sys_ser
[i
]);
842 free_page((unsigned long)sys_ser
[i
].buffer
);
844 vmlogrdr_unregister_driver();
845 if (vmlogrdr_major
) {
846 unregister_chrdev_region(MKDEV(vmlogrdr_major
, 0), MAXMINOR
);
/*
 * Module init: require z/VM, probe the recording privilege class,
 * allocate the chrdev region, register driver/class/handler, then per
 * minor allocate a zeroed page buffer and register the device; finally
 * add the cdev.
 * NOTE(review): extract is missing lines (rc/i/dev declarations, error
 * checks and the cleanup/goto paths on failure, final return).
 */
852 static int __init
vmlogrdr_init(void)
858 if (! MACHINE_IS_VM
) {
859 pr_err("not running under VM, driver not loaded.\n");
863 recording_class_AB
= vmlogrdr_get_recording_class_AB();
865 rc
= alloc_chrdev_region(&dev
, 0, MAXMINOR
, "vmlogrdr");
868 vmlogrdr_major
= MAJOR(dev
);
870 rc
=vmlogrdr_register_driver();
874 for (i
=0; i
< MAXMINOR
; ++i
) {
/* one page per minor: record data + length int + FENCE */
875 sys_ser
[i
].buffer
= (char *) get_zeroed_page(GFP_KERNEL
);
876 if (!sys_ser
[i
].buffer
) {
880 sys_ser
[i
].current_position
= sys_ser
[i
].buffer
;
881 rc
=vmlogrdr_register_device(&sys_ser
[i
]);
888 rc
= vmlogrdr_register_cdev(dev
);
/* Module exit. NOTE(review): body missing from this extract — presumably
 * just calls vmlogrdr_cleanup(); confirm against the full file. */
899 static void __exit
vmlogrdr_exit(void)
906 module_init(vmlogrdr_init
);
907 module_exit(vmlogrdr_exit
);