// SPDX-License-Identifier: GPL-2.0
/*
 * character device driver for reading z/VM system service records
 *
 * Copyright IBM Corp. 2004, 2009
 * character device driver for reading z/VM system service records,
 * Author(s): Xenia Tkatschow <xenia@us.ibm.com>
 *	      Stefan Weinhuber <wein@de.ibm.com>
 */
#define KMSG_COMPONENT "vmlogrdr"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
#include <asm/cpcmd.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <net/iucv/iucv.h>
#include <linux/kmod.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/string.h>
36 ("(C) 2004 IBM Corporation by Xenia Tkatschow (xenia@us.ibm.com)\n"
37 " Stefan Weinhuber (wein@de.ibm.com)");
38 MODULE_DESCRIPTION ("Character device driver for reading z/VM "
39 "system service records.");
40 MODULE_LICENSE("GPL");
/*
 * The size of the buffer for iucv data transfer is one page,
 * but in addition to the data we read from iucv we also
 * place an integer and some characters into that buffer,
 * so the maximum size for record data is a little less than
 * one page.
 */
#define NET_BUFFER_SIZE	(PAGE_SIZE - sizeof(int) - sizeof(FENCE))
/*
 * The elements that are concurrently accessed by bottom halves are
 * connection_established, iucv_path_severed, local_interrupt_buffer
 * and receive_ready. The first three can be protected by
 * priv_lock.  receive_ready is atomic, so it can be incremented and
 * decremented without holding a lock.
 * The variable dev_in_use needs to be protected by the lock, since
 * it's a flag used by open to make sure that the device is opened only
 * by one user at the same time.
 */
struct vmlogrdr_priv_t {
	char system_service[8];
	char internal_name[8];
	char recording_name[8];
	struct iucv_path *path;
	int connection_established;
	int iucv_path_severed;
	struct iucv_message local_interrupt_buffer;
	atomic_t receive_ready;
	int minor_num;
	char * buffer;
	char * current_position;
	int remaining;
	ulong residual_length;
	int buffer_free;
	int dev_in_use; /* 1: already opened, 0: not opened */
	spinlock_t priv_lock;
	struct device *device;
	struct device *class_device;
	int autorecording;
	int autopurge;
};
/*
 * File operation structure for vmlogrdr devices
 */
static int vmlogrdr_open(struct inode *, struct file *);
static int vmlogrdr_release(struct inode *, struct file *);
static ssize_t vmlogrdr_read (struct file *filp, char __user *data,
			      size_t count, loff_t * ppos);
static const struct file_operations vmlogrdr_fops = {
	.owner   = THIS_MODULE,
	.open    = vmlogrdr_open,
	.release = vmlogrdr_release,
	.read    = vmlogrdr_read,
};
static void vmlogrdr_iucv_path_complete(struct iucv_path *, u8 *ipuser);
static void vmlogrdr_iucv_path_severed(struct iucv_path *, u8 *ipuser);
static void vmlogrdr_iucv_message_pending(struct iucv_path *,
					  struct iucv_message *);
static struct iucv_handler vmlogrdr_iucv_handler = {
	.path_complete	 = vmlogrdr_iucv_path_complete,
	.path_severed	 = vmlogrdr_iucv_path_severed,
	.message_pending = vmlogrdr_iucv_message_pending,
};
static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue);
static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);
/*
 * pointer to system service private structure
 * minor number 0 --> logrec
 * minor number 1 --> account
 * minor number 2 --> symptom
 */
static struct vmlogrdr_priv_t sys_ser[] = {
	{ .system_service = "*LOGREC ",
	  .internal_name  = "logrec",
	  .recording_name = "EREP",
	  .minor_num	  = 0,
	  .priv_lock	  = __SPIN_LOCK_UNLOCKED(sys_ser[0].priv_lock),
	},
	{ .system_service = "*ACCOUNT",
	  .internal_name  = "account",
	  .recording_name = "ACCOUNT",
	  .minor_num	  = 1,
	  .priv_lock	  = __SPIN_LOCK_UNLOCKED(sys_ser[1].priv_lock),
	},
	{ .system_service = "*SYMPTOM",
	  .internal_name  = "symptom",
	  .recording_name = "SYMPTOM",
	  .minor_num	  = 2,
	  .priv_lock	  = __SPIN_LOCK_UNLOCKED(sys_ser[2].priv_lock),
	}
};
#define MAXMINOR	(sizeof(sys_ser)/sizeof(struct vmlogrdr_priv_t))
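/*
 * "EOR" fence that is appended after each complete record so that a
 * reader can detect record boundaries in the byte stream; its size is
 * also accounted for in the length word written at the start of the
 * buffer (see vmlogrdr_receive_data()).
 */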
static char FENCE[] = {"EOR"};
static int vmlogrdr_major = 0;
static struct cdev *vmlogrdr_cdev = NULL;
static int recording_class_AB;
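/*
 * IUCV callback: the path to the z/VM system service has been
 * established; note the fact under priv_lock and wake up the task
 * waiting in vmlogrdr_open().
 */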
static void vmlogrdr_iucv_path_complete(struct iucv_path *path, u8 *ipuser)
{
	struct vmlogrdr_priv_t * logptr = path->private;

	spin_lock(&logptr->priv_lock);
	logptr->connection_established = 1;
	spin_unlock(&logptr->priv_lock);
	wake_up(&conn_wait_queue);
}
static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 *ipuser)
{
	struct vmlogrdr_priv_t * logptr = path->private;
	u8 reason = (u8) ipuser[8];

	pr_err("vmlogrdr: connection severed with reason %i\n", reason);

	iucv_path_sever(path, NULL);

	spin_lock(&logptr->priv_lock);
	logptr->connection_established = 0;
	logptr->iucv_path_severed = 1;
	spin_unlock(&logptr->priv_lock);

	wake_up(&conn_wait_queue);
	/* just in case we're sleeping waiting for a record */
	wake_up_interruptible(&read_wait_queue);
}
static void vmlogrdr_iucv_message_pending(struct iucv_path *path,
					  struct iucv_message *msg)
{
	struct vmlogrdr_priv_t * logptr = path->private;

	/*
	 * This function is the bottom half so it should be quick.
	 * Copy the external interrupt data into our local eib and increment
	 * receive_ready.
	 */
	spin_lock(&logptr->priv_lock);
	memcpy(&logptr->local_interrupt_buffer, msg, sizeof(*msg));
	atomic_inc(&logptr->receive_ready);
	spin_unlock(&logptr->priv_lock);
	wake_up_interruptible(&read_wait_queue);
}
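/*
 * Issue "QUERY COMMAND RECORDING" via CP and parse the response to find
 * out whether this guest holds privilege class A or B, which decides
 * whether the QID option may be used with the RECORDING command below.
 * Returns 1 if class A or B is available, 0 otherwise.
 */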
static int vmlogrdr_get_recording_class_AB(void)
{
	static const char cp_command[] = "QUERY COMMAND RECORDING ";
	char cp_response[80];
	char *tail;
	int len, i;

	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
	len = strnlen(cp_response, sizeof(cp_response));
	tail = strnchr(cp_response, len, '=');
	if (!tail)
		return 0;
	tail++;
	if (!strncmp("ANY", tail, 3))
		return 1;
	if (!strncmp("NONE", tail, 4))
		return 0;
	/*
	 * expect comma separated list of classes here, if one of them
	 * is A or B return 1 otherwise 0
	 */
	for (i = tail - cp_response; i < len; i++)
		if (cp_response[i] == 'A' || cp_response[i] == 'B')
			return 1;
	return 0;
}
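/*
 * Switch CP recording for the given system service on (action == 1) or
 * off (action == 0) by issuing the RECORDING command via CP; if purge is
 * set, any queued records are purged as well.  Returns 0 when CP answers
 * with 'Command complete', -EIO otherwise.
 */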
static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
			      int action, int purge)
{
	char cp_command[80];
	char cp_response[160];
	char *onoff, *qid_string;
	int rc;

	onoff = ((action == 1) ? "ON" : "OFF");
	qid_string = ((recording_class_AB == 1) ? " QID * " : "");

	/*
	 * The recording command needs to be called with option QID
	 * for guests that have privilege classes A or B.
	 * Purging has to be done as a separate step, because recording
	 * can't be switched on as long as records are on the queue.
	 * Doing both at the same time doesn't work.
	 */
	if (purge && (action == 1)) {
		memset(cp_command, 0x00, sizeof(cp_command));
		memset(cp_response, 0x00, sizeof(cp_response));
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE %s",
			 logptr->recording_name,
			 qid_string);
		cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
	}

	memset(cp_command, 0x00, sizeof(cp_command));
	memset(cp_response, 0x00, sizeof(cp_response));
	snprintf(cp_command, sizeof(cp_command), "RECORDING %s %s %s",
		 logptr->recording_name,
		 onoff,
		 qid_string);
	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
	/* The recording command will usually answer with 'Command complete'
	 * on success, but when the specific service was never connected
	 * before then there might be an additional informational message
	 * 'HCPCRC8072I Recording entry not found' before the
	 * 'Command complete'. So I use strstr rather than strncmp.
	 */
	if (strstr(cp_response, "Command complete"))
		rc = 0;
	else
		rc = -EIO;
	/*
	 * If we turn recording off, we have to purge any remaining records
	 * afterwards, as a large number of queued records may impact z/VM
	 * performance.
	 */
	if (purge && (action == 0)) {
		memset(cp_command, 0x00, sizeof(cp_command));
		memset(cp_response, 0x00, sizeof(cp_response));
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE %s",
			 logptr->recording_name,
			 qid_string);
		cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
	}

	return rc;
}
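/*
 * open() for one of the vmlogrdr devices: mark the device busy, start
 * recording if autorecording is set, allocate an IUCV path and connect
 * it to the corresponding system service, then wait until the connection
 * is either established or severed.
 */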
static int vmlogrdr_open (struct inode *inode, struct file *filp)
{
	int dev_num = 0;
	struct vmlogrdr_priv_t * logptr = NULL;
	int connect_rc = 0;
	int ret;

	dev_num = iminor(inode);
	if (dev_num >= MAXMINOR)
		return -ENODEV;
	logptr = &sys_ser[dev_num];

	/*
	 * only allow for blocking reads to be open
	 */
	if (filp->f_flags & O_NONBLOCK)
		return -EOPNOTSUPP;

	/* Be sure this device hasn't already been opened */
	spin_lock_bh(&logptr->priv_lock);
	if (logptr->dev_in_use) {
		spin_unlock_bh(&logptr->priv_lock);
		return -EBUSY;
	}
	logptr->dev_in_use = 1;
	logptr->connection_established = 0;
	logptr->iucv_path_severed = 0;
	atomic_set(&logptr->receive_ready, 0);
	logptr->buffer_free = 1;
	spin_unlock_bh(&logptr->priv_lock);

	/* set the file options */
	filp->private_data = logptr;

	/* start recording for this service */
	if (logptr->autorecording) {
		ret = vmlogrdr_recording(logptr, 1, logptr->autopurge);
		if (ret)
			pr_warn("vmlogrdr: failed to start recording automatically\n");
	}

	/* create connection to the system service */
	logptr->path = iucv_path_alloc(10, 0, GFP_KERNEL);
	if (!logptr->path)
		goto out_dev;
	connect_rc = iucv_path_connect(logptr->path, &vmlogrdr_iucv_handler,
				       logptr->system_service, NULL, NULL,
				       logptr);
	if (connect_rc) {
		pr_err("vmlogrdr: iucv connection to %s "
		       "failed with rc %i\n",
		       logptr->system_service, connect_rc);
		goto out_path;
	}

	/* We've issued the connect and now we must wait for a
	 * ConnectionComplete or ConnectionSevered Interrupt
	 * before we can continue to process.
	 */
	wait_event(conn_wait_queue, (logptr->connection_established)
		   || (logptr->iucv_path_severed));
	if (logptr->iucv_path_severed)
		goto out_record;
	nonseekable_open(inode, filp);
	return 0;

out_record:
	if (logptr->autorecording)
		vmlogrdr_recording(logptr, 0, logptr->autopurge);
out_path:
	kfree(logptr->path);	/* kfree(NULL) is ok. */
	logptr->path = NULL;
out_dev:
	logptr->dev_in_use = 0;
	return -EIO;
}
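/*
 * release(): sever the IUCV path again, stop recording if it was started
 * automatically on open, and mark the device as free.
 */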
static int vmlogrdr_release (struct inode *inode, struct file *filp)
{
	int ret;

	struct vmlogrdr_priv_t * logptr = filp->private_data;

	iucv_path_sever(logptr->path, NULL);

	if (logptr->autorecording) {
		ret = vmlogrdr_recording(logptr, 0, logptr->autopurge);
		if (ret)
			pr_warn("vmlogrdr: failed to stop recording automatically\n");
	}
	logptr->dev_in_use = 0;

	return 0;
}
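/*
 * Pull the next chunk of record data from IUCV into priv->buffer.  For a
 * new record the total length (record + FENCE) is written into the first
 * four bytes of the buffer; if the record does not fit, the rest is
 * fetched on a later call via priv->residual_length.  Once a record is
 * complete the "EOR" fence is appended.  Returns 0 when data is ready in
 * the buffer, non-zero otherwise.
 */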
static int vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv)
{
	int rc, *temp;
	/* we need to keep track of two data sizes here:
	 * The number of bytes we need to receive from iucv and
	 * the total number of bytes we actually write into the buffer.
	 */
	int user_data_count, iucv_data_count;
	char * buffer;

	if (atomic_read(&priv->receive_ready)) {
		spin_lock_bh(&priv->priv_lock);
		if (priv->residual_length) {
			/* receive second half of a record */
			iucv_data_count = priv->residual_length;
			user_data_count = 0;
			buffer = priv->buffer;
		} else {
			/* receive a new record:
			 * We need to return the total length of the record
			 * + size of FENCE in the first 4 bytes of the buffer.
			 */
			iucv_data_count = priv->local_interrupt_buffer.length;
			user_data_count = sizeof(int);
			temp = (int *)priv->buffer;
			*temp = iucv_data_count + sizeof(FENCE);
			buffer = priv->buffer + sizeof(int);
		}
		/*
		 * If the record is bigger than our buffer, we receive only
		 * a part of it. We can get the rest later.
		 */
		if (iucv_data_count > NET_BUFFER_SIZE)
			iucv_data_count = NET_BUFFER_SIZE;
		rc = iucv_message_receive(priv->path,
					  &priv->local_interrupt_buffer,
					  0, buffer, iucv_data_count,
					  &priv->residual_length);
		spin_unlock_bh(&priv->priv_lock);
		/* An rc of 5 indicates that the record was bigger than
		 * the buffer, which is OK for us. A 9 indicates that the
		 * record was purged before we could receive it.
		 */
		if (rc == 5)
			rc = 0;
		if (rc == 9)
			atomic_set(&priv->receive_ready, 0);
	} else {
		rc = 1;
	}
	if (!rc) {
		priv->buffer_free = 0;
		user_data_count += iucv_data_count;
		priv->current_position = priv->buffer;
		if (priv->residual_length == 0) {
			/* the whole record has been captured,
			 * now add the fence */
			atomic_dec(&priv->receive_ready);
			buffer = priv->buffer + user_data_count;
			memcpy(buffer, FENCE, sizeof(FENCE));
			user_data_count += sizeof(FENCE);
		}
		priv->remaining = user_data_count;
	}

	return rc;
}
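/*
 * read() on a vmlogrdr device: block (interruptibly) until a record is
 * available, then copy at most the rest of the current record (including
 * the trailing "EOR" fence) to userspace.
 */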
static ssize_t vmlogrdr_read(struct file *filp, char __user *data,
			     size_t count, loff_t * ppos)
{
	int rc;
	struct vmlogrdr_priv_t * priv = filp->private_data;

	while (priv->buffer_free) {
		rc = vmlogrdr_receive_data(priv);
		if (rc) {
			rc = wait_event_interruptible(read_wait_queue,
					atomic_read(&priv->receive_ready));
			if (rc)
				return rc;
		}
	}
	/* copy only up to end of record */
	if (count > priv->remaining)
		count = priv->remaining;

	if (copy_to_user(data, priv->current_position, count))
		return -EFAULT;

	*ppos += count;
	priv->current_position += count;
	priv->remaining -= count;

	/* if all data has been transferred, set buffer free */
	if (priv->remaining == 0)
		priv->buffer_free = 1;

	return count;
}
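/*
 * sysfs attributes of the per-service devices: "autopurge" and
 * "autorecording" (0/1 switches used by open/release), "purge"
 * (write '1' to purge queued records now) and "recording" (write
 * '0'/'1' to switch CP recording off/on).
 */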
static ssize_t vmlogrdr_autopurge_store(struct device * dev,
					struct device_attribute *attr,
					const char * buf, size_t count)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	ssize_t ret = count;

	switch (buf[0]) {
	case '0':
		priv->autopurge = 0;
		break;
	case '1':
		priv->autopurge = 1;
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}


static ssize_t vmlogrdr_autopurge_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	return sprintf(buf, "%u\n", priv->autopurge);
}


static DEVICE_ATTR(autopurge, 0644, vmlogrdr_autopurge_show,
		   vmlogrdr_autopurge_store);
static ssize_t vmlogrdr_purge_store(struct device * dev,
				    struct device_attribute *attr,
				    const char * buf, size_t count)
{
	char cp_command[80];
	char cp_response[80];
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);

	if (buf[0] != '1')
		return -EINVAL;

	memset(cp_command, 0x00, sizeof(cp_command));
	memset(cp_response, 0x00, sizeof(cp_response));

	/*
	 * The recording command needs to be called with option QID
	 * for guests that have privilege classes A or B.
	 * Other guests will not recognize the command and we have to
	 * issue the same command without the QID parameter.
	 */
	if (recording_class_AB)
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE QID * ",
			 priv->recording_name);
	else
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE ",
			 priv->recording_name);

	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);

	return count;
}


static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store);
static ssize_t vmlogrdr_autorecording_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	ssize_t ret = count;

	switch (buf[0]) {
	case '0':
		priv->autorecording = 0;
		break;
	case '1':
		priv->autorecording = 1;
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}


static ssize_t vmlogrdr_autorecording_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	return sprintf(buf, "%u\n", priv->autorecording);
}


static DEVICE_ATTR(autorecording, 0644, vmlogrdr_autorecording_show,
		   vmlogrdr_autorecording_store);
static ssize_t vmlogrdr_recording_store(struct device * dev,
					struct device_attribute *attr,
					const char * buf, size_t count)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	ssize_t ret;

	switch (buf[0]) {
	case '0':
		ret = vmlogrdr_recording(priv, 0, 0);
		break;
	case '1':
		ret = vmlogrdr_recording(priv, 1, 0);
		break;
	default:
		ret = -EINVAL;
	}
	if (ret)
		return ret;
	else
		return count;
}


static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store);
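/*
 * Driver-level sysfs attribute "recording_status": returns the raw
 * output of the CP command "QUERY RECORDING" in the sysfs buffer.
 */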
static ssize_t recording_status_show(struct device_driver *driver, char *buf)
{
	static const char cp_command[] = "QUERY RECORDING ";
	int len;

	cpcmd(cp_command, buf, 4096, NULL);
	len = strlen(buf);
	return len;
}
static DRIVER_ATTR_RO(recording_status);
static struct attribute *vmlogrdr_drv_attrs[] = {
	&driver_attr_recording_status.attr,
	NULL,
};
static struct attribute_group vmlogrdr_drv_attr_group = {
	.attrs = vmlogrdr_drv_attrs,
};
static const struct attribute_group *vmlogrdr_drv_attr_groups[] = {
	&vmlogrdr_drv_attr_group,
	NULL,
};

static struct attribute *vmlogrdr_attrs[] = {
	&dev_attr_autopurge.attr,
	&dev_attr_purge.attr,
	&dev_attr_autorecording.attr,
	&dev_attr_recording.attr,
	NULL,
};
static struct attribute_group vmlogrdr_attr_group = {
	.attrs = vmlogrdr_attrs,
};
static const struct attribute_group *vmlogrdr_attr_groups[] = {
	&vmlogrdr_attr_group,
	NULL,
};
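/*
 * Power-management prepare callback: refuse to suspend while one of the
 * log devices is open (dev_in_use set).
 */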
static int vmlogrdr_pm_prepare(struct device *dev)
{
	int rc;
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);

	rc = 0;
	if (priv) {
		spin_lock_bh(&priv->priv_lock);
		if (priv->dev_in_use)
			rc = -EBUSY;
		spin_unlock_bh(&priv->priv_lock);
	}
	if (rc)
		pr_err("vmlogrdr: device %s is busy. Refuse to suspend.\n",
		       dev_name(dev));
	return rc;
}
static const struct dev_pm_ops vmlogrdr_pm_ops = {
	.prepare = vmlogrdr_pm_prepare,
};


static struct class *vmlogrdr_class;
static struct device_driver vmlogrdr_driver = {
	.name = "vmlogrdr",
	.bus  = &iucv_bus,
	.pm = &vmlogrdr_pm_ops,
	.groups = vmlogrdr_drv_attr_groups,
};
static int vmlogrdr_register_driver(void)
{
	int ret;

	/* Register with iucv driver */
	ret = iucv_register(&vmlogrdr_iucv_handler, 1);
	if (ret)
		goto out;

	ret = driver_register(&vmlogrdr_driver);
	if (ret)
		goto out_iucv;

	vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr");
	if (IS_ERR(vmlogrdr_class)) {
		ret = PTR_ERR(vmlogrdr_class);
		vmlogrdr_class = NULL;
		goto out_driver;
	}
	return 0;

out_driver:
	driver_unregister(&vmlogrdr_driver);
out_iucv:
	iucv_unregister(&vmlogrdr_iucv_handler, 1);
out:
	return ret;
}
static void vmlogrdr_unregister_driver(void)
{
	class_destroy(vmlogrdr_class);
	vmlogrdr_class = NULL;
	driver_unregister(&vmlogrdr_driver);
	iucv_unregister(&vmlogrdr_iucv_handler, 1);
}
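/*
 * Create the struct device on the iucv bus for one system service and
 * the corresponding class device carrying the (vmlogrdr_major, minor_num)
 * character device number.
 */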
static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
{
	struct device *dev;
	int ret;

	dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev_set_name(dev, "%s", priv->internal_name);
	dev->bus = &iucv_bus;
	dev->parent = iucv_root;
	dev->driver = &vmlogrdr_driver;
	dev->groups = vmlogrdr_attr_groups;
	dev_set_drvdata(dev, priv);
	/*
	 * The release function could be called after the
	 * module has been unloaded. Its _only_ task is to
	 * free the struct. Therefore, we specify kfree()
	 * directly here. (Probably a little bit obfuscating
	 * but legitimate ...).
	 */
	dev->release = (void (*)(struct device *))kfree;

	ret = device_register(dev);
	if (ret) {
		put_device(dev);
		return ret;
	}

	priv->class_device = device_create(vmlogrdr_class, dev,
					   MKDEV(vmlogrdr_major,
						 priv->minor_num),
					   priv, "%s", dev_name(dev));
	if (IS_ERR(priv->class_device)) {
		ret = PTR_ERR(priv->class_device);
		priv->class_device = NULL;
		device_unregister(dev);
		return ret;
	}
	priv->device = dev;
	return 0;
}
static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv)
{
	device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num));
	if (priv->device != NULL) {
		device_unregister(priv->device);
		priv->device = NULL;
	}
	return 0;
}
static int vmlogrdr_register_cdev(dev_t dev)
{
	int rc = 0;

	vmlogrdr_cdev = cdev_alloc();
	if (!vmlogrdr_cdev)
		return -ENOMEM;
	vmlogrdr_cdev->owner = THIS_MODULE;
	vmlogrdr_cdev->ops = &vmlogrdr_fops;
	rc = cdev_add(vmlogrdr_cdev, dev, MAXMINOR);
	if (!rc)
		return 0;

	// cleanup: cdev is not fully registered, no cdev_del here!
	kobject_put(&vmlogrdr_cdev->kobj);
	vmlogrdr_cdev = NULL;
	return rc;
}
static void vmlogrdr_cleanup(void)
{
	int i;

	if (vmlogrdr_cdev) {
		cdev_del(vmlogrdr_cdev);
		vmlogrdr_cdev = NULL;
	}
	for (i = 0; i < MAXMINOR; ++i) {
		vmlogrdr_unregister_device(&sys_ser[i]);
		free_page((unsigned long)sys_ser[i].buffer);
	}
	vmlogrdr_unregister_driver();
	if (vmlogrdr_major) {
		unregister_chrdev_region(MKDEV(vmlogrdr_major, 0), MAXMINOR);
		vmlogrdr_major = 0;
	}
}
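/*
 * Module init: determine the privilege classes of the guest, allocate a
 * character device region, register the driver, one device per system
 * service and the cdev; vmlogrdr_cleanup() undoes all of this on any
 * failure and on module exit.
 */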
static int __init vmlogrdr_init(void)
{
	int rc;
	int i;
	dev_t dev;

	if (! MACHINE_IS_VM) {
		pr_err("not running under VM, driver not loaded.\n");
		return -ENODEV;
	}

	recording_class_AB = vmlogrdr_get_recording_class_AB();

	rc = alloc_chrdev_region(&dev, 0, MAXMINOR, "vmlogrdr");
	if (rc)
		return rc;
	vmlogrdr_major = MAJOR(dev);

	rc = vmlogrdr_register_driver();
	if (rc)
		goto cleanup;

	for (i = 0; i < MAXMINOR; ++i) {
		sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!sys_ser[i].buffer) {
			rc = -ENOMEM;
			break;
		}
		sys_ser[i].current_position = sys_ser[i].buffer;
		rc = vmlogrdr_register_device(&sys_ser[i]);
		if (rc)
			break;
	}
	if (rc)
		goto cleanup;

	rc = vmlogrdr_register_cdev(dev);
	if (rc)
		goto cleanup;
	return 0;

cleanup:
	vmlogrdr_cleanup();
	return rc;
}
static void __exit vmlogrdr_exit(void)
{
	vmlogrdr_cleanup();
}


module_init(vmlogrdr_init);
module_exit(vmlogrdr_exit);