/*
 * Data gathering module for Linux-VM Monitor Stream, Stage 1.
 * Collects misc. OS related data (CPU utilization, running processes).
 *
 * Copyright IBM Corp. 2003, 2006
 *
 * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */
#define KMSG_COMPONENT	"appldata"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/stat.h>
#include <asm/appldata.h>

#include "appldata.h"
#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
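/*
 * Example: with FSHIFT = 11 (FIXED_1 = 2048), a raw avenrun value of 2348
 * gives LOAD_INT(2348) = 1 and LOAD_FRAC(2348) = 14, i.e. a load of 1.14.
 */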
/*
 * OS data
 *
 * This is accessed as binary data by z/VM. If changes to it can't be avoided,
 * the structure version (product ID, see appldata_base.c) needs to be changed
 * as well and all documentation and z/VM applications using it must be
 * updated.
 *
 * The record layout is documented in the Linux for zSeries Device Drivers
 * book:
 * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml
 */
struct appldata_os_per_cpu {
	u32 per_cpu_user;	/* timer ticks spent in user mode */
	u32 per_cpu_nice;	/* ... spent with modified priority */
	u32 per_cpu_system;	/* ... spent in kernel mode */
	u32 per_cpu_idle;	/* ... spent in idle mode */

	u32 per_cpu_irq;	/* ... spent in interrupts */
	u32 per_cpu_softirq;	/* ... spent in softirqs */
	u32 per_cpu_iowait;	/* ... spent while waiting for I/O */

	/* New in modification level 01 */
	u32 per_cpu_steal;	/* ... stolen by hypervisor */
	u32 cpu_id;		/* number of this CPU */
} __attribute__((packed));
struct appldata_os_data {
	u64 timestamp;
	u32 sync_count_1;	/* after VM collected the record data, */
	u32 sync_count_2;	/* sync_count_1 and sync_count_2 should be the
				   same. If not, the record has been updated on
				   the Linux side while VM was collecting the
				   (possibly corrupt) data */

	u32 nr_cpus;		/* number of (virtual) CPUs */
	u32 per_cpu_size;	/* size of the per-cpu data struct */
	u32 cpu_offset;		/* offset of the first per-cpu data struct */

	u32 nr_running;		/* number of runnable threads */
	u32 nr_threads;		/* number of threads */
	u32 avenrun[3];		/* average nr. of running processes during */
				/* the last 1, 5 and 15 minutes */

	u32 nr_iowait;		/* number of blocked threads
				   (waiting for I/O) */

	/* per cpu data */
	struct appldata_os_per_cpu os_cpu[0];
} __attribute__((packed));
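/*
 * Consistency check on the reader side (illustrative sketch only, not part
 * of this module): a consumer of the binary record compares the two sync
 * counters in the data it collected and discards the record if they differ:
 *
 *	memcpy(buf, rec, rec_len);
 *	if (buf->sync_count_1 != buf->sync_count_2)
 *		discard or re-read, the record was updated during collection
 *
 * Per-CPU entries start at cpu_offset and are per_cpu_size bytes each, so
 * entry n lives at (char *) buf + cpu_offset + n * per_cpu_size.
 */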
static struct appldata_os_data *appldata_os_data;
static struct appldata_ops ops = {
	.name	   = "os",
	.record_nr = APPLDATA_RECORD_OS_ID,
	.owner	   = THIS_MODULE,
	.mod_lvl   = {0xF0, 0xF1},		/* EBCDIC "01" */
};
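/*
 * The remaining fields (data, size, callback) are filled in by
 * appldata_os_init() and appldata_get_os_data(); the appldata base module
 * calls the callback once per sampling interval to refresh the record.
 */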
/*
 * appldata_get_os_data()
 *
 * gather OS data
 */
static void appldata_get_os_data(void *data)
{
	int i, j, rc;
	struct appldata_os_data *os_data;
	unsigned int new_size;

	os_data = data;
	os_data->sync_count_1++;

	os_data->nr_threads = nr_threads;
	os_data->nr_running = nr_running();
	os_data->nr_iowait = nr_iowait();
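	/*
	 * avenrun[] is fixed-point with FSHIFT fractional bits; adding
	 * FIXED_1/200 pre-rounds the value to the nearest 1/100, as
	 * /proc/loadavg does before applying LOAD_INT/LOAD_FRAC.
	 */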
	os_data->avenrun[0] = avenrun[0] + (FIXED_1/200);
	os_data->avenrun[1] = avenrun[1] + (FIXED_1/200);
	os_data->avenrun[2] = avenrun[2] + (FIXED_1/200);
	j = 0;
	for_each_online_cpu(i) {
		os_data->os_cpu[j].per_cpu_user =
			nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_USER]);
		os_data->os_cpu[j].per_cpu_nice =
			nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_NICE]);
		os_data->os_cpu[j].per_cpu_system =
			nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]);
		os_data->os_cpu[j].per_cpu_idle =
			nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IDLE]);
		os_data->os_cpu[j].per_cpu_irq =
			nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IRQ]);
		os_data->os_cpu[j].per_cpu_softirq =
			nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]);
		os_data->os_cpu[j].per_cpu_iowait =
			nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IOWAIT]);
		os_data->os_cpu[j].per_cpu_steal =
			nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_STEAL]);
		os_data->os_cpu[j].cpu_id = i;
		j++;
	}

	os_data->nr_cpus = j;
	new_size = sizeof(struct appldata_os_data) +
		   (os_data->nr_cpus * sizeof(struct appldata_os_per_cpu));
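	/*
	 * The record length depends on the number of online CPUs. If it
	 * changed since the last sample, re-register the interval record
	 * with z/VM using the new length and stop the collection that was
	 * started with the old one.
	 */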
	if (ops.size != new_size) {
		if (ops.active) {
			rc = appldata_diag(APPLDATA_RECORD_OS_ID,
					   APPLDATA_START_INTERVAL_REC,
					   (unsigned long) ops.data, new_size,
					   ops.mod_lvl);
			if (rc != 0)
				pr_err("Starting a new OS data collection "
				       "failed with rc=%d\n", rc);

			rc = appldata_diag(APPLDATA_RECORD_OS_ID,
					   APPLDATA_STOP_REC,
					   (unsigned long) ops.data, ops.size,
					   ops.mod_lvl);
			if (rc != 0)
				pr_err("Stopping a faulty OS data "
				       "collection failed with rc=%d\n", rc);
		}
		ops.size = new_size;
	}
	os_data->timestamp = get_tod_clock();
	os_data->sync_count_2++;
}
/*
 * appldata_os_init()
 *
 * init data, register ops
 */
static int __init appldata_os_init(void)
{
	int rc, max_size;

	max_size = sizeof(struct appldata_os_data) +
		   (num_possible_cpus() * sizeof(struct appldata_os_per_cpu));
	if (max_size > APPLDATA_MAX_REC_SIZE) {
		pr_err("Maximum OS record size %i exceeds the maximum "
		       "record size %i\n", max_size, APPLDATA_MAX_REC_SIZE);
		rc = -ENOMEM;
		goto out;
	}
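	/*
	 * GFP_DMA keeps the record below 2 GB (31-bit addressable); this is
	 * assumed to be required by the z/VM monitor stream interface.
	 */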
	appldata_os_data = kzalloc(max_size, GFP_KERNEL | GFP_DMA);
	if (appldata_os_data == NULL) {
		rc = -ENOMEM;
		goto out;
	}
	appldata_os_data->per_cpu_size = sizeof(struct appldata_os_per_cpu);
	appldata_os_data->cpu_offset   = offsetof(struct appldata_os_data,
						  os_cpu);

	ops.data = appldata_os_data;
	ops.callback = &appldata_get_os_data;
	rc = appldata_register_ops(&ops);
	if (rc != 0)
		kfree(appldata_os_data);
out:
	return rc;
}
/*
 * appldata_os_exit()
 *
 * unregister ops
 */
static void __exit appldata_os_exit(void)
{
	appldata_unregister_ops(&ops);
	kfree(appldata_os_data);
}
module_init(appldata_os_init);
module_exit(appldata_os_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Gerald Schaefer");
MODULE_DESCRIPTION("Linux-VM Monitor Stream, OS statistics");