/* kernel/trace/trace_benchmark.c */
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/trace_clock.h>

#define CREATE_TRACE_POINTS
#include "trace_benchmark.h"

static struct task_struct *bm_event_thread;

static char bm_str[BENCHMARK_EVENT_STRLEN] = "START";
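
/*
 * Running statistics for the benchmark (descriptive note, not in the
 * original file): bm_total and bm_totalsq accumulate the sum and the
 * sum of squares of the measured deltas, bm_cnt counts samples, and
 * bm_first/bm_last/bm_min/bm_max track individual measurements.
 * bm_avg, bm_std and bm_stddev hold the last reported average,
 * standard deviation and variance.
 */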
static u64 bm_total;
static u64 bm_totalsq;
static u64 bm_last;
static u64 bm_max;
static u64 bm_min;
static u64 bm_first;
static u64 bm_cnt;
static u64 bm_stddev;

static unsigned int bm_avg;
static unsigned int bm_std;

/*
 * This gets called in a loop recording the time it took to write
 * the tracepoint. What it writes is the time statistics of the last
 * tracepoint write. As there is nothing to write the first time
 * it simply writes "START". As the first write is cold cache and
 * the rest is hot, we save off that time in bm_first and it is
 * reported as "first", which is shown in the second write to the
 * tracepoint. The "first" field is written within the statistics from
 * then on but never changes.
 */
static void trace_do_benchmark(void)
{
	u64 start;
	u64 stop;
	u64 delta;
	u64 stddev;
	u64 seed;
	u64 last_seed;
	unsigned int avg;
	unsigned int std = 0;

	/* Only run if the tracepoint is actually active */
	if (!trace_benchmark_event_enabled() || !tracing_is_on())
		return;
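
	/*
	 * Descriptive note (not in the original file): the timestamps
	 * below are taken with interrupts disabled so that the measured
	 * delta covers only the tracepoint write itself and is not
	 * inflated by an interrupt landing between the two
	 * trace_clock_local() reads.
	 */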
	local_irq_disable();
	start = trace_clock_local();
	trace_benchmark_event(bm_str);
	stop = trace_clock_local();
	local_irq_enable();

	bm_cnt++;

	delta = stop - start;

	/*
	 * The first read is cold cached, keep it separate from the
	 * other calculations.
	 */
	if (bm_cnt == 1) {
		bm_first = delta;
		scnprintf(bm_str, BENCHMARK_EVENT_STRLEN,
			  "first=%llu [COLD CACHED]", bm_first);
		return;
	}

	bm_last = delta;

	if (delta > bm_max)
		bm_max = delta;
	if (!bm_min || delta < bm_min)
		bm_min = delta;

	/*
	 * When bm_cnt is greater than UINT_MAX, it breaks the statistics
	 * accounting. Freeze the statistics when that happens.
	 * We should have enough data for the avg and stddev anyway.
	 */
	if (bm_cnt > UINT_MAX) {
		scnprintf(bm_str, BENCHMARK_EVENT_STRLEN,
			  "last=%llu first=%llu max=%llu min=%llu ** avg=%u std=%d std^2=%lld",
			  bm_last, bm_first, bm_max, bm_min, bm_avg, bm_std, bm_stddev);
		return;
	}

	bm_total += delta;
	bm_totalsq += delta * delta;

	if (bm_cnt > 1) {
		/*
		 * Compute the sample variance from the running sums:
		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
		 */
		stddev = (u64)bm_cnt * bm_totalsq - bm_total * bm_total;
		do_div(stddev, (u32)bm_cnt);
		do_div(stddev, (u32)bm_cnt - 1);
	} else
		stddev = 0;
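
	/*
	 * Worked example of the variance formula above (illustration
	 * only, not from this file): for samples 2, 4 and 6 we have
	 * n = 3, \Sum x_i = 12 and \Sum (x_i)^2 = 56, so
	 * (3 * 56 - 12 * 12) / (3 * 2) = 24 / 6 = 4, i.e. s^2 = 4 and
	 * the standard deviation is 2.
	 */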

	delta = bm_total;
	do_div(delta, bm_cnt);
	avg = delta;

	if (stddev > 0) {
		int i = 0;
		/*
		 * stddev is the square of standard deviation but
		 * we want the actual number. Use the average
		 * as our seed to find the std.
		 *
		 * The next try is:
		 *  x = (x + N/x) / 2
		 *
		 * Where N is the squared number to find the square
		 * root of.
		 */
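		/*
		 * Worked example of the iteration (illustration only,
		 * not from this file): for N = 100 starting from x = 6,
		 * integer division gives (6 + 100/6) / 2 = 11, then
		 * (11 + 100/11) / 2 = 10, then (10 + 100/10) / 2 = 10,
		 * at which point the value stops changing and the loop
		 * below terminates.
		 */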
		seed = avg;
		do {
			last_seed = seed;
			seed = stddev;
			if (!last_seed)
				break;
			do_div(seed, last_seed);
			seed += last_seed;
			do_div(seed, 2);
		} while (i++ < 10 && last_seed != seed);

		std = seed;
	}

	scnprintf(bm_str, BENCHMARK_EVENT_STRLEN,
		  "last=%llu first=%llu max=%llu min=%llu avg=%u std=%d std^2=%lld",
		  bm_last, bm_first, bm_max, bm_min, avg, std, stddev);

	bm_std = std;
	bm_avg = avg;
	bm_stddev = stddev;
}

static int benchmark_event_kthread(void *arg)
{
	/* sleep a bit to make sure the tracepoint gets activated */
	msleep(100);

	while (!kthread_should_stop()) {

		trace_do_benchmark();

		/*
		 * We don't go to sleep, but let others
		 * run as well.
		 */
		cond_resched();
	}

	return 0;
}

/*
 * When the benchmark tracepoint is enabled, it calls this
 * function and the thread that calls the tracepoint is created.
 */
void trace_benchmark_reg(void)
{
	bm_event_thread = kthread_run(benchmark_event_kthread,
				      NULL, "event_benchmark");
	WARN_ON(!bm_event_thread);
}

/*
 * When the benchmark tracepoint is disabled, it calls this
 * function and the thread that calls the tracepoint is deleted
 * and all the numbers are reset.
 */
void trace_benchmark_unreg(void)
{
	if (!bm_event_thread)
		return;

	kthread_stop(bm_event_thread);

	strcpy(bm_str, "START");
	bm_total = 0;
	bm_totalsq = 0;
	bm_last = 0;
	bm_max = 0;
	bm_min = 0;
	bm_cnt = 0;
	/* These don't need to be reset but reset them anyway */
	bm_first = 0;
	bm_std = 0;
	bm_avg = 0;
	bm_stddev = 0;
}
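
/*
 * Usage sketch (not part of this file; the tracefs mount point is an
 * assumption based on the usual default): the statistics built above
 * are emitted through the benchmark_event tracepoint, which can be
 * enabled and read from user space roughly like this:
 *
 *   echo 1 > /sys/kernel/tracing/events/benchmark/benchmark_event/enable
 *   cat /sys/kernel/tracing/trace_pipe
 *   echo 0 > /sys/kernel/tracing/events/benchmark/benchmark_event/enable
 *
 * Per the comments above, enabling the event creates the
 * event_benchmark kthread via trace_benchmark_reg(), and disabling it
 * stops the thread and resets the counters via trace_benchmark_unreg().
 */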