/**
 * @file oprof.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/oprofile.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/time.h>
#include <linux/mutex.h>

#include "oprof.h"
#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprofile_stats.h"

struct oprofile_operations oprofile_ops;

unsigned long oprofile_started;
unsigned long oprofile_backtrace_depth;
static unsigned long is_setup;
static DEFINE_MUTEX(start_mutex);
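
/*
 * start_mutex serializes every state transition below (setup, start,
 * stop, shutdown, and parameter updates), so the oprofile_started and
 * is_setup flags need no locking of their own.
 */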

/* timer
   0 - use performance monitoring hardware if available
   1 - use the timer int mechanism regardless
 */
static int timer = 0;
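
/*
 * Allocate the per-cpu and event buffers, run the architecture's setup
 * hook, and start task/mmap synchronisation. On failure, tear-down
 * walks the same steps in reverse via the out* labels.
 */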
int oprofile_setup(void)
{
        int err;

        mutex_lock(&start_mutex);

        if ((err = alloc_cpu_buffers()))
                goto out;

        if ((err = alloc_event_buffer()))
                goto out1;

        if (oprofile_ops.setup && (err = oprofile_ops.setup()))
                goto out2;

        /* Note even though this starts part of the
         * profiling overhead, it's necessary to prevent
         * us missing task deaths and eventually oopsing
         * when trying to process the event buffer.
         */
        if (oprofile_ops.sync_start) {
                int sync_ret = oprofile_ops.sync_start();

                switch (sync_ret) {
                case 0:
                        goto post_sync;
                case 1:
                        goto do_generic;
                case -1:
                default:
                        /* don't report success when sync_start failed */
                        err = -EINVAL;
                        goto out3;
                }
        }
do_generic:
        if ((err = sync_start()))
                goto out3;

post_sync:
        is_setup = 1;
        mutex_unlock(&start_mutex);
        return 0;

out3:
        if (oprofile_ops.shutdown)
                oprofile_ops.shutdown();
out2:
        free_event_buffer();
out1:
        free_cpu_buffers();
out:
        mutex_unlock(&start_mutex);
        return err;
}
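
/*
 * Event multiplexing: when the user configures more events than the
 * hardware has counters, a delayed work item periodically rotates the
 * active counter set so every event still gets sampled.
 */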
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

static void switch_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(switch_work, switch_worker);

static void start_switch_worker(void)
{
        if (oprofile_ops.switch_events)
                schedule_delayed_work(&switch_work, oprofile_time_slice);
}

static void stop_switch_worker(void)
{
        cancel_delayed_work_sync(&switch_work);
}

static void switch_worker(struct work_struct *work)
{
        if (oprofile_ops.switch_events())
                return;

        atomic_inc(&oprofile_stats.multiplex_counter);
        start_switch_worker();
}

/* User input is in ms; convert to jiffies */
int oprofile_set_timeout(unsigned long val_msec)
{
        int err = 0;
        unsigned long time_slice;

        mutex_lock(&start_mutex);

        if (oprofile_started) {
                err = -EBUSY;
                goto out;
        }

        if (!oprofile_ops.switch_events) {
                err = -EINVAL;
                goto out;
        }

        time_slice = msecs_to_jiffies(val_msec);
        if (time_slice == MAX_JIFFY_OFFSET) {
                err = -EINVAL;
                goto out;
        }

        oprofile_time_slice = time_slice;

out:
        mutex_unlock(&start_mutex);
        return err;
}
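
/*
 * The interval reaches this function through an oprofilefs control file
 * (time_slice in the stock tree). Assuming the usual /dev/oprofile
 * mount point, an illustrative invocation would be:
 *
 *   echo 10 > /dev/oprofile/time_slice
 */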

#else

static inline void start_switch_worker(void) { }
static inline void stop_switch_worker(void) { }

#endif

/* Actually start profiling (echo 1 > /dev/oprofile/enable) */
int oprofile_start(void)
{
        int err = -EINVAL;

        mutex_lock(&start_mutex);

        if (!is_setup)
                goto out;

        err = 0;

        if (oprofile_started)
                goto out;

        oprofile_reset_stats();

        if ((err = oprofile_ops.start()))
                goto out;

        start_switch_worker();

        oprofile_started = 1;
out:
        mutex_unlock(&start_mutex);
        return err;
}

/* echo 0 > /dev/oprofile/enable */
void oprofile_stop(void)
{
        mutex_lock(&start_mutex);
        if (!oprofile_started)
                goto out;
        oprofile_ops.stop();
        oprofile_started = 0;

        stop_switch_worker();

        /* wake up the daemon to read what remains */
        wake_up_buffer_waiter();
out:
        mutex_unlock(&start_mutex);
}
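
/*
 * Full tear-down: mirrors oprofile_setup() in reverse order (stop the
 * sync layer, run the architecture shutdown hook, then free both
 * buffer layers).
 */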
void oprofile_shutdown(void)
{
        mutex_lock(&start_mutex);
        if (oprofile_ops.sync_stop) {
                int sync_ret = oprofile_ops.sync_stop();

                switch (sync_ret) {
                case 0:
                        goto post_sync;
                case 1:
                        goto do_generic;
                default:
                        goto post_sync;
                }
        }
do_generic:
        sync_stop();
post_sync:
        if (oprofile_ops.shutdown)
                oprofile_ops.shutdown();
        is_setup = 0;
        free_event_buffer();
        free_cpu_buffers();
        mutex_unlock(&start_mutex);
}
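
/*
 * Generic setter used by the oprofilefs control files (such as the
 * backtrace depth): values may only change while profiling is stopped,
 * hence the -EBUSY default.
 */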
int oprofile_set_ulong(unsigned long *addr, unsigned long val)
{
        int err = -EBUSY;

        mutex_lock(&start_mutex);
        if (!oprofile_started) {
                *addr = val;
                err = 0;
        }
        mutex_unlock(&start_mutex);

        return err;
}

static int timer_mode;
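
/*
 * Initialisation falls back through three sampling sources: the
 * architecture's hardware support first, then the NMI timer, and
 * finally the plain timer mode (taken directly when the oprofile.timer
 * parameter forces it).
 */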
static int __init oprofile_init(void)
{
        int err;

        /* always init architecture to setup backtrace support */
        timer_mode = 0;
        err = oprofile_arch_init(&oprofile_ops);
        if (!err) {
                if (!timer && !oprofilefs_register())
                        return 0;
                oprofile_arch_exit();
        }

        /* setup timer mode: */
        timer_mode = 1;
        /* no nmi timer mode if oprofile.timer is set */
        if (timer || op_nmi_timer_init(&oprofile_ops)) {
                err = oprofile_timer_init(&oprofile_ops);
                if (err)
                        return err;
        }

        return oprofilefs_register();
}

static void __exit oprofile_exit(void)
{
        oprofilefs_unregister();
        if (!timer_mode)
                oprofile_arch_exit();
}

module_init(oprofile_init);
module_exit(oprofile_exit);

module_param_named(timer, timer, int, 0644);
MODULE_PARM_DESC(timer, "force use of timer interrupt");
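
/*
 * The parameter can be set at load time (modprobe oprofile timer=1) or,
 * when the driver is built in, on the kernel command line as
 * oprofile.timer=1.
 */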

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Levon <levon@movementarian.org>");
MODULE_DESCRIPTION("OProfile system profiler");