kernel/sched/clock.c
/*
 * sched_clock for unstable cpu clocks
 *
 *  Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 *  Updates and enhancements:
 *  Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
 *
 * Based on code by:
 *   Ingo Molnar <mingo@redhat.com>
 *   Guillaume Chazarain <guichaz@gmail.com>
 *
 *
 * What:
 *
 * cpu_clock(i) provides a fast (execution time) high resolution
 * clock with bounded drift between CPUs. The value of cpu_clock(i)
 * is monotonic for constant i. The timestamp returned is in nanoseconds.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 *
 * There is no strict promise about the base, although it tends to start
 * at 0 on boot (but people really shouldn't rely on that).
 *
 * cpu_clock(i)       -- can be used from any context, including NMI.
 * local_clock()      -- is cpu_clock() on the current cpu.
 *
 * sched_clock_cpu(i)
 *
 * How:
 *
 * The implementation either uses sched_clock() when
 * !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, which means in that case the
 * sched_clock() is assumed to provide these properties (mostly it means
 * the architecture provides a globally synchronized highres time source).
 *
 * Otherwise it tries to create a semi stable clock from a mixture of other
 * clocks, including:
 *
 *  - GTOD (clock monotonic)
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use GTOD as base and use sched_clock() deltas to improve resolution. The
 * deltas are filtered to provide monotonicity and to keep the result within
 * an expected window.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (TSC gets stopped).
 */
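
/*
 * Illustrative example of the filter window (values made up, assuming HZ=1000
 * so TICK_NSEC is roughly 1,000,000 ns): if the last tick recorded
 * tick_gtod = 5,000,000 and sched_clock() has since advanced 1,700,000 ns
 * past tick_raw, the candidate value 6,700,000 lies beyond
 * tick_gtod + TICK_NSEC and is clamped down to 6,000,000; a candidate below
 * the previously returned clock would likewise be raised back up to it.
 * See sched_clock_local() below.
 */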

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/static_key.h>
#include <linux/workqueue.h>

/*
 * Scheduler clock - returns current time in nanosec units.
 * This is the default implementation.
 * Architectures and sub-architectures can override this.
 */
unsigned long long __attribute__((weak)) sched_clock(void)
{
	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
					* (NSEC_PER_SEC / HZ);
}
EXPORT_SYMBOL_GPL(sched_clock);
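
/*
 * The weak jiffies-based sched_clock() above only advances once per timer
 * tick, i.e. in steps of NSEC_PER_SEC / HZ nanoseconds (4 ms with HZ=250),
 * which is why architectures with a high resolution counter (e.g. the TSC)
 * override it.
 */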

__read_mostly int sched_clock_running;

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static struct static_key __sched_clock_stable = STATIC_KEY_INIT;
static int __sched_clock_stable_early;

int sched_clock_stable(void)
{
	return static_key_false(&__sched_clock_stable);
}

static void __set_sched_clock_stable(void)
{
	if (!sched_clock_stable())
		static_key_slow_inc(&__sched_clock_stable);
}

void set_sched_clock_stable(void)
{
	__sched_clock_stable_early = 1;

	smp_mb(); /* matches sched_clock_init() */

	if (!sched_clock_running)
		return;

	__set_sched_clock_stable();
}

static void __clear_sched_clock_stable(struct work_struct *work)
{
	/* XXX worry about clock continuity */
	if (sched_clock_stable())
		static_key_slow_dec(&__sched_clock_stable);
}

static DECLARE_WORK(sched_clock_work, __clear_sched_clock_stable);

void clear_sched_clock_stable(void)
{
	__sched_clock_stable_early = 0;

	smp_mb(); /* matches sched_clock_init() */

	if (!sched_clock_running)
		return;

	schedule_work(&sched_clock_work);
}
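
/*
 * Note: clearing the stable flag above is deferred to a workqueue because
 * static_key_slow_dec() may sleep, so it cannot be called directly from
 * callers that are not allowed to sleep.
 */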

struct sched_clock_data {
	u64			tick_raw;
	u64			tick_gtod;
	u64			clock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
	return &__get_cpu_var(sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
	return &per_cpu(sched_clock_data, cpu);
}

void sched_clock_init(void)
{
	u64 ktime_now = ktime_to_ns(ktime_get());
	int cpu;

	for_each_possible_cpu(cpu) {
		struct sched_clock_data *scd = cpu_sdc(cpu);

		scd->tick_raw = 0;
		scd->tick_gtod = ktime_now;
		scd->clock = ktime_now;
	}

	sched_clock_running = 1;

	/*
	 * Ensure that it is impossible to not do a static_key update.
	 *
	 * Either {set,clear}_sched_clock_stable() must see sched_clock_running
	 * and do the update, or we must see their __sched_clock_stable_early
	 * and do the update, or both.
	 */
	smp_mb(); /* matches {set,clear}_sched_clock_stable() */

	if (__sched_clock_stable_early)
		__set_sched_clock_stable();
	else
		__clear_sched_clock_stable(NULL);
}

/*
 * min, max except they take wrapping into account
 */

static inline u64 wrap_min(u64 x, u64 y)
{
	return (s64)(x - y) < 0 ? x : y;
}

static inline u64 wrap_max(u64 x, u64 y)
{
	return (s64)(x - y) > 0 ? x : y;
}
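
/*
 * In both helpers above, the (s64) cast makes the comparison wrap-safe: the
 * smaller/larger value is picked correctly even if the u64 counters have
 * wrapped, provided the two values are within 2^63 ns of each other.
 */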

/*
 * update the percpu scd from the raw @now value
 *
 *  - filter out backward motion
 *  - use the GTOD tick value to create a window to filter crazy TSC values
 */
static u64 sched_clock_local(struct sched_clock_data *scd)
{
	u64 now, clock, old_clock, min_clock, max_clock;
	s64 delta;

again:
	now = sched_clock();
	delta = now - scd->tick_raw;
	if (unlikely(delta < 0))
		delta = 0;

	old_clock = scd->clock;

	/*
	 * scd->clock = clamp(scd->tick_gtod + delta,
	 *		      max(scd->tick_gtod, scd->clock),
	 *		      scd->tick_gtod + TICK_NSEC);
	 */

	clock = scd->tick_gtod + delta;
	min_clock = wrap_max(scd->tick_gtod, old_clock);
	max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC);

	clock = wrap_max(clock, min_clock);
	clock = wrap_min(clock, max_clock);
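
	/*
	 * Publish the new value with a lock-free cmpxchg: if an interrupt or
	 * NMI updated scd->clock while we were computing, retry with fresh
	 * values rather than publish a stale result.
	 */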
	if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock)
		goto again;

	return clock;
}
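
/*
 * Read a remote CPU's clock. This also couples the remote clock with the
 * local one: whichever of the two lags is bumped forward to the larger
 * value, so that repeated cross-CPU readings keep moving forward.
 */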
static u64 sched_clock_remote(struct sched_clock_data *scd)
{
	struct sched_clock_data *my_scd = this_scd();
	u64 this_clock, remote_clock;
	u64 *ptr, old_val, val;

#if BITS_PER_LONG != 64
again:
	/*
	 * Careful here: The local and the remote clock values need to
	 * be read out atomically as we need to compare the values and
	 * then update either the local or the remote side. So the
	 * cmpxchg64 below only protects one readout.
	 *
	 * We must reread via sched_clock_local() in the retry case on
	 * 32-bit as an NMI could use sched_clock_local() via the
	 * tracer and hit between the readout of
	 * the low 32-bit and the high 32-bit portion.
	 */
	this_clock = sched_clock_local(my_scd);
	/*
	 * We must enforce atomic readout on 32-bit, otherwise the
	 * update on the remote CPU can hit in between the readout of
	 * the low 32-bit and the high 32-bit portion.
	 */
	remote_clock = cmpxchg64(&scd->clock, 0, 0);
#else
	/*
	 * On 64-bit the read of [my]scd->clock is atomic versus the
	 * update, so we can avoid the above 32-bit dance.
	 */
	sched_clock_local(my_scd);
again:
	this_clock = my_scd->clock;
	remote_clock = scd->clock;
#endif

	/*
	 * Use the opportunity that we have both clock values in hand
	 * to couple the two clocks: we take the
	 * larger time as the latest time for both
	 * runqueues. (this creates monotonic movement)
	 */
	if (likely((s64)(remote_clock - this_clock) < 0)) {
		ptr = &scd->clock;
		old_val = remote_clock;
		val = this_clock;
	} else {
		/*
		 * Should be rare, but possible:
		 */
		ptr = &my_scd->clock;
		old_val = this_clock;
		val = remote_clock;
	}

	if (cmpxchg64(ptr, old_val, val) != old_val)
		goto again;

	return val;
}

/*
 * Similar to cpu_clock(), but requires local IRQs to be disabled.
 *
 * See cpu_clock().
 */
u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd;
	u64 clock;

	if (sched_clock_stable())
		return sched_clock();

	if (unlikely(!sched_clock_running))
		return 0ull;

	preempt_disable();
	scd = cpu_sdc(cpu);

	if (cpu != smp_processor_id())
		clock = sched_clock_remote(scd);
	else
		clock = sched_clock_local(scd);
	preempt_enable();

	return clock;
}
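
/*
 * Called from the timer tick path with IRQs disabled: record fresh
 * sched_clock() and GTOD anchors so the per-tick filter window used by
 * sched_clock_local() keeps tracking wall time.
 */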
void sched_clock_tick(void)
{
	struct sched_clock_data *scd;
	u64 now, now_gtod;

	if (sched_clock_stable())
		return;

	if (unlikely(!sched_clock_running))
		return;

	WARN_ON_ONCE(!irqs_disabled());

	scd = this_scd();
	now_gtod = ktime_to_ns(ktime_get());
	now = sched_clock();

	scd->tick_raw = now;
	scd->tick_gtod = now_gtod;
	sched_clock_local(scd);
}

/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
	sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);

/*
 * We just idled delta nanoseconds (called with irqs disabled):
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
	if (timekeeping_suspended)
		return;

	sched_clock_tick();
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

/*
 * As outlined at the top, provides a fast, high resolution, nanosecond
 * time source that is monotonic per cpu argument and has bounded drift
 * between cpus.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 */
u64 cpu_clock(int cpu)
{
	if (!sched_clock_stable())
		return sched_clock_cpu(cpu);

	return sched_clock();
}

/*
 * Similar to cpu_clock() for the current cpu. Time will only be observed
 * to be monotonic if care is taken to only compare timestamps taken on the
 * same CPU.
 *
 * See cpu_clock().
 */
u64 local_clock(void)
{
	if (!sched_clock_stable())
		return sched_clock_cpu(raw_smp_processor_id());

	return sched_clock();
}
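
/*
 * Illustrative usage sketch (not from this file): local_clock() is intended
 * for short, same-CPU duration measurements, e.g.
 *
 *	u64 t0 = local_clock();
 *	do_something();
 *	u64 delta_ns = local_clock() - t0;
 *
 * where do_something() is a stand-in for the code being timed and the caller
 * either stays on one CPU or accepts the bounded cross-CPU drift.
 */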

#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

void sched_clock_init(void)
{
	sched_clock_running = 1;
}

u64 sched_clock_cpu(int cpu)
{
	if (unlikely(!sched_clock_running))
		return 0;

	return sched_clock();
}

u64 cpu_clock(int cpu)
{
	return sched_clock();
}

u64 local_clock(void)
{
	return sched_clock();
}

#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

EXPORT_SYMBOL_GPL(cpu_clock);
EXPORT_SYMBOL_GPL(local_clock);