Merge tag 'block-6.13-20242901' of git://git.kernel.dk/linux
[drm/drm-misc.git] / include / linux / sched / clock.h
blob196f0ca351a258c22cf1033e7fe536144b4a4a36
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_SCHED_CLOCK_H
3 #define _LINUX_SCHED_CLOCK_H
5 #include <linux/smp.h>
7 /*
8 * Do not use outside of architecture code which knows its limitations.
10 * sched_clock() has no promise of monotonicity or bounded drift between
11 * CPUs, use (which you should not) requires disabling IRQs.
13 * Please use one of the three interfaces below.
15 extern u64 sched_clock(void);
17 #if defined(CONFIG_ARCH_WANTS_NO_INSTR) || defined(CONFIG_GENERIC_SCHED_CLOCK)
18 extern u64 sched_clock_noinstr(void);
19 #else
20 static __always_inline u64 sched_clock_noinstr(void)
22 return sched_clock();
24 #endif
27 * See the comment in kernel/sched/clock.c
29 extern u64 running_clock(void);
30 extern u64 sched_clock_cpu(int cpu);
33 extern void sched_clock_init(void);
35 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
36 static inline void sched_clock_tick(void)
40 static inline void clear_sched_clock_stable(void)
44 static inline void sched_clock_idle_sleep_event(void)
48 static inline void sched_clock_idle_wakeup_event(void)
52 static inline u64 cpu_clock(int cpu)
54 return sched_clock();
57 static __always_inline u64 local_clock_noinstr(void)
59 return sched_clock_noinstr();
62 static __always_inline u64 local_clock(void)
64 return sched_clock();
66 #else
67 extern int sched_clock_stable(void);
68 extern void clear_sched_clock_stable(void);
71 * When sched_clock_stable(), __sched_clock_offset provides the offset
72 * between local_clock() and sched_clock().
74 extern u64 __sched_clock_offset;
76 extern void sched_clock_tick(void);
77 extern void sched_clock_tick_stable(void);
78 extern void sched_clock_idle_sleep_event(void);
79 extern void sched_clock_idle_wakeup_event(void);
82 * As outlined in clock.c, provides a fast, high resolution, nanosecond
83 * time source that is monotonic per cpu argument and has bounded drift
84 * between cpus.
86 * ######################### BIG FAT WARNING ##########################
87 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
88 * # go backwards !! #
89 * ####################################################################
91 static inline u64 cpu_clock(int cpu)
93 return sched_clock_cpu(cpu);
96 extern u64 local_clock_noinstr(void);
97 extern u64 local_clock(void);
99 #endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * An i/f to runtime opt-in for irq time accounting based off of sched_clock.
 * The reason for this explicit opt-in is not to have perf penalty with
 * slow sched_clocks.
 */
extern void enable_sched_clock_irqtime(void);
extern void disable_sched_clock_irqtime(void);
#else
static inline void enable_sched_clock_irqtime(void) {}
static inline void disable_sched_clock_irqtime(void) {}
#endif
114 #endif /* _LINUX_SCHED_CLOCK_H */