kernel/trace/trace_preemptirq.c (drm/drm-misc.git)
// SPDX-License-Identifier: GPL-2.0
/*
 * preemptoff and irqoff tracepoints
 *
 * Copyright (C) Joel Fernandes (Google) <joel@joelfernandes.org>
 */

#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/hardirq.h>
#include "trace.h"

#define CREATE_TRACE_POINTS
#include <trace/events/preemptirq.h>

/*
 * Use regular trace points on architectures that implement noinstr
 * tooling: these calls will only happen with RCU enabled, which can
 * use a regular tracepoint.
 *
 * On older architectures, RCU may not be watching in idle. In that
 * case, wake up RCU to watch while calling the tracepoint. These
 * aren't NMI-safe - so exclude NMI contexts:
 */
#ifdef CONFIG_ARCH_WANTS_NO_INSTR
#define trace(point, args)	trace_##point(args)
#else
#define trace(point, args)					\
	do {							\
		if (trace_##point##_enabled()) {		\
			bool exit_rcu = false;			\
			if (in_nmi())				\
				break;				\
			if (!IS_ENABLED(CONFIG_TINY_RCU) &&	\
			    is_idle_task(current)) {		\
				ct_irq_enter();			\
				exit_rcu = true;		\
			}					\
			trace_##point(args);			\
			if (exit_rcu)				\
				ct_irq_exit();			\
		}						\
	} while (0)
#endif
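
/*
 * Illustrative expansion (editor's sketch, not part of the upstream file):
 * without CONFIG_ARCH_WANTS_NO_INSTR, a call like
 *
 *	trace(irq_enable, TP_ARGS(CALLER_ADDR0, CALLER_ADDR1));
 *
 * expands to roughly the following, i.e. the tracepoint only fires outside
 * NMI context, and the idle task temporarily wakes RCU around the event:
 *
 *	do {
 *		if (trace_irq_enable_enabled()) {
 *			bool exit_rcu = false;
 *			if (in_nmi())
 *				break;
 *			if (!IS_ENABLED(CONFIG_TINY_RCU) &&
 *			    is_idle_task(current)) {
 *				ct_irq_enter();
 *				exit_rcu = true;
 *			}
 *			trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1);
 *			if (exit_rcu)
 *				ct_irq_exit();
 *		}
 *	} while (0);
 */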

#ifdef CONFIG_TRACE_IRQFLAGS
/* Per-cpu variable to prevent redundant calls when IRQs already off */
static DEFINE_PER_CPU(int, tracing_irq_cpu);

/*
 * Like trace_hardirqs_on() but without the lockdep invocation. This is
 * used in the low level entry code where the ordering vs. RCU is important
 * and lockdep uses a staged approach which splits the lockdep hardirq
 * tracking into a RCU on and a RCU off section.
 */
void trace_hardirqs_on_prepare(void)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		trace(irq_enable, TP_ARGS(CALLER_ADDR0, CALLER_ADDR1));
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}
}
EXPORT_SYMBOL(trace_hardirqs_on_prepare);
NOKPROBE_SYMBOL(trace_hardirqs_on_prepare);
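
/*
 * Usage sketch (editor's note, paraphrased from the generic entry code in
 * kernel/entry/common.c): the _prepare/_finish variants let the entry code
 * emit the trace events while RCU is still watching and perform the final
 * lockdep hardirq transition separately, e.g. on return to user mode:
 *
 *	instrumentation_begin();
 *	trace_hardirqs_on_prepare();
 *	lockdep_hardirqs_on_prepare();
 *	instrumentation_end();
 *
 *	user_enter_irqoff();
 *	arch_exit_to_user_mode();
 *	lockdep_hardirqs_on(CALLER_ADDR0);
 */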

void trace_hardirqs_on(void)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		trace(irq_enable, TP_ARGS(CALLER_ADDR0, CALLER_ADDR1));
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}

	lockdep_hardirqs_on_prepare();
	lockdep_hardirqs_on(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on);
NOKPROBE_SYMBOL(trace_hardirqs_on);

/*
 * Like trace_hardirqs_off() but without the lockdep invocation. This is
 * used in the low level entry code where the ordering vs. RCU is important
 * and lockdep uses a staged approach which splits the lockdep hardirq
 * tracking into a RCU on and a RCU off section.
 */
void trace_hardirqs_off_finish(void)
{
	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
		trace(irq_disable, TP_ARGS(CALLER_ADDR0, CALLER_ADDR1));
	}
}
EXPORT_SYMBOL(trace_hardirqs_off_finish);
NOKPROBE_SYMBOL(trace_hardirqs_off_finish);

void trace_hardirqs_off(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);

	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
		trace(irq_disable, TP_ARGS(CALLER_ADDR0, CALLER_ADDR1));
	}
}
EXPORT_SYMBOL(trace_hardirqs_off);
NOKPROBE_SYMBOL(trace_hardirqs_off);
#endif /* CONFIG_TRACE_IRQFLAGS */

#ifdef CONFIG_TRACE_PREEMPT_TOGGLE

void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	trace(preempt_enable, TP_ARGS(a0, a1));
	tracer_preempt_on(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	trace(preempt_disable, TP_ARGS(a0, a1));
	tracer_preempt_off(a0, a1);
}
#endif
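
/*
 * Consumer sketch (editor's note, hypothetical and not part of this file):
 * with CONFIG_PREEMPTIRQ_TRACEPOINTS=y, a module can attach a probe to the
 * tracepoints fired above. The probe prototype follows the preemptirq
 * event class in <trace/events/preemptirq.h>, which passes the call site
 * and its caller:
 *
 *	static void probe_irq_disable(void *data, unsigned long ip,
 *				      unsigned long parent_ip)
 *	{
 *		pr_info("irqs off at %pS from %pS\n",
 *			(void *)ip, (void *)parent_ip);
 *	}
 *
 *	ret = register_trace_irq_disable(probe_irq_disable, NULL);
 *	...
 *	unregister_trace_irq_disable(probe_irq_disable);
 */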