/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_TRACE_RECURSION_H
#define _LINUX_TRACE_RECURSION_H

#include <linux/interrupt.h>
#include <linux/sched.h>

#ifdef CONFIG_TRACING

/* Only current can touch trace_recursion */

/*
 * For function tracing recursion:
 *  The order of these bits is important.
 *
 *  When function tracing occurs, the following steps are made:
 *   If the arch does not support an ftrace feature:
 *    call the internal function (uses the INTERNAL bits), which calls...
 *   The function callback, which can use the FTRACE bits to
 *    check for recursion.
 */
enum {
	/* Function recursion bits */
	TRACE_FTRACE_BIT,
	TRACE_FTRACE_NMI_BIT,
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,
	TRACE_FTRACE_TRANSITION_BIT,

	/* Internal use recursion bits */
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_IRQ_BIT,
	TRACE_INTERNAL_SIRQ_BIT,
	TRACE_INTERNAL_TRANSITION_BIT,

	TRACE_BRANCH_BIT,
/*
 * Abuse of the trace_recursion:
 * we need a way to maintain state if we are tracing the function graph
 * in irq context, because we want to trace a particular function that
 * was called in irq context while irq tracing is off. Since this
 * can only be modified by current, we can reuse trace_recursion.
 */
	TRACE_IRQ_BIT,

	/* Used to prevent recursion recording from recursing. */
	TRACE_RECORD_RECURSION_BIT,
};

#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))

#define TRACE_CONTEXT_BITS	4

#define TRACE_FTRACE_START	TRACE_FTRACE_BIT

#define TRACE_LIST_START	TRACE_INTERNAL_BIT

#define TRACE_CONTEXT_MASK	((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)

/*
 * Used for setting context:
 *  NMI     = 0
 *  IRQ     = 1
 *  SOFTIRQ = 2
 *  NORMAL  = 3
 */
enum {
	TRACE_CTX_NMI,
	TRACE_CTX_IRQ,
	TRACE_CTX_SOFTIRQ,
	TRACE_CTX_NORMAL,
	TRACE_CTX_TRANSITION,
};

static __always_inline int trace_get_context_bit(void)
{
	unsigned char bit = interrupt_context_level();

	return TRACE_CTX_NORMAL - bit;
}
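
/*
 * Illustrative mapping (a sketch, assuming interrupt_context_level() from
 * <linux/preempt.h> returns 0 for normal, 1 for softirq, 2 for hardirq and
 * 3 for NMI context):
 *
 *	normal  : TRACE_CTX_NORMAL - 0 = TRACE_CTX_NORMAL  (3)
 *	softirq : TRACE_CTX_NORMAL - 1 = TRACE_CTX_SOFTIRQ (2)
 *	hardirq : TRACE_CTX_NORMAL - 2 = TRACE_CTX_IRQ     (1)
 *	NMI     : TRACE_CTX_NORMAL - 3 = TRACE_CTX_NMI     (0)
 */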

#ifdef CONFIG_FTRACE_RECORD_RECURSION
extern void ftrace_record_recursion(unsigned long ip, unsigned long parent_ip);
# define do_ftrace_record_recursion(ip, pip)				\
	do {								\
		if (!trace_recursion_test(TRACE_RECORD_RECURSION_BIT)) { \
			trace_recursion_set(TRACE_RECORD_RECURSION_BIT); \
			ftrace_record_recursion(ip, pip);		\
			trace_recursion_clear(TRACE_RECORD_RECURSION_BIT); \
		}							\
	} while (0)
#else
# define do_ftrace_record_recursion(ip, pip)	do { } while (0)
#endif

#ifdef CONFIG_FTRACE_VALIDATE_RCU_IS_WATCHING
# define trace_warn_on_no_rcu(ip)					\
	({								\
		bool __ret = !rcu_is_watching();			\
		if (__ret && !trace_recursion_test(TRACE_RECORD_RECURSION_BIT)) { \
			trace_recursion_set(TRACE_RECORD_RECURSION_BIT); \
			WARN_ONCE(true, "RCU not on for: %pS\n", (void *)ip); \
			trace_recursion_clear(TRACE_RECORD_RECURSION_BIT); \
		}							\
		__ret;							\
	})
#else
# define trace_warn_on_no_rcu(ip)	false
#endif

/*
 * Preemption is promised to be disabled when the returned bit is >= 0.
 */
static __always_inline int trace_test_and_set_recursion(unsigned long ip, unsigned long pip,
							 int start)
{
	unsigned int val = READ_ONCE(current->trace_recursion);
	int bit;

	if (trace_warn_on_no_rcu(ip))
		return -1;

	bit = trace_get_context_bit() + start;
	if (unlikely(val & (1 << bit))) {
		/*
		 * If an interrupt occurs during a trace, and another trace
		 * happens in that interrupt but before the preempt_count is
		 * updated to reflect the new interrupt context, then this
		 * will think a recursion occurred, and the event will be dropped.
		 * Let a single instance happen via the TRANSITION_BIT to
		 * not drop those events.
		 */
		bit = TRACE_CTX_TRANSITION + start;
		if (val & (1 << bit)) {
			do_ftrace_record_recursion(ip, pip);
			return -1;
		}
	}
	val |= 1 << bit;
	current->trace_recursion = val;
	barrier();

	preempt_disable_notrace();

	return bit;
}

/*
 * Preemption will be enabled (if it was previously enabled).
 */
static __always_inline void trace_clear_recursion(int bit)
{
	preempt_enable_notrace();
	barrier();
	trace_recursion_clear(bit);
}

/**
 * ftrace_test_recursion_trylock - tests for recursion in same context
 *
 * Use this for ftrace callbacks. This will detect if the function
 * tracing recursed in the same context (normal vs interrupt).
 *
 * Returns: -1 if a recursion happened.
 *          >= 0 if no recursion.
 */
static __always_inline int ftrace_test_recursion_trylock(unsigned long ip,
							  unsigned long parent_ip)
{
	return trace_test_and_set_recursion(ip, parent_ip, TRACE_FTRACE_START);
}

/**
 * ftrace_test_recursion_unlock - called when function callback is complete
 * @bit: The return of a successful ftrace_test_recursion_trylock()
 *
 * This is used at the end of an ftrace callback.
 */
static __always_inline void ftrace_test_recursion_unlock(int bit)
{
	trace_clear_recursion(bit);
}
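
/*
 * Typical pairing in an ftrace callback (a sketch, not taken from this
 * header: my_callback() and do_my_trace_work() are hypothetical names, and
 * the parameter list follows the ftrace_func_t callback type):
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct ftrace_regs *fregs)
 *	{
 *		int bit;
 *
 *		bit = ftrace_test_recursion_trylock(ip, parent_ip);
 *		if (bit < 0)
 *			return;
 *
 *		do_my_trace_work(ip, parent_ip);
 *
 *		ftrace_test_recursion_unlock(bit);
 *	}
 */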

#endif /* CONFIG_TRACING */
#endif /* _LINUX_TRACE_RECURSION_H */