1 // SPDX-License-Identifier: GPL-2.0
3 #include <linux/seq_file.h>
4 #include <linux/kallsyms.h>
5 #include <linux/module.h>
6 #include <linux/ftrace.h>
9 #include "trace_output.h"
/*
 * One recorded recursion event: the function that recursed (ip) and its
 * caller (parent_ip).  The write side fills ip with cmpxchg() before
 * publishing, so ip == 0 means "slot empty".
 */
struct recursed_functions {
	unsigned long		ip;
	unsigned long		parent_ip;
};
/* Fixed-size table of recorded recursions; filled locklessly by the write side */
static struct recursed_functions recursed_functions[CONFIG_FTRACE_RECORD_RECURSION_SIZE];
/* Count of valid entries above; set to -1 while the table is being cleared */
static atomic_t nr_records;

/*
 * Cache the last found function. Yes, updates to this is racey, but
 * so is memory cache ;-)
 */
static unsigned long cached_function;
25 void ftrace_record_recursion(unsigned long ip
, unsigned long parent_ip
)
32 /* First check the last one recorded */
33 if (ip
== cached_function
)
36 i
= atomic_read(&nr_records
);
37 /* nr_records is -1 when clearing records */
38 smp_mb__after_atomic();
43 * If there's two writers and this writer comes in second,
44 * the cmpxchg() below to update the ip will fail. Then this
45 * writer will try again. It is possible that index will now
46 * be greater than nr_records. This is because the writer
47 * that succeeded has not updated the nr_records yet.
48 * This writer could keep trying again until the other writer
49 * updates nr_records. But if the other writer takes an
50 * interrupt, and that interrupt locks up that CPU, we do
51 * not want this CPU to lock up due to the recursion protection,
52 * and have a bug report showing this CPU as the cause of
53 * locking up the computer. To not lose this record, this
54 * writer will simply use the next position to update the
55 * recursed_functions, and it will update the nr_records
60 if (index
>= CONFIG_FTRACE_RECORD_RECURSION_SIZE
)
63 for (i
= index
- 1; i
>= 0; i
--) {
64 if (recursed_functions
[i
].ip
== ip
) {
73 * We only want to add a function if it hasn't been added before.
74 * Add to the current location before incrementing the count.
75 * If it fails to add, then increment the index (save in i)
78 old
= cmpxchg(&recursed_functions
[index
].ip
, 0, ip
);
80 /* Did something else already added this for us? */
83 /* Try the next location (use i for the next index) */
88 recursed_functions
[index
].parent_ip
= parent_ip
;
91 * It's still possible that we could race with the clearing
96 * recursed_functions[0] = 0;
100 * (new recursion detected)
101 * recursed_functions[0] = func
102 * cmpxchg(recursed_functions[0],
105 * But the worse that could happen is that we get a zero in
106 * the recursed_functions array, and it's likely that "func" will
109 i
= atomic_read(&nr_records
);
110 smp_mb__after_atomic();
112 cmpxchg(&recursed_functions
[index
].ip
, ip
, 0);
114 atomic_cmpxchg(&nr_records
, i
, index
+ 1);
116 EXPORT_SYMBOL_GPL(ftrace_record_recursion
);
/* Serializes readers of the table against each other and against clearing */
static DEFINE_MUTEX(recursed_function_lock);
/* Scratch trace_seq for symbol formatting; allocated in recursed_function_seq_start() */
static struct trace_seq *tseq;
121 static void *recursed_function_seq_start(struct seq_file
*m
, loff_t
*pos
)
126 mutex_lock(&recursed_function_lock
);
127 index
= atomic_read(&nr_records
);
129 ret
= &recursed_functions
[*pos
];
132 tseq
= kzalloc(sizeof(*tseq
), GFP_KERNEL
);
134 return ERR_PTR(-ENOMEM
);
136 trace_seq_init(tseq
);
141 static void *recursed_function_seq_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
146 index
= atomic_read(&nr_records
);
149 return p
< index
? &recursed_functions
[p
] : NULL
;
152 static void recursed_function_seq_stop(struct seq_file
*m
, void *v
)
155 mutex_unlock(&recursed_function_lock
);
158 static int recursed_function_seq_show(struct seq_file
*m
, void *v
)
160 struct recursed_functions
*record
= v
;
164 trace_seq_print_sym(tseq
, record
->parent_ip
, true);
165 trace_seq_puts(tseq
, ":\t");
166 trace_seq_print_sym(tseq
, record
->ip
, true);
167 trace_seq_putc(tseq
, '\n');
168 ret
= trace_print_seq(m
, tseq
);
174 static const struct seq_operations recursed_function_seq_ops
= {
175 .start
= recursed_function_seq_start
,
176 .next
= recursed_function_seq_next
,
177 .stop
= recursed_function_seq_stop
,
178 .show
= recursed_function_seq_show
181 static int recursed_function_open(struct inode
*inode
, struct file
*file
)
185 mutex_lock(&recursed_function_lock
);
186 /* If this file was opened for write, then erase contents */
187 if ((file
->f_mode
& FMODE_WRITE
) && (file
->f_flags
& O_TRUNC
)) {
188 /* disable updating records */
189 atomic_set(&nr_records
, -1);
190 smp_mb__after_atomic();
191 memset(recursed_functions
, 0, sizeof(recursed_functions
));
193 /* enable them again */
194 atomic_set(&nr_records
, 0);
196 if (file
->f_mode
& FMODE_READ
)
197 ret
= seq_open(file
, &recursed_function_seq_ops
);
198 mutex_unlock(&recursed_function_lock
);
203 static ssize_t
recursed_function_write(struct file
*file
,
204 const char __user
*buffer
,
205 size_t count
, loff_t
*ppos
)
210 static int recursed_function_release(struct inode
*inode
, struct file
*file
)
212 if (file
->f_mode
& FMODE_READ
)
213 seq_release(inode
, file
);
217 static const struct file_operations recursed_functions_fops
= {
218 .open
= recursed_function_open
,
219 .write
= recursed_function_write
,
222 .release
= recursed_function_release
,
225 __init
static int create_recursed_functions(void)
227 struct dentry
*dentry
;
229 dentry
= trace_create_file("recursed_functions", 0644, NULL
, NULL
,
230 &recursed_functions_fops
);
232 pr_warn("WARNING: Failed to create recursed_functions\n");
236 fs_initcall(create_recursed_functions
);