kernel/trace/trace_recursion_record.c
// SPDX-License-Identifier: GPL-2.0

#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace_output.h"

struct recursed_functions {
	unsigned long		ip;
	unsigned long		parent_ip;
};

static struct recursed_functions recursed_functions[CONFIG_FTRACE_RECORD_RECURSION_SIZE];
static atomic_t nr_records;
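
/*
 * Layout note: entries 0 .. nr_records - 1 of recursed_functions[] hold
 * valid records. nr_records is set to -1 while the records are being
 * cleared (see recursed_function_open()), which tells writers to back off.
 */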

/*
 * Cache the last found function. Yes, updates to this are racy, but
 * so is memory cache ;-)
 */
static unsigned long cached_function;

void ftrace_record_recursion(unsigned long ip, unsigned long parent_ip)
{
	int index = 0;
	int i;
	unsigned long old;

again:
	/* First check the last one recorded */
	if (ip == cached_function)
		return;

	i = atomic_read(&nr_records);
	/* nr_records is -1 when clearing records */
	smp_mb__after_atomic();
	if (i < 0)
		return;

	/*
	 * If there are two writers and this writer comes in second,
	 * the cmpxchg() below to update the ip will fail. Then this
	 * writer will try again. It is possible that index will now
	 * be greater than nr_records. This is because the writer
	 * that succeeded has not updated the nr_records yet.
	 * This writer could keep trying again until the other writer
	 * updates nr_records. But if the other writer takes an
	 * interrupt, and that interrupt locks up that CPU, we do
	 * not want this CPU to lock up due to the recursion protection,
	 * and have a bug report showing this CPU as the cause of
	 * locking up the computer. To not lose this record, this
	 * writer will simply use the next position to update the
	 * recursed_functions, and it will update the nr_records
	 * accordingly.
	 */
	if (index < i)
		index = i;
	if (index >= CONFIG_FTRACE_RECORD_RECURSION_SIZE)
		return;

	for (i = index - 1; i >= 0; i--) {
		if (recursed_functions[i].ip == ip) {
			cached_function = ip;
			return;
		}
	}

	cached_function = ip;

	/*
	 * We only want to add a function if it hasn't been added before.
	 * Add to the current location before incrementing the count.
	 * If it fails to add, then increment the index (save in i)
	 * and try again.
	 */
	old = cmpxchg(&recursed_functions[index].ip, 0, ip);
	if (old != 0) {
		/* Did something else already add this for us? */
		if (old == ip)
			return;
		/* Try the next location (use i for the next index) */
		index++;
		goto again;
	}

	recursed_functions[index].parent_ip = parent_ip;

	/*
	 * It's still possible that we could race with the clearing:
	 *
	 *    CPU0                                    CPU1
	 *    ----                                    ----
	 *    ip = func
	 *                                            nr_records = -1;
	 *                                            recursed_functions[0] = 0;
	 *    i = -1
	 *    if (i < 0)
	 *                                            nr_records = 0;
	 *                                            (new recursion detected)
	 *                                            recursed_functions[0] = func
	 *    cmpxchg(recursed_functions[0],
	 *            func, 0)
	 *
	 * But the worst that could happen is that we get a zero in
	 * the recursed_functions array, and it's likely that "func" will
	 * be recorded again.
	 */
	i = atomic_read(&nr_records);
	smp_mb__after_atomic();
	if (i < 0)
		cmpxchg(&recursed_functions[index].ip, ip, 0);
	else if (i <= index)
		atomic_cmpxchg(&nr_records, i, index + 1);
}
EXPORT_SYMBOL_GPL(ftrace_record_recursion);
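
/*
 * A minimal sketch of the intended call pattern (the caller below is
 * hypothetical; in the kernel this is reached from the recursion
 * protection helpers when CONFIG_FTRACE_RECORD_RECURSION is enabled):
 *
 *	if (callback_was_reentered)	// hypothetical recursion check
 *		ftrace_record_recursion(ip, parent_ip);
 *
 * where "ip" is the function that recursed and "parent_ip" is its
 * caller. The recording uses only atomics and cmpxchg(), so the
 * detection path does not need to take a lock.
 */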

static DEFINE_MUTEX(recursed_function_lock);
static struct trace_seq *tseq;

static void *recursed_function_seq_start(struct seq_file *m, loff_t *pos)
{
	void *ret = NULL;
	int index;

	mutex_lock(&recursed_function_lock);
	index = atomic_read(&nr_records);
	if (*pos < index) {
		ret = &recursed_functions[*pos];
	}

	tseq = kzalloc(sizeof(*tseq), GFP_KERNEL);
	if (!tseq)
		return ERR_PTR(-ENOMEM);

	trace_seq_init(tseq);

	return ret;
}

static void *recursed_function_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	int index;
	int p;

	index = atomic_read(&nr_records);
	p = ++(*pos);

	return p < index ? &recursed_functions[p] : NULL;
}

static void recursed_function_seq_stop(struct seq_file *m, void *v)
{
	kfree(tseq);
	mutex_unlock(&recursed_function_lock);
}
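
/*
 * Each record is printed as one line of roughly the form
 *
 *	<caller symbol>:	<symbol of the function that recursed>
 *
 * (a sketch; the exact symbol formatting comes from trace_seq_print_sym()).
 */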
static int recursed_function_seq_show(struct seq_file *m, void *v)
{
	struct recursed_functions *record = v;
	int ret = 0;

	if (record) {
		trace_seq_print_sym(tseq, record->parent_ip, true);
		trace_seq_puts(tseq, ":\t");
		trace_seq_print_sym(tseq, record->ip, true);
		trace_seq_putc(tseq, '\n');
		ret = trace_print_seq(m, tseq);
	}

	return ret;
}

static const struct seq_operations recursed_function_seq_ops = {
	.start = recursed_function_seq_start,
	.next = recursed_function_seq_next,
	.stop = recursed_function_seq_stop,
	.show = recursed_function_seq_show
};

static int recursed_function_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	mutex_lock(&recursed_function_lock);
	/* If this file was opened for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		/* disable updating records */
		atomic_set(&nr_records, -1);
		smp_mb__after_atomic();
		memset(recursed_functions, 0, sizeof(recursed_functions));
		smp_wmb();
		/* enable them again */
		atomic_set(&nr_records, 0);
	}
	if (file->f_mode & FMODE_READ)
		ret = seq_open(file, &recursed_function_seq_ops);
	mutex_unlock(&recursed_function_lock);

	return ret;
}
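
/*
 * Writes are accepted and silently discarded. The handler presumably
 * exists so that shell redirection into the file succeeds; the actual
 * clearing is done by the O_TRUNC handling in recursed_function_open()
 * above.
 */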
static ssize_t recursed_function_write(struct file *file,
				       const char __user *buffer,
				       size_t count, loff_t *ppos)
{
	return count;
}

static int recursed_function_release(struct inode *inode, struct file *file)
{
	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);
	return 0;
}

static const struct file_operations recursed_functions_fops = {
	.open = recursed_function_open,
	.write = recursed_function_write,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = recursed_function_release,
};
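
/*
 * Example usage from user space (a sketch; the tracefs mount point is
 * assumed to be /sys/kernel/tracing, it may also appear under
 * /sys/kernel/debug/tracing):
 *
 *	cat /sys/kernel/tracing/recursed_functions	# list recorded recursions
 *	echo > /sys/kernel/tracing/recursed_functions	# clear the records
 *
 * The "echo >" form works because truncating the file on open triggers
 * the clearing path in recursed_function_open().
 */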

__init static int create_recursed_functions(void)
{
	struct dentry *dentry;

	dentry = trace_create_file("recursed_functions", 0644, NULL, NULL,
				   &recursed_functions_fops);
	if (!dentry)
		pr_warn("WARNING: Failed to create recursed_functions\n");
	return 0;
}

fs_initcall(create_recursed_functions);
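
/*
 * Build note (an assumption about the surrounding Kconfig, not verified
 * here): this file is expected to be compiled only when
 * CONFIG_FTRACE_RECORD_RECURSION is enabled, and
 * CONFIG_FTRACE_RECORD_RECURSION_SIZE (used above to size the
 * recursed_functions[] array) bounds how many records can be kept.
 */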