// kernel/trace/trace_stack.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES];
static unsigned stack_trace_index[STACK_TRACE_ENTRIES];

static unsigned int stack_trace_nr_entries;
static unsigned long stack_trace_max_size;
static arch_spinlock_t stack_trace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

DEFINE_PER_CPU(int, disable_stack_tracer);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
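
/*
 * print_max_stack - dump the recorded maximum stack to the console.
 *
 * Called when check_stack() finds that the end of the task stack has
 * been corrupted, just before BUG()ing out, so the offending trace is
 * visible in the log.
 */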
static void print_max_stack(void)
{
	long i;
	int size;

	pr_emerg("        Depth    Size   Location    (%d entries)\n"
		 "        -----    ----   --------\n",
		 stack_trace_nr_entries);

	for (i = 0; i < stack_trace_nr_entries; i++) {
		if (i + 1 == stack_trace_nr_entries)
			size = stack_trace_index[i];
		else
			size = stack_trace_index[i] - stack_trace_index[i+1];

		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_trace_index[i],
			 size, (void *)stack_dump_trace[i]);
	}
}
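
/*
 * check_stack - test whether the current stack depth is a new maximum.
 *
 * Computes the stack usage from @stack (the address of a local variable
 * in the ftrace callback) to the top of the thread stack.  If it exceeds
 * the recorded maximum, a fresh stack trace is saved and each saved
 * entry is matched against the words on the stack to work out how much
 * stack each function in the trace consumes.
 */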
static void check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags; unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = READ_ONCE(tracer_frame);
	int i, x;

	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= stack_trace_max_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	/* Can't do this from NMI context (can cause deadlocks) */
	if (in_nmi())
		return;

	local_irq_save(flags);
	arch_spin_lock(&stack_trace_max_lock);

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= stack_trace_max_size)
		goto out;

	stack_trace_max_size = this_size;

	stack_trace_nr_entries = stack_trace_save(stack_dump_trace,
					ARRAY_SIZE(stack_dump_trace) - 1,
					0);

	/* Skip over the overhead of the stack tracer itself */
	for (i = 0; i < stack_trace_nr_entries; i++) {
		if (stack_dump_trace[i] == ip)
			break;
	}

	/*
	 * Some archs may not have the passed in ip in the dump.
	 * If that happens, we need to show everything.
	 */
	if (i == stack_trace_nr_entries)
		i = 0;

	/*
	 * Now find where in the stack these are.
	 */
	x = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < stack_trace_nr_entries) {
		int found = 0;

		stack_trace_index[x] = this_size;
		p = start;

		for (; p < top && i < stack_trace_nr_entries; p++) {
			/*
			 * The READ_ONCE_NOCHECK is used to let KASAN know that
			 * this is not a stack-out-of-bounds error.
			 */
			if ((READ_ONCE_NOCHECK(*p)) == stack_dump_trace[i]) {
				stack_dump_trace[x] = stack_dump_trace[i++];
				this_size = stack_trace_index[x++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame)) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					stack_trace_max_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

	stack_trace_nr_entries = x;

	if (task_stack_end_corrupted(current)) {
		print_max_stack();
		BUG();
	}

 out:
	arch_spin_unlock(&stack_trace_max_lock);
	local_irq_restore(flags);
}
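
/*
 * stack_trace_call - the ftrace callback, invoked on every traced
 * function entry.  A per-CPU counter guards against recursion (the
 * tracer itself calls traced functions), and the address of the local
 * variable 'stack' serves as the current stack position passed to
 * check_stack().
 */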
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;

	preempt_disable_notrace();

	/* no atomic needed, we only modify this variable by this cpu */
	__this_cpu_inc(disable_stack_tracer);
	if (__this_cpu_read(disable_stack_tracer) != 1)
		goto out;

	/* If rcu is not watching, then save stack trace can fail */
	if (!rcu_is_watching())
		goto out;

	ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	__this_cpu_dec(disable_stack_tracer);
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
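
/*
 * Read and write handlers for the tracing/stack_max_size file.
 * Reading reports the largest stack usage recorded so far; writing
 * (typically 0) resets the recorded maximum.
 */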
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}
static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu disable_stack_tracer here.
	 */
	__this_cpu_inc(disable_stack_tracer);

	arch_spin_lock(&stack_trace_max_lock);
	*ptr = val;
	arch_spin_unlock(&stack_trace_max_lock);

	__this_cpu_dec(disable_stack_tracer);
	local_irq_restore(flags);

	return count;
}
static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};
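
/*
 * seq_file iterator for the tracing/stack_trace file.  t_start() takes
 * stack_trace_max_lock (with the per-CPU tracer disable bumped so the
 * tracer cannot recurse on the lock) and t_stop() drops it again, so
 * the recorded trace cannot change while it is being printed.
 */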
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= stack_trace_nr_entries)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	local_irq_disable();

	__this_cpu_inc(disable_stack_tracer);

	arch_spin_lock(&stack_trace_max_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}
static void t_stop(struct seq_file *m, void *p)
{
	arch_spin_unlock(&stack_trace_max_lock);

	__this_cpu_dec(disable_stack_tracer);

	local_irq_enable();
}
static void trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	seq_printf(m, "%pS\n", (void *)addr);
}
static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}
static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   stack_trace_nr_entries);

		if (!stack_tracer_enabled && !stack_trace_max_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= stack_trace_nr_entries)
		return 0;

	if (i + 1 == stack_trace_nr_entries)
		size = stack_trace_index[i];
	else
		size = stack_trace_index[i] - stack_trace_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_trace_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}
static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};
static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}
static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
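
/*
 * With dynamic ftrace, tracing/stack_trace_filter reuses ftrace's
 * regex filter interface so the stack check can be limited to a
 * chosen set of functions.
 */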
#ifdef CONFIG_DYNAMIC_FTRACE

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	struct ftrace_ops *ops = inode->i_private;

	return ftrace_regex_open(ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

#endif /* CONFIG_DYNAMIC_FTRACE */
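
/*
 * Handler for /proc/sys/kernel/stack_tracer_enabled: registers or
 * unregisters the ftrace callback when the value actually changes.
 */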
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int was_enabled;
	int ret;

	mutex_lock(&stack_sysctl_mutex);
	was_enabled = !!stack_tracer_enabled;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write || (was_enabled == !!stack_tracer_enabled))
		goto out;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);
 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}
static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;
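
/*
 * Parse the "stacktrace" boot parameter.  __setup() matches any
 * parameter beginning with "stacktrace", so str_has_prefix() on the
 * remainder also handles "stacktrace_filter=<function-list>", saving
 * the list until ftrace is ready for it in stack_trace_init().
 */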
static __init int enable_stacktrace(char *str)
{
	int len;

	if ((len = str_has_prefix(str, "_filter=")))
		strncpy(stack_trace_filter_buf, str + len, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			  &stack_trace_max_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			  NULL, &stack_trace_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("stack_trace_filter", 0644, d_tracer,
			  &trace_ops, &stack_trace_filter_fops);
#endif

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);
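
/*
 * Typical usage from the shell (a sketch; paths assume debugfs is
 * mounted at /sys/kernel/debug and may differ on some systems):
 *
 *	# echo 1 > /proc/sys/kernel/stack_tracer_enabled
 *	# cat /sys/kernel/debug/tracing/stack_max_size
 *	# cat /sys/kernel/debug/tracing/stack_trace
 *	# echo 0 > /sys/kernel/debug/tracing/stack_max_size	# reset the max
 */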