/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/setup.h>

#include "trace.h"
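
/*
 * The stack tracer hooks every traced function entry, measures how deep
 * the current kernel stack is at that point, and records the deepest
 * stack seen along with the call chain that produced it.  The results
 * are exported through debugfs (stack_max_size, stack_trace,
 * stack_trace_filter) and can be toggled at runtime through the
 * stack_tracer_enabled sysctl.
 */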
#define STACK_TRACE_ENTRIES 500

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];
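
/*
 * stack_dump_trace[] holds the return addresses of the deepest call
 * chain seen so far, terminated by ULONG_MAX; stack_dump_index[i]
 * holds the stack depth, in bytes from the top of the stack, at which
 * entry i was found.  max_stack_trace below wraps the address array
 * for save_stack_trace().
 */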
static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES,
	.entries		= stack_dump_trace,
};
static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;
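
/*
 * check_stack() runs in the function-trace callback path.  It computes
 * the current stack usage from the address of a local variable; if that
 * beats the recorded maximum it takes max_stack_lock, captures the call
 * chain with save_stack_trace(), and then walks the raw stack words to
 * work out how much stack each entry in the chain consumed.
 */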
static inline void check_stack(void)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	int i;

	this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(&this_size))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries	= 0;
	max_stack_trace.skip		= 3;

	save_stack_trace(&max_stack_trace);

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = &this_size;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			/*
			 * A stack word matching the next saved return
			 * address marks the frame boundary for entry i.
			 */
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
			}
		}

		if (!found)
			i++;
	}

 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}
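
/*
 * stack_trace_call() is the ftrace callback invoked at the entry of
 * every traced function.  The per-cpu trace_active counter guards
 * against recursion: if the callback fires again on the same CPU while
 * we are already inside check_stack(), the nested call bails out.
 */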
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	int cpu;

	if (unlikely(!ftrace_enabled || stack_trace_disabled))
		return;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	check_stack();

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
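
/*
 * "stack_max_size" is a debugfs file backed by the max_stack_size
 * variable: reading returns the deepest stack usage recorded so far,
 * and writing a value (typically 0) resets the watermark.
 */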
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);

	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}
static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}
static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};
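
/*
 * seq_file iterator for the "stack_trace" file.  __next() maps the
 * sequence position to an index into stack_dump_trace[], stopping at
 * the ULONG_MAX terminator.  t_start()/t_stop() bump trace_active and
 * hold max_stack_lock across the whole dump so the tracer callback
 * cannot update the arrays while they are being printed.
 */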
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}
static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}
static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	return seq_printf(m, "%pS\n", (void *)addr);
}
static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}
static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries - 1);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	/*
	 * The size of a frame is the difference between this entry's
	 * recorded depth and the next entry's; the last entry accounts
	 * for the remaining depth on its own.
	 */
	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}
static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};
static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}
static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
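
/*
 * "stack_trace_filter" reuses the ftrace regex filter machinery on
 * trace_ops, so the stack tracer can be restricted to checking the
 * stack only when specific functions are entered.
 */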
static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}
static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_regex_release,
};
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}
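
/*
 * Boot-time setup: "stacktrace" on the kernel command line enables the
 * tracer before user space is up, and "stacktrace_filter=<funcs>"
 * additionally seeds the function filter (the "_filter=" comparison
 * below sees the remainder of the option string after "stacktrace").
 */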
static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);
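
/*
 * stack_trace_init() creates the debugfs control files and, if the
 * tracer was enabled on the command line, registers the callback with
 * ftrace and applies any early filter string saved above.
 */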
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			 NULL, &stack_trace_filter_fops);

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);
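
/*
 * Example usage (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *   echo 1 > /proc/sys/kernel/stack_tracer_enabled
 *   cat /sys/kernel/debug/tracing/stack_max_size
 *   cat /sys/kernel/debug/tracing/stack_trace
 *   echo 0 > /sys/kernel/debug/tracing/stack_max_size   # reset watermark
 */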