/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/trace.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"
/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will lurk into the ring-buffer to count the
 * entries inserted during the selftest although some concurrent
 * insertions into the ring-buffer such as trace_printk could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;
/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops.
 * Set it to 1 to dump the buffers of all CPUs.
 * Set it to 2 to dump only the buffer of the CPU that triggered the oops.
 */
enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;
#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "eval_string"
	 */
	union trace_eval_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
	struct trace_eval_map		map;
	struct trace_eval_map_head	head;
	struct trace_eval_map_tail	tail;
};

static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
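
/*
 * Illustrative sketch (not part of this file): one way such a saved
 * array could be walked, given the layout described above -- element 0
 * is the head (count plus owning module), elements 1..length hold the
 * maps, and the element after the last map is the tail whose ->next
 * points at the next saved array (or NULL).  The helper name below is
 * hypothetical.
 */
#if 0
static void walk_eval_maps_sketch(union trace_eval_map_item *item)
{
	while (item) {
		unsigned long length = item[0].head.length;
		unsigned long i;

		/* the maps start right after the head element */
		for (i = 1; i <= length; i++)
			pr_info("%s = %lu\n", item[i].map.eval_string,
				item[i].map.eval_value);

		/* the tail element follows the last map */
		item = item[length + 1].tail.next;
	}
}
#endif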
static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);
static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);
static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);
unsigned long long ns2usecs(u64 nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}
static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

void trace_free_pid_list(struct trace_pid_list *pid_list)
{
	vfree(pid_list->pids);
	kfree(pid_list);
}
/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	/*
	 * If pid_max changed after filtered_pids was created, we
	 * by default ignore all pids greater than the previous pid_max.
	 */
	if (search_pid >= filtered_pids->pid_max)
		return false;

	return test_bit(search_pid, filtered_pids->pids);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
{
	/*
	 * Return false, because if filtered_pids does not exist,
	 * all pids are good to trace.
	 */
	if (!filtered_pids)
		return false;

	return !trace_find_filtered_pid(filtered_pids, task->pid);
}
/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* Sorry, but we don't support pid_max changing after setting */
	if (task->pid >= pid_list->pid_max)
		return;

	/* "self" is set for forks, and NULL for exits */
	if (self)
		set_bit(task->pid, pid_list->pids);
	else
		clear_bit(task->pid, pid_list->pids);
}
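
/*
 * Sketch (not part of this file): the kind of sched_process_fork /
 * sched_process_exit probes that drive the helper above.  On fork the
 * parent is passed as @self, so the child is only added when the parent
 * is already filtered; on exit @self is NULL, so the pid is cleared.
 * The function and variable names below are illustrative only (the real
 * callers live in trace_events.c and fetch the pid_list under RCU).
 */
#if 0
static void example_pid_fork_probe(void *data, struct task_struct *self,
				   struct task_struct *task)
{
	struct trace_pid_list *pid_list = data;

	trace_filter_add_remove_task(pid_list, self, task);
}

static void example_pid_exit_probe(void *data, struct task_struct *task)
{
	struct trace_pid_list *pid_list = data;

	trace_filter_add_remove_task(pid_list, NULL, task);
}
#endif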
/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	unsigned long pid = (unsigned long)v;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

	/* Return pid + 1 to allow zero to be represented */
	if (pid < pid_list->pid_max)
		return (void *)(pid + 1);

	return NULL;
}
/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by the seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	loff_t l = 0;

	pid = find_first_bit(pid_list->pids, pid_list->pid_max);
	if (pid >= pid_list->pid_max)
		return NULL;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;

	return (void *)pid;
}
/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}
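
/*
 * Sketch (not part of this file): how the three helpers above slot into
 * a seq_file interface.  The real users (e.g. the set_event_pid file)
 * look up the current trace_pid_list under RCU in start()/stop(); the
 * wrapper names and the "my_pid_list" variable here are illustrative.
 */
#if 0
static void *example_pid_seq_start(struct seq_file *m, loff_t *pos)
{
	return trace_pid_start(my_pid_list, pos);
}

static void *example_pid_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return trace_pid_next(my_pid_list, v, pos);
}

static void example_pid_seq_stop(struct seq_file *m, void *v)
{
	/* a real user would drop its RCU read lock here */
}

static const struct seq_operations example_pid_seq_ops = {
	.start	= example_pid_seq_start,
	.next	= example_pid_seq_next,
	.stop	= example_pid_seq_stop,
	.show	= trace_pid_show,	/* pids are stored as pid + 1 */
};
#endif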
471 /* 128 should be much more than enough */
472 #define PID_BUF_SIZE 127
474 int trace_pid_write(struct trace_pid_list
*filtered_pids
,
475 struct trace_pid_list
**new_pid_list
,
476 const char __user
*ubuf
, size_t cnt
)
478 struct trace_pid_list
*pid_list
;
479 struct trace_parser parser
;
487 if (trace_parser_get_init(&parser
, PID_BUF_SIZE
+ 1))
	/*
	 * Always recreate a new array. The write is an all or nothing
	 * operation. Always create a new array when adding new pids by
	 * the user. If the operation fails, then the current list is
	 * used.
	 */
496 pid_list
= kmalloc(sizeof(*pid_list
), GFP_KERNEL
);
500 pid_list
->pid_max
= READ_ONCE(pid_max
);
502 /* Only truncating will shrink pid_max */
503 if (filtered_pids
&& filtered_pids
->pid_max
> pid_list
->pid_max
)
504 pid_list
->pid_max
= filtered_pids
->pid_max
;
506 pid_list
->pids
= vzalloc((pid_list
->pid_max
+ 7) >> 3);
507 if (!pid_list
->pids
) {
513 /* copy the current bits to the new max */
514 for_each_set_bit(pid
, filtered_pids
->pids
,
515 filtered_pids
->pid_max
) {
516 set_bit(pid
, pid_list
->pids
);
525 ret
= trace_get_user(&parser
, ubuf
, cnt
, &pos
);
526 if (ret
< 0 || !trace_parser_loaded(&parser
))
533 parser
.buffer
[parser
.idx
] = 0;
536 if (kstrtoul(parser
.buffer
, 0, &val
))
538 if (val
>= pid_list
->pid_max
)
543 set_bit(pid
, pid_list
->pids
);
546 trace_parser_clear(&parser
);
549 trace_parser_put(&parser
);
552 trace_free_pid_list(pid_list
);
557 /* Cleared the list of pids */
558 trace_free_pid_list(pid_list
);
563 *new_pid_list
= pid_list
;
568 static u64
buffer_ftrace_now(struct trace_buffer
*buf
, int cpu
)
572 /* Early boot up does not have a buffer yet */
574 return trace_clock_local();
576 ts
= ring_buffer_time_stamp(buf
->buffer
, cpu
);
577 ring_buffer_normalize_time_stamp(buf
->buffer
, cpu
, &ts
);
582 u64
ftrace_now(int cpu
)
584 return buffer_ftrace_now(&global_trace
.trace_buffer
, cpu
);
/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * Serialize access to the ring buffer.
 *
 * The ring buffer serializes readers, but that is only low level protection.
 * The validity of the events (which are returned by ring_buffer_peek() etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes to
 * consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different per-cpu
 * ring buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */
#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
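
/*
 * Sketch (not part of this file): the pattern consumers of the ring
 * buffer follow with the primitives above.  A reader of one CPU's
 * buffer serializes only against readers of that same CPU (and any
 * "all CPUs" reader); passing RING_BUFFER_ALL_CPUS excludes every
 * per-cpu reader.  "consume_one_event" is a stand-in for real work.
 */
#if 0
static void example_consume(struct trace_buffer *buf, int cpu)
{
	trace_access_lock(cpu);
	consume_one_event(buf, cpu);	/* e.g. ring_buffer_consume() */
	trace_access_unlock(cpu);
}
#endif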
711 #ifdef CONFIG_STACKTRACE
712 static void __ftrace_trace_stack(struct ring_buffer
*buffer
,
714 int skip
, int pc
, struct pt_regs
*regs
);
715 static inline void ftrace_trace_stack(struct trace_array
*tr
,
716 struct ring_buffer
*buffer
,
718 int skip
, int pc
, struct pt_regs
*regs
);
721 static inline void __ftrace_trace_stack(struct ring_buffer
*buffer
,
723 int skip
, int pc
, struct pt_regs
*regs
)
726 static inline void ftrace_trace_stack(struct trace_array
*tr
,
727 struct ring_buffer
*buffer
,
729 int skip
, int pc
, struct pt_regs
*regs
)
735 static __always_inline
void
736 trace_event_setup(struct ring_buffer_event
*event
,
737 int type
, unsigned long flags
, int pc
)
739 struct trace_entry
*ent
= ring_buffer_event_data(event
);
741 tracing_generic_entry_update(ent
, flags
, pc
);
745 static __always_inline
struct ring_buffer_event
*
746 __trace_buffer_lock_reserve(struct ring_buffer
*buffer
,
749 unsigned long flags
, int pc
)
751 struct ring_buffer_event
*event
;
753 event
= ring_buffer_lock_reserve(buffer
, len
);
755 trace_event_setup(event
, type
, flags
, pc
);
760 void tracer_tracing_on(struct trace_array
*tr
)
762 if (tr
->trace_buffer
.buffer
)
763 ring_buffer_record_on(tr
->trace_buffer
.buffer
);
765 * This flag is looked at when buffers haven't been allocated
766 * yet, or by some tracers (like irqsoff), that just want to
767 * know if the ring buffer has been disabled, but it can handle
768 * races of where it gets disabled but we still do a record.
769 * As the check is in the fast path of the tracers, it is more
770 * important to be fast than accurate.
772 tr
->buffer_disabled
= 0;
773 /* Make the flag seen by readers */
778 * tracing_on - enable tracing buffers
780 * This function enables tracing buffers that may have been
781 * disabled with tracing_off.
783 void tracing_on(void)
785 tracer_tracing_on(&global_trace
);
787 EXPORT_SYMBOL_GPL(tracing_on
);
790 static __always_inline
void
791 __buffer_unlock_commit(struct ring_buffer
*buffer
, struct ring_buffer_event
*event
)
793 __this_cpu_write(trace_taskinfo_save
, true);
795 /* If this is the temp buffer, we need to commit fully */
796 if (this_cpu_read(trace_buffered_event
) == event
) {
797 /* Length is in event->array[0] */
798 ring_buffer_write(buffer
, event
->array
[0], &event
->array
[1]);
799 /* Release the temp buffer */
800 this_cpu_dec(trace_buffered_event_cnt
);
802 ring_buffer_unlock_commit(buffer
, event
);
806 * __trace_puts - write a constant string into the trace buffer.
807 * @ip: The address of the caller
808 * @str: The constant string to write
809 * @size: The size of the string.
811 int __trace_puts(unsigned long ip
, const char *str
, int size
)
813 struct ring_buffer_event
*event
;
814 struct ring_buffer
*buffer
;
815 struct print_entry
*entry
;
816 unsigned long irq_flags
;
820 if (!(global_trace
.trace_flags
& TRACE_ITER_PRINTK
))
823 pc
= preempt_count();
825 if (unlikely(tracing_selftest_running
|| tracing_disabled
))
828 alloc
= sizeof(*entry
) + size
+ 2; /* possible \n added */
830 local_save_flags(irq_flags
);
831 buffer
= global_trace
.trace_buffer
.buffer
;
832 event
= __trace_buffer_lock_reserve(buffer
, TRACE_PRINT
, alloc
,
837 entry
= ring_buffer_event_data(event
);
840 memcpy(&entry
->buf
, str
, size
);
842 /* Add a newline if necessary */
843 if (entry
->buf
[size
- 1] != '\n') {
844 entry
->buf
[size
] = '\n';
845 entry
->buf
[size
+ 1] = '\0';
847 entry
->buf
[size
] = '\0';
849 __buffer_unlock_commit(buffer
, event
);
850 ftrace_trace_stack(&global_trace
, buffer
, irq_flags
, 4, pc
, NULL
);
854 EXPORT_SYMBOL_GPL(__trace_puts
);
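
/*
 * Usage note (sketch, not part of this file): callers normally reach
 * __trace_puts()/__trace_bputs() through the trace_puts() macro
 * (defined alongside trace_printk()), which picks the cheaper
 * __trace_bputs() when the string is a compile-time constant.  The
 * function name below is illustrative only.
 */
#if 0
static void example_probe(void)
{
	/* constant string: expands to a __trace_bputs() call */
	trace_puts("hit the slow path\n");
}
#endif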
857 * __trace_bputs - write the pointer to a constant string into trace buffer
858 * @ip: The address of the caller
859 * @str: The constant string to write to the buffer to
861 int __trace_bputs(unsigned long ip
, const char *str
)
863 struct ring_buffer_event
*event
;
864 struct ring_buffer
*buffer
;
865 struct bputs_entry
*entry
;
866 unsigned long irq_flags
;
867 int size
= sizeof(struct bputs_entry
);
870 if (!(global_trace
.trace_flags
& TRACE_ITER_PRINTK
))
873 pc
= preempt_count();
875 if (unlikely(tracing_selftest_running
|| tracing_disabled
))
878 local_save_flags(irq_flags
);
879 buffer
= global_trace
.trace_buffer
.buffer
;
880 event
= __trace_buffer_lock_reserve(buffer
, TRACE_BPUTS
, size
,
885 entry
= ring_buffer_event_data(event
);
889 __buffer_unlock_commit(buffer
, event
);
890 ftrace_trace_stack(&global_trace
, buffer
, irq_flags
, 4, pc
, NULL
);
894 EXPORT_SYMBOL_GPL(__trace_bputs
);
896 #ifdef CONFIG_TRACER_SNAPSHOT
897 static void tracing_snapshot_instance(struct trace_array
*tr
)
899 struct tracer
*tracer
= tr
->current_trace
;
903 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
904 internal_trace_puts("*** snapshot is being ignored ***\n");
908 if (!tr
->allocated_snapshot
) {
909 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
910 internal_trace_puts("*** stopping trace here! ***\n");
915 /* Note, snapshot can not be used when the tracer uses it */
916 if (tracer
->use_max_tr
) {
917 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
918 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
922 local_irq_save(flags
);
923 update_max_tr(tr
, current
, smp_processor_id());
924 local_irq_restore(flags
);
928 * tracing_snapshot - take a snapshot of the current buffer.
930 * This causes a swap between the snapshot buffer and the current live
931 * tracing buffer. You can use this to take snapshots of the live
932 * trace when some condition is triggered, but continue to trace.
934 * Note, make sure to allocate the snapshot with either
935 * a tracing_snapshot_alloc(), or by doing it manually
936 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
938 * If the snapshot buffer is not allocated, it will stop tracing.
939 * Basically making a permanent snapshot.
941 void tracing_snapshot(void)
943 struct trace_array
*tr
= &global_trace
;
945 tracing_snapshot_instance(tr
);
947 EXPORT_SYMBOL_GPL(tracing_snapshot
);
949 static int resize_buffer_duplicate_size(struct trace_buffer
*trace_buf
,
950 struct trace_buffer
*size_buf
, int cpu_id
);
951 static void set_buffer_entries(struct trace_buffer
*buf
, unsigned long val
);
953 static int alloc_snapshot(struct trace_array
*tr
)
957 if (!tr
->allocated_snapshot
) {
959 /* allocate spare buffer */
960 ret
= resize_buffer_duplicate_size(&tr
->max_buffer
,
961 &tr
->trace_buffer
, RING_BUFFER_ALL_CPUS
);
965 tr
->allocated_snapshot
= true;
static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}
985 * tracing_alloc_snapshot - allocate snapshot buffer.
987 * This only allocates the snapshot buffer if it isn't already
988 * allocated - it doesn't also take a snapshot.
990 * This is meant to be used in cases where the snapshot buffer needs
991 * to be set up for events that can't sleep but need to be able to
992 * trigger a snapshot.
994 int tracing_alloc_snapshot(void)
996 struct trace_array
*tr
= &global_trace
;
999 ret
= alloc_snapshot(tr
);
1004 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot
);
1007 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1009 * This is similar to tracing_snapshot(), but it will allocate the
1010 * snapshot buffer if it isn't already allocated. Use this only
1011 * where it is safe to sleep, as the allocation may sleep.
1013 * This causes a swap between the snapshot buffer and the current live
1014 * tracing buffer. You can use this to take snapshots of the live
1015 * trace when some condition is triggered, but continue to trace.
1017 void tracing_snapshot_alloc(void)
1021 ret
= tracing_alloc_snapshot();
1027 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc
);
1029 void tracing_snapshot(void)
1031 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1033 EXPORT_SYMBOL_GPL(tracing_snapshot
);
1034 int tracing_alloc_snapshot(void)
1036 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1039 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot
);
1040 void tracing_snapshot_alloc(void)
1045 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc
);
1046 #endif /* CONFIG_TRACER_SNAPSHOT */
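
/*
 * Usage sketch (not part of this file): a typical pattern for the
 * snapshot API documented above -- allocate the spare buffer once from
 * sleepable context, then trigger snapshots from wherever the condition
 * of interest fires.  The function name is illustrative.
 */
#if 0
static void example_snapshot_user(void)
{
	if (tracing_alloc_snapshot() < 0)	/* may sleep */
		return;

	/* ... later, possibly from atomic context ... */
	tracing_snapshot();	/* swap the live buffer with the spare one */
}
#endif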
1048 void tracer_tracing_off(struct trace_array
*tr
)
1050 if (tr
->trace_buffer
.buffer
)
1051 ring_buffer_record_off(tr
->trace_buffer
.buffer
);
1053 * This flag is looked at when buffers haven't been allocated
1054 * yet, or by some tracers (like irqsoff), that just want to
1055 * know if the ring buffer has been disabled, but it can handle
1056 * races of where it gets disabled but we still do a record.
1057 * As the check is in the fast path of the tracers, it is more
1058 * important to be fast than accurate.
1060 tr
->buffer_disabled
= 1;
1061 /* Make the flag seen by readers */
/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);
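
/*
 * Usage sketch (not part of this file): tracing_off()/tracing_on() are
 * handy for freezing the ring buffer the moment a bug is detected, so
 * the events leading up to it are preserved.  "bad_condition" below is
 * a stand-in for whatever check matters.
 */
#if 0
static void example_check(int bad_condition)
{
	if (WARN_ON(bad_condition))
		tracing_off();	/* keep the trace that led here */
}
#endif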
1079 void disable_trace_on_warning(void)
1081 if (__disable_trace_on_warning
)
1086 * tracer_tracing_is_on - show real state of ring buffer enabled
1087 * @tr : the trace array to know if ring buffer is enabled
1089 * Shows real state of the ring buffer if it is enabled or not.
1091 int tracer_tracing_is_on(struct trace_array
*tr
)
1093 if (tr
->trace_buffer
.buffer
)
1094 return ring_buffer_record_is_on(tr
->trace_buffer
.buffer
);
1095 return !tr
->buffer_disabled
;
1099 * tracing_is_on - show state of ring buffers enabled
1101 int tracing_is_on(void)
1103 return tracer_tracing_is_on(&global_trace
);
1105 EXPORT_SYMBOL_GPL(tracing_is_on
);
1107 static int __init
set_buf_size(char *str
)
1109 unsigned long buf_size
;
1113 buf_size
= memparse(str
, &str
);
1114 /* nr_entries can not be zero */
1117 trace_buf_size
= buf_size
;
1120 __setup("trace_buf_size=", set_buf_size
);
1122 static int __init
set_tracing_thresh(char *str
)
1124 unsigned long threshold
;
1129 ret
= kstrtoul(str
, 0, &threshold
);
1132 tracing_thresh
= threshold
* 1000;
1135 __setup("tracing_thresh=", set_tracing_thresh
);
1137 unsigned long nsecs_to_usecs(unsigned long nsecs
)
1139 return nsecs
/ 1000;
/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the evals (enum) were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	{ ktime_get_boot_fast_ns,	"boot",		1 },
	ARCH_TRACE_CLOCKS
};
/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
	parser->buffer = NULL;
}
/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;
1216 trace_parser_clear(parser
);
1218 ret
= get_user(ch
, ubuf
++);
1226 * The parser is not finished with the last write,
1227 * continue reading the user input without skipping spaces.
1229 if (!parser
->cont
) {
1230 /* skip white space */
1231 while (cnt
&& isspace(ch
)) {
1232 ret
= get_user(ch
, ubuf
++);
1239 /* only spaces were written */
1249 /* read the non-space input */
1250 while (cnt
&& !isspace(ch
)) {
1251 if (parser
->idx
< parser
->size
- 1)
1252 parser
->buffer
[parser
->idx
++] = ch
;
1257 ret
= get_user(ch
, ubuf
++);
1264 /* We either got finished input or we have to wait for another call. */
1266 parser
->buffer
[parser
->idx
] = 0;
1267 parser
->cont
= false;
1268 } else if (parser
->idx
< parser
->size
- 1) {
1269 parser
->cont
= true;
1270 parser
->buffer
[parser
->idx
++] = ch
;
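
/*
 * Sketch (not part of this file): the usual life cycle of the parser
 * helpers above, as used by write() handlers of tracefs files that take
 * a space-separated list of words.  The names below are illustrative.
 */
#if 0
static ssize_t example_write(struct file *filp, const char __user *ubuf,
			     size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t ret;

	if (trace_parser_get_init(&parser, 64))
		return -ENOMEM;

	ret = trace_get_user(&parser, ubuf, cnt, ppos);
	if (ret >= 0 && trace_parser_loaded(&parser)) {
		parser.buffer[parser.idx] = 0;	/* terminate the word */
		pr_info("got word: %s\n", parser.buffer);
	}

	trace_parser_put(&parser);
	return ret;
}
#endif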
1283 /* TODO add a seq_buf_to_buffer() */
1284 static ssize_t
trace_seq_to_buffer(struct trace_seq
*s
, void *buf
, size_t cnt
)
1288 if (trace_seq_used(s
) <= s
->seq
.readpos
)
1291 len
= trace_seq_used(s
) - s
->seq
.readpos
;
1294 memcpy(buf
, s
->buffer
+ s
->seq
.readpos
, cnt
);
1296 s
->seq
.readpos
+= cnt
;
1300 unsigned long __read_mostly tracing_thresh
;
1302 #ifdef CONFIG_TRACER_MAX_TRACE
1304 * Copy the new maximum trace into the separate maximum-trace
1305 * structure. (this way the maximum trace is permanently saved,
1306 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1309 __update_max_tr(struct trace_array
*tr
, struct task_struct
*tsk
, int cpu
)
1311 struct trace_buffer
*trace_buf
= &tr
->trace_buffer
;
1312 struct trace_buffer
*max_buf
= &tr
->max_buffer
;
1313 struct trace_array_cpu
*data
= per_cpu_ptr(trace_buf
->data
, cpu
);
1314 struct trace_array_cpu
*max_data
= per_cpu_ptr(max_buf
->data
, cpu
);
1317 max_buf
->time_start
= data
->preempt_timestamp
;
1319 max_data
->saved_latency
= tr
->max_latency
;
1320 max_data
->critical_start
= data
->critical_start
;
1321 max_data
->critical_end
= data
->critical_end
;
1323 memcpy(max_data
->comm
, tsk
->comm
, TASK_COMM_LEN
);
1324 max_data
->pid
= tsk
->pid
;
1326 * If tsk == current, then use current_uid(), as that does not use
1327 * RCU. The irq tracer can be called out of RCU scope.
1330 max_data
->uid
= current_uid();
1332 max_data
->uid
= task_uid(tsk
);
1334 max_data
->nice
= tsk
->static_prio
- 20 - MAX_RT_PRIO
;
1335 max_data
->policy
= tsk
->policy
;
1336 max_data
->rt_priority
= tsk
->rt_priority
;
1338 /* record this tasks comm */
1339 tracing_record_cmdline(tsk
);
1343 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1345 * @tsk: the task with the latency
1346 * @cpu: The cpu that initiated the trace.
1348 * Flip the buffers between the @tr and the max_tr and record information
1349 * about which task was the cause of this latency.
1352 update_max_tr(struct trace_array
*tr
, struct task_struct
*tsk
, int cpu
)
1354 struct ring_buffer
*buf
;
1359 WARN_ON_ONCE(!irqs_disabled());
1361 if (!tr
->allocated_snapshot
) {
1362 /* Only the nop tracer should hit this when disabling */
1363 WARN_ON_ONCE(tr
->current_trace
!= &nop_trace
);
1367 arch_spin_lock(&tr
->max_lock
);
1369 buf
= tr
->trace_buffer
.buffer
;
1370 tr
->trace_buffer
.buffer
= tr
->max_buffer
.buffer
;
1371 tr
->max_buffer
.buffer
= buf
;
1373 __update_max_tr(tr
, tsk
, cpu
);
1374 arch_spin_unlock(&tr
->max_lock
);
1378 * update_max_tr_single - only copy one trace over, and reset the rest
1380 * @tsk - task with the latency
1381 * @cpu - the cpu of the buffer to copy.
1383 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1386 update_max_tr_single(struct trace_array
*tr
, struct task_struct
*tsk
, int cpu
)
1393 WARN_ON_ONCE(!irqs_disabled());
1394 if (!tr
->allocated_snapshot
) {
1395 /* Only the nop tracer should hit this when disabling */
1396 WARN_ON_ONCE(tr
->current_trace
!= &nop_trace
);
1400 arch_spin_lock(&tr
->max_lock
);
1402 ret
= ring_buffer_swap_cpu(tr
->max_buffer
.buffer
, tr
->trace_buffer
.buffer
, cpu
);
1404 if (ret
== -EBUSY
) {
1406 * We failed to swap the buffer due to a commit taking
1407 * place on this CPU. We fail to record, but we reset
1408 * the max trace buffer (no one writes directly to it)
1409 * and flag that it failed.
1411 trace_array_printk_buf(tr
->max_buffer
.buffer
, _THIS_IP_
,
1412 "Failed to swap buffers due to commit in progress\n");
1415 WARN_ON_ONCE(ret
&& ret
!= -EAGAIN
&& ret
!= -EBUSY
);
1417 __update_max_tr(tr
, tsk
, cpu
);
1418 arch_spin_unlock(&tr
->max_lock
);
1420 #endif /* CONFIG_TRACER_MAX_TRACE */
1422 static int wait_on_pipe(struct trace_iterator
*iter
, bool full
)
1424 /* Iterators are static, they should be filled or empty */
1425 if (trace_buffer_iter(iter
, iter
->cpu_file
))
1428 return ring_buffer_wait(iter
->trace_buffer
->buffer
, iter
->cpu_file
,
1432 #ifdef CONFIG_FTRACE_STARTUP_TEST
1433 static bool selftests_can_run
;
1435 struct trace_selftests
{
1436 struct list_head list
;
1437 struct tracer
*type
;
1440 static LIST_HEAD(postponed_selftests
);
1442 static int save_selftest(struct tracer
*type
)
1444 struct trace_selftests
*selftest
;
1446 selftest
= kmalloc(sizeof(*selftest
), GFP_KERNEL
);
1450 selftest
->type
= type
;
1451 list_add(&selftest
->list
, &postponed_selftests
);
1455 static int run_tracer_selftest(struct tracer
*type
)
1457 struct trace_array
*tr
= &global_trace
;
1458 struct tracer
*saved_tracer
= tr
->current_trace
;
1461 if (!type
->selftest
|| tracing_selftest_disabled
)
1465 * If a tracer registers early in boot up (before scheduling is
1466 * initialized and such), then do not run its selftests yet.
1467 * Instead, run it a little later in the boot process.
1469 if (!selftests_can_run
)
1470 return save_selftest(type
);
1473 * Run a selftest on this tracer.
1474 * Here we reset the trace buffer, and set the current
1475 * tracer to be this tracer. The tracer can then run some
1476 * internal tracing to verify that everything is in order.
1477 * If we fail, we do not register this tracer.
1479 tracing_reset_online_cpus(&tr
->trace_buffer
);
1481 tr
->current_trace
= type
;
1483 #ifdef CONFIG_TRACER_MAX_TRACE
1484 if (type
->use_max_tr
) {
1485 /* If we expanded the buffers, make sure the max is expanded too */
1486 if (ring_buffer_expanded
)
1487 ring_buffer_resize(tr
->max_buffer
.buffer
, trace_buf_size
,
1488 RING_BUFFER_ALL_CPUS
);
1489 tr
->allocated_snapshot
= true;
1493 /* the test is responsible for initializing and enabling */
1494 pr_info("Testing tracer %s: ", type
->name
);
1495 ret
= type
->selftest(type
, tr
);
1496 /* the test is responsible for resetting too */
1497 tr
->current_trace
= saved_tracer
;
1499 printk(KERN_CONT
"FAILED!\n");
1500 /* Add the warning after printing 'FAILED' */
1504 /* Only reset on passing, to avoid touching corrupted buffers */
1505 tracing_reset_online_cpus(&tr
->trace_buffer
);
1507 #ifdef CONFIG_TRACER_MAX_TRACE
1508 if (type
->use_max_tr
) {
1509 tr
->allocated_snapshot
= false;
1511 /* Shrink the max buffer again */
1512 if (ring_buffer_expanded
)
1513 ring_buffer_resize(tr
->max_buffer
.buffer
, 1,
1514 RING_BUFFER_ALL_CPUS
);
1518 printk(KERN_CONT
"PASSED\n");
1522 static __init
int init_trace_selftests(void)
1524 struct trace_selftests
*p
, *n
;
1525 struct tracer
*t
, **last
;
1528 selftests_can_run
= true;
1530 mutex_lock(&trace_types_lock
);
1532 if (list_empty(&postponed_selftests
))
1535 pr_info("Running postponed tracer tests:\n");
1537 list_for_each_entry_safe(p
, n
, &postponed_selftests
, list
) {
1538 ret
= run_tracer_selftest(p
->type
);
1539 /* If the test fails, then warn and remove from available_tracers */
1541 WARN(1, "tracer: %s failed selftest, disabling\n",
1543 last
= &trace_types
;
1544 for (t
= trace_types
; t
; t
= t
->next
) {
1557 mutex_unlock(&trace_types_lock
);
1561 core_initcall(init_trace_selftests
);
1563 static inline int run_tracer_selftest(struct tracer
*type
)
1567 #endif /* CONFIG_FTRACE_STARTUP_TEST */
1569 static void add_tracer_options(struct trace_array
*tr
, struct tracer
*t
);
1571 static void __init
apply_trace_boot_options(void);
/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int __init register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;
1585 pr_info("Tracer must have a name\n");
1589 if (strlen(type
->name
) >= MAX_TRACER_SIZE
) {
1590 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE
);
1594 mutex_lock(&trace_types_lock
);
1596 tracing_selftest_running
= true;
1598 for (t
= trace_types
; t
; t
= t
->next
) {
1599 if (strcmp(type
->name
, t
->name
) == 0) {
1601 pr_info("Tracer %s already registered\n",
1608 if (!type
->set_flag
)
1609 type
->set_flag
= &dummy_set_flag
;
1611 /*allocate a dummy tracer_flags*/
1612 type
->flags
= kmalloc(sizeof(*type
->flags
), GFP_KERNEL
);
1617 type
->flags
->val
= 0;
1618 type
->flags
->opts
= dummy_tracer_opt
;
1620 if (!type
->flags
->opts
)
1621 type
->flags
->opts
= dummy_tracer_opt
;
1623 /* store the tracer for __set_tracer_option */
1624 type
->flags
->trace
= type
;
1626 ret
= run_tracer_selftest(type
);
1630 type
->next
= trace_types
;
1632 add_tracer_options(&global_trace
, type
);
1635 tracing_selftest_running
= false;
1636 mutex_unlock(&trace_types_lock
);
1638 if (ret
|| !default_bootup_tracer
)
1641 if (strncmp(default_bootup_tracer
, type
->name
, MAX_TRACER_SIZE
))
1644 printk(KERN_INFO
"Starting tracer '%s'\n", type
->name
);
1645 /* Do we want this tracer to start on bootup? */
1646 tracing_set_tracer(&global_trace
, type
->name
);
1647 default_bootup_tracer
= NULL
;
	apply_trace_boot_options();

	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}
1662 void tracing_reset(struct trace_buffer
*buf
, int cpu
)
1664 struct ring_buffer
*buffer
= buf
->buffer
;
1669 ring_buffer_record_disable(buffer
);
1671 /* Make sure all commits have finished */
1672 synchronize_sched();
1673 ring_buffer_reset_cpu(buffer
, cpu
);
1675 ring_buffer_record_enable(buffer
);
1678 void tracing_reset_online_cpus(struct trace_buffer
*buf
)
1680 struct ring_buffer
*buffer
= buf
->buffer
;
1686 ring_buffer_record_disable(buffer
);
1688 /* Make sure all commits have finished */
1689 synchronize_sched();
1691 buf
->time_start
= buffer_ftrace_now(buf
, buf
->cpu
);
1693 for_each_online_cpu(cpu
)
1694 ring_buffer_reset_cpu(buffer
, cpu
);
1696 ring_buffer_record_enable(buffer
);
1699 /* Must have trace_types_lock held */
1700 void tracing_reset_all_online_cpus(void)
1702 struct trace_array
*tr
;
1704 list_for_each_entry(tr
, &ftrace_trace_arrays
, list
) {
1705 if (!tr
->clear_trace
)
1707 tr
->clear_trace
= false;
1708 tracing_reset_online_cpus(&tr
->trace_buffer
);
1709 #ifdef CONFIG_TRACER_MAX_TRACE
1710 tracing_reset_online_cpus(&tr
->max_buffer
);
1715 static int *tgid_map
;
1717 #define SAVED_CMDLINES_DEFAULT 128
1718 #define NO_CMDLINE_MAP UINT_MAX
1719 static arch_spinlock_t trace_cmdline_lock
= __ARCH_SPIN_LOCK_UNLOCKED
;
1720 struct saved_cmdlines_buffer
{
1721 unsigned map_pid_to_cmdline
[PID_MAX_DEFAULT
+1];
1722 unsigned *map_cmdline_to_pid
;
1723 unsigned cmdline_num
;
1725 char *saved_cmdlines
;
1727 static struct saved_cmdlines_buffer
*savedcmd
;
1729 /* temporary disable recording */
1730 static atomic_t trace_record_taskinfo_disabled __read_mostly
;
1732 static inline char *get_saved_cmdlines(int idx
)
1734 return &savedcmd
->saved_cmdlines
[idx
* TASK_COMM_LEN
];
1737 static inline void set_cmdline(int idx
, const char *cmdline
)
1739 memcpy(get_saved_cmdlines(idx
), cmdline
, TASK_COMM_LEN
);
1742 static int allocate_cmdlines_buffer(unsigned int val
,
1743 struct saved_cmdlines_buffer
*s
)
1745 s
->map_cmdline_to_pid
= kmalloc(val
* sizeof(*s
->map_cmdline_to_pid
),
1747 if (!s
->map_cmdline_to_pid
)
1750 s
->saved_cmdlines
= kmalloc(val
* TASK_COMM_LEN
, GFP_KERNEL
);
1751 if (!s
->saved_cmdlines
) {
1752 kfree(s
->map_cmdline_to_pid
);
1757 s
->cmdline_num
= val
;
1758 memset(&s
->map_pid_to_cmdline
, NO_CMDLINE_MAP
,
1759 sizeof(s
->map_pid_to_cmdline
));
1760 memset(s
->map_cmdline_to_pid
, NO_CMDLINE_MAP
,
1761 val
* sizeof(*s
->map_cmdline_to_pid
));
1766 static int trace_create_savedcmd(void)
1770 savedcmd
= kmalloc(sizeof(*savedcmd
), GFP_KERNEL
);
1774 ret
= allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT
, savedcmd
);
1784 int is_tracing_stopped(void)
1786 return global_trace
.stop_count
;
1790 * tracing_start - quick start of the tracer
1792 * If tracing is enabled but was stopped by tracing_stop,
1793 * this will start the tracer back up.
1795 void tracing_start(void)
1797 struct ring_buffer
*buffer
;
1798 unsigned long flags
;
1800 if (tracing_disabled
)
1803 raw_spin_lock_irqsave(&global_trace
.start_lock
, flags
);
1804 if (--global_trace
.stop_count
) {
1805 if (global_trace
.stop_count
< 0) {
1806 /* Someone screwed up their debugging */
1808 global_trace
.stop_count
= 0;
1813 /* Prevent the buffers from switching */
1814 arch_spin_lock(&global_trace
.max_lock
);
1816 buffer
= global_trace
.trace_buffer
.buffer
;
1818 ring_buffer_record_enable(buffer
);
1820 #ifdef CONFIG_TRACER_MAX_TRACE
1821 buffer
= global_trace
.max_buffer
.buffer
;
1823 ring_buffer_record_enable(buffer
);
1826 arch_spin_unlock(&global_trace
.max_lock
);
1829 raw_spin_unlock_irqrestore(&global_trace
.start_lock
, flags
);
1832 static void tracing_start_tr(struct trace_array
*tr
)
1834 struct ring_buffer
*buffer
;
1835 unsigned long flags
;
1837 if (tracing_disabled
)
1840 /* If global, we need to also start the max tracer */
1841 if (tr
->flags
& TRACE_ARRAY_FL_GLOBAL
)
1842 return tracing_start();
1844 raw_spin_lock_irqsave(&tr
->start_lock
, flags
);
1846 if (--tr
->stop_count
) {
1847 if (tr
->stop_count
< 0) {
1848 /* Someone screwed up their debugging */
1855 buffer
= tr
->trace_buffer
.buffer
;
1857 ring_buffer_record_enable(buffer
);
1860 raw_spin_unlock_irqrestore(&tr
->start_lock
, flags
);
1864 * tracing_stop - quick stop of the tracer
1866 * Light weight way to stop tracing. Use in conjunction with
1869 void tracing_stop(void)
1871 struct ring_buffer
*buffer
;
1872 unsigned long flags
;
1874 raw_spin_lock_irqsave(&global_trace
.start_lock
, flags
);
1875 if (global_trace
.stop_count
++)
1878 /* Prevent the buffers from switching */
1879 arch_spin_lock(&global_trace
.max_lock
);
1881 buffer
= global_trace
.trace_buffer
.buffer
;
1883 ring_buffer_record_disable(buffer
);
1885 #ifdef CONFIG_TRACER_MAX_TRACE
1886 buffer
= global_trace
.max_buffer
.buffer
;
1888 ring_buffer_record_disable(buffer
);
1891 arch_spin_unlock(&global_trace
.max_lock
);
1894 raw_spin_unlock_irqrestore(&global_trace
.start_lock
, flags
);
1897 static void tracing_stop_tr(struct trace_array
*tr
)
1899 struct ring_buffer
*buffer
;
1900 unsigned long flags
;
1902 /* If global, we need to also stop the max tracer */
1903 if (tr
->flags
& TRACE_ARRAY_FL_GLOBAL
)
1904 return tracing_stop();
1906 raw_spin_lock_irqsave(&tr
->start_lock
, flags
);
1907 if (tr
->stop_count
++)
1910 buffer
= tr
->trace_buffer
.buffer
;
1912 ring_buffer_record_disable(buffer
);
1915 raw_spin_unlock_irqrestore(&tr
->start_lock
, flags
);
1918 static int trace_save_cmdline(struct task_struct
*tsk
)
1922 /* treat recording of idle task as a success */
1926 if (unlikely(tsk
->pid
> PID_MAX_DEFAULT
))
1930 * It's not the end of the world if we don't get
1931 * the lock, but we also don't want to spin
1932 * nor do we want to disable interrupts,
1933 * so if we miss here, then better luck next time.
1935 if (!arch_spin_trylock(&trace_cmdline_lock
))
1938 idx
= savedcmd
->map_pid_to_cmdline
[tsk
->pid
];
1939 if (idx
== NO_CMDLINE_MAP
) {
1940 idx
= (savedcmd
->cmdline_idx
+ 1) % savedcmd
->cmdline_num
;
1943 * Check whether the cmdline buffer at idx has a pid
1944 * mapped. We are going to overwrite that entry so we
1945 * need to clear the map_pid_to_cmdline. Otherwise we
1946 * would read the new comm for the old pid.
1948 pid
= savedcmd
->map_cmdline_to_pid
[idx
];
1949 if (pid
!= NO_CMDLINE_MAP
)
1950 savedcmd
->map_pid_to_cmdline
[pid
] = NO_CMDLINE_MAP
;
1952 savedcmd
->map_cmdline_to_pid
[idx
] = tsk
->pid
;
1953 savedcmd
->map_pid_to_cmdline
[tsk
->pid
] = idx
;
1955 savedcmd
->cmdline_idx
= idx
;
1958 set_cmdline(idx
, tsk
->comm
);
1960 arch_spin_unlock(&trace_cmdline_lock
);
1965 static void __trace_find_cmdline(int pid
, char comm
[])
1970 strcpy(comm
, "<idle>");
1974 if (WARN_ON_ONCE(pid
< 0)) {
1975 strcpy(comm
, "<XXX>");
1979 if (pid
> PID_MAX_DEFAULT
) {
1980 strcpy(comm
, "<...>");
1984 map
= savedcmd
->map_pid_to_cmdline
[pid
];
1985 if (map
!= NO_CMDLINE_MAP
)
1986 strlcpy(comm
, get_saved_cmdlines(map
), TASK_COMM_LEN
);
1988 strcpy(comm
, "<...>");
1991 void trace_find_cmdline(int pid
, char comm
[])
1994 arch_spin_lock(&trace_cmdline_lock
);
1996 __trace_find_cmdline(pid
, comm
);
1998 arch_spin_unlock(&trace_cmdline_lock
);
2002 int trace_find_tgid(int pid
)
2004 if (unlikely(!tgid_map
|| !pid
|| pid
> PID_MAX_DEFAULT
))
2007 return tgid_map
[pid
];
2010 static int trace_save_tgid(struct task_struct
*tsk
)
2012 /* treat recording of idle task as a success */
2016 if (unlikely(!tgid_map
|| tsk
->pid
> PID_MAX_DEFAULT
))
2019 tgid_map
[tsk
->pid
] = tsk
->tgid
;
2023 static bool tracing_record_taskinfo_skip(int flags
)
2025 if (unlikely(!(flags
& (TRACE_RECORD_CMDLINE
| TRACE_RECORD_TGID
))))
2027 if (atomic_read(&trace_record_taskinfo_disabled
) || !tracing_is_on())
2029 if (!__this_cpu_read(trace_taskinfo_save
))
2035 * tracing_record_taskinfo - record the task info of a task
2037 * @task - task to record
2038 * @flags - TRACE_RECORD_CMDLINE for recording comm
2039 * - TRACE_RECORD_TGID for recording tgid
2041 void tracing_record_taskinfo(struct task_struct
*task
, int flags
)
2045 if (tracing_record_taskinfo_skip(flags
))
2049 * Record as much task information as possible. If some fail, continue
2050 * to try to record the others.
2052 done
= !(flags
& TRACE_RECORD_CMDLINE
) || trace_save_cmdline(task
);
2053 done
&= !(flags
& TRACE_RECORD_TGID
) || trace_save_tgid(task
);
2055 /* If recording any information failed, retry again soon. */
2059 __this_cpu_write(trace_taskinfo_save
, false);
2063 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2065 * @prev - previous task during sched_switch
2066 * @next - next task during sched_switch
2067 * @flags - TRACE_RECORD_CMDLINE for recording comm
2068 * TRACE_RECORD_TGID for recording tgid
2070 void tracing_record_taskinfo_sched_switch(struct task_struct
*prev
,
2071 struct task_struct
*next
, int flags
)
2075 if (tracing_record_taskinfo_skip(flags
))
2079 * Record as much task information as possible. If some fail, continue
2080 * to try to record the others.
2082 done
= !(flags
& TRACE_RECORD_CMDLINE
) || trace_save_cmdline(prev
);
2083 done
&= !(flags
& TRACE_RECORD_CMDLINE
) || trace_save_cmdline(next
);
2084 done
&= !(flags
& TRACE_RECORD_TGID
) || trace_save_tgid(prev
);
2085 done
&= !(flags
& TRACE_RECORD_TGID
) || trace_save_tgid(next
);
2087 /* If recording any information failed, retry again soon. */
2091 __this_cpu_write(trace_taskinfo_save
, false);
2094 /* Helpers to record a specific task information */
2095 void tracing_record_cmdline(struct task_struct
*task
)
2097 tracing_record_taskinfo(task
, TRACE_RECORD_CMDLINE
);
2100 void tracing_record_tgid(struct task_struct
*task
)
2102 tracing_record_taskinfo(task
, TRACE_RECORD_TGID
);
/*
 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
 * simplifies those functions and keeps them in sync.
 */
enum print_line_t trace_handle_return(struct trace_seq *s)
{
	return trace_seq_has_overflowed(s) ?
		TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
}
EXPORT_SYMBOL_GPL(trace_handle_return);
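
/*
 * Sketch (not part of this file): how output routines use the helper
 * above.  An event's ->trace() callback writes into iter->seq and then
 * lets trace_handle_return() collapse the overflow check into the
 * right return value.  The names below are illustrative.
 */
#if 0
static enum print_line_t example_event_trace(struct trace_iterator *iter,
					     int flags,
					     struct trace_event *event)
{
	trace_seq_printf(&iter->seq, "example event on cpu %d\n", iter->cpu);

	return trace_handle_return(&iter->seq);
}
#endif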
2118 tracing_generic_entry_update(struct trace_entry
*entry
, unsigned long flags
,
2121 struct task_struct
*tsk
= current
;
2123 entry
->preempt_count
= pc
& 0xff;
2124 entry
->pid
= (tsk
) ? tsk
->pid
: 0;
2126 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2127 (irqs_disabled_flags(flags
) ? TRACE_FLAG_IRQS_OFF
: 0) |
2129 TRACE_FLAG_IRQS_NOSUPPORT
|
2131 ((pc
& NMI_MASK
) ? TRACE_FLAG_NMI
: 0) |
2132 ((pc
& HARDIRQ_MASK
) ? TRACE_FLAG_HARDIRQ
: 0) |
2133 ((pc
& SOFTIRQ_OFFSET
) ? TRACE_FLAG_SOFTIRQ
: 0) |
2134 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED
: 0) |
2135 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED
: 0);
2137 EXPORT_SYMBOL_GPL(tracing_generic_entry_update
);
2139 struct ring_buffer_event
*
2140 trace_buffer_lock_reserve(struct ring_buffer
*buffer
,
2143 unsigned long flags
, int pc
)
2145 return __trace_buffer_lock_reserve(buffer
, type
, len
, flags
, pc
);
2148 DEFINE_PER_CPU(struct ring_buffer_event
*, trace_buffered_event
);
2149 DEFINE_PER_CPU(int, trace_buffered_event_cnt
);
2150 static int trace_buffered_event_ref
;
2153 * trace_buffered_event_enable - enable buffering events
2155 * When events are being filtered, it is quicker to use a temporary
2156 * buffer to write the event data into if there's a likely chance
2157 * that it will not be committed. The discard of the ring buffer
2158 * is not as fast as committing, and is much slower than copying
2161 * When an event is to be filtered, allocate per cpu buffers to
2162 * write the event data into, and if the event is filtered and discarded
2163 * it is simply dropped, otherwise, the entire data is to be committed
2166 void trace_buffered_event_enable(void)
2168 struct ring_buffer_event
*event
;
2172 WARN_ON_ONCE(!mutex_is_locked(&event_mutex
));
2174 if (trace_buffered_event_ref
++)
2177 for_each_tracing_cpu(cpu
) {
2178 page
= alloc_pages_node(cpu_to_node(cpu
),
2179 GFP_KERNEL
| __GFP_NORETRY
, 0);
2183 event
= page_address(page
);
2184 memset(event
, 0, sizeof(*event
));
2186 per_cpu(trace_buffered_event
, cpu
) = event
;
2189 if (cpu
== smp_processor_id() &&
2190 this_cpu_read(trace_buffered_event
) !=
2191 per_cpu(trace_buffered_event
, cpu
))
2198 trace_buffered_event_disable();
2201 static void enable_trace_buffered_event(void *data
)
2203 /* Probably not needed, but do it anyway */
2205 this_cpu_dec(trace_buffered_event_cnt
);
2208 static void disable_trace_buffered_event(void *data
)
2210 this_cpu_inc(trace_buffered_event_cnt
);
2214 * trace_buffered_event_disable - disable buffering events
2216 * When a filter is removed, it is faster to not use the buffered
2217 * events, and to commit directly into the ring buffer. Free up
2218 * the temp buffers when there are no more users. This requires
2219 * special synchronization with current events.
2221 void trace_buffered_event_disable(void)
2225 WARN_ON_ONCE(!mutex_is_locked(&event_mutex
));
2227 if (WARN_ON_ONCE(!trace_buffered_event_ref
))
2230 if (--trace_buffered_event_ref
)
2234 /* For each CPU, set the buffer as used. */
2235 smp_call_function_many(tracing_buffer_mask
,
2236 disable_trace_buffered_event
, NULL
, 1);
2239 /* Wait for all current users to finish */
2240 synchronize_sched();
2242 for_each_tracing_cpu(cpu
) {
2243 free_page((unsigned long)per_cpu(trace_buffered_event
, cpu
));
2244 per_cpu(trace_buffered_event
, cpu
) = NULL
;
2247 * Make sure trace_buffered_event is NULL before clearing
2248 * trace_buffered_event_cnt.
2253 /* Do the work on each cpu */
2254 smp_call_function_many(tracing_buffer_mask
,
2255 enable_trace_buffered_event
, NULL
, 1);
2259 static struct ring_buffer
*temp_buffer
;
2261 struct ring_buffer_event
*
2262 trace_event_buffer_lock_reserve(struct ring_buffer
**current_rb
,
2263 struct trace_event_file
*trace_file
,
2264 int type
, unsigned long len
,
2265 unsigned long flags
, int pc
)
2267 struct ring_buffer_event
*entry
;
2270 *current_rb
= trace_file
->tr
->trace_buffer
.buffer
;
2272 if ((trace_file
->flags
&
2273 (EVENT_FILE_FL_SOFT_DISABLED
| EVENT_FILE_FL_FILTERED
)) &&
2274 (entry
= this_cpu_read(trace_buffered_event
))) {
2275 /* Try to use the per cpu buffer first */
2276 val
= this_cpu_inc_return(trace_buffered_event_cnt
);
2278 trace_event_setup(entry
, type
, flags
, pc
);
2279 entry
->array
[0] = len
;
2282 this_cpu_dec(trace_buffered_event_cnt
);
2285 entry
= __trace_buffer_lock_reserve(*current_rb
,
2286 type
, len
, flags
, pc
);
	/*
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It's recursion
	 * safe and will not be recorded anywhere.
	 */
2293 if (!entry
&& trace_file
->flags
& EVENT_FILE_FL_TRIGGER_COND
) {
2294 *current_rb
= temp_buffer
;
2295 entry
= __trace_buffer_lock_reserve(*current_rb
,
2296 type
, len
, flags
, pc
);
2300 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve
);
2302 static DEFINE_SPINLOCK(tracepoint_iter_lock
);
2303 static DEFINE_MUTEX(tracepoint_printk_mutex
);
2305 static void output_printk(struct trace_event_buffer
*fbuffer
)
2307 struct trace_event_call
*event_call
;
2308 struct trace_event
*event
;
2309 unsigned long flags
;
2310 struct trace_iterator
*iter
= tracepoint_print_iter
;
2312 /* We should never get here if iter is NULL */
2313 if (WARN_ON_ONCE(!iter
))
2316 event_call
= fbuffer
->trace_file
->event_call
;
2317 if (!event_call
|| !event_call
->event
.funcs
||
2318 !event_call
->event
.funcs
->trace
)
2321 event
= &fbuffer
->trace_file
->event_call
->event
;
2323 spin_lock_irqsave(&tracepoint_iter_lock
, flags
);
2324 trace_seq_init(&iter
->seq
);
2325 iter
->ent
= fbuffer
->entry
;
2326 event_call
->event
.funcs
->trace(iter
, 0, event
);
2327 trace_seq_putc(&iter
->seq
, 0);
2328 printk("%s", iter
->seq
.buffer
);
2330 spin_unlock_irqrestore(&tracepoint_iter_lock
, flags
);
2333 int tracepoint_printk_sysctl(struct ctl_table
*table
, int write
,
2334 void __user
*buffer
, size_t *lenp
,
2337 int save_tracepoint_printk
;
2340 mutex_lock(&tracepoint_printk_mutex
);
2341 save_tracepoint_printk
= tracepoint_printk
;
2343 ret
= proc_dointvec(table
, write
, buffer
, lenp
, ppos
);
2346 * This will force exiting early, as tracepoint_printk
2347 * is always zero when tracepoint_printk_iter is not allocated
2349 if (!tracepoint_print_iter
)
2350 tracepoint_printk
= 0;
2352 if (save_tracepoint_printk
== tracepoint_printk
)
2355 if (tracepoint_printk
)
2356 static_key_enable(&tracepoint_printk_key
.key
);
2358 static_key_disable(&tracepoint_printk_key
.key
);
2361 mutex_unlock(&tracepoint_printk_mutex
);
void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
{
	if (static_key_false(&tracepoint_printk_key.key))
		output_printk(fbuffer);

	event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
				    fbuffer->event, fbuffer->entry,
				    fbuffer->flags, fbuffer->pc);
}
EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
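/*
 * Illustrative sketch (not part of the original file): the generated
 * trace_event_raw_event_*() helpers are the usual callers of the
 * reserve/commit pair above, roughly along these lines ("my_entry"
 * stands in for the generated entry struct, field setup elided):
 *
 *	struct trace_event_buffer fbuffer;
 *	struct my_entry *entry;
 *
 *	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
 *					   sizeof(*entry));
 *	if (!entry)
 *		return;
 *	/ * fill in entry fields from the tracepoint arguments * /
 *	trace_event_buffer_commit(&fbuffer);
 */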
/*
 * Skip 3:
 *
 *   trace_buffer_unlock_commit_regs()
 *   trace_event_buffer_commit()
 *   trace_event_raw_event_xxx()
 */
# define STACK_SKIP 3

void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	/*
	 * If regs is not set, then skip the necessary functions.
	 * Note, we can still get here via blktrace, wakeup tracer
	 * and mmiotrace, but that's ok if they lose a function or
	 * two. They are not that meaningful.
	 */
	ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}
/*
 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
 */
void
trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
				   struct ring_buffer_event *event)
{
	__buffer_unlock_commit(buffer, event);
}
static void
trace_process_export(struct trace_export *export,
	       struct ring_buffer_event *event)
{
	struct trace_entry *entry;
	unsigned int size = 0;

	entry = ring_buffer_event_data(event);
	size = ring_buffer_event_length(event);
	export->write(export, entry, size);
}

static DEFINE_MUTEX(ftrace_export_lock);

static struct trace_export __rcu *ftrace_exports_list __read_mostly;

static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);

static inline void ftrace_exports_enable(void)
{
	static_branch_enable(&ftrace_exports_enabled);
}

static inline void ftrace_exports_disable(void)
{
	static_branch_disable(&ftrace_exports_enabled);
}
void ftrace_exports(struct ring_buffer_event *event)
{
	struct trace_export *export;

	preempt_disable_notrace();

	export = rcu_dereference_raw_notrace(ftrace_exports_list);
	while (export) {
		trace_process_export(export, event);

		export = rcu_dereference_raw_notrace(export->next);
	}

	preempt_enable_notrace();
}
static void
add_trace_export(struct trace_export **list, struct trace_export *export)
{
	rcu_assign_pointer(export->next, *list);
	/*
	 * We are entering export into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the export->next pointer is valid before another CPU sees
	 * the export pointer included into the list.
	 */
	rcu_assign_pointer(*list, export);
}

static int
rm_trace_export(struct trace_export **list, struct trace_export *export)
{
	struct trace_export **p;

	for (p = list; *p != NULL; p = &(*p)->next)
		if (*p == export)
			break;

	if (*p != export)
		return -1;

	rcu_assign_pointer(*p, (*p)->next);

	return 0;
}

static inline void
add_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	ftrace_exports_enable();

	add_trace_export(list, export);
}

static inline int
rm_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	int ret;

	ret = rm_trace_export(list, export);
	ftrace_exports_disable();

	return ret;
}
int register_ftrace_export(struct trace_export *export)
{
	if (WARN_ON_ONCE(!export->write))
		return -1;

	mutex_lock(&ftrace_export_lock);

	add_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(register_ftrace_export);

int unregister_ftrace_export(struct trace_export *export)
{
	int ret;

	mutex_lock(&ftrace_export_lock);

	ret = rm_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_export);
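/*
 * Illustrative sketch (not part of the original file): a minimal user of
 * the export hook above only has to provide a ->write() callback; the
 * my_export/my_write names here are made up for the example.
 *
 *	static void my_write(struct trace_export *export, const void *entry,
 *			     unsigned int size)
 *	{
 *		/ * copy the raw trace entry somewhere, e.g. a device FIFO * /
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_write,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */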
void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct trace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					    flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	if (!call_filter_check_discard(call, entry, buffer, event)) {
		if (static_branch_unlikely(&ftrace_exports_enabled))
			ftrace_exports(event);
		__buffer_unlock_commit(buffer, event);
	}
}
#ifdef CONFIG_STACKTRACE

#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
struct ftrace_stack {
	unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
};

static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);

static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs)
{
	struct trace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	int use_stack;
	int size = FTRACE_STACK_ENTRIES;

	trace.nr_entries = 0;
	trace.skip = skip;

	/*
	 * Add one, for this function and the call to save_stack_trace()
	 * If regs is set, then these functions will not be in the way.
	 */
#ifndef CONFIG_UNWINDER_ORC
	if (!regs)
		trace.skip++;
#endif

	/*
	 * Since events can happen in NMIs there's no safe way to
	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
	 * or NMI comes in, it will just have to use the default
	 * FTRACE_STACK_SIZE.
	 */
	preempt_disable_notrace();

	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
	/*
	 * We don't need any atomic variables, just a barrier.
	 * If an interrupt comes in, we don't care, because it would
	 * have exited and put the counter back to what we want.
	 * We just need a barrier to keep gcc from moving things
	 * around.
	 */
	barrier();
	if (use_stack == 1) {
		trace.entries = this_cpu_ptr(ftrace_stack.calls);
		trace.max_entries = FTRACE_STACK_MAX_ENTRIES;

		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);

		if (trace.nr_entries > size)
			size = trace.nr_entries;
	} else
		/* From now on, use_stack is a boolean */
		use_stack = 0;

	size *= sizeof(unsigned long);

	event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
					    sizeof(*entry) + size, flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	memset(&entry->caller, 0, size);

	if (use_stack)
		memcpy(&entry->caller, trace.entries,
		       trace.nr_entries * sizeof(unsigned long));
	else {
		trace.max_entries = FTRACE_STACK_ENTRIES;
		trace.entries = entry->caller;
		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);
	}

	entry->size = trace.nr_entries;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out:
	/* Again, don't let gcc optimize things here */
	barrier();
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
	if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
}
void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc)
{
	struct ring_buffer *buffer = tr->trace_buffer.buffer;

	if (rcu_is_watching()) {
		__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
		return;
	}

	/*
	 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
	 * but if the above rcu_is_watching() failed, then the NMI
	 * triggered someplace critical, and rcu_irq_enter() should
	 * not be called from NMI.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * It is possible that a function is being traced in a
	 * location that RCU is not watching. A call to
	 * rcu_irq_enter() will make sure that it is, but there's
	 * a few internal rcu functions that could be traced
	 * where that won't work either. In those cases, we just
	 * do nothing.
	 */
	if (unlikely(rcu_irq_enter_disabled()))
		return;

	rcu_irq_enter_irqson();
	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
	rcu_irq_exit_irqson();
}
/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 * @skip: Number of functions to skip (helper handlers)
 */
void trace_dump_stack(int skip)
{
	unsigned long flags;

	if (tracing_disabled || tracing_selftest_running)
		return;

	local_save_flags(flags);

#ifndef CONFIG_UNWINDER_ORC
	/* Skip 1 to skip this function. */
	skip++;
#endif
	__ftrace_trace_stack(global_trace.trace_buffer.buffer,
			     flags, skip, preempt_count(), NULL);
}
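/*
 * Illustrative sketch (not part of the original file): trace_dump_stack()
 * is meant to be called from anywhere in the kernel to drop the current
 * call chain into the trace buffer, e.g. while debugging a driver path
 * ("suspicious_condition" is a made-up placeholder):
 *
 *	if (suspicious_condition)
 *		trace_dump_stack(0);
 *
 * A non-zero skip just trims that many callers off the top of the
 * recorded trace.
 */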
static DEFINE_PER_CPU(int, user_stack_count);

void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	/*
	 * NMIs can not handle page faults, even with fix ups.
	 * The save user stack can (and often does) fault.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
	 */
	preempt_disable();
	if (__this_cpu_read(user_stack_count))
		goto out;

	__this_cpu_inc(user_stack_count);

	event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					    sizeof(*entry), flags, pc);
	if (!event)
		goto out_drop_count;
	entry = ring_buffer_event_data(event);

	entry->tgid = current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries = 0;
	trace.max_entries = FTRACE_STACK_ENTRIES;
	trace.skip = 0;
	trace.entries = entry->caller;

	save_stack_trace_user(&trace);
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out_drop_count:
	__this_cpu_dec(user_stack_count);
 out:
	preempt_enable();
}

#ifdef UNUSED
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
	ftrace_trace_userstack(tr, flags, preempt_count());
}
#endif /* UNUSED */

#endif /* CONFIG_STACKTRACE */
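/*
 * Illustrative note (not part of the original file): the stack helpers
 * above are driven by run-time options rather than called directly; with
 * tracefs mounted, something like
 *
 *	# echo 1 > /sys/kernel/tracing/options/stacktrace
 *	# echo 1 > /sys/kernel/tracing/options/userstacktrace
 *
 * asks for a kernel or user stack dump to be recorded along with events
 * committed through trace_buffer_unlock_commit_regs().
 */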
/* created for use with alloc_percpu */
struct trace_buffer_struct {
	int nesting;
	char buffer[4][TRACE_BUF_SIZE];
};

static struct trace_buffer_struct *trace_percpu_buffer;

/*
 * This allows for lockless recording.  If we're nested too deeply, then
 * this returns NULL.
 */
static char *get_trace_buf(void)
{
	struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);

	if (!buffer || buffer->nesting >= 4)
		return NULL;

	buffer->nesting++;

	/* Interrupts must see nesting incremented before we use the buffer */
	barrier();
	/* nesting is now 1..4, so index the slot below it */
	return &buffer->buffer[buffer->nesting - 1][0];
}

static void put_trace_buf(void)
{
	/* Don't let the decrement of nesting leak before this */
	barrier();
	this_cpu_dec(trace_percpu_buffer->nesting);
}
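/*
 * Illustrative sketch (not part of the original file): every user of the
 * per-cpu trace_printk buffers follows the same bracketed pattern, which
 * is what makes the four nesting slots (normal, softirq, irq and NMI
 * context) work:
 *
 *	char *tbuffer = get_trace_buf();
 *
 *	if (!tbuffer)
 *		return 0;	/ * nested too deep, drop the message * /
 *	... format the message into tbuffer ...
 *	put_trace_buf();
 */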
static int alloc_percpu_trace_buffer(void)
{
	struct trace_buffer_struct *buffers;

	buffers = alloc_percpu(struct trace_buffer_struct);
	if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
		return -ENOMEM;

	trace_percpu_buffer = buffers;
	return 0;
}
static int buffers_allocated;

void trace_printk_init_buffers(void)
{
	if (buffers_allocated)
		return;

	if (alloc_percpu_trace_buffer())
		return;

	/* trace_printk() is for debug use only. Don't use it in production. */

	pr_warn("**********************************************************\n");
	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
	pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
	pr_warn("** unsafe for production use.                           **\n");
	pr_warn("** If you see this message and you are not debugging    **\n");
	pr_warn("** the kernel, report this immediately to your vendor!  **\n");
	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warn("**********************************************************\n");

	/* Expand the buffers to set size */
	tracing_update_buffers();

	buffers_allocated = 1;

	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here. If the global_trace.buffer is already
	 * allocated here, then this was called by module code.
	 */
	if (global_trace.trace_buffer.buffer)
		tracing_start_cmdline_record();
}

void trace_printk_start_comm(void)
{
	/* Start tracing comms if trace printk is set */
	if (!buffers_allocated)
		return;
	tracing_start_cmdline_record();
}

static void trace_printk_start_stop_comm(int enabled)
{
	if (!buffers_allocated)
		return;

	if (enabled)
		tracing_start_cmdline_record();
	else
		tracing_stop_cmdline_record();
}
/**
 * trace_vbprintk - write binary msg to tracing buffer
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	struct trace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct bprint_entry *entry;
	unsigned long flags;
	char *tbuffer;
	int len = 0, size, pc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out_nobuffer;
	}

	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);

	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
					    flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->fmt = fmt;

	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
	}

out:
	put_trace_buf();

out_nobuffer:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
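/*
 * Illustrative note (not part of the original file): trace_vbprintk() is
 * where trace_printk() usually ends up when the format string can be
 * recorded as a pointer plus binary arguments, e.g.:
 *
 *	trace_printk("read %d bytes from %s\n", ret, name);
 *
 * Only the vbin_printf()-packed arguments go into the ring buffer here;
 * the string itself is rendered later, when the buffer is read.
 */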
2962 __trace_array_vprintk(struct ring_buffer
*buffer
,
2963 unsigned long ip
, const char *fmt
, va_list args
)
2965 struct trace_event_call
*call
= &event_print
;
2966 struct ring_buffer_event
*event
;
2967 int len
= 0, size
, pc
;
2968 struct print_entry
*entry
;
2969 unsigned long flags
;
2972 if (tracing_disabled
|| tracing_selftest_running
)
2975 /* Don't pollute graph traces with trace_vprintk internals */
2976 pause_graph_tracing();
2978 pc
= preempt_count();
2979 preempt_disable_notrace();
2982 tbuffer
= get_trace_buf();
2988 len
= vscnprintf(tbuffer
, TRACE_BUF_SIZE
, fmt
, args
);
2990 local_save_flags(flags
);
2991 size
= sizeof(*entry
) + len
+ 1;
2992 event
= __trace_buffer_lock_reserve(buffer
, TRACE_PRINT
, size
,
2996 entry
= ring_buffer_event_data(event
);
2999 memcpy(&entry
->buf
, tbuffer
, len
+ 1);
3000 if (!call_filter_check_discard(call
, entry
, buffer
, event
)) {
3001 __buffer_unlock_commit(buffer
, event
);
3002 ftrace_trace_stack(&global_trace
, buffer
, flags
, 6, pc
, NULL
);
3009 preempt_enable_notrace();
3010 unpause_graph_tracing();
3015 int trace_array_vprintk(struct trace_array
*tr
,
3016 unsigned long ip
, const char *fmt
, va_list args
)
3018 return __trace_array_vprintk(tr
->trace_buffer
.buffer
, ip
, fmt
, args
);
3021 int trace_array_printk(struct trace_array
*tr
,
3022 unsigned long ip
, const char *fmt
, ...)
3027 if (!(global_trace
.trace_flags
& TRACE_ITER_PRINTK
))
3031 ret
= trace_array_vprintk(tr
, ip
, fmt
, ap
);
3036 int trace_array_printk_buf(struct ring_buffer
*buffer
,
3037 unsigned long ip
, const char *fmt
, ...)
3042 if (!(global_trace
.trace_flags
& TRACE_ITER_PRINTK
))
3046 ret
= __trace_array_vprintk(buffer
, ip
, fmt
, ap
);
3051 int trace_vprintk(unsigned long ip
, const char *fmt
, va_list args
)
3053 return trace_array_vprintk(&global_trace
, ip
, fmt
, args
);
3055 EXPORT_SYMBOL_GPL(trace_vprintk
);
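/*
 * Illustrative sketch (not part of the original file): trace_array_printk()
 * is the instance-aware cousin of trace_printk(); code that owns a private
 * trace instance (here a hypothetical "my_tr" pointer to that instance's
 * trace_array) can log into it without touching the global buffer:
 *
 *	trace_array_printk(my_tr, _THIS_IP_, "queue depth now %d\n", depth);
 */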
3057 static void trace_iterator_increment(struct trace_iterator
*iter
)
3059 struct ring_buffer_iter
*buf_iter
= trace_buffer_iter(iter
, iter
->cpu
);
3063 ring_buffer_read(buf_iter
, NULL
);
3066 static struct trace_entry
*
3067 peek_next_entry(struct trace_iterator
*iter
, int cpu
, u64
*ts
,
3068 unsigned long *lost_events
)
3070 struct ring_buffer_event
*event
;
3071 struct ring_buffer_iter
*buf_iter
= trace_buffer_iter(iter
, cpu
);
3074 event
= ring_buffer_iter_peek(buf_iter
, ts
);
3076 event
= ring_buffer_peek(iter
->trace_buffer
->buffer
, cpu
, ts
,
3080 iter
->ent_size
= ring_buffer_event_length(event
);
3081 return ring_buffer_event_data(event
);
3087 static struct trace_entry
*
3088 __find_next_entry(struct trace_iterator
*iter
, int *ent_cpu
,
3089 unsigned long *missing_events
, u64
*ent_ts
)
3091 struct ring_buffer
*buffer
= iter
->trace_buffer
->buffer
;
3092 struct trace_entry
*ent
, *next
= NULL
;
3093 unsigned long lost_events
= 0, next_lost
= 0;
3094 int cpu_file
= iter
->cpu_file
;
3095 u64 next_ts
= 0, ts
;
3101 * If we are in a per_cpu trace file, don't bother by iterating over
3102 * all cpu and peek directly.
3104 if (cpu_file
> RING_BUFFER_ALL_CPUS
) {
3105 if (ring_buffer_empty_cpu(buffer
, cpu_file
))
3107 ent
= peek_next_entry(iter
, cpu_file
, ent_ts
, missing_events
);
3109 *ent_cpu
= cpu_file
;
3114 for_each_tracing_cpu(cpu
) {
3116 if (ring_buffer_empty_cpu(buffer
, cpu
))
3119 ent
= peek_next_entry(iter
, cpu
, &ts
, &lost_events
);
3122 * Pick the entry with the smallest timestamp:
3124 if (ent
&& (!next
|| ts
< next_ts
)) {
3128 next_lost
= lost_events
;
3129 next_size
= iter
->ent_size
;
3133 iter
->ent_size
= next_size
;
3136 *ent_cpu
= next_cpu
;
3142 *missing_events
= next_lost
;
3147 /* Find the next real entry, without updating the iterator itself */
3148 struct trace_entry
*trace_find_next_entry(struct trace_iterator
*iter
,
3149 int *ent_cpu
, u64
*ent_ts
)
3151 return __find_next_entry(iter
, ent_cpu
, NULL
, ent_ts
);
3154 /* Find the next real entry, and increment the iterator to the next entry */
3155 void *trace_find_next_entry_inc(struct trace_iterator
*iter
)
3157 iter
->ent
= __find_next_entry(iter
, &iter
->cpu
,
3158 &iter
->lost_events
, &iter
->ts
);
3161 trace_iterator_increment(iter
);
3163 return iter
->ent
? iter
: NULL
;
3166 static void trace_consume(struct trace_iterator
*iter
)
3168 ring_buffer_consume(iter
->trace_buffer
->buffer
, iter
->cpu
, &iter
->ts
,
3169 &iter
->lost_events
);
3172 static void *s_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
3174 struct trace_iterator
*iter
= m
->private;
3178 WARN_ON_ONCE(iter
->leftover
);
3182 /* can't go backwards */
3187 ent
= trace_find_next_entry_inc(iter
);
3191 while (ent
&& iter
->idx
< i
)
3192 ent
= trace_find_next_entry_inc(iter
);
3199 void tracing_iter_reset(struct trace_iterator
*iter
, int cpu
)
3201 struct ring_buffer_event
*event
;
3202 struct ring_buffer_iter
*buf_iter
;
3203 unsigned long entries
= 0;
3206 per_cpu_ptr(iter
->trace_buffer
->data
, cpu
)->skipped_entries
= 0;
3208 buf_iter
= trace_buffer_iter(iter
, cpu
);
3212 ring_buffer_iter_reset(buf_iter
);
3215 * We could have the case with the max latency tracers
3216 * that a reset never took place on a cpu. This is evident
3217 * by the timestamp being before the start of the buffer.
3219 while ((event
= ring_buffer_iter_peek(buf_iter
, &ts
))) {
3220 if (ts
>= iter
->trace_buffer
->time_start
)
3223 ring_buffer_read(buf_iter
, NULL
);
3226 per_cpu_ptr(iter
->trace_buffer
->data
, cpu
)->skipped_entries
= entries
;
3230 * The current tracer is copied to avoid a global locking
3233 static void *s_start(struct seq_file
*m
, loff_t
*pos
)
3235 struct trace_iterator
*iter
= m
->private;
3236 struct trace_array
*tr
= iter
->tr
;
3237 int cpu_file
= iter
->cpu_file
;
3243 * copy the tracer to avoid using a global lock all around.
3244 * iter->trace is a copy of current_trace, the pointer to the
3245 * name may be used instead of a strcmp(), as iter->trace->name
3246 * will point to the same string as current_trace->name.
3248 mutex_lock(&trace_types_lock
);
3249 if (unlikely(tr
->current_trace
&& iter
->trace
->name
!= tr
->current_trace
->name
))
3250 *iter
->trace
= *tr
->current_trace
;
3251 mutex_unlock(&trace_types_lock
);
3253 #ifdef CONFIG_TRACER_MAX_TRACE
3254 if (iter
->snapshot
&& iter
->trace
->use_max_tr
)
3255 return ERR_PTR(-EBUSY
);
3258 if (!iter
->snapshot
)
3259 atomic_inc(&trace_record_taskinfo_disabled
);
3261 if (*pos
!= iter
->pos
) {
3266 if (cpu_file
== RING_BUFFER_ALL_CPUS
) {
3267 for_each_tracing_cpu(cpu
)
3268 tracing_iter_reset(iter
, cpu
);
3270 tracing_iter_reset(iter
, cpu_file
);
3273 for (p
= iter
; p
&& l
< *pos
; p
= s_next(m
, p
, &l
))
3278 * If we overflowed the seq_file before, then we want
3279 * to just reuse the trace_seq buffer again.
3285 p
= s_next(m
, p
, &l
);
3289 trace_event_read_lock();
3290 trace_access_lock(cpu_file
);
3294 static void s_stop(struct seq_file
*m
, void *p
)
3296 struct trace_iterator
*iter
= m
->private;
3298 #ifdef CONFIG_TRACER_MAX_TRACE
3299 if (iter
->snapshot
&& iter
->trace
->use_max_tr
)
3303 if (!iter
->snapshot
)
3304 atomic_dec(&trace_record_taskinfo_disabled
);
3306 trace_access_unlock(iter
->cpu_file
);
3307 trace_event_read_unlock();
3311 get_total_entries(struct trace_buffer
*buf
,
3312 unsigned long *total
, unsigned long *entries
)
3314 unsigned long count
;
3320 for_each_tracing_cpu(cpu
) {
3321 count
= ring_buffer_entries_cpu(buf
->buffer
, cpu
);
3323 * If this buffer has skipped entries, then we hold all
3324 * entries for the trace and we need to ignore the
3325 * ones before the time stamp.
3327 if (per_cpu_ptr(buf
->data
, cpu
)->skipped_entries
) {
3328 count
-= per_cpu_ptr(buf
->data
, cpu
)->skipped_entries
;
3329 /* total is the same as the entries */
3333 ring_buffer_overrun_cpu(buf
->buffer
, cpu
);
3338 static void print_lat_help_header(struct seq_file
*m
)
3340 seq_puts(m
, "# _------=> CPU# \n"
3341 "# / _-----=> irqs-off \n"
3342 "# | / _----=> need-resched \n"
3343 "# || / _---=> hardirq/softirq \n"
3344 "# ||| / _--=> preempt-depth \n"
3346 "# cmd pid ||||| time | caller \n"
3347 "# \\ / ||||| \\ | / \n");
3350 static void print_event_info(struct trace_buffer
*buf
, struct seq_file
*m
)
3352 unsigned long total
;
3353 unsigned long entries
;
3355 get_total_entries(buf
, &total
, &entries
);
3356 seq_printf(m
, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3357 entries
, total
, num_online_cpus());
3361 static void print_func_help_header(struct trace_buffer
*buf
, struct seq_file
*m
,
3364 bool tgid
= flags
& TRACE_ITER_RECORD_TGID
;
3366 print_event_info(buf
, m
);
3368 seq_printf(m
, "# TASK-PID CPU# %s TIMESTAMP FUNCTION\n", tgid
? "TGID " : "");
3369 seq_printf(m
, "# | | | %s | |\n", tgid
? " | " : "");
3372 static void print_func_help_header_irq(struct trace_buffer
*buf
, struct seq_file
*m
,
3375 bool tgid
= flags
& TRACE_ITER_RECORD_TGID
;
3376 const char tgid_space
[] = " ";
3377 const char space
[] = " ";
3379 seq_printf(m
, "# %s _-----=> irqs-off\n",
3380 tgid
? tgid_space
: space
);
3381 seq_printf(m
, "# %s / _----=> need-resched\n",
3382 tgid
? tgid_space
: space
);
3383 seq_printf(m
, "# %s| / _---=> hardirq/softirq\n",
3384 tgid
? tgid_space
: space
);
3385 seq_printf(m
, "# %s|| / _--=> preempt-depth\n",
3386 tgid
? tgid_space
: space
);
3387 seq_printf(m
, "# %s||| / delay\n",
3388 tgid
? tgid_space
: space
);
3389 seq_printf(m
, "# TASK-PID CPU#%s|||| TIMESTAMP FUNCTION\n",
3390 tgid
? " TGID " : space
);
3391 seq_printf(m
, "# | | | %s|||| | |\n",
3392 tgid
? " | " : space
);
3396 print_trace_header(struct seq_file
*m
, struct trace_iterator
*iter
)
3398 unsigned long sym_flags
= (global_trace
.trace_flags
& TRACE_ITER_SYM_MASK
);
3399 struct trace_buffer
*buf
= iter
->trace_buffer
;
3400 struct trace_array_cpu
*data
= per_cpu_ptr(buf
->data
, buf
->cpu
);
3401 struct tracer
*type
= iter
->trace
;
3402 unsigned long entries
;
3403 unsigned long total
;
3404 const char *name
= "preemption";
3408 get_total_entries(buf
, &total
, &entries
);
3410 seq_printf(m
, "# %s latency trace v1.1.5 on %s\n",
3412 seq_puts(m
, "# -----------------------------------"
3413 "---------------------------------\n");
3414 seq_printf(m
, "# latency: %lu us, #%lu/%lu, CPU#%d |"
3415 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
3416 nsecs_to_usecs(data
->saved_latency
),
3420 #if defined(CONFIG_PREEMPT_NONE)
3422 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
3424 #elif defined(CONFIG_PREEMPT)
3429 /* These are reserved for later use */
3432 seq_printf(m
, " #P:%d)\n", num_online_cpus());
3436 seq_puts(m
, "# -----------------\n");
3437 seq_printf(m
, "# | task: %.16s-%d "
3438 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
3439 data
->comm
, data
->pid
,
3440 from_kuid_munged(seq_user_ns(m
), data
->uid
), data
->nice
,
3441 data
->policy
, data
->rt_priority
);
3442 seq_puts(m
, "# -----------------\n");
3444 if (data
->critical_start
) {
3445 seq_puts(m
, "# => started at: ");
3446 seq_print_ip_sym(&iter
->seq
, data
->critical_start
, sym_flags
);
3447 trace_print_seq(m
, &iter
->seq
);
3448 seq_puts(m
, "\n# => ended at: ");
3449 seq_print_ip_sym(&iter
->seq
, data
->critical_end
, sym_flags
);
3450 trace_print_seq(m
, &iter
->seq
);
3451 seq_puts(m
, "\n#\n");
3457 static void test_cpu_buff_start(struct trace_iterator
*iter
)
3459 struct trace_seq
*s
= &iter
->seq
;
3460 struct trace_array
*tr
= iter
->tr
;
3462 if (!(tr
->trace_flags
& TRACE_ITER_ANNOTATE
))
3465 if (!(iter
->iter_flags
& TRACE_FILE_ANNOTATE
))
3468 if (cpumask_available(iter
->started
) &&
3469 cpumask_test_cpu(iter
->cpu
, iter
->started
))
3472 if (per_cpu_ptr(iter
->trace_buffer
->data
, iter
->cpu
)->skipped_entries
)
3475 if (cpumask_available(iter
->started
))
3476 cpumask_set_cpu(iter
->cpu
, iter
->started
);
3478 /* Don't print started cpu buffer for the first entry of the trace */
3480 trace_seq_printf(s
, "##### CPU %u buffer started ####\n",
3484 static enum print_line_t
print_trace_fmt(struct trace_iterator
*iter
)
3486 struct trace_array
*tr
= iter
->tr
;
3487 struct trace_seq
*s
= &iter
->seq
;
3488 unsigned long sym_flags
= (tr
->trace_flags
& TRACE_ITER_SYM_MASK
);
3489 struct trace_entry
*entry
;
3490 struct trace_event
*event
;
3494 test_cpu_buff_start(iter
);
3496 event
= ftrace_find_event(entry
->type
);
3498 if (tr
->trace_flags
& TRACE_ITER_CONTEXT_INFO
) {
3499 if (iter
->iter_flags
& TRACE_FILE_LAT_FMT
)
3500 trace_print_lat_context(iter
);
3502 trace_print_context(iter
);
3505 if (trace_seq_has_overflowed(s
))
3506 return TRACE_TYPE_PARTIAL_LINE
;
3509 return event
->funcs
->trace(iter
, sym_flags
, event
);
3511 trace_seq_printf(s
, "Unknown type %d\n", entry
->type
);
3513 return trace_handle_return(s
);
3516 static enum print_line_t
print_raw_fmt(struct trace_iterator
*iter
)
3518 struct trace_array
*tr
= iter
->tr
;
3519 struct trace_seq
*s
= &iter
->seq
;
3520 struct trace_entry
*entry
;
3521 struct trace_event
*event
;
3525 if (tr
->trace_flags
& TRACE_ITER_CONTEXT_INFO
)
3526 trace_seq_printf(s
, "%d %d %llu ",
3527 entry
->pid
, iter
->cpu
, iter
->ts
);
3529 if (trace_seq_has_overflowed(s
))
3530 return TRACE_TYPE_PARTIAL_LINE
;
3532 event
= ftrace_find_event(entry
->type
);
3534 return event
->funcs
->raw(iter
, 0, event
);
3536 trace_seq_printf(s
, "%d ?\n", entry
->type
);
3538 return trace_handle_return(s
);
3541 static enum print_line_t
print_hex_fmt(struct trace_iterator
*iter
)
3543 struct trace_array
*tr
= iter
->tr
;
3544 struct trace_seq
*s
= &iter
->seq
;
3545 unsigned char newline
= '\n';
3546 struct trace_entry
*entry
;
3547 struct trace_event
*event
;
3551 if (tr
->trace_flags
& TRACE_ITER_CONTEXT_INFO
) {
3552 SEQ_PUT_HEX_FIELD(s
, entry
->pid
);
3553 SEQ_PUT_HEX_FIELD(s
, iter
->cpu
);
3554 SEQ_PUT_HEX_FIELD(s
, iter
->ts
);
3555 if (trace_seq_has_overflowed(s
))
3556 return TRACE_TYPE_PARTIAL_LINE
;
3559 event
= ftrace_find_event(entry
->type
);
3561 enum print_line_t ret
= event
->funcs
->hex(iter
, 0, event
);
3562 if (ret
!= TRACE_TYPE_HANDLED
)
3566 SEQ_PUT_FIELD(s
, newline
);
3568 return trace_handle_return(s
);
3571 static enum print_line_t
print_bin_fmt(struct trace_iterator
*iter
)
3573 struct trace_array
*tr
= iter
->tr
;
3574 struct trace_seq
*s
= &iter
->seq
;
3575 struct trace_entry
*entry
;
3576 struct trace_event
*event
;
3580 if (tr
->trace_flags
& TRACE_ITER_CONTEXT_INFO
) {
3581 SEQ_PUT_FIELD(s
, entry
->pid
);
3582 SEQ_PUT_FIELD(s
, iter
->cpu
);
3583 SEQ_PUT_FIELD(s
, iter
->ts
);
3584 if (trace_seq_has_overflowed(s
))
3585 return TRACE_TYPE_PARTIAL_LINE
;
3588 event
= ftrace_find_event(entry
->type
);
3589 return event
? event
->funcs
->binary(iter
, 0, event
) :
3593 int trace_empty(struct trace_iterator
*iter
)
3595 struct ring_buffer_iter
*buf_iter
;
3598 /* If we are looking at one CPU buffer, only check that one */
3599 if (iter
->cpu_file
!= RING_BUFFER_ALL_CPUS
) {
3600 cpu
= iter
->cpu_file
;
3601 buf_iter
= trace_buffer_iter(iter
, cpu
);
3603 if (!ring_buffer_iter_empty(buf_iter
))
3606 if (!ring_buffer_empty_cpu(iter
->trace_buffer
->buffer
, cpu
))
3612 for_each_tracing_cpu(cpu
) {
3613 buf_iter
= trace_buffer_iter(iter
, cpu
);
3615 if (!ring_buffer_iter_empty(buf_iter
))
3618 if (!ring_buffer_empty_cpu(iter
->trace_buffer
->buffer
, cpu
))
3626 /* Called with trace_event_read_lock() held. */
3627 enum print_line_t
print_trace_line(struct trace_iterator
*iter
)
3629 struct trace_array
*tr
= iter
->tr
;
3630 unsigned long trace_flags
= tr
->trace_flags
;
3631 enum print_line_t ret
;
3633 if (iter
->lost_events
) {
3634 trace_seq_printf(&iter
->seq
, "CPU:%d [LOST %lu EVENTS]\n",
3635 iter
->cpu
, iter
->lost_events
);
3636 if (trace_seq_has_overflowed(&iter
->seq
))
3637 return TRACE_TYPE_PARTIAL_LINE
;
3640 if (iter
->trace
&& iter
->trace
->print_line
) {
3641 ret
= iter
->trace
->print_line(iter
);
3642 if (ret
!= TRACE_TYPE_UNHANDLED
)
3646 if (iter
->ent
->type
== TRACE_BPUTS
&&
3647 trace_flags
& TRACE_ITER_PRINTK
&&
3648 trace_flags
& TRACE_ITER_PRINTK_MSGONLY
)
3649 return trace_print_bputs_msg_only(iter
);
3651 if (iter
->ent
->type
== TRACE_BPRINT
&&
3652 trace_flags
& TRACE_ITER_PRINTK
&&
3653 trace_flags
& TRACE_ITER_PRINTK_MSGONLY
)
3654 return trace_print_bprintk_msg_only(iter
);
3656 if (iter
->ent
->type
== TRACE_PRINT
&&
3657 trace_flags
& TRACE_ITER_PRINTK
&&
3658 trace_flags
& TRACE_ITER_PRINTK_MSGONLY
)
3659 return trace_print_printk_msg_only(iter
);
3661 if (trace_flags
& TRACE_ITER_BIN
)
3662 return print_bin_fmt(iter
);
3664 if (trace_flags
& TRACE_ITER_HEX
)
3665 return print_hex_fmt(iter
);
3667 if (trace_flags
& TRACE_ITER_RAW
)
3668 return print_raw_fmt(iter
);
3670 return print_trace_fmt(iter
);
3673 void trace_latency_header(struct seq_file
*m
)
3675 struct trace_iterator
*iter
= m
->private;
3676 struct trace_array
*tr
= iter
->tr
;
3678 /* print nothing if the buffers are empty */
3679 if (trace_empty(iter
))
3682 if (iter
->iter_flags
& TRACE_FILE_LAT_FMT
)
3683 print_trace_header(m
, iter
);
3685 if (!(tr
->trace_flags
& TRACE_ITER_VERBOSE
))
3686 print_lat_help_header(m
);
3689 void trace_default_header(struct seq_file
*m
)
3691 struct trace_iterator
*iter
= m
->private;
3692 struct trace_array
*tr
= iter
->tr
;
3693 unsigned long trace_flags
= tr
->trace_flags
;
3695 if (!(trace_flags
& TRACE_ITER_CONTEXT_INFO
))
3698 if (iter
->iter_flags
& TRACE_FILE_LAT_FMT
) {
3699 /* print nothing if the buffers are empty */
3700 if (trace_empty(iter
))
3702 print_trace_header(m
, iter
);
3703 if (!(trace_flags
& TRACE_ITER_VERBOSE
))
3704 print_lat_help_header(m
);
3706 if (!(trace_flags
& TRACE_ITER_VERBOSE
)) {
3707 if (trace_flags
& TRACE_ITER_IRQ_INFO
)
3708 print_func_help_header_irq(iter
->trace_buffer
,
3711 print_func_help_header(iter
->trace_buffer
, m
,
3717 static void test_ftrace_alive(struct seq_file
*m
)
3719 if (!ftrace_is_dead())
3721 seq_puts(m
, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3722 "# MAY BE MISSING FUNCTION EVENTS\n");
3725 #ifdef CONFIG_TRACER_MAX_TRACE
3726 static void show_snapshot_main_help(struct seq_file
*m
)
3728 seq_puts(m
, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3729 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3730 "# Takes a snapshot of the main buffer.\n"
3731 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3732 "# (Doesn't have to be '2' works with any number that\n"
3733 "# is not a '0' or '1')\n");
3736 static void show_snapshot_percpu_help(struct seq_file
*m
)
3738 seq_puts(m
, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
3739 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3740 seq_puts(m
, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3741 "# Takes a snapshot of the main buffer for this cpu.\n");
3743 seq_puts(m
, "# echo 1 > snapshot : Not supported with this kernel.\n"
3744 "# Must use main snapshot file to allocate.\n");
3746 seq_puts(m
, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3747 "# (Doesn't have to be '2' works with any number that\n"
3748 "# is not a '0' or '1')\n");
3751 static void print_snapshot_help(struct seq_file
*m
, struct trace_iterator
*iter
)
3753 if (iter
->tr
->allocated_snapshot
)
3754 seq_puts(m
, "#\n# * Snapshot is allocated *\n#\n");
3756 seq_puts(m
, "#\n# * Snapshot is freed *\n#\n");
3758 seq_puts(m
, "# Snapshot commands:\n");
3759 if (iter
->cpu_file
== RING_BUFFER_ALL_CPUS
)
3760 show_snapshot_main_help(m
);
3762 show_snapshot_percpu_help(m
);
3765 /* Should never be called */
3766 static inline void print_snapshot_help(struct seq_file
*m
, struct trace_iterator
*iter
) { }
3769 static int s_show(struct seq_file
*m
, void *v
)
3771 struct trace_iterator
*iter
= v
;
3774 if (iter
->ent
== NULL
) {
3776 seq_printf(m
, "# tracer: %s\n", iter
->trace
->name
);
3778 test_ftrace_alive(m
);
3780 if (iter
->snapshot
&& trace_empty(iter
))
3781 print_snapshot_help(m
, iter
);
3782 else if (iter
->trace
&& iter
->trace
->print_header
)
3783 iter
->trace
->print_header(m
);
3785 trace_default_header(m
);
3787 } else if (iter
->leftover
) {
3789 * If we filled the seq_file buffer earlier, we
3790 * want to just show it now.
3792 ret
= trace_print_seq(m
, &iter
->seq
);
3794 /* ret should this time be zero, but you never know */
3795 iter
->leftover
= ret
;
3798 print_trace_line(iter
);
3799 ret
= trace_print_seq(m
, &iter
->seq
);
3801 * If we overflow the seq_file buffer, then it will
3802 * ask us for this data again at start up.
3804 * ret is 0 if seq_file write succeeded.
3807 iter
->leftover
= ret
;
3814 * Should be used after trace_array_get(), trace_types_lock
3815 * ensures that i_cdev was already initialized.
3817 static inline int tracing_get_cpu(struct inode
*inode
)
3819 if (inode
->i_cdev
) /* See trace_create_cpu_file() */
3820 return (long)inode
->i_cdev
- 1;
3821 return RING_BUFFER_ALL_CPUS
;
3824 static const struct seq_operations tracer_seq_ops
= {
3831 static struct trace_iterator
*
3832 __tracing_open(struct inode
*inode
, struct file
*file
, bool snapshot
)
3834 struct trace_array
*tr
= inode
->i_private
;
3835 struct trace_iterator
*iter
;
3838 if (tracing_disabled
)
3839 return ERR_PTR(-ENODEV
);
3841 iter
= __seq_open_private(file
, &tracer_seq_ops
, sizeof(*iter
));
3843 return ERR_PTR(-ENOMEM
);
3845 iter
->buffer_iter
= kcalloc(nr_cpu_ids
, sizeof(*iter
->buffer_iter
),
3847 if (!iter
->buffer_iter
)
3851 * We make a copy of the current tracer to avoid concurrent
3852 * changes on it while we are reading.
3854 mutex_lock(&trace_types_lock
);
3855 iter
->trace
= kzalloc(sizeof(*iter
->trace
), GFP_KERNEL
);
3859 *iter
->trace
= *tr
->current_trace
;
3861 if (!zalloc_cpumask_var(&iter
->started
, GFP_KERNEL
))
3866 #ifdef CONFIG_TRACER_MAX_TRACE
3867 /* Currently only the top directory has a snapshot */
3868 if (tr
->current_trace
->print_max
|| snapshot
)
3869 iter
->trace_buffer
= &tr
->max_buffer
;
3872 iter
->trace_buffer
= &tr
->trace_buffer
;
3873 iter
->snapshot
= snapshot
;
3875 iter
->cpu_file
= tracing_get_cpu(inode
);
3876 mutex_init(&iter
->mutex
);
3878 /* Notify the tracer early; before we stop tracing. */
3879 if (iter
->trace
&& iter
->trace
->open
)
3880 iter
->trace
->open(iter
);
3882 /* Annotate start of buffers if we had overruns */
3883 if (ring_buffer_overruns(iter
->trace_buffer
->buffer
))
3884 iter
->iter_flags
|= TRACE_FILE_ANNOTATE
;
3886 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3887 if (trace_clocks
[tr
->clock_id
].in_ns
)
3888 iter
->iter_flags
|= TRACE_FILE_TIME_IN_NS
;
3890 /* stop the trace while dumping if we are not opening "snapshot" */
3891 if (!iter
->snapshot
)
3892 tracing_stop_tr(tr
);
3894 if (iter
->cpu_file
== RING_BUFFER_ALL_CPUS
) {
3895 for_each_tracing_cpu(cpu
) {
3896 iter
->buffer_iter
[cpu
] =
3897 ring_buffer_read_prepare(iter
->trace_buffer
->buffer
, cpu
);
3899 ring_buffer_read_prepare_sync();
3900 for_each_tracing_cpu(cpu
) {
3901 ring_buffer_read_start(iter
->buffer_iter
[cpu
]);
3902 tracing_iter_reset(iter
, cpu
);
3905 cpu
= iter
->cpu_file
;
3906 iter
->buffer_iter
[cpu
] =
3907 ring_buffer_read_prepare(iter
->trace_buffer
->buffer
, cpu
);
3908 ring_buffer_read_prepare_sync();
3909 ring_buffer_read_start(iter
->buffer_iter
[cpu
]);
3910 tracing_iter_reset(iter
, cpu
);
3913 mutex_unlock(&trace_types_lock
);
3918 mutex_unlock(&trace_types_lock
);
3920 kfree(iter
->buffer_iter
);
3922 seq_release_private(inode
, file
);
3923 return ERR_PTR(-ENOMEM
);
3926 int tracing_open_generic(struct inode
*inode
, struct file
*filp
)
3928 if (tracing_disabled
)
3931 filp
->private_data
= inode
->i_private
;
3935 bool tracing_is_disabled(void)
3937 return (tracing_disabled
) ? true: false;
3941 * Open and update trace_array ref count.
3942 * Must have the current trace_array passed to it.
3944 static int tracing_open_generic_tr(struct inode
*inode
, struct file
*filp
)
3946 struct trace_array
*tr
= inode
->i_private
;
3948 if (tracing_disabled
)
3951 if (trace_array_get(tr
) < 0)
3954 filp
->private_data
= inode
->i_private
;
3959 static int tracing_release(struct inode
*inode
, struct file
*file
)
3961 struct trace_array
*tr
= inode
->i_private
;
3962 struct seq_file
*m
= file
->private_data
;
3963 struct trace_iterator
*iter
;
3966 if (!(file
->f_mode
& FMODE_READ
)) {
3967 trace_array_put(tr
);
3971 /* Writes do not use seq_file */
3973 mutex_lock(&trace_types_lock
);
3975 for_each_tracing_cpu(cpu
) {
3976 if (iter
->buffer_iter
[cpu
])
3977 ring_buffer_read_finish(iter
->buffer_iter
[cpu
]);
3980 if (iter
->trace
&& iter
->trace
->close
)
3981 iter
->trace
->close(iter
);
3983 if (!iter
->snapshot
)
3984 /* reenable tracing if it was previously enabled */
3985 tracing_start_tr(tr
);
3987 __trace_array_put(tr
);
3989 mutex_unlock(&trace_types_lock
);
3991 mutex_destroy(&iter
->mutex
);
3992 free_cpumask_var(iter
->started
);
3994 kfree(iter
->buffer_iter
);
3995 seq_release_private(inode
, file
);
4000 static int tracing_release_generic_tr(struct inode
*inode
, struct file
*file
)
4002 struct trace_array
*tr
= inode
->i_private
;
4004 trace_array_put(tr
);
4008 static int tracing_single_release_tr(struct inode
*inode
, struct file
*file
)
4010 struct trace_array
*tr
= inode
->i_private
;
4012 trace_array_put(tr
);
4014 return single_release(inode
, file
);
4017 static int tracing_open(struct inode
*inode
, struct file
*file
)
4019 struct trace_array
*tr
= inode
->i_private
;
4020 struct trace_iterator
*iter
;
4023 if (trace_array_get(tr
) < 0)
4026 /* If this file was open for write, then erase contents */
4027 if ((file
->f_mode
& FMODE_WRITE
) && (file
->f_flags
& O_TRUNC
)) {
4028 int cpu
= tracing_get_cpu(inode
);
4029 struct trace_buffer
*trace_buf
= &tr
->trace_buffer
;
4031 #ifdef CONFIG_TRACER_MAX_TRACE
4032 if (tr
->current_trace
->print_max
)
4033 trace_buf
= &tr
->max_buffer
;
4036 if (cpu
== RING_BUFFER_ALL_CPUS
)
4037 tracing_reset_online_cpus(trace_buf
);
4039 tracing_reset(trace_buf
, cpu
);
4042 if (file
->f_mode
& FMODE_READ
) {
4043 iter
= __tracing_open(inode
, file
, false);
4045 ret
= PTR_ERR(iter
);
4046 else if (tr
->trace_flags
& TRACE_ITER_LATENCY_FMT
)
4047 iter
->iter_flags
|= TRACE_FILE_LAT_FMT
;
4051 trace_array_put(tr
);
4057 * Some tracers are not suitable for instance buffers.
4058 * A tracer is always available for the global array (toplevel)
4059 * or if it explicitly states that it is.
4062 trace_ok_for_array(struct tracer
*t
, struct trace_array
*tr
)
4064 return (tr
->flags
& TRACE_ARRAY_FL_GLOBAL
) || t
->allow_instances
;
4067 /* Find the next tracer that this trace array may use */
4068 static struct tracer
*
4069 get_tracer_for_array(struct trace_array
*tr
, struct tracer
*t
)
4071 while (t
&& !trace_ok_for_array(t
, tr
))
4078 t_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
4080 struct trace_array
*tr
= m
->private;
4081 struct tracer
*t
= v
;
4086 t
= get_tracer_for_array(tr
, t
->next
);
4091 static void *t_start(struct seq_file
*m
, loff_t
*pos
)
4093 struct trace_array
*tr
= m
->private;
4097 mutex_lock(&trace_types_lock
);
4099 t
= get_tracer_for_array(tr
, trace_types
);
4100 for (; t
&& l
< *pos
; t
= t_next(m
, t
, &l
))
4106 static void t_stop(struct seq_file
*m
, void *p
)
4108 mutex_unlock(&trace_types_lock
);
4111 static int t_show(struct seq_file
*m
, void *v
)
4113 struct tracer
*t
= v
;
4118 seq_puts(m
, t
->name
);
4127 static const struct seq_operations show_traces_seq_ops
= {
4134 static int show_traces_open(struct inode
*inode
, struct file
*file
)
4136 struct trace_array
*tr
= inode
->i_private
;
4140 if (tracing_disabled
)
4143 ret
= seq_open(file
, &show_traces_seq_ops
);
4147 m
= file
->private_data
;
4154 tracing_write_stub(struct file
*filp
, const char __user
*ubuf
,
4155 size_t count
, loff_t
*ppos
)
4160 loff_t
tracing_lseek(struct file
*file
, loff_t offset
, int whence
)
4164 if (file
->f_mode
& FMODE_READ
)
4165 ret
= seq_lseek(file
, offset
, whence
);
4167 file
->f_pos
= ret
= 0;
4172 static const struct file_operations tracing_fops
= {
4173 .open
= tracing_open
,
4175 .write
= tracing_write_stub
,
4176 .llseek
= tracing_lseek
,
4177 .release
= tracing_release
,
4180 static const struct file_operations show_traces_fops
= {
4181 .open
= show_traces_open
,
4183 .release
= seq_release
,
4184 .llseek
= seq_lseek
,
4188 tracing_cpumask_read(struct file
*filp
, char __user
*ubuf
,
4189 size_t count
, loff_t
*ppos
)
4191 struct trace_array
*tr
= file_inode(filp
)->i_private
;
4195 len
= snprintf(NULL
, 0, "%*pb\n",
4196 cpumask_pr_args(tr
->tracing_cpumask
)) + 1;
4197 mask_str
= kmalloc(len
, GFP_KERNEL
);
4201 len
= snprintf(mask_str
, len
, "%*pb\n",
4202 cpumask_pr_args(tr
->tracing_cpumask
));
4207 count
= simple_read_from_buffer(ubuf
, count
, ppos
, mask_str
, len
);
4216 tracing_cpumask_write(struct file
*filp
, const char __user
*ubuf
,
4217 size_t count
, loff_t
*ppos
)
4219 struct trace_array
*tr
= file_inode(filp
)->i_private
;
4220 cpumask_var_t tracing_cpumask_new
;
4223 if (!alloc_cpumask_var(&tracing_cpumask_new
, GFP_KERNEL
))
4226 err
= cpumask_parse_user(ubuf
, count
, tracing_cpumask_new
);
4230 local_irq_disable();
4231 arch_spin_lock(&tr
->max_lock
);
4232 for_each_tracing_cpu(cpu
) {
4234 * Increase/decrease the disabled counter if we are
4235 * about to flip a bit in the cpumask:
4237 if (cpumask_test_cpu(cpu
, tr
->tracing_cpumask
) &&
4238 !cpumask_test_cpu(cpu
, tracing_cpumask_new
)) {
4239 atomic_inc(&per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->disabled
);
4240 ring_buffer_record_disable_cpu(tr
->trace_buffer
.buffer
, cpu
);
4242 if (!cpumask_test_cpu(cpu
, tr
->tracing_cpumask
) &&
4243 cpumask_test_cpu(cpu
, tracing_cpumask_new
)) {
4244 atomic_dec(&per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->disabled
);
4245 ring_buffer_record_enable_cpu(tr
->trace_buffer
.buffer
, cpu
);
4248 arch_spin_unlock(&tr
->max_lock
);
4251 cpumask_copy(tr
->tracing_cpumask
, tracing_cpumask_new
);
4252 free_cpumask_var(tracing_cpumask_new
);
4257 free_cpumask_var(tracing_cpumask_new
);
4262 static const struct file_operations tracing_cpumask_fops
= {
4263 .open
= tracing_open_generic_tr
,
4264 .read
= tracing_cpumask_read
,
4265 .write
= tracing_cpumask_write
,
4266 .release
= tracing_release_generic_tr
,
4267 .llseek
= generic_file_llseek
,
4270 static int tracing_trace_options_show(struct seq_file
*m
, void *v
)
4272 struct tracer_opt
*trace_opts
;
4273 struct trace_array
*tr
= m
->private;
4277 mutex_lock(&trace_types_lock
);
4278 tracer_flags
= tr
->current_trace
->flags
->val
;
4279 trace_opts
= tr
->current_trace
->flags
->opts
;
4281 for (i
= 0; trace_options
[i
]; i
++) {
4282 if (tr
->trace_flags
& (1 << i
))
4283 seq_printf(m
, "%s\n", trace_options
[i
]);
4285 seq_printf(m
, "no%s\n", trace_options
[i
]);
4288 for (i
= 0; trace_opts
[i
].name
; i
++) {
4289 if (tracer_flags
& trace_opts
[i
].bit
)
4290 seq_printf(m
, "%s\n", trace_opts
[i
].name
);
4292 seq_printf(m
, "no%s\n", trace_opts
[i
].name
);
4294 mutex_unlock(&trace_types_lock
);
4299 static int __set_tracer_option(struct trace_array
*tr
,
4300 struct tracer_flags
*tracer_flags
,
4301 struct tracer_opt
*opts
, int neg
)
4303 struct tracer
*trace
= tracer_flags
->trace
;
4306 ret
= trace
->set_flag(tr
, tracer_flags
->val
, opts
->bit
, !neg
);
4311 tracer_flags
->val
&= ~opts
->bit
;
4313 tracer_flags
->val
|= opts
->bit
;
4317 /* Try to assign a tracer specific option */
4318 static int set_tracer_option(struct trace_array
*tr
, char *cmp
, int neg
)
4320 struct tracer
*trace
= tr
->current_trace
;
4321 struct tracer_flags
*tracer_flags
= trace
->flags
;
4322 struct tracer_opt
*opts
= NULL
;
4325 for (i
= 0; tracer_flags
->opts
[i
].name
; i
++) {
4326 opts
= &tracer_flags
->opts
[i
];
4328 if (strcmp(cmp
, opts
->name
) == 0)
4329 return __set_tracer_option(tr
, trace
->flags
, opts
, neg
);
4335 /* Some tracers require overwrite to stay enabled */
4336 int trace_keep_overwrite(struct tracer
*tracer
, u32 mask
, int set
)
4338 if (tracer
->enabled
&& (mask
& TRACE_ITER_OVERWRITE
) && !set
)
4344 int set_tracer_flag(struct trace_array
*tr
, unsigned int mask
, int enabled
)
4346 /* do nothing if flag is already set */
4347 if (!!(tr
->trace_flags
& mask
) == !!enabled
)
4350 /* Give the tracer a chance to approve the change */
4351 if (tr
->current_trace
->flag_changed
)
4352 if (tr
->current_trace
->flag_changed(tr
, mask
, !!enabled
))
4356 tr
->trace_flags
|= mask
;
4358 tr
->trace_flags
&= ~mask
;
4360 if (mask
== TRACE_ITER_RECORD_CMD
)
4361 trace_event_enable_cmd_record(enabled
);
4363 if (mask
== TRACE_ITER_RECORD_TGID
) {
4365 tgid_map
= kzalloc((PID_MAX_DEFAULT
+ 1) * sizeof(*tgid_map
),
4368 tr
->trace_flags
&= ~TRACE_ITER_RECORD_TGID
;
4372 trace_event_enable_tgid_record(enabled
);
4375 if (mask
== TRACE_ITER_EVENT_FORK
)
4376 trace_event_follow_fork(tr
, enabled
);
4378 if (mask
== TRACE_ITER_FUNC_FORK
)
4379 ftrace_pid_follow_fork(tr
, enabled
);
4381 if (mask
== TRACE_ITER_OVERWRITE
) {
4382 ring_buffer_change_overwrite(tr
->trace_buffer
.buffer
, enabled
);
4383 #ifdef CONFIG_TRACER_MAX_TRACE
4384 ring_buffer_change_overwrite(tr
->max_buffer
.buffer
, enabled
);
4388 if (mask
== TRACE_ITER_PRINTK
) {
4389 trace_printk_start_stop_comm(enabled
);
4390 trace_printk_control(enabled
);
4396 static int trace_set_options(struct trace_array
*tr
, char *option
)
4402 size_t orig_len
= strlen(option
);
4404 cmp
= strstrip(option
);
4406 if (strncmp(cmp
, "no", 2) == 0) {
4411 mutex_lock(&trace_types_lock
);
4413 for (i
= 0; trace_options
[i
]; i
++) {
4414 if (strcmp(cmp
, trace_options
[i
]) == 0) {
4415 ret
= set_tracer_flag(tr
, 1 << i
, !neg
);
4420 /* If no option could be set, test the specific tracer options */
4421 if (!trace_options
[i
])
4422 ret
= set_tracer_option(tr
, cmp
, neg
);
4424 mutex_unlock(&trace_types_lock
);
4427 * If the first trailing whitespace is replaced with '\0' by strstrip,
4428 * turn it back into a space.
4430 if (orig_len
> strlen(option
))
4431 option
[strlen(option
)] = ' ';
4436 static void __init
apply_trace_boot_options(void)
4438 char *buf
= trace_boot_options_buf
;
4442 option
= strsep(&buf
, ",");
4448 trace_set_options(&global_trace
, option
);
4450 /* Put back the comma to allow this to be called again */
4457 tracing_trace_options_write(struct file
*filp
, const char __user
*ubuf
,
4458 size_t cnt
, loff_t
*ppos
)
4460 struct seq_file
*m
= filp
->private_data
;
4461 struct trace_array
*tr
= m
->private;
4465 if (cnt
>= sizeof(buf
))
4468 if (copy_from_user(buf
, ubuf
, cnt
))
4473 ret
= trace_set_options(tr
, buf
);
4482 static int tracing_trace_options_open(struct inode
*inode
, struct file
*file
)
4484 struct trace_array
*tr
= inode
->i_private
;
4487 if (tracing_disabled
)
4490 if (trace_array_get(tr
) < 0)
4493 ret
= single_open(file
, tracing_trace_options_show
, inode
->i_private
);
4495 trace_array_put(tr
);
4500 static const struct file_operations tracing_iter_fops
= {
4501 .open
= tracing_trace_options_open
,
4503 .llseek
= seq_lseek
,
4504 .release
= tracing_single_release_tr
,
4505 .write
= tracing_trace_options_write
,
4508 static const char readme_msg
[] =
4509 "tracing mini-HOWTO:\n\n"
4510 "# echo 0 > tracing_on : quick way to disable tracing\n"
4511 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4512 " Important files:\n"
4513 " trace\t\t\t- The static contents of the buffer\n"
4514 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4515 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4516 " current_tracer\t- function and latency tracers\n"
4517 " available_tracers\t- list of configured tracers for current_tracer\n"
4518 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4519 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4520 " trace_clock\t\t-change the clock used to order events\n"
4521 " local: Per cpu clock but may not be synced across CPUs\n"
4522 " global: Synced across CPUs but slows tracing down.\n"
4523 " counter: Not a clock, but just an increment\n"
4524 " uptime: Jiffy counter from time of boot\n"
4525 " perf: Same clock that perf events use\n"
4526 #ifdef CONFIG_X86_64
4527 " x86-tsc: TSC cycle counter\n"
4529 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
4530 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
4531 " tracing_cpumask\t- Limit which CPUs to trace\n"
4532 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4533 "\t\t\t Remove sub-buffer with rmdir\n"
4534 " trace_options\t\t- Set format or modify how tracing happens\n"
4535 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
4536 "\t\t\t option name\n"
4537 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
4538 #ifdef CONFIG_DYNAMIC_FTRACE
4539 "\n available_filter_functions - list of functions that can be filtered on\n"
4540 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4541 "\t\t\t functions\n"
4542 "\t accepts: func_full_name or glob-matching-pattern\n"
4543 "\t modules: Can select a group via module\n"
4544 "\t Format: :mod:<module-name>\n"
4545 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4546 "\t triggers: a command to perform when function is hit\n"
4547 "\t Format: <function>:<trigger>[:count]\n"
4548 "\t trigger: traceon, traceoff\n"
4549 "\t\t enable_event:<system>:<event>\n"
4550 "\t\t disable_event:<system>:<event>\n"
4551 #ifdef CONFIG_STACKTRACE
4554 #ifdef CONFIG_TRACER_SNAPSHOT
4559 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4560 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4561 "\t The first one will disable tracing every time do_fault is hit\n"
4562 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4563 "\t The first time do trap is hit and it disables tracing, the\n"
4564 "\t counter will decrement to 2. If tracing is already disabled,\n"
4565 "\t the counter will not decrement. It only decrements when the\n"
4566 "\t trigger did work\n"
4567 "\t To remove trigger without count:\n"
4568 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4569 "\t To remove trigger with a count:\n"
4570 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
4571 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
4572 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4573 "\t modules: Can select a group via module command :mod:\n"
4574 "\t Does not accept triggers\n"
4575 #endif /* CONFIG_DYNAMIC_FTRACE */
4576 #ifdef CONFIG_FUNCTION_TRACER
4577 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4580 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4581 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
4582 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
4583 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4585 #ifdef CONFIG_TRACER_SNAPSHOT
4586 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4587 "\t\t\t snapshot buffer. Read the contents for more\n"
4588 "\t\t\t information\n"
4590 #ifdef CONFIG_STACK_TRACER
4591 " stack_trace\t\t- Shows the max stack trace when active\n"
4592 " stack_max_size\t- Shows current max stack size that was traced\n"
4593 "\t\t\t Write into this file to reset the max size (trigger a\n"
4594 "\t\t\t new trace)\n"
4595 #ifdef CONFIG_DYNAMIC_FTRACE
4596 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4599 #endif /* CONFIG_STACK_TRACER */
4600 #ifdef CONFIG_KPROBE_EVENTS
4601 " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4602 "\t\t\t Write into this file to define/undefine new trace events.\n"
4604 #ifdef CONFIG_UPROBE_EVENTS
4605 " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4606 "\t\t\t Write into this file to define/undefine new trace events.\n"
4608 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
4609 "\t accepts: event-definitions (one definition per line)\n"
4610 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
4611 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
4612 "\t -:[<group>/]<event>\n"
4613 #ifdef CONFIG_KPROBE_EVENTS
4614 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4615 "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4617 #ifdef CONFIG_UPROBE_EVENTS
4618 "\t place: <path>:<offset>\n"
4620 "\t args: <name>=fetcharg[:type]\n"
4621 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
4622 "\t $stack<index>, $stack, $retval, $comm\n"
4623 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string,\n"
4624 "\t b<bit-width>@<bit-offset>/<container-size>\n"
4626 " events/\t\t- Directory containing all trace event subsystems:\n"
4627 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4628 " events/<system>/\t- Directory containing all trace events for <system>:\n"
4629 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4631 " filter\t\t- If set, only events passing filter are traced\n"
4632 " events/<system>/<event>/\t- Directory containing control files for\n"
4634 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4635 " filter\t\t- If set, only events passing filter are traced\n"
4636 " trigger\t\t- If set, a command to perform when event is hit\n"
4637 "\t Format: <trigger>[:count][if <filter>]\n"
4638 "\t trigger: traceon, traceoff\n"
4639 "\t enable_event:<system>:<event>\n"
4640 "\t disable_event:<system>:<event>\n"
4641 #ifdef CONFIG_HIST_TRIGGERS
4642 "\t enable_hist:<system>:<event>\n"
4643 "\t disable_hist:<system>:<event>\n"
4645 #ifdef CONFIG_STACKTRACE
4648 #ifdef CONFIG_TRACER_SNAPSHOT
4651 #ifdef CONFIG_HIST_TRIGGERS
4652 "\t\t hist (see below)\n"
4654 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
4655 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
4656 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4657 "\t events/block/block_unplug/trigger\n"
4658 "\t The first disables tracing every time block_unplug is hit.\n"
4659 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
4660 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
4661 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
4662 "\t Like function triggers, the counter is only decremented if it\n"
4663 "\t enabled or disabled tracing.\n"
4664 "\t To remove a trigger without a count:\n"
4665 "\t echo '!<trigger> > <system>/<event>/trigger\n"
4666 "\t To remove a trigger with a count:\n"
4667 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
4668 "\t Filters can be ignored when removing a trigger.\n"
4669 #ifdef CONFIG_HIST_TRIGGERS
4670 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
4671 "\t Format: hist:keys=<field1[,field2,...]>\n"
4672 "\t [:values=<field1[,field2,...]>]\n"
4673 "\t [:sort=<field1[,field2,...]>]\n"
4674 "\t [:size=#entries]\n"
4675 "\t [:pause][:continue][:clear]\n"
4676 "\t [:name=histname1]\n"
4677 "\t [if <filter>]\n\n"
4678 "\t When a matching event is hit, an entry is added to a hash\n"
4679 "\t table using the key(s) and value(s) named, and the value of a\n"
4680 "\t sum called 'hitcount' is incremented. Keys and values\n"
4681 "\t correspond to fields in the event's format description. Keys\n"
4682 "\t can be any field, or the special string 'stacktrace'.\n"
4683 "\t Compound keys consisting of up to two fields can be specified\n"
4684 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4685 "\t fields. Sort keys consisting of up to two fields can be\n"
4686 "\t specified using the 'sort' keyword. The sort direction can\n"
4687 "\t be modified by appending '.descending' or '.ascending' to a\n"
4688 "\t sort field. The 'size' parameter can be used to specify more\n"
4689 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4690 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4691 "\t its histogram data will be shared with other triggers of the\n"
4692 "\t same name, and trigger hits will update this common data.\n\n"
4693 "\t Reading the 'hist' file for the event will dump the hash\n"
4694 "\t table in its entirety to stdout. If there are multiple hist\n"
4695 "\t triggers attached to an event, there will be a table for each\n"
4696 "\t trigger in the output. The table displayed for a named\n"
4697 "\t trigger will be the same as any other instance having the\n"
4698 "\t same name. The default format used to display a given field\n"
4699 "\t can be modified by appending any of the following modifiers\n"
4700 "\t to the field name, as applicable:\n\n"
4701 "\t .hex display a number as a hex value\n"
4702 "\t .sym display an address as a symbol\n"
4703 "\t .sym-offset display an address as a symbol and offset\n"
4704 "\t .execname display a common_pid as a program name\n"
4705 "\t .syscall display a syscall id as a syscall name\n\n"
4706 "\t .log2 display log2 value rather than raw number\n\n"
4707 "\t The 'pause' parameter can be used to pause an existing hist\n"
4708 "\t trigger or to start a hist trigger but not log any events\n"
4709 "\t until told to do so. 'continue' can be used to start or\n"
4710 "\t restart a paused hist trigger.\n\n"
4711 "\t The 'clear' parameter will clear the contents of a running\n"
4712 "\t hist trigger and leave its current paused/active state\n"
4714 "\t The enable_hist and disable_hist triggers can be used to\n"
4715 "\t have one event conditionally start and stop another event's\n"
4716 "\t already-attached hist trigger. The syntax is analagous to\n"
4717 "\t the enable_event and disable_event triggers.\n"
static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
					readme_msg, strlen(readme_msg));
}

static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
	.llseek		= generic_file_llseek,
};
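/*
 * Illustrative usage of the help text above (paths are examples, assuming
 * tracefs is mounted at its conventional location): the whole readme_msg
 * string is exposed read-only, and simple_read_from_buffer() handles
 * partial reads and the file offset, so any buffer size works:
 *
 *	cat /sys/kernel/tracing/README
 */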
4735 static void *saved_tgids_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
4739 if (*pos
|| m
->count
)
4744 for (; ptr
<= &tgid_map
[PID_MAX_DEFAULT
]; ptr
++) {
4745 if (trace_find_tgid(*ptr
))
4752 static void *saved_tgids_start(struct seq_file
*m
, loff_t
*pos
)
4762 v
= saved_tgids_next(m
, v
, &l
);
4770 static void saved_tgids_stop(struct seq_file
*m
, void *v
)
4774 static int saved_tgids_show(struct seq_file
*m
, void *v
)
4776 int pid
= (int *)v
- tgid_map
;
4778 seq_printf(m
, "%d %d\n", pid
, trace_find_tgid(pid
));
4782 static const struct seq_operations tracing_saved_tgids_seq_ops
= {
4783 .start
= saved_tgids_start
,
4784 .stop
= saved_tgids_stop
,
4785 .next
= saved_tgids_next
,
4786 .show
= saved_tgids_show
,
4789 static int tracing_saved_tgids_open(struct inode
*inode
, struct file
*filp
)
4791 if (tracing_disabled
)
4794 return seq_open(filp
, &tracing_saved_tgids_seq_ops
);
4798 static const struct file_operations tracing_saved_tgids_fops
= {
4799 .open
= tracing_saved_tgids_open
,
4801 .llseek
= seq_lseek
,
4802 .release
= seq_release
,
4805 static void *saved_cmdlines_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
4807 unsigned int *ptr
= v
;
4809 if (*pos
|| m
->count
)
4814 for (; ptr
< &savedcmd
->map_cmdline_to_pid
[savedcmd
->cmdline_num
];
4816 if (*ptr
== -1 || *ptr
== NO_CMDLINE_MAP
)
4825 static void *saved_cmdlines_start(struct seq_file
*m
, loff_t
*pos
)
4831 arch_spin_lock(&trace_cmdline_lock
);
4833 v
= &savedcmd
->map_cmdline_to_pid
[0];
4835 v
= saved_cmdlines_next(m
, v
, &l
);
4843 static void saved_cmdlines_stop(struct seq_file
*m
, void *v
)
4845 arch_spin_unlock(&trace_cmdline_lock
);
4849 static int saved_cmdlines_show(struct seq_file
*m
, void *v
)
4851 char buf
[TASK_COMM_LEN
];
4852 unsigned int *pid
= v
;
4854 __trace_find_cmdline(*pid
, buf
);
4855 seq_printf(m
, "%d %s\n", *pid
, buf
);
4859 static const struct seq_operations tracing_saved_cmdlines_seq_ops
= {
4860 .start
= saved_cmdlines_start
,
4861 .next
= saved_cmdlines_next
,
4862 .stop
= saved_cmdlines_stop
,
4863 .show
= saved_cmdlines_show
,
4866 static int tracing_saved_cmdlines_open(struct inode
*inode
, struct file
*filp
)
4868 if (tracing_disabled
)
4871 return seq_open(filp
, &tracing_saved_cmdlines_seq_ops
);
4874 static const struct file_operations tracing_saved_cmdlines_fops
= {
4875 .open
= tracing_saved_cmdlines_open
,
4877 .llseek
= seq_lseek
,
4878 .release
= seq_release
,
static ssize_t
tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
				 size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	arch_spin_lock(&trace_cmdline_lock);
	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
	arch_spin_unlock(&trace_cmdline_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
{
	kfree(s->saved_cmdlines);
	kfree(s->map_cmdline_to_pid);
	kfree(s);
}

static int tracing_resize_saved_cmdlines(unsigned int val)
{
	struct saved_cmdlines_buffer *s, *savedcmd_temp;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	if (allocate_cmdlines_buffer(val, s) < 0) {
		kfree(s);
		return -ENOMEM;
	}

	arch_spin_lock(&trace_cmdline_lock);
	savedcmd_temp = savedcmd;
	savedcmd = s;
	arch_spin_unlock(&trace_cmdline_lock);
	free_saved_cmdlines_buffer(savedcmd_temp);

	return 0;
}

static ssize_t
tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
				  size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
	if (!val || val > PID_MAX_DEFAULT)
		return -EINVAL;

	ret = tracing_resize_saved_cmdlines((unsigned int)val);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations tracing_saved_cmdlines_size_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_saved_cmdlines_size_read,
	.write		= tracing_saved_cmdlines_size_write,
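/*
 * Illustrative usage (path and size are only examples): a larger cache
 * means fewer PID->comm mappings are lost on busy systems, at the cost of
 * a bigger allocation done by tracing_resize_saved_cmdlines() above:
 *
 *	cat /sys/kernel/tracing/saved_cmdlines_size
 *	echo 8192 > /sys/kernel/tracing/saved_cmdlines_size
 */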
4954 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
4955 static union trace_eval_map_item
*
4956 update_eval_map(union trace_eval_map_item
*ptr
)
4958 if (!ptr
->map
.eval_string
) {
4959 if (ptr
->tail
.next
) {
4960 ptr
= ptr
->tail
.next
;
4961 /* Set ptr to the next real item (skip head) */
4969 static void *eval_map_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
4971 union trace_eval_map_item
*ptr
= v
;
4974 * Paranoid! If ptr points to end, we don't want to increment past it.
4975 * This really should never happen.
4977 ptr
= update_eval_map(ptr
);
4978 if (WARN_ON_ONCE(!ptr
))
4985 ptr
= update_eval_map(ptr
);
4990 static void *eval_map_start(struct seq_file
*m
, loff_t
*pos
)
4992 union trace_eval_map_item
*v
;
4995 mutex_lock(&trace_eval_mutex
);
4997 v
= trace_eval_maps
;
5001 while (v
&& l
< *pos
) {
5002 v
= eval_map_next(m
, v
, &l
);
5008 static void eval_map_stop(struct seq_file
*m
, void *v
)
5010 mutex_unlock(&trace_eval_mutex
);
5013 static int eval_map_show(struct seq_file
*m
, void *v
)
5015 union trace_eval_map_item
*ptr
= v
;
5017 seq_printf(m
, "%s %ld (%s)\n",
5018 ptr
->map
.eval_string
, ptr
->map
.eval_value
,
5024 static const struct seq_operations tracing_eval_map_seq_ops
= {
5025 .start
= eval_map_start
,
5026 .next
= eval_map_next
,
5027 .stop
= eval_map_stop
,
5028 .show
= eval_map_show
,
5031 static int tracing_eval_map_open(struct inode
*inode
, struct file
*filp
)
5033 if (tracing_disabled
)
5036 return seq_open(filp
, &tracing_eval_map_seq_ops
);
5039 static const struct file_operations tracing_eval_map_fops
= {
5040 .open
= tracing_eval_map_open
,
5042 .llseek
= seq_lseek
,
5043 .release
= seq_release
,
static inline union trace_eval_map_item *
trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
{
	/* Return tail of array given the head */
	return ptr + ptr->head.length + 1;
}
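/*
 * Layout sketch, derived from the comment in trace_insert_eval_map_file()
 * below: each module's chunk is allocated as len + 2 items, so jumping
 * head.length + 1 entries past the head lands exactly on the tail item.
 *
 *	[ head | map 0 | map 1 | ... | map len-1 | tail ]
 *	   |                                        |
 *	   +-- .mod, .length                        +-- .next chunk (or NULL)
 */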
5054 trace_insert_eval_map_file(struct module
*mod
, struct trace_eval_map
**start
,
5057 struct trace_eval_map
**stop
;
5058 struct trace_eval_map
**map
;
5059 union trace_eval_map_item
*map_array
;
5060 union trace_eval_map_item
*ptr
;
5065 * The trace_eval_maps contains the map plus a head and tail item,
5066 * where the head holds the module and length of array, and the
5067 * tail holds a pointer to the next list.
5069 map_array
= kmalloc(sizeof(*map_array
) * (len
+ 2), GFP_KERNEL
);
5071 pr_warn("Unable to allocate trace eval mapping\n");
5075 mutex_lock(&trace_eval_mutex
);
5077 if (!trace_eval_maps
)
5078 trace_eval_maps
= map_array
;
5080 ptr
= trace_eval_maps
;
5082 ptr
= trace_eval_jmp_to_tail(ptr
);
5083 if (!ptr
->tail
.next
)
5085 ptr
= ptr
->tail
.next
;
5088 ptr
->tail
.next
= map_array
;
5090 map_array
->head
.mod
= mod
;
5091 map_array
->head
.length
= len
;
5094 for (map
= start
; (unsigned long)map
< (unsigned long)stop
; map
++) {
5095 map_array
->map
= **map
;
5098 memset(map_array
, 0, sizeof(*map_array
));
5100 mutex_unlock(&trace_eval_mutex
);
5103 static void trace_create_eval_file(struct dentry
*d_tracer
)
5105 trace_create_file("eval_map", 0444, d_tracer
,
5106 NULL
, &tracing_eval_map_fops
);
5109 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
5110 static inline void trace_create_eval_file(struct dentry
*d_tracer
) { }
5111 static inline void trace_insert_eval_map_file(struct module
*mod
,
5112 struct trace_eval_map
**start
, int len
) { }
5113 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
5115 static void trace_insert_eval_map(struct module
*mod
,
5116 struct trace_eval_map
**start
, int len
)
5118 struct trace_eval_map
**map
;
5125 trace_event_eval_update(map
, len
);
5127 trace_insert_eval_map_file(mod
, start
, len
);
5131 tracing_set_trace_read(struct file
*filp
, char __user
*ubuf
,
5132 size_t cnt
, loff_t
*ppos
)
5134 struct trace_array
*tr
= filp
->private_data
;
5135 char buf
[MAX_TRACER_SIZE
+2];
5138 mutex_lock(&trace_types_lock
);
5139 r
= sprintf(buf
, "%s\n", tr
->current_trace
->name
);
5140 mutex_unlock(&trace_types_lock
);
5142 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
5145 int tracer_init(struct tracer
*t
, struct trace_array
*tr
)
5147 tracing_reset_online_cpus(&tr
->trace_buffer
);
5151 static void set_buffer_entries(struct trace_buffer
*buf
, unsigned long val
)
5155 for_each_tracing_cpu(cpu
)
5156 per_cpu_ptr(buf
->data
, cpu
)->entries
= val
;
5159 #ifdef CONFIG_TRACER_MAX_TRACE
5160 /* resize @tr's buffer to the size of @size_tr's entries */
5161 static int resize_buffer_duplicate_size(struct trace_buffer
*trace_buf
,
5162 struct trace_buffer
*size_buf
, int cpu_id
)
5166 if (cpu_id
== RING_BUFFER_ALL_CPUS
) {
5167 for_each_tracing_cpu(cpu
) {
5168 ret
= ring_buffer_resize(trace_buf
->buffer
,
5169 per_cpu_ptr(size_buf
->data
, cpu
)->entries
, cpu
);
5172 per_cpu_ptr(trace_buf
->data
, cpu
)->entries
=
5173 per_cpu_ptr(size_buf
->data
, cpu
)->entries
;
5176 ret
= ring_buffer_resize(trace_buf
->buffer
,
5177 per_cpu_ptr(size_buf
->data
, cpu_id
)->entries
, cpu_id
);
5179 per_cpu_ptr(trace_buf
->data
, cpu_id
)->entries
=
5180 per_cpu_ptr(size_buf
->data
, cpu_id
)->entries
;
5185 #endif /* CONFIG_TRACER_MAX_TRACE */
5187 static int __tracing_resize_ring_buffer(struct trace_array
*tr
,
5188 unsigned long size
, int cpu
)
5193 * If kernel or user changes the size of the ring buffer
5194 * we use the size that was given, and we can forget about
5195 * expanding it later.
5197 ring_buffer_expanded
= true;
5199 /* May be called before buffers are initialized */
5200 if (!tr
->trace_buffer
.buffer
)
5203 ret
= ring_buffer_resize(tr
->trace_buffer
.buffer
, size
, cpu
);
5207 #ifdef CONFIG_TRACER_MAX_TRACE
5208 if (!(tr
->flags
& TRACE_ARRAY_FL_GLOBAL
) ||
5209 !tr
->current_trace
->use_max_tr
)
5212 ret
= ring_buffer_resize(tr
->max_buffer
.buffer
, size
, cpu
);
5214 int r
= resize_buffer_duplicate_size(&tr
->trace_buffer
,
5215 &tr
->trace_buffer
, cpu
);
5218 * AARGH! We are left with different
5219 * size max buffer!!!!
5220 * The max buffer is our "snapshot" buffer.
5221 * When a tracer needs a snapshot (one of the
5222 * latency tracers), it swaps the max buffer
5223 * with the saved snap shot. We succeeded to
5224 * update the size of the main buffer, but failed to
5225 * update the size of the max buffer. But when we tried
5226 * to reset the main buffer to the original size, we
5227 * failed there too. This is very unlikely to
5228 * happen, but if it does, warn and kill all
5232 tracing_disabled
= 1;
5237 if (cpu
== RING_BUFFER_ALL_CPUS
)
5238 set_buffer_entries(&tr
->max_buffer
, size
);
5240 per_cpu_ptr(tr
->max_buffer
.data
, cpu
)->entries
= size
;
5243 #endif /* CONFIG_TRACER_MAX_TRACE */
5245 if (cpu
== RING_BUFFER_ALL_CPUS
)
5246 set_buffer_entries(&tr
->trace_buffer
, size
);
5248 per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->entries
= size
;
static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
					  unsigned long size, int cpu_id)
{
	int ret = size;

	mutex_lock(&trace_types_lock);

	if (cpu_id != RING_BUFFER_ALL_CPUS) {
		/* make sure this cpu is enabled in the mask */
		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
			ret = -EINVAL;
			goto out;
		}
	}

	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
	if (ret < 0)
		ret = -ENOMEM;

out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save memory on systems where tracing is configured in but never used,
 * the ring buffers start out at a minimum size.  Once a user starts to use
 * the tracing facility, they need to grow to their default size.
 *
 * This function is to be called when a tracer is about to be used.
 */
int tracing_update_buffers(void)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
	mutex_unlock(&trace_types_lock);

	return ret;
}
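/*
 * Typical caller pattern (a sketch based on how this function is used by
 * the tracing code, not a new API): expand the buffers before enabling
 * anything that will write into them, and bail out on failure.
 *
 *	ret = tracing_update_buffers();
 *	if (ret < 0)
 *		return ret;
 */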
5302 struct trace_option_dentry
;
5305 create_trace_option_files(struct trace_array
*tr
, struct tracer
*tracer
);
5308 * Used to clear out the tracer before deletion of an instance.
5309 * Must have trace_types_lock held.
5311 static void tracing_set_nop(struct trace_array
*tr
)
5313 if (tr
->current_trace
== &nop_trace
)
5316 tr
->current_trace
->enabled
--;
5318 if (tr
->current_trace
->reset
)
5319 tr
->current_trace
->reset(tr
);
5321 tr
->current_trace
= &nop_trace
;
5324 static void add_tracer_options(struct trace_array
*tr
, struct tracer
*t
)
5326 /* Only enable if the directory has been created already. */
5330 create_trace_option_files(tr
, t
);
static int tracing_set_tracer(struct trace_array *tr, const char *buf)
{
	struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool had_max_tr;
#endif
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (!ring_buffer_expanded) {
		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == tr->current_trace)
		goto out;

	/* Some tracers won't work on kernel command line */
	if (system_state < SYSTEM_RUNNING && t->noboot) {
		pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
			t->name);
		goto out;
	}

	/* Some tracers are only allowed for the top level buffer */
	if (!trace_ok_for_array(t, tr)) {
		ret = -EINVAL;
		goto out;
	}

	/* If trace pipe files are being read, we can't change the tracer */
	if (tr->current_trace->ref) {
		ret = -EBUSY;
		goto out;
	}

	trace_branch_disable();

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	/* Current trace needs to be nop_trace before synchronize_sched */
	tr->current_trace = &nop_trace;

#ifdef CONFIG_TRACER_MAX_TRACE
	had_max_tr = tr->allocated_snapshot;

	if (had_max_tr && !t->use_max_tr) {
		/*
		 * We need to make sure that the update_max_tr sees that
		 * current_trace changed to nop_trace to keep it from
		 * swapping the buffers after we resize it.
		 * The update_max_tr is called from interrupts disabled
		 * so a synchronize_sched() is sufficient.
		 */
		synchronize_sched();
		free_snapshot(tr);
	}
#endif

#ifdef CONFIG_TRACER_MAX_TRACE
	if (t->use_max_tr && !had_max_tr) {
		ret = alloc_snapshot(tr);
		if (ret < 0)
			goto out;
	}
#endif

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;
	}

	tr->current_trace = t;
	tr->current_trace->enabled++;
	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
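/*
 * Illustrative usage of the tracer switch implemented above (assuming the
 * standard tracefs mount point; the tracer names depend on the kernel
 * configuration):
 *
 *	cat /sys/kernel/tracing/available_tracers
 *	echo function > /sys/kernel/tracing/current_tracer
 *	echo nop > /sys/kernel/tracing/current_tracer	# stop and reset
 */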
5431 tracing_set_trace_write(struct file
*filp
, const char __user
*ubuf
,
5432 size_t cnt
, loff_t
*ppos
)
5434 struct trace_array
*tr
= filp
->private_data
;
5435 char buf
[MAX_TRACER_SIZE
+1];
5442 if (cnt
> MAX_TRACER_SIZE
)
5443 cnt
= MAX_TRACER_SIZE
;
5445 if (copy_from_user(buf
, ubuf
, cnt
))
5450 /* strip ending whitespace. */
5451 for (i
= cnt
- 1; i
> 0 && isspace(buf
[i
]); i
--)
5454 err
= tracing_set_tracer(tr
, buf
);
5464 tracing_nsecs_read(unsigned long *ptr
, char __user
*ubuf
,
5465 size_t cnt
, loff_t
*ppos
)
5470 r
= snprintf(buf
, sizeof(buf
), "%ld\n",
5471 *ptr
== (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr
));
5472 if (r
> sizeof(buf
))
5474 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
5478 tracing_nsecs_write(unsigned long *ptr
, const char __user
*ubuf
,
5479 size_t cnt
, loff_t
*ppos
)
5484 ret
= kstrtoul_from_user(ubuf
, cnt
, 10, &val
);
5494 tracing_thresh_read(struct file
*filp
, char __user
*ubuf
,
5495 size_t cnt
, loff_t
*ppos
)
5497 return tracing_nsecs_read(&tracing_thresh
, ubuf
, cnt
, ppos
);
5501 tracing_thresh_write(struct file
*filp
, const char __user
*ubuf
,
5502 size_t cnt
, loff_t
*ppos
)
5504 struct trace_array
*tr
= filp
->private_data
;
5507 mutex_lock(&trace_types_lock
);
5508 ret
= tracing_nsecs_write(&tracing_thresh
, ubuf
, cnt
, ppos
);
5512 if (tr
->current_trace
->update_thresh
) {
5513 ret
= tr
->current_trace
->update_thresh(tr
);
5520 mutex_unlock(&trace_types_lock
);
5525 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
5528 tracing_max_lat_read(struct file
*filp
, char __user
*ubuf
,
5529 size_t cnt
, loff_t
*ppos
)
5531 return tracing_nsecs_read(filp
->private_data
, ubuf
, cnt
, ppos
);
5535 tracing_max_lat_write(struct file
*filp
, const char __user
*ubuf
,
5536 size_t cnt
, loff_t
*ppos
)
5538 return tracing_nsecs_write(filp
->private_data
, ubuf
, cnt
, ppos
);
5543 static int tracing_open_pipe(struct inode
*inode
, struct file
*filp
)
5545 struct trace_array
*tr
= inode
->i_private
;
5546 struct trace_iterator
*iter
;
5549 if (tracing_disabled
)
5552 if (trace_array_get(tr
) < 0)
5555 mutex_lock(&trace_types_lock
);
5557 /* create a buffer to store the information to pass to userspace */
5558 iter
= kzalloc(sizeof(*iter
), GFP_KERNEL
);
5561 __trace_array_put(tr
);
5565 trace_seq_init(&iter
->seq
);
5566 iter
->trace
= tr
->current_trace
;
5568 if (!alloc_cpumask_var(&iter
->started
, GFP_KERNEL
)) {
5573 /* trace pipe does not show start of buffer */
5574 cpumask_setall(iter
->started
);
5576 if (tr
->trace_flags
& TRACE_ITER_LATENCY_FMT
)
5577 iter
->iter_flags
|= TRACE_FILE_LAT_FMT
;
5579 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
5580 if (trace_clocks
[tr
->clock_id
].in_ns
)
5581 iter
->iter_flags
|= TRACE_FILE_TIME_IN_NS
;
5584 iter
->trace_buffer
= &tr
->trace_buffer
;
5585 iter
->cpu_file
= tracing_get_cpu(inode
);
5586 mutex_init(&iter
->mutex
);
5587 filp
->private_data
= iter
;
5589 if (iter
->trace
->pipe_open
)
5590 iter
->trace
->pipe_open(iter
);
5592 nonseekable_open(inode
, filp
);
5594 tr
->current_trace
->ref
++;
5596 mutex_unlock(&trace_types_lock
);
5602 __trace_array_put(tr
);
5603 mutex_unlock(&trace_types_lock
);
5607 static int tracing_release_pipe(struct inode
*inode
, struct file
*file
)
5609 struct trace_iterator
*iter
= file
->private_data
;
5610 struct trace_array
*tr
= inode
->i_private
;
5612 mutex_lock(&trace_types_lock
);
5614 tr
->current_trace
->ref
--;
5616 if (iter
->trace
->pipe_close
)
5617 iter
->trace
->pipe_close(iter
);
5619 mutex_unlock(&trace_types_lock
);
5621 free_cpumask_var(iter
->started
);
5622 mutex_destroy(&iter
->mutex
);
5625 trace_array_put(tr
);
5631 trace_poll(struct trace_iterator
*iter
, struct file
*filp
, poll_table
*poll_table
)
5633 struct trace_array
*tr
= iter
->tr
;
5635 /* Iterators are static, they should be filled or empty */
5636 if (trace_buffer_iter(iter
, iter
->cpu_file
))
5637 return POLLIN
| POLLRDNORM
;
5639 if (tr
->trace_flags
& TRACE_ITER_BLOCK
)
5641 * Always select as readable when in blocking mode
5643 return POLLIN
| POLLRDNORM
;
5645 return ring_buffer_poll_wait(iter
->trace_buffer
->buffer
, iter
->cpu_file
,
5650 tracing_poll_pipe(struct file
*filp
, poll_table
*poll_table
)
5652 struct trace_iterator
*iter
= filp
->private_data
;
5654 return trace_poll(iter
, filp
, poll_table
);
5657 /* Must be called with iter->mutex held. */
5658 static int tracing_wait_pipe(struct file
*filp
)
5660 struct trace_iterator
*iter
= filp
->private_data
;
5663 while (trace_empty(iter
)) {
5665 if ((filp
->f_flags
& O_NONBLOCK
)) {
5670 * We block until we read something and tracing is disabled.
5671 * We still block if tracing is disabled, but we have never
5672 * read anything. This allows a user to cat this file, and
5673 * then enable tracing. But after we have read something,
5674 * we give an EOF when tracing is again disabled.
5676 * iter->pos will be 0 if we haven't read anything.
5678 if (!tracer_tracing_is_on(iter
->tr
) && iter
->pos
)
5681 mutex_unlock(&iter
->mutex
);
5683 ret
= wait_on_pipe(iter
, false);
5685 mutex_lock(&iter
->mutex
);
5698 tracing_read_pipe(struct file
*filp
, char __user
*ubuf
,
5699 size_t cnt
, loff_t
*ppos
)
5701 struct trace_iterator
*iter
= filp
->private_data
;
5705 * Avoid more than one consumer on a single file descriptor
5706 * This is just a matter of traces coherency, the ring buffer itself
5709 mutex_lock(&iter
->mutex
);
5711 /* return any leftover data */
5712 sret
= trace_seq_to_user(&iter
->seq
, ubuf
, cnt
);
5716 trace_seq_init(&iter
->seq
);
5718 if (iter
->trace
->read
) {
5719 sret
= iter
->trace
->read(iter
, filp
, ubuf
, cnt
, ppos
);
5725 sret
= tracing_wait_pipe(filp
);
5729 /* stop when tracing is finished */
5730 if (trace_empty(iter
)) {
5735 if (cnt
>= PAGE_SIZE
)
5736 cnt
= PAGE_SIZE
- 1;
5738 /* reset all but tr, trace, and overruns */
5739 memset(&iter
->seq
, 0,
5740 sizeof(struct trace_iterator
) -
5741 offsetof(struct trace_iterator
, seq
));
5742 cpumask_clear(iter
->started
);
5745 trace_event_read_lock();
5746 trace_access_lock(iter
->cpu_file
);
5747 while (trace_find_next_entry_inc(iter
) != NULL
) {
5748 enum print_line_t ret
;
5749 int save_len
= iter
->seq
.seq
.len
;
5751 ret
= print_trace_line(iter
);
5752 if (ret
== TRACE_TYPE_PARTIAL_LINE
) {
5753 /* don't print partial lines */
5754 iter
->seq
.seq
.len
= save_len
;
5757 if (ret
!= TRACE_TYPE_NO_CONSUME
)
5758 trace_consume(iter
);
5760 if (trace_seq_used(&iter
->seq
) >= cnt
)
5764 * Setting the full flag means we reached the trace_seq buffer
5765 * size and we should leave by partial output condition above.
5766 * One of the trace_seq_* functions is not used properly.
5768 WARN_ONCE(iter
->seq
.full
, "full flag set for trace type %d",
5771 trace_access_unlock(iter
->cpu_file
);
5772 trace_event_read_unlock();
5774 /* Now copy what we have to the user */
5775 sret
= trace_seq_to_user(&iter
->seq
, ubuf
, cnt
);
5776 if (iter
->seq
.seq
.readpos
>= trace_seq_used(&iter
->seq
))
5777 trace_seq_init(&iter
->seq
);
5780 * If there was nothing to send to user, in spite of consuming trace
5781 * entries, go back to wait for more entries.
5787 mutex_unlock(&iter
->mutex
);
5792 static void tracing_spd_release_pipe(struct splice_pipe_desc
*spd
,
5795 __free_page(spd
->pages
[idx
]);
5798 static const struct pipe_buf_operations tracing_pipe_buf_ops
= {
5800 .confirm
= generic_pipe_buf_confirm
,
5801 .release
= generic_pipe_buf_release
,
5802 .steal
= generic_pipe_buf_steal
,
5803 .get
= generic_pipe_buf_get
,
5807 tracing_fill_pipe_page(size_t rem
, struct trace_iterator
*iter
)
5813 /* Seq buffer is page-sized, exactly what we need. */
5815 save_len
= iter
->seq
.seq
.len
;
5816 ret
= print_trace_line(iter
);
5818 if (trace_seq_has_overflowed(&iter
->seq
)) {
5819 iter
->seq
.seq
.len
= save_len
;
5824 * This should not be hit, because it should only
5825 * be set if the iter->seq overflowed. But check it
5826 * anyway to be safe.
5828 if (ret
== TRACE_TYPE_PARTIAL_LINE
) {
5829 iter
->seq
.seq
.len
= save_len
;
5833 count
= trace_seq_used(&iter
->seq
) - save_len
;
5836 iter
->seq
.seq
.len
= save_len
;
5840 if (ret
!= TRACE_TYPE_NO_CONSUME
)
5841 trace_consume(iter
);
5843 if (!trace_find_next_entry_inc(iter
)) {
5853 static ssize_t
tracing_splice_read_pipe(struct file
*filp
,
5855 struct pipe_inode_info
*pipe
,
5859 struct page
*pages_def
[PIPE_DEF_BUFFERS
];
5860 struct partial_page partial_def
[PIPE_DEF_BUFFERS
];
5861 struct trace_iterator
*iter
= filp
->private_data
;
5862 struct splice_pipe_desc spd
= {
5864 .partial
= partial_def
,
5865 .nr_pages
= 0, /* This gets updated below. */
5866 .nr_pages_max
= PIPE_DEF_BUFFERS
,
5867 .ops
= &tracing_pipe_buf_ops
,
5868 .spd_release
= tracing_spd_release_pipe
,
5874 if (splice_grow_spd(pipe
, &spd
))
5877 mutex_lock(&iter
->mutex
);
5879 if (iter
->trace
->splice_read
) {
5880 ret
= iter
->trace
->splice_read(iter
, filp
,
5881 ppos
, pipe
, len
, flags
);
5886 ret
= tracing_wait_pipe(filp
);
5890 if (!iter
->ent
&& !trace_find_next_entry_inc(iter
)) {
5895 trace_event_read_lock();
5896 trace_access_lock(iter
->cpu_file
);
5898 /* Fill as many pages as possible. */
5899 for (i
= 0, rem
= len
; i
< spd
.nr_pages_max
&& rem
; i
++) {
5900 spd
.pages
[i
] = alloc_page(GFP_KERNEL
);
5904 rem
= tracing_fill_pipe_page(rem
, iter
);
5906 /* Copy the data into the page, so we can start over. */
5907 ret
= trace_seq_to_buffer(&iter
->seq
,
5908 page_address(spd
.pages
[i
]),
5909 trace_seq_used(&iter
->seq
));
5911 __free_page(spd
.pages
[i
]);
5914 spd
.partial
[i
].offset
= 0;
5915 spd
.partial
[i
].len
= trace_seq_used(&iter
->seq
);
5917 trace_seq_init(&iter
->seq
);
5920 trace_access_unlock(iter
->cpu_file
);
5921 trace_event_read_unlock();
5922 mutex_unlock(&iter
->mutex
);
5927 ret
= splice_to_pipe(pipe
, &spd
);
5931 splice_shrink_spd(&spd
);
5935 mutex_unlock(&iter
->mutex
);
5940 tracing_entries_read(struct file
*filp
, char __user
*ubuf
,
5941 size_t cnt
, loff_t
*ppos
)
5943 struct inode
*inode
= file_inode(filp
);
5944 struct trace_array
*tr
= inode
->i_private
;
5945 int cpu
= tracing_get_cpu(inode
);
5950 mutex_lock(&trace_types_lock
);
5952 if (cpu
== RING_BUFFER_ALL_CPUS
) {
5953 int cpu
, buf_size_same
;
5958 /* check if all cpu sizes are same */
5959 for_each_tracing_cpu(cpu
) {
5960 /* fill in the size from first enabled cpu */
5962 size
= per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->entries
;
5963 if (size
!= per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->entries
) {
5969 if (buf_size_same
) {
5970 if (!ring_buffer_expanded
)
5971 r
= sprintf(buf
, "%lu (expanded: %lu)\n",
5973 trace_buf_size
>> 10);
5975 r
= sprintf(buf
, "%lu\n", size
>> 10);
5977 r
= sprintf(buf
, "X\n");
5979 r
= sprintf(buf
, "%lu\n", per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->entries
>> 10);
5981 mutex_unlock(&trace_types_lock
);
5983 ret
= simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
5988 tracing_entries_write(struct file
*filp
, const char __user
*ubuf
,
5989 size_t cnt
, loff_t
*ppos
)
5991 struct inode
*inode
= file_inode(filp
);
5992 struct trace_array
*tr
= inode
->i_private
;
5996 ret
= kstrtoul_from_user(ubuf
, cnt
, 10, &val
);
6000 /* must have at least 1 entry */
6004 /* value is in KB */
6006 ret
= tracing_resize_ring_buffer(tr
, val
, tracing_get_cpu(inode
));
6016 tracing_total_entries_read(struct file
*filp
, char __user
*ubuf
,
6017 size_t cnt
, loff_t
*ppos
)
6019 struct trace_array
*tr
= filp
->private_data
;
6022 unsigned long size
= 0, expanded_size
= 0;
6024 mutex_lock(&trace_types_lock
);
6025 for_each_tracing_cpu(cpu
) {
6026 size
+= per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->entries
>> 10;
6027 if (!ring_buffer_expanded
)
6028 expanded_size
+= trace_buf_size
>> 10;
6030 if (ring_buffer_expanded
)
6031 r
= sprintf(buf
, "%lu\n", size
);
6033 r
= sprintf(buf
, "%lu (expanded: %lu)\n", size
, expanded_size
);
6034 mutex_unlock(&trace_types_lock
);
6036 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
6040 tracing_free_buffer_write(struct file
*filp
, const char __user
*ubuf
,
6041 size_t cnt
, loff_t
*ppos
)
6044 * There is no need to read what the user has written, this function
6045 * is just to make sure that there is no error when "echo" is used
6054 tracing_free_buffer_release(struct inode
*inode
, struct file
*filp
)
6056 struct trace_array
*tr
= inode
->i_private
;
6058 /* disable tracing ? */
6059 if (tr
->trace_flags
& TRACE_ITER_STOP_ON_FREE
)
6060 tracer_tracing_off(tr
);
6061 /* resize the ring buffer to 0 */
6062 tracing_resize_ring_buffer(tr
, 0, RING_BUFFER_ALL_CPUS
);
6064 trace_array_put(tr
);
static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
					size_t cnt, loff_t *fpos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	const char faulted[] = "<faulted>";
	ssize_t written;
	int size;
	int len;

/* Used in tracing_mark_raw_write() as well */
#define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */

	if (tracing_disabled)
		return -EINVAL;

	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */

	/* If less than "<faulted>", then make sure we can still add that */
	if (cnt < FAULTED_SIZE)
		size += FAULTED_SIZE - cnt;

	buffer = tr->trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					    irq_flags, preempt_count());
	if (unlikely(!event))
		/* Ring buffer disabled, return as if not open for write */
		return -EBADF;

	entry = ring_buffer_event_data(event);
	entry->ip = _THIS_IP_;

	len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
	if (len) {
		memcpy(&entry->buf, faulted, FAULTED_SIZE);
		cnt = FAULTED_SIZE;
		written = -EFAULT;
	} else
		written = cnt;

	if (entry->buf[cnt - 1] != '\n') {
		entry->buf[cnt] = '\n';
		entry->buf[cnt + 1] = '\0';
	} else
		entry->buf[cnt] = '\0';

	__buffer_unlock_commit(buffer, event);

	if (written > 0)
		*fpos += written;

	return written;
}
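/*
 * Illustrative usage (path and text are only examples): annotations written
 * here show up inline with the other trace entries, which is handy for
 * correlating userspace phases with kernel events:
 *
 *	echo "frame 42 start" > /sys/kernel/tracing/trace_marker
 */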
/* Limit it for now to 3K (including tag) */
#define RAW_DATA_MAX_SIZE (1024*3)

static ssize_t
tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
					size_t cnt, loff_t *fpos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct raw_data_entry *entry;
	const char faulted[] = "<faulted>";
	unsigned long irq_flags;
	ssize_t written;
	int size;
	int len;

#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))

	if (tracing_disabled)
		return -EINVAL;

	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	/* The marker must at least have a tag id */
	if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt;
	if (cnt < FAULT_SIZE_ID)
		size += FAULT_SIZE_ID - cnt;

	buffer = tr->trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
					    irq_flags, preempt_count());
	if (!event)
		/* Ring buffer disabled, return as if not open for write */
		return -EBADF;

	entry = ring_buffer_event_data(event);

	len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
	if (len) {
		entry->id = -1;
		memcpy(&entry->buf, faulted, FAULTED_SIZE);
		written = -EFAULT;
	} else
		written = cnt;

	__buffer_unlock_commit(buffer, event);

	if (written > 0)
		*fpos += written;

	return written;
}
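/*
 * Sketch of the expected write format, derived from the size checks above
 * (the struct layout and values are only illustrative, not a new ABI): the
 * payload is an unsigned int tag id followed by opaque binary data.
 *
 *	struct { unsigned int id; char payload[8]; } msg = { 7, "raw-data" };
 *	write(fd, &msg, sizeof(msg));	// fd: the raw marker file descriptor
 */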
static int tracing_clock_show(struct seq_file *m, void *v)
{
	struct trace_array *tr = m->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
		seq_printf(m,
			"%s%s%s%s", i ? " " : "",
			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
			i == tr->clock_id ? "]" : "");
	seq_putc(m, '\n');

	return 0;
}

static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tr->clock_id = i;

	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);

	/*
	 * New clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
	tracing_reset_online_cpus(&tr->max_buffer);
#endif

	mutex_unlock(&trace_types_lock);

	return 0;
}

static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *fpos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	const char *clockstr;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	clockstr = strstrip(buf);

	ret = tracing_set_clock(tr, clockstr);
	if (ret)
		return ret;

	*fpos += cnt;

	return cnt;
}

static int tracing_clock_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr))
		return -ENODEV;

	ret = single_open(file, tracing_clock_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
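/*
 * Illustrative usage (assuming the standard tracefs mount point): the
 * current clock is shown in brackets, and writing a listed name switches
 * it.  Note that tracing_set_clock() above resets the buffers, so any
 * existing trace data is discarded on a clock change.
 *
 *	cat /sys/kernel/tracing/trace_clock
 *	echo global > /sys/kernel/tracing/trace_clock
 */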
6296 struct ftrace_buffer_info
{
6297 struct trace_iterator iter
;
6299 unsigned int spare_cpu
;
6303 #ifdef CONFIG_TRACER_SNAPSHOT
6304 static int tracing_snapshot_open(struct inode
*inode
, struct file
*file
)
6306 struct trace_array
*tr
= inode
->i_private
;
6307 struct trace_iterator
*iter
;
6311 if (trace_array_get(tr
) < 0)
6314 if (file
->f_mode
& FMODE_READ
) {
6315 iter
= __tracing_open(inode
, file
, true);
6317 ret
= PTR_ERR(iter
);
6319 /* Writes still need the seq_file to hold the private data */
6321 m
= kzalloc(sizeof(*m
), GFP_KERNEL
);
6324 iter
= kzalloc(sizeof(*iter
), GFP_KERNEL
);
6332 iter
->trace_buffer
= &tr
->max_buffer
;
6333 iter
->cpu_file
= tracing_get_cpu(inode
);
6335 file
->private_data
= m
;
6339 trace_array_put(tr
);
6345 tracing_snapshot_write(struct file
*filp
, const char __user
*ubuf
, size_t cnt
,
6348 struct seq_file
*m
= filp
->private_data
;
6349 struct trace_iterator
*iter
= m
->private;
6350 struct trace_array
*tr
= iter
->tr
;
6354 ret
= tracing_update_buffers();
6358 ret
= kstrtoul_from_user(ubuf
, cnt
, 10, &val
);
6362 mutex_lock(&trace_types_lock
);
6364 if (tr
->current_trace
->use_max_tr
) {
6371 if (iter
->cpu_file
!= RING_BUFFER_ALL_CPUS
) {
6375 if (tr
->allocated_snapshot
)
6379 /* Only allow per-cpu swap if the ring buffer supports it */
6380 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6381 if (iter
->cpu_file
!= RING_BUFFER_ALL_CPUS
) {
6386 if (!tr
->allocated_snapshot
) {
6387 ret
= alloc_snapshot(tr
);
6391 local_irq_disable();
6392 /* Now, we're going to swap */
6393 if (iter
->cpu_file
== RING_BUFFER_ALL_CPUS
)
6394 update_max_tr(tr
, current
, smp_processor_id());
6396 update_max_tr_single(tr
, current
, iter
->cpu_file
);
6400 if (tr
->allocated_snapshot
) {
6401 if (iter
->cpu_file
== RING_BUFFER_ALL_CPUS
)
6402 tracing_reset_online_cpus(&tr
->max_buffer
);
6404 tracing_reset(&tr
->max_buffer
, iter
->cpu_file
);
6414 mutex_unlock(&trace_types_lock
);
6418 static int tracing_snapshot_release(struct inode
*inode
, struct file
*file
)
6420 struct seq_file
*m
= file
->private_data
;
6423 ret
= tracing_release(inode
, file
);
6425 if (file
->f_mode
& FMODE_READ
)
6428 /* If write only, the seq_file is just a stub */
6436 static int tracing_buffers_open(struct inode
*inode
, struct file
*filp
);
6437 static ssize_t
tracing_buffers_read(struct file
*filp
, char __user
*ubuf
,
6438 size_t count
, loff_t
*ppos
);
6439 static int tracing_buffers_release(struct inode
*inode
, struct file
*file
);
6440 static ssize_t
tracing_buffers_splice_read(struct file
*file
, loff_t
*ppos
,
6441 struct pipe_inode_info
*pipe
, size_t len
, unsigned int flags
);
6443 static int snapshot_raw_open(struct inode
*inode
, struct file
*filp
)
6445 struct ftrace_buffer_info
*info
;
6448 ret
= tracing_buffers_open(inode
, filp
);
6452 info
= filp
->private_data
;
6454 if (info
->iter
.trace
->use_max_tr
) {
6455 tracing_buffers_release(inode
, filp
);
6459 info
->iter
.snapshot
= true;
6460 info
->iter
.trace_buffer
= &info
->iter
.tr
->max_buffer
;
6465 #endif /* CONFIG_TRACER_SNAPSHOT */
6468 static const struct file_operations tracing_thresh_fops
= {
6469 .open
= tracing_open_generic
,
6470 .read
= tracing_thresh_read
,
6471 .write
= tracing_thresh_write
,
6472 .llseek
= generic_file_llseek
,
6475 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6476 static const struct file_operations tracing_max_lat_fops
= {
6477 .open
= tracing_open_generic
,
6478 .read
= tracing_max_lat_read
,
6479 .write
= tracing_max_lat_write
,
6480 .llseek
= generic_file_llseek
,
6484 static const struct file_operations set_tracer_fops
= {
6485 .open
= tracing_open_generic
,
6486 .read
= tracing_set_trace_read
,
6487 .write
= tracing_set_trace_write
,
6488 .llseek
= generic_file_llseek
,
6491 static const struct file_operations tracing_pipe_fops
= {
6492 .open
= tracing_open_pipe
,
6493 .poll
= tracing_poll_pipe
,
6494 .read
= tracing_read_pipe
,
6495 .splice_read
= tracing_splice_read_pipe
,
6496 .release
= tracing_release_pipe
,
6497 .llseek
= no_llseek
,
6500 static const struct file_operations tracing_entries_fops
= {
6501 .open
= tracing_open_generic_tr
,
6502 .read
= tracing_entries_read
,
6503 .write
= tracing_entries_write
,
6504 .llseek
= generic_file_llseek
,
6505 .release
= tracing_release_generic_tr
,
6508 static const struct file_operations tracing_total_entries_fops
= {
6509 .open
= tracing_open_generic_tr
,
6510 .read
= tracing_total_entries_read
,
6511 .llseek
= generic_file_llseek
,
6512 .release
= tracing_release_generic_tr
,
6515 static const struct file_operations tracing_free_buffer_fops
= {
6516 .open
= tracing_open_generic_tr
,
6517 .write
= tracing_free_buffer_write
,
6518 .release
= tracing_free_buffer_release
,
6521 static const struct file_operations tracing_mark_fops
= {
6522 .open
= tracing_open_generic_tr
,
6523 .write
= tracing_mark_write
,
6524 .llseek
= generic_file_llseek
,
6525 .release
= tracing_release_generic_tr
,
6528 static const struct file_operations tracing_mark_raw_fops
= {
6529 .open
= tracing_open_generic_tr
,
6530 .write
= tracing_mark_raw_write
,
6531 .llseek
= generic_file_llseek
,
6532 .release
= tracing_release_generic_tr
,
6535 static const struct file_operations trace_clock_fops
= {
6536 .open
= tracing_clock_open
,
6538 .llseek
= seq_lseek
,
6539 .release
= tracing_single_release_tr
,
6540 .write
= tracing_clock_write
,
#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */
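/*
 * Illustrative usage of the snapshot files wired up above (paths are
 * examples): the snapshot is a frozen copy swapped out of the live buffer,
 * so tracing continues in the main buffer while the copy is read.
 *
 *	echo 1 > /sys/kernel/tracing/snapshot	# allocate and take a snapshot
 *	cat /sys/kernel/tracing/snapshot	# read the frozen copy
 *	echo 0 > /sys/kernel/tracing/snapshot	# free the snapshot buffer
 */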
6562 static int tracing_buffers_open(struct inode
*inode
, struct file
*filp
)
6564 struct trace_array
*tr
= inode
->i_private
;
6565 struct ftrace_buffer_info
*info
;
6568 if (tracing_disabled
)
6571 if (trace_array_get(tr
) < 0)
6574 info
= kzalloc(sizeof(*info
), GFP_KERNEL
);
6576 trace_array_put(tr
);
6580 mutex_lock(&trace_types_lock
);
6583 info
->iter
.cpu_file
= tracing_get_cpu(inode
);
6584 info
->iter
.trace
= tr
->current_trace
;
6585 info
->iter
.trace_buffer
= &tr
->trace_buffer
;
6587 /* Force reading ring buffer for first read */
6588 info
->read
= (unsigned int)-1;
6590 filp
->private_data
= info
;
6592 tr
->current_trace
->ref
++;
6594 mutex_unlock(&trace_types_lock
);
6596 ret
= nonseekable_open(inode
, filp
);
6598 trace_array_put(tr
);
6604 tracing_buffers_poll(struct file
*filp
, poll_table
*poll_table
)
6606 struct ftrace_buffer_info
*info
= filp
->private_data
;
6607 struct trace_iterator
*iter
= &info
->iter
;
6609 return trace_poll(iter
, filp
, poll_table
);
6613 tracing_buffers_read(struct file
*filp
, char __user
*ubuf
,
6614 size_t count
, loff_t
*ppos
)
6616 struct ftrace_buffer_info
*info
= filp
->private_data
;
6617 struct trace_iterator
*iter
= &info
->iter
;
6624 #ifdef CONFIG_TRACER_MAX_TRACE
6625 if (iter
->snapshot
&& iter
->tr
->current_trace
->use_max_tr
)
6630 info
->spare
= ring_buffer_alloc_read_page(iter
->trace_buffer
->buffer
,
6632 if (IS_ERR(info
->spare
)) {
6633 ret
= PTR_ERR(info
->spare
);
6636 info
->spare_cpu
= iter
->cpu_file
;
6642 /* Do we have previous read data to read? */
6643 if (info
->read
< PAGE_SIZE
)
6647 trace_access_lock(iter
->cpu_file
);
6648 ret
= ring_buffer_read_page(iter
->trace_buffer
->buffer
,
6652 trace_access_unlock(iter
->cpu_file
);
6655 if (trace_empty(iter
)) {
6656 if ((filp
->f_flags
& O_NONBLOCK
))
6659 ret
= wait_on_pipe(iter
, false);
6670 size
= PAGE_SIZE
- info
->read
;
6674 ret
= copy_to_user(ubuf
, info
->spare
+ info
->read
, size
);
6686 static int tracing_buffers_release(struct inode
*inode
, struct file
*file
)
6688 struct ftrace_buffer_info
*info
= file
->private_data
;
6689 struct trace_iterator
*iter
= &info
->iter
;
6691 mutex_lock(&trace_types_lock
);
6693 iter
->tr
->current_trace
->ref
--;
6695 __trace_array_put(iter
->tr
);
6698 ring_buffer_free_read_page(iter
->trace_buffer
->buffer
,
6699 info
->spare_cpu
, info
->spare
);
6702 mutex_unlock(&trace_types_lock
);
6708 struct ring_buffer
*buffer
;
6714 static void buffer_pipe_buf_release(struct pipe_inode_info
*pipe
,
6715 struct pipe_buffer
*buf
)
6717 struct buffer_ref
*ref
= (struct buffer_ref
*)buf
->private;
6722 ring_buffer_free_read_page(ref
->buffer
, ref
->cpu
, ref
->page
);
6727 static void buffer_pipe_buf_get(struct pipe_inode_info
*pipe
,
6728 struct pipe_buffer
*buf
)
6730 struct buffer_ref
*ref
= (struct buffer_ref
*)buf
->private;
6735 /* Pipe buffer operations for a buffer. */
6736 static const struct pipe_buf_operations buffer_pipe_buf_ops
= {
6738 .confirm
= generic_pipe_buf_confirm
,
6739 .release
= buffer_pipe_buf_release
,
6740 .steal
= generic_pipe_buf_steal
,
6741 .get
= buffer_pipe_buf_get
,
6745 * Callback from splice_to_pipe(), if we need to release some pages
6746 * at the end of the spd in case we error'ed out in filling the pipe.
6748 static void buffer_spd_release(struct splice_pipe_desc
*spd
, unsigned int i
)
6750 struct buffer_ref
*ref
=
6751 (struct buffer_ref
*)spd
->partial
[i
].private;
6756 ring_buffer_free_read_page(ref
->buffer
, ref
->cpu
, ref
->page
);
6758 spd
->partial
[i
].private = 0;
6762 tracing_buffers_splice_read(struct file
*file
, loff_t
*ppos
,
6763 struct pipe_inode_info
*pipe
, size_t len
,
6766 struct ftrace_buffer_info
*info
= file
->private_data
;
6767 struct trace_iterator
*iter
= &info
->iter
;
6768 struct partial_page partial_def
[PIPE_DEF_BUFFERS
];
6769 struct page
*pages_def
[PIPE_DEF_BUFFERS
];
6770 struct splice_pipe_desc spd
= {
6772 .partial
= partial_def
,
6773 .nr_pages_max
= PIPE_DEF_BUFFERS
,
6774 .ops
= &buffer_pipe_buf_ops
,
6775 .spd_release
= buffer_spd_release
,
6777 struct buffer_ref
*ref
;
6781 #ifdef CONFIG_TRACER_MAX_TRACE
6782 if (iter
->snapshot
&& iter
->tr
->current_trace
->use_max_tr
)
6786 if (*ppos
& (PAGE_SIZE
- 1))
6789 if (len
& (PAGE_SIZE
- 1)) {
6790 if (len
< PAGE_SIZE
)
6795 if (splice_grow_spd(pipe
, &spd
))
6799 trace_access_lock(iter
->cpu_file
);
6800 entries
= ring_buffer_entries_cpu(iter
->trace_buffer
->buffer
, iter
->cpu_file
);
6802 for (i
= 0; i
< spd
.nr_pages_max
&& len
&& entries
; i
++, len
-= PAGE_SIZE
) {
6806 ref
= kzalloc(sizeof(*ref
), GFP_KERNEL
);
6813 ref
->buffer
= iter
->trace_buffer
->buffer
;
6814 ref
->page
= ring_buffer_alloc_read_page(ref
->buffer
, iter
->cpu_file
);
6815 if (IS_ERR(ref
->page
)) {
6816 ret
= PTR_ERR(ref
->page
);
6821 ref
->cpu
= iter
->cpu_file
;
6823 r
= ring_buffer_read_page(ref
->buffer
, &ref
->page
,
6824 len
, iter
->cpu_file
, 1);
6826 ring_buffer_free_read_page(ref
->buffer
, ref
->cpu
,
6832 page
= virt_to_page(ref
->page
);
6834 spd
.pages
[i
] = page
;
6835 spd
.partial
[i
].len
= PAGE_SIZE
;
6836 spd
.partial
[i
].offset
= 0;
6837 spd
.partial
[i
].private = (unsigned long)ref
;
6841 entries
= ring_buffer_entries_cpu(iter
->trace_buffer
->buffer
, iter
->cpu_file
);
6844 trace_access_unlock(iter
->cpu_file
);
6847 /* did we read anything? */
6848 if (!spd
.nr_pages
) {
6853 if ((file
->f_flags
& O_NONBLOCK
) || (flags
& SPLICE_F_NONBLOCK
))
6856 ret
= wait_on_pipe(iter
, true);
6863 ret
= splice_to_pipe(pipe
, &spd
);
6865 splice_shrink_spd(&spd
);
6870 static const struct file_operations tracing_buffers_fops
= {
6871 .open
= tracing_buffers_open
,
6872 .read
= tracing_buffers_read
,
6873 .poll
= tracing_buffers_poll
,
6874 .release
= tracing_buffers_release
,
6875 .splice_read
= tracing_buffers_splice_read
,
6876 .llseek
= no_llseek
,
6880 tracing_stats_read(struct file
*filp
, char __user
*ubuf
,
6881 size_t count
, loff_t
*ppos
)
6883 struct inode
*inode
= file_inode(filp
);
6884 struct trace_array
*tr
= inode
->i_private
;
6885 struct trace_buffer
*trace_buf
= &tr
->trace_buffer
;
6886 int cpu
= tracing_get_cpu(inode
);
6887 struct trace_seq
*s
;
6889 unsigned long long t
;
6890 unsigned long usec_rem
;
6892 s
= kmalloc(sizeof(*s
), GFP_KERNEL
);
6898 cnt
= ring_buffer_entries_cpu(trace_buf
->buffer
, cpu
);
6899 trace_seq_printf(s
, "entries: %ld\n", cnt
);
6901 cnt
= ring_buffer_overrun_cpu(trace_buf
->buffer
, cpu
);
6902 trace_seq_printf(s
, "overrun: %ld\n", cnt
);
6904 cnt
= ring_buffer_commit_overrun_cpu(trace_buf
->buffer
, cpu
);
6905 trace_seq_printf(s
, "commit overrun: %ld\n", cnt
);
6907 cnt
= ring_buffer_bytes_cpu(trace_buf
->buffer
, cpu
);
6908 trace_seq_printf(s
, "bytes: %ld\n", cnt
);
6910 if (trace_clocks
[tr
->clock_id
].in_ns
) {
6911 /* local or global for trace_clock */
6912 t
= ns2usecs(ring_buffer_oldest_event_ts(trace_buf
->buffer
, cpu
));
6913 usec_rem
= do_div(t
, USEC_PER_SEC
);
6914 trace_seq_printf(s
, "oldest event ts: %5llu.%06lu\n",
6917 t
= ns2usecs(ring_buffer_time_stamp(trace_buf
->buffer
, cpu
));
6918 usec_rem
= do_div(t
, USEC_PER_SEC
);
6919 trace_seq_printf(s
, "now ts: %5llu.%06lu\n", t
, usec_rem
);
6921 /* counter or tsc mode for trace_clock */
6922 trace_seq_printf(s
, "oldest event ts: %llu\n",
6923 ring_buffer_oldest_event_ts(trace_buf
->buffer
, cpu
));
6925 trace_seq_printf(s
, "now ts: %llu\n",
6926 ring_buffer_time_stamp(trace_buf
->buffer
, cpu
));
6929 cnt
= ring_buffer_dropped_events_cpu(trace_buf
->buffer
, cpu
);
6930 trace_seq_printf(s
, "dropped events: %ld\n", cnt
);
6932 cnt
= ring_buffer_read_events_cpu(trace_buf
->buffer
, cpu
);
6933 trace_seq_printf(s
, "read events: %ld\n", cnt
);
6935 count
= simple_read_from_buffer(ubuf
, count
, ppos
,
6936 s
->buffer
, trace_seq_used(s
));
6943 static const struct file_operations tracing_stats_fops
= {
6944 .open
= tracing_open_generic_tr
,
6945 .read
= tracing_stats_read
,
6946 .llseek
= generic_file_llseek
,
6947 .release
= tracing_release_generic_tr
,
6950 #ifdef CONFIG_DYNAMIC_FTRACE
6953 tracing_read_dyn_info(struct file
*filp
, char __user
*ubuf
,
6954 size_t cnt
, loff_t
*ppos
)
6956 unsigned long *p
= filp
->private_data
;
6957 char buf
[64]; /* Not too big for a shallow stack */
6960 r
= scnprintf(buf
, 63, "%ld", *p
);
6963 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
6966 static const struct file_operations tracing_dyn_info_fops
= {
6967 .open
= tracing_open_generic
,
6968 .read
= tracing_read_dyn_info
,
6969 .llseek
= generic_file_llseek
,
6971 #endif /* CONFIG_DYNAMIC_FTRACE */
6973 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
6975 ftrace_snapshot(unsigned long ip
, unsigned long parent_ip
,
6976 struct trace_array
*tr
, struct ftrace_probe_ops
*ops
,
6979 tracing_snapshot_instance(tr
);
6983 ftrace_count_snapshot(unsigned long ip
, unsigned long parent_ip
,
6984 struct trace_array
*tr
, struct ftrace_probe_ops
*ops
,
6987 struct ftrace_func_mapper
*mapper
= data
;
6991 count
= (long *)ftrace_func_mapper_find_ip(mapper
, ip
);
7001 tracing_snapshot_instance(tr
);
7005 ftrace_snapshot_print(struct seq_file
*m
, unsigned long ip
,
7006 struct ftrace_probe_ops
*ops
, void *data
)
7008 struct ftrace_func_mapper
*mapper
= data
;
7011 seq_printf(m
, "%ps:", (void *)ip
);
7013 seq_puts(m
, "snapshot");
7016 count
= (long *)ftrace_func_mapper_find_ip(mapper
, ip
);
7019 seq_printf(m
, ":count=%ld\n", *count
);
7021 seq_puts(m
, ":unlimited\n");
7027 ftrace_snapshot_init(struct ftrace_probe_ops
*ops
, struct trace_array
*tr
,
7028 unsigned long ip
, void *init_data
, void **data
)
7030 struct ftrace_func_mapper
*mapper
= *data
;
7033 mapper
= allocate_ftrace_func_mapper();
7039 return ftrace_func_mapper_add_ip(mapper
, ip
, init_data
);
7043 ftrace_snapshot_free(struct ftrace_probe_ops
*ops
, struct trace_array
*tr
,
7044 unsigned long ip
, void *data
)
7046 struct ftrace_func_mapper
*mapper
= data
;
7051 free_ftrace_func_mapper(mapper
, NULL
);
7055 ftrace_func_mapper_remove_ip(mapper
, ip
);
7058 static struct ftrace_probe_ops snapshot_probe_ops
= {
7059 .func
= ftrace_snapshot
,
7060 .print
= ftrace_snapshot_print
,
7063 static struct ftrace_probe_ops snapshot_count_probe_ops
= {
7064 .func
= ftrace_count_snapshot
,
7065 .print
= ftrace_snapshot_print
,
7066 .init
= ftrace_snapshot_init
,
7067 .free
= ftrace_snapshot_free
,
7071 ftrace_trace_snapshot_callback(struct trace_array
*tr
, struct ftrace_hash
*hash
,
7072 char *glob
, char *cmd
, char *param
, int enable
)
7074 struct ftrace_probe_ops
*ops
;
7075 void *count
= (void *)-1;
7082 /* hash funcs only work with set_ftrace_filter */
7086 ops
= param
? &snapshot_count_probe_ops
: &snapshot_probe_ops
;
7089 return unregister_ftrace_function_probe_func(glob
+1, tr
, ops
);
7094 number
= strsep(¶m
, ":");
7096 if (!strlen(number
))
7100 * We use the callback data field (which is a pointer)
7103 ret
= kstrtoul(number
, 0, (unsigned long *)&count
);
7108 ret
= alloc_snapshot(tr
);
7112 ret
= register_ftrace_function_probe(glob
, tr
, ops
, count
);
7115 return ret
< 0 ? ret
: 0;
7118 static struct ftrace_func_command ftrace_snapshot_cmd
= {
7120 .func
= ftrace_trace_snapshot_callback
,
7123 static __init
int register_snapshot_cmd(void)
7125 return register_ftrace_command(&ftrace_snapshot_cmd
);
7128 static inline __init
int register_snapshot_cmd(void) { return 0; }
7129 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
7131 static struct dentry
*tracing_get_dentry(struct trace_array
*tr
)
7133 if (WARN_ON(!tr
->dir
))
7134 return ERR_PTR(-ENODEV
);
7136 /* Top directory uses NULL as the parent */
7137 if (tr
->flags
& TRACE_ARRAY_FL_GLOBAL
)
7140 /* All sub buffers have a descriptor */
7144 static struct dentry
*tracing_dentry_percpu(struct trace_array
*tr
, int cpu
)
7146 struct dentry
*d_tracer
;
7149 return tr
->percpu_dir
;
7151 d_tracer
= tracing_get_dentry(tr
);
7152 if (IS_ERR(d_tracer
))
7155 tr
->percpu_dir
= tracefs_create_dir("per_cpu", d_tracer
);
7157 WARN_ONCE(!tr
->percpu_dir
,
7158 "Could not create tracefs directory 'per_cpu/%d'\n", cpu
);
7160 return tr
->percpu_dir
;
7163 static struct dentry
*
7164 trace_create_cpu_file(const char *name
, umode_t mode
, struct dentry
*parent
,
7165 void *data
, long cpu
, const struct file_operations
*fops
)
7167 struct dentry
*ret
= trace_create_file(name
, mode
, parent
, data
, fops
);
7169 if (ret
) /* See tracing_get_cpu() */
7170 d_inode(ret
)->i_cdev
= (void *)(cpu
+ 1);
7175 tracing_init_tracefs_percpu(struct trace_array
*tr
, long cpu
)
7177 struct dentry
*d_percpu
= tracing_dentry_percpu(tr
, cpu
);
7178 struct dentry
*d_cpu
;
7179 char cpu_dir
[30]; /* 30 characters should be more than enough */
7184 snprintf(cpu_dir
, 30, "cpu%ld", cpu
);
7185 d_cpu
= tracefs_create_dir(cpu_dir
, d_percpu
);
7187 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir
);
7191 /* per cpu trace_pipe */
7192 trace_create_cpu_file("trace_pipe", 0444, d_cpu
,
7193 tr
, cpu
, &tracing_pipe_fops
);
7196 trace_create_cpu_file("trace", 0644, d_cpu
,
7197 tr
, cpu
, &tracing_fops
);
7199 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu
,
7200 tr
, cpu
, &tracing_buffers_fops
);
7202 trace_create_cpu_file("stats", 0444, d_cpu
,
7203 tr
, cpu
, &tracing_stats_fops
);
7205 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu
,
7206 tr
, cpu
, &tracing_entries_fops
);
7208 #ifdef CONFIG_TRACER_SNAPSHOT
7209 trace_create_cpu_file("snapshot", 0644, d_cpu
,
7210 tr
, cpu
, &snapshot_fops
);
7212 trace_create_cpu_file("snapshot_raw", 0444, d_cpu
,
7213 tr
, cpu
, &snapshot_raw_fops
);
7217 #ifdef CONFIG_FTRACE_SELFTEST
7218 /* Let selftest have access to static functions in this file */
7219 #include "trace_selftest.c"
7223 trace_options_read(struct file
*filp
, char __user
*ubuf
, size_t cnt
,
7226 struct trace_option_dentry
*topt
= filp
->private_data
;
7229 if (topt
->flags
->val
& topt
->opt
->bit
)
7234 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, 2);
7238 trace_options_write(struct file
*filp
, const char __user
*ubuf
, size_t cnt
,
7241 struct trace_option_dentry
*topt
= filp
->private_data
;
7245 ret
= kstrtoul_from_user(ubuf
, cnt
, 10, &val
);
7249 if (val
!= 0 && val
!= 1)
7252 if (!!(topt
->flags
->val
& topt
->opt
->bit
) != val
) {
7253 mutex_lock(&trace_types_lock
);
7254 ret
= __set_tracer_option(topt
->tr
, topt
->flags
,
7256 mutex_unlock(&trace_types_lock
);
7267 static const struct file_operations trace_options_fops
= {
7268 .open
= tracing_open_generic
,
7269 .read
= trace_options_read
,
7270 .write
= trace_options_write
,
7271 .llseek
= generic_file_llseek
,
/*
 * In order to pass in both the trace_array descriptor as well as the index
 * to the flag that the trace option file represents, the trace_array
 * has a character array of trace_flags_index[], which holds the index
 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
 * The address of this character array is passed to the flag option file
 * read/write callbacks.
 *
 * In order to extract both the index and the trace_array descriptor,
 * get_tr_index() uses the following algorithm.
 *
 *   idx = *ptr;
 *
 * As the pointer itself contains the address of the index (remember
 * index[1] == 1).
 *
 * Then, to get the trace_array descriptor, we subtract that index from
 * the ptr, which takes us back to the start of the index array itself.
 *
 *   ptr - idx == &index[0]
 *
 * Then a simple container_of() from that pointer gets us to the
 * trace_array descriptor.
 */
static void get_tr_index(void *data, struct trace_array **ptr,
			 unsigned int *pindex)
{
	*pindex = *(unsigned char *)data;

	*ptr = container_of(data - *pindex, struct trace_array,
			    trace_flags_index);
}
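/*
 * A minimal sketch of the round trip described above (the concrete index
 * value is only an example): an option file is created with
 * &tr->trace_flags_index[3] as its private data, so get_tr_index()
 * recovers index == 3 and, via container_of(), the owning trace_array.
 *
 *	struct trace_array *tr;
 *	unsigned int index;
 *
 *	get_tr_index(filp->private_data, &tr, &index);
 *	// tr->trace_flags & (1 << index) is the flag this file controls
 */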
7308 trace_options_core_read(struct file
*filp
, char __user
*ubuf
, size_t cnt
,
7311 void *tr_index
= filp
->private_data
;
7312 struct trace_array
*tr
;
7316 get_tr_index(tr_index
, &tr
, &index
);
7318 if (tr
->trace_flags
& (1 << index
))
7323 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, 2);
static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	unsigned long val;
	int ret;

	get_tr_index(tr_index, &tr, &index);

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = tracefs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warn("Could not create tracefs '%s' entry\n", name);

	return ret;
}
static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->options = tracefs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warn("Could not create tracefs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}
static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);
}
static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct trace_options *tr_topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;
	int i;

	if (!tracer)
		return;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return;

	/*
	 * If this is an instance, only create flags for tracers
	 * the instance may have.
	 */
	if (!trace_ok_for_array(tracer, tr))
		return;

	for (i = 0; i < tr->nr_topts; i++) {
		/* Make sure there's no duplicate flags. */
		if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
			return;
	}

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return;

	tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
			    GFP_KERNEL);
	if (!tr_topts) {
		kfree(topts);
		return;
	}

	tr->topts = tr_topts;
	tr->topts[tr->nr_topts].tracer = tracer;
	tr->topts[tr->nr_topts].topts = topts;
	tr->nr_topts++;

	for (cnt = 0; opts[cnt].name; cnt++) {
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);
		WARN_ONCE(topts[cnt].entry == NULL,
			  "Failed to create trace option: %s",
			  opts[cnt].name);
	}
}
static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options,
				 (void *)&tr->trace_flags_index[index],
				 &trace_options_core_fops);
}
static void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	bool top_level = tr == &global_trace;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++) {
		if (top_level ||
		    !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
			create_trace_option_core_file(tr, trace_options[i], i);
	}
}
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open = tracing_open_generic_tr,
	.read = rb_simple_read,
	.write = rb_simple_write,
	.release = tracing_release_generic_tr,
	.llseek = default_llseek,
};
static struct dentry *trace_instance_dir;

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);

static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}
static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (ret) {
		ring_buffer_free(tr->trace_buffer.buffer);
		tr->trace_buffer.buffer = NULL;
		free_percpu(tr->trace_buffer.data);
		tr->trace_buffer.data = NULL;
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}
static void free_trace_buffer(struct trace_buffer *buf)
{
	if (buf->buffer) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		free_percpu(buf->data);
		buf->data = NULL;
	}
}

static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}
static void init_trace_flags_index(struct trace_array *tr)
{
	int i;

	/* Used by the trace options files */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
		tr->trace_flags_index[i] = i;
}

static void __update_tracer_options(struct trace_array *tr)
{
	struct tracer *t;

	for (t = trace_types; t; t = t->next)
		add_tracer_options(tr, t);
}

static void update_tracer_options(struct trace_array *tr)
{
	mutex_lock(&trace_types_lock);
	__update_tracer_options(tr);
	mutex_unlock(&trace_types_lock);
}
static int instance_mkdir(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = tracefs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		tracefs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	ftrace_init_trace_array(tr);

	init_tracer_tracefs(tr, tr->dir);
	init_trace_flags_index(tr);
	__update_tracer_options(tr);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	return 0;

 out_free_tr:
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	return ret;
}
static int instance_rmdir(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;
	int i;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref || (tr->current_trace && tr->current_trace->ref))
		goto out_unlock;

	list_del(&tr->list);

	/* Disable all the flags that were enabled coming in */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
		if ((1 << i) & ZEROED_TRACE_FLAGS)
			set_tracer_flag(tr, 1 << i, 0);
	}

	tracing_set_nop(tr);
	clear_ftrace_function_probes(tr);
	event_trace_del_tracer(tr);
	ftrace_clear_pids(tr);
	ftrace_destroy_function_files(tr);
	tracefs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	for (i = 0; i < tr->nr_topts; i++) {
		kfree(tr->topts[i].topts);
	}
	kfree(tr->topts);

	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	return ret;
}
static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
							 instance_mkdir,
							 instance_rmdir);
	if (WARN_ON(!trace_instance_dir))
		return;
}
static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_marker_raw", 0220, d_tracer,
			  tr, &tracing_mark_raw_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

	create_trace_options_dir(tr);

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_tracefs_percpu(tr, cpu);

	ftrace_init_tracefs(tr, d_tracer);
}
static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
{
	struct vfsmount *mnt;
	struct file_system_type *type;

	/*
	 * To maintain backward compatibility for tools that mount
	 * debugfs to get to the tracing facility, tracefs is automatically
	 * mounted to the debugfs/tracing directory.
	 */
	type = get_fs_type("tracefs");
	if (!type)
		return NULL;
	mnt = vfs_submount(mntpt, type, "tracefs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		return NULL;
	mntget(mnt);

	return mnt;
}
/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() by any of the boot up code
 * and expects to return the dentry of the top level tracing directory.
 */
struct dentry *tracing_init_dentry(void)
{
	struct trace_array *tr = &global_trace;

	/* The top level trace array uses NULL as parent */
	if (tr->dir)
		return NULL;

	if (WARN_ON(!tracefs_initialized()) ||
	    (IS_ENABLED(CONFIG_DEBUG_FS) &&
	     WARN_ON(!debugfs_initialized())))
		return ERR_PTR(-ENODEV);

	/*
	 * As there may still be users that expect the tracing
	 * files to exist in debugfs/tracing, we must automount
	 * the tracefs file system there, so older tools still
	 * work with the newer kernel.
	 */
	tr->dir = debugfs_create_automount("tracing", NULL,
					   trace_automount, NULL);
	if (!tr->dir) {
		pr_warn_once("Could not create debugfs directory 'tracing'\n");
		return ERR_PTR(-ENOMEM);
	}

	return NULL;
}
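
/*
 * Illustrative sketch, not part of the original file: how code elsewhere in
 * the tracer typically obtains the top level directory before adding its own
 * control file. "my_ctrl_fops" is a hypothetical file_operations instance.
 */
static inline void example_add_top_level_file(const struct file_operations *my_ctrl_fops)
{
	struct dentry *d_tracer = tracing_init_dentry();

	if (IS_ERR(d_tracer))
		return;

	trace_create_file("my_ctrl", 0644, d_tracer, NULL, my_ctrl_fops);
}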
extern struct trace_eval_map *__start_ftrace_eval_maps[];
extern struct trace_eval_map *__stop_ftrace_eval_maps[];

static void __init trace_eval_init(void)
{
	int len;

	len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
	trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
}

#ifdef CONFIG_MODULES
static void trace_module_add_evals(struct module *mod)
{
	if (!mod->num_trace_evals)
		return;

	/*
	 * Modules with bad taint do not have events created, do
	 * not bother with enums either.
	 */
	if (trace_module_has_bad_taint(mod))
		return;

	trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
}
#ifdef CONFIG_TRACE_EVAL_MAP_FILE
static void trace_module_remove_evals(struct module *mod)
{
	union trace_eval_map_item *map;
	union trace_eval_map_item **last = &trace_eval_maps;

	if (!mod->num_trace_evals)
		return;

	mutex_lock(&trace_eval_mutex);

	map = trace_eval_maps;

	while (map) {
		if (map->head.mod == mod)
			break;
		map = trace_eval_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		goto out;

	*last = trace_eval_jmp_to_tail(map)->tail.next;
	kfree(map);
 out:
	mutex_unlock(&trace_eval_mutex);
}
#else
static inline void trace_module_remove_evals(struct module *mod) { }
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_evals(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_evals(mod);
		break;
	}

	return 0;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
};
#endif /* CONFIG_MODULES */
static __init int tracer_init_tracefs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	init_tracer_tracefs(&global_trace, d_tracer);
	ftrace_init_tracefs_toplevel(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_create_file("saved_tgids", 0444, d_tracer,
			  NULL, &tracing_saved_tgids_fops);

	trace_eval_init();

	trace_create_eval_file(d_tracer);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	update_tracer_options(&global_trace);

	return 0;
}
static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_panic_handler,
	.next = NULL,
	.priority = 150 /* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
};
/*
 * printk is set to a max of 1024; we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be zero ended, but we are paranoid. */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
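
/*
 * Illustrative sketch, not part of the original file: a driver that wants the
 * same post-mortem data as ftrace_dump_on_oops can dump the buffers itself
 * when it hits an unrecoverable state. The function and its condition are
 * hypothetical; DUMP_ALL is a real value of enum ftrace_dump_mode.
 */
static inline void example_dump_on_fatal_error(bool fatal)
{
	if (fatal)
		ftrace_dump(DUMP_ALL);	/* dump every CPU's ring buffer to the console */
}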
int trace_run_command(const char *buf, int (*createfn)(int, char **))
{
	char **argv;
	int argc, ret;

	argc = 0;
	ret = 0;
	argv = argv_split(GFP_KERNEL, buf, &argc);
	if (!argv)
		return -ENOMEM;

	if (argc)
		ret = createfn(argc, argv);

	argv_free(argv);

	return ret;
}

#define WRITE_BUFSIZE  4096
ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos,
				int (*createfn)(int, char **))
{
	char *kbuf, *buf, *tmp;
	int ret = 0;
	size_t done = 0;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	while (done < count) {
		size = count - done;

		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;

		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		buf = kbuf;
		do {
			tmp = strchr(buf, '\n');
			if (tmp) {
				*tmp = '\0';
				size = tmp - buf + 1;
			} else {
				size = strlen(buf);
				if (done + size < count) {
					if (buf != kbuf)
						break;
					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
					pr_warn("Line length is too long: Should be less than %d\n",
						WRITE_BUFSIZE - 2);
					ret = -EINVAL;
					goto out;
				}
			}
			done += size;

			/* Remove comments */
			tmp = strchr(buf, '#');

			if (tmp)
				*tmp = '\0';

			ret = trace_run_command(buf, createfn);
			if (ret)
				goto out;
			buf += size;

		} while (done < count);
	}
	ret = done;

out:
	kfree(kbuf);

	return ret;
}
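
/*
 * Illustrative sketch, not part of the original file: the shape of a createfn
 * callback that trace_parse_run_command() invokes once per parsed line, after
 * newline splitting and '#' comment stripping. The name and the command
 * handling are hypothetical.
 */
static inline int example_createfn(int argc, char **argv)
{
	if (argc < 1)
		return -EINVAL;

	pr_info("example command: %s (%d args)\n", argv[0], argc - 1);
	return 0;
}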
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callbacks allocate some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:preapre", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (WARN_ON(!tracepoint_print_iter))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();
}

void __init trace_init(void)
{
	trace_event_init();
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default tracer set at boot lives in an init section.
	 * This function is called in lateinit. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);

	default_bootup_tracer = NULL;

	return 0;
}

fs_initcall(tracer_init_tracefs);
late_initcall_sync(clear_boot_tracer);