Linux 4.19.168
[linux/fpc-iii.git] / kernel / trace / trace.c
blobd6f1e305bb3dbd26ca8f79c9fb783a6870093384
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * ring buffer based function tracer
5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
8 * Originally taken from the RT patch by:
9 * Arnaldo Carvalho de Melo <acme@redhat.com>
11 * Based on code from the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/seq_file.h>
21 #include <linux/notifier.h>
22 #include <linux/irqflags.h>
23 #include <linux/debugfs.h>
24 #include <linux/tracefs.h>
25 #include <linux/pagemap.h>
26 #include <linux/hardirq.h>
27 #include <linux/linkage.h>
28 #include <linux/uaccess.h>
29 #include <linux/vmalloc.h>
30 #include <linux/ftrace.h>
31 #include <linux/module.h>
32 #include <linux/percpu.h>
33 #include <linux/splice.h>
34 #include <linux/kdebug.h>
35 #include <linux/string.h>
36 #include <linux/mount.h>
37 #include <linux/rwsem.h>
38 #include <linux/slab.h>
39 #include <linux/ctype.h>
40 #include <linux/init.h>
41 #include <linux/poll.h>
42 #include <linux/nmi.h>
43 #include <linux/fs.h>
44 #include <linux/trace.h>
45 #include <linux/sched/clock.h>
46 #include <linux/sched/rt.h>
48 #include "trace.h"
49 #include "trace_output.h"
52 * On boot up, the ring buffer is set to the minimum size, so that
53 * we do not waste memory on systems that are not using tracing.
55 bool ring_buffer_expanded;
58 * We need to change this state when a selftest is running.
59 * A selftest will lurk into the ring-buffer to count the
60 * entries inserted during the selftest although some concurrent
61 * insertions into the ring-buffer such as trace_printk could occurred
62 * at the same time, giving false positive or negative results.
64 static bool __read_mostly tracing_selftest_running;
67 * If a tracer is running, we do not want to run SELFTEST.
69 bool __read_mostly tracing_selftest_disabled;
71 /* Pipe tracepoints to printk */
72 struct trace_iterator *tracepoint_print_iter;
73 int tracepoint_printk;
74 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
76 /* For tracers that don't implement custom flags */
77 static struct tracer_opt dummy_tracer_opt[] = {
78 { }
81 static int
82 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
84 return 0;
88 * To prevent the comm cache from being overwritten when no
89 * tracing is active, only save the comm when a trace event
90 * occurred.
92 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
95 * Kill all tracing for good (never come back).
96 * It is initialized to 1 but will turn to zero if the initialization
97 * of the tracer is successful. But that is the only place that sets
98 * this back to zero.
100 static int tracing_disabled = 1;
102 cpumask_var_t __read_mostly tracing_buffer_mask;
105 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
107 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
108 * is set, then ftrace_dump is called. This will output the contents
109 * of the ftrace buffers to the console. This is very useful for
110 * capturing traces that lead to crashes and outputing it to a
111 * serial console.
113 * It is default off, but you can enable it with either specifying
114 * "ftrace_dump_on_oops" in the kernel command line, or setting
115 * /proc/sys/kernel/ftrace_dump_on_oops
116 * Set 1 if you want to dump buffers of all CPUs
117 * Set 2 if you want to dump the buffer of the CPU that triggered oops
120 enum ftrace_dump_mode ftrace_dump_on_oops;
122 /* When set, tracing will stop when a WARN*() is hit */
123 int __disable_trace_on_warning;
125 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
126 /* Map of enums to their values, for "eval_map" file */
127 struct trace_eval_map_head {
128 struct module *mod;
129 unsigned long length;
132 union trace_eval_map_item;
134 struct trace_eval_map_tail {
136 * "end" is first and points to NULL as it must be different
137 * than "mod" or "eval_string"
139 union trace_eval_map_item *next;
140 const char *end; /* points to NULL */
143 static DEFINE_MUTEX(trace_eval_mutex);
146 * The trace_eval_maps are saved in an array with two extra elements,
147 * one at the beginning, and one at the end. The beginning item contains
148 * the count of the saved maps (head.length), and the module they
149 * belong to if not built in (head.mod). The ending item contains a
150 * pointer to the next array of saved eval_map items.
152 union trace_eval_map_item {
153 struct trace_eval_map map;
154 struct trace_eval_map_head head;
155 struct trace_eval_map_tail tail;
158 static union trace_eval_map_item *trace_eval_maps;
159 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
161 static int tracing_set_tracer(struct trace_array *tr, const char *buf);
163 #define MAX_TRACER_SIZE 100
164 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
165 static char *default_bootup_tracer;
167 static bool allocate_snapshot;
169 static int __init set_cmdline_ftrace(char *str)
171 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
172 default_bootup_tracer = bootup_tracer_buf;
173 /* We are using ftrace early, expand it */
174 ring_buffer_expanded = true;
175 return 1;
177 __setup("ftrace=", set_cmdline_ftrace);
179 static int __init set_ftrace_dump_on_oops(char *str)
181 if (*str++ != '=' || !*str) {
182 ftrace_dump_on_oops = DUMP_ALL;
183 return 1;
186 if (!strcmp("orig_cpu", str)) {
187 ftrace_dump_on_oops = DUMP_ORIG;
188 return 1;
191 return 0;
193 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
195 static int __init stop_trace_on_warning(char *str)
197 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
198 __disable_trace_on_warning = 1;
199 return 1;
201 __setup("traceoff_on_warning", stop_trace_on_warning);
203 static int __init boot_alloc_snapshot(char *str)
205 allocate_snapshot = true;
206 /* We also need the main ring buffer expanded */
207 ring_buffer_expanded = true;
208 return 1;
210 __setup("alloc_snapshot", boot_alloc_snapshot);
213 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
215 static int __init set_trace_boot_options(char *str)
217 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
218 return 0;
220 __setup("trace_options=", set_trace_boot_options);
222 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
223 static char *trace_boot_clock __initdata;
225 static int __init set_trace_boot_clock(char *str)
227 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
228 trace_boot_clock = trace_boot_clock_buf;
229 return 0;
231 __setup("trace_clock=", set_trace_boot_clock);
233 static int __init set_tracepoint_printk(char *str)
235 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
236 tracepoint_printk = 1;
237 return 1;
239 __setup("tp_printk", set_tracepoint_printk);
241 unsigned long long ns2usecs(u64 nsec)
243 nsec += 500;
244 do_div(nsec, 1000);
245 return nsec;
248 /* trace_flags holds trace_options default values */
249 #define TRACE_DEFAULT_FLAGS \
250 (FUNCTION_DEFAULT_FLAGS | \
251 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
252 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
253 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
254 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
256 /* trace_options that are only supported by global_trace */
257 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
258 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
260 /* trace_flags that are default zero for instances */
261 #define ZEROED_TRACE_FLAGS \
262 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
265 * The global_trace is the descriptor that holds the top-level tracing
266 * buffers for the live tracing.
268 static struct trace_array global_trace = {
269 .trace_flags = TRACE_DEFAULT_FLAGS,
272 LIST_HEAD(ftrace_trace_arrays);
274 int trace_array_get(struct trace_array *this_tr)
276 struct trace_array *tr;
277 int ret = -ENODEV;
279 mutex_lock(&trace_types_lock);
280 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
281 if (tr == this_tr) {
282 tr->ref++;
283 ret = 0;
284 break;
287 mutex_unlock(&trace_types_lock);
289 return ret;
292 static void __trace_array_put(struct trace_array *this_tr)
294 WARN_ON(!this_tr->ref);
295 this_tr->ref--;
298 void trace_array_put(struct trace_array *this_tr)
300 mutex_lock(&trace_types_lock);
301 __trace_array_put(this_tr);
302 mutex_unlock(&trace_types_lock);
305 int call_filter_check_discard(struct trace_event_call *call, void *rec,
306 struct ring_buffer *buffer,
307 struct ring_buffer_event *event)
309 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
310 !filter_match_preds(call->filter, rec)) {
311 __trace_event_discard_commit(buffer, event);
312 return 1;
315 return 0;
318 void trace_free_pid_list(struct trace_pid_list *pid_list)
320 vfree(pid_list->pids);
321 kfree(pid_list);
325 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
326 * @filtered_pids: The list of pids to check
327 * @search_pid: The PID to find in @filtered_pids
329 * Returns true if @search_pid is fonud in @filtered_pids, and false otherwis.
331 bool
332 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
335 * If pid_max changed after filtered_pids was created, we
336 * by default ignore all pids greater than the previous pid_max.
338 if (search_pid >= filtered_pids->pid_max)
339 return false;
341 return test_bit(search_pid, filtered_pids->pids);
345 * trace_ignore_this_task - should a task be ignored for tracing
346 * @filtered_pids: The list of pids to check
347 * @task: The task that should be ignored if not filtered
349 * Checks if @task should be traced or not from @filtered_pids.
350 * Returns true if @task should *NOT* be traced.
351 * Returns false if @task should be traced.
353 bool
354 trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
357 * Return false, because if filtered_pids does not exist,
358 * all pids are good to trace.
360 if (!filtered_pids)
361 return false;
363 return !trace_find_filtered_pid(filtered_pids, task->pid);
367 * trace_pid_filter_add_remove_task - Add or remove a task from a pid_list
368 * @pid_list: The list to modify
369 * @self: The current task for fork or NULL for exit
370 * @task: The task to add or remove
372 * If adding a task, if @self is defined, the task is only added if @self
373 * is also included in @pid_list. This happens on fork and tasks should
374 * only be added when the parent is listed. If @self is NULL, then the
375 * @task pid will be removed from the list, which would happen on exit
376 * of a task.
378 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
379 struct task_struct *self,
380 struct task_struct *task)
382 if (!pid_list)
383 return;
385 /* For forks, we only add if the forking task is listed */
386 if (self) {
387 if (!trace_find_filtered_pid(pid_list, self->pid))
388 return;
391 /* Sorry, but we don't support pid_max changing after setting */
392 if (task->pid >= pid_list->pid_max)
393 return;
395 /* "self" is set for forks, and NULL for exits */
396 if (self)
397 set_bit(task->pid, pid_list->pids);
398 else
399 clear_bit(task->pid, pid_list->pids);
403 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
404 * @pid_list: The pid list to show
405 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
406 * @pos: The position of the file
408 * This is used by the seq_file "next" operation to iterate the pids
409 * listed in a trace_pid_list structure.
411 * Returns the pid+1 as we want to display pid of zero, but NULL would
412 * stop the iteration.
414 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
416 unsigned long pid = (unsigned long)v;
418 (*pos)++;
420 /* pid already is +1 of the actual prevous bit */
421 pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
423 /* Return pid + 1 to allow zero to be represented */
424 if (pid < pid_list->pid_max)
425 return (void *)(pid + 1);
427 return NULL;
431 * trace_pid_start - Used for seq_file to start reading pid lists
432 * @pid_list: The pid list to show
433 * @pos: The position of the file
435 * This is used by seq_file "start" operation to start the iteration
436 * of listing pids.
438 * Returns the pid+1 as we want to display pid of zero, but NULL would
439 * stop the iteration.
441 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
443 unsigned long pid;
444 loff_t l = 0;
446 pid = find_first_bit(pid_list->pids, pid_list->pid_max);
447 if (pid >= pid_list->pid_max)
448 return NULL;
450 /* Return pid + 1 so that zero can be the exit value */
451 for (pid++; pid && l < *pos;
452 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
454 return (void *)pid;
458 * trace_pid_show - show the current pid in seq_file processing
459 * @m: The seq_file structure to write into
460 * @v: A void pointer of the pid (+1) value to display
462 * Can be directly used by seq_file operations to display the current
463 * pid value.
465 int trace_pid_show(struct seq_file *m, void *v)
467 unsigned long pid = (unsigned long)v - 1;
469 seq_printf(m, "%lu\n", pid);
470 return 0;
473 /* 128 should be much more than enough */
474 #define PID_BUF_SIZE 127
476 int trace_pid_write(struct trace_pid_list *filtered_pids,
477 struct trace_pid_list **new_pid_list,
478 const char __user *ubuf, size_t cnt)
480 struct trace_pid_list *pid_list;
481 struct trace_parser parser;
482 unsigned long val;
483 int nr_pids = 0;
484 ssize_t read = 0;
485 ssize_t ret = 0;
486 loff_t pos;
487 pid_t pid;
489 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
490 return -ENOMEM;
493 * Always recreate a new array. The write is an all or nothing
494 * operation. Always create a new array when adding new pids by
495 * the user. If the operation fails, then the current list is
496 * not modified.
498 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
499 if (!pid_list) {
500 trace_parser_put(&parser);
501 return -ENOMEM;
504 pid_list->pid_max = READ_ONCE(pid_max);
506 /* Only truncating will shrink pid_max */
507 if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
508 pid_list->pid_max = filtered_pids->pid_max;
510 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
511 if (!pid_list->pids) {
512 trace_parser_put(&parser);
513 kfree(pid_list);
514 return -ENOMEM;
517 if (filtered_pids) {
518 /* copy the current bits to the new max */
519 for_each_set_bit(pid, filtered_pids->pids,
520 filtered_pids->pid_max) {
521 set_bit(pid, pid_list->pids);
522 nr_pids++;
526 while (cnt > 0) {
528 pos = 0;
530 ret = trace_get_user(&parser, ubuf, cnt, &pos);
531 if (ret < 0 || !trace_parser_loaded(&parser))
532 break;
534 read += ret;
535 ubuf += ret;
536 cnt -= ret;
538 ret = -EINVAL;
539 if (kstrtoul(parser.buffer, 0, &val))
540 break;
541 if (val >= pid_list->pid_max)
542 break;
544 pid = (pid_t)val;
546 set_bit(pid, pid_list->pids);
547 nr_pids++;
549 trace_parser_clear(&parser);
550 ret = 0;
552 trace_parser_put(&parser);
554 if (ret < 0) {
555 trace_free_pid_list(pid_list);
556 return ret;
559 if (!nr_pids) {
560 /* Cleared the list of pids */
561 trace_free_pid_list(pid_list);
562 read = ret;
563 pid_list = NULL;
566 *new_pid_list = pid_list;
568 return read;
571 static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
573 u64 ts;
575 /* Early boot up does not have a buffer yet */
576 if (!buf->buffer)
577 return trace_clock_local();
579 ts = ring_buffer_time_stamp(buf->buffer, cpu);
580 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
582 return ts;
585 u64 ftrace_now(int cpu)
587 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
591 * tracing_is_enabled - Show if global_trace has been disabled
593 * Shows if the global trace has been enabled or not. It uses the
594 * mirror flag "buffer_disabled" to be used in fast paths such as for
595 * the irqsoff tracer. But it may be inaccurate due to races. If you
596 * need to know the accurate state, use tracing_is_on() which is a little
597 * slower, but accurate.
599 int tracing_is_enabled(void)
602 * For quick access (irqsoff uses this in fast path), just
603 * return the mirror variable of the state of the ring buffer.
604 * It's a little racy, but we don't really care.
606 smp_rmb();
607 return !global_trace.buffer_disabled;
611 * trace_buf_size is the size in bytes that is allocated
612 * for a buffer. Note, the number of bytes is always rounded
613 * to page size.
615 * This number is purposely set to a low number of 16384.
616 * If the dump on oops happens, it will be much appreciated
617 * to not have to wait for all that output. Anyway this can be
618 * boot time and run time configurable.
620 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
622 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
624 /* trace_types holds a link list of available tracers. */
625 static struct tracer *trace_types __read_mostly;
628 * trace_types_lock is used to protect the trace_types list.
630 DEFINE_MUTEX(trace_types_lock);
633 * serialize the access of the ring buffer
635 * ring buffer serializes readers, but it is low level protection.
636 * The validity of the events (which returns by ring_buffer_peek() ..etc)
637 * are not protected by ring buffer.
639 * The content of events may become garbage if we allow other process consumes
640 * these events concurrently:
641 * A) the page of the consumed events may become a normal page
642 * (not reader page) in ring buffer, and this page will be rewrited
643 * by events producer.
644 * B) The page of the consumed events may become a page for splice_read,
645 * and this page will be returned to system.
647 * These primitives allow multi process access to different cpu ring buffer
648 * concurrently.
650 * These primitives don't distinguish read-only and read-consume access.
651 * Multi read-only access are also serialized.
654 #ifdef CONFIG_SMP
655 static DECLARE_RWSEM(all_cpu_access_lock);
656 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
658 static inline void trace_access_lock(int cpu)
660 if (cpu == RING_BUFFER_ALL_CPUS) {
661 /* gain it for accessing the whole ring buffer. */
662 down_write(&all_cpu_access_lock);
663 } else {
664 /* gain it for accessing a cpu ring buffer. */
666 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
667 down_read(&all_cpu_access_lock);
669 /* Secondly block other access to this @cpu ring buffer. */
670 mutex_lock(&per_cpu(cpu_access_lock, cpu));
674 static inline void trace_access_unlock(int cpu)
676 if (cpu == RING_BUFFER_ALL_CPUS) {
677 up_write(&all_cpu_access_lock);
678 } else {
679 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
680 up_read(&all_cpu_access_lock);
684 static inline void trace_access_lock_init(void)
686 int cpu;
688 for_each_possible_cpu(cpu)
689 mutex_init(&per_cpu(cpu_access_lock, cpu));
692 #else
694 static DEFINE_MUTEX(access_lock);
696 static inline void trace_access_lock(int cpu)
698 (void)cpu;
699 mutex_lock(&access_lock);
702 static inline void trace_access_unlock(int cpu)
704 (void)cpu;
705 mutex_unlock(&access_lock);
708 static inline void trace_access_lock_init(void)
712 #endif
714 #ifdef CONFIG_STACKTRACE
715 static void __ftrace_trace_stack(struct ring_buffer *buffer,
716 unsigned long flags,
717 int skip, int pc, struct pt_regs *regs);
718 static inline void ftrace_trace_stack(struct trace_array *tr,
719 struct ring_buffer *buffer,
720 unsigned long flags,
721 int skip, int pc, struct pt_regs *regs);
723 #else
724 static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
725 unsigned long flags,
726 int skip, int pc, struct pt_regs *regs)
729 static inline void ftrace_trace_stack(struct trace_array *tr,
730 struct ring_buffer *buffer,
731 unsigned long flags,
732 int skip, int pc, struct pt_regs *regs)
736 #endif
738 static __always_inline void
739 trace_event_setup(struct ring_buffer_event *event,
740 int type, unsigned long flags, int pc)
742 struct trace_entry *ent = ring_buffer_event_data(event);
744 tracing_generic_entry_update(ent, flags, pc);
745 ent->type = type;
748 static __always_inline struct ring_buffer_event *
749 __trace_buffer_lock_reserve(struct ring_buffer *buffer,
750 int type,
751 unsigned long len,
752 unsigned long flags, int pc)
754 struct ring_buffer_event *event;
756 event = ring_buffer_lock_reserve(buffer, len);
757 if (event != NULL)
758 trace_event_setup(event, type, flags, pc);
760 return event;
763 void tracer_tracing_on(struct trace_array *tr)
765 if (tr->trace_buffer.buffer)
766 ring_buffer_record_on(tr->trace_buffer.buffer);
768 * This flag is looked at when buffers haven't been allocated
769 * yet, or by some tracers (like irqsoff), that just want to
770 * know if the ring buffer has been disabled, but it can handle
771 * races of where it gets disabled but we still do a record.
772 * As the check is in the fast path of the tracers, it is more
773 * important to be fast than accurate.
775 tr->buffer_disabled = 0;
776 /* Make the flag seen by readers */
777 smp_wmb();
781 * tracing_on - enable tracing buffers
783 * This function enables tracing buffers that may have been
784 * disabled with tracing_off.
786 void tracing_on(void)
788 tracer_tracing_on(&global_trace);
790 EXPORT_SYMBOL_GPL(tracing_on);
793 static __always_inline void
794 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
796 __this_cpu_write(trace_taskinfo_save, true);
798 /* If this is the temp buffer, we need to commit fully */
799 if (this_cpu_read(trace_buffered_event) == event) {
800 /* Length is in event->array[0] */
801 ring_buffer_write(buffer, event->array[0], &event->array[1]);
802 /* Release the temp buffer */
803 this_cpu_dec(trace_buffered_event_cnt);
804 } else
805 ring_buffer_unlock_commit(buffer, event);
809 * __trace_puts - write a constant string into the trace buffer.
810 * @ip: The address of the caller
811 * @str: The constant string to write
812 * @size: The size of the string.
814 int __trace_puts(unsigned long ip, const char *str, int size)
816 struct ring_buffer_event *event;
817 struct ring_buffer *buffer;
818 struct print_entry *entry;
819 unsigned long irq_flags;
820 int alloc;
821 int pc;
823 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
824 return 0;
826 pc = preempt_count();
828 if (unlikely(tracing_selftest_running || tracing_disabled))
829 return 0;
831 alloc = sizeof(*entry) + size + 2; /* possible \n added */
833 local_save_flags(irq_flags);
834 buffer = global_trace.trace_buffer.buffer;
835 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
836 irq_flags, pc);
837 if (!event)
838 return 0;
840 entry = ring_buffer_event_data(event);
841 entry->ip = ip;
843 memcpy(&entry->buf, str, size);
845 /* Add a newline if necessary */
846 if (entry->buf[size - 1] != '\n') {
847 entry->buf[size] = '\n';
848 entry->buf[size + 1] = '\0';
849 } else
850 entry->buf[size] = '\0';
852 __buffer_unlock_commit(buffer, event);
853 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
855 return size;
857 EXPORT_SYMBOL_GPL(__trace_puts);
860 * __trace_bputs - write the pointer to a constant string into trace buffer
861 * @ip: The address of the caller
862 * @str: The constant string to write to the buffer to
864 int __trace_bputs(unsigned long ip, const char *str)
866 struct ring_buffer_event *event;
867 struct ring_buffer *buffer;
868 struct bputs_entry *entry;
869 unsigned long irq_flags;
870 int size = sizeof(struct bputs_entry);
871 int pc;
873 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
874 return 0;
876 pc = preempt_count();
878 if (unlikely(tracing_selftest_running || tracing_disabled))
879 return 0;
881 local_save_flags(irq_flags);
882 buffer = global_trace.trace_buffer.buffer;
883 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
884 irq_flags, pc);
885 if (!event)
886 return 0;
888 entry = ring_buffer_event_data(event);
889 entry->ip = ip;
890 entry->str = str;
892 __buffer_unlock_commit(buffer, event);
893 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
895 return 1;
897 EXPORT_SYMBOL_GPL(__trace_bputs);
899 #ifdef CONFIG_TRACER_SNAPSHOT
900 void tracing_snapshot_instance(struct trace_array *tr)
902 struct tracer *tracer = tr->current_trace;
903 unsigned long flags;
905 if (in_nmi()) {
906 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
907 internal_trace_puts("*** snapshot is being ignored ***\n");
908 return;
911 if (!tr->allocated_snapshot) {
912 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
913 internal_trace_puts("*** stopping trace here! ***\n");
914 tracing_off();
915 return;
918 /* Note, snapshot can not be used when the tracer uses it */
919 if (tracer->use_max_tr) {
920 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
921 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
922 return;
925 local_irq_save(flags);
926 update_max_tr(tr, current, smp_processor_id());
927 local_irq_restore(flags);
931 * tracing_snapshot - take a snapshot of the current buffer.
933 * This causes a swap between the snapshot buffer and the current live
934 * tracing buffer. You can use this to take snapshots of the live
935 * trace when some condition is triggered, but continue to trace.
937 * Note, make sure to allocate the snapshot with either
938 * a tracing_snapshot_alloc(), or by doing it manually
939 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
941 * If the snapshot buffer is not allocated, it will stop tracing.
942 * Basically making a permanent snapshot.
944 void tracing_snapshot(void)
946 struct trace_array *tr = &global_trace;
948 tracing_snapshot_instance(tr);
950 EXPORT_SYMBOL_GPL(tracing_snapshot);
952 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
953 struct trace_buffer *size_buf, int cpu_id);
954 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
956 int tracing_alloc_snapshot_instance(struct trace_array *tr)
958 int ret;
960 if (!tr->allocated_snapshot) {
962 /* allocate spare buffer */
963 ret = resize_buffer_duplicate_size(&tr->max_buffer,
964 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
965 if (ret < 0)
966 return ret;
968 tr->allocated_snapshot = true;
971 return 0;
974 static void free_snapshot(struct trace_array *tr)
977 * We don't free the ring buffer. instead, resize it because
978 * The max_tr ring buffer has some state (e.g. ring->clock) and
979 * we want preserve it.
981 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
982 set_buffer_entries(&tr->max_buffer, 1);
983 tracing_reset_online_cpus(&tr->max_buffer);
984 tr->allocated_snapshot = false;
988 * tracing_alloc_snapshot - allocate snapshot buffer.
990 * This only allocates the snapshot buffer if it isn't already
991 * allocated - it doesn't also take a snapshot.
993 * This is meant to be used in cases where the snapshot buffer needs
994 * to be set up for events that can't sleep but need to be able to
995 * trigger a snapshot.
997 int tracing_alloc_snapshot(void)
999 struct trace_array *tr = &global_trace;
1000 int ret;
1002 ret = tracing_alloc_snapshot_instance(tr);
1003 WARN_ON(ret < 0);
1005 return ret;
1007 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1010 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1012 * This is similar to tracing_snapshot(), but it will allocate the
1013 * snapshot buffer if it isn't already allocated. Use this only
1014 * where it is safe to sleep, as the allocation may sleep.
1016 * This causes a swap between the snapshot buffer and the current live
1017 * tracing buffer. You can use this to take snapshots of the live
1018 * trace when some condition is triggered, but continue to trace.
1020 void tracing_snapshot_alloc(void)
1022 int ret;
1024 ret = tracing_alloc_snapshot();
1025 if (ret < 0)
1026 return;
1028 tracing_snapshot();
1030 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1031 #else
1032 void tracing_snapshot(void)
1034 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1036 EXPORT_SYMBOL_GPL(tracing_snapshot);
1037 int tracing_alloc_snapshot(void)
1039 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1040 return -ENODEV;
1042 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1043 void tracing_snapshot_alloc(void)
1045 /* Give warning */
1046 tracing_snapshot();
1048 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1049 #endif /* CONFIG_TRACER_SNAPSHOT */
1051 void tracer_tracing_off(struct trace_array *tr)
1053 if (tr->trace_buffer.buffer)
1054 ring_buffer_record_off(tr->trace_buffer.buffer);
1056 * This flag is looked at when buffers haven't been allocated
1057 * yet, or by some tracers (like irqsoff), that just want to
1058 * know if the ring buffer has been disabled, but it can handle
1059 * races of where it gets disabled but we still do a record.
1060 * As the check is in the fast path of the tracers, it is more
1061 * important to be fast than accurate.
1063 tr->buffer_disabled = 1;
1064 /* Make the flag seen by readers */
1065 smp_wmb();
1069 * tracing_off - turn off tracing buffers
1071 * This function stops the tracing buffers from recording data.
1072 * It does not disable any overhead the tracers themselves may
1073 * be causing. This function simply causes all recording to
1074 * the ring buffers to fail.
1076 void tracing_off(void)
1078 tracer_tracing_off(&global_trace);
1080 EXPORT_SYMBOL_GPL(tracing_off);
1082 void disable_trace_on_warning(void)
1084 if (__disable_trace_on_warning)
1085 tracing_off();
1089 * tracer_tracing_is_on - show real state of ring buffer enabled
1090 * @tr : the trace array to know if ring buffer is enabled
1092 * Shows real state of the ring buffer if it is enabled or not.
1094 bool tracer_tracing_is_on(struct trace_array *tr)
1096 if (tr->trace_buffer.buffer)
1097 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
1098 return !tr->buffer_disabled;
1102 * tracing_is_on - show state of ring buffers enabled
1104 int tracing_is_on(void)
1106 return tracer_tracing_is_on(&global_trace);
1108 EXPORT_SYMBOL_GPL(tracing_is_on);
1110 static int __init set_buf_size(char *str)
1112 unsigned long buf_size;
1114 if (!str)
1115 return 0;
1116 buf_size = memparse(str, &str);
1117 /* nr_entries can not be zero */
1118 if (buf_size == 0)
1119 return 0;
1120 trace_buf_size = buf_size;
1121 return 1;
1123 __setup("trace_buf_size=", set_buf_size);
1125 static int __init set_tracing_thresh(char *str)
1127 unsigned long threshold;
1128 int ret;
1130 if (!str)
1131 return 0;
1132 ret = kstrtoul(str, 0, &threshold);
1133 if (ret < 0)
1134 return 0;
1135 tracing_thresh = threshold * 1000;
1136 return 1;
1138 __setup("tracing_thresh=", set_tracing_thresh);
1140 unsigned long nsecs_to_usecs(unsigned long nsecs)
1142 return nsecs / 1000;
1146 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1147 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1148 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1149 * of strings in the order that the evals (enum) were defined.
1151 #undef C
1152 #define C(a, b) b
1154 /* These must match the bit postions in trace_iterator_flags */
1155 static const char *trace_options[] = {
1156 TRACE_FLAGS
1157 NULL
1160 static struct {
1161 u64 (*func)(void);
1162 const char *name;
1163 int in_ns; /* is this clock in nanoseconds? */
1164 } trace_clocks[] = {
1165 { trace_clock_local, "local", 1 },
1166 { trace_clock_global, "global", 1 },
1167 { trace_clock_counter, "counter", 0 },
1168 { trace_clock_jiffies, "uptime", 0 },
1169 { trace_clock, "perf", 1 },
1170 { ktime_get_mono_fast_ns, "mono", 1 },
1171 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1172 { ktime_get_boot_fast_ns, "boot", 1 },
1173 ARCH_TRACE_CLOCKS
1176 bool trace_clock_in_ns(struct trace_array *tr)
1178 if (trace_clocks[tr->clock_id].in_ns)
1179 return true;
1181 return false;
1185 * trace_parser_get_init - gets the buffer for trace parser
1187 int trace_parser_get_init(struct trace_parser *parser, int size)
1189 memset(parser, 0, sizeof(*parser));
1191 parser->buffer = kmalloc(size, GFP_KERNEL);
1192 if (!parser->buffer)
1193 return 1;
1195 parser->size = size;
1196 return 0;
1200 * trace_parser_put - frees the buffer for trace parser
1202 void trace_parser_put(struct trace_parser *parser)
1204 kfree(parser->buffer);
1205 parser->buffer = NULL;
1209 * trace_get_user - reads the user input string separated by space
1210 * (matched by isspace(ch))
1212 * For each string found the 'struct trace_parser' is updated,
1213 * and the function returns.
1215 * Returns number of bytes read.
1217 * See kernel/trace/trace.h for 'struct trace_parser' details.
1219 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1220 size_t cnt, loff_t *ppos)
1222 char ch;
1223 size_t read = 0;
1224 ssize_t ret;
1226 if (!*ppos)
1227 trace_parser_clear(parser);
1229 ret = get_user(ch, ubuf++);
1230 if (ret)
1231 goto out;
1233 read++;
1234 cnt--;
1237 * The parser is not finished with the last write,
1238 * continue reading the user input without skipping spaces.
1240 if (!parser->cont) {
1241 /* skip white space */
1242 while (cnt && isspace(ch)) {
1243 ret = get_user(ch, ubuf++);
1244 if (ret)
1245 goto out;
1246 read++;
1247 cnt--;
1250 parser->idx = 0;
1252 /* only spaces were written */
1253 if (isspace(ch) || !ch) {
1254 *ppos += read;
1255 ret = read;
1256 goto out;
1260 /* read the non-space input */
1261 while (cnt && !isspace(ch) && ch) {
1262 if (parser->idx < parser->size - 1)
1263 parser->buffer[parser->idx++] = ch;
1264 else {
1265 ret = -EINVAL;
1266 goto out;
1268 ret = get_user(ch, ubuf++);
1269 if (ret)
1270 goto out;
1271 read++;
1272 cnt--;
1275 /* We either got finished input or we have to wait for another call. */
1276 if (isspace(ch) || !ch) {
1277 parser->buffer[parser->idx] = 0;
1278 parser->cont = false;
1279 } else if (parser->idx < parser->size - 1) {
1280 parser->cont = true;
1281 parser->buffer[parser->idx++] = ch;
1282 /* Make sure the parsed string always terminates with '\0'. */
1283 parser->buffer[parser->idx] = 0;
1284 } else {
1285 ret = -EINVAL;
1286 goto out;
1289 *ppos += read;
1290 ret = read;
1292 out:
1293 return ret;
1296 /* TODO add a seq_buf_to_buffer() */
1297 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1299 int len;
1301 if (trace_seq_used(s) <= s->seq.readpos)
1302 return -EBUSY;
1304 len = trace_seq_used(s) - s->seq.readpos;
1305 if (cnt > len)
1306 cnt = len;
1307 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1309 s->seq.readpos += cnt;
1310 return cnt;
1313 unsigned long __read_mostly tracing_thresh;
1315 #ifdef CONFIG_TRACER_MAX_TRACE
1317 * Copy the new maximum trace into the separate maximum-trace
1318 * structure. (this way the maximum trace is permanently saved,
1319 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1321 static void
1322 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1324 struct trace_buffer *trace_buf = &tr->trace_buffer;
1325 struct trace_buffer *max_buf = &tr->max_buffer;
1326 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1327 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1329 max_buf->cpu = cpu;
1330 max_buf->time_start = data->preempt_timestamp;
1332 max_data->saved_latency = tr->max_latency;
1333 max_data->critical_start = data->critical_start;
1334 max_data->critical_end = data->critical_end;
1336 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1337 max_data->pid = tsk->pid;
1339 * If tsk == current, then use current_uid(), as that does not use
1340 * RCU. The irq tracer can be called out of RCU scope.
1342 if (tsk == current)
1343 max_data->uid = current_uid();
1344 else
1345 max_data->uid = task_uid(tsk);
1347 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1348 max_data->policy = tsk->policy;
1349 max_data->rt_priority = tsk->rt_priority;
1351 /* record this tasks comm */
1352 tracing_record_cmdline(tsk);
1356 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1357 * @tr: tracer
1358 * @tsk: the task with the latency
1359 * @cpu: The cpu that initiated the trace.
1361 * Flip the buffers between the @tr and the max_tr and record information
1362 * about which task was the cause of this latency.
1364 void
1365 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1367 if (tr->stop_count)
1368 return;
1370 WARN_ON_ONCE(!irqs_disabled());
1372 if (!tr->allocated_snapshot) {
1373 /* Only the nop tracer should hit this when disabling */
1374 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1375 return;
1378 arch_spin_lock(&tr->max_lock);
1380 /* Inherit the recordable setting from trace_buffer */
1381 if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
1382 ring_buffer_record_on(tr->max_buffer.buffer);
1383 else
1384 ring_buffer_record_off(tr->max_buffer.buffer);
1386 swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);
1388 __update_max_tr(tr, tsk, cpu);
1389 arch_spin_unlock(&tr->max_lock);
1393 * update_max_tr_single - only copy one trace over, and reset the rest
1394 * @tr - tracer
1395 * @tsk - task with the latency
1396 * @cpu - the cpu of the buffer to copy.
1398 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1400 void
1401 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1403 int ret;
1405 if (tr->stop_count)
1406 return;
1408 WARN_ON_ONCE(!irqs_disabled());
1409 if (!tr->allocated_snapshot) {
1410 /* Only the nop tracer should hit this when disabling */
1411 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1412 return;
1415 arch_spin_lock(&tr->max_lock);
1417 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1419 if (ret == -EBUSY) {
1421 * We failed to swap the buffer due to a commit taking
1422 * place on this CPU. We fail to record, but we reset
1423 * the max trace buffer (no one writes directly to it)
1424 * and flag that it failed.
1426 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1427 "Failed to swap buffers due to commit in progress\n");
1430 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1432 __update_max_tr(tr, tsk, cpu);
1433 arch_spin_unlock(&tr->max_lock);
1435 #endif /* CONFIG_TRACER_MAX_TRACE */
1437 static int wait_on_pipe(struct trace_iterator *iter, bool full)
1439 /* Iterators are static, they should be filled or empty */
1440 if (trace_buffer_iter(iter, iter->cpu_file))
1441 return 0;
1443 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1444 full);
1447 #ifdef CONFIG_FTRACE_STARTUP_TEST
1448 static bool selftests_can_run;
1450 struct trace_selftests {
1451 struct list_head list;
1452 struct tracer *type;
1455 static LIST_HEAD(postponed_selftests);
1457 static int save_selftest(struct tracer *type)
1459 struct trace_selftests *selftest;
1461 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1462 if (!selftest)
1463 return -ENOMEM;
1465 selftest->type = type;
1466 list_add(&selftest->list, &postponed_selftests);
1467 return 0;
1470 static int run_tracer_selftest(struct tracer *type)
1472 struct trace_array *tr = &global_trace;
1473 struct tracer *saved_tracer = tr->current_trace;
1474 int ret;
1476 if (!type->selftest || tracing_selftest_disabled)
1477 return 0;
1480 * If a tracer registers early in boot up (before scheduling is
1481 * initialized and such), then do not run its selftests yet.
1482 * Instead, run it a little later in the boot process.
1484 if (!selftests_can_run)
1485 return save_selftest(type);
1488 * Run a selftest on this tracer.
1489 * Here we reset the trace buffer, and set the current
1490 * tracer to be this tracer. The tracer can then run some
1491 * internal tracing to verify that everything is in order.
1492 * If we fail, we do not register this tracer.
1494 tracing_reset_online_cpus(&tr->trace_buffer);
1496 tr->current_trace = type;
1498 #ifdef CONFIG_TRACER_MAX_TRACE
1499 if (type->use_max_tr) {
1500 /* If we expanded the buffers, make sure the max is expanded too */
1501 if (ring_buffer_expanded)
1502 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1503 RING_BUFFER_ALL_CPUS);
1504 tr->allocated_snapshot = true;
1506 #endif
1508 /* the test is responsible for initializing and enabling */
1509 pr_info("Testing tracer %s: ", type->name);
1510 ret = type->selftest(type, tr);
1511 /* the test is responsible for resetting too */
1512 tr->current_trace = saved_tracer;
1513 if (ret) {
1514 printk(KERN_CONT "FAILED!\n");
1515 /* Add the warning after printing 'FAILED' */
1516 WARN_ON(1);
1517 return -1;
1519 /* Only reset on passing, to avoid touching corrupted buffers */
1520 tracing_reset_online_cpus(&tr->trace_buffer);
1522 #ifdef CONFIG_TRACER_MAX_TRACE
1523 if (type->use_max_tr) {
1524 tr->allocated_snapshot = false;
1526 /* Shrink the max buffer again */
1527 if (ring_buffer_expanded)
1528 ring_buffer_resize(tr->max_buffer.buffer, 1,
1529 RING_BUFFER_ALL_CPUS);
1531 #endif
1533 printk(KERN_CONT "PASSED\n");
1534 return 0;
1537 static __init int init_trace_selftests(void)
1539 struct trace_selftests *p, *n;
1540 struct tracer *t, **last;
1541 int ret;
1543 selftests_can_run = true;
1545 mutex_lock(&trace_types_lock);
1547 if (list_empty(&postponed_selftests))
1548 goto out;
1550 pr_info("Running postponed tracer tests:\n");
1552 tracing_selftest_running = true;
1553 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
1554 ret = run_tracer_selftest(p->type);
1555 /* If the test fails, then warn and remove from available_tracers */
1556 if (ret < 0) {
1557 WARN(1, "tracer: %s failed selftest, disabling\n",
1558 p->type->name);
1559 last = &trace_types;
1560 for (t = trace_types; t; t = t->next) {
1561 if (t == p->type) {
1562 *last = t->next;
1563 break;
1565 last = &t->next;
1568 list_del(&p->list);
1569 kfree(p);
1571 tracing_selftest_running = false;
1573 out:
1574 mutex_unlock(&trace_types_lock);
1576 return 0;
1578 core_initcall(init_trace_selftests);
1579 #else
1580 static inline int run_tracer_selftest(struct tracer *type)
1582 return 0;
1584 #endif /* CONFIG_FTRACE_STARTUP_TEST */
1586 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1588 static void __init apply_trace_boot_options(void);
1591 * register_tracer - register a tracer with the ftrace system.
1592 * @type - the plugin for the tracer
1594 * Register a new plugin tracer.
1596 int __init register_tracer(struct tracer *type)
1598 struct tracer *t;
1599 int ret = 0;
1601 if (!type->name) {
1602 pr_info("Tracer must have a name\n");
1603 return -1;
1606 if (strlen(type->name) >= MAX_TRACER_SIZE) {
1607 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1608 return -1;
1611 mutex_lock(&trace_types_lock);
1613 tracing_selftest_running = true;
1615 for (t = trace_types; t; t = t->next) {
1616 if (strcmp(type->name, t->name) == 0) {
1617 /* already found */
1618 pr_info("Tracer %s already registered\n",
1619 type->name);
1620 ret = -1;
1621 goto out;
1625 if (!type->set_flag)
1626 type->set_flag = &dummy_set_flag;
1627 if (!type->flags) {
1628 /*allocate a dummy tracer_flags*/
1629 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
1630 if (!type->flags) {
1631 ret = -ENOMEM;
1632 goto out;
1634 type->flags->val = 0;
1635 type->flags->opts = dummy_tracer_opt;
1636 } else
1637 if (!type->flags->opts)
1638 type->flags->opts = dummy_tracer_opt;
1640 /* store the tracer for __set_tracer_option */
1641 type->flags->trace = type;
1643 ret = run_tracer_selftest(type);
1644 if (ret < 0)
1645 goto out;
1647 type->next = trace_types;
1648 trace_types = type;
1649 add_tracer_options(&global_trace, type);
1651 out:
1652 tracing_selftest_running = false;
1653 mutex_unlock(&trace_types_lock);
1655 if (ret || !default_bootup_tracer)
1656 goto out_unlock;
1658 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1659 goto out_unlock;
1661 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1662 /* Do we want this tracer to start on bootup? */
1663 tracing_set_tracer(&global_trace, type->name);
1664 default_bootup_tracer = NULL;
1666 apply_trace_boot_options();
1668 /* disable other selftests, since this will break it. */
1669 tracing_selftest_disabled = true;
1670 #ifdef CONFIG_FTRACE_STARTUP_TEST
1671 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1672 type->name);
1673 #endif
1675 out_unlock:
1676 return ret;
1679 void tracing_reset(struct trace_buffer *buf, int cpu)
1681 struct ring_buffer *buffer = buf->buffer;
1683 if (!buffer)
1684 return;
1686 ring_buffer_record_disable(buffer);
1688 /* Make sure all commits have finished */
1689 synchronize_sched();
1690 ring_buffer_reset_cpu(buffer, cpu);
1692 ring_buffer_record_enable(buffer);
1695 void tracing_reset_online_cpus(struct trace_buffer *buf)
1697 struct ring_buffer *buffer = buf->buffer;
1698 int cpu;
1700 if (!buffer)
1701 return;
1703 ring_buffer_record_disable(buffer);
1705 /* Make sure all commits have finished */
1706 synchronize_sched();
1708 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1710 for_each_online_cpu(cpu)
1711 ring_buffer_reset_cpu(buffer, cpu);
1713 ring_buffer_record_enable(buffer);
1716 /* Must have trace_types_lock held */
1717 void tracing_reset_all_online_cpus(void)
1719 struct trace_array *tr;
1721 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1722 if (!tr->clear_trace)
1723 continue;
1724 tr->clear_trace = false;
1725 tracing_reset_online_cpus(&tr->trace_buffer);
1726 #ifdef CONFIG_TRACER_MAX_TRACE
1727 tracing_reset_online_cpus(&tr->max_buffer);
1728 #endif
1732 static int *tgid_map;
1734 #define SAVED_CMDLINES_DEFAULT 128
1735 #define NO_CMDLINE_MAP UINT_MAX
1736 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1737 struct saved_cmdlines_buffer {
1738 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1739 unsigned *map_cmdline_to_pid;
1740 unsigned cmdline_num;
1741 int cmdline_idx;
1742 char *saved_cmdlines;
1744 static struct saved_cmdlines_buffer *savedcmd;
1746 /* temporary disable recording */
1747 static atomic_t trace_record_taskinfo_disabled __read_mostly;
1749 static inline char *get_saved_cmdlines(int idx)
1751 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1754 static inline void set_cmdline(int idx, const char *cmdline)
1756 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1759 static int allocate_cmdlines_buffer(unsigned int val,
1760 struct saved_cmdlines_buffer *s)
1762 s->map_cmdline_to_pid = kmalloc_array(val,
1763 sizeof(*s->map_cmdline_to_pid),
1764 GFP_KERNEL);
1765 if (!s->map_cmdline_to_pid)
1766 return -ENOMEM;
1768 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
1769 if (!s->saved_cmdlines) {
1770 kfree(s->map_cmdline_to_pid);
1771 return -ENOMEM;
1774 s->cmdline_idx = 0;
1775 s->cmdline_num = val;
1776 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1777 sizeof(s->map_pid_to_cmdline));
1778 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1779 val * sizeof(*s->map_cmdline_to_pid));
1781 return 0;
1784 static int trace_create_savedcmd(void)
1786 int ret;
1788 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
1789 if (!savedcmd)
1790 return -ENOMEM;
1792 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1793 if (ret < 0) {
1794 kfree(savedcmd);
1795 savedcmd = NULL;
1796 return -ENOMEM;
1799 return 0;
1802 int is_tracing_stopped(void)
1804 return global_trace.stop_count;
1808 * tracing_start - quick start of the tracer
1810 * If tracing is enabled but was stopped by tracing_stop,
1811 * this will start the tracer back up.
1813 void tracing_start(void)
1815 struct ring_buffer *buffer;
1816 unsigned long flags;
1818 if (tracing_disabled)
1819 return;
1821 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1822 if (--global_trace.stop_count) {
1823 if (global_trace.stop_count < 0) {
1824 /* Someone screwed up their debugging */
1825 WARN_ON_ONCE(1);
1826 global_trace.stop_count = 0;
1828 goto out;
1831 /* Prevent the buffers from switching */
1832 arch_spin_lock(&global_trace.max_lock);
1834 buffer = global_trace.trace_buffer.buffer;
1835 if (buffer)
1836 ring_buffer_record_enable(buffer);
1838 #ifdef CONFIG_TRACER_MAX_TRACE
1839 buffer = global_trace.max_buffer.buffer;
1840 if (buffer)
1841 ring_buffer_record_enable(buffer);
1842 #endif
1844 arch_spin_unlock(&global_trace.max_lock);
1846 out:
1847 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1850 static void tracing_start_tr(struct trace_array *tr)
1852 struct ring_buffer *buffer;
1853 unsigned long flags;
1855 if (tracing_disabled)
1856 return;
1858 /* If global, we need to also start the max tracer */
1859 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1860 return tracing_start();
1862 raw_spin_lock_irqsave(&tr->start_lock, flags);
1864 if (--tr->stop_count) {
1865 if (tr->stop_count < 0) {
1866 /* Someone screwed up their debugging */
1867 WARN_ON_ONCE(1);
1868 tr->stop_count = 0;
1870 goto out;
1873 buffer = tr->trace_buffer.buffer;
1874 if (buffer)
1875 ring_buffer_record_enable(buffer);
1877 out:
1878 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1882 * tracing_stop - quick stop of the tracer
1884 * Light weight way to stop tracing. Use in conjunction with
1885 * tracing_start.
1887 void tracing_stop(void)
1889 struct ring_buffer *buffer;
1890 unsigned long flags;
1892 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1893 if (global_trace.stop_count++)
1894 goto out;
1896 /* Prevent the buffers from switching */
1897 arch_spin_lock(&global_trace.max_lock);
1899 buffer = global_trace.trace_buffer.buffer;
1900 if (buffer)
1901 ring_buffer_record_disable(buffer);
1903 #ifdef CONFIG_TRACER_MAX_TRACE
1904 buffer = global_trace.max_buffer.buffer;
1905 if (buffer)
1906 ring_buffer_record_disable(buffer);
1907 #endif
1909 arch_spin_unlock(&global_trace.max_lock);
1911 out:
1912 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1915 static void tracing_stop_tr(struct trace_array *tr)
1917 struct ring_buffer *buffer;
1918 unsigned long flags;
1920 /* If global, we need to also stop the max tracer */
1921 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1922 return tracing_stop();
1924 raw_spin_lock_irqsave(&tr->start_lock, flags);
1925 if (tr->stop_count++)
1926 goto out;
1928 buffer = tr->trace_buffer.buffer;
1929 if (buffer)
1930 ring_buffer_record_disable(buffer);
1932 out:
1933 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1936 static int trace_save_cmdline(struct task_struct *tsk)
1938 unsigned pid, idx;
1940 /* treat recording of idle task as a success */
1941 if (!tsk->pid)
1942 return 1;
1944 if (unlikely(tsk->pid > PID_MAX_DEFAULT))
1945 return 0;
1948 * It's not the end of the world if we don't get
1949 * the lock, but we also don't want to spin
1950 * nor do we want to disable interrupts,
1951 * so if we miss here, then better luck next time.
1953 if (!arch_spin_trylock(&trace_cmdline_lock))
1954 return 0;
1956 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
1957 if (idx == NO_CMDLINE_MAP) {
1958 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
1961 * Check whether the cmdline buffer at idx has a pid
1962 * mapped. We are going to overwrite that entry so we
1963 * need to clear the map_pid_to_cmdline. Otherwise we
1964 * would read the new comm for the old pid.
1966 pid = savedcmd->map_cmdline_to_pid[idx];
1967 if (pid != NO_CMDLINE_MAP)
1968 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1970 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1971 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
1973 savedcmd->cmdline_idx = idx;
1976 set_cmdline(idx, tsk->comm);
1978 arch_spin_unlock(&trace_cmdline_lock);
1980 return 1;
1983 static void __trace_find_cmdline(int pid, char comm[])
1985 unsigned map;
1987 if (!pid) {
1988 strcpy(comm, "<idle>");
1989 return;
1992 if (WARN_ON_ONCE(pid < 0)) {
1993 strcpy(comm, "<XXX>");
1994 return;
1997 if (pid > PID_MAX_DEFAULT) {
1998 strcpy(comm, "<...>");
1999 return;
2002 map = savedcmd->map_pid_to_cmdline[pid];
2003 if (map != NO_CMDLINE_MAP)
2004 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2005 else
2006 strcpy(comm, "<...>");
2009 void trace_find_cmdline(int pid, char comm[])
2011 preempt_disable();
2012 arch_spin_lock(&trace_cmdline_lock);
2014 __trace_find_cmdline(pid, comm);
2016 arch_spin_unlock(&trace_cmdline_lock);
2017 preempt_enable();
2020 int trace_find_tgid(int pid)
2022 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2023 return 0;
2025 return tgid_map[pid];
2028 static int trace_save_tgid(struct task_struct *tsk)
2030 /* treat recording of idle task as a success */
2031 if (!tsk->pid)
2032 return 1;
2034 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
2035 return 0;
2037 tgid_map[tsk->pid] = tsk->tgid;
2038 return 1;
2041 static bool tracing_record_taskinfo_skip(int flags)
2043 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2044 return true;
2045 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2046 return true;
2047 if (!__this_cpu_read(trace_taskinfo_save))
2048 return true;
2049 return false;
2053 * tracing_record_taskinfo - record the task info of a task
2055 * @task - task to record
2056 * @flags - TRACE_RECORD_CMDLINE for recording comm
2057 * - TRACE_RECORD_TGID for recording tgid
2059 void tracing_record_taskinfo(struct task_struct *task, int flags)
2061 bool done;
2063 if (tracing_record_taskinfo_skip(flags))
2064 return;
2067 * Record as much task information as possible. If some fail, continue
2068 * to try to record the others.
2070 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2071 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2073 /* If recording any information failed, retry again soon. */
2074 if (!done)
2075 return;
2077 __this_cpu_write(trace_taskinfo_save, false);
2081 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2083 * @prev - previous task during sched_switch
2084 * @next - next task during sched_switch
2085 * @flags - TRACE_RECORD_CMDLINE for recording comm
2086 * TRACE_RECORD_TGID for recording tgid
2088 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2089 struct task_struct *next, int flags)
2091 bool done;
2093 if (tracing_record_taskinfo_skip(flags))
2094 return;
2097 * Record as much task information as possible. If some fail, continue
2098 * to try to record the others.
2100 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2101 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2102 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2103 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2105 /* If recording any information failed, retry again soon. */
2106 if (!done)
2107 return;
2109 __this_cpu_write(trace_taskinfo_save, false);
2112 /* Helpers to record a specific task information */
2113 void tracing_record_cmdline(struct task_struct *task)
2115 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2118 void tracing_record_tgid(struct task_struct *task)
2120 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2124 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2125 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2126 * simplifies those functions and keeps them in sync.
2128 enum print_line_t trace_handle_return(struct trace_seq *s)
2130 return trace_seq_has_overflowed(s) ?
2131 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2133 EXPORT_SYMBOL_GPL(trace_handle_return);
2135 void
2136 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
2137 int pc)
2139 struct task_struct *tsk = current;
2141 entry->preempt_count = pc & 0xff;
2142 entry->pid = (tsk) ? tsk->pid : 0;
2143 entry->flags =
2144 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2145 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
2146 #else
2147 TRACE_FLAG_IRQS_NOSUPPORT |
2148 #endif
2149 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
2150 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
2151 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
2152 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2153 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
2155 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
2157 struct ring_buffer_event *
2158 trace_buffer_lock_reserve(struct ring_buffer *buffer,
2159 int type,
2160 unsigned long len,
2161 unsigned long flags, int pc)
2163 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
2166 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2167 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2168 static int trace_buffered_event_ref;
2171 * trace_buffered_event_enable - enable buffering events
2173 * When events are being filtered, it is quicker to use a temporary
2174 * buffer to write the event data into if there's a likely chance
2175 * that it will not be committed. The discard of the ring buffer
2176 * is not as fast as committing, and is much slower than copying
2177 * a commit.
2179 * When an event is to be filtered, allocate per cpu buffers to
2180 * write the event data into, and if the event is filtered and discarded
2181 * it is simply dropped, otherwise, the entire data is to be committed
2182 * in one shot.
2184 void trace_buffered_event_enable(void)
2186 struct ring_buffer_event *event;
2187 struct page *page;
2188 int cpu;
2190 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2192 if (trace_buffered_event_ref++)
2193 return;
2195 for_each_tracing_cpu(cpu) {
2196 page = alloc_pages_node(cpu_to_node(cpu),
2197 GFP_KERNEL | __GFP_NORETRY, 0);
2198 if (!page)
2199 goto failed;
2201 event = page_address(page);
2202 memset(event, 0, sizeof(*event));
2204 per_cpu(trace_buffered_event, cpu) = event;
2206 preempt_disable();
2207 if (cpu == smp_processor_id() &&
2208 this_cpu_read(trace_buffered_event) !=
2209 per_cpu(trace_buffered_event, cpu))
2210 WARN_ON_ONCE(1);
2211 preempt_enable();
2214 return;
2215 failed:
2216 trace_buffered_event_disable();
2219 static void enable_trace_buffered_event(void *data)
2221 /* Probably not needed, but do it anyway */
2222 smp_rmb();
2223 this_cpu_dec(trace_buffered_event_cnt);
2226 static void disable_trace_buffered_event(void *data)
2228 this_cpu_inc(trace_buffered_event_cnt);
2232 * trace_buffered_event_disable - disable buffering events
2234 * When a filter is removed, it is faster to not use the buffered
2235 * events, and to commit directly into the ring buffer. Free up
2236 * the temp buffers when there are no more users. This requires
2237 * special synchronization with current events.
2239 void trace_buffered_event_disable(void)
2241 int cpu;
2243 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2245 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2246 return;
2248 if (--trace_buffered_event_ref)
2249 return;
2251 preempt_disable();
2252 /* For each CPU, set the buffer as used. */
2253 smp_call_function_many(tracing_buffer_mask,
2254 disable_trace_buffered_event, NULL, 1);
2255 preempt_enable();
2257 /* Wait for all current users to finish */
2258 synchronize_sched();
2260 for_each_tracing_cpu(cpu) {
2261 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2262 per_cpu(trace_buffered_event, cpu) = NULL;
2265 * Make sure trace_buffered_event is NULL before clearing
2266 * trace_buffered_event_cnt.
2268 smp_wmb();
2270 preempt_disable();
2271 /* Do the work on each cpu */
2272 smp_call_function_many(tracing_buffer_mask,
2273 enable_trace_buffered_event, NULL, 1);
2274 preempt_enable();
2277 static struct ring_buffer *temp_buffer;
2279 struct ring_buffer_event *
2280 trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
2281 struct trace_event_file *trace_file,
2282 int type, unsigned long len,
2283 unsigned long flags, int pc)
2285 struct ring_buffer_event *entry;
2286 int val;
2288 *current_rb = trace_file->tr->trace_buffer.buffer;
2290 if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
2291 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2292 (entry = this_cpu_read(trace_buffered_event))) {
2293 /* Try to use the per cpu buffer first */
2294 val = this_cpu_inc_return(trace_buffered_event_cnt);
2295 if (val == 1) {
2296 trace_event_setup(entry, type, flags, pc);
2297 entry->array[0] = len;
2298 return entry;
2300 this_cpu_dec(trace_buffered_event_cnt);
2303 entry = __trace_buffer_lock_reserve(*current_rb,
2304 type, len, flags, pc);
2306 * If tracing is off, but we have triggers enabled
2307 * we still need to look at the event data. Use the temp_buffer
2308 * to store the trace event for the tigger to use. It's recusive
2309 * safe and will not be recorded anywhere.
2311 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2312 *current_rb = temp_buffer;
2313 entry = __trace_buffer_lock_reserve(*current_rb,
2314 type, len, flags, pc);
2316 return entry;
2318 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2320 static DEFINE_SPINLOCK(tracepoint_iter_lock);
2321 static DEFINE_MUTEX(tracepoint_printk_mutex);
2323 static void output_printk(struct trace_event_buffer *fbuffer)
2325 struct trace_event_call *event_call;
2326 struct trace_event *event;
2327 unsigned long flags;
2328 struct trace_iterator *iter = tracepoint_print_iter;
2330 /* We should never get here if iter is NULL */
2331 if (WARN_ON_ONCE(!iter))
2332 return;
2334 event_call = fbuffer->trace_file->event_call;
2335 if (!event_call || !event_call->event.funcs ||
2336 !event_call->event.funcs->trace)
2337 return;
2339 event = &fbuffer->trace_file->event_call->event;
2341 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2342 trace_seq_init(&iter->seq);
2343 iter->ent = fbuffer->entry;
2344 event_call->event.funcs->trace(iter, 0, event);
2345 trace_seq_putc(&iter->seq, 0);
2346 printk("%s", iter->seq.buffer);
2348 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2351 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2352 void __user *buffer, size_t *lenp,
2353 loff_t *ppos)
2355 int save_tracepoint_printk;
2356 int ret;
2358 mutex_lock(&tracepoint_printk_mutex);
2359 save_tracepoint_printk = tracepoint_printk;
2361 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2364 * This will force exiting early, as tracepoint_printk
2365 * is always zero when tracepoint_printk_iter is not allocated
2367 if (!tracepoint_print_iter)
2368 tracepoint_printk = 0;
2370 if (save_tracepoint_printk == tracepoint_printk)
2371 goto out;
2373 if (tracepoint_printk)
2374 static_key_enable(&tracepoint_printk_key.key);
2375 else
2376 static_key_disable(&tracepoint_printk_key.key);
2378 out:
2379 mutex_unlock(&tracepoint_printk_mutex);
2381 return ret;
2384 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2386 if (static_key_false(&tracepoint_printk_key.key))
2387 output_printk(fbuffer);
2389 event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2390 fbuffer->event, fbuffer->entry,
2391 fbuffer->flags, fbuffer->pc);
2393 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2396 * Skip 3:
2398 * trace_buffer_unlock_commit_regs()
2399 * trace_event_buffer_commit()
2400 * trace_event_raw_event_xxx()
2402 # define STACK_SKIP 3
2404 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2405 struct ring_buffer *buffer,
2406 struct ring_buffer_event *event,
2407 unsigned long flags, int pc,
2408 struct pt_regs *regs)
2410 __buffer_unlock_commit(buffer, event);
2413 * If regs is not set, then skip the necessary functions.
2414 * Note, we can still get here via blktrace, wakeup tracer
2415 * and mmiotrace, but that's ok if they lose a function or
2416 * two. They are not that meaningful.
2418 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
2419 ftrace_trace_userstack(tr, buffer, flags, pc);
2423 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2425 void
2426 trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2427 struct ring_buffer_event *event)
2429 __buffer_unlock_commit(buffer, event);
2432 static void
2433 trace_process_export(struct trace_export *export,
2434 struct ring_buffer_event *event)
2436 struct trace_entry *entry;
2437 unsigned int size = 0;
2439 entry = ring_buffer_event_data(event);
2440 size = ring_buffer_event_length(event);
2441 export->write(export, entry, size);
2444 static DEFINE_MUTEX(ftrace_export_lock);
2446 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2448 static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2450 static inline void ftrace_exports_enable(void)
2452 static_branch_enable(&ftrace_exports_enabled);
2455 static inline void ftrace_exports_disable(void)
2457 static_branch_disable(&ftrace_exports_enabled);
2460 void ftrace_exports(struct ring_buffer_event *event)
2462 struct trace_export *export;
2464 preempt_disable_notrace();
2466 export = rcu_dereference_raw_notrace(ftrace_exports_list);
2467 while (export) {
2468 trace_process_export(export, event);
2469 export = rcu_dereference_raw_notrace(export->next);
2472 preempt_enable_notrace();
2475 static inline void
2476 add_trace_export(struct trace_export **list, struct trace_export *export)
2478 rcu_assign_pointer(export->next, *list);
2480 * We are entering export into the list but another
2481 * CPU might be walking that list. We need to make sure
2482 * the export->next pointer is valid before another CPU sees
2483 * the export pointer included into the list.
2485 rcu_assign_pointer(*list, export);
2488 static inline int
2489 rm_trace_export(struct trace_export **list, struct trace_export *export)
2491 struct trace_export **p;
2493 for (p = list; *p != NULL; p = &(*p)->next)
2494 if (*p == export)
2495 break;
2497 if (*p != export)
2498 return -1;
2500 rcu_assign_pointer(*p, (*p)->next);
2502 return 0;
2505 static inline void
2506 add_ftrace_export(struct trace_export **list, struct trace_export *export)
2508 if (*list == NULL)
2509 ftrace_exports_enable();
2511 add_trace_export(list, export);
2514 static inline int
2515 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2517 int ret;
2519 ret = rm_trace_export(list, export);
2520 if (*list == NULL)
2521 ftrace_exports_disable();
2523 return ret;
2526 int register_ftrace_export(struct trace_export *export)
2528 if (WARN_ON_ONCE(!export->write))
2529 return -1;
2531 mutex_lock(&ftrace_export_lock);
2533 add_ftrace_export(&ftrace_exports_list, export);
2535 mutex_unlock(&ftrace_export_lock);
2537 return 0;
2539 EXPORT_SYMBOL_GPL(register_ftrace_export);
2541 int unregister_ftrace_export(struct trace_export *export)
2543 int ret;
2545 mutex_lock(&ftrace_export_lock);
2547 ret = rm_ftrace_export(&ftrace_exports_list, export);
2549 mutex_unlock(&ftrace_export_lock);
2551 return ret;
2553 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
2555 void
2556 trace_function(struct trace_array *tr,
2557 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2558 int pc)
2560 struct trace_event_call *call = &event_function;
2561 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2562 struct ring_buffer_event *event;
2563 struct ftrace_entry *entry;
2565 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2566 flags, pc);
2567 if (!event)
2568 return;
2569 entry = ring_buffer_event_data(event);
2570 entry->ip = ip;
2571 entry->parent_ip = parent_ip;
2573 if (!call_filter_check_discard(call, entry, buffer, event)) {
2574 if (static_branch_unlikely(&ftrace_exports_enabled))
2575 ftrace_exports(event);
2576 __buffer_unlock_commit(buffer, event);
2580 #ifdef CONFIG_STACKTRACE
2582 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
2583 struct ftrace_stack {
2584 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
2587 static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
2588 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2590 static void __ftrace_trace_stack(struct ring_buffer *buffer,
2591 unsigned long flags,
2592 int skip, int pc, struct pt_regs *regs)
2594 struct trace_event_call *call = &event_kernel_stack;
2595 struct ring_buffer_event *event;
2596 struct stack_entry *entry;
2597 struct stack_trace trace;
2598 int use_stack;
2599 int size = FTRACE_STACK_ENTRIES;
2601 trace.nr_entries = 0;
2602 trace.skip = skip;
2605 * Add one, for this function and the call to save_stack_trace()
2606 * If regs is set, then these functions will not be in the way.
2608 #ifndef CONFIG_UNWINDER_ORC
2609 if (!regs)
2610 trace.skip++;
2611 #endif
2614 * Since events can happen in NMIs there's no safe way to
2615 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2616 * or NMI comes in, it will just have to use the default
2617 * FTRACE_STACK_SIZE.
2619 preempt_disable_notrace();
2621 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
2623 * We don't need any atomic variables, just a barrier.
2624 * If an interrupt comes in, we don't care, because it would
2625 * have exited and put the counter back to what we want.
2626 * We just need a barrier to keep gcc from moving things
2627 * around.
2629 barrier();
2630 if (use_stack == 1) {
2631 trace.entries = this_cpu_ptr(ftrace_stack.calls);
2632 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
2634 if (regs)
2635 save_stack_trace_regs(regs, &trace);
2636 else
2637 save_stack_trace(&trace);
2639 if (trace.nr_entries > size)
2640 size = trace.nr_entries;
2641 } else
2642 /* From now on, use_stack is a boolean */
2643 use_stack = 0;
2645 size *= sizeof(unsigned long);
2647 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2648 sizeof(*entry) + size, flags, pc);
2649 if (!event)
2650 goto out;
2651 entry = ring_buffer_event_data(event);
2653 memset(&entry->caller, 0, size);
2655 if (use_stack)
2656 memcpy(&entry->caller, trace.entries,
2657 trace.nr_entries * sizeof(unsigned long));
2658 else {
2659 trace.max_entries = FTRACE_STACK_ENTRIES;
2660 trace.entries = entry->caller;
2661 if (regs)
2662 save_stack_trace_regs(regs, &trace);
2663 else
2664 save_stack_trace(&trace);
2667 entry->size = trace.nr_entries;
2669 if (!call_filter_check_discard(call, entry, buffer, event))
2670 __buffer_unlock_commit(buffer, event);
2672 out:
2673 /* Again, don't let gcc optimize things here */
2674 barrier();
2675 __this_cpu_dec(ftrace_stack_reserve);
2676 preempt_enable_notrace();
2680 static inline void ftrace_trace_stack(struct trace_array *tr,
2681 struct ring_buffer *buffer,
2682 unsigned long flags,
2683 int skip, int pc, struct pt_regs *regs)
2685 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
2686 return;
2688 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
2691 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2692 int pc)
2694 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2696 if (rcu_is_watching()) {
2697 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2698 return;
2702 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
2703 * but if the above rcu_is_watching() failed, then the NMI
2704 * triggered someplace critical, and rcu_irq_enter() should
2705 * not be called from NMI.
2707 if (unlikely(in_nmi()))
2708 return;
2710 rcu_irq_enter_irqson();
2711 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2712 rcu_irq_exit_irqson();
2716 * trace_dump_stack - record a stack back trace in the trace buffer
2717 * @skip: Number of functions to skip (helper handlers)
2719 void trace_dump_stack(int skip)
2721 unsigned long flags;
2723 if (tracing_disabled || tracing_selftest_running)
2724 return;
2726 local_save_flags(flags);
2728 #ifndef CONFIG_UNWINDER_ORC
2729 /* Skip 1 to skip this function. */
2730 skip++;
2731 #endif
2732 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2733 flags, skip, preempt_count(), NULL);
2736 static DEFINE_PER_CPU(int, user_stack_count);
2738 void
2739 ftrace_trace_userstack(struct trace_array *tr,
2740 struct ring_buffer *buffer, unsigned long flags, int pc)
2742 struct trace_event_call *call = &event_user_stack;
2743 struct ring_buffer_event *event;
2744 struct userstack_entry *entry;
2745 struct stack_trace trace;
2747 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
2748 return;
2751 * NMIs can not handle page faults, even with fix ups.
2752 * The save user stack can (and often does) fault.
2754 if (unlikely(in_nmi()))
2755 return;
2758 * prevent recursion, since the user stack tracing may
2759 * trigger other kernel events.
2761 preempt_disable();
2762 if (__this_cpu_read(user_stack_count))
2763 goto out;
2765 __this_cpu_inc(user_stack_count);
2767 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2768 sizeof(*entry), flags, pc);
2769 if (!event)
2770 goto out_drop_count;
2771 entry = ring_buffer_event_data(event);
2773 entry->tgid = current->tgid;
2774 memset(&entry->caller, 0, sizeof(entry->caller));
2776 trace.nr_entries = 0;
2777 trace.max_entries = FTRACE_STACK_ENTRIES;
2778 trace.skip = 0;
2779 trace.entries = entry->caller;
2781 save_stack_trace_user(&trace);
2782 if (!call_filter_check_discard(call, entry, buffer, event))
2783 __buffer_unlock_commit(buffer, event);
2785 out_drop_count:
2786 __this_cpu_dec(user_stack_count);
2787 out:
2788 preempt_enable();
2791 #ifdef UNUSED
2792 static void __trace_userstack(struct trace_array *tr, unsigned long flags)
2794 ftrace_trace_userstack(tr, flags, preempt_count());
2796 #endif /* UNUSED */
2798 #endif /* CONFIG_STACKTRACE */
2800 /* created for use with alloc_percpu */
2801 struct trace_buffer_struct {
2802 int nesting;
2803 char buffer[4][TRACE_BUF_SIZE];
2806 static struct trace_buffer_struct *trace_percpu_buffer;
2809 * Thise allows for lockless recording. If we're nested too deeply, then
2810 * this returns NULL.
2812 static char *get_trace_buf(void)
2814 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
2816 if (!buffer || buffer->nesting >= 4)
2817 return NULL;
2819 buffer->nesting++;
2821 /* Interrupts must see nesting incremented before we use the buffer */
2822 barrier();
2823 return &buffer->buffer[buffer->nesting - 1][0];
2826 static void put_trace_buf(void)
2828 /* Don't let the decrement of nesting leak before this */
2829 barrier();
2830 this_cpu_dec(trace_percpu_buffer->nesting);
2833 static int alloc_percpu_trace_buffer(void)
2835 struct trace_buffer_struct *buffers;
2837 buffers = alloc_percpu(struct trace_buffer_struct);
2838 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
2839 return -ENOMEM;
2841 trace_percpu_buffer = buffers;
2842 return 0;
2845 static int buffers_allocated;
2847 void trace_printk_init_buffers(void)
2849 if (buffers_allocated)
2850 return;
2852 if (alloc_percpu_trace_buffer())
2853 return;
2855 /* trace_printk() is for debug use only. Don't use it in production. */
2857 pr_warn("\n");
2858 pr_warn("**********************************************************\n");
2859 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2860 pr_warn("** **\n");
2861 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
2862 pr_warn("** **\n");
2863 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
2864 pr_warn("** unsafe for production use. **\n");
2865 pr_warn("** **\n");
2866 pr_warn("** If you see this message and you are not debugging **\n");
2867 pr_warn("** the kernel, report this immediately to your vendor! **\n");
2868 pr_warn("** **\n");
2869 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2870 pr_warn("**********************************************************\n");
2872 /* Expand the buffers to set size */
2873 tracing_update_buffers();
2875 buffers_allocated = 1;
2878 * trace_printk_init_buffers() can be called by modules.
2879 * If that happens, then we need to start cmdline recording
2880 * directly here. If the global_trace.buffer is already
2881 * allocated here, then this was called by module code.
2883 if (global_trace.trace_buffer.buffer)
2884 tracing_start_cmdline_record();
2887 void trace_printk_start_comm(void)
2889 /* Start tracing comms if trace printk is set */
2890 if (!buffers_allocated)
2891 return;
2892 tracing_start_cmdline_record();
2895 static void trace_printk_start_stop_comm(int enabled)
2897 if (!buffers_allocated)
2898 return;
2900 if (enabled)
2901 tracing_start_cmdline_record();
2902 else
2903 tracing_stop_cmdline_record();
2907 * trace_vbprintk - write binary msg to tracing buffer
2910 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
2912 struct trace_event_call *call = &event_bprint;
2913 struct ring_buffer_event *event;
2914 struct ring_buffer *buffer;
2915 struct trace_array *tr = &global_trace;
2916 struct bprint_entry *entry;
2917 unsigned long flags;
2918 char *tbuffer;
2919 int len = 0, size, pc;
2921 if (unlikely(tracing_selftest_running || tracing_disabled))
2922 return 0;
2924 /* Don't pollute graph traces with trace_vprintk internals */
2925 pause_graph_tracing();
2927 pc = preempt_count();
2928 preempt_disable_notrace();
2930 tbuffer = get_trace_buf();
2931 if (!tbuffer) {
2932 len = 0;
2933 goto out_nobuffer;
2936 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2938 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2939 goto out;
2941 local_save_flags(flags);
2942 size = sizeof(*entry) + sizeof(u32) * len;
2943 buffer = tr->trace_buffer.buffer;
2944 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2945 flags, pc);
2946 if (!event)
2947 goto out;
2948 entry = ring_buffer_event_data(event);
2949 entry->ip = ip;
2950 entry->fmt = fmt;
2952 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
2953 if (!call_filter_check_discard(call, entry, buffer, event)) {
2954 __buffer_unlock_commit(buffer, event);
2955 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
2958 out:
2959 put_trace_buf();
2961 out_nobuffer:
2962 preempt_enable_notrace();
2963 unpause_graph_tracing();
2965 return len;
2967 EXPORT_SYMBOL_GPL(trace_vbprintk);
2969 __printf(3, 0)
2970 static int
2971 __trace_array_vprintk(struct ring_buffer *buffer,
2972 unsigned long ip, const char *fmt, va_list args)
2974 struct trace_event_call *call = &event_print;
2975 struct ring_buffer_event *event;
2976 int len = 0, size, pc;
2977 struct print_entry *entry;
2978 unsigned long flags;
2979 char *tbuffer;
2981 if (tracing_disabled || tracing_selftest_running)
2982 return 0;
2984 /* Don't pollute graph traces with trace_vprintk internals */
2985 pause_graph_tracing();
2987 pc = preempt_count();
2988 preempt_disable_notrace();
2991 tbuffer = get_trace_buf();
2992 if (!tbuffer) {
2993 len = 0;
2994 goto out_nobuffer;
2997 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2999 local_save_flags(flags);
3000 size = sizeof(*entry) + len + 1;
3001 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3002 flags, pc);
3003 if (!event)
3004 goto out;
3005 entry = ring_buffer_event_data(event);
3006 entry->ip = ip;
3008 memcpy(&entry->buf, tbuffer, len + 1);
3009 if (!call_filter_check_discard(call, entry, buffer, event)) {
3010 __buffer_unlock_commit(buffer, event);
3011 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
3014 out:
3015 put_trace_buf();
3017 out_nobuffer:
3018 preempt_enable_notrace();
3019 unpause_graph_tracing();
3021 return len;
3024 __printf(3, 0)
3025 int trace_array_vprintk(struct trace_array *tr,
3026 unsigned long ip, const char *fmt, va_list args)
3028 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
3031 __printf(3, 0)
3032 int trace_array_printk(struct trace_array *tr,
3033 unsigned long ip, const char *fmt, ...)
3035 int ret;
3036 va_list ap;
3038 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3039 return 0;
3041 if (!tr)
3042 return -ENOENT;
3044 va_start(ap, fmt);
3045 ret = trace_array_vprintk(tr, ip, fmt, ap);
3046 va_end(ap);
3047 return ret;
3050 __printf(3, 4)
3051 int trace_array_printk_buf(struct ring_buffer *buffer,
3052 unsigned long ip, const char *fmt, ...)
3054 int ret;
3055 va_list ap;
3057 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3058 return 0;
3060 va_start(ap, fmt);
3061 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3062 va_end(ap);
3063 return ret;
3066 __printf(2, 0)
3067 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3069 return trace_array_vprintk(&global_trace, ip, fmt, args);
3071 EXPORT_SYMBOL_GPL(trace_vprintk);
3073 static void trace_iterator_increment(struct trace_iterator *iter)
3075 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3077 iter->idx++;
3078 if (buf_iter)
3079 ring_buffer_read(buf_iter, NULL);
3082 static struct trace_entry *
3083 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3084 unsigned long *lost_events)
3086 struct ring_buffer_event *event;
3087 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3089 if (buf_iter)
3090 event = ring_buffer_iter_peek(buf_iter, ts);
3091 else
3092 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
3093 lost_events);
3095 if (event) {
3096 iter->ent_size = ring_buffer_event_length(event);
3097 return ring_buffer_event_data(event);
3099 iter->ent_size = 0;
3100 return NULL;
3103 static struct trace_entry *
3104 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3105 unsigned long *missing_events, u64 *ent_ts)
3107 struct ring_buffer *buffer = iter->trace_buffer->buffer;
3108 struct trace_entry *ent, *next = NULL;
3109 unsigned long lost_events = 0, next_lost = 0;
3110 int cpu_file = iter->cpu_file;
3111 u64 next_ts = 0, ts;
3112 int next_cpu = -1;
3113 int next_size = 0;
3114 int cpu;
3117 * If we are in a per_cpu trace file, don't bother by iterating over
3118 * all cpu and peek directly.
3120 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3121 if (ring_buffer_empty_cpu(buffer, cpu_file))
3122 return NULL;
3123 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3124 if (ent_cpu)
3125 *ent_cpu = cpu_file;
3127 return ent;
3130 for_each_tracing_cpu(cpu) {
3132 if (ring_buffer_empty_cpu(buffer, cpu))
3133 continue;
3135 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3138 * Pick the entry with the smallest timestamp:
3140 if (ent && (!next || ts < next_ts)) {
3141 next = ent;
3142 next_cpu = cpu;
3143 next_ts = ts;
3144 next_lost = lost_events;
3145 next_size = iter->ent_size;
3149 iter->ent_size = next_size;
3151 if (ent_cpu)
3152 *ent_cpu = next_cpu;
3154 if (ent_ts)
3155 *ent_ts = next_ts;
3157 if (missing_events)
3158 *missing_events = next_lost;
3160 return next;
3163 /* Find the next real entry, without updating the iterator itself */
3164 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3165 int *ent_cpu, u64 *ent_ts)
3167 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3170 /* Find the next real entry, and increment the iterator to the next entry */
3171 void *trace_find_next_entry_inc(struct trace_iterator *iter)
3173 iter->ent = __find_next_entry(iter, &iter->cpu,
3174 &iter->lost_events, &iter->ts);
3176 if (iter->ent)
3177 trace_iterator_increment(iter);
3179 return iter->ent ? iter : NULL;
3182 static void trace_consume(struct trace_iterator *iter)
3184 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
3185 &iter->lost_events);
3188 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3190 struct trace_iterator *iter = m->private;
3191 int i = (int)*pos;
3192 void *ent;
3194 WARN_ON_ONCE(iter->leftover);
3196 (*pos)++;
3198 /* can't go backwards */
3199 if (iter->idx > i)
3200 return NULL;
3202 if (iter->idx < 0)
3203 ent = trace_find_next_entry_inc(iter);
3204 else
3205 ent = iter;
3207 while (ent && iter->idx < i)
3208 ent = trace_find_next_entry_inc(iter);
3210 iter->pos = *pos;
3212 return ent;
3215 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
3217 struct ring_buffer_event *event;
3218 struct ring_buffer_iter *buf_iter;
3219 unsigned long entries = 0;
3220 u64 ts;
3222 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
3224 buf_iter = trace_buffer_iter(iter, cpu);
3225 if (!buf_iter)
3226 return;
3228 ring_buffer_iter_reset(buf_iter);
3231 * We could have the case with the max latency tracers
3232 * that a reset never took place on a cpu. This is evident
3233 * by the timestamp being before the start of the buffer.
3235 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
3236 if (ts >= iter->trace_buffer->time_start)
3237 break;
3238 entries++;
3239 ring_buffer_read(buf_iter, NULL);
3242 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
3246 * The current tracer is copied to avoid a global locking
3247 * all around.
3249 static void *s_start(struct seq_file *m, loff_t *pos)
3251 struct trace_iterator *iter = m->private;
3252 struct trace_array *tr = iter->tr;
3253 int cpu_file = iter->cpu_file;
3254 void *p = NULL;
3255 loff_t l = 0;
3256 int cpu;
3259 * copy the tracer to avoid using a global lock all around.
3260 * iter->trace is a copy of current_trace, the pointer to the
3261 * name may be used instead of a strcmp(), as iter->trace->name
3262 * will point to the same string as current_trace->name.
3264 mutex_lock(&trace_types_lock);
3265 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3266 *iter->trace = *tr->current_trace;
3267 mutex_unlock(&trace_types_lock);
3269 #ifdef CONFIG_TRACER_MAX_TRACE
3270 if (iter->snapshot && iter->trace->use_max_tr)
3271 return ERR_PTR(-EBUSY);
3272 #endif
3274 if (!iter->snapshot)
3275 atomic_inc(&trace_record_taskinfo_disabled);
3277 if (*pos != iter->pos) {
3278 iter->ent = NULL;
3279 iter->cpu = 0;
3280 iter->idx = -1;
3282 if (cpu_file == RING_BUFFER_ALL_CPUS) {
3283 for_each_tracing_cpu(cpu)
3284 tracing_iter_reset(iter, cpu);
3285 } else
3286 tracing_iter_reset(iter, cpu_file);
3288 iter->leftover = 0;
3289 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3292 } else {
3294 * If we overflowed the seq_file before, then we want
3295 * to just reuse the trace_seq buffer again.
3297 if (iter->leftover)
3298 p = iter;
3299 else {
3300 l = *pos - 1;
3301 p = s_next(m, p, &l);
3305 trace_event_read_lock();
3306 trace_access_lock(cpu_file);
3307 return p;
3310 static void s_stop(struct seq_file *m, void *p)
3312 struct trace_iterator *iter = m->private;
3314 #ifdef CONFIG_TRACER_MAX_TRACE
3315 if (iter->snapshot && iter->trace->use_max_tr)
3316 return;
3317 #endif
3319 if (!iter->snapshot)
3320 atomic_dec(&trace_record_taskinfo_disabled);
3322 trace_access_unlock(iter->cpu_file);
3323 trace_event_read_unlock();
3326 static void
3327 get_total_entries(struct trace_buffer *buf,
3328 unsigned long *total, unsigned long *entries)
3330 unsigned long count;
3331 int cpu;
3333 *total = 0;
3334 *entries = 0;
3336 for_each_tracing_cpu(cpu) {
3337 count = ring_buffer_entries_cpu(buf->buffer, cpu);
3339 * If this buffer has skipped entries, then we hold all
3340 * entries for the trace and we need to ignore the
3341 * ones before the time stamp.
3343 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3344 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3345 /* total is the same as the entries */
3346 *total += count;
3347 } else
3348 *total += count +
3349 ring_buffer_overrun_cpu(buf->buffer, cpu);
3350 *entries += count;
3354 static void print_lat_help_header(struct seq_file *m)
3356 seq_puts(m, "# _------=> CPU# \n"
3357 "# / _-----=> irqs-off \n"
3358 "# | / _----=> need-resched \n"
3359 "# || / _---=> hardirq/softirq \n"
3360 "# ||| / _--=> preempt-depth \n"
3361 "# |||| / delay \n"
3362 "# cmd pid ||||| time | caller \n"
3363 "# \\ / ||||| \\ | / \n");
3366 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
3368 unsigned long total;
3369 unsigned long entries;
3371 get_total_entries(buf, &total, &entries);
3372 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3373 entries, total, num_online_cpus());
3374 seq_puts(m, "#\n");
3377 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
3378 unsigned int flags)
3380 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3382 print_event_info(buf, m);
3384 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
3385 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
3388 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
3389 unsigned int flags)
3391 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3392 const char tgid_space[] = " ";
3393 const char space[] = " ";
3395 print_event_info(buf, m);
3397 seq_printf(m, "# %s _-----=> irqs-off\n",
3398 tgid ? tgid_space : space);
3399 seq_printf(m, "# %s / _----=> need-resched\n",
3400 tgid ? tgid_space : space);
3401 seq_printf(m, "# %s| / _---=> hardirq/softirq\n",
3402 tgid ? tgid_space : space);
3403 seq_printf(m, "# %s|| / _--=> preempt-depth\n",
3404 tgid ? tgid_space : space);
3405 seq_printf(m, "# %s||| / delay\n",
3406 tgid ? tgid_space : space);
3407 seq_printf(m, "# TASK-PID %sCPU# |||| TIMESTAMP FUNCTION\n",
3408 tgid ? " TGID " : space);
3409 seq_printf(m, "# | | %s | |||| | |\n",
3410 tgid ? " | " : space);
3413 void
3414 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3416 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
3417 struct trace_buffer *buf = iter->trace_buffer;
3418 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
3419 struct tracer *type = iter->trace;
3420 unsigned long entries;
3421 unsigned long total;
3422 const char *name = "preemption";
3424 name = type->name;
3426 get_total_entries(buf, &total, &entries);
3428 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
3429 name, UTS_RELEASE);
3430 seq_puts(m, "# -----------------------------------"
3431 "---------------------------------\n");
3432 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
3433 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
3434 nsecs_to_usecs(data->saved_latency),
3435 entries,
3436 total,
3437 buf->cpu,
3438 #if defined(CONFIG_PREEMPT_NONE)
3439 "server",
3440 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
3441 "desktop",
3442 #elif defined(CONFIG_PREEMPT)
3443 "preempt",
3444 #else
3445 "unknown",
3446 #endif
3447 /* These are reserved for later use */
3448 0, 0, 0, 0);
3449 #ifdef CONFIG_SMP
3450 seq_printf(m, " #P:%d)\n", num_online_cpus());
3451 #else
3452 seq_puts(m, ")\n");
3453 #endif
3454 seq_puts(m, "# -----------------\n");
3455 seq_printf(m, "# | task: %.16s-%d "
3456 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
3457 data->comm, data->pid,
3458 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
3459 data->policy, data->rt_priority);
3460 seq_puts(m, "# -----------------\n");
3462 if (data->critical_start) {
3463 seq_puts(m, "# => started at: ");
3464 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3465 trace_print_seq(m, &iter->seq);
3466 seq_puts(m, "\n# => ended at: ");
3467 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3468 trace_print_seq(m, &iter->seq);
3469 seq_puts(m, "\n#\n");
3472 seq_puts(m, "#\n");
3475 static void test_cpu_buff_start(struct trace_iterator *iter)
3477 struct trace_seq *s = &iter->seq;
3478 struct trace_array *tr = iter->tr;
3480 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
3481 return;
3483 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3484 return;
3486 if (cpumask_available(iter->started) &&
3487 cpumask_test_cpu(iter->cpu, iter->started))
3488 return;
3490 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
3491 return;
3493 if (cpumask_available(iter->started))
3494 cpumask_set_cpu(iter->cpu, iter->started);
3496 /* Don't print started cpu buffer for the first entry of the trace */
3497 if (iter->idx > 1)
3498 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3499 iter->cpu);
3502 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
3504 struct trace_array *tr = iter->tr;
3505 struct trace_seq *s = &iter->seq;
3506 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
3507 struct trace_entry *entry;
3508 struct trace_event *event;
3510 entry = iter->ent;
3512 test_cpu_buff_start(iter);
3514 event = ftrace_find_event(entry->type);
3516 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3517 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3518 trace_print_lat_context(iter);
3519 else
3520 trace_print_context(iter);
3523 if (trace_seq_has_overflowed(s))
3524 return TRACE_TYPE_PARTIAL_LINE;
3526 if (event)
3527 return event->funcs->trace(iter, sym_flags, event);
3529 trace_seq_printf(s, "Unknown type %d\n", entry->type);
3531 return trace_handle_return(s);
3534 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
3536 struct trace_array *tr = iter->tr;
3537 struct trace_seq *s = &iter->seq;
3538 struct trace_entry *entry;
3539 struct trace_event *event;
3541 entry = iter->ent;
3543 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
3544 trace_seq_printf(s, "%d %d %llu ",
3545 entry->pid, iter->cpu, iter->ts);
3547 if (trace_seq_has_overflowed(s))
3548 return TRACE_TYPE_PARTIAL_LINE;
3550 event = ftrace_find_event(entry->type);
3551 if (event)
3552 return event->funcs->raw(iter, 0, event);
3554 trace_seq_printf(s, "%d ?\n", entry->type);
3556 return trace_handle_return(s);
3559 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
3561 struct trace_array *tr = iter->tr;
3562 struct trace_seq *s = &iter->seq;
3563 unsigned char newline = '\n';
3564 struct trace_entry *entry;
3565 struct trace_event *event;
3567 entry = iter->ent;
3569 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3570 SEQ_PUT_HEX_FIELD(s, entry->pid);
3571 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3572 SEQ_PUT_HEX_FIELD(s, iter->ts);
3573 if (trace_seq_has_overflowed(s))
3574 return TRACE_TYPE_PARTIAL_LINE;
3577 event = ftrace_find_event(entry->type);
3578 if (event) {
3579 enum print_line_t ret = event->funcs->hex(iter, 0, event);
3580 if (ret != TRACE_TYPE_HANDLED)
3581 return ret;
3584 SEQ_PUT_FIELD(s, newline);
3586 return trace_handle_return(s);
3589 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
3591 struct trace_array *tr = iter->tr;
3592 struct trace_seq *s = &iter->seq;
3593 struct trace_entry *entry;
3594 struct trace_event *event;
3596 entry = iter->ent;
3598 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3599 SEQ_PUT_FIELD(s, entry->pid);
3600 SEQ_PUT_FIELD(s, iter->cpu);
3601 SEQ_PUT_FIELD(s, iter->ts);
3602 if (trace_seq_has_overflowed(s))
3603 return TRACE_TYPE_PARTIAL_LINE;
3606 event = ftrace_find_event(entry->type);
3607 return event ? event->funcs->binary(iter, 0, event) :
3608 TRACE_TYPE_HANDLED;
3611 int trace_empty(struct trace_iterator *iter)
3613 struct ring_buffer_iter *buf_iter;
3614 int cpu;
3616 /* If we are looking at one CPU buffer, only check that one */
3617 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
3618 cpu = iter->cpu_file;
3619 buf_iter = trace_buffer_iter(iter, cpu);
3620 if (buf_iter) {
3621 if (!ring_buffer_iter_empty(buf_iter))
3622 return 0;
3623 } else {
3624 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3625 return 0;
3627 return 1;
3630 for_each_tracing_cpu(cpu) {
3631 buf_iter = trace_buffer_iter(iter, cpu);
3632 if (buf_iter) {
3633 if (!ring_buffer_iter_empty(buf_iter))
3634 return 0;
3635 } else {
3636 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3637 return 0;
3641 return 1;
3644 /* Called with trace_event_read_lock() held. */
3645 enum print_line_t print_trace_line(struct trace_iterator *iter)
3647 struct trace_array *tr = iter->tr;
3648 unsigned long trace_flags = tr->trace_flags;
3649 enum print_line_t ret;
3651 if (iter->lost_events) {
3652 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3653 iter->cpu, iter->lost_events);
3654 if (trace_seq_has_overflowed(&iter->seq))
3655 return TRACE_TYPE_PARTIAL_LINE;
3658 if (iter->trace && iter->trace->print_line) {
3659 ret = iter->trace->print_line(iter);
3660 if (ret != TRACE_TYPE_UNHANDLED)
3661 return ret;
3664 if (iter->ent->type == TRACE_BPUTS &&
3665 trace_flags & TRACE_ITER_PRINTK &&
3666 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3667 return trace_print_bputs_msg_only(iter);
3669 if (iter->ent->type == TRACE_BPRINT &&
3670 trace_flags & TRACE_ITER_PRINTK &&
3671 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3672 return trace_print_bprintk_msg_only(iter);
3674 if (iter->ent->type == TRACE_PRINT &&
3675 trace_flags & TRACE_ITER_PRINTK &&
3676 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3677 return trace_print_printk_msg_only(iter);
3679 if (trace_flags & TRACE_ITER_BIN)
3680 return print_bin_fmt(iter);
3682 if (trace_flags & TRACE_ITER_HEX)
3683 return print_hex_fmt(iter);
3685 if (trace_flags & TRACE_ITER_RAW)
3686 return print_raw_fmt(iter);
3688 return print_trace_fmt(iter);
3691 void trace_latency_header(struct seq_file *m)
3693 struct trace_iterator *iter = m->private;
3694 struct trace_array *tr = iter->tr;
3696 /* print nothing if the buffers are empty */
3697 if (trace_empty(iter))
3698 return;
3700 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3701 print_trace_header(m, iter);
3703 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
3704 print_lat_help_header(m);
3707 void trace_default_header(struct seq_file *m)
3709 struct trace_iterator *iter = m->private;
3710 struct trace_array *tr = iter->tr;
3711 unsigned long trace_flags = tr->trace_flags;
3713 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3714 return;
3716 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3717 /* print nothing if the buffers are empty */
3718 if (trace_empty(iter))
3719 return;
3720 print_trace_header(m, iter);
3721 if (!(trace_flags & TRACE_ITER_VERBOSE))
3722 print_lat_help_header(m);
3723 } else {
3724 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3725 if (trace_flags & TRACE_ITER_IRQ_INFO)
3726 print_func_help_header_irq(iter->trace_buffer,
3727 m, trace_flags);
3728 else
3729 print_func_help_header(iter->trace_buffer, m,
3730 trace_flags);
3735 static void test_ftrace_alive(struct seq_file *m)
3737 if (!ftrace_is_dead())
3738 return;
3739 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3740 "# MAY BE MISSING FUNCTION EVENTS\n");
3743 #ifdef CONFIG_TRACER_MAX_TRACE
3744 static void show_snapshot_main_help(struct seq_file *m)
3746 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3747 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3748 "# Takes a snapshot of the main buffer.\n"
3749 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3750 "# (Doesn't have to be '2' works with any number that\n"
3751 "# is not a '0' or '1')\n");
3754 static void show_snapshot_percpu_help(struct seq_file *m)
3756 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
3757 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3758 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3759 "# Takes a snapshot of the main buffer for this cpu.\n");
3760 #else
3761 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3762 "# Must use main snapshot file to allocate.\n");
3763 #endif
3764 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3765 "# (Doesn't have to be '2' works with any number that\n"
3766 "# is not a '0' or '1')\n");
3769 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3771 if (iter->tr->allocated_snapshot)
3772 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
3773 else
3774 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
3776 seq_puts(m, "# Snapshot commands:\n");
3777 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3778 show_snapshot_main_help(m);
3779 else
3780 show_snapshot_percpu_help(m);
3782 #else
3783 /* Should never be called */
3784 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3785 #endif
3787 static int s_show(struct seq_file *m, void *v)
3789 struct trace_iterator *iter = v;
3790 int ret;
3792 if (iter->ent == NULL) {
3793 if (iter->tr) {
3794 seq_printf(m, "# tracer: %s\n", iter->trace->name);
3795 seq_puts(m, "#\n");
3796 test_ftrace_alive(m);
3798 if (iter->snapshot && trace_empty(iter))
3799 print_snapshot_help(m, iter);
3800 else if (iter->trace && iter->trace->print_header)
3801 iter->trace->print_header(m);
3802 else
3803 trace_default_header(m);
3805 } else if (iter->leftover) {
3807 * If we filled the seq_file buffer earlier, we
3808 * want to just show it now.
3810 ret = trace_print_seq(m, &iter->seq);
3812 /* ret should this time be zero, but you never know */
3813 iter->leftover = ret;
3815 } else {
3816 print_trace_line(iter);
3817 ret = trace_print_seq(m, &iter->seq);
3819 * If we overflow the seq_file buffer, then it will
3820 * ask us for this data again at start up.
3821 * Use that instead.
3822 * ret is 0 if seq_file write succeeded.
3823 * -1 otherwise.
3825 iter->leftover = ret;
3828 return 0;
3832 * Should be used after trace_array_get(), trace_types_lock
3833 * ensures that i_cdev was already initialized.
3835 static inline int tracing_get_cpu(struct inode *inode)
3837 if (inode->i_cdev) /* See trace_create_cpu_file() */
3838 return (long)inode->i_cdev - 1;
3839 return RING_BUFFER_ALL_CPUS;
3842 static const struct seq_operations tracer_seq_ops = {
3843 .start = s_start,
3844 .next = s_next,
3845 .stop = s_stop,
3846 .show = s_show,
3849 static struct trace_iterator *
3850 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
3852 struct trace_array *tr = inode->i_private;
3853 struct trace_iterator *iter;
3854 int cpu;
3856 if (tracing_disabled)
3857 return ERR_PTR(-ENODEV);
3859 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
3860 if (!iter)
3861 return ERR_PTR(-ENOMEM);
3863 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
3864 GFP_KERNEL);
3865 if (!iter->buffer_iter)
3866 goto release;
3869 * We make a copy of the current tracer to avoid concurrent
3870 * changes on it while we are reading.
3872 mutex_lock(&trace_types_lock);
3873 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
3874 if (!iter->trace)
3875 goto fail;
3877 *iter->trace = *tr->current_trace;
3879 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
3880 goto fail;
3882 iter->tr = tr;
3884 #ifdef CONFIG_TRACER_MAX_TRACE
3885 /* Currently only the top directory has a snapshot */
3886 if (tr->current_trace->print_max || snapshot)
3887 iter->trace_buffer = &tr->max_buffer;
3888 else
3889 #endif
3890 iter->trace_buffer = &tr->trace_buffer;
3891 iter->snapshot = snapshot;
3892 iter->pos = -1;
3893 iter->cpu_file = tracing_get_cpu(inode);
3894 mutex_init(&iter->mutex);
3896 /* Notify the tracer early; before we stop tracing. */
3897 if (iter->trace && iter->trace->open)
3898 iter->trace->open(iter);
3900 /* Annotate start of buffers if we had overruns */
3901 if (ring_buffer_overruns(iter->trace_buffer->buffer))
3902 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3904 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3905 if (trace_clocks[tr->clock_id].in_ns)
3906 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3908 /* stop the trace while dumping if we are not opening "snapshot" */
3909 if (!iter->snapshot)
3910 tracing_stop_tr(tr);
3912 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
3913 for_each_tracing_cpu(cpu) {
3914 iter->buffer_iter[cpu] =
3915 ring_buffer_read_prepare(iter->trace_buffer->buffer,
3916 cpu, GFP_KERNEL);
3918 ring_buffer_read_prepare_sync();
3919 for_each_tracing_cpu(cpu) {
3920 ring_buffer_read_start(iter->buffer_iter[cpu]);
3921 tracing_iter_reset(iter, cpu);
3923 } else {
3924 cpu = iter->cpu_file;
3925 iter->buffer_iter[cpu] =
3926 ring_buffer_read_prepare(iter->trace_buffer->buffer,
3927 cpu, GFP_KERNEL);
3928 ring_buffer_read_prepare_sync();
3929 ring_buffer_read_start(iter->buffer_iter[cpu]);
3930 tracing_iter_reset(iter, cpu);
3933 mutex_unlock(&trace_types_lock);
3935 return iter;
3937 fail:
3938 mutex_unlock(&trace_types_lock);
3939 kfree(iter->trace);
3940 kfree(iter->buffer_iter);
3941 release:
3942 seq_release_private(inode, file);
3943 return ERR_PTR(-ENOMEM);
3946 int tracing_open_generic(struct inode *inode, struct file *filp)
3948 if (tracing_disabled)
3949 return -ENODEV;
3951 filp->private_data = inode->i_private;
3952 return 0;
3955 bool tracing_is_disabled(void)
3957 return (tracing_disabled) ? true: false;
3961 * Open and update trace_array ref count.
3962 * Must have the current trace_array passed to it.
3964 static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
3966 struct trace_array *tr = inode->i_private;
3968 if (tracing_disabled)
3969 return -ENODEV;
3971 if (trace_array_get(tr) < 0)
3972 return -ENODEV;
3974 filp->private_data = inode->i_private;
3976 return 0;
3979 static int tracing_release(struct inode *inode, struct file *file)
3981 struct trace_array *tr = inode->i_private;
3982 struct seq_file *m = file->private_data;
3983 struct trace_iterator *iter;
3984 int cpu;
3986 if (!(file->f_mode & FMODE_READ)) {
3987 trace_array_put(tr);
3988 return 0;
3991 /* Writes do not use seq_file */
3992 iter = m->private;
3993 mutex_lock(&trace_types_lock);
3995 for_each_tracing_cpu(cpu) {
3996 if (iter->buffer_iter[cpu])
3997 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4000 if (iter->trace && iter->trace->close)
4001 iter->trace->close(iter);
4003 if (!iter->snapshot)
4004 /* reenable tracing if it was previously enabled */
4005 tracing_start_tr(tr);
4007 __trace_array_put(tr);
4009 mutex_unlock(&trace_types_lock);
4011 mutex_destroy(&iter->mutex);
4012 free_cpumask_var(iter->started);
4013 kfree(iter->trace);
4014 kfree(iter->buffer_iter);
4015 seq_release_private(inode, file);
4017 return 0;
4020 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4022 struct trace_array *tr = inode->i_private;
4024 trace_array_put(tr);
4025 return 0;
4028 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4030 struct trace_array *tr = inode->i_private;
4032 trace_array_put(tr);
4034 return single_release(inode, file);
4037 static int tracing_open(struct inode *inode, struct file *file)
4039 struct trace_array *tr = inode->i_private;
4040 struct trace_iterator *iter;
4041 int ret = 0;
4043 if (trace_array_get(tr) < 0)
4044 return -ENODEV;
4046 /* If this file was open for write, then erase contents */
4047 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4048 int cpu = tracing_get_cpu(inode);
4049 struct trace_buffer *trace_buf = &tr->trace_buffer;
4051 #ifdef CONFIG_TRACER_MAX_TRACE
4052 if (tr->current_trace->print_max)
4053 trace_buf = &tr->max_buffer;
4054 #endif
4056 if (cpu == RING_BUFFER_ALL_CPUS)
4057 tracing_reset_online_cpus(trace_buf);
4058 else
4059 tracing_reset(trace_buf, cpu);
4062 if (file->f_mode & FMODE_READ) {
4063 iter = __tracing_open(inode, file, false);
4064 if (IS_ERR(iter))
4065 ret = PTR_ERR(iter);
4066 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4067 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4070 if (ret < 0)
4071 trace_array_put(tr);
4073 return ret;
4077 * Some tracers are not suitable for instance buffers.
4078 * A tracer is always available for the global array (toplevel)
4079 * or if it explicitly states that it is.
4081 static bool
4082 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4084 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4087 /* Find the next tracer that this trace array may use */
4088 static struct tracer *
4089 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4091 while (t && !trace_ok_for_array(t, tr))
4092 t = t->next;
4094 return t;
4097 static void *
4098 t_next(struct seq_file *m, void *v, loff_t *pos)
4100 struct trace_array *tr = m->private;
4101 struct tracer *t = v;
4103 (*pos)++;
4105 if (t)
4106 t = get_tracer_for_array(tr, t->next);
4108 return t;
4111 static void *t_start(struct seq_file *m, loff_t *pos)
4113 struct trace_array *tr = m->private;
4114 struct tracer *t;
4115 loff_t l = 0;
4117 mutex_lock(&trace_types_lock);
4119 t = get_tracer_for_array(tr, trace_types);
4120 for (; t && l < *pos; t = t_next(m, t, &l))
4123 return t;
4126 static void t_stop(struct seq_file *m, void *p)
4128 mutex_unlock(&trace_types_lock);
4131 static int t_show(struct seq_file *m, void *v)
4133 struct tracer *t = v;
4135 if (!t)
4136 return 0;
4138 seq_puts(m, t->name);
4139 if (t->next)
4140 seq_putc(m, ' ');
4141 else
4142 seq_putc(m, '\n');
4144 return 0;
4147 static const struct seq_operations show_traces_seq_ops = {
4148 .start = t_start,
4149 .next = t_next,
4150 .stop = t_stop,
4151 .show = t_show,
4154 static int show_traces_open(struct inode *inode, struct file *file)
4156 struct trace_array *tr = inode->i_private;
4157 struct seq_file *m;
4158 int ret;
4160 if (tracing_disabled)
4161 return -ENODEV;
4163 if (trace_array_get(tr) < 0)
4164 return -ENODEV;
4166 ret = seq_open(file, &show_traces_seq_ops);
4167 if (ret) {
4168 trace_array_put(tr);
4169 return ret;
4172 m = file->private_data;
4173 m->private = tr;
4175 return 0;
4178 static int show_traces_release(struct inode *inode, struct file *file)
4180 struct trace_array *tr = inode->i_private;
4182 trace_array_put(tr);
4183 return seq_release(inode, file);
4186 static ssize_t
4187 tracing_write_stub(struct file *filp, const char __user *ubuf,
4188 size_t count, loff_t *ppos)
4190 return count;
4193 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
4195 int ret;
4197 if (file->f_mode & FMODE_READ)
4198 ret = seq_lseek(file, offset, whence);
4199 else
4200 file->f_pos = ret = 0;
4202 return ret;
4205 static const struct file_operations tracing_fops = {
4206 .open = tracing_open,
4207 .read = seq_read,
4208 .write = tracing_write_stub,
4209 .llseek = tracing_lseek,
4210 .release = tracing_release,
4213 static const struct file_operations show_traces_fops = {
4214 .open = show_traces_open,
4215 .read = seq_read,
4216 .llseek = seq_lseek,
4217 .release = show_traces_release,
4220 static ssize_t
4221 tracing_cpumask_read(struct file *filp, char __user *ubuf,
4222 size_t count, loff_t *ppos)
4224 struct trace_array *tr = file_inode(filp)->i_private;
4225 char *mask_str;
4226 int len;
4228 len = snprintf(NULL, 0, "%*pb\n",
4229 cpumask_pr_args(tr->tracing_cpumask)) + 1;
4230 mask_str = kmalloc(len, GFP_KERNEL);
4231 if (!mask_str)
4232 return -ENOMEM;
4234 len = snprintf(mask_str, len, "%*pb\n",
4235 cpumask_pr_args(tr->tracing_cpumask));
4236 if (len >= count) {
4237 count = -EINVAL;
4238 goto out_err;
4240 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
4242 out_err:
4243 kfree(mask_str);
4245 return count;
4248 static ssize_t
4249 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4250 size_t count, loff_t *ppos)
4252 struct trace_array *tr = file_inode(filp)->i_private;
4253 cpumask_var_t tracing_cpumask_new;
4254 int err, cpu;
4256 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4257 return -ENOMEM;
4259 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
4260 if (err)
4261 goto err_unlock;
4263 local_irq_disable();
4264 arch_spin_lock(&tr->max_lock);
4265 for_each_tracing_cpu(cpu) {
4267 * Increase/decrease the disabled counter if we are
4268 * about to flip a bit in the cpumask:
4270 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4271 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4272 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4273 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
4275 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4276 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4277 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4278 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
4281 arch_spin_unlock(&tr->max_lock);
4282 local_irq_enable();
4284 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
4285 free_cpumask_var(tracing_cpumask_new);
4287 return count;
4289 err_unlock:
4290 free_cpumask_var(tracing_cpumask_new);
4292 return err;
4295 static const struct file_operations tracing_cpumask_fops = {
4296 .open = tracing_open_generic_tr,
4297 .read = tracing_cpumask_read,
4298 .write = tracing_cpumask_write,
4299 .release = tracing_release_generic_tr,
4300 .llseek = generic_file_llseek,
4303 static int tracing_trace_options_show(struct seq_file *m, void *v)
4305 struct tracer_opt *trace_opts;
4306 struct trace_array *tr = m->private;
4307 u32 tracer_flags;
4308 int i;
4310 mutex_lock(&trace_types_lock);
4311 tracer_flags = tr->current_trace->flags->val;
4312 trace_opts = tr->current_trace->flags->opts;
4314 for (i = 0; trace_options[i]; i++) {
4315 if (tr->trace_flags & (1 << i))
4316 seq_printf(m, "%s\n", trace_options[i]);
4317 else
4318 seq_printf(m, "no%s\n", trace_options[i]);
4321 for (i = 0; trace_opts[i].name; i++) {
4322 if (tracer_flags & trace_opts[i].bit)
4323 seq_printf(m, "%s\n", trace_opts[i].name);
4324 else
4325 seq_printf(m, "no%s\n", trace_opts[i].name);
4327 mutex_unlock(&trace_types_lock);
4329 return 0;
4332 static int __set_tracer_option(struct trace_array *tr,
4333 struct tracer_flags *tracer_flags,
4334 struct tracer_opt *opts, int neg)
4336 struct tracer *trace = tracer_flags->trace;
4337 int ret;
4339 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
4340 if (ret)
4341 return ret;
4343 if (neg)
4344 tracer_flags->val &= ~opts->bit;
4345 else
4346 tracer_flags->val |= opts->bit;
4347 return 0;
4350 /* Try to assign a tracer specific option */
4351 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
4353 struct tracer *trace = tr->current_trace;
4354 struct tracer_flags *tracer_flags = trace->flags;
4355 struct tracer_opt *opts = NULL;
4356 int i;
4358 for (i = 0; tracer_flags->opts[i].name; i++) {
4359 opts = &tracer_flags->opts[i];
4361 if (strcmp(cmp, opts->name) == 0)
4362 return __set_tracer_option(tr, trace->flags, opts, neg);
4365 return -EINVAL;
4368 /* Some tracers require overwrite to stay enabled */
4369 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4371 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4372 return -1;
4374 return 0;
4377 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
4379 if ((mask == TRACE_ITER_RECORD_TGID) ||
4380 (mask == TRACE_ITER_RECORD_CMD))
4381 lockdep_assert_held(&event_mutex);
4383 /* do nothing if flag is already set */
4384 if (!!(tr->trace_flags & mask) == !!enabled)
4385 return 0;
4387 /* Give the tracer a chance to approve the change */
4388 if (tr->current_trace->flag_changed)
4389 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
4390 return -EINVAL;
4392 if (enabled)
4393 tr->trace_flags |= mask;
4394 else
4395 tr->trace_flags &= ~mask;
4397 if (mask == TRACE_ITER_RECORD_CMD)
4398 trace_event_enable_cmd_record(enabled);
4400 if (mask == TRACE_ITER_RECORD_TGID) {
4401 if (!tgid_map)
4402 tgid_map = kvcalloc(PID_MAX_DEFAULT + 1,
4403 sizeof(*tgid_map),
4404 GFP_KERNEL);
4405 if (!tgid_map) {
4406 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4407 return -ENOMEM;
4410 trace_event_enable_tgid_record(enabled);
4413 if (mask == TRACE_ITER_EVENT_FORK)
4414 trace_event_follow_fork(tr, enabled);
4416 if (mask == TRACE_ITER_FUNC_FORK)
4417 ftrace_pid_follow_fork(tr, enabled);
4419 if (mask == TRACE_ITER_OVERWRITE) {
4420 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
4421 #ifdef CONFIG_TRACER_MAX_TRACE
4422 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
4423 #endif
4426 if (mask == TRACE_ITER_PRINTK) {
4427 trace_printk_start_stop_comm(enabled);
4428 trace_printk_control(enabled);
4431 return 0;
4434 static int trace_set_options(struct trace_array *tr, char *option)
4436 char *cmp;
4437 int neg = 0;
4438 int ret;
4439 size_t orig_len = strlen(option);
4441 cmp = strstrip(option);
4443 if (strncmp(cmp, "no", 2) == 0) {
4444 neg = 1;
4445 cmp += 2;
4448 mutex_lock(&event_mutex);
4449 mutex_lock(&trace_types_lock);
4451 ret = match_string(trace_options, -1, cmp);
4452 /* If no option could be set, test the specific tracer options */
4453 if (ret < 0)
4454 ret = set_tracer_option(tr, cmp, neg);
4455 else
4456 ret = set_tracer_flag(tr, 1 << ret, !neg);
4458 mutex_unlock(&trace_types_lock);
4459 mutex_unlock(&event_mutex);
4462 * If the first trailing whitespace is replaced with '\0' by strstrip,
4463 * turn it back into a space.
4465 if (orig_len > strlen(option))
4466 option[strlen(option)] = ' ';
4468 return ret;
4471 static void __init apply_trace_boot_options(void)
4473 char *buf = trace_boot_options_buf;
4474 char *option;
4476 while (true) {
4477 option = strsep(&buf, ",");
4479 if (!option)
4480 break;
4482 if (*option)
4483 trace_set_options(&global_trace, option);
4485 /* Put back the comma to allow this to be called again */
4486 if (buf)
4487 *(buf - 1) = ',';
4491 static ssize_t
4492 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4493 size_t cnt, loff_t *ppos)
4495 struct seq_file *m = filp->private_data;
4496 struct trace_array *tr = m->private;
4497 char buf[64];
4498 int ret;
4500 if (cnt >= sizeof(buf))
4501 return -EINVAL;
4503 if (copy_from_user(buf, ubuf, cnt))
4504 return -EFAULT;
4506 buf[cnt] = 0;
4508 ret = trace_set_options(tr, buf);
4509 if (ret < 0)
4510 return ret;
4512 *ppos += cnt;
4514 return cnt;
4517 static int tracing_trace_options_open(struct inode *inode, struct file *file)
4519 struct trace_array *tr = inode->i_private;
4520 int ret;
4522 if (tracing_disabled)
4523 return -ENODEV;
4525 if (trace_array_get(tr) < 0)
4526 return -ENODEV;
4528 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4529 if (ret < 0)
4530 trace_array_put(tr);
4532 return ret;
4535 static const struct file_operations tracing_iter_fops = {
4536 .open = tracing_trace_options_open,
4537 .read = seq_read,
4538 .llseek = seq_lseek,
4539 .release = tracing_single_release_tr,
4540 .write = tracing_trace_options_write,
4543 static const char readme_msg[] =
4544 "tracing mini-HOWTO:\n\n"
4545 "# echo 0 > tracing_on : quick way to disable tracing\n"
4546 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4547 " Important files:\n"
4548 " trace\t\t\t- The static contents of the buffer\n"
4549 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4550 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4551 " current_tracer\t- function and latency tracers\n"
4552 " available_tracers\t- list of configured tracers for current_tracer\n"
4553 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4554 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4555 " trace_clock\t\t-change the clock used to order events\n"
4556 " local: Per cpu clock but may not be synced across CPUs\n"
4557 " global: Synced across CPUs but slows tracing down.\n"
4558 " counter: Not a clock, but just an increment\n"
4559 " uptime: Jiffy counter from time of boot\n"
4560 " perf: Same clock that perf events use\n"
4561 #ifdef CONFIG_X86_64
4562 " x86-tsc: TSC cycle counter\n"
4563 #endif
4564 "\n timestamp_mode\t-view the mode used to timestamp events\n"
4565 " delta: Delta difference against a buffer-wide timestamp\n"
4566 " absolute: Absolute (standalone) timestamp\n"
4567 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
4568 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
4569 " tracing_cpumask\t- Limit which CPUs to trace\n"
4570 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4571 "\t\t\t Remove sub-buffer with rmdir\n"
4572 " trace_options\t\t- Set format or modify how tracing happens\n"
4573 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
4574 "\t\t\t option name\n"
4575 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
4576 #ifdef CONFIG_DYNAMIC_FTRACE
4577 "\n available_filter_functions - list of functions that can be filtered on\n"
4578 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4579 "\t\t\t functions\n"
4580 "\t accepts: func_full_name or glob-matching-pattern\n"
4581 "\t modules: Can select a group via module\n"
4582 "\t Format: :mod:<module-name>\n"
4583 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4584 "\t triggers: a command to perform when function is hit\n"
4585 "\t Format: <function>:<trigger>[:count]\n"
4586 "\t trigger: traceon, traceoff\n"
4587 "\t\t enable_event:<system>:<event>\n"
4588 "\t\t disable_event:<system>:<event>\n"
4589 #ifdef CONFIG_STACKTRACE
4590 "\t\t stacktrace\n"
4591 #endif
4592 #ifdef CONFIG_TRACER_SNAPSHOT
4593 "\t\t snapshot\n"
4594 #endif
4595 "\t\t dump\n"
4596 "\t\t cpudump\n"
4597 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4598 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4599 "\t The first one will disable tracing every time do_fault is hit\n"
4600 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4601 "\t The first time do trap is hit and it disables tracing, the\n"
4602 "\t counter will decrement to 2. If tracing is already disabled,\n"
4603 "\t the counter will not decrement. It only decrements when the\n"
4604 "\t trigger did work\n"
4605 "\t To remove trigger without count:\n"
4606 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4607 "\t To remove trigger with a count:\n"
4608 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
4609 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
4610 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4611 "\t modules: Can select a group via module command :mod:\n"
4612 "\t Does not accept triggers\n"
4613 #endif /* CONFIG_DYNAMIC_FTRACE */
4614 #ifdef CONFIG_FUNCTION_TRACER
4615 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4616 "\t\t (function)\n"
4617 #endif
4618 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4619 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
4620 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
4621 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4622 #endif
4623 #ifdef CONFIG_TRACER_SNAPSHOT
4624 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4625 "\t\t\t snapshot buffer. Read the contents for more\n"
4626 "\t\t\t information\n"
4627 #endif
4628 #ifdef CONFIG_STACK_TRACER
4629 " stack_trace\t\t- Shows the max stack trace when active\n"
4630 " stack_max_size\t- Shows current max stack size that was traced\n"
4631 "\t\t\t Write into this file to reset the max size (trigger a\n"
4632 "\t\t\t new trace)\n"
4633 #ifdef CONFIG_DYNAMIC_FTRACE
4634 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4635 "\t\t\t traces\n"
4636 #endif
4637 #endif /* CONFIG_STACK_TRACER */
4638 #ifdef CONFIG_KPROBE_EVENTS
4639 " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4640 "\t\t\t Write into this file to define/undefine new trace events.\n"
4641 #endif
4642 #ifdef CONFIG_UPROBE_EVENTS
4643 " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4644 "\t\t\t Write into this file to define/undefine new trace events.\n"
4645 #endif
4646 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
4647 "\t accepts: event-definitions (one definition per line)\n"
4648 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
4649 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
4650 "\t -:[<group>/]<event>\n"
4651 #ifdef CONFIG_KPROBE_EVENTS
4652 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4653 "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4654 #endif
4655 #ifdef CONFIG_UPROBE_EVENTS
4656 "\t place: <path>:<offset>\n"
4657 #endif
4658 "\t args: <name>=fetcharg[:type]\n"
4659 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
4660 "\t $stack<index>, $stack, $retval, $comm\n"
4661 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string,\n"
4662 "\t b<bit-width>@<bit-offset>/<container-size>\n"
4663 #endif
4664 " events/\t\t- Directory containing all trace event subsystems:\n"
4665 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4666 " events/<system>/\t- Directory containing all trace events for <system>:\n"
4667 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4668 "\t\t\t events\n"
4669 " filter\t\t- If set, only events passing filter are traced\n"
4670 " events/<system>/<event>/\t- Directory containing control files for\n"
4671 "\t\t\t <event>:\n"
4672 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4673 " filter\t\t- If set, only events passing filter are traced\n"
4674 " trigger\t\t- If set, a command to perform when event is hit\n"
4675 "\t Format: <trigger>[:count][if <filter>]\n"
4676 "\t trigger: traceon, traceoff\n"
4677 "\t enable_event:<system>:<event>\n"
4678 "\t disable_event:<system>:<event>\n"
4679 #ifdef CONFIG_HIST_TRIGGERS
4680 "\t enable_hist:<system>:<event>\n"
4681 "\t disable_hist:<system>:<event>\n"
4682 #endif
4683 #ifdef CONFIG_STACKTRACE
4684 "\t\t stacktrace\n"
4685 #endif
4686 #ifdef CONFIG_TRACER_SNAPSHOT
4687 "\t\t snapshot\n"
4688 #endif
4689 #ifdef CONFIG_HIST_TRIGGERS
4690 "\t\t hist (see below)\n"
4691 #endif
4692 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
4693 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
4694 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4695 "\t events/block/block_unplug/trigger\n"
4696 "\t The first disables tracing every time block_unplug is hit.\n"
4697 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
4698 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
4699 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
4700 "\t Like function triggers, the counter is only decremented if it\n"
4701 "\t enabled or disabled tracing.\n"
4702 "\t To remove a trigger without a count:\n"
4703 "\t echo '!<trigger> > <system>/<event>/trigger\n"
4704 "\t To remove a trigger with a count:\n"
4705 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
4706 "\t Filters can be ignored when removing a trigger.\n"
4707 #ifdef CONFIG_HIST_TRIGGERS
4708 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
4709 "\t Format: hist:keys=<field1[,field2,...]>\n"
4710 "\t [:values=<field1[,field2,...]>]\n"
4711 "\t [:sort=<field1[,field2,...]>]\n"
4712 "\t [:size=#entries]\n"
4713 "\t [:pause][:continue][:clear]\n"
4714 "\t [:name=histname1]\n"
4715 "\t [if <filter>]\n\n"
4716 "\t When a matching event is hit, an entry is added to a hash\n"
4717 "\t table using the key(s) and value(s) named, and the value of a\n"
4718 "\t sum called 'hitcount' is incremented. Keys and values\n"
4719 "\t correspond to fields in the event's format description. Keys\n"
4720 "\t can be any field, or the special string 'stacktrace'.\n"
4721 "\t Compound keys consisting of up to two fields can be specified\n"
4722 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4723 "\t fields. Sort keys consisting of up to two fields can be\n"
4724 "\t specified using the 'sort' keyword. The sort direction can\n"
4725 "\t be modified by appending '.descending' or '.ascending' to a\n"
4726 "\t sort field. The 'size' parameter can be used to specify more\n"
4727 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4728 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4729 "\t its histogram data will be shared with other triggers of the\n"
4730 "\t same name, and trigger hits will update this common data.\n\n"
4731 "\t Reading the 'hist' file for the event will dump the hash\n"
4732 "\t table in its entirety to stdout. If there are multiple hist\n"
4733 "\t triggers attached to an event, there will be a table for each\n"
4734 "\t trigger in the output. The table displayed for a named\n"
4735 "\t trigger will be the same as any other instance having the\n"
4736 "\t same name. The default format used to display a given field\n"
4737 "\t can be modified by appending any of the following modifiers\n"
4738 "\t to the field name, as applicable:\n\n"
4739 "\t .hex display a number as a hex value\n"
4740 "\t .sym display an address as a symbol\n"
4741 "\t .sym-offset display an address as a symbol and offset\n"
4742 "\t .execname display a common_pid as a program name\n"
4743 "\t .syscall display a syscall id as a syscall name\n"
4744 "\t .log2 display log2 value rather than raw number\n"
4745 "\t .usecs display a common_timestamp in microseconds\n\n"
4746 "\t The 'pause' parameter can be used to pause an existing hist\n"
4747 "\t trigger or to start a hist trigger but not log any events\n"
4748 "\t until told to do so. 'continue' can be used to start or\n"
4749 "\t restart a paused hist trigger.\n\n"
4750 "\t The 'clear' parameter will clear the contents of a running\n"
4751 "\t hist trigger and leave its current paused/active state\n"
4752 "\t unchanged.\n\n"
4753 "\t The enable_hist and disable_hist triggers can be used to\n"
4754 "\t have one event conditionally start and stop another event's\n"
4755 "\t already-attached hist trigger. The syntax is analagous to\n"
4756 "\t the enable_event and disable_event triggers.\n"
4757 #endif
4760 static ssize_t
4761 tracing_readme_read(struct file *filp, char __user *ubuf,
4762 size_t cnt, loff_t *ppos)
4764 return simple_read_from_buffer(ubuf, cnt, ppos,
4765 readme_msg, strlen(readme_msg));
4768 static const struct file_operations tracing_readme_fops = {
4769 .open = tracing_open_generic,
4770 .read = tracing_readme_read,
4771 .llseek = generic_file_llseek,
4774 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
4776 int *ptr = v;
4778 if (*pos || m->count)
4779 ptr++;
4781 (*pos)++;
4783 for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
4784 if (trace_find_tgid(*ptr))
4785 return ptr;
4788 return NULL;
4791 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
4793 void *v;
4794 loff_t l = 0;
4796 if (!tgid_map)
4797 return NULL;
4799 v = &tgid_map[0];
4800 while (l <= *pos) {
4801 v = saved_tgids_next(m, v, &l);
4802 if (!v)
4803 return NULL;
4806 return v;
4809 static void saved_tgids_stop(struct seq_file *m, void *v)
4813 static int saved_tgids_show(struct seq_file *m, void *v)
4815 int pid = (int *)v - tgid_map;
4817 seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
4818 return 0;
4821 static const struct seq_operations tracing_saved_tgids_seq_ops = {
4822 .start = saved_tgids_start,
4823 .stop = saved_tgids_stop,
4824 .next = saved_tgids_next,
4825 .show = saved_tgids_show,
4828 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
4830 if (tracing_disabled)
4831 return -ENODEV;
4833 return seq_open(filp, &tracing_saved_tgids_seq_ops);
4837 static const struct file_operations tracing_saved_tgids_fops = {
4838 .open = tracing_saved_tgids_open,
4839 .read = seq_read,
4840 .llseek = seq_lseek,
4841 .release = seq_release,
4844 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
4846 unsigned int *ptr = v;
4848 if (*pos || m->count)
4849 ptr++;
4851 (*pos)++;
4853 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
4854 ptr++) {
4855 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
4856 continue;
4858 return ptr;
4861 return NULL;
4864 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
4866 void *v;
4867 loff_t l = 0;
4869 preempt_disable();
4870 arch_spin_lock(&trace_cmdline_lock);
4872 v = &savedcmd->map_cmdline_to_pid[0];
4873 while (l <= *pos) {
4874 v = saved_cmdlines_next(m, v, &l);
4875 if (!v)
4876 return NULL;
4879 return v;
4882 static void saved_cmdlines_stop(struct seq_file *m, void *v)
4884 arch_spin_unlock(&trace_cmdline_lock);
4885 preempt_enable();
4888 static int saved_cmdlines_show(struct seq_file *m, void *v)
4890 char buf[TASK_COMM_LEN];
4891 unsigned int *pid = v;
4893 __trace_find_cmdline(*pid, buf);
4894 seq_printf(m, "%d %s\n", *pid, buf);
4895 return 0;
4898 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
4899 .start = saved_cmdlines_start,
4900 .next = saved_cmdlines_next,
4901 .stop = saved_cmdlines_stop,
4902 .show = saved_cmdlines_show,
4905 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
4907 if (tracing_disabled)
4908 return -ENODEV;
4910 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
4913 static const struct file_operations tracing_saved_cmdlines_fops = {
4914 .open = tracing_saved_cmdlines_open,
4915 .read = seq_read,
4916 .llseek = seq_lseek,
4917 .release = seq_release,
4920 static ssize_t
4921 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
4922 size_t cnt, loff_t *ppos)
4924 char buf[64];
4925 int r;
4927 arch_spin_lock(&trace_cmdline_lock);
4928 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
4929 arch_spin_unlock(&trace_cmdline_lock);
4931 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4934 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
4936 kfree(s->saved_cmdlines);
4937 kfree(s->map_cmdline_to_pid);
4938 kfree(s);
4941 static int tracing_resize_saved_cmdlines(unsigned int val)
4943 struct saved_cmdlines_buffer *s, *savedcmd_temp;
4945 s = kmalloc(sizeof(*s), GFP_KERNEL);
4946 if (!s)
4947 return -ENOMEM;
4949 if (allocate_cmdlines_buffer(val, s) < 0) {
4950 kfree(s);
4951 return -ENOMEM;
4954 arch_spin_lock(&trace_cmdline_lock);
4955 savedcmd_temp = savedcmd;
4956 savedcmd = s;
4957 arch_spin_unlock(&trace_cmdline_lock);
4958 free_saved_cmdlines_buffer(savedcmd_temp);
4960 return 0;
4963 static ssize_t
4964 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
4965 size_t cnt, loff_t *ppos)
4967 unsigned long val;
4968 int ret;
4970 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4971 if (ret)
4972 return ret;
4974 /* must have at least 1 entry or less than PID_MAX_DEFAULT */
4975 if (!val || val > PID_MAX_DEFAULT)
4976 return -EINVAL;
4978 ret = tracing_resize_saved_cmdlines((unsigned int)val);
4979 if (ret < 0)
4980 return ret;
4982 *ppos += cnt;
4984 return cnt;
4987 static const struct file_operations tracing_saved_cmdlines_size_fops = {
4988 .open = tracing_open_generic,
4989 .read = tracing_saved_cmdlines_size_read,
4990 .write = tracing_saved_cmdlines_size_write,
4993 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
4994 static union trace_eval_map_item *
4995 update_eval_map(union trace_eval_map_item *ptr)
4997 if (!ptr->map.eval_string) {
4998 if (ptr->tail.next) {
4999 ptr = ptr->tail.next;
5000 /* Set ptr to the next real item (skip head) */
5001 ptr++;
5002 } else
5003 return NULL;
5005 return ptr;
5008 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5010 union trace_eval_map_item *ptr = v;
5013 * Paranoid! If ptr points to end, we don't want to increment past it.
5014 * This really should never happen.
5016 ptr = update_eval_map(ptr);
5017 if (WARN_ON_ONCE(!ptr))
5018 return NULL;
5020 ptr++;
5022 (*pos)++;
5024 ptr = update_eval_map(ptr);
5026 return ptr;
5029 static void *eval_map_start(struct seq_file *m, loff_t *pos)
5031 union trace_eval_map_item *v;
5032 loff_t l = 0;
5034 mutex_lock(&trace_eval_mutex);
5036 v = trace_eval_maps;
5037 if (v)
5038 v++;
5040 while (v && l < *pos) {
5041 v = eval_map_next(m, v, &l);
5044 return v;
5047 static void eval_map_stop(struct seq_file *m, void *v)
5049 mutex_unlock(&trace_eval_mutex);
5052 static int eval_map_show(struct seq_file *m, void *v)
5054 union trace_eval_map_item *ptr = v;
5056 seq_printf(m, "%s %ld (%s)\n",
5057 ptr->map.eval_string, ptr->map.eval_value,
5058 ptr->map.system);
5060 return 0;
5063 static const struct seq_operations tracing_eval_map_seq_ops = {
5064 .start = eval_map_start,
5065 .next = eval_map_next,
5066 .stop = eval_map_stop,
5067 .show = eval_map_show,
5070 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
5072 if (tracing_disabled)
5073 return -ENODEV;
5075 return seq_open(filp, &tracing_eval_map_seq_ops);
5078 static const struct file_operations tracing_eval_map_fops = {
5079 .open = tracing_eval_map_open,
5080 .read = seq_read,
5081 .llseek = seq_lseek,
5082 .release = seq_release,
5085 static inline union trace_eval_map_item *
5086 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
5088 /* Return tail of array given the head */
5089 return ptr + ptr->head.length + 1;
5092 static void
5093 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
5094 int len)
5096 struct trace_eval_map **stop;
5097 struct trace_eval_map **map;
5098 union trace_eval_map_item *map_array;
5099 union trace_eval_map_item *ptr;
5101 stop = start + len;
5104 * The trace_eval_maps contains the map plus a head and tail item,
5105 * where the head holds the module and length of array, and the
5106 * tail holds a pointer to the next list.
5108 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
5109 if (!map_array) {
5110 pr_warn("Unable to allocate trace eval mapping\n");
5111 return;
5114 mutex_lock(&trace_eval_mutex);
5116 if (!trace_eval_maps)
5117 trace_eval_maps = map_array;
5118 else {
5119 ptr = trace_eval_maps;
5120 for (;;) {
5121 ptr = trace_eval_jmp_to_tail(ptr);
5122 if (!ptr->tail.next)
5123 break;
5124 ptr = ptr->tail.next;
5127 ptr->tail.next = map_array;
5129 map_array->head.mod = mod;
5130 map_array->head.length = len;
5131 map_array++;
5133 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5134 map_array->map = **map;
5135 map_array++;
5137 memset(map_array, 0, sizeof(*map_array));
5139 mutex_unlock(&trace_eval_mutex);
5142 static void trace_create_eval_file(struct dentry *d_tracer)
5144 trace_create_file("eval_map", 0444, d_tracer,
5145 NULL, &tracing_eval_map_fops);
5148 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
5149 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5150 static inline void trace_insert_eval_map_file(struct module *mod,
5151 struct trace_eval_map **start, int len) { }
5152 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
5154 static void trace_insert_eval_map(struct module *mod,
5155 struct trace_eval_map **start, int len)
5157 struct trace_eval_map **map;
5159 if (len <= 0)
5160 return;
5162 map = start;
5164 trace_event_eval_update(map, len);
5166 trace_insert_eval_map_file(mod, start, len);
5169 static ssize_t
5170 tracing_set_trace_read(struct file *filp, char __user *ubuf,
5171 size_t cnt, loff_t *ppos)
5173 struct trace_array *tr = filp->private_data;
5174 char buf[MAX_TRACER_SIZE+2];
5175 int r;
5177 mutex_lock(&trace_types_lock);
5178 r = sprintf(buf, "%s\n", tr->current_trace->name);
5179 mutex_unlock(&trace_types_lock);
5181 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5184 int tracer_init(struct tracer *t, struct trace_array *tr)
5186 tracing_reset_online_cpus(&tr->trace_buffer);
5187 return t->init(tr);
5190 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
5192 int cpu;
5194 for_each_tracing_cpu(cpu)
5195 per_cpu_ptr(buf->data, cpu)->entries = val;
5198 #ifdef CONFIG_TRACER_MAX_TRACE
5199 /* resize @tr's buffer to the size of @size_tr's entries */
5200 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
5201 struct trace_buffer *size_buf, int cpu_id)
5203 int cpu, ret = 0;
5205 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5206 for_each_tracing_cpu(cpu) {
5207 ret = ring_buffer_resize(trace_buf->buffer,
5208 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
5209 if (ret < 0)
5210 break;
5211 per_cpu_ptr(trace_buf->data, cpu)->entries =
5212 per_cpu_ptr(size_buf->data, cpu)->entries;
5214 } else {
5215 ret = ring_buffer_resize(trace_buf->buffer,
5216 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
5217 if (ret == 0)
5218 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5219 per_cpu_ptr(size_buf->data, cpu_id)->entries;
5222 return ret;
5224 #endif /* CONFIG_TRACER_MAX_TRACE */
5226 static int __tracing_resize_ring_buffer(struct trace_array *tr,
5227 unsigned long size, int cpu)
5229 int ret;
5232 * If kernel or user changes the size of the ring buffer
5233 * we use the size that was given, and we can forget about
5234 * expanding it later.
5236 ring_buffer_expanded = true;
5238 /* May be called before buffers are initialized */
5239 if (!tr->trace_buffer.buffer)
5240 return 0;
5242 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
5243 if (ret < 0)
5244 return ret;
5246 #ifdef CONFIG_TRACER_MAX_TRACE
5247 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5248 !tr->current_trace->use_max_tr)
5249 goto out;
5251 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
5252 if (ret < 0) {
5253 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
5254 &tr->trace_buffer, cpu);
5255 if (r < 0) {
5257 * AARGH! We are left with different
5258 * size max buffer!!!!
5259 * The max buffer is our "snapshot" buffer.
5260 * When a tracer needs a snapshot (one of the
5261 * latency tracers), it swaps the max buffer
5262 * with the saved snap shot. We succeeded to
5263 * update the size of the main buffer, but failed to
5264 * update the size of the max buffer. But when we tried
5265 * to reset the main buffer to the original size, we
5266 * failed there too. This is very unlikely to
5267 * happen, but if it does, warn and kill all
5268 * tracing.
5270 WARN_ON(1);
5271 tracing_disabled = 1;
5273 return ret;
5276 if (cpu == RING_BUFFER_ALL_CPUS)
5277 set_buffer_entries(&tr->max_buffer, size);
5278 else
5279 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
5281 out:
5282 #endif /* CONFIG_TRACER_MAX_TRACE */
5284 if (cpu == RING_BUFFER_ALL_CPUS)
5285 set_buffer_entries(&tr->trace_buffer, size);
5286 else
5287 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
5289 return ret;
5292 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5293 unsigned long size, int cpu_id)
5295 int ret = size;
5297 mutex_lock(&trace_types_lock);
5299 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5300 /* make sure, this cpu is enabled in the mask */
5301 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5302 ret = -EINVAL;
5303 goto out;
5307 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
5308 if (ret < 0)
5309 ret = -ENOMEM;
5311 out:
5312 mutex_unlock(&trace_types_lock);
5314 return ret;
5319 * tracing_update_buffers - used by tracing facility to expand ring buffers
5321 * To save on memory when the tracing is never used on a system with it
5322 * configured in. The ring buffers are set to a minimum size. But once
5323 * a user starts to use the tracing facility, then they need to grow
5324 * to their default size.
5326 * This function is to be called when a tracer is about to be used.
5328 int tracing_update_buffers(void)
5330 int ret = 0;
5332 mutex_lock(&trace_types_lock);
5333 if (!ring_buffer_expanded)
5334 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
5335 RING_BUFFER_ALL_CPUS);
5336 mutex_unlock(&trace_types_lock);
5338 return ret;
5341 struct trace_option_dentry;
5343 static void
5344 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
5347 * Used to clear out the tracer before deletion of an instance.
5348 * Must have trace_types_lock held.
5350 static void tracing_set_nop(struct trace_array *tr)
5352 if (tr->current_trace == &nop_trace)
5353 return;
5355 tr->current_trace->enabled--;
5357 if (tr->current_trace->reset)
5358 tr->current_trace->reset(tr);
5360 tr->current_trace = &nop_trace;
5363 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
5365 /* Only enable if the directory has been created already. */
5366 if (!tr->dir)
5367 return;
5369 create_trace_option_files(tr, t);
5372 static int tracing_set_tracer(struct trace_array *tr, const char *buf)
5374 struct tracer *t;
5375 #ifdef CONFIG_TRACER_MAX_TRACE
5376 bool had_max_tr;
5377 #endif
5378 int ret = 0;
5380 mutex_lock(&trace_types_lock);
5382 if (!ring_buffer_expanded) {
5383 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
5384 RING_BUFFER_ALL_CPUS);
5385 if (ret < 0)
5386 goto out;
5387 ret = 0;
5390 for (t = trace_types; t; t = t->next) {
5391 if (strcmp(t->name, buf) == 0)
5392 break;
5394 if (!t) {
5395 ret = -EINVAL;
5396 goto out;
5398 if (t == tr->current_trace)
5399 goto out;
5401 /* Some tracers won't work on kernel command line */
5402 if (system_state < SYSTEM_RUNNING && t->noboot) {
5403 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
5404 t->name);
5405 goto out;
5408 /* Some tracers are only allowed for the top level buffer */
5409 if (!trace_ok_for_array(t, tr)) {
5410 ret = -EINVAL;
5411 goto out;
5414 /* If trace pipe files are being read, we can't change the tracer */
5415 if (tr->current_trace->ref) {
5416 ret = -EBUSY;
5417 goto out;
5420 trace_branch_disable();
5422 tr->current_trace->enabled--;
5424 if (tr->current_trace->reset)
5425 tr->current_trace->reset(tr);
5427 /* Current trace needs to be nop_trace before synchronize_sched */
5428 tr->current_trace = &nop_trace;
5430 #ifdef CONFIG_TRACER_MAX_TRACE
5431 had_max_tr = tr->allocated_snapshot;
5433 if (had_max_tr && !t->use_max_tr) {
5435 * We need to make sure that the update_max_tr sees that
5436 * current_trace changed to nop_trace to keep it from
5437 * swapping the buffers after we resize it.
5438 * The update_max_tr is called from interrupts disabled
5439 * so a synchronized_sched() is sufficient.
5441 synchronize_sched();
5442 free_snapshot(tr);
5444 #endif
5446 #ifdef CONFIG_TRACER_MAX_TRACE
5447 if (t->use_max_tr && !had_max_tr) {
5448 ret = tracing_alloc_snapshot_instance(tr);
5449 if (ret < 0)
5450 goto out;
5452 #endif
5454 if (t->init) {
5455 ret = tracer_init(t, tr);
5456 if (ret)
5457 goto out;
5460 tr->current_trace = t;
5461 tr->current_trace->enabled++;
5462 trace_branch_enable(tr);
5463 out:
5464 mutex_unlock(&trace_types_lock);
5466 return ret;
5469 static ssize_t
5470 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5471 size_t cnt, loff_t *ppos)
5473 struct trace_array *tr = filp->private_data;
5474 char buf[MAX_TRACER_SIZE+1];
5475 int i;
5476 size_t ret;
5477 int err;
5479 ret = cnt;
5481 if (cnt > MAX_TRACER_SIZE)
5482 cnt = MAX_TRACER_SIZE;
5484 if (copy_from_user(buf, ubuf, cnt))
5485 return -EFAULT;
5487 buf[cnt] = 0;
5489 /* strip ending whitespace. */
5490 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5491 buf[i] = 0;
5493 err = tracing_set_tracer(tr, buf);
5494 if (err)
5495 return err;
5497 *ppos += ret;
5499 return ret;
5502 static ssize_t
5503 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5504 size_t cnt, loff_t *ppos)
5506 char buf[64];
5507 int r;
5509 r = snprintf(buf, sizeof(buf), "%ld\n",
5510 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
5511 if (r > sizeof(buf))
5512 r = sizeof(buf);
5513 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5516 static ssize_t
5517 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5518 size_t cnt, loff_t *ppos)
5520 unsigned long val;
5521 int ret;
5523 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5524 if (ret)
5525 return ret;
5527 *ptr = val * 1000;
5529 return cnt;
5532 static ssize_t
5533 tracing_thresh_read(struct file *filp, char __user *ubuf,
5534 size_t cnt, loff_t *ppos)
5536 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5539 static ssize_t
5540 tracing_thresh_write(struct file *filp, const char __user *ubuf,
5541 size_t cnt, loff_t *ppos)
5543 struct trace_array *tr = filp->private_data;
5544 int ret;
5546 mutex_lock(&trace_types_lock);
5547 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5548 if (ret < 0)
5549 goto out;
5551 if (tr->current_trace->update_thresh) {
5552 ret = tr->current_trace->update_thresh(tr);
5553 if (ret < 0)
5554 goto out;
5557 ret = cnt;
5558 out:
5559 mutex_unlock(&trace_types_lock);
5561 return ret;
5564 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
5566 static ssize_t
5567 tracing_max_lat_read(struct file *filp, char __user *ubuf,
5568 size_t cnt, loff_t *ppos)
5570 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
5573 static ssize_t
5574 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5575 size_t cnt, loff_t *ppos)
5577 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
5580 #endif
5582 static int tracing_open_pipe(struct inode *inode, struct file *filp)
5584 struct trace_array *tr = inode->i_private;
5585 struct trace_iterator *iter;
5586 int ret = 0;
5588 if (tracing_disabled)
5589 return -ENODEV;
5591 if (trace_array_get(tr) < 0)
5592 return -ENODEV;
5594 mutex_lock(&trace_types_lock);
5596 /* create a buffer to store the information to pass to userspace */
5597 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5598 if (!iter) {
5599 ret = -ENOMEM;
5600 __trace_array_put(tr);
5601 goto out;
5604 trace_seq_init(&iter->seq);
5605 iter->trace = tr->current_trace;
5607 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
5608 ret = -ENOMEM;
5609 goto fail;
5612 /* trace pipe does not show start of buffer */
5613 cpumask_setall(iter->started);
5615 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
5616 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5618 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
5619 if (trace_clocks[tr->clock_id].in_ns)
5620 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5622 iter->tr = tr;
5623 iter->trace_buffer = &tr->trace_buffer;
5624 iter->cpu_file = tracing_get_cpu(inode);
5625 mutex_init(&iter->mutex);
5626 filp->private_data = iter;
5628 if (iter->trace->pipe_open)
5629 iter->trace->pipe_open(iter);
5631 nonseekable_open(inode, filp);
5633 tr->current_trace->ref++;
5634 out:
5635 mutex_unlock(&trace_types_lock);
5636 return ret;
5638 fail:
5639 kfree(iter);
5640 __trace_array_put(tr);
5641 mutex_unlock(&trace_types_lock);
5642 return ret;
5645 static int tracing_release_pipe(struct inode *inode, struct file *file)
5647 struct trace_iterator *iter = file->private_data;
5648 struct trace_array *tr = inode->i_private;
5650 mutex_lock(&trace_types_lock);
5652 tr->current_trace->ref--;
5654 if (iter->trace->pipe_close)
5655 iter->trace->pipe_close(iter);
5657 mutex_unlock(&trace_types_lock);
5659 free_cpumask_var(iter->started);
5660 mutex_destroy(&iter->mutex);
5661 kfree(iter);
5663 trace_array_put(tr);
5665 return 0;
5668 static __poll_t
5669 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
5671 struct trace_array *tr = iter->tr;
5673 /* Iterators are static, they should be filled or empty */
5674 if (trace_buffer_iter(iter, iter->cpu_file))
5675 return EPOLLIN | EPOLLRDNORM;
5677 if (tr->trace_flags & TRACE_ITER_BLOCK)
5679 * Always select as readable when in blocking mode
5681 return EPOLLIN | EPOLLRDNORM;
5682 else
5683 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
5684 filp, poll_table);
5687 static __poll_t
5688 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5690 struct trace_iterator *iter = filp->private_data;
5692 return trace_poll(iter, filp, poll_table);
5695 /* Must be called with iter->mutex held. */
5696 static int tracing_wait_pipe(struct file *filp)
5698 struct trace_iterator *iter = filp->private_data;
5699 int ret;
5701 while (trace_empty(iter)) {
5703 if ((filp->f_flags & O_NONBLOCK)) {
5704 return -EAGAIN;
5708 * We block until we read something and tracing is disabled.
5709 * We still block if tracing is disabled, but we have never
5710 * read anything. This allows a user to cat this file, and
5711 * then enable tracing. But after we have read something,
5712 * we give an EOF when tracing is again disabled.
5714 * iter->pos will be 0 if we haven't read anything.
5716 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
5717 break;
5719 mutex_unlock(&iter->mutex);
5721 ret = wait_on_pipe(iter, false);
5723 mutex_lock(&iter->mutex);
5725 if (ret)
5726 return ret;
5729 return 1;
5733 * Consumer reader.
5735 static ssize_t
5736 tracing_read_pipe(struct file *filp, char __user *ubuf,
5737 size_t cnt, loff_t *ppos)
5739 struct trace_iterator *iter = filp->private_data;
5740 ssize_t sret;
5743 * Avoid more than one consumer on a single file descriptor
5744 * This is just a matter of traces coherency, the ring buffer itself
5745 * is protected.
5747 mutex_lock(&iter->mutex);
5749 /* return any leftover data */
5750 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5751 if (sret != -EBUSY)
5752 goto out;
5754 trace_seq_init(&iter->seq);
5756 if (iter->trace->read) {
5757 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
5758 if (sret)
5759 goto out;
5762 waitagain:
5763 sret = tracing_wait_pipe(filp);
5764 if (sret <= 0)
5765 goto out;
5767 /* stop when tracing is finished */
5768 if (trace_empty(iter)) {
5769 sret = 0;
5770 goto out;
5773 if (cnt >= PAGE_SIZE)
5774 cnt = PAGE_SIZE - 1;
5776 /* reset all but tr, trace, and overruns */
5777 memset(&iter->seq, 0,
5778 sizeof(struct trace_iterator) -
5779 offsetof(struct trace_iterator, seq));
5780 cpumask_clear(iter->started);
5781 trace_seq_init(&iter->seq);
5782 iter->pos = -1;
5784 trace_event_read_lock();
5785 trace_access_lock(iter->cpu_file);
5786 while (trace_find_next_entry_inc(iter) != NULL) {
5787 enum print_line_t ret;
5788 int save_len = iter->seq.seq.len;
5790 ret = print_trace_line(iter);
5791 if (ret == TRACE_TYPE_PARTIAL_LINE) {
5792 /* don't print partial lines */
5793 iter->seq.seq.len = save_len;
5794 break;
5796 if (ret != TRACE_TYPE_NO_CONSUME)
5797 trace_consume(iter);
5799 if (trace_seq_used(&iter->seq) >= cnt)
5800 break;
5803 * Setting the full flag means we reached the trace_seq buffer
5804 * size and we should leave by partial output condition above.
5805 * One of the trace_seq_* functions is not used properly.
5807 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
5808 iter->ent->type);
5810 trace_access_unlock(iter->cpu_file);
5811 trace_event_read_unlock();
5813 /* Now copy what we have to the user */
5814 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5815 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
5816 trace_seq_init(&iter->seq);
5819 * If there was nothing to send to user, in spite of consuming trace
5820 * entries, go back to wait for more entries.
5822 if (sret == -EBUSY)
5823 goto waitagain;
5825 out:
5826 mutex_unlock(&iter->mutex);
5828 return sret;
5831 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
5832 unsigned int idx)
5834 __free_page(spd->pages[idx]);
5837 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
5838 .can_merge = 0,
5839 .confirm = generic_pipe_buf_confirm,
5840 .release = generic_pipe_buf_release,
5841 .steal = generic_pipe_buf_steal,
5842 .get = generic_pipe_buf_get,
5845 static size_t
5846 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
5848 size_t count;
5849 int save_len;
5850 int ret;
5852 /* Seq buffer is page-sized, exactly what we need. */
5853 for (;;) {
5854 save_len = iter->seq.seq.len;
5855 ret = print_trace_line(iter);
5857 if (trace_seq_has_overflowed(&iter->seq)) {
5858 iter->seq.seq.len = save_len;
5859 break;
5863 * This should not be hit, because it should only
5864 * be set if the iter->seq overflowed. But check it
5865 * anyway to be safe.
5867 if (ret == TRACE_TYPE_PARTIAL_LINE) {
5868 iter->seq.seq.len = save_len;
5869 break;
5872 count = trace_seq_used(&iter->seq) - save_len;
5873 if (rem < count) {
5874 rem = 0;
5875 iter->seq.seq.len = save_len;
5876 break;
5879 if (ret != TRACE_TYPE_NO_CONSUME)
5880 trace_consume(iter);
5881 rem -= count;
5882 if (!trace_find_next_entry_inc(iter)) {
5883 rem = 0;
5884 iter->ent = NULL;
5885 break;
5889 return rem;
5892 static ssize_t tracing_splice_read_pipe(struct file *filp,
5893 loff_t *ppos,
5894 struct pipe_inode_info *pipe,
5895 size_t len,
5896 unsigned int flags)
5898 struct page *pages_def[PIPE_DEF_BUFFERS];
5899 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5900 struct trace_iterator *iter = filp->private_data;
5901 struct splice_pipe_desc spd = {
5902 .pages = pages_def,
5903 .partial = partial_def,
5904 .nr_pages = 0, /* This gets updated below. */
5905 .nr_pages_max = PIPE_DEF_BUFFERS,
5906 .ops = &tracing_pipe_buf_ops,
5907 .spd_release = tracing_spd_release_pipe,
5909 ssize_t ret;
5910 size_t rem;
5911 unsigned int i;
5913 if (splice_grow_spd(pipe, &spd))
5914 return -ENOMEM;
5916 mutex_lock(&iter->mutex);
5918 if (iter->trace->splice_read) {
5919 ret = iter->trace->splice_read(iter, filp,
5920 ppos, pipe, len, flags);
5921 if (ret)
5922 goto out_err;
5925 ret = tracing_wait_pipe(filp);
5926 if (ret <= 0)
5927 goto out_err;
5929 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
5930 ret = -EFAULT;
5931 goto out_err;
5934 trace_event_read_lock();
5935 trace_access_lock(iter->cpu_file);
5937 /* Fill as many pages as possible. */
5938 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
5939 spd.pages[i] = alloc_page(GFP_KERNEL);
5940 if (!spd.pages[i])
5941 break;
5943 rem = tracing_fill_pipe_page(rem, iter);
5945 /* Copy the data into the page, so we can start over. */
5946 ret = trace_seq_to_buffer(&iter->seq,
5947 page_address(spd.pages[i]),
5948 trace_seq_used(&iter->seq));
5949 if (ret < 0) {
5950 __free_page(spd.pages[i]);
5951 break;
5953 spd.partial[i].offset = 0;
5954 spd.partial[i].len = trace_seq_used(&iter->seq);
5956 trace_seq_init(&iter->seq);
5959 trace_access_unlock(iter->cpu_file);
5960 trace_event_read_unlock();
5961 mutex_unlock(&iter->mutex);
5963 spd.nr_pages = i;
5965 if (i)
5966 ret = splice_to_pipe(pipe, &spd);
5967 else
5968 ret = 0;
5969 out:
5970 splice_shrink_spd(&spd);
5971 return ret;
5973 out_err:
5974 mutex_unlock(&iter->mutex);
5975 goto out;
5978 static ssize_t
5979 tracing_entries_read(struct file *filp, char __user *ubuf,
5980 size_t cnt, loff_t *ppos)
5982 struct inode *inode = file_inode(filp);
5983 struct trace_array *tr = inode->i_private;
5984 int cpu = tracing_get_cpu(inode);
5985 char buf[64];
5986 int r = 0;
5987 ssize_t ret;
5989 mutex_lock(&trace_types_lock);
5991 if (cpu == RING_BUFFER_ALL_CPUS) {
5992 int cpu, buf_size_same;
5993 unsigned long size;
5995 size = 0;
5996 buf_size_same = 1;
5997 /* check if all cpu sizes are same */
5998 for_each_tracing_cpu(cpu) {
5999 /* fill in the size from first enabled cpu */
6000 if (size == 0)
6001 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
6002 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
6003 buf_size_same = 0;
6004 break;
6008 if (buf_size_same) {
6009 if (!ring_buffer_expanded)
6010 r = sprintf(buf, "%lu (expanded: %lu)\n",
6011 size >> 10,
6012 trace_buf_size >> 10);
6013 else
6014 r = sprintf(buf, "%lu\n", size >> 10);
6015 } else
6016 r = sprintf(buf, "X\n");
6017 } else
6018 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
6020 mutex_unlock(&trace_types_lock);
6022 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6023 return ret;
6026 static ssize_t
6027 tracing_entries_write(struct file *filp, const char __user *ubuf,
6028 size_t cnt, loff_t *ppos)
6030 struct inode *inode = file_inode(filp);
6031 struct trace_array *tr = inode->i_private;
6032 unsigned long val;
6033 int ret;
6035 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6036 if (ret)
6037 return ret;
6039 /* must have at least 1 entry */
6040 if (!val)
6041 return -EINVAL;
6043 /* value is in KB */
6044 val <<= 10;
6045 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
6046 if (ret < 0)
6047 return ret;
6049 *ppos += cnt;
6051 return cnt;
6054 static ssize_t
6055 tracing_total_entries_read(struct file *filp, char __user *ubuf,
6056 size_t cnt, loff_t *ppos)
6058 struct trace_array *tr = filp->private_data;
6059 char buf[64];
6060 int r, cpu;
6061 unsigned long size = 0, expanded_size = 0;
6063 mutex_lock(&trace_types_lock);
6064 for_each_tracing_cpu(cpu) {
6065 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
6066 if (!ring_buffer_expanded)
6067 expanded_size += trace_buf_size >> 10;
6069 if (ring_buffer_expanded)
6070 r = sprintf(buf, "%lu\n", size);
6071 else
6072 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6073 mutex_unlock(&trace_types_lock);
6075 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6078 static ssize_t
6079 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6080 size_t cnt, loff_t *ppos)
6083 * There is no need to read what the user has written, this function
6084 * is just to make sure that there is no error when "echo" is used
6087 *ppos += cnt;
6089 return cnt;
6092 static int
6093 tracing_free_buffer_release(struct inode *inode, struct file *filp)
6095 struct trace_array *tr = inode->i_private;
6097 /* disable tracing ? */
6098 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
6099 tracer_tracing_off(tr);
6100 /* resize the ring buffer to 0 */
6101 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
6103 trace_array_put(tr);
6105 return 0;
6108 static ssize_t
6109 tracing_mark_write(struct file *filp, const char __user *ubuf,
6110 size_t cnt, loff_t *fpos)
6112 struct trace_array *tr = filp->private_data;
6113 struct ring_buffer_event *event;
6114 enum event_trigger_type tt = ETT_NONE;
6115 struct ring_buffer *buffer;
6116 struct print_entry *entry;
6117 unsigned long irq_flags;
6118 const char faulted[] = "<faulted>";
6119 ssize_t written;
6120 int size;
6121 int len;
6123 /* Used in tracing_mark_raw_write() as well */
6124 #define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */
6126 if (tracing_disabled)
6127 return -EINVAL;
6129 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6130 return -EINVAL;
6132 if (cnt > TRACE_BUF_SIZE)
6133 cnt = TRACE_BUF_SIZE;
6135 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6137 local_save_flags(irq_flags);
6138 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6140 /* If less than "<faulted>", then make sure we can still add that */
6141 if (cnt < FAULTED_SIZE)
6142 size += FAULTED_SIZE - cnt;
6144 buffer = tr->trace_buffer.buffer;
6145 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6146 irq_flags, preempt_count());
6147 if (unlikely(!event))
6148 /* Ring buffer disabled, return as if not open for write */
6149 return -EBADF;
6151 entry = ring_buffer_event_data(event);
6152 entry->ip = _THIS_IP_;
6154 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6155 if (len) {
6156 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6157 cnt = FAULTED_SIZE;
6158 written = -EFAULT;
6159 } else
6160 written = cnt;
6161 len = cnt;
6163 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6164 /* do not add \n before testing triggers, but add \0 */
6165 entry->buf[cnt] = '\0';
6166 tt = event_triggers_call(tr->trace_marker_file, entry, event);
6169 if (entry->buf[cnt - 1] != '\n') {
6170 entry->buf[cnt] = '\n';
6171 entry->buf[cnt + 1] = '\0';
6172 } else
6173 entry->buf[cnt] = '\0';
6175 __buffer_unlock_commit(buffer, event);
6177 if (tt)
6178 event_triggers_post_call(tr->trace_marker_file, tt);
6180 if (written > 0)
6181 *fpos += written;
6183 return written;
6186 /* Limit it for now to 3K (including tag) */
6187 #define RAW_DATA_MAX_SIZE (1024*3)
6189 static ssize_t
6190 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6191 size_t cnt, loff_t *fpos)
6193 struct trace_array *tr = filp->private_data;
6194 struct ring_buffer_event *event;
6195 struct ring_buffer *buffer;
6196 struct raw_data_entry *entry;
6197 const char faulted[] = "<faulted>";
6198 unsigned long irq_flags;
6199 ssize_t written;
6200 int size;
6201 int len;
6203 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6205 if (tracing_disabled)
6206 return -EINVAL;
6208 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6209 return -EINVAL;
6211 /* The marker must at least have a tag id */
6212 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6213 return -EINVAL;
6215 if (cnt > TRACE_BUF_SIZE)
6216 cnt = TRACE_BUF_SIZE;
6218 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6220 local_save_flags(irq_flags);
6221 size = sizeof(*entry) + cnt;
6222 if (cnt < FAULT_SIZE_ID)
6223 size += FAULT_SIZE_ID - cnt;
6225 buffer = tr->trace_buffer.buffer;
6226 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6227 irq_flags, preempt_count());
6228 if (!event)
6229 /* Ring buffer disabled, return as if not open for write */
6230 return -EBADF;
6232 entry = ring_buffer_event_data(event);
6234 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6235 if (len) {
6236 entry->id = -1;
6237 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6238 written = -EFAULT;
6239 } else
6240 written = cnt;
6242 __buffer_unlock_commit(buffer, event);
6244 if (written > 0)
6245 *fpos += written;
6247 return written;
6250 static int tracing_clock_show(struct seq_file *m, void *v)
6252 struct trace_array *tr = m->private;
6253 int i;
6255 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
6256 seq_printf(m,
6257 "%s%s%s%s", i ? " " : "",
6258 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6259 i == tr->clock_id ? "]" : "");
6260 seq_putc(m, '\n');
6262 return 0;
6265 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
6267 int i;
6269 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6270 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6271 break;
6273 if (i == ARRAY_SIZE(trace_clocks))
6274 return -EINVAL;
6276 mutex_lock(&trace_types_lock);
6278 tr->clock_id = i;
6280 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
6283 * New clock may not be consistent with the previous clock.
6284 * Reset the buffer so that it doesn't have incomparable timestamps.
6286 tracing_reset_online_cpus(&tr->trace_buffer);
6288 #ifdef CONFIG_TRACER_MAX_TRACE
6289 if (tr->max_buffer.buffer)
6290 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
6291 tracing_reset_online_cpus(&tr->max_buffer);
6292 #endif
6294 mutex_unlock(&trace_types_lock);
6296 return 0;
6299 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6300 size_t cnt, loff_t *fpos)
6302 struct seq_file *m = filp->private_data;
6303 struct trace_array *tr = m->private;
6304 char buf[64];
6305 const char *clockstr;
6306 int ret;
6308 if (cnt >= sizeof(buf))
6309 return -EINVAL;
6311 if (copy_from_user(buf, ubuf, cnt))
6312 return -EFAULT;
6314 buf[cnt] = 0;
6316 clockstr = strstrip(buf);
6318 ret = tracing_set_clock(tr, clockstr);
6319 if (ret)
6320 return ret;
6322 *fpos += cnt;
6324 return cnt;
6327 static int tracing_clock_open(struct inode *inode, struct file *file)
6329 struct trace_array *tr = inode->i_private;
6330 int ret;
6332 if (tracing_disabled)
6333 return -ENODEV;
6335 if (trace_array_get(tr))
6336 return -ENODEV;
6338 ret = single_open(file, tracing_clock_show, inode->i_private);
6339 if (ret < 0)
6340 trace_array_put(tr);
6342 return ret;
6345 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
6347 struct trace_array *tr = m->private;
6349 mutex_lock(&trace_types_lock);
6351 if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer))
6352 seq_puts(m, "delta [absolute]\n");
6353 else
6354 seq_puts(m, "[delta] absolute\n");
6356 mutex_unlock(&trace_types_lock);
6358 return 0;
6361 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
6363 struct trace_array *tr = inode->i_private;
6364 int ret;
6366 if (tracing_disabled)
6367 return -ENODEV;
6369 if (trace_array_get(tr))
6370 return -ENODEV;
6372 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
6373 if (ret < 0)
6374 trace_array_put(tr);
6376 return ret;
6379 int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
6381 int ret = 0;
6383 mutex_lock(&trace_types_lock);
6385 if (abs && tr->time_stamp_abs_ref++)
6386 goto out;
6388 if (!abs) {
6389 if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
6390 ret = -EINVAL;
6391 goto out;
6394 if (--tr->time_stamp_abs_ref)
6395 goto out;
6398 ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs);
6400 #ifdef CONFIG_TRACER_MAX_TRACE
6401 if (tr->max_buffer.buffer)
6402 ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
6403 #endif
6404 out:
6405 mutex_unlock(&trace_types_lock);
6407 return ret;
6410 struct ftrace_buffer_info {
6411 struct trace_iterator iter;
6412 void *spare;
6413 unsigned int spare_cpu;
6414 unsigned int read;
6417 #ifdef CONFIG_TRACER_SNAPSHOT
6418 static int tracing_snapshot_open(struct inode *inode, struct file *file)
6420 struct trace_array *tr = inode->i_private;
6421 struct trace_iterator *iter;
6422 struct seq_file *m;
6423 int ret = 0;
6425 if (trace_array_get(tr) < 0)
6426 return -ENODEV;
6428 if (file->f_mode & FMODE_READ) {
6429 iter = __tracing_open(inode, file, true);
6430 if (IS_ERR(iter))
6431 ret = PTR_ERR(iter);
6432 } else {
6433 /* Writes still need the seq_file to hold the private data */
6434 ret = -ENOMEM;
6435 m = kzalloc(sizeof(*m), GFP_KERNEL);
6436 if (!m)
6437 goto out;
6438 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6439 if (!iter) {
6440 kfree(m);
6441 goto out;
6443 ret = 0;
6445 iter->tr = tr;
6446 iter->trace_buffer = &tr->max_buffer;
6447 iter->cpu_file = tracing_get_cpu(inode);
6448 m->private = iter;
6449 file->private_data = m;
6451 out:
6452 if (ret < 0)
6453 trace_array_put(tr);
6455 return ret;
6458 static ssize_t
6459 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6460 loff_t *ppos)
6462 struct seq_file *m = filp->private_data;
6463 struct trace_iterator *iter = m->private;
6464 struct trace_array *tr = iter->tr;
6465 unsigned long val;
6466 int ret;
6468 ret = tracing_update_buffers();
6469 if (ret < 0)
6470 return ret;
6472 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6473 if (ret)
6474 return ret;
6476 mutex_lock(&trace_types_lock);
6478 if (tr->current_trace->use_max_tr) {
6479 ret = -EBUSY;
6480 goto out;
6483 switch (val) {
6484 case 0:
6485 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6486 ret = -EINVAL;
6487 break;
6489 if (tr->allocated_snapshot)
6490 free_snapshot(tr);
6491 break;
6492 case 1:
6493 /* Only allow per-cpu swap if the ring buffer supports it */
6494 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6495 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6496 ret = -EINVAL;
6497 break;
6499 #endif
6500 if (tr->allocated_snapshot)
6501 ret = resize_buffer_duplicate_size(&tr->max_buffer,
6502 &tr->trace_buffer, iter->cpu_file);
6503 else
6504 ret = tracing_alloc_snapshot_instance(tr);
6505 if (ret < 0)
6506 break;
6507 local_irq_disable();
6508 /* Now, we're going to swap */
6509 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6510 update_max_tr(tr, current, smp_processor_id());
6511 else
6512 update_max_tr_single(tr, current, iter->cpu_file);
6513 local_irq_enable();
6514 break;
6515 default:
6516 if (tr->allocated_snapshot) {
6517 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6518 tracing_reset_online_cpus(&tr->max_buffer);
6519 else
6520 tracing_reset(&tr->max_buffer, iter->cpu_file);
6522 break;
6525 if (ret >= 0) {
6526 *ppos += cnt;
6527 ret = cnt;
6529 out:
6530 mutex_unlock(&trace_types_lock);
6531 return ret;
6534 static int tracing_snapshot_release(struct inode *inode, struct file *file)
6536 struct seq_file *m = file->private_data;
6537 int ret;
6539 ret = tracing_release(inode, file);
6541 if (file->f_mode & FMODE_READ)
6542 return ret;
6544 /* If write only, the seq_file is just a stub */
6545 if (m)
6546 kfree(m->private);
6547 kfree(m);
6549 return 0;
6552 static int tracing_buffers_open(struct inode *inode, struct file *filp);
6553 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6554 size_t count, loff_t *ppos);
6555 static int tracing_buffers_release(struct inode *inode, struct file *file);
6556 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6557 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6559 static int snapshot_raw_open(struct inode *inode, struct file *filp)
6561 struct ftrace_buffer_info *info;
6562 int ret;
6564 ret = tracing_buffers_open(inode, filp);
6565 if (ret < 0)
6566 return ret;
6568 info = filp->private_data;
6570 if (info->iter.trace->use_max_tr) {
6571 tracing_buffers_release(inode, filp);
6572 return -EBUSY;
6575 info->iter.snapshot = true;
6576 info->iter.trace_buffer = &info->iter.tr->max_buffer;
6578 return ret;
6581 #endif /* CONFIG_TRACER_SNAPSHOT */
6584 static const struct file_operations tracing_thresh_fops = {
6585 .open = tracing_open_generic,
6586 .read = tracing_thresh_read,
6587 .write = tracing_thresh_write,
6588 .llseek = generic_file_llseek,
6591 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6592 static const struct file_operations tracing_max_lat_fops = {
6593 .open = tracing_open_generic,
6594 .read = tracing_max_lat_read,
6595 .write = tracing_max_lat_write,
6596 .llseek = generic_file_llseek,
6598 #endif
6600 static const struct file_operations set_tracer_fops = {
6601 .open = tracing_open_generic,
6602 .read = tracing_set_trace_read,
6603 .write = tracing_set_trace_write,
6604 .llseek = generic_file_llseek,
6607 static const struct file_operations tracing_pipe_fops = {
6608 .open = tracing_open_pipe,
6609 .poll = tracing_poll_pipe,
6610 .read = tracing_read_pipe,
6611 .splice_read = tracing_splice_read_pipe,
6612 .release = tracing_release_pipe,
6613 .llseek = no_llseek,
6616 static const struct file_operations tracing_entries_fops = {
6617 .open = tracing_open_generic_tr,
6618 .read = tracing_entries_read,
6619 .write = tracing_entries_write,
6620 .llseek = generic_file_llseek,
6621 .release = tracing_release_generic_tr,
6624 static const struct file_operations tracing_total_entries_fops = {
6625 .open = tracing_open_generic_tr,
6626 .read = tracing_total_entries_read,
6627 .llseek = generic_file_llseek,
6628 .release = tracing_release_generic_tr,
6631 static const struct file_operations tracing_free_buffer_fops = {
6632 .open = tracing_open_generic_tr,
6633 .write = tracing_free_buffer_write,
6634 .release = tracing_free_buffer_release,
6637 static const struct file_operations tracing_mark_fops = {
6638 .open = tracing_open_generic_tr,
6639 .write = tracing_mark_write,
6640 .llseek = generic_file_llseek,
6641 .release = tracing_release_generic_tr,
6644 static const struct file_operations tracing_mark_raw_fops = {
6645 .open = tracing_open_generic_tr,
6646 .write = tracing_mark_raw_write,
6647 .llseek = generic_file_llseek,
6648 .release = tracing_release_generic_tr,
6651 static const struct file_operations trace_clock_fops = {
6652 .open = tracing_clock_open,
6653 .read = seq_read,
6654 .llseek = seq_lseek,
6655 .release = tracing_single_release_tr,
6656 .write = tracing_clock_write,
6659 static const struct file_operations trace_time_stamp_mode_fops = {
6660 .open = tracing_time_stamp_mode_open,
6661 .read = seq_read,
6662 .llseek = seq_lseek,
6663 .release = tracing_single_release_tr,
6666 #ifdef CONFIG_TRACER_SNAPSHOT
6667 static const struct file_operations snapshot_fops = {
6668 .open = tracing_snapshot_open,
6669 .read = seq_read,
6670 .write = tracing_snapshot_write,
6671 .llseek = tracing_lseek,
6672 .release = tracing_snapshot_release,
6675 static const struct file_operations snapshot_raw_fops = {
6676 .open = snapshot_raw_open,
6677 .read = tracing_buffers_read,
6678 .release = tracing_buffers_release,
6679 .splice_read = tracing_buffers_splice_read,
6680 .llseek = no_llseek,
6683 #endif /* CONFIG_TRACER_SNAPSHOT */
6685 static int tracing_buffers_open(struct inode *inode, struct file *filp)
6687 struct trace_array *tr = inode->i_private;
6688 struct ftrace_buffer_info *info;
6689 int ret;
6691 if (tracing_disabled)
6692 return -ENODEV;
6694 if (trace_array_get(tr) < 0)
6695 return -ENODEV;
6697 info = kzalloc(sizeof(*info), GFP_KERNEL);
6698 if (!info) {
6699 trace_array_put(tr);
6700 return -ENOMEM;
6703 mutex_lock(&trace_types_lock);
6705 info->iter.tr = tr;
6706 info->iter.cpu_file = tracing_get_cpu(inode);
6707 info->iter.trace = tr->current_trace;
6708 info->iter.trace_buffer = &tr->trace_buffer;
6709 info->spare = NULL;
6710 /* Force reading ring buffer for first read */
6711 info->read = (unsigned int)-1;
6713 filp->private_data = info;
6715 tr->current_trace->ref++;
6717 mutex_unlock(&trace_types_lock);
6719 ret = nonseekable_open(inode, filp);
6720 if (ret < 0)
6721 trace_array_put(tr);
6723 return ret;
6726 static __poll_t
6727 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
6729 struct ftrace_buffer_info *info = filp->private_data;
6730 struct trace_iterator *iter = &info->iter;
6732 return trace_poll(iter, filp, poll_table);
6735 static ssize_t
6736 tracing_buffers_read(struct file *filp, char __user *ubuf,
6737 size_t count, loff_t *ppos)
6739 struct ftrace_buffer_info *info = filp->private_data;
6740 struct trace_iterator *iter = &info->iter;
6741 ssize_t ret = 0;
6742 ssize_t size;
6744 if (!count)
6745 return 0;
6747 #ifdef CONFIG_TRACER_MAX_TRACE
6748 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6749 return -EBUSY;
6750 #endif
6752 if (!info->spare) {
6753 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
6754 iter->cpu_file);
6755 if (IS_ERR(info->spare)) {
6756 ret = PTR_ERR(info->spare);
6757 info->spare = NULL;
6758 } else {
6759 info->spare_cpu = iter->cpu_file;
6762 if (!info->spare)
6763 return ret;
6765 /* Do we have previous read data to read? */
6766 if (info->read < PAGE_SIZE)
6767 goto read;
6769 again:
6770 trace_access_lock(iter->cpu_file);
6771 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
6772 &info->spare,
6773 count,
6774 iter->cpu_file, 0);
6775 trace_access_unlock(iter->cpu_file);
6777 if (ret < 0) {
6778 if (trace_empty(iter)) {
6779 if ((filp->f_flags & O_NONBLOCK))
6780 return -EAGAIN;
6782 ret = wait_on_pipe(iter, false);
6783 if (ret)
6784 return ret;
6786 goto again;
6788 return 0;
6791 info->read = 0;
6792 read:
6793 size = PAGE_SIZE - info->read;
6794 if (size > count)
6795 size = count;
6797 ret = copy_to_user(ubuf, info->spare + info->read, size);
6798 if (ret == size)
6799 return -EFAULT;
6801 size -= ret;
6803 *ppos += size;
6804 info->read += size;
6806 return size;
6809 static int tracing_buffers_release(struct inode *inode, struct file *file)
6811 struct ftrace_buffer_info *info = file->private_data;
6812 struct trace_iterator *iter = &info->iter;
6814 mutex_lock(&trace_types_lock);
6816 iter->tr->current_trace->ref--;
6818 __trace_array_put(iter->tr);
6820 if (info->spare)
6821 ring_buffer_free_read_page(iter->trace_buffer->buffer,
6822 info->spare_cpu, info->spare);
6823 kfree(info);
6825 mutex_unlock(&trace_types_lock);
6827 return 0;
6830 struct buffer_ref {
6831 struct ring_buffer *buffer;
6832 void *page;
6833 int cpu;
6834 refcount_t refcount;
6837 static void buffer_ref_release(struct buffer_ref *ref)
6839 if (!refcount_dec_and_test(&ref->refcount))
6840 return;
6841 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
6842 kfree(ref);
6845 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
6846 struct pipe_buffer *buf)
6848 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6850 buffer_ref_release(ref);
6851 buf->private = 0;
6854 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
6855 struct pipe_buffer *buf)
6857 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6859 if (refcount_read(&ref->refcount) > INT_MAX/2)
6860 return false;
6862 refcount_inc(&ref->refcount);
6863 return true;
6866 /* Pipe buffer operations for a buffer. */
6867 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
6868 .can_merge = 0,
6869 .confirm = generic_pipe_buf_confirm,
6870 .release = buffer_pipe_buf_release,
6871 .steal = generic_pipe_buf_nosteal,
6872 .get = buffer_pipe_buf_get,
6876 * Callback from splice_to_pipe(), if we need to release some pages
6877 * at the end of the spd in case we error'ed out in filling the pipe.
6879 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
6881 struct buffer_ref *ref =
6882 (struct buffer_ref *)spd->partial[i].private;
6884 buffer_ref_release(ref);
6885 spd->partial[i].private = 0;
6888 static ssize_t
6889 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6890 struct pipe_inode_info *pipe, size_t len,
6891 unsigned int flags)
6893 struct ftrace_buffer_info *info = file->private_data;
6894 struct trace_iterator *iter = &info->iter;
6895 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6896 struct page *pages_def[PIPE_DEF_BUFFERS];
6897 struct splice_pipe_desc spd = {
6898 .pages = pages_def,
6899 .partial = partial_def,
6900 .nr_pages_max = PIPE_DEF_BUFFERS,
6901 .ops = &buffer_pipe_buf_ops,
6902 .spd_release = buffer_spd_release,
6904 struct buffer_ref *ref;
6905 int entries, i;
6906 ssize_t ret = 0;
6908 #ifdef CONFIG_TRACER_MAX_TRACE
6909 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6910 return -EBUSY;
6911 #endif
6913 if (*ppos & (PAGE_SIZE - 1))
6914 return -EINVAL;
6916 if (len & (PAGE_SIZE - 1)) {
6917 if (len < PAGE_SIZE)
6918 return -EINVAL;
6919 len &= PAGE_MASK;
6922 if (splice_grow_spd(pipe, &spd))
6923 return -ENOMEM;
6925 again:
6926 trace_access_lock(iter->cpu_file);
6927 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
6929 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
6930 struct page *page;
6931 int r;
6933 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
6934 if (!ref) {
6935 ret = -ENOMEM;
6936 break;
6939 refcount_set(&ref->refcount, 1);
6940 ref->buffer = iter->trace_buffer->buffer;
6941 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
6942 if (IS_ERR(ref->page)) {
6943 ret = PTR_ERR(ref->page);
6944 ref->page = NULL;
6945 kfree(ref);
6946 break;
6948 ref->cpu = iter->cpu_file;
6950 r = ring_buffer_read_page(ref->buffer, &ref->page,
6951 len, iter->cpu_file, 1);
6952 if (r < 0) {
6953 ring_buffer_free_read_page(ref->buffer, ref->cpu,
6954 ref->page);
6955 kfree(ref);
6956 break;
6959 page = virt_to_page(ref->page);
6961 spd.pages[i] = page;
6962 spd.partial[i].len = PAGE_SIZE;
6963 spd.partial[i].offset = 0;
6964 spd.partial[i].private = (unsigned long)ref;
6965 spd.nr_pages++;
6966 *ppos += PAGE_SIZE;
6968 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
6971 trace_access_unlock(iter->cpu_file);
6972 spd.nr_pages = i;
6974 /* did we read anything? */
6975 if (!spd.nr_pages) {
6976 if (ret)
6977 goto out;
6979 ret = -EAGAIN;
6980 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
6981 goto out;
6983 ret = wait_on_pipe(iter, true);
6984 if (ret)
6985 goto out;
6987 goto again;
6990 ret = splice_to_pipe(pipe, &spd);
6991 out:
6992 splice_shrink_spd(&spd);
6994 return ret;
6997 static const struct file_operations tracing_buffers_fops = {
6998 .open = tracing_buffers_open,
6999 .read = tracing_buffers_read,
7000 .poll = tracing_buffers_poll,
7001 .release = tracing_buffers_release,
7002 .splice_read = tracing_buffers_splice_read,
7003 .llseek = no_llseek,
7006 static ssize_t
7007 tracing_stats_read(struct file *filp, char __user *ubuf,
7008 size_t count, loff_t *ppos)
7010 struct inode *inode = file_inode(filp);
7011 struct trace_array *tr = inode->i_private;
7012 struct trace_buffer *trace_buf = &tr->trace_buffer;
7013 int cpu = tracing_get_cpu(inode);
7014 struct trace_seq *s;
7015 unsigned long cnt;
7016 unsigned long long t;
7017 unsigned long usec_rem;
7019 s = kmalloc(sizeof(*s), GFP_KERNEL);
7020 if (!s)
7021 return -ENOMEM;
7023 trace_seq_init(s);
7025 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
7026 trace_seq_printf(s, "entries: %ld\n", cnt);
7028 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
7029 trace_seq_printf(s, "overrun: %ld\n", cnt);
7031 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
7032 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
7034 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
7035 trace_seq_printf(s, "bytes: %ld\n", cnt);
7037 if (trace_clocks[tr->clock_id].in_ns) {
7038 /* local or global for trace_clock */
7039 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7040 usec_rem = do_div(t, USEC_PER_SEC);
7041 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
7042 t, usec_rem);
7044 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
7045 usec_rem = do_div(t, USEC_PER_SEC);
7046 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
7047 } else {
7048 /* counter or tsc mode for trace_clock */
7049 trace_seq_printf(s, "oldest event ts: %llu\n",
7050 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7052 trace_seq_printf(s, "now ts: %llu\n",
7053 ring_buffer_time_stamp(trace_buf->buffer, cpu));
7056 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
7057 trace_seq_printf(s, "dropped events: %ld\n", cnt);
7059 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
7060 trace_seq_printf(s, "read events: %ld\n", cnt);
7062 count = simple_read_from_buffer(ubuf, count, ppos,
7063 s->buffer, trace_seq_used(s));
7065 kfree(s);
7067 return count;
7070 static const struct file_operations tracing_stats_fops = {
7071 .open = tracing_open_generic_tr,
7072 .read = tracing_stats_read,
7073 .llseek = generic_file_llseek,
7074 .release = tracing_release_generic_tr,
7077 #ifdef CONFIG_DYNAMIC_FTRACE
7079 static ssize_t
7080 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
7081 size_t cnt, loff_t *ppos)
7083 unsigned long *p = filp->private_data;
7084 char buf[64]; /* Not too big for a shallow stack */
7085 int r;
7087 r = scnprintf(buf, 63, "%ld", *p);
7088 buf[r++] = '\n';
7090 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7093 static const struct file_operations tracing_dyn_info_fops = {
7094 .open = tracing_open_generic,
7095 .read = tracing_read_dyn_info,
7096 .llseek = generic_file_llseek,
7098 #endif /* CONFIG_DYNAMIC_FTRACE */
7100 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
7101 static void
7102 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
7103 struct trace_array *tr, struct ftrace_probe_ops *ops,
7104 void *data)
7106 tracing_snapshot_instance(tr);
7109 static void
7110 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
7111 struct trace_array *tr, struct ftrace_probe_ops *ops,
7112 void *data)
7114 struct ftrace_func_mapper *mapper = data;
7115 long *count = NULL;
7117 if (mapper)
7118 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7120 if (count) {
7122 if (*count <= 0)
7123 return;
7125 (*count)--;
7128 tracing_snapshot_instance(tr);
7131 static int
7132 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7133 struct ftrace_probe_ops *ops, void *data)
7135 struct ftrace_func_mapper *mapper = data;
7136 long *count = NULL;
7138 seq_printf(m, "%ps:", (void *)ip);
7140 seq_puts(m, "snapshot");
7142 if (mapper)
7143 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7145 if (count)
7146 seq_printf(m, ":count=%ld\n", *count);
7147 else
7148 seq_puts(m, ":unlimited\n");
7150 return 0;
7153 static int
7154 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
7155 unsigned long ip, void *init_data, void **data)
7157 struct ftrace_func_mapper *mapper = *data;
7159 if (!mapper) {
7160 mapper = allocate_ftrace_func_mapper();
7161 if (!mapper)
7162 return -ENOMEM;
7163 *data = mapper;
7166 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
7169 static void
7170 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
7171 unsigned long ip, void *data)
7173 struct ftrace_func_mapper *mapper = data;
7175 if (!ip) {
7176 if (!mapper)
7177 return;
7178 free_ftrace_func_mapper(mapper, NULL);
7179 return;
7182 ftrace_func_mapper_remove_ip(mapper, ip);
7185 static struct ftrace_probe_ops snapshot_probe_ops = {
7186 .func = ftrace_snapshot,
7187 .print = ftrace_snapshot_print,
7190 static struct ftrace_probe_ops snapshot_count_probe_ops = {
7191 .func = ftrace_count_snapshot,
7192 .print = ftrace_snapshot_print,
7193 .init = ftrace_snapshot_init,
7194 .free = ftrace_snapshot_free,
7197 static int
7198 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
7199 char *glob, char *cmd, char *param, int enable)
7201 struct ftrace_probe_ops *ops;
7202 void *count = (void *)-1;
7203 char *number;
7204 int ret;
7206 if (!tr)
7207 return -ENODEV;
7209 /* hash funcs only work with set_ftrace_filter */
7210 if (!enable)
7211 return -EINVAL;
7213 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
7215 if (glob[0] == '!')
7216 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
7218 if (!param)
7219 goto out_reg;
7221 number = strsep(&param, ":");
7223 if (!strlen(number))
7224 goto out_reg;
7227 * We use the callback data field (which is a pointer)
7228 * as our counter.
7230 ret = kstrtoul(number, 0, (unsigned long *)&count);
7231 if (ret)
7232 return ret;
7234 out_reg:
7235 ret = tracing_alloc_snapshot_instance(tr);
7236 if (ret < 0)
7237 goto out;
7239 ret = register_ftrace_function_probe(glob, tr, ops, count);
7241 out:
7242 return ret < 0 ? ret : 0;
7245 static struct ftrace_func_command ftrace_snapshot_cmd = {
7246 .name = "snapshot",
7247 .func = ftrace_trace_snapshot_callback,
7250 static __init int register_snapshot_cmd(void)
7252 return register_ftrace_command(&ftrace_snapshot_cmd);
7254 #else
7255 static inline __init int register_snapshot_cmd(void) { return 0; }
7256 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
7258 static struct dentry *tracing_get_dentry(struct trace_array *tr)
7260 if (WARN_ON(!tr->dir))
7261 return ERR_PTR(-ENODEV);
7263 /* Top directory uses NULL as the parent */
7264 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
7265 return NULL;
7267 /* All sub buffers have a descriptor */
7268 return tr->dir;
7271 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
7273 struct dentry *d_tracer;
7275 if (tr->percpu_dir)
7276 return tr->percpu_dir;
7278 d_tracer = tracing_get_dentry(tr);
7279 if (IS_ERR(d_tracer))
7280 return NULL;
7282 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
7284 WARN_ONCE(!tr->percpu_dir,
7285 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
7287 return tr->percpu_dir;
7290 static struct dentry *
7291 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
7292 void *data, long cpu, const struct file_operations *fops)
7294 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
7296 if (ret) /* See tracing_get_cpu() */
7297 d_inode(ret)->i_cdev = (void *)(cpu + 1);
7298 return ret;
7301 static void
7302 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
7304 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
7305 struct dentry *d_cpu;
7306 char cpu_dir[30]; /* 30 characters should be more than enough */
7308 if (!d_percpu)
7309 return;
7311 snprintf(cpu_dir, 30, "cpu%ld", cpu);
7312 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
7313 if (!d_cpu) {
7314 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
7315 return;
7318 /* per cpu trace_pipe */
7319 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
7320 tr, cpu, &tracing_pipe_fops);
7322 /* per cpu trace */
7323 trace_create_cpu_file("trace", 0644, d_cpu,
7324 tr, cpu, &tracing_fops);
7326 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
7327 tr, cpu, &tracing_buffers_fops);
7329 trace_create_cpu_file("stats", 0444, d_cpu,
7330 tr, cpu, &tracing_stats_fops);
7332 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
7333 tr, cpu, &tracing_entries_fops);
7335 #ifdef CONFIG_TRACER_SNAPSHOT
7336 trace_create_cpu_file("snapshot", 0644, d_cpu,
7337 tr, cpu, &snapshot_fops);
7339 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
7340 tr, cpu, &snapshot_raw_fops);
7341 #endif
7344 #ifdef CONFIG_FTRACE_SELFTEST
7345 /* Let selftest have access to static functions in this file */
7346 #include "trace_selftest.c"
7347 #endif
7349 static ssize_t
7350 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
7351 loff_t *ppos)
7353 struct trace_option_dentry *topt = filp->private_data;
7354 char *buf;
7356 if (topt->flags->val & topt->opt->bit)
7357 buf = "1\n";
7358 else
7359 buf = "0\n";
7361 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7364 static ssize_t
7365 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
7366 loff_t *ppos)
7368 struct trace_option_dentry *topt = filp->private_data;
7369 unsigned long val;
7370 int ret;
7372 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7373 if (ret)
7374 return ret;
7376 if (val != 0 && val != 1)
7377 return -EINVAL;
7379 if (!!(topt->flags->val & topt->opt->bit) != val) {
7380 mutex_lock(&trace_types_lock);
7381 ret = __set_tracer_option(topt->tr, topt->flags,
7382 topt->opt, !val);
7383 mutex_unlock(&trace_types_lock);
7384 if (ret)
7385 return ret;
7388 *ppos += cnt;
7390 return cnt;
7394 static const struct file_operations trace_options_fops = {
7395 .open = tracing_open_generic,
7396 .read = trace_options_read,
7397 .write = trace_options_write,
7398 .llseek = generic_file_llseek,
7402 * In order to pass in both the trace_array descriptor as well as the index
7403 * to the flag that the trace option file represents, the trace_array
7404 * has a character array of trace_flags_index[], which holds the index
7405 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
7406 * The address of this character array is passed to the flag option file
7407 * read/write callbacks.
7409 * In order to extract both the index and the trace_array descriptor,
7410 * get_tr_index() uses the following algorithm.
7412 * idx = *ptr;
7414 * As the pointer itself contains the address of the index (remember
7415 * index[1] == 1).
7417 * Then to get the trace_array descriptor, by subtracting that index
7418 * from the ptr, we get to the start of the index itself.
7420 * ptr - idx == &index[0]
7422 * Then a simple container_of() from that pointer gets us to the
7423 * trace_array descriptor.
7425 static void get_tr_index(void *data, struct trace_array **ptr,
7426 unsigned int *pindex)
7428 *pindex = *(unsigned char *)data;
7430 *ptr = container_of(data - *pindex, struct trace_array,
7431 trace_flags_index);
7434 static ssize_t
7435 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
7436 loff_t *ppos)
7438 void *tr_index = filp->private_data;
7439 struct trace_array *tr;
7440 unsigned int index;
7441 char *buf;
7443 get_tr_index(tr_index, &tr, &index);
7445 if (tr->trace_flags & (1 << index))
7446 buf = "1\n";
7447 else
7448 buf = "0\n";
7450 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7453 static ssize_t
7454 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
7455 loff_t *ppos)
7457 void *tr_index = filp->private_data;
7458 struct trace_array *tr;
7459 unsigned int index;
7460 unsigned long val;
7461 int ret;
7463 get_tr_index(tr_index, &tr, &index);
7465 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7466 if (ret)
7467 return ret;
7469 if (val != 0 && val != 1)
7470 return -EINVAL;
7472 mutex_lock(&event_mutex);
7473 mutex_lock(&trace_types_lock);
7474 ret = set_tracer_flag(tr, 1 << index, val);
7475 mutex_unlock(&trace_types_lock);
7476 mutex_unlock(&event_mutex);
7478 if (ret < 0)
7479 return ret;
7481 *ppos += cnt;
7483 return cnt;
7486 static const struct file_operations trace_options_core_fops = {
7487 .open = tracing_open_generic,
7488 .read = trace_options_core_read,
7489 .write = trace_options_core_write,
7490 .llseek = generic_file_llseek,
7493 struct dentry *trace_create_file(const char *name,
7494 umode_t mode,
7495 struct dentry *parent,
7496 void *data,
7497 const struct file_operations *fops)
7499 struct dentry *ret;
7501 ret = tracefs_create_file(name, mode, parent, data, fops);
7502 if (!ret)
7503 pr_warn("Could not create tracefs '%s' entry\n", name);
7505 return ret;
7509 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
7511 struct dentry *d_tracer;
7513 if (tr->options)
7514 return tr->options;
7516 d_tracer = tracing_get_dentry(tr);
7517 if (IS_ERR(d_tracer))
7518 return NULL;
7520 tr->options = tracefs_create_dir("options", d_tracer);
7521 if (!tr->options) {
7522 pr_warn("Could not create tracefs directory 'options'\n");
7523 return NULL;
7526 return tr->options;
7529 static void
7530 create_trace_option_file(struct trace_array *tr,
7531 struct trace_option_dentry *topt,
7532 struct tracer_flags *flags,
7533 struct tracer_opt *opt)
7535 struct dentry *t_options;
7537 t_options = trace_options_init_dentry(tr);
7538 if (!t_options)
7539 return;
7541 topt->flags = flags;
7542 topt->opt = opt;
7543 topt->tr = tr;
7545 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
7546 &trace_options_fops);
7550 static void
7551 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
7553 struct trace_option_dentry *topts;
7554 struct trace_options *tr_topts;
7555 struct tracer_flags *flags;
7556 struct tracer_opt *opts;
7557 int cnt;
7558 int i;
7560 if (!tracer)
7561 return;
7563 flags = tracer->flags;
7565 if (!flags || !flags->opts)
7566 return;
7569 * If this is an instance, only create flags for tracers
7570 * the instance may have.
7572 if (!trace_ok_for_array(tracer, tr))
7573 return;
7575 for (i = 0; i < tr->nr_topts; i++) {
7576 /* Make sure there's no duplicate flags. */
7577 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
7578 return;
7581 opts = flags->opts;
7583 for (cnt = 0; opts[cnt].name; cnt++)
7586 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
7587 if (!topts)
7588 return;
7590 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
7591 GFP_KERNEL);
7592 if (!tr_topts) {
7593 kfree(topts);
7594 return;
7597 tr->topts = tr_topts;
7598 tr->topts[tr->nr_topts].tracer = tracer;
7599 tr->topts[tr->nr_topts].topts = topts;
7600 tr->nr_topts++;
7602 for (cnt = 0; opts[cnt].name; cnt++) {
7603 create_trace_option_file(tr, &topts[cnt], flags,
7604 &opts[cnt]);
7605 WARN_ONCE(topts[cnt].entry == NULL,
7606 "Failed to create trace option: %s",
7607 opts[cnt].name);
7611 static struct dentry *
7612 create_trace_option_core_file(struct trace_array *tr,
7613 const char *option, long index)
7615 struct dentry *t_options;
7617 t_options = trace_options_init_dentry(tr);
7618 if (!t_options)
7619 return NULL;
7621 return trace_create_file(option, 0644, t_options,
7622 (void *)&tr->trace_flags_index[index],
7623 &trace_options_core_fops);
7626 static void create_trace_options_dir(struct trace_array *tr)
7628 struct dentry *t_options;
7629 bool top_level = tr == &global_trace;
7630 int i;
7632 t_options = trace_options_init_dentry(tr);
7633 if (!t_options)
7634 return;
7636 for (i = 0; trace_options[i]; i++) {
7637 if (top_level ||
7638 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
7639 create_trace_option_core_file(tr, trace_options[i], i);
7643 static ssize_t
7644 rb_simple_read(struct file *filp, char __user *ubuf,
7645 size_t cnt, loff_t *ppos)
7647 struct trace_array *tr = filp->private_data;
7648 char buf[64];
7649 int r;
7651 r = tracer_tracing_is_on(tr);
7652 r = sprintf(buf, "%d\n", r);
7654 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7657 static ssize_t
7658 rb_simple_write(struct file *filp, const char __user *ubuf,
7659 size_t cnt, loff_t *ppos)
7661 struct trace_array *tr = filp->private_data;
7662 struct ring_buffer *buffer = tr->trace_buffer.buffer;
7663 unsigned long val;
7664 int ret;
7666 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7667 if (ret)
7668 return ret;
7670 if (buffer) {
7671 mutex_lock(&trace_types_lock);
7672 if (!!val == tracer_tracing_is_on(tr)) {
7673 val = 0; /* do nothing */
7674 } else if (val) {
7675 tracer_tracing_on(tr);
7676 if (tr->current_trace->start)
7677 tr->current_trace->start(tr);
7678 } else {
7679 tracer_tracing_off(tr);
7680 if (tr->current_trace->stop)
7681 tr->current_trace->stop(tr);
7683 mutex_unlock(&trace_types_lock);
7686 (*ppos)++;
7688 return cnt;
7691 static const struct file_operations rb_simple_fops = {
7692 .open = tracing_open_generic_tr,
7693 .read = rb_simple_read,
7694 .write = rb_simple_write,
7695 .release = tracing_release_generic_tr,
7696 .llseek = default_llseek,
7699 struct dentry *trace_instance_dir;
7701 static void
7702 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
7704 static int
7705 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
7707 enum ring_buffer_flags rb_flags;
7709 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
7711 buf->tr = tr;
7713 buf->buffer = ring_buffer_alloc(size, rb_flags);
7714 if (!buf->buffer)
7715 return -ENOMEM;
7717 buf->data = alloc_percpu(struct trace_array_cpu);
7718 if (!buf->data) {
7719 ring_buffer_free(buf->buffer);
7720 buf->buffer = NULL;
7721 return -ENOMEM;
7724 /* Allocate the first page for all buffers */
7725 set_buffer_entries(&tr->trace_buffer,
7726 ring_buffer_size(tr->trace_buffer.buffer, 0));
7728 return 0;
7731 static int allocate_trace_buffers(struct trace_array *tr, int size)
7733 int ret;
7735 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
7736 if (ret)
7737 return ret;
7739 #ifdef CONFIG_TRACER_MAX_TRACE
7740 ret = allocate_trace_buffer(tr, &tr->max_buffer,
7741 allocate_snapshot ? size : 1);
7742 if (WARN_ON(ret)) {
7743 ring_buffer_free(tr->trace_buffer.buffer);
7744 tr->trace_buffer.buffer = NULL;
7745 free_percpu(tr->trace_buffer.data);
7746 tr->trace_buffer.data = NULL;
7747 return -ENOMEM;
7749 tr->allocated_snapshot = allocate_snapshot;
7752 * Only the top level trace array gets its snapshot allocated
7753 * from the kernel command line.
7755 allocate_snapshot = false;
7756 #endif
7759 * Because of some magic with the way alloc_percpu() works on
7760 * x86_64, we need to synchronize the pgd of all the tables,
7761 * otherwise the trace events that happen in x86_64 page fault
7762 * handlers can't cope with accessing the chance that a
7763 * alloc_percpu()'d memory might be touched in the page fault trace
7764 * event. Oh, and we need to audit all other alloc_percpu() and vmalloc()
7765 * calls in tracing, because something might get triggered within a
7766 * page fault trace event!
7768 vmalloc_sync_mappings();
7770 return 0;
7773 static void free_trace_buffer(struct trace_buffer *buf)
7775 if (buf->buffer) {
7776 ring_buffer_free(buf->buffer);
7777 buf->buffer = NULL;
7778 free_percpu(buf->data);
7779 buf->data = NULL;
7783 static void free_trace_buffers(struct trace_array *tr)
7785 if (!tr)
7786 return;
7788 free_trace_buffer(&tr->trace_buffer);
7790 #ifdef CONFIG_TRACER_MAX_TRACE
7791 free_trace_buffer(&tr->max_buffer);
7792 #endif
7795 static void init_trace_flags_index(struct trace_array *tr)
7797 int i;
7799 /* Used by the trace options files */
7800 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
7801 tr->trace_flags_index[i] = i;
7804 static void __update_tracer_options(struct trace_array *tr)
7806 struct tracer *t;
7808 for (t = trace_types; t; t = t->next)
7809 add_tracer_options(tr, t);
7812 static void update_tracer_options(struct trace_array *tr)
7814 mutex_lock(&trace_types_lock);
7815 __update_tracer_options(tr);
7816 mutex_unlock(&trace_types_lock);
7819 static int instance_mkdir(const char *name)
7821 struct trace_array *tr;
7822 int ret;
7824 mutex_lock(&event_mutex);
7825 mutex_lock(&trace_types_lock);
7827 ret = -EEXIST;
7828 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7829 if (tr->name && strcmp(tr->name, name) == 0)
7830 goto out_unlock;
7833 ret = -ENOMEM;
7834 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
7835 if (!tr)
7836 goto out_unlock;
7838 tr->name = kstrdup(name, GFP_KERNEL);
7839 if (!tr->name)
7840 goto out_free_tr;
7842 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
7843 goto out_free_tr;
7845 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
7847 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
7849 raw_spin_lock_init(&tr->start_lock);
7851 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7853 tr->current_trace = &nop_trace;
7855 INIT_LIST_HEAD(&tr->systems);
7856 INIT_LIST_HEAD(&tr->events);
7857 INIT_LIST_HEAD(&tr->hist_vars);
7859 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
7860 goto out_free_tr;
7862 tr->dir = tracefs_create_dir(name, trace_instance_dir);
7863 if (!tr->dir)
7864 goto out_free_tr;
7866 ret = event_trace_add_tracer(tr->dir, tr);
7867 if (ret) {
7868 tracefs_remove_recursive(tr->dir);
7869 goto out_free_tr;
7872 ftrace_init_trace_array(tr);
7874 init_tracer_tracefs(tr, tr->dir);
7875 init_trace_flags_index(tr);
7876 __update_tracer_options(tr);
7878 list_add(&tr->list, &ftrace_trace_arrays);
7880 mutex_unlock(&trace_types_lock);
7881 mutex_unlock(&event_mutex);
7883 return 0;
7885 out_free_tr:
7886 free_trace_buffers(tr);
7887 free_cpumask_var(tr->tracing_cpumask);
7888 kfree(tr->name);
7889 kfree(tr);
7891 out_unlock:
7892 mutex_unlock(&trace_types_lock);
7893 mutex_unlock(&event_mutex);
7895 return ret;
7899 static int instance_rmdir(const char *name)
7901 struct trace_array *tr;
7902 int found = 0;
7903 int ret;
7904 int i;
7906 mutex_lock(&event_mutex);
7907 mutex_lock(&trace_types_lock);
7909 ret = -ENODEV;
7910 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7911 if (tr->name && strcmp(tr->name, name) == 0) {
7912 found = 1;
7913 break;
7916 if (!found)
7917 goto out_unlock;
7919 ret = -EBUSY;
7920 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
7921 goto out_unlock;
7923 list_del(&tr->list);
7925 /* Disable all the flags that were enabled coming in */
7926 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
7927 if ((1 << i) & ZEROED_TRACE_FLAGS)
7928 set_tracer_flag(tr, 1 << i, 0);
7931 tracing_set_nop(tr);
7932 clear_ftrace_function_probes(tr);
7933 event_trace_del_tracer(tr);
7934 ftrace_clear_pids(tr);
7935 ftrace_destroy_function_files(tr);
7936 tracefs_remove_recursive(tr->dir);
7937 free_trace_buffers(tr);
7939 for (i = 0; i < tr->nr_topts; i++) {
7940 kfree(tr->topts[i].topts);
7942 kfree(tr->topts);
7944 free_cpumask_var(tr->tracing_cpumask);
7945 kfree(tr->name);
7946 kfree(tr);
7948 ret = 0;
7950 out_unlock:
7951 mutex_unlock(&trace_types_lock);
7952 mutex_unlock(&event_mutex);
7954 return ret;
7957 static __init void create_trace_instances(struct dentry *d_tracer)
7959 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
7960 instance_mkdir,
7961 instance_rmdir);
7962 if (WARN_ON(!trace_instance_dir))
7963 return;
7966 static void
7967 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
7969 struct trace_event_file *file;
7970 int cpu;
7972 trace_create_file("available_tracers", 0444, d_tracer,
7973 tr, &show_traces_fops);
7975 trace_create_file("current_tracer", 0644, d_tracer,
7976 tr, &set_tracer_fops);
7978 trace_create_file("tracing_cpumask", 0644, d_tracer,
7979 tr, &tracing_cpumask_fops);
7981 trace_create_file("trace_options", 0644, d_tracer,
7982 tr, &tracing_iter_fops);
7984 trace_create_file("trace", 0644, d_tracer,
7985 tr, &tracing_fops);
7987 trace_create_file("trace_pipe", 0444, d_tracer,
7988 tr, &tracing_pipe_fops);
7990 trace_create_file("buffer_size_kb", 0644, d_tracer,
7991 tr, &tracing_entries_fops);
7993 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
7994 tr, &tracing_total_entries_fops);
7996 trace_create_file("free_buffer", 0200, d_tracer,
7997 tr, &tracing_free_buffer_fops);
7999 trace_create_file("trace_marker", 0220, d_tracer,
8000 tr, &tracing_mark_fops);
8002 file = __find_event_file(tr, "ftrace", "print");
8003 if (file && file->dir)
8004 trace_create_file("trigger", 0644, file->dir, file,
8005 &event_trigger_fops);
8006 tr->trace_marker_file = file;
8008 trace_create_file("trace_marker_raw", 0220, d_tracer,
8009 tr, &tracing_mark_raw_fops);
8011 trace_create_file("trace_clock", 0644, d_tracer, tr,
8012 &trace_clock_fops);
8014 trace_create_file("tracing_on", 0644, d_tracer,
8015 tr, &rb_simple_fops);
8017 trace_create_file("timestamp_mode", 0444, d_tracer, tr,
8018 &trace_time_stamp_mode_fops);
8020 create_trace_options_dir(tr);
8022 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
8023 trace_create_file("tracing_max_latency", 0644, d_tracer,
8024 &tr->max_latency, &tracing_max_lat_fops);
8025 #endif
8027 if (ftrace_create_function_files(tr, d_tracer))
8028 WARN(1, "Could not allocate function filter files");
8030 #ifdef CONFIG_TRACER_SNAPSHOT
8031 trace_create_file("snapshot", 0644, d_tracer,
8032 tr, &snapshot_fops);
8033 #endif
8035 for_each_tracing_cpu(cpu)
8036 tracing_init_tracefs_percpu(tr, cpu);
8038 ftrace_init_tracefs(tr, d_tracer);
8041 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore)
8043 struct vfsmount *mnt;
8044 struct file_system_type *type;
8047 * To maintain backward compatibility for tools that mount
8048 * debugfs to get to the tracing facility, tracefs is automatically
8049 * mounted to the debugfs/tracing directory.
8051 type = get_fs_type("tracefs");
8052 if (!type)
8053 return NULL;
8054 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
8055 put_filesystem(type);
8056 if (IS_ERR(mnt))
8057 return NULL;
8058 mntget(mnt);
8060 return mnt;
8064 * tracing_init_dentry - initialize top level trace array
8066 * This is called when creating files or directories in the tracing
8067 * directory. It is called via fs_initcall() by any of the boot up code
8068 * and expects to return the dentry of the top level tracing directory.
8070 struct dentry *tracing_init_dentry(void)
8072 struct trace_array *tr = &global_trace;
8074 /* The top level trace array uses NULL as parent */
8075 if (tr->dir)
8076 return NULL;
8078 if (WARN_ON(!tracefs_initialized()) ||
8079 (IS_ENABLED(CONFIG_DEBUG_FS) &&
8080 WARN_ON(!debugfs_initialized())))
8081 return ERR_PTR(-ENODEV);
8084 * As there may still be users that expect the tracing
8085 * files to exist in debugfs/tracing, we must automount
8086 * the tracefs file system there, so older tools still
8087 * work with the newer kerenl.
8089 tr->dir = debugfs_create_automount("tracing", NULL,
8090 trace_automount, NULL);
8091 if (!tr->dir) {
8092 pr_warn_once("Could not create debugfs directory 'tracing'\n");
8093 return ERR_PTR(-ENOMEM);
8096 return NULL;
8099 extern struct trace_eval_map *__start_ftrace_eval_maps[];
8100 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
8102 static void __init trace_eval_init(void)
8104 int len;
8106 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
8107 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
8110 #ifdef CONFIG_MODULES
8111 static void trace_module_add_evals(struct module *mod)
8113 if (!mod->num_trace_evals)
8114 return;
8117 * Modules with bad taint do not have events created, do
8118 * not bother with enums either.
8120 if (trace_module_has_bad_taint(mod))
8121 return;
8123 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
8126 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
8127 static void trace_module_remove_evals(struct module *mod)
8129 union trace_eval_map_item *map;
8130 union trace_eval_map_item **last = &trace_eval_maps;
8132 if (!mod->num_trace_evals)
8133 return;
8135 mutex_lock(&trace_eval_mutex);
8137 map = trace_eval_maps;
8139 while (map) {
8140 if (map->head.mod == mod)
8141 break;
8142 map = trace_eval_jmp_to_tail(map);
8143 last = &map->tail.next;
8144 map = map->tail.next;
8146 if (!map)
8147 goto out;
8149 *last = trace_eval_jmp_to_tail(map)->tail.next;
8150 kfree(map);
8151 out:
8152 mutex_unlock(&trace_eval_mutex);
8154 #else
8155 static inline void trace_module_remove_evals(struct module *mod) { }
8156 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
8158 static int trace_module_notify(struct notifier_block *self,
8159 unsigned long val, void *data)
8161 struct module *mod = data;
8163 switch (val) {
8164 case MODULE_STATE_COMING:
8165 trace_module_add_evals(mod);
8166 break;
8167 case MODULE_STATE_GOING:
8168 trace_module_remove_evals(mod);
8169 break;
8172 return 0;
8175 static struct notifier_block trace_module_nb = {
8176 .notifier_call = trace_module_notify,
8177 .priority = 0,
8179 #endif /* CONFIG_MODULES */
8181 static __init int tracer_init_tracefs(void)
8183 struct dentry *d_tracer;
8185 trace_access_lock_init();
8187 d_tracer = tracing_init_dentry();
8188 if (IS_ERR(d_tracer))
8189 return 0;
8191 event_trace_init();
8193 init_tracer_tracefs(&global_trace, d_tracer);
8194 ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
8196 trace_create_file("tracing_thresh", 0644, d_tracer,
8197 &global_trace, &tracing_thresh_fops);
8199 trace_create_file("README", 0444, d_tracer,
8200 NULL, &tracing_readme_fops);
8202 trace_create_file("saved_cmdlines", 0444, d_tracer,
8203 NULL, &tracing_saved_cmdlines_fops);
8205 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
8206 NULL, &tracing_saved_cmdlines_size_fops);
8208 trace_create_file("saved_tgids", 0444, d_tracer,
8209 NULL, &tracing_saved_tgids_fops);
8211 trace_eval_init();
8213 trace_create_eval_file(d_tracer);
8215 #ifdef CONFIG_MODULES
8216 register_module_notifier(&trace_module_nb);
8217 #endif
8219 #ifdef CONFIG_DYNAMIC_FTRACE
8220 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
8221 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
8222 #endif
8224 create_trace_instances(d_tracer);
8226 update_tracer_options(&global_trace);
8228 return 0;
8231 static int trace_panic_handler(struct notifier_block *this,
8232 unsigned long event, void *unused)
8234 if (ftrace_dump_on_oops)
8235 ftrace_dump(ftrace_dump_on_oops);
8236 return NOTIFY_OK;
8239 static struct notifier_block trace_panic_notifier = {
8240 .notifier_call = trace_panic_handler,
8241 .next = NULL,
8242 .priority = 150 /* priority: INT_MAX >= x >= 0 */
8245 static int trace_die_handler(struct notifier_block *self,
8246 unsigned long val,
8247 void *data)
8249 switch (val) {
8250 case DIE_OOPS:
8251 if (ftrace_dump_on_oops)
8252 ftrace_dump(ftrace_dump_on_oops);
8253 break;
8254 default:
8255 break;
8257 return NOTIFY_OK;
8260 static struct notifier_block trace_die_notifier = {
8261 .notifier_call = trace_die_handler,
8262 .priority = 200
8266 * printk is set to max of 1024, we really don't need it that big.
8267 * Nothing should be printing 1000 characters anyway.
8269 #define TRACE_MAX_PRINT 1000
8272 * Define here KERN_TRACE so that we have one place to modify
8273 * it if we decide to change what log level the ftrace dump
8274 * should be at.
8276 #define KERN_TRACE KERN_EMERG
8278 void
8279 trace_printk_seq(struct trace_seq *s)
8281 /* Probably should print a warning here. */
8282 if (s->seq.len >= TRACE_MAX_PRINT)
8283 s->seq.len = TRACE_MAX_PRINT;
8286 * More paranoid code. Although the buffer size is set to
8287 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
8288 * an extra layer of protection.
8290 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
8291 s->seq.len = s->seq.size - 1;
8293 /* should be zero ended, but we are paranoid. */
8294 s->buffer[s->seq.len] = 0;
8296 printk(KERN_TRACE "%s", s->buffer);
8298 trace_seq_init(s);
8301 void trace_init_global_iter(struct trace_iterator *iter)
8303 iter->tr = &global_trace;
8304 iter->trace = iter->tr->current_trace;
8305 iter->cpu_file = RING_BUFFER_ALL_CPUS;
8306 iter->trace_buffer = &global_trace.trace_buffer;
8308 if (iter->trace && iter->trace->open)
8309 iter->trace->open(iter);
8311 /* Annotate start of buffers if we had overruns */
8312 if (ring_buffer_overruns(iter->trace_buffer->buffer))
8313 iter->iter_flags |= TRACE_FILE_ANNOTATE;
8315 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
8316 if (trace_clocks[iter->tr->clock_id].in_ns)
8317 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
8320 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
8322 /* use static because iter can be a bit big for the stack */
8323 static struct trace_iterator iter;
8324 static atomic_t dump_running;
8325 struct trace_array *tr = &global_trace;
8326 unsigned int old_userobj;
8327 unsigned long flags;
8328 int cnt = 0, cpu;
8330 /* Only allow one dump user at a time. */
8331 if (atomic_inc_return(&dump_running) != 1) {
8332 atomic_dec(&dump_running);
8333 return;
8337 * Always turn off tracing when we dump.
8338 * We don't need to show trace output of what happens
8339 * between multiple crashes.
8341 * If the user does a sysrq-z, then they can re-enable
8342 * tracing with echo 1 > tracing_on.
8344 tracing_off();
8346 local_irq_save(flags);
8347 printk_nmi_direct_enter();
8349 /* Simulate the iterator */
8350 trace_init_global_iter(&iter);
8352 for_each_tracing_cpu(cpu) {
8353 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
8356 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
8358 /* don't look at user memory in panic mode */
8359 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
8361 switch (oops_dump_mode) {
8362 case DUMP_ALL:
8363 iter.cpu_file = RING_BUFFER_ALL_CPUS;
8364 break;
8365 case DUMP_ORIG:
8366 iter.cpu_file = raw_smp_processor_id();
8367 break;
8368 case DUMP_NONE:
8369 goto out_enable;
8370 default:
8371 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
8372 iter.cpu_file = RING_BUFFER_ALL_CPUS;
8375 printk(KERN_TRACE "Dumping ftrace buffer:\n");
8377 /* Did function tracer already get disabled? */
8378 if (ftrace_is_dead()) {
8379 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
8380 printk("# MAY BE MISSING FUNCTION EVENTS\n");
8384 * We need to stop all tracing on all CPUS to read the
8385 * the next buffer. This is a bit expensive, but is
8386 * not done often. We fill all what we can read,
8387 * and then release the locks again.
8390 while (!trace_empty(&iter)) {
8392 if (!cnt)
8393 printk(KERN_TRACE "---------------------------------\n");
8395 cnt++;
8397 trace_iterator_reset(&iter);
8398 iter.iter_flags |= TRACE_FILE_LAT_FMT;
8400 if (trace_find_next_entry_inc(&iter) != NULL) {
8401 int ret;
8403 ret = print_trace_line(&iter);
8404 if (ret != TRACE_TYPE_NO_CONSUME)
8405 trace_consume(&iter);
8407 touch_nmi_watchdog();
8409 trace_printk_seq(&iter.seq);
8412 if (!cnt)
8413 printk(KERN_TRACE " (ftrace buffer empty)\n");
8414 else
8415 printk(KERN_TRACE "---------------------------------\n");
8417 out_enable:
8418 tr->trace_flags |= old_userobj;
8420 for_each_tracing_cpu(cpu) {
8421 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
8423 atomic_dec(&dump_running);
8424 printk_nmi_direct_exit();
8425 local_irq_restore(flags);
8427 EXPORT_SYMBOL_GPL(ftrace_dump);
8429 int trace_run_command(const char *buf, int (*createfn)(int, char **))
8431 char **argv;
8432 int argc, ret;
8434 argc = 0;
8435 ret = 0;
8436 argv = argv_split(GFP_KERNEL, buf, &argc);
8437 if (!argv)
8438 return -ENOMEM;
8440 if (argc)
8441 ret = createfn(argc, argv);
8443 argv_free(argv);
8445 return ret;
8448 #define WRITE_BUFSIZE 4096
8450 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
8451 size_t count, loff_t *ppos,
8452 int (*createfn)(int, char **))
8454 char *kbuf, *buf, *tmp;
8455 int ret = 0;
8456 size_t done = 0;
8457 size_t size;
8459 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
8460 if (!kbuf)
8461 return -ENOMEM;
8463 while (done < count) {
8464 size = count - done;
8466 if (size >= WRITE_BUFSIZE)
8467 size = WRITE_BUFSIZE - 1;
8469 if (copy_from_user(kbuf, buffer + done, size)) {
8470 ret = -EFAULT;
8471 goto out;
8473 kbuf[size] = '\0';
8474 buf = kbuf;
8475 do {
8476 tmp = strchr(buf, '\n');
8477 if (tmp) {
8478 *tmp = '\0';
8479 size = tmp - buf + 1;
8480 } else {
8481 size = strlen(buf);
8482 if (done + size < count) {
8483 if (buf != kbuf)
8484 break;
8485 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
8486 pr_warn("Line length is too long: Should be less than %d\n",
8487 WRITE_BUFSIZE - 2);
8488 ret = -EINVAL;
8489 goto out;
8492 done += size;
8494 /* Remove comments */
8495 tmp = strchr(buf, '#');
8497 if (tmp)
8498 *tmp = '\0';
8500 ret = trace_run_command(buf, createfn);
8501 if (ret)
8502 goto out;
8503 buf += size;
8505 } while (done < count);
8507 ret = done;
8509 out:
8510 kfree(kbuf);
8512 return ret;
8515 __init static int tracer_alloc_buffers(void)
8517 int ring_buf_size;
8518 int ret = -ENOMEM;
8521 * Make sure we don't accidently add more trace options
8522 * than we have bits for.
8524 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
8526 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
8527 goto out;
8529 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
8530 goto out_free_buffer_mask;
8532 /* Only allocate trace_printk buffers if a trace_printk exists */
8533 if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
8534 /* Must be called before global_trace.buffer is allocated */
8535 trace_printk_init_buffers();
8537 /* To save memory, keep the ring buffer size to its minimum */
8538 if (ring_buffer_expanded)
8539 ring_buf_size = trace_buf_size;
8540 else
8541 ring_buf_size = 1;
8543 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
8544 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
8546 raw_spin_lock_init(&global_trace.start_lock);
8549 * The prepare callbacks allocates some memory for the ring buffer. We
8550 * don't free the buffer if the if the CPU goes down. If we were to free
8551 * the buffer, then the user would lose any trace that was in the
8552 * buffer. The memory will be removed once the "instance" is removed.
8554 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
8555 "trace/RB:preapre", trace_rb_cpu_prepare,
8556 NULL);
8557 if (ret < 0)
8558 goto out_free_cpumask;
8559 /* Used for event triggers */
8560 ret = -ENOMEM;
8561 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
8562 if (!temp_buffer)
8563 goto out_rm_hp_state;
8565 if (trace_create_savedcmd() < 0)
8566 goto out_free_temp_buffer;
8568 /* TODO: make the number of buffers hot pluggable with CPUS */
8569 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
8570 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
8571 WARN_ON(1);
8572 goto out_free_savedcmd;
8575 if (global_trace.buffer_disabled)
8576 tracing_off();
8578 if (trace_boot_clock) {
8579 ret = tracing_set_clock(&global_trace, trace_boot_clock);
8580 if (ret < 0)
8581 pr_warn("Trace clock %s not defined, going back to default\n",
8582 trace_boot_clock);
8586 * register_tracer() might reference current_trace, so it
8587 * needs to be set before we register anything. This is
8588 * just a bootstrap of current_trace anyway.
8590 global_trace.current_trace = &nop_trace;
8592 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8594 ftrace_init_global_array_ops(&global_trace);
8596 init_trace_flags_index(&global_trace);
8598 register_tracer(&nop_trace);
8600 /* Function tracing may start here (via kernel command line) */
8601 init_function_trace();
8603 /* All seems OK, enable tracing */
8604 tracing_disabled = 0;
8606 atomic_notifier_chain_register(&panic_notifier_list,
8607 &trace_panic_notifier);
8609 register_die_notifier(&trace_die_notifier);
8611 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
8613 INIT_LIST_HEAD(&global_trace.systems);
8614 INIT_LIST_HEAD(&global_trace.events);
8615 INIT_LIST_HEAD(&global_trace.hist_vars);
8616 list_add(&global_trace.list, &ftrace_trace_arrays);
8618 apply_trace_boot_options();
8620 register_snapshot_cmd();
8622 return 0;
8624 out_free_savedcmd:
8625 free_saved_cmdlines_buffer(savedcmd);
8626 out_free_temp_buffer:
8627 ring_buffer_free(temp_buffer);
8628 out_rm_hp_state:
8629 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
8630 out_free_cpumask:
8631 free_cpumask_var(global_trace.tracing_cpumask);
8632 out_free_buffer_mask:
8633 free_cpumask_var(tracing_buffer_mask);
8634 out:
8635 return ret;
8638 void __init early_trace_init(void)
8640 if (tracepoint_printk) {
8641 tracepoint_print_iter =
8642 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
8643 if (WARN_ON(!tracepoint_print_iter))
8644 tracepoint_printk = 0;
8645 else
8646 static_key_enable(&tracepoint_printk_key.key);
8648 tracer_alloc_buffers();
8651 void __init trace_init(void)
8653 trace_event_init();
8656 __init static int clear_boot_tracer(void)
8659 * The default tracer at boot buffer is an init section.
8660 * This function is called in lateinit. If we did not
8661 * find the boot tracer, then clear it out, to prevent
8662 * later registration from accessing the buffer that is
8663 * about to be freed.
8665 if (!default_bootup_tracer)
8666 return 0;
8668 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
8669 default_bootup_tracer);
8670 default_bootup_tracer = NULL;
8672 return 0;
8675 fs_initcall(tracer_init_tracefs);
8676 late_initcall_sync(clear_boot_tracer);
8678 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
8679 __init static int tracing_set_default_clock(void)
8681 /* sched_clock_stable() is determined in late_initcall */
8682 if (!trace_boot_clock && !sched_clock_stable()) {
8683 printk(KERN_WARNING
8684 "Unstable clock detected, switching default tracing clock to \"global\"\n"
8685 "If you want to keep using the local clock, then add:\n"
8686 " \"trace_clock=local\"\n"
8687 "on the kernel command line\n");
8688 tracing_set_clock(&global_trace, "global");
8691 return 0;
8693 late_initcall_sync(tracing_set_default_clock);
8694 #endif