kernel/trace/trace.h
1 /* SPDX-License-Identifier: GPL-2.0 */
3 #ifndef _LINUX_KERNEL_TRACE_H
4 #define _LINUX_KERNEL_TRACE_H
6 #include <linux/fs.h>
7 #include <linux/atomic.h>
8 #include <linux/sched.h>
9 #include <linux/clocksource.h>
10 #include <linux/ring_buffer.h>
11 #include <linux/mmiotrace.h>
12 #include <linux/tracepoint.h>
13 #include <linux/ftrace.h>
14 #include <linux/hw_breakpoint.h>
15 #include <linux/trace_seq.h>
16 #include <linux/trace_events.h>
17 #include <linux/compiler.h>
19 #include <linux/glob.h>
21 #ifdef CONFIG_FTRACE_SYSCALLS
22 #include <asm/unistd.h> /* For NR_SYSCALLS */
23 #include <asm/syscall.h> /* some archs define it here */
24 #endif
26 enum trace_type {
27 __TRACE_FIRST_TYPE = 0,
29 TRACE_FN,
30 TRACE_CTX,
31 TRACE_WAKE,
32 TRACE_STACK,
33 TRACE_PRINT,
34 TRACE_BPRINT,
35 TRACE_MMIO_RW,
36 TRACE_MMIO_MAP,
37 TRACE_BRANCH,
38 TRACE_GRAPH_RET,
39 TRACE_GRAPH_ENT,
40 TRACE_USER_STACK,
41 TRACE_BLK,
42 TRACE_BPUTS,
43 TRACE_HWLAT,
44 TRACE_RAW_DATA,
46 __TRACE_LAST_TYPE,
47 };
50 #undef __field
51 #define __field(type, item) type item;
53 #undef __field_struct
54 #define __field_struct(type, item) __field(type, item)
56 #undef __field_desc
57 #define __field_desc(type, container, item)
59 #undef __array
60 #define __array(type, item, size) type item[size];
62 #undef __array_desc
63 #define __array_desc(type, container, item, size)
65 #undef __dynamic_array
66 #define __dynamic_array(type, item) type item[];
68 #undef F_STRUCT
69 #define F_STRUCT(args...) args
71 #undef FTRACE_ENTRY
72 #define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter) \
73 struct struct_name { \
74 struct trace_entry ent; \
75 tstruct \
78 #undef FTRACE_ENTRY_DUP
79 #define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)
81 #undef FTRACE_ENTRY_REG
82 #define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, \
83 filter, regfn) \
84 FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
85 filter)
87 #undef FTRACE_ENTRY_PACKED
88 #define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print, \
89 filter) \
90 FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
91 filter) __packed
93 #include "trace_entries.h"
95 /*
96 * syscalls are special, and need special handling, this is why
97 * they are not included in trace_entries.h
98 */
99 struct syscall_trace_enter {
100 struct trace_entry ent;
101 int nr;
102 unsigned long args[];
103 };
105 struct syscall_trace_exit {
106 struct trace_entry ent;
107 int nr;
108 long ret;
109 };
111 struct kprobe_trace_entry_head {
112 struct trace_entry ent;
113 unsigned long ip;
114 };
116 struct kretprobe_trace_entry_head {
117 struct trace_entry ent;
118 unsigned long func;
119 unsigned long ret_ip;
120 };
122 /*
123 * trace_flag_type is an enumeration that holds different
124 * states when a trace occurs. These are:
125 * IRQS_OFF - interrupts were disabled
126 * IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
127 * NEED_RESCHED - reschedule is requested
128 * HARDIRQ - inside an interrupt handler
129 * SOFTIRQ - inside a softirq handler
130 */
131 enum trace_flag_type {
132 TRACE_FLAG_IRQS_OFF = 0x01,
133 TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
134 TRACE_FLAG_NEED_RESCHED = 0x04,
135 TRACE_FLAG_HARDIRQ = 0x08,
136 TRACE_FLAG_SOFTIRQ = 0x10,
137 TRACE_FLAG_PREEMPT_RESCHED = 0x20,
138 TRACE_FLAG_NMI = 0x40,
139 };
141 #define TRACE_BUF_SIZE 1024
143 struct trace_array;
145 /*
146 * The CPU trace array - it consists of thousands of trace entries
147 * plus some other descriptor data: (for example which task started
148 * the trace, etc.)
149 */
150 struct trace_array_cpu {
151 atomic_t disabled;
152 void *buffer_page; /* ring buffer spare */
154 unsigned long entries;
155 unsigned long saved_latency;
156 unsigned long critical_start;
157 unsigned long critical_end;
158 unsigned long critical_sequence;
159 unsigned long nice;
160 unsigned long policy;
161 unsigned long rt_priority;
162 unsigned long skipped_entries;
163 u64 preempt_timestamp;
164 pid_t pid;
165 kuid_t uid;
166 char comm[TASK_COMM_LEN];
168 bool ignore_pid;
169 #ifdef CONFIG_FUNCTION_TRACER
170 bool ftrace_ignore_pid;
171 #endif
172 };
174 struct tracer;
175 struct trace_option_dentry;
177 struct trace_buffer {
178 struct trace_array *tr;
179 struct ring_buffer *buffer;
180 struct trace_array_cpu __percpu *data;
181 u64 time_start;
182 int cpu;
183 };
185 #define TRACE_FLAGS_MAX_SIZE 32
187 struct trace_options {
188 struct tracer *tracer;
189 struct trace_option_dentry *topts;
190 };
192 struct trace_pid_list {
193 int pid_max;
194 unsigned long *pids;
195 };
197 /*
198 * The trace array - an array of per-CPU trace arrays. This is the
199 * highest level data structure that individual tracers deal with.
200 * They have on/off state as well:
201 */
202 struct trace_array {
203 struct list_head list;
204 char *name;
205 struct trace_buffer trace_buffer;
206 #ifdef CONFIG_TRACER_MAX_TRACE
207 /*
208 * The max_buffer is used to snapshot the trace when a maximum
209 * latency is reached, or when the user initiates a snapshot.
210 * Some tracers will use this to store a maximum trace while
211 * it continues examining live traces.
213 * The buffers for the max_buffer are set up the same as the trace_buffer
214 * When a snapshot is taken, the buffer of the max_buffer is swapped
215 * with the buffer of the trace_buffer and the buffers are reset for
216 * the trace_buffer so the tracing can continue.
217 */
218 struct trace_buffer max_buffer;
219 bool allocated_snapshot;
220 #endif
221 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
222 unsigned long max_latency;
223 #endif
224 struct trace_pid_list __rcu *filtered_pids;
225 /*
226 * max_lock is used to protect the swapping of buffers
227 * when taking a max snapshot. The buffers themselves are
228 * protected by per_cpu spinlocks. But the action of the swap
229 * needs its own lock.
230 *
231 * This is defined as an arch_spinlock_t in order to help
232 * with performance when lockdep debugging is enabled.
233 *
234 * It is also used in other places outside the update_max_tr
235 * so it needs to be defined outside of the
236 * CONFIG_TRACER_MAX_TRACE.
237 */
238 arch_spinlock_t max_lock;
239 int buffer_disabled;
240 #ifdef CONFIG_FTRACE_SYSCALLS
241 int sys_refcount_enter;
242 int sys_refcount_exit;
243 struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
244 struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
245 #endif
246 int stop_count;
247 int clock_id;
248 int nr_topts;
249 bool clear_trace;
250 struct tracer *current_trace;
251 unsigned int trace_flags;
252 unsigned char trace_flags_index[TRACE_FLAGS_MAX_SIZE];
253 unsigned int flags;
254 raw_spinlock_t start_lock;
255 struct dentry *dir;
256 struct dentry *options;
257 struct dentry *percpu_dir;
258 struct dentry *event_dir;
259 struct trace_options *topts;
260 struct list_head systems;
261 struct list_head events;
262 cpumask_var_t tracing_cpumask; /* only trace on set CPUs */
263 int ref;
264 #ifdef CONFIG_FUNCTION_TRACER
265 struct ftrace_ops *ops;
266 struct trace_pid_list __rcu *function_pids;
267 #ifdef CONFIG_DYNAMIC_FTRACE
268 /* All of these are protected by the ftrace_lock */
269 struct list_head func_probes;
270 struct list_head mod_trace;
271 struct list_head mod_notrace;
272 #endif
273 /* function tracing enabled */
274 int function_enabled;
275 #endif
276 int time_stamp_abs_ref;
277 struct list_head hist_vars;
278 };
280 enum {
281 TRACE_ARRAY_FL_GLOBAL = (1 << 0)
282 };
284 extern struct list_head ftrace_trace_arrays;
286 extern struct mutex trace_types_lock;
288 extern int trace_array_get(struct trace_array *tr);
289 extern void trace_array_put(struct trace_array *tr);
291 extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs);
292 extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);
294 extern bool trace_clock_in_ns(struct trace_array *tr);
296 /*
297 * The global tracer (top) should be the first trace array added,
298 * but we check the flag anyway.
299 */
300 static inline struct trace_array *top_trace_array(void)
302 struct trace_array *tr;
304 if (list_empty(&ftrace_trace_arrays))
305 return NULL;
307 tr = list_entry(ftrace_trace_arrays.prev,
308 typeof(*tr), list);
309 WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
310 return tr;
313 #define FTRACE_CMP_TYPE(var, type) \
314 __builtin_types_compatible_p(typeof(var), type *)
316 #undef IF_ASSIGN
317 #define IF_ASSIGN(var, entry, etype, id) \
318 if (FTRACE_CMP_TYPE(var, etype)) { \
319 var = (typeof(var))(entry); \
320 WARN_ON(id && (entry)->type != id); \
321 break; \
324 /* Will cause compile errors if type is not found. */
325 extern void __ftrace_bad_type(void);
327 /*
328 * The trace_assign_type is a verifier that the entry type is
329 * the same as the type being assigned. To add new types simply
330 * add a line with the following format:
332 * IF_ASSIGN(var, ent, type, id);
334 * Where "type" is the trace type that includes the trace_entry
335 * as the "ent" item. And "id" is the trace identifier that is
336 * used in the trace_type enum.
338 * If the type can have more than one id, then use zero.
339 */
340 #define trace_assign_type(var, ent) \
341 do { \
342 IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN); \
343 IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \
344 IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \
345 IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
346 IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \
347 IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \
348 IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS); \
349 IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT); \
350 IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\
351 IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \
352 TRACE_MMIO_RW); \
353 IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \
354 TRACE_MMIO_MAP); \
355 IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
356 IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, \
357 TRACE_GRAPH_ENT); \
358 IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \
359 TRACE_GRAPH_RET); \
360 __ftrace_bad_type(); \
361 } while (0)
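/*
 * Illustrative sketch (not part of the original header): how an output
 * routine typically uses trace_assign_type().  The function name
 * print_fn_entry() is hypothetical; struct ftrace_entry and the iterator
 * members come from trace_entries.h and the headers included above.
 */
static enum print_line_t print_fn_entry(struct trace_iterator *iter)
{
	struct trace_entry *ent = iter->ent;
	struct ftrace_entry *field;

	switch (ent->type) {
	case TRACE_FN:
		/* Casts ent to the right type and warns if ent->type != TRACE_FN */
		trace_assign_type(field, ent);
		trace_seq_printf(&iter->seq, "%ps <-- %ps\n",
				 (void *)field->ip, (void *)field->parent_ip);
		return TRACE_TYPE_HANDLED;
	default:
		return TRACE_TYPE_UNHANDLED;
	}
}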
363 /*
364 * An option specific to a tracer. This is a boolean value.
365 * The bit is the bit index that sets its value on the
366 * flags value in struct tracer_flags.
367 */
368 struct tracer_opt {
369 const char *name; /* Will appear on the trace_options file */
370 u32 bit; /* Mask assigned in val field in tracer_flags */
371 };
373 /*
374 * The set of specific options for a tracer. Your tracer
375 * has to set the initial value of the flags val.
376 */
377 struct tracer_flags {
378 u32 val;
379 struct tracer_opt *opts;
380 struct tracer *trace;
381 };
383 /* Makes it easier to define a tracer opt */
384 #define TRACER_OPT(s, b) .name = #s, .bit = b
387 struct trace_option_dentry {
388 struct tracer_opt *opt;
389 struct tracer_flags *flags;
390 struct trace_array *tr;
391 struct dentry *entry;
392 };
394 /**
395 * struct tracer - a specific tracer and its callbacks to interact with tracefs
396 * @name: the name chosen to select it on the available_tracers file
397 * @init: called when one switches to this tracer (echo name > current_tracer)
398 * @reset: called when one switches to another tracer
399 * @start: called when tracing is unpaused (echo 1 > tracing_on)
400 * @stop: called when tracing is paused (echo 0 > tracing_on)
401 * @update_thresh: called when tracing_thresh is updated
402 * @open: called when the trace file is opened
403 * @pipe_open: called when the trace_pipe file is opened
404 * @close: called when the trace file is released
405 * @pipe_close: called when the trace_pipe file is released
406 * @read: override the default read callback on trace_pipe
407 * @splice_read: override the default splice_read callback on trace_pipe
408 * @selftest: selftest to run on boot (see trace_selftest.c)
409 * @print_headers: override the first lines that describe your columns
410 * @print_line: callback that prints a trace
411 * @set_flag: signals one of your private flags changed (trace_options file)
412 * @flags: your private flags
413 */
414 struct tracer {
415 const char *name;
416 int (*init)(struct trace_array *tr);
417 void (*reset)(struct trace_array *tr);
418 void (*start)(struct trace_array *tr);
419 void (*stop)(struct trace_array *tr);
420 int (*update_thresh)(struct trace_array *tr);
421 void (*open)(struct trace_iterator *iter);
422 void (*pipe_open)(struct trace_iterator *iter);
423 void (*close)(struct trace_iterator *iter);
424 void (*pipe_close)(struct trace_iterator *iter);
425 ssize_t (*read)(struct trace_iterator *iter,
426 struct file *filp, char __user *ubuf,
427 size_t cnt, loff_t *ppos);
428 ssize_t (*splice_read)(struct trace_iterator *iter,
429 struct file *filp,
430 loff_t *ppos,
431 struct pipe_inode_info *pipe,
432 size_t len,
433 unsigned int flags);
434 #ifdef CONFIG_FTRACE_STARTUP_TEST
435 int (*selftest)(struct tracer *trace,
436 struct trace_array *tr);
437 #endif
438 void (*print_header)(struct seq_file *m);
439 enum print_line_t (*print_line)(struct trace_iterator *iter);
440 /* If you handled the flag setting, return 0 */
441 int (*set_flag)(struct trace_array *tr,
442 u32 old_flags, u32 bit, int set);
443 /* Return 0 if OK with change, else return non-zero */
444 int (*flag_changed)(struct trace_array *tr,
445 u32 mask, int set);
446 struct tracer *next;
447 struct tracer_flags *flags;
448 int enabled;
449 int ref;
450 bool print_max;
451 bool allow_instances;
452 #ifdef CONFIG_TRACER_MAX_TRACE
453 bool use_max_tr;
454 #endif
455 /* True if tracer cannot be enabled in kernel param */
456 bool noboot;
457 };
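/*
 * Illustrative sketch (not part of the original header): the bare minimum a
 * tracer supplies before registering itself.  All names below are
 * hypothetical; register_tracer() is declared further down in this header,
 * and the initcall hook is just one plausible way to wire it up.
 */
static int example_tracer_init(struct trace_array *tr)
{
	return 0;	/* nothing to set up for this do-nothing tracer */
}

static void example_tracer_reset(struct trace_array *tr)
{
}

static struct tracer example_tracer __read_mostly = {
	.name	= "example",
	.init	= example_tracer_init,
	.reset	= example_tracer_reset,
};

static __init int register_example_tracer(void)
{
	return register_tracer(&example_tracer);
}
core_initcall(register_example_tracer);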
460 /* Only current can touch trace_recursion */
462 /*
463 * For function tracing recursion:
464 * The order of these bits are important.
466 * When function tracing occurs, the following steps are made:
467 * If arch does not support a ftrace feature:
468 * call internal function (uses INTERNAL bits) which calls...
469 * If callback is registered to the "global" list, the list
470 * function is called and recursion checks the GLOBAL bits.
471 * then this function calls...
472 * The function callback, which can use the FTRACE bits to
473 * check for recursion.
475 * Now if the arch does not support a feature, and it calls
476 * the global list function which calls the ftrace callback
477 * all three of these steps will do a recursion protection.
478 * There's no reason to do one if the previous caller already
479 * did. The recursion that we are protecting against will
480 * go through the same steps again.
482 * To prevent the multiple recursion checks, if a recursion
483 * bit is set that is higher than the MAX bit of the current
484 * check, then we know that the check was made by the previous
485 * caller, and we can skip the current check.
486 */
487 enum {
488 TRACE_BUFFER_BIT,
489 TRACE_BUFFER_NMI_BIT,
490 TRACE_BUFFER_IRQ_BIT,
491 TRACE_BUFFER_SIRQ_BIT,
493 /* Start of function recursion bits */
494 TRACE_FTRACE_BIT,
495 TRACE_FTRACE_NMI_BIT,
496 TRACE_FTRACE_IRQ_BIT,
497 TRACE_FTRACE_SIRQ_BIT,
499 /* INTERNAL_BITs must be greater than FTRACE_BITs */
500 TRACE_INTERNAL_BIT,
501 TRACE_INTERNAL_NMI_BIT,
502 TRACE_INTERNAL_IRQ_BIT,
503 TRACE_INTERNAL_SIRQ_BIT,
505 TRACE_BRANCH_BIT,
506 /*
507 * Abuse of the trace_recursion.
508 * As we need a way to maintain state if we are tracing the function
509 * graph in irq because we want to trace a particular function that
510 * was called in irq context but we have irq tracing off. Since this
511 * can only be modified by current, we can reuse trace_recursion.
512 */
513 TRACE_IRQ_BIT,
516 #define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0)
517 #define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
518 #define trace_recursion_test(bit) ((current)->trace_recursion & (1<<(bit)))
520 #define TRACE_CONTEXT_BITS 4
522 #define TRACE_FTRACE_START TRACE_FTRACE_BIT
523 #define TRACE_FTRACE_MAX ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)
525 #define TRACE_LIST_START TRACE_INTERNAL_BIT
526 #define TRACE_LIST_MAX ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
528 #define TRACE_CONTEXT_MASK TRACE_LIST_MAX
530 static __always_inline int trace_get_context_bit(void)
532 int bit;
534 if (in_interrupt()) {
535 if (in_nmi())
536 bit = 0;
538 else if (in_irq())
539 bit = 1;
540 else
541 bit = 2;
542 } else
543 bit = 3;
545 return bit;
548 static __always_inline int trace_test_and_set_recursion(int start, int max)
550 unsigned int val = current->trace_recursion;
551 int bit;
553 /* A previous recursion check was made */
554 if ((val & TRACE_CONTEXT_MASK) > max)
555 return 0;
557 bit = trace_get_context_bit() + start;
558 if (unlikely(val & (1 << bit)))
559 return -1;
561 val |= 1 << bit;
562 current->trace_recursion = val;
563 barrier();
565 return bit;
568 static __always_inline void trace_clear_recursion(int bit)
570 unsigned int val = current->trace_recursion;
572 if (!bit)
573 return;
575 bit = 1 << bit;
576 val &= ~bit;
578 barrier();
579 current->trace_recursion = val;
580 }
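/*
 * Illustrative sketch (not part of the original header): the usual pattern
 * for a function-trace callback that wants the per-context recursion
 * protection implemented above.  my_func_callback() is hypothetical; the
 * signature matches ftrace_func_t.
 */
static void my_func_callback(unsigned long ip, unsigned long parent_ip,
			     struct ftrace_ops *op, struct pt_regs *regs)
{
	int bit;

	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
	if (bit < 0)
		return;		/* already recursing in this context; bail out */

	/* ... do the real tracing work here ... */

	trace_clear_recursion(bit);
}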
582 static inline struct ring_buffer_iter *
583 trace_buffer_iter(struct trace_iterator *iter, int cpu)
585 if (iter->buffer_iter && iter->buffer_iter[cpu])
586 return iter->buffer_iter[cpu];
587 return NULL;
590 int tracer_init(struct tracer *t, struct trace_array *tr);
591 int tracing_is_enabled(void);
592 void tracing_reset(struct trace_buffer *buf, int cpu);
593 void tracing_reset_online_cpus(struct trace_buffer *buf);
594 void tracing_reset_current(int cpu);
595 void tracing_reset_all_online_cpus(void);
596 int tracing_open_generic(struct inode *inode, struct file *filp);
597 bool tracing_is_disabled(void);
598 int tracer_tracing_is_on(struct trace_array *tr);
599 void tracer_tracing_on(struct trace_array *tr);
600 void tracer_tracing_off(struct trace_array *tr);
601 struct dentry *trace_create_file(const char *name,
602 umode_t mode,
603 struct dentry *parent,
604 void *data,
605 const struct file_operations *fops);
607 struct dentry *tracing_init_dentry(void);
609 struct ring_buffer_event;
611 struct ring_buffer_event *
612 trace_buffer_lock_reserve(struct ring_buffer *buffer,
613 int type,
614 unsigned long len,
615 unsigned long flags,
616 int pc);
618 struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
619 struct trace_array_cpu *data);
621 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
622 int *ent_cpu, u64 *ent_ts);
624 void trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
625 struct ring_buffer_event *event);
627 int trace_empty(struct trace_iterator *iter);
629 void *trace_find_next_entry_inc(struct trace_iterator *iter);
631 void trace_init_global_iter(struct trace_iterator *iter);
633 void tracing_iter_reset(struct trace_iterator *iter, int cpu);
635 void trace_function(struct trace_array *tr,
636 unsigned long ip,
637 unsigned long parent_ip,
638 unsigned long flags, int pc);
639 void trace_graph_function(struct trace_array *tr,
640 unsigned long ip,
641 unsigned long parent_ip,
642 unsigned long flags, int pc);
643 void trace_latency_header(struct seq_file *m);
644 void trace_default_header(struct seq_file *m);
645 void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
646 int trace_empty(struct trace_iterator *iter);
648 void trace_graph_return(struct ftrace_graph_ret *trace);
649 int trace_graph_entry(struct ftrace_graph_ent *trace);
650 void set_graph_array(struct trace_array *tr);
652 void tracing_start_cmdline_record(void);
653 void tracing_stop_cmdline_record(void);
654 void tracing_start_tgid_record(void);
655 void tracing_stop_tgid_record(void);
657 int register_tracer(struct tracer *type);
658 int is_tracing_stopped(void);
660 loff_t tracing_lseek(struct file *file, loff_t offset, int whence);
662 extern cpumask_var_t __read_mostly tracing_buffer_mask;
664 #define for_each_tracing_cpu(cpu) \
665 for_each_cpu(cpu, tracing_buffer_mask)
667 extern unsigned long nsecs_to_usecs(unsigned long nsecs);
669 extern unsigned long tracing_thresh;
671 /* PID filtering */
673 extern int pid_max;
675 bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
676 pid_t search_pid);
677 bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
678 struct task_struct *task);
679 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
680 struct task_struct *self,
681 struct task_struct *task);
682 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
683 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
684 int trace_pid_show(struct seq_file *m, void *v);
685 void trace_free_pid_list(struct trace_pid_list *pid_list);
686 int trace_pid_write(struct trace_pid_list *filtered_pids,
687 struct trace_pid_list **new_pid_list,
688 const char __user *ubuf, size_t cnt);
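/*
 * Illustrative sketch (not part of the original header): the usual RCU
 * pattern for consulting a trace_pid_list from a hot path.  The helper name
 * example_should_ignore() is hypothetical.
 */
static bool example_should_ignore(struct trace_array *tr,
				  struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	bool ignore;

	rcu_read_lock_sched();
	pid_list = rcu_dereference_sched(tr->filtered_pids);
	/* A NULL pid_list means "no filtering"; the helper handles that case. */
	ignore = trace_ignore_this_task(pid_list, task);
	rcu_read_unlock_sched();

	return ignore;
}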
690 #ifdef CONFIG_TRACER_MAX_TRACE
691 void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
692 void update_max_tr_single(struct trace_array *tr,
693 struct task_struct *tsk, int cpu);
694 #endif /* CONFIG_TRACER_MAX_TRACE */
696 #ifdef CONFIG_STACKTRACE
697 void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
698 int pc);
700 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
701 int pc);
702 #else
703 static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
704 unsigned long flags, int pc)
708 static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
709 int skip, int pc)
712 #endif /* CONFIG_STACKTRACE */
714 extern u64 ftrace_now(int cpu);
716 extern void trace_find_cmdline(int pid, char comm[]);
717 extern int trace_find_tgid(int pid);
718 extern void trace_event_follow_fork(struct trace_array *tr, bool enable);
720 #ifdef CONFIG_DYNAMIC_FTRACE
721 extern unsigned long ftrace_update_tot_cnt;
722 void ftrace_init_trace_array(struct trace_array *tr);
723 #else
724 static inline void ftrace_init_trace_array(struct trace_array *tr) { }
725 #endif
726 #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
727 extern int DYN_FTRACE_TEST_NAME(void);
728 #define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
729 extern int DYN_FTRACE_TEST_NAME2(void);
731 extern bool ring_buffer_expanded;
732 extern bool tracing_selftest_disabled;
734 #ifdef CONFIG_FTRACE_STARTUP_TEST
735 extern int trace_selftest_startup_function(struct tracer *trace,
736 struct trace_array *tr);
737 extern int trace_selftest_startup_function_graph(struct tracer *trace,
738 struct trace_array *tr);
739 extern int trace_selftest_startup_irqsoff(struct tracer *trace,
740 struct trace_array *tr);
741 extern int trace_selftest_startup_preemptoff(struct tracer *trace,
742 struct trace_array *tr);
743 extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
744 struct trace_array *tr);
745 extern int trace_selftest_startup_wakeup(struct tracer *trace,
746 struct trace_array *tr);
747 extern int trace_selftest_startup_nop(struct tracer *trace,
748 struct trace_array *tr);
749 extern int trace_selftest_startup_branch(struct tracer *trace,
750 struct trace_array *tr);
751 /*
752 * Tracer data references selftest functions that only occur
753 * on boot up. These can be __init functions. Thus, when selftests
754 * are enabled, then the tracers need to reference __init functions.
755 */
756 #define __tracer_data __refdata
757 #else
758 /* Tracers are seldom changed. Optimize when selftests are disabled. */
759 #define __tracer_data __read_mostly
760 #endif /* CONFIG_FTRACE_STARTUP_TEST */
762 extern void *head_page(struct trace_array_cpu *data);
763 extern unsigned long long ns2usecs(u64 nsec);
764 extern int
765 trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
766 extern int
767 trace_vprintk(unsigned long ip, const char *fmt, va_list args);
768 extern int
769 trace_array_vprintk(struct trace_array *tr,
770 unsigned long ip, const char *fmt, va_list args);
771 int trace_array_printk(struct trace_array *tr,
772 unsigned long ip, const char *fmt, ...);
773 int trace_array_printk_buf(struct ring_buffer *buffer,
774 unsigned long ip, const char *fmt, ...);
775 void trace_printk_seq(struct trace_seq *s);
776 enum print_line_t print_trace_line(struct trace_iterator *iter);
778 extern char trace_find_mark(unsigned long long duration);
780 struct ftrace_hash;
782 struct ftrace_mod_load {
783 struct list_head list;
784 char *func;
785 char *module;
786 int enable;
789 enum {
790 FTRACE_HASH_FL_MOD = (1 << 0),
793 struct ftrace_hash {
794 unsigned long size_bits;
795 struct hlist_head *buckets;
796 unsigned long count;
797 unsigned long flags;
798 struct rcu_head rcu;
801 struct ftrace_func_entry *
802 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip);
804 static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
806 return !hash || !(hash->count || (hash->flags & FTRACE_HASH_FL_MOD));
809 /* Standard output formatting function used for function return traces */
810 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
812 /* Flag options */
813 #define TRACE_GRAPH_PRINT_OVERRUN 0x1
814 #define TRACE_GRAPH_PRINT_CPU 0x2
815 #define TRACE_GRAPH_PRINT_OVERHEAD 0x4
816 #define TRACE_GRAPH_PRINT_PROC 0x8
817 #define TRACE_GRAPH_PRINT_DURATION 0x10
818 #define TRACE_GRAPH_PRINT_ABS_TIME 0x20
819 #define TRACE_GRAPH_PRINT_IRQS 0x40
820 #define TRACE_GRAPH_PRINT_TAIL 0x80
821 #define TRACE_GRAPH_SLEEP_TIME 0x100
822 #define TRACE_GRAPH_GRAPH_TIME 0x200
823 #define TRACE_GRAPH_PRINT_FILL_SHIFT 28
824 #define TRACE_GRAPH_PRINT_FILL_MASK (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
826 extern void ftrace_graph_sleep_time_control(bool enable);
827 extern void ftrace_graph_graph_time_control(bool enable);
829 extern enum print_line_t
830 print_graph_function_flags(struct trace_iterator *iter, u32 flags);
831 extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
832 extern void
833 trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
834 extern void graph_trace_open(struct trace_iterator *iter);
835 extern void graph_trace_close(struct trace_iterator *iter);
836 extern int __trace_graph_entry(struct trace_array *tr,
837 struct ftrace_graph_ent *trace,
838 unsigned long flags, int pc);
839 extern void __trace_graph_return(struct trace_array *tr,
840 struct ftrace_graph_ret *trace,
841 unsigned long flags, int pc);
843 #ifdef CONFIG_DYNAMIC_FTRACE
844 extern struct ftrace_hash *ftrace_graph_hash;
845 extern struct ftrace_hash *ftrace_graph_notrace_hash;
847 static inline int ftrace_graph_addr(unsigned long addr)
849 int ret = 0;
851 preempt_disable_notrace();
853 if (ftrace_hash_empty(ftrace_graph_hash)) {
854 ret = 1;
855 goto out;
858 if (ftrace_lookup_ip(ftrace_graph_hash, addr)) {
859 /*
860 * If no irqs are to be traced, but a set_graph_function
861 * is set, and called by an interrupt handler, we still
862 * want to trace it.
863 */
864 if (in_irq())
865 trace_recursion_set(TRACE_IRQ_BIT);
866 else
867 trace_recursion_clear(TRACE_IRQ_BIT);
868 ret = 1;
871 out:
872 preempt_enable_notrace();
873 return ret;
876 static inline int ftrace_graph_notrace_addr(unsigned long addr)
878 int ret = 0;
880 preempt_disable_notrace();
882 if (ftrace_lookup_ip(ftrace_graph_notrace_hash, addr))
883 ret = 1;
885 preempt_enable_notrace();
886 return ret;
888 #else
889 static inline int ftrace_graph_addr(unsigned long addr)
891 return 1;
894 static inline int ftrace_graph_notrace_addr(unsigned long addr)
896 return 0;
898 #endif /* CONFIG_DYNAMIC_FTRACE */
900 extern unsigned int fgraph_max_depth;
902 static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace)
903 {
904 /* Trace it when it is nested in an enabled function, or is itself enabled. */
905 return !(trace->depth || ftrace_graph_addr(trace->func)) ||
906 (trace->depth < 0) ||
907 (fgraph_max_depth && trace->depth >= fgraph_max_depth);
908 }
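/*
 * Illustrative sketch (not part of the original header): how a graph-entry
 * handler typically consults ftrace_graph_ignore_func() before handing the
 * call to trace_graph_entry().  The wrapper name is hypothetical.
 */
static int example_graph_entry(struct ftrace_graph_ent *trace)
{
	if (ftrace_graph_ignore_func(trace))
		return 0;	/* returning 0 tells the caller not to trace this call */

	return trace_graph_entry(trace);
}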
910 #else /* CONFIG_FUNCTION_GRAPH_TRACER */
911 static inline enum print_line_t
912 print_graph_function_flags(struct trace_iterator *iter, u32 flags)
914 return TRACE_TYPE_UNHANDLED;
916 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
918 extern struct list_head ftrace_pids;
920 #ifdef CONFIG_FUNCTION_TRACER
921 struct ftrace_func_command {
922 struct list_head list;
923 char *name;
924 int (*func)(struct trace_array *tr,
925 struct ftrace_hash *hash,
926 char *func, char *cmd,
927 char *params, int enable);
929 extern bool ftrace_filter_param __initdata;
930 static inline int ftrace_trace_task(struct trace_array *tr)
932 return !this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid);
934 extern int ftrace_is_dead(void);
935 int ftrace_create_function_files(struct trace_array *tr,
936 struct dentry *parent);
937 void ftrace_destroy_function_files(struct trace_array *tr);
938 void ftrace_init_global_array_ops(struct trace_array *tr);
939 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
940 void ftrace_reset_array_ops(struct trace_array *tr);
941 int using_ftrace_ops_list_func(void);
942 void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
943 void ftrace_init_tracefs_toplevel(struct trace_array *tr,
944 struct dentry *d_tracer);
945 void ftrace_clear_pids(struct trace_array *tr);
946 int init_function_trace(void);
947 void ftrace_pid_follow_fork(struct trace_array *tr, bool enable);
948 #else
949 static inline int ftrace_trace_task(struct trace_array *tr)
951 return 1;
953 static inline int ftrace_is_dead(void) { return 0; }
954 static inline int
955 ftrace_create_function_files(struct trace_array *tr,
956 struct dentry *parent)
958 return 0;
960 static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
961 static inline __init void
962 ftrace_init_global_array_ops(struct trace_array *tr) { }
963 static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
964 static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
965 static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
966 static inline void ftrace_clear_pids(struct trace_array *tr) { }
967 static inline int init_function_trace(void) { return 0; }
968 static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { }
969 /* ftrace_func_t type is not defined, use macro instead of static inline */
970 #define ftrace_init_array_ops(tr, func) do { } while (0)
971 #endif /* CONFIG_FUNCTION_TRACER */
973 #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
975 struct ftrace_probe_ops {
976 void (*func)(unsigned long ip,
977 unsigned long parent_ip,
978 struct trace_array *tr,
979 struct ftrace_probe_ops *ops,
980 void *data);
981 int (*init)(struct ftrace_probe_ops *ops,
982 struct trace_array *tr,
983 unsigned long ip, void *init_data,
984 void **data);
985 void (*free)(struct ftrace_probe_ops *ops,
986 struct trace_array *tr,
987 unsigned long ip, void *data);
988 int (*print)(struct seq_file *m,
989 unsigned long ip,
990 struct ftrace_probe_ops *ops,
991 void *data);
994 struct ftrace_func_mapper;
995 typedef int (*ftrace_mapper_func)(void *data);
997 struct ftrace_func_mapper *allocate_ftrace_func_mapper(void);
998 void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
999 unsigned long ip);
1000 int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
1001 unsigned long ip, void *data);
1002 void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
1003 unsigned long ip);
1004 void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
1005 ftrace_mapper_func free_func);
1007 extern int
1008 register_ftrace_function_probe(char *glob, struct trace_array *tr,
1009 struct ftrace_probe_ops *ops, void *data);
1010 extern int
1011 unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
1012 struct ftrace_probe_ops *ops);
1013 extern void clear_ftrace_function_probes(struct trace_array *tr);
1015 int register_ftrace_command(struct ftrace_func_command *cmd);
1016 int unregister_ftrace_command(struct ftrace_func_command *cmd);
1018 void ftrace_create_filter_files(struct ftrace_ops *ops,
1019 struct dentry *parent);
1020 void ftrace_destroy_filter_files(struct ftrace_ops *ops);
1021 #else
1022 struct ftrace_func_command;
1024 static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
1026 return -EINVAL;
1028 static inline __init int unregister_ftrace_command(char *cmd_name)
1030 return -EINVAL;
1032 static inline void clear_ftrace_function_probes(struct trace_array *tr)
1033 {
1034 }
1036 /*
1037 * The ops parameter passed in is usually undefined.
1038 * This must be a macro.
1039 */
1040 #define ftrace_create_filter_files(ops, parent) do { } while (0)
1041 #define ftrace_destroy_filter_files(ops) do { } while (0)
1042 #endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */
1044 bool ftrace_event_is_function(struct trace_event_call *call);
1046 /*
1047 * struct trace_parser - serves for reading the user input separated by spaces
1048 * @cont: set if the input is not complete - no final space char was found
1049 * @buffer: holds the parsed user input
1050 * @idx: user input length
1051 * @size: buffer size
1052 */
1053 struct trace_parser {
1054 bool cont;
1055 char *buffer;
1056 unsigned idx;
1057 unsigned size;
1060 static inline bool trace_parser_loaded(struct trace_parser *parser)
1062 return (parser->idx != 0);
1065 static inline bool trace_parser_cont(struct trace_parser *parser)
1067 return parser->cont;
1070 static inline void trace_parser_clear(struct trace_parser *parser)
1072 parser->cont = false;
1073 parser->idx = 0;
1076 extern int trace_parser_get_init(struct trace_parser *parser, int size);
1077 extern void trace_parser_put(struct trace_parser *parser);
1078 extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1079 size_t cnt, loff_t *ppos);
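/*
 * Illustrative sketch (not part of the original header): the typical
 * write-handler loop built on struct trace_parser.  The handler name, the
 * 127-byte token size, and the "apply the token" step are all hypothetical.
 */
static ssize_t example_list_write(struct file *filp, const char __user *ubuf,
				  size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	int read;

	if (trace_parser_get_init(&parser, 127))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);
	if (read >= 0 && trace_parser_loaded(&parser) &&
	    !trace_parser_cont(&parser)) {
		/* parser.buffer now holds one NUL-terminated, space-separated token */
		/* ... look the token up and apply it ... */
		trace_parser_clear(&parser);
	}

	trace_parser_put(&parser);
	return read;
}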
1081 /*
1082 * Only create function graph options if function graph is configured.
1083 */
1084 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1085 # define FGRAPH_FLAGS \
1086 C(DISPLAY_GRAPH, "display-graph"),
1087 #else
1088 # define FGRAPH_FLAGS
1089 #endif
1091 #ifdef CONFIG_BRANCH_TRACER
1092 # define BRANCH_FLAGS \
1093 C(BRANCH, "branch"),
1094 #else
1095 # define BRANCH_FLAGS
1096 #endif
1098 #ifdef CONFIG_FUNCTION_TRACER
1099 # define FUNCTION_FLAGS \
1100 C(FUNCTION, "function-trace"), \
1101 C(FUNC_FORK, "function-fork"),
1102 # define FUNCTION_DEFAULT_FLAGS TRACE_ITER_FUNCTION
1103 #else
1104 # define FUNCTION_FLAGS
1105 # define FUNCTION_DEFAULT_FLAGS 0UL
1106 # define TRACE_ITER_FUNC_FORK 0UL
1107 #endif
1109 #ifdef CONFIG_STACKTRACE
1110 # define STACK_FLAGS \
1111 C(STACKTRACE, "stacktrace"),
1112 #else
1113 # define STACK_FLAGS
1114 #endif
1116 /*
1117 * trace_iterator_flags is an enumeration that defines bit
1118 * positions into trace_flags that control the output.
1119 *
1120 * NOTE: These bits must match the trace_options array in
1121 * trace.c (this macro guarantees it).
1122 */
1123 #define TRACE_FLAGS \
1124 C(PRINT_PARENT, "print-parent"), \
1125 C(SYM_OFFSET, "sym-offset"), \
1126 C(SYM_ADDR, "sym-addr"), \
1127 C(VERBOSE, "verbose"), \
1128 C(RAW, "raw"), \
1129 C(HEX, "hex"), \
1130 C(BIN, "bin"), \
1131 C(BLOCK, "block"), \
1132 C(PRINTK, "trace_printk"), \
1133 C(ANNOTATE, "annotate"), \
1134 C(USERSTACKTRACE, "userstacktrace"), \
1135 C(SYM_USEROBJ, "sym-userobj"), \
1136 C(PRINTK_MSGONLY, "printk-msg-only"), \
1137 C(CONTEXT_INFO, "context-info"), /* Print pid/cpu/time */ \
1138 C(LATENCY_FMT, "latency-format"), \
1139 C(RECORD_CMD, "record-cmd"), \
1140 C(RECORD_TGID, "record-tgid"), \
1141 C(OVERWRITE, "overwrite"), \
1142 C(STOP_ON_FREE, "disable_on_free"), \
1143 C(IRQ_INFO, "irq-info"), \
1144 C(MARKERS, "markers"), \
1145 C(EVENT_FORK, "event-fork"), \
1146 FUNCTION_FLAGS \
1147 FGRAPH_FLAGS \
1148 STACK_FLAGS \
1149 BRANCH_FLAGS
1151 /*
1152 * By defining C, we can make TRACE_FLAGS a list of bit names
1153 * that will define the bits for the flag masks.
1154 */
1155 #undef C
1156 #define C(a, b) TRACE_ITER_##a##_BIT
1158 enum trace_iterator_bits {
1159 TRACE_FLAGS
1160 /* Make sure we don't go more than we have bits for */
1161 TRACE_ITER_LAST_BIT
1162 };
1164 /*
1165 * By redefining C, we can make TRACE_FLAGS a list of masks that
1166 * use the bits as defined above.
1167 */
1168 #undef C
1169 #define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)
1171 enum trace_iterator_flags { TRACE_FLAGS };
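/*
 * Illustrative sketch (not part of the original header): once the two
 * expansions above have run, each C(name, string) entry yields both a
 * TRACE_ITER_name_BIT position and a TRACE_ITER_name mask, so an output
 * path can test an option directly.  The helper name is hypothetical.
 */
static inline bool example_verbose_output(struct trace_array *tr)
{
	return !!(tr->trace_flags & TRACE_ITER_VERBOSE);
}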
1173 /*
1174 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
1175 * control the output of kernel symbols.
1176 */
1177 #define TRACE_ITER_SYM_MASK \
1178 (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
1180 extern struct tracer nop_trace;
1182 #ifdef CONFIG_BRANCH_TRACER
1183 extern int enable_branch_tracing(struct trace_array *tr);
1184 extern void disable_branch_tracing(void);
1185 static inline int trace_branch_enable(struct trace_array *tr)
1187 if (tr->trace_flags & TRACE_ITER_BRANCH)
1188 return enable_branch_tracing(tr);
1189 return 0;
1191 static inline void trace_branch_disable(void)
1193 /* due to races, always disable */
1194 disable_branch_tracing();
1196 #else
1197 static inline int trace_branch_enable(struct trace_array *tr)
1199 return 0;
1201 static inline void trace_branch_disable(void)
1204 #endif /* CONFIG_BRANCH_TRACER */
1206 /* set ring buffers to default size if not already done so */
1207 int tracing_update_buffers(void);
1209 struct ftrace_event_field {
1210 struct list_head link;
1211 const char *name;
1212 const char *type;
1213 int filter_type;
1214 int offset;
1215 int size;
1216 int is_signed;
1219 struct prog_entry;
1221 struct event_filter {
1222 struct prog_entry __rcu *prog;
1223 char *filter_string;
1226 struct event_subsystem {
1227 struct list_head list;
1228 const char *name;
1229 struct event_filter *filter;
1230 int ref_count;
1233 struct trace_subsystem_dir {
1234 struct list_head list;
1235 struct event_subsystem *subsystem;
1236 struct trace_array *tr;
1237 struct dentry *entry;
1238 int ref_count;
1239 int nr_events;
1242 extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
1243 struct ring_buffer *buffer,
1244 struct ring_buffer_event *event);
1246 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
1247 struct ring_buffer *buffer,
1248 struct ring_buffer_event *event,
1249 unsigned long flags, int pc,
1250 struct pt_regs *regs);
1252 static inline void trace_buffer_unlock_commit(struct trace_array *tr,
1253 struct ring_buffer *buffer,
1254 struct ring_buffer_event *event,
1255 unsigned long flags, int pc)
1257 trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL);
1260 DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
1261 DECLARE_PER_CPU(int, trace_buffered_event_cnt);
1262 void trace_buffered_event_disable(void);
1263 void trace_buffered_event_enable(void);
1265 static inline void
1266 __trace_event_discard_commit(struct ring_buffer *buffer,
1267 struct ring_buffer_event *event)
1269 if (this_cpu_read(trace_buffered_event) == event) {
1270 /* Simply release the temp buffer */
1271 this_cpu_dec(trace_buffered_event_cnt);
1272 return;
1274 ring_buffer_discard_commit(buffer, event);
1277 /*
1278 * Helper function for event_trigger_unlock_commit{_regs}().
1279 * If there are event triggers attached to this event that require
1280 * filtering against its fields, then they will be called as the
1281 * entry already holds the field information of the current event.
1283 * It also checks if the event should be discarded or not.
1284 * It is to be discarded if the event is soft disabled and the
1285 * event was only recorded to process triggers, or if the event
1286 * filter is active and this event did not match the filters.
1288 * Returns true if the event is discarded, false otherwise.
1289 */
1290 static inline bool
1291 __event_trigger_test_discard(struct trace_event_file *file,
1292 struct ring_buffer *buffer,
1293 struct ring_buffer_event *event,
1294 void *entry,
1295 enum event_trigger_type *tt)
1297 unsigned long eflags = file->flags;
1299 if (eflags & EVENT_FILE_FL_TRIGGER_COND)
1300 *tt = event_triggers_call(file, entry, event);
1302 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
1303 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
1304 !filter_match_preds(file->filter, entry))) {
1305 __trace_event_discard_commit(buffer, event);
1306 return true;
1309 return false;
1312 /**
1313 * event_trigger_unlock_commit - handle triggers and finish event commit
1314 * @file: The file pointer associated to the event
1315 * @buffer: The ring buffer that the event is being written to
1316 * @event: The event meta data in the ring buffer
1317 * @entry: The event itself
1318 * @irq_flags: The state of the interrupts at the start of the event
1319 * @pc: The state of the preempt count at the start of the event.
1321 * This is a helper function to handle triggers that require data
1322 * from the event itself. It also tests the event against filters and
1323 * if the event is soft disabled and should be discarded.
1324 */
1325 static inline void
1326 event_trigger_unlock_commit(struct trace_event_file *file,
1327 struct ring_buffer *buffer,
1328 struct ring_buffer_event *event,
1329 void *entry, unsigned long irq_flags, int pc)
1331 enum event_trigger_type tt = ETT_NONE;
1333 if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
1334 trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
1336 if (tt)
1337 event_triggers_post_call(file, tt, entry, event);
1340 /**
1341 * event_trigger_unlock_commit_regs - handle triggers and finish event commit
1342 * @file: The file pointer associated to the event
1343 * @buffer: The ring buffer that the event is being written to
1344 * @event: The event meta data in the ring buffer
1345 * @entry: The event itself
1346 * @irq_flags: The state of the interrupts at the start of the event
1347 * @pc: The state of the preempt count at the start of the event.
1349 * This is a helper function to handle triggers that require data
1350 * from the event itself. It also tests the event against filters and
1351 * if the event is soft disabled and should be discarded.
1353 * Same as event_trigger_unlock_commit() but calls
1354 * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
1355 */
1356 static inline void
1357 event_trigger_unlock_commit_regs(struct trace_event_file *file,
1358 struct ring_buffer *buffer,
1359 struct ring_buffer_event *event,
1360 void *entry, unsigned long irq_flags, int pc,
1361 struct pt_regs *regs)
1363 enum event_trigger_type tt = ETT_NONE;
1365 if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
1366 trace_buffer_unlock_commit_regs(file->tr, buffer, event,
1367 irq_flags, pc, regs);
1369 if (tt)
1370 event_triggers_post_call(file, tt, entry, event);
1373 #define FILTER_PRED_INVALID ((unsigned short)-1)
1374 #define FILTER_PRED_IS_RIGHT (1 << 15)
1375 #define FILTER_PRED_FOLD (1 << 15)
1377 /*
1378 * The max preds is the size of unsigned short with
1379 * two flags at the MSBs. One bit is used for both the IS_RIGHT
1380 * and FOLD flags. The other is reserved.
1381 *
1382 * 2^14 preds is way more than enough.
1383 */
1384 #define MAX_FILTER_PRED 16384
1386 struct filter_pred;
1387 struct regex;
1389 typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);
1391 typedef int (*regex_match_func)(char *str, struct regex *r, int len);
1393 enum regex_type {
1394 MATCH_FULL = 0,
1395 MATCH_FRONT_ONLY,
1396 MATCH_MIDDLE_ONLY,
1397 MATCH_END_ONLY,
1398 MATCH_GLOB,
1401 struct regex {
1402 char pattern[MAX_FILTER_STR_VAL];
1403 int len;
1404 int field_len;
1405 regex_match_func match;
1408 struct filter_pred {
1409 filter_pred_fn_t fn;
1410 u64 val;
1411 struct regex regex;
1412 unsigned short *ops;
1413 struct ftrace_event_field *field;
1414 int offset;
1415 int not;
1416 int op;
1419 static inline bool is_string_field(struct ftrace_event_field *field)
1421 return field->filter_type == FILTER_DYN_STRING ||
1422 field->filter_type == FILTER_STATIC_STRING ||
1423 field->filter_type == FILTER_PTR_STRING ||
1424 field->filter_type == FILTER_COMM;
1427 static inline bool is_function_field(struct ftrace_event_field *field)
1429 return field->filter_type == FILTER_TRACE_FN;
1432 extern enum regex_type
1433 filter_parse_regex(char *buff, int len, char **search, int *not);
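/*
 * Illustrative sketch (not part of the original header): a worked example of
 * how callers split a glob with filter_parse_regex().  For the pattern
 * "*lock*" it reports a middle-only match, points *search at "lock", and
 * leaves *not clear (*not is set only for patterns beginning with '!').
 * The wrapper name is hypothetical.
 */
static inline enum regex_type example_parse_glob(char *buf, char **search,
						 int *not)
{
	/* e.g. buf = "*lock*"  ->  MATCH_MIDDLE_ONLY, *search points at "lock" */
	return filter_parse_regex(buf, strlen(buf), search, not);
}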
1434 extern void print_event_filter(struct trace_event_file *file,
1435 struct trace_seq *s);
1436 extern int apply_event_filter(struct trace_event_file *file,
1437 char *filter_string);
1438 extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
1439 char *filter_string);
1440 extern void print_subsystem_event_filter(struct event_subsystem *system,
1441 struct trace_seq *s);
1442 extern int filter_assign_type(const char *type);
1443 extern int create_event_filter(struct trace_event_call *call,
1444 char *filter_str, bool set_str,
1445 struct event_filter **filterp);
1446 extern void free_event_filter(struct event_filter *filter);
1448 struct ftrace_event_field *
1449 trace_find_event_field(struct trace_event_call *call, char *name);
1451 extern void trace_event_enable_cmd_record(bool enable);
1452 extern void trace_event_enable_tgid_record(bool enable);
1454 extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
1455 extern int event_trace_del_tracer(struct trace_array *tr);
1457 extern struct trace_event_file *find_event_file(struct trace_array *tr,
1458 const char *system,
1459 const char *event);
1461 static inline void *event_file_data(struct file *filp)
1463 return READ_ONCE(file_inode(filp)->i_private);
1466 extern struct mutex event_mutex;
1467 extern struct list_head ftrace_events;
1469 extern const struct file_operations event_trigger_fops;
1470 extern const struct file_operations event_hist_fops;
1472 #ifdef CONFIG_HIST_TRIGGERS
1473 extern int register_trigger_hist_cmd(void);
1474 extern int register_trigger_hist_enable_disable_cmds(void);
1475 #else
1476 static inline int register_trigger_hist_cmd(void) { return 0; }
1477 static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
1478 #endif
1480 extern int register_trigger_cmds(void);
1481 extern void clear_event_triggers(struct trace_array *tr);
1483 struct event_trigger_data {
1484 unsigned long count;
1485 int ref;
1486 struct event_trigger_ops *ops;
1487 struct event_command *cmd_ops;
1488 struct event_filter __rcu *filter;
1489 char *filter_str;
1490 void *private_data;
1491 bool paused;
1492 bool paused_tmp;
1493 struct list_head list;
1494 char *name;
1495 struct list_head named_list;
1496 struct event_trigger_data *named_data;
1499 /* Avoid typos */
1500 #define ENABLE_EVENT_STR "enable_event"
1501 #define DISABLE_EVENT_STR "disable_event"
1502 #define ENABLE_HIST_STR "enable_hist"
1503 #define DISABLE_HIST_STR "disable_hist"
1505 struct enable_trigger_data {
1506 struct trace_event_file *file;
1507 bool enable;
1508 bool hist;
1511 extern int event_enable_trigger_print(struct seq_file *m,
1512 struct event_trigger_ops *ops,
1513 struct event_trigger_data *data);
1514 extern void event_enable_trigger_free(struct event_trigger_ops *ops,
1515 struct event_trigger_data *data);
1516 extern int event_enable_trigger_func(struct event_command *cmd_ops,
1517 struct trace_event_file *file,
1518 char *glob, char *cmd, char *param);
1519 extern int event_enable_register_trigger(char *glob,
1520 struct event_trigger_ops *ops,
1521 struct event_trigger_data *data,
1522 struct trace_event_file *file);
1523 extern void event_enable_unregister_trigger(char *glob,
1524 struct event_trigger_ops *ops,
1525 struct event_trigger_data *test,
1526 struct trace_event_file *file);
1527 extern void trigger_data_free(struct event_trigger_data *data);
1528 extern int event_trigger_init(struct event_trigger_ops *ops,
1529 struct event_trigger_data *data);
1530 extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
1531 int trigger_enable);
1532 extern void update_cond_flag(struct trace_event_file *file);
1533 extern void unregister_trigger(char *glob, struct event_trigger_ops *ops,
1534 struct event_trigger_data *test,
1535 struct trace_event_file *file);
1536 extern int set_trigger_filter(char *filter_str,
1537 struct event_trigger_data *trigger_data,
1538 struct trace_event_file *file);
1539 extern struct event_trigger_data *find_named_trigger(const char *name);
1540 extern bool is_named_trigger(struct event_trigger_data *test);
1541 extern int save_named_trigger(const char *name,
1542 struct event_trigger_data *data);
1543 extern void del_named_trigger(struct event_trigger_data *data);
1544 extern void pause_named_trigger(struct event_trigger_data *data);
1545 extern void unpause_named_trigger(struct event_trigger_data *data);
1546 extern void set_named_trigger_data(struct event_trigger_data *data,
1547 struct event_trigger_data *named_data);
1548 extern struct event_trigger_data *
1549 get_named_trigger_data(struct event_trigger_data *data);
1550 extern int register_event_command(struct event_command *cmd);
1551 extern int unregister_event_command(struct event_command *cmd);
1552 extern int register_trigger_hist_enable_disable_cmds(void);
1554 /**
1555 * struct event_trigger_ops - callbacks for trace event triggers
1557 * The methods in this structure provide per-event trigger hooks for
1558 * various trigger operations.
1560 * All the methods below, except for @init() and @free(), must be
1561 * implemented.
1563 * @func: The trigger 'probe' function called when the triggering
1564 * event occurs. The data passed into this callback is the data
1565 * that was supplied to the event_command @reg() function that
1566 * registered the trigger (see struct event_command) along with
1567 * the trace record, rec.
1569 * @init: An optional initialization function called for the trigger
1570 * when the trigger is registered (via the event_command reg()
1571 * function). This can be used to perform per-trigger
1572 * initialization such as incrementing a per-trigger reference
1573 * count, for instance. This is usually implemented by the
1574 * generic utility function @event_trigger_init() (see
1575 * trace_event_triggers.c).
1577 * @free: An optional de-initialization function called for the
1578 * trigger when the trigger is unregistered (via the
1579 * event_command @reg() function). This can be used to perform
1580 * per-trigger de-initialization such as decrementing a
1581 * per-trigger reference count and freeing corresponding trigger
1582 * data, for instance. This is usually implemented by the
1583 * generic utility function @event_trigger_free() (see
1584 * trace_event_triggers.c).
1586 * @print: The callback function invoked to have the trigger print
1587 * itself. This is usually implemented by a wrapper function
1588 * that calls the generic utility function @event_trigger_print()
1589 * (see trace_event_triggers.c).
1590 */
1591 struct event_trigger_ops {
1592 void (*func)(struct event_trigger_data *data,
1593 void *rec,
1594 struct ring_buffer_event *rbe);
1595 int (*init)(struct event_trigger_ops *ops,
1596 struct event_trigger_data *data);
1597 void (*free)(struct event_trigger_ops *ops,
1598 struct event_trigger_data *data);
1599 int (*print)(struct seq_file *m,
1600 struct event_trigger_ops *ops,
1601 struct event_trigger_data *data);
1602 };
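/*
 * Illustrative sketch (not part of the original header): a minimal
 * event_trigger_ops instance.  The probe and print callbacks are
 * hypothetical; @init and @free are optional and left unset here.
 */
static void example_trigger_probe(struct event_trigger_data *data,
				  void *rec, struct ring_buffer_event *rbe)
{
	pr_info_ratelimited("example trigger fired\n");
}

static int example_trigger_print(struct seq_file *m,
				 struct event_trigger_ops *ops,
				 struct event_trigger_data *data)
{
	/* data->count is the user-supplied trigger count (-1 means unlimited) */
	seq_printf(m, "example:count=%lu\n", data->count);
	return 0;
}

static struct event_trigger_ops example_trigger_ops = {
	.func	= example_trigger_probe,
	.print	= example_trigger_print,
};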
1604 /**
1605 * struct event_command - callbacks and data members for event commands
1607 * Event commands are invoked by users by writing the command name
1608 * into the 'trigger' file associated with a trace event. The
1609 * parameters associated with a specific invocation of an event
1610 * command are used to create an event trigger instance, which is
1611 * added to the list of trigger instances associated with that trace
1612 * event. When the event is hit, the set of triggers associated with
1613 * that event is invoked.
1615 * The data members in this structure provide per-event command data
1616 * for various event commands.
1618 * All the data members below, except for @post_trigger, must be set
1619 * for each event command.
1621 * @name: The unique name that identifies the event command. This is
1622 * the name used when setting triggers via trigger files.
1624 * @trigger_type: A unique id that identifies the event command
1625 * 'type'. This value has two purposes, the first to ensure that
1626 * only one trigger of the same type can be set at a given time
1627 * for a particular event e.g. it doesn't make sense to have both
1628 * a traceon and traceoff trigger attached to a single event at
1629 * the same time, so traceon and traceoff have the same type
1630 * though they have different names. The @trigger_type value is
1631 * also used as a bit value for deferring the actual trigger
1632 * action until after the current event is finished. Some
1633 * commands need to do this if they themselves log to the trace
1634 * buffer (see the @post_trigger() member below). @trigger_type
1635 * values are defined by adding new values to the trigger_type
1636 * enum in include/linux/trace_events.h.
1638 * @flags: See the enum event_command_flags below.
1640 * All the methods below, except for @set_filter() and @unreg_all(),
1641 * must be implemented.
1643 * @func: The callback function responsible for parsing and
1644 * registering the trigger written to the 'trigger' file by the
1645 * user. It allocates the trigger instance and registers it with
1646 * the appropriate trace event. It makes use of the other
1647 * event_command callback functions to orchestrate this, and is
1648 * usually implemented by the generic utility function
1649 * @event_trigger_callback() (see trace_event_triggers.c).
1651 * @reg: Adds the trigger to the list of triggers associated with the
1652 * event, and enables the event trigger itself, after
1653 * initializing it (via the event_trigger_ops @init() function).
1654 * This is also where commands can use the @trigger_type value to
1655 * make the decision as to whether or not multiple instances of
1656 * the trigger should be allowed. This is usually implemented by
1657 * the generic utility function @register_trigger() (see
1658 * trace_event_triggers.c).
1660 * @unreg: Removes the trigger from the list of triggers associated
1661 * with the event, and disables the event trigger itself, after
1662 * initializing it (via the event_trigger_ops @free() function).
1663 * This is usually implemented by the generic utility function
1664 * @unregister_trigger() (see trace_event_triggers.c).
1666 * @unreg_all: An optional function called to remove all the triggers
1667 * from the list of triggers associated with the event. Called
1668 * when a trigger file is opened in truncate mode.
1670 * @set_filter: An optional function called to parse and set a filter
1671 * for the trigger. If no @set_filter() method is set for the
1672 * event command, filters set by the user for the command will be
1673 * ignored. This is usually implemented by the generic utility
1674 * function @set_trigger_filter() (see trace_event_triggers.c).
1676 * @get_trigger_ops: The callback function invoked to retrieve the
1677 * event_trigger_ops implementation associated with the command.
1678 */
1679 struct event_command {
1680 struct list_head list;
1681 char *name;
1682 enum event_trigger_type trigger_type;
1683 int flags;
1684 int (*func)(struct event_command *cmd_ops,
1685 struct trace_event_file *file,
1686 char *glob, char *cmd, char *params);
1687 int (*reg)(char *glob,
1688 struct event_trigger_ops *ops,
1689 struct event_trigger_data *data,
1690 struct trace_event_file *file);
1691 void (*unreg)(char *glob,
1692 struct event_trigger_ops *ops,
1693 struct event_trigger_data *data,
1694 struct trace_event_file *file);
1695 void (*unreg_all)(struct trace_event_file *file);
1696 int (*set_filter)(char *filter_str,
1697 struct event_trigger_data *data,
1698 struct trace_event_file *file);
1699 struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
1700 };
1702 /**
1703 * enum event_command_flags - flags for struct event_command
1705 * @POST_TRIGGER: A flag that says whether or not this command needs
1706 * to have its action delayed until after the current event has
1707 * been closed. Some triggers need to avoid being invoked while
1708 * an event is currently in the process of being logged, since
1709 * the trigger may itself log data into the trace buffer. Thus
1710 * we make sure the current event is committed before invoking
1711 * those triggers. To do that, the trigger invocation is split
1712 * in two - the first part checks the filter using the current
1713 * trace record; if a command has the @post_trigger flag set, it
1714 * sets a bit for itself in the return value, otherwise it
1715 * directly invokes the trigger. Once all commands have been
1716 * either invoked or set their return flag, the current record is
1717 * either committed or discarded. At that point, if any commands
1718 * have deferred their triggers, those commands are finally
1719 * invoked following the close of the current event. In other
1720 * words, if the event_trigger_ops @func() probe implementation
1721 * itself logs to the trace buffer, this flag should be set,
1722 * otherwise it can be left unspecified.
1724 * @NEEDS_REC: A flag that says whether or not this command needs
1725 * access to the trace record in order to perform its function,
1726 * regardless of whether or not it has a filter associated with
1727 * it (filters make a trigger require access to the trace record
1728 * but are not always present).
1729 */
1730 enum event_command_flags {
1731 EVENT_CMD_FL_POST_TRIGGER = 1,
1732 EVENT_CMD_FL_NEEDS_REC = 2,
1735 static inline bool event_command_post_trigger(struct event_command *cmd_ops)
1737 return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
1740 static inline bool event_command_needs_rec(struct event_command *cmd_ops)
1742 return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
1745 extern int trace_event_enable_disable(struct trace_event_file *file,
1746 int enable, int soft_disable);
1747 extern int tracing_alloc_snapshot(void);
1749 extern const char *__start___trace_bprintk_fmt[];
1750 extern const char *__stop___trace_bprintk_fmt[];
1752 extern const char *__start___tracepoint_str[];
1753 extern const char *__stop___tracepoint_str[];
1755 void trace_printk_control(bool enabled);
1756 void trace_printk_init_buffers(void);
1757 void trace_printk_start_comm(void);
1758 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
1759 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
1761 #define MAX_EVENT_NAME_LEN 64
1763 extern int trace_run_command(const char *buf, int (*createfn)(int, char**));
1764 extern ssize_t trace_parse_run_command(struct file *file,
1765 const char __user *buffer, size_t count, loff_t *ppos,
1766 int (*createfn)(int, char**));
1768 /*
1769 * Normal trace_printk() and friends allocate special buffers
1770 * to do the manipulation, as well as save the print formats
1771 * into sections to display. But the trace infrastructure wants
1772 * to use these without the added overhead at the price of being
1773 * a bit slower (used mainly for warnings, where we don't care
1774 * about performance). The internal_trace_puts() is for such
1775 * a purpose.
1776 */
1777 #define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
1779 #undef FTRACE_ENTRY
1780 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \
1781 extern struct trace_event_call \
1782 __aligned(4) event_##call;
1783 #undef FTRACE_ENTRY_DUP
1784 #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \
1785 FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
1786 filter)
1787 #undef FTRACE_ENTRY_PACKED
1788 #define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print, filter) \
1789 FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
1790 filter)
1792 #include "trace_entries.h"
1794 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
1795 int perf_ftrace_event_register(struct trace_event_call *call,
1796 enum trace_reg type, void *data);
1797 #else
1798 #define perf_ftrace_event_register NULL
1799 #endif
1801 #ifdef CONFIG_FTRACE_SYSCALLS
1802 void init_ftrace_syscalls(void);
1803 const char *get_syscall_name(int syscall);
1804 #else
1805 static inline void init_ftrace_syscalls(void) { }
1806 static inline const char *get_syscall_name(int syscall)
1808 return NULL;
1810 #endif
1812 #ifdef CONFIG_EVENT_TRACING
1813 void trace_event_init(void);
1814 void trace_event_eval_update(struct trace_eval_map **map, int len);
1815 #else
1816 static inline void __init trace_event_init(void) { }
1817 static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
1818 #endif
1820 extern struct trace_iterator *tracepoint_print_iter;
1822 #endif /* _LINUX_KERNEL_TRACE_H */