/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/hw_breakpoint.h>
#include <linux/trace_seq.h>
#include <linux/trace_events.h>
#include <linux/compiler.h>
#include <linux/glob.h>

#ifdef CONFIG_FTRACE_SYSCALLS
#include <asm/unistd.h>		/* For NR_SYSCALLS */
#include <asm/syscall.h>	/* some archs define it here */
#endif
	__TRACE_FIRST_TYPE = 0,
#define __field(type, item)		type	item;

#define __field_struct(type, item)	__field(type, item)

#define __field_desc(type, container, item)

#define __array(type, item, size)	type	item[size];

#define __array_desc(type, container, item, size)

#undef __dynamic_array
#define __dynamic_array(type, item)	type	item[];

#define F_STRUCT(args...)		args
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)	\
	struct struct_name {						\
		struct trace_entry	ent;				\
		tstruct							\
	}

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)

#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,	\
			 filter, regfn)					\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)

#undef FTRACE_ENTRY_PACKED
#define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print,	\
			    filter)					\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter) __packed

#include "trace_entries.h"
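/*
 * Illustrative sketch only (not the actual contents of trace_entries.h):
 * an entry description generally has the shape
 *
 *	FTRACE_ENTRY(function, ftrace_entry,
 *		TRACE_FN,
 *		F_STRUCT(
 *			__field(unsigned long,	ip)
 *			__field(unsigned long,	parent_ip)
 *		),
 *		print_fmt,
 *		filter)
 *
 * where print_fmt and filter are placeholders here. With the macros
 * above this expands into a struct ftrace_entry whose first member is
 * the common struct trace_entry header.
 */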
/*
 * syscalls are special, and need special handling, this is why
 * they are not included in trace_entries.h
 */
struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;
	unsigned long		args[];
};

struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;
	long			ret;
};

struct kprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		ip;
};

struct kretprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
};
/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF		- interrupts were disabled
 *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ		- inside an interrupt handler
 *  SOFTIRQ		- inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
	TRACE_FLAG_NMI			= 0x40,
};

#define TRACE_BUF_SIZE		1024
/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example, which task started
 * the trace).
 */
struct trace_array_cpu {
	void			*buffer_page;	/* ring buffer spare */

	unsigned long		entries;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	u64			preempt_timestamp;
	char			comm[TASK_COMM_LEN];

#ifdef CONFIG_FUNCTION_TRACER
	bool			ftrace_ignore_pid;
#endif
};
struct trace_option_dentry;
struct trace_buffer {
	struct trace_array		*tr;
	struct ring_buffer		*buffer;
	struct trace_array_cpu __percpu	*data;
};

#define TRACE_FLAGS_MAX_SIZE		32
struct trace_options {
	struct tracer			*tracer;
	struct trace_option_dentry	*topts;
};

struct trace_pid_list {
	int				pid_max;
	unsigned long			*pids;
};
/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct list_head	list;
	struct trace_buffer	trace_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * The max_buffer is used to snapshot the trace when a maximum
	 * latency is reached, or when the user initiates a snapshot.
	 * Some tracers will use this to store a maximum trace while
	 * it continues examining live traces.
	 *
	 * The buffers for the max_buffer are set up the same as the
	 * trace_buffer. When a snapshot is taken, the buffer of the
	 * max_buffer is swapped with the buffer of the trace_buffer and
	 * the buffers are reset for the trace_buffer so the tracing can
	 * continue.
	 */
	struct trace_buffer	max_buffer;
	bool			allocated_snapshot;
#endif
#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
	unsigned long		max_latency;
#endif
	struct trace_pid_list	__rcu *filtered_pids;
	/*
	 * max_lock is used to protect the swapping of buffers
	 * when taking a max snapshot. The buffers themselves are
	 * protected by per_cpu spinlocks. But the action of the swap
	 * needs its own lock.
	 *
	 * This is defined as an arch_spinlock_t in order to help
	 * with performance when lockdep debugging is enabled.
	 *
	 * It is also used in places outside of update_max_tr(), so it
	 * needs to be defined outside of CONFIG_TRACER_MAX_TRACE.
	 */
	arch_spinlock_t		max_lock;
#ifdef CONFIG_FTRACE_SYSCALLS
	int			sys_refcount_enter;
	int			sys_refcount_exit;
	struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
	struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
#endif
	struct tracer		*current_trace;
	unsigned int		trace_flags;
	unsigned char		trace_flags_index[TRACE_FLAGS_MAX_SIZE];
	raw_spinlock_t		start_lock;
	struct dentry		*options;
	struct dentry		*percpu_dir;
	struct dentry		*event_dir;
	struct trace_options	*topts;
	struct list_head	systems;
	struct list_head	events;
	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops	*ops;
	struct trace_pid_list	__rcu *function_pids;
#ifdef CONFIG_DYNAMIC_FTRACE
	/* All of these are protected by the ftrace_lock */
	struct list_head	func_probes;
	struct list_head	mod_trace;
	struct list_head	mod_notrace;
#endif
	/* function tracing enabled */
	int			function_enabled;
#endif
};
enum {
	TRACE_ARRAY_FL_GLOBAL	= (1 << 0)
};
extern struct list_head ftrace_trace_arrays;

extern struct mutex trace_types_lock;

extern int trace_array_get(struct trace_array *tr);
extern void trace_array_put(struct trace_array *tr);
/*
 * The global tracer (top) should be the first trace array added,
 * but we check the flag anyway.
 */
static inline struct trace_array *top_trace_array(void)
{
	struct trace_array *tr;

	if (list_empty(&ftrace_trace_arrays))
		return NULL;

	tr = list_entry(ftrace_trace_arrays.prev,
			typeof(*tr), list);
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
	return tr;
}
#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);
/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);\
		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
		IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT);	\
		IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		__ftrace_bad_type();					\
	} while (0)
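/*
 * Typical use (an illustrative sketch, not code defined in this file):
 * a print routine that has a struct trace_entry can do
 *
 *	struct ftrace_entry *field;
 *
 *	trace_assign_type(field, iter->ent);
 *
 * The IF_ASSIGN() arm whose struct type matches the static type of
 * "field" performs the cast, and WARN_ON() fires if the entry's
 * runtime ->type id does not agree (here, TRACE_FN).
 */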
/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
	struct tracer		*trace;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b
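/*
 * Illustrative sketch of how a tracer typically wires these together
 * (the names my_opts, my_flags and MY_OPT_BIT are hypothetical):
 *
 *	#define MY_OPT_BIT	0x1
 *
 *	static struct tracer_opt my_opts[] = {
 *		{ TRACER_OPT(my-option, MY_OPT_BIT) },
 *		{ }
 *	};
 *
 *	static struct tracer_flags my_flags = {
 *		.val  = 0,
 *		.opts = my_opts,
 *	};
 *
 * The empty entry terminates the array, .val holds the initial option
 * bits, and the tracer's set_flag() callback is told which bit changed.
 */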
struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};
/**
 * struct tracer - a specific tracer and its callbacks to interact with tracefs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_on)
 * @stop: called when tracing is paused (echo 0 > tracing_on)
 * @update_thresh: called when tracing_thresh is updated
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_headers: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	int			(*update_thresh)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*pipe_close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(struct trace_array *tr,
					    u32 old_flags, u32 bit, int set);
	/* Return 0 if OK with change, else return non-zero */
	int			(*flag_changed)(struct trace_array *tr,
						u32 mask, int set);
	struct tracer_flags	*flags;
	bool			allow_instances;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool			use_max_tr;
#endif
	/* True if tracer cannot be enabled in kernel param */
	bool			noboot;
};
/* Only current can touch trace_recursion */

/*
 * For function tracing recursion:
 *  The order of these bits is important.
 *
 *  When function tracing occurs, the following steps are made:
 *   If arch does not support a ftrace feature:
 *    call internal function (uses INTERNAL bits) which calls...
 *   If callback is registered to the "global" list, the list
 *    function is called and recursion checks the GLOBAL bits.
 *    then this function calls...
 *   The function callback, which can use the FTRACE bits to
 *    check for recursion.
 *
 * Now if the arch does not support a feature, and it calls
 * the global list function which calls the ftrace callback
 * all three of these steps will do a recursion protection.
 * There's no reason to do one if the previous caller already
 * did. The recursion that we are protecting against will
 * go through the same steps again.
 *
 * To prevent the multiple recursion checks, if a recursion
 * bit is set that is higher than the MAX bit of the current
 * check, then we know that the check was made by the previous
 * caller, and we can skip the current check.
 */
enum {
	TRACE_BUFFER_NMI_BIT,
	TRACE_BUFFER_IRQ_BIT,
	TRACE_BUFFER_SIRQ_BIT,

	/* Start of function recursion bits */
	TRACE_FTRACE_BIT,
	TRACE_FTRACE_NMI_BIT,
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,

	/* INTERNAL_BITs must be greater than FTRACE_BITs */
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_IRQ_BIT,
	TRACE_INTERNAL_SIRQ_BIT,

	/*
	 * Abuse of the trace_recursion.
	 * We need a way to maintain state if we are tracing the function
	 * graph in irq context, because we want to trace a particular
	 * function that was called in irq context while irq tracing is
	 * off. Since this can only be modified by current, we can reuse
	 * trace_recursion.
	 */
	TRACE_IRQ_BIT,
};
#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))

#define TRACE_CONTEXT_BITS	4

#define TRACE_FTRACE_START	TRACE_FTRACE_BIT
#define TRACE_FTRACE_MAX	((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_LIST_START	TRACE_INTERNAL_BIT
#define TRACE_LIST_MAX		((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_CONTEXT_MASK	TRACE_LIST_MAX
static __always_inline int trace_get_context_bit(void)
{
	int bit;

	if (in_interrupt()) {
		if (in_nmi())
			bit = 0;
		else if (in_irq())
			bit = 1;
		else
			bit = 2;
	} else
		bit = 3;

	return bit;
}
static __always_inline int trace_test_and_set_recursion(int start, int max)
{
	unsigned int val = current->trace_recursion;
	int bit;

	/* A previous recursion check was made */
	if ((val & TRACE_CONTEXT_MASK) > max)
		return 0;

	bit = trace_get_context_bit() + start;
	if (unlikely(val & (1 << bit)))
		return -1;

	val |= 1 << bit;
	current->trace_recursion = val;

	return bit;
}
static __always_inline void trace_clear_recursion(int bit)
{
	unsigned int val = current->trace_recursion;

	if (!bit)
		return;

	bit = 1 << bit;
	val &= ~bit;

	barrier();
	current->trace_recursion = val;
}
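/*
 * Typical usage pattern (an illustrative sketch, not a callback defined
 * in this file): a function-trace callback can protect itself against
 * recursing into itself like this:
 *
 *	int bit;
 *
 *	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
 *	if (bit < 0)
 *		return;
 *
 *	do the traced work ...
 *
 *	trace_clear_recursion(bit);
 *
 * A negative return means a recursion for the current context was
 * already in progress, so the callback simply backs out.
 */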
static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
	if (iter->buffer_iter && iter->buffer_iter[cpu])
		return iter->buffer_iter[cpu];
	return NULL;
}
int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void tracing_reset(struct trace_buffer *buf, int cpu);
void tracing_reset_online_cpus(struct trace_buffer *buf);
void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
bool tracing_is_disabled(void);
int tracer_tracing_is_on(struct trace_array *tr);
void tracer_tracing_on(struct trace_array *tr);
void tracer_tracing_off(struct trace_array *tr);
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops);

struct dentry *tracing_init_dentry(void);

struct ring_buffer_event;
struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags,
			  int pc);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
					struct ring_buffer_event *event);
int trace_empty(struct trace_iterator *iter);

void *trace_find_next_entry_inc(struct trace_iterator *iter);

void trace_init_global_iter(struct trace_iterator *iter);

void tracing_iter_reset(struct trace_iterator *iter, int cpu);

void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_graph_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_latency_header(struct seq_file *m);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void set_graph_array(struct trace_array *tr);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_start_tgid_record(void);
void tracing_stop_tgid_record(void);
int register_tracer(struct tracer *type);
int is_tracing_stopped(void);

loff_t tracing_lseek(struct file *file, loff_t offset, int whence);

extern cpumask_var_t __read_mostly tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu(cpu, tracing_buffer_mask)

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_thresh;
bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
			     pid_t search_pid);
bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
			    struct task_struct *task);
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task);
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
int trace_pid_show(struct seq_file *m, void *v);
void trace_free_pid_list(struct trace_pid_list *pid_list);
int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt);
#ifdef CONFIG_TRACER_MAX_TRACE
void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);
#endif /* CONFIG_TRACER_MAX_TRACE */
#ifdef CONFIG_STACKTRACE
void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
			    int pc);

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc);
#else
static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
					  unsigned long flags, int pc)
{
}

static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
				 int skip, int pc)
{
}
#endif /* CONFIG_STACKTRACE */
extern u64 ftrace_now(int cpu);

extern void trace_find_cmdline(int pid, char comm[]);
extern int trace_find_tgid(int pid);
extern void trace_event_follow_fork(struct trace_array *tr, bool enable);
#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
void ftrace_init_trace_array(struct trace_array *tr);
#else
static inline void ftrace_init_trace_array(struct trace_array *tr) { }
#endif
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);

extern bool ring_buffer_expanded;
extern bool tracing_selftest_disabled;
#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
/*
 * Tracer data references selftest functions that only occur
 * on boot up. These can be __init functions. Thus, when selftests
 * are enabled, then the tracers need to reference __init functions.
 */
#define __tracer_data		__refdata
#else
/* Tracers are seldom changed. Optimize when selftests are disabled. */
#define __tracer_data		__read_mostly
#endif /* CONFIG_FTRACE_STARTUP_TEST */
extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(u64 nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
		    unsigned long ip, const char *fmt, va_list args);
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...);
int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);

extern char trace_find_mark(unsigned long long duration);
struct ftrace_mod_load {
	struct list_head	list;
	char			*func;
	char			*module;
	int			 enable;
};
enum {
	FTRACE_HASH_FL_MOD	= (1 << 0),
};

struct ftrace_hash {
	unsigned long		size_bits;
	struct hlist_head	*buckets;
	unsigned long		count;
	unsigned long		flags;
	struct rcu_head		rcu;
};

struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip);

static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
{
	return !hash || !(hash->count || (hash->flags & FTRACE_HASH_FL_MOD));
}
/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20
#define TRACE_GRAPH_PRINT_IRQS		0x40
#define TRACE_GRAPH_PRINT_TAIL		0x80
#define TRACE_GRAPH_SLEEP_TIME		0x100
#define TRACE_GRAPH_GRAPH_TIME		0x200
#define TRACE_GRAPH_PRINT_FILL_SHIFT	28
#define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
extern void ftrace_graph_sleep_time_control(bool enable);
extern void ftrace_graph_graph_time_control(bool enable);

extern enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags);
extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
extern void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
extern void graph_trace_open(struct trace_iterator *iter);
extern void graph_trace_close(struct trace_iterator *iter);
extern int __trace_graph_entry(struct trace_array *tr,
			       struct ftrace_graph_ent *trace,
			       unsigned long flags, int pc);
extern void __trace_graph_return(struct trace_array *tr,
				 struct ftrace_graph_ret *trace,
				 unsigned long flags, int pc);
#ifdef CONFIG_DYNAMIC_FTRACE
extern struct ftrace_hash *ftrace_graph_hash;
extern struct ftrace_hash *ftrace_graph_notrace_hash;

static inline int ftrace_graph_addr(unsigned long addr)
{
	int ret = 0;

	preempt_disable_notrace();

	if (ftrace_hash_empty(ftrace_graph_hash)) {
		ret = 1;
		goto out;
	}

	if (ftrace_lookup_ip(ftrace_graph_hash, addr)) {
		/*
		 * If no irqs are to be traced, but a set_graph_function
		 * is set, and called by an interrupt handler, we still
		 * want to trace it.
		 */
		if (in_irq())
			trace_recursion_set(TRACE_IRQ_BIT);
		else
			trace_recursion_clear(TRACE_IRQ_BIT);
		ret = 1;
	}

out:
	preempt_enable_notrace();
	return ret;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	int ret = 0;

	preempt_disable_notrace();

	if (ftrace_lookup_ip(ftrace_graph_notrace_hash, addr))
		ret = 1;

	preempt_enable_notrace();
	return ret;
}
#else
static inline int ftrace_graph_addr(unsigned long addr)
{
	return 1;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
extern unsigned int fgraph_max_depth;

static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace)
{
	/* trace it when it is-nested-in or is a function enabled. */
	return !(trace->depth || ftrace_graph_addr(trace->func)) ||
		(trace->depth < 0) ||
		(fgraph_max_depth && trace->depth >= fgraph_max_depth);
}

#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
extern struct list_head ftrace_pids;

#ifdef CONFIG_FUNCTION_TRACER
struct ftrace_func_command {
	struct list_head	list;
	char			*name;
	int			(*func)(struct trace_array *tr,
					struct ftrace_hash *hash,
					char *func, char *cmd,
					char *params, int enable);
};
extern bool ftrace_filter_param __initdata;
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return !this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid);
}
extern int ftrace_is_dead(void);
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent);
void ftrace_destroy_function_files(struct trace_array *tr);
void ftrace_init_global_array_ops(struct trace_array *tr);
void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
void ftrace_reset_array_ops(struct trace_array *tr);
int using_ftrace_ops_list_func(void);
void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
void ftrace_init_tracefs_toplevel(struct trace_array *tr,
				  struct dentry *d_tracer);
void ftrace_clear_pids(struct trace_array *tr);
int init_function_trace(void);
void ftrace_pid_follow_fork(struct trace_array *tr, bool enable);
#else
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
static inline int
ftrace_create_function_files(struct trace_array *tr,
			     struct dentry *parent)
{
	return 0;
}
static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
static inline __init void
ftrace_init_global_array_ops(struct trace_array *tr) { }
static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_clear_pids(struct trace_array *tr) { }
static inline int init_function_trace(void) { return 0; }
static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { }
/* ftrace_func_t type is not defined, use macro instead of static inline */
#define ftrace_init_array_ops(tr, func) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER */
#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)

struct ftrace_probe_ops {
	void			(*func)(unsigned long ip,
					unsigned long parent_ip,
					struct trace_array *tr,
					struct ftrace_probe_ops *ops,
					void *data);
	int			(*init)(struct ftrace_probe_ops *ops,
					struct trace_array *tr,
					unsigned long ip, void *init_data,
					void **data);
	void			(*free)(struct ftrace_probe_ops *ops,
					struct trace_array *tr,
					unsigned long ip, void *data);
	int			(*print)(struct seq_file *m,
					 unsigned long ip,
					 struct ftrace_probe_ops *ops,
					 void *data);
};

struct ftrace_func_mapper;
typedef int (*ftrace_mapper_func)(void *data);
struct ftrace_func_mapper *allocate_ftrace_func_mapper(void);
void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
				  unsigned long ip);
int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
			      unsigned long ip, void *data);
void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
				   unsigned long ip);
void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
			     ftrace_mapper_func free_func);
extern int
register_ftrace_function_probe(char *glob, struct trace_array *tr,
			       struct ftrace_probe_ops *ops, void *data);
extern int
unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
				      struct ftrace_probe_ops *ops);
extern void clear_ftrace_function_probes(struct trace_array *tr);

int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);

void ftrace_create_filter_files(struct ftrace_ops *ops,
				struct dentry *parent);
void ftrace_destroy_filter_files(struct ftrace_ops *ops);
#else
struct ftrace_func_command;

static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
{
	return -EINVAL;
}
static inline __init int unregister_ftrace_command(char *cmd_name)
{
	return -EINVAL;
}
static inline void clear_ftrace_function_probes(struct trace_array *tr)
{
}

/*
 * The ops parameter passed in is usually undefined.
 * This must be a macro.
 */
#define ftrace_create_filter_files(ops, parent) do { } while (0)
#define ftrace_destroy_filter_files(ops) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */
bool ftrace_event_is_function(struct trace_event_call *call);
/*
 * struct trace_parser - serves for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
	bool		cont;
	char		*buffer;
	unsigned	idx;
	unsigned	size;
};

static inline bool trace_parser_loaded(struct trace_parser *parser)
{
	return (parser->idx != 0);
}

static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}

static inline void trace_parser_clear(struct trace_parser *parser)
{
	parser->cont = false;
	parser->idx = 0;
}

extern int trace_parser_get_init(struct trace_parser *parser, int size);
extern void trace_parser_put(struct trace_parser *parser);
extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos);
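/*
 * Typical flow for these helpers (an illustrative sketch of a write()
 * handler, not code defined in this file):
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser) &&
 *	    !trace_parser_cont(&parser)) {
 *		act on the token in parser.buffer ...
 *		trace_parser_clear(&parser);
 *	}
 *
 *	trace_parser_put(&parser);
 *
 * trace_get_user() copies one whitespace-separated token from user space
 * into parser.buffer; ->cont stays set while the token is still partial.
 */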
/*
 * Only create function graph options if function graph is configured.
 */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# define FGRAPH_FLAGS						\
		C(DISPLAY_GRAPH,	"display-graph"),
#else
# define FGRAPH_FLAGS
#endif

#ifdef CONFIG_BRANCH_TRACER
# define BRANCH_FLAGS					\
		C(BRANCH,		"branch"),
#else
# define BRANCH_FLAGS
#endif

#ifdef CONFIG_FUNCTION_TRACER
# define FUNCTION_FLAGS						\
		C(FUNCTION,		"function-trace"),	\
		C(FUNC_FORK,		"function-fork"),
# define FUNCTION_DEFAULT_FLAGS		TRACE_ITER_FUNCTION
#else
# define FUNCTION_FLAGS
# define FUNCTION_DEFAULT_FLAGS		0UL
# define TRACE_ITER_FUNC_FORK		0UL
#endif

#ifdef CONFIG_STACKTRACE
# define STACK_FLAGS				\
		C(STACKTRACE,		"stacktrace"),
#else
# define STACK_FLAGS
#endif
/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *	 trace.c (this macro guarantees it).
 */
#define TRACE_FLAGS						\
		C(PRINT_PARENT,		"print-parent"),	\
		C(SYM_OFFSET,		"sym-offset"),		\
		C(SYM_ADDR,		"sym-addr"),		\
		C(VERBOSE,		"verbose"),		\
		C(BLOCK,		"block"),		\
		C(PRINTK,		"trace_printk"),	\
		C(ANNOTATE,		"annotate"),		\
		C(USERSTACKTRACE,	"userstacktrace"),	\
		C(SYM_USEROBJ,		"sym-userobj"),		\
		C(PRINTK_MSGONLY,	"printk-msg-only"),	\
		C(CONTEXT_INFO,		"context-info"),   /* Print pid/cpu/time */ \
		C(LATENCY_FMT,		"latency-format"),	\
		C(RECORD_CMD,		"record-cmd"),		\
		C(RECORD_TGID,		"record-tgid"),		\
		C(OVERWRITE,		"overwrite"),		\
		C(STOP_ON_FREE,		"disable_on_free"),	\
		C(IRQ_INFO,		"irq-info"),		\
		C(MARKERS,		"markers"),		\
		C(EVENT_FORK,		"event-fork"),		\
		FUNCTION_FLAGS					\
		FGRAPH_FLAGS					\
		STACK_FLAGS					\
		BRANCH_FLAGS

/*
 * By defining C, we can make TRACE_FLAGS a list of bit names
 * that will define the bits for the flag masks.
 */
#undef C
#define C(a, b) TRACE_ITER_##a##_BIT

enum trace_iterator_bits {
	TRACE_FLAGS
	/* Make sure we don't go more than we have bits for */
	TRACE_ITER_LAST_BIT
};

/*
 * By redefining C, we can make TRACE_FLAGS a list of masks that
 * use the bits as defined above.
 */
#undef C
#define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)

enum trace_iterator_flags { TRACE_FLAGS };
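/*
 * For example (expansion sketch): with the first definition of C() above,
 * C(VERBOSE, "verbose") inside TRACE_FLAGS becomes the enumerator
 * TRACE_ITER_VERBOSE_BIT; after C() is redefined, the same entry becomes
 * TRACE_ITER_VERBOSE = (1 << TRACE_ITER_VERBOSE_BIT). The string half of
 * each pair is what trace.c uses to build the trace_options file.
 */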
/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

extern struct tracer nop_trace;
#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (tr->trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */
/* set ring buffers to default size if not already done so */
int tracing_update_buffers(void);

struct ftrace_event_field {
	struct list_head	link;
	const char		*name;
	const char		*type;
	int			filter_type;
	int			offset;
	int			size;
	int			is_signed;
};
struct event_filter {
	int			n_preds;	/* Number assigned */
	int			a_preds;	/* allocated */
	struct filter_pred __rcu	*preds;
	struct filter_pred __rcu	*root;
	char			*filter_string;
};
struct event_subsystem {
	struct list_head	list;
	struct event_filter	*filter;
};

struct trace_subsystem_dir {
	struct list_head		list;
	struct event_subsystem		*subsystem;
	struct trace_array		*tr;
	struct dentry			*entry;
};
extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event);

void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs);

static inline void trace_buffer_unlock_commit(struct trace_array *tr,
					      struct ring_buffer *buffer,
					      struct ring_buffer_event *event,
					      unsigned long flags, int pc)
{
	trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL);
}
DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
DECLARE_PER_CPU(int, trace_buffered_event_cnt);
void trace_buffered_event_disable(void);
void trace_buffered_event_enable(void);
static __always_inline void
__trace_event_discard_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event)
{
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Simply release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
		return;
	}

	ring_buffer_discard_commit(buffer, event);
}
/*
 * Helper function for event_trigger_unlock_commit{_regs}().
 * If there are event triggers attached to this event that require
 * filtering against its fields, then they will be called as the
 * entry already holds the field information of the current event.
 *
 * It also checks if the event should be discarded or not.
 * It is to be discarded if the event is soft disabled and the
 * event was only recorded to process triggers, or if the event
 * filter is active and this event did not match the filters.
 *
 * Returns true if the event is discarded, false otherwise.
 */
static inline bool
__event_trigger_test_discard(struct trace_event_file *file,
			     struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     void *entry,
			     enum event_trigger_type *tt)
{
	unsigned long eflags = file->flags;

	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
		*tt = event_triggers_call(file, entry);

	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
	     !filter_match_preds(file->filter, entry))) {
		__trace_event_discard_commit(buffer, event);
		return true;
	}

	return false;
}
/**
 * event_trigger_unlock_commit - handle triggers and finish event commit
 * @file: The file pointer associated to the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @irq_flags: The state of the interrupts at the start of the event
 * @pc: The state of the preempt count at the start of the event.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
 * if the event is soft disabled and should be discarded.
 */
static inline void
event_trigger_unlock_commit(struct trace_event_file *file,
			    struct ring_buffer *buffer,
			    struct ring_buffer_event *event,
			    void *entry, unsigned long irq_flags, int pc)
{
	enum event_trigger_type tt = ETT_NONE;

	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
		trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);

	if (tt)
		event_triggers_post_call(file, tt, entry);
}
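/*
 * Typical call site shape (an illustrative sketch, not code from this
 * file): after an event handler has reserved and filled "entry" it
 * finishes with
 *
 *	event_trigger_unlock_commit(trace_file, buffer, event, entry,
 *				    irq_flags, pc);
 *
 * which commits the record unless a trigger/filter decided it should be
 * discarded, and then runs any triggers that were deferred until after
 * the commit.
 */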
/**
 * event_trigger_unlock_commit_regs - handle triggers and finish event commit
 * @file: The file pointer associated to the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @irq_flags: The state of the interrupts at the start of the event
 * @pc: The state of the preempt count at the start of the event.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
 * if the event is soft disabled and should be discarded.
 *
 * Same as event_trigger_unlock_commit() but calls
 * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
 */
static inline void
event_trigger_unlock_commit_regs(struct trace_event_file *file,
				 struct ring_buffer *buffer,
				 struct ring_buffer_event *event,
				 void *entry, unsigned long irq_flags, int pc,
				 struct pt_regs *regs)
{
	enum event_trigger_type tt = ETT_NONE;

	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
		trace_buffer_unlock_commit_regs(file->tr, buffer, event,
						irq_flags, pc, regs);

	if (tt)
		event_triggers_post_call(file, tt, entry);
}
#define FILTER_PRED_INVALID	((unsigned short)-1)
#define FILTER_PRED_IS_RIGHT	(1 << 15)
#define FILTER_PRED_FOLD	(1 << 15)

/*
 * The max preds is the size of unsigned short with
 * two flags at the MSBs. One bit is used for both the IS_RIGHT
 * and FOLD flags. The other is reserved.
 *
 * 2^14 preds is way more than enough.
 */
#define MAX_FILTER_PRED		16384
typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);

typedef int (*regex_match_func)(char *str, struct regex *r, int len);

struct regex {
	char			pattern[MAX_FILTER_STR_VAL];
	regex_match_func	match;
};
struct filter_pred {
	filter_pred_fn_t 	fn;
	unsigned short		*ops;
	struct ftrace_event_field *field;
	unsigned short		index;
	unsigned short		parent;
	unsigned short		left;
	unsigned short		right;
};
static inline bool is_string_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_DYN_STRING ||
	       field->filter_type == FILTER_STATIC_STRING ||
	       field->filter_type == FILTER_PTR_STRING ||
	       field->filter_type == FILTER_COMM;
}

static inline bool is_function_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_TRACE_FN;
}
extern enum regex_type
filter_parse_regex(char *buff, int len, char **search, int *not);
extern void print_event_filter(struct trace_event_file *file,
			       struct trace_seq *s);
extern int apply_event_filter(struct trace_event_file *file,
			      char *filter_string);
extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
					char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
					 struct trace_seq *s);
extern int filter_assign_type(const char *type);
extern int create_event_filter(struct trace_event_call *call,
			       char *filter_str, bool set_str,
			       struct event_filter **filterp);
extern void free_event_filter(struct event_filter *filter);

struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name);
extern void trace_event_enable_cmd_record(bool enable);
extern void trace_event_enable_tgid_record(bool enable);

extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
extern int event_trace_del_tracer(struct trace_array *tr);

extern struct trace_event_file *find_event_file(struct trace_array *tr,
						const char *system,
						const char *event);

static inline void *event_file_data(struct file *filp)
{
	return READ_ONCE(file_inode(filp)->i_private);
}

extern struct mutex event_mutex;
extern struct list_head ftrace_events;
extern const struct file_operations event_trigger_fops;
extern const struct file_operations event_hist_fops;

#ifdef CONFIG_HIST_TRIGGERS
extern int register_trigger_hist_cmd(void);
extern int register_trigger_hist_enable_disable_cmds(void);
#else
static inline int register_trigger_hist_cmd(void) { return 0; }
static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
#endif
1479 extern void clear_event_triggers(struct trace_array
*tr
);
1481 struct event_trigger_data
{
1482 unsigned long count
;
1484 struct event_trigger_ops
*ops
;
1485 struct event_command
*cmd_ops
;
1486 struct event_filter __rcu
*filter
;
1491 struct list_head list
;
1493 struct list_head named_list
;
1494 struct event_trigger_data
*named_data
;
#define ENABLE_EVENT_STR	"enable_event"
#define DISABLE_EVENT_STR	"disable_event"
#define ENABLE_HIST_STR		"enable_hist"
#define DISABLE_HIST_STR	"disable_hist"

struct enable_trigger_data {
	struct trace_event_file		*file;
};
extern int event_enable_trigger_print(struct seq_file *m,
				      struct event_trigger_ops *ops,
				      struct event_trigger_data *data);
extern void event_enable_trigger_free(struct event_trigger_ops *ops,
				      struct event_trigger_data *data);
extern int event_enable_trigger_func(struct event_command *cmd_ops,
				     struct trace_event_file *file,
				     char *glob, char *cmd, char *param);
extern int event_enable_register_trigger(char *glob,
					  struct event_trigger_ops *ops,
					  struct event_trigger_data *data,
					  struct trace_event_file *file);
extern void event_enable_unregister_trigger(char *glob,
					     struct event_trigger_ops *ops,
					     struct event_trigger_data *test,
					     struct trace_event_file *file);
extern void trigger_data_free(struct event_trigger_data *data);
extern int event_trigger_init(struct event_trigger_ops *ops,
			      struct event_trigger_data *data);
extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
					       int trigger_enable);
extern void update_cond_flag(struct trace_event_file *file);
extern void unregister_trigger(char *glob, struct event_trigger_ops *ops,
			       struct event_trigger_data *test,
			       struct trace_event_file *file);
extern int set_trigger_filter(char *filter_str,
			      struct event_trigger_data *trigger_data,
			      struct trace_event_file *file);
extern struct event_trigger_data *find_named_trigger(const char *name);
extern bool is_named_trigger(struct event_trigger_data *test);
extern int save_named_trigger(const char *name,
			      struct event_trigger_data *data);
extern void del_named_trigger(struct event_trigger_data *data);
extern void pause_named_trigger(struct event_trigger_data *data);
extern void unpause_named_trigger(struct event_trigger_data *data);
extern void set_named_trigger_data(struct event_trigger_data *data,
				   struct event_trigger_data *named_data);
extern int register_event_command(struct event_command *cmd);
extern int unregister_event_command(struct event_command *cmd);
extern int register_trigger_hist_enable_disable_cmds(void);
/**
 * struct event_trigger_ops - callbacks for trace event triggers
 *
 * The methods in this structure provide per-event trigger hooks for
 * various trigger operations.
 *
 * All the methods below, except for @init() and @free(), must be
 * implemented.
 *
 * @func: The trigger 'probe' function called when the triggering
 *	event occurs. The data passed into this callback is the data
 *	that was supplied to the event_command @reg() function that
 *	registered the trigger (see struct event_command) along with
 *	the trace record, rec.
 *
 * @init: An optional initialization function called for the trigger
 *	when the trigger is registered (via the event_command reg()
 *	function). This can be used to perform per-trigger
 *	initialization such as incrementing a per-trigger reference
 *	count, for instance. This is usually implemented by the
 *	generic utility function @event_trigger_init() (see
 *	trace_event_triggers.c).
 *
 * @free: An optional de-initialization function called for the
 *	trigger when the trigger is unregistered (via the
 *	event_command @unreg() function). This can be used to perform
 *	per-trigger de-initialization such as decrementing a
 *	per-trigger reference count and freeing corresponding trigger
 *	data, for instance. This is usually implemented by the
 *	generic utility function @event_trigger_free() (see
 *	trace_event_triggers.c).
 *
 * @print: The callback function invoked to have the trigger print
 *	itself. This is usually implemented by a wrapper function
 *	that calls the generic utility function @event_trigger_print()
 *	(see trace_event_triggers.c).
 */
struct event_trigger_ops {
	void			(*func)(struct event_trigger_data *data,
					void *rec);
	int			(*init)(struct event_trigger_ops *ops,
					struct event_trigger_data *data);
	void			(*free)(struct event_trigger_ops *ops,
					struct event_trigger_data *data);
	int			(*print)(struct seq_file *m,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data);
};
/**
 * struct event_command - callbacks and data members for event commands
 *
 * Event commands are invoked by users by writing the command name
 * into the 'trigger' file associated with a trace event. The
 * parameters associated with a specific invocation of an event
 * command are used to create an event trigger instance, which is
 * added to the list of trigger instances associated with that trace
 * event. When the event is hit, the set of triggers associated with
 * that event is invoked.
 *
 * The data members in this structure provide per-event command data
 * for various event commands.
 *
 * All the data members below, except for @post_trigger, must be set
 * for each event command.
 *
 * @name: The unique name that identifies the event command. This is
 *	the name used when setting triggers via trigger files.
 *
 * @trigger_type: A unique id that identifies the event command
 *	'type'. This value has two purposes, the first to ensure that
 *	only one trigger of the same type can be set at a given time
 *	for a particular event e.g. it doesn't make sense to have both
 *	a traceon and traceoff trigger attached to a single event at
 *	the same time, so traceon and traceoff have the same type
 *	though they have different names. The @trigger_type value is
 *	also used as a bit value for deferring the actual trigger
 *	action until after the current event is finished. Some
 *	commands need to do this if they themselves log to the trace
 *	buffer (see the @post_trigger() member below). @trigger_type
 *	values are defined by adding new values to the trigger_type
 *	enum in include/linux/trace_events.h.
 *
 * @flags: See the enum event_command_flags below.
 *
 * All the methods below, except for @set_filter() and @unreg_all(),
 * must be implemented.
 *
 * @func: The callback function responsible for parsing and
 *	registering the trigger written to the 'trigger' file by the
 *	user. It allocates the trigger instance and registers it with
 *	the appropriate trace event. It makes use of the other
 *	event_command callback functions to orchestrate this, and is
 *	usually implemented by the generic utility function
 *	@event_trigger_callback() (see trace_event_triggers.c).
 *
 * @reg: Adds the trigger to the list of triggers associated with the
 *	event, and enables the event trigger itself, after
 *	initializing it (via the event_trigger_ops @init() function).
 *	This is also where commands can use the @trigger_type value to
 *	make the decision as to whether or not multiple instances of
 *	the trigger should be allowed. This is usually implemented by
 *	the generic utility function @register_trigger() (see
 *	trace_event_triggers.c).
 *
 * @unreg: Removes the trigger from the list of triggers associated
 *	with the event, and disables the event trigger itself, after
 *	de-initializing it (via the event_trigger_ops @free() function).
 *	This is usually implemented by the generic utility function
 *	@unregister_trigger() (see trace_event_triggers.c).
 *
 * @unreg_all: An optional function called to remove all the triggers
 *	from the list of triggers associated with the event. Called
 *	when a trigger file is opened in truncate mode.
 *
 * @set_filter: An optional function called to parse and set a filter
 *	for the trigger. If no @set_filter() method is set for the
 *	event command, filters set by the user for the command will be
 *	ignored. This is usually implemented by the generic utility
 *	function @set_trigger_filter() (see trace_event_triggers.c).
 *
 * @get_trigger_ops: The callback function invoked to retrieve the
 *	event_trigger_ops implementation associated with the command.
 */
struct event_command {
	struct list_head	list;
	char			*name;
	enum event_trigger_type	trigger_type;
	int			flags;
	int			(*func)(struct event_command *cmd_ops,
					struct trace_event_file *file,
					char *glob, char *cmd, char *params);
	int			(*reg)(char *glob,
				       struct event_trigger_ops *ops,
				       struct event_trigger_data *data,
				       struct trace_event_file *file);
	void			(*unreg)(char *glob,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data,
					 struct trace_event_file *file);
	void			(*unreg_all)(struct trace_event_file *file);
	int			(*set_filter)(char *filter_str,
					      struct event_trigger_data *data,
					      struct trace_event_file *file);
	struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
};
/**
 * enum event_command_flags - flags for struct event_command
 *
 * @POST_TRIGGER: A flag that says whether or not this command needs
 *	to have its action delayed until after the current event has
 *	been closed. Some triggers need to avoid being invoked while
 *	an event is currently in the process of being logged, since
 *	the trigger may itself log data into the trace buffer. Thus
 *	we make sure the current event is committed before invoking
 *	those triggers. To do that, the trigger invocation is split
 *	in two - the first part checks the filter using the current
 *	trace record; if a command has the @post_trigger flag set, it
 *	sets a bit for itself in the return value, otherwise it
 *	directly invokes the trigger. Once all commands have been
 *	either invoked or set their return flag, the current record is
 *	either committed or discarded. At that point, if any commands
 *	have deferred their triggers, those commands are finally
 *	invoked following the close of the current event. In other
 *	words, if the event_trigger_ops @func() probe implementation
 *	itself logs to the trace buffer, this flag should be set,
 *	otherwise it can be left unspecified.
 *
 * @NEEDS_REC: A flag that says whether or not this command needs
 *	access to the trace record in order to perform its function,
 *	regardless of whether or not it has a filter associated with
 *	it (filters make a trigger require access to the trace record
 *	but are not always present).
 */
enum event_command_flags {
	EVENT_CMD_FL_POST_TRIGGER	= 1,
	EVENT_CMD_FL_NEEDS_REC		= 2,
};
static inline bool event_command_post_trigger(struct event_command *cmd_ops)
{
	return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
}

static inline bool event_command_needs_rec(struct event_command *cmd_ops)
{
	return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
}

extern int trace_event_enable_disable(struct trace_event_file *file,
				      int enable, int soft_disable);
extern int tracing_alloc_snapshot(void);
extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];

extern const char *__start___tracepoint_str[];
extern const char *__stop___tracepoint_str[];

void trace_printk_control(bool enabled);
void trace_printk_init_buffers(void);
void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
#define MAX_EVENT_NAME_LEN	64

extern int trace_run_command(const char *buf, int (*createfn)(int, char**));
extern ssize_t trace_parse_run_command(struct file *file,
		const char __user *buffer, size_t count, loff_t *ppos,
		int (*createfn)(int, char**));
/*
 * Normal trace_printk() and friends allocate special buffers
 * to do the manipulation, as well as saving the print formats
 * into sections to display. But the trace infrastructure wants
 * to use these without the added overhead at the price of being
 * a bit slower (used mainly for warnings, where we don't care
 * about performance). The internal_trace_puts() is for such
 * a purpose.
 */
#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
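/*
 * For example, a warning path inside the tracer itself can do
 *
 *	internal_trace_puts("*** EXAMPLE MESSAGE ***\n");
 *
 * (the string is illustrative) and the text is written straight to the
 * ring buffer without allocating the trace_printk() buffers.
 */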
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
	extern struct trace_event_call					\
	__aligned(4) event_##call;

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)

#undef FTRACE_ENTRY_PACKED
#define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print, filter) \
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)

#include "trace_entries.h"
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
int perf_ftrace_event_register(struct trace_event_call *call,
			       enum trace_reg type, void *data);
#else
#define perf_ftrace_event_register NULL
#endif
#ifdef CONFIG_FTRACE_SYSCALLS
void init_ftrace_syscalls(void);
const char *get_syscall_name(int syscall);
#else
static inline void init_ftrace_syscalls(void) { }
static inline const char *get_syscall_name(int syscall)
{
	return NULL;
}
#endif
#ifdef CONFIG_EVENT_TRACING
void trace_event_init(void);
void trace_event_eval_update(struct trace_eval_map **map, int len);
#else
static inline void __init trace_event_init(void) { }
static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
#endif
extern struct trace_iterator *tracepoint_print_iter;

#endif /* _LINUX_KERNEL_TRACE_H */