#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/hw_breakpoint.h>
#include <linux/trace_seq.h>
#include <linux/trace_events.h>
#include <linux/compiler.h>
#include <linux/glob.h>

#ifdef CONFIG_FTRACE_SYSCALLS
#include <asm/unistd.h>		/* For NR_SYSCALLS	     */
#include <asm/syscall.h>	/* some archs define it here */
#endif
enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_BLK,
	TRACE_BPUTS,
	TRACE_HWLAT,
	TRACE_RAW_DATA,

	__TRACE_LAST_TYPE,
};
#undef __field
#define __field(type, item)		type	item;

#undef __field_struct
#define __field_struct(type, item)	__field(type, item)

#undef __field_desc
#define __field_desc(type, container, item)

#undef __array
#define __array(type, item, size)	type	item[size];

#undef __array_desc
#define __array_desc(type, container, item, size)

#undef __dynamic_array
#define __dynamic_array(type, item)	type	item[];

#undef F_STRUCT
#define F_STRUCT(args...)		args
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)	\
	struct struct_name {						\
		struct trace_entry	ent;				\
		tstruct							\
	}

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)

#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,	\
			 filter, regfn)					\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)

#undef FTRACE_ENTRY_PACKED
#define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print,	\
			    filter)					\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter) __packed

#include "trace_entries.h"
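/*
 * Illustration only (hypothetical entry, not one of the real records in
 * trace_entries.h): this is the shape of an FTRACE_ENTRY() use that the
 * macros above expand into a "struct foo_entry" with a leading trace_entry:
 *
 *	FTRACE_ENTRY(foo, foo_entry, TRACE_FOO,
 *
 *		F_STRUCT(
 *			__field(	unsigned long,	ip	)
 *		),
 *
 *		F_printk("ip: %lx", __entry->ip),
 *
 *		FILTER_OTHER
 *	);
 */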
/*
 * syscalls are special, and need special handling, this is why
 * they are not included in trace_entries.h
 */
struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;
	unsigned long		args[];
};

struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;
	long			ret;
};

struct kprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		ip;
};

struct kretprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
};
/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF		- interrupts were disabled
 *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ		- inside an interrupt handler
 *  SOFTIRQ		- inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
	TRACE_FLAG_NMI			= 0x40,
};
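/*
 * A minimal sketch (hypothetical helper, not part of this header) of how an
 * output routine can decode the irq state recorded in a trace_entry's flags:
 */
static inline char trace_irq_state_char(unsigned char flags)
{
	if (flags & TRACE_FLAG_IRQS_OFF)
		return 'd';	/* interrupts were disabled when the event fired */
	if (flags & TRACE_FLAG_IRQS_NOSUPPORT)
		return 'X';	/* arch cannot report the irq state */
	return '.';
}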
#define TRACE_BUF_SIZE		1024
/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data: (for example which task started
 * the trace).
 */
struct trace_array_cpu {
	void			*buffer_page;	/* ring buffer spare */

	unsigned long		entries;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	u64			preempt_timestamp;
	char			comm[TASK_COMM_LEN];

#ifdef CONFIG_FUNCTION_TRACER
	bool			ftrace_ignore_pid;
#endif
};
struct trace_option_dentry;
struct trace_buffer {
	struct trace_array		*tr;
	struct ring_buffer		*buffer;
	struct trace_array_cpu __percpu	*data;
};
#define TRACE_FLAGS_MAX_SIZE		32
struct trace_options {
	struct tracer			*tracer;
	struct trace_option_dentry	*topts;
};
struct trace_pid_list {
	int				pid_max;
	unsigned long			*pids;
};
/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct list_head	list;
	struct trace_buffer	trace_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * The max_buffer is used to snapshot the trace when a maximum
	 * latency is reached, or when the user initiates a snapshot.
	 * Some tracers will use this to store a maximum trace while
	 * it continues examining live traces.
	 *
	 * The buffers for the max_buffer are set up the same as the trace_buffer.
	 * When a snapshot is taken, the buffer of the max_buffer is swapped
	 * with the buffer of the trace_buffer and the buffers are reset for
	 * the trace_buffer so the tracing can continue.
	 */
	struct trace_buffer	max_buffer;
	bool			allocated_snapshot;
#endif
#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
	unsigned long		max_latency;
#endif
	struct trace_pid_list	__rcu *filtered_pids;
	/*
	 * max_lock is used to protect the swapping of buffers
	 * when taking a max snapshot. The buffers themselves are
	 * protected by per_cpu spinlocks. But the action of the swap
	 * needs its own lock.
	 *
	 * This is defined as an arch_spinlock_t in order to help
	 * with performance when lockdep debugging is enabled.
	 *
	 * It is also used in other places outside the update_max_tr
	 * so it needs to be defined outside of the
	 * CONFIG_TRACER_MAX_TRACE.
	 */
	arch_spinlock_t		max_lock;
#ifdef CONFIG_FTRACE_SYSCALLS
	int			sys_refcount_enter;
	int			sys_refcount_exit;
	struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
	struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
#endif
	struct tracer		*current_trace;
	unsigned int		trace_flags;
	unsigned char		trace_flags_index[TRACE_FLAGS_MAX_SIZE];
	unsigned int		flags;
	raw_spinlock_t		start_lock;
	struct dentry		*options;
	struct dentry		*percpu_dir;
	struct dentry		*event_dir;
	struct trace_options	*topts;
	struct list_head	systems;
	struct list_head	events;
	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops	*ops;
	struct trace_pid_list	__rcu *function_pids;
	/* function tracing enabled */
	int			function_enabled;
#endif
};

enum {
	TRACE_ARRAY_FL_GLOBAL	= (1 << 0)
};

extern struct list_head ftrace_trace_arrays;

extern struct mutex trace_types_lock;
extern int trace_array_get(struct trace_array *tr);
extern void trace_array_put(struct trace_array *tr);
/*
 * The global tracer (top) should be the first trace array added,
 * but we check the flag anyway.
 */
static inline struct trace_array *top_trace_array(void)
{
	struct trace_array *tr;

	if (list_empty(&ftrace_trace_arrays))
		return NULL;

	tr = list_entry(ftrace_trace_arrays.prev,
			typeof(*tr), list);
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
	return tr;
}
#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);
/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
		IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT);	\
		IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		__ftrace_bad_type();					\
	} while (0)
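/*
 * Illustration only (hypothetical record, not defined in this header): a new
 * record type is wired into the verifier by adding one more IF_ASSIGN() line
 * before __ftrace_bad_type(), e.g. for a "struct foo_entry" carrying a
 * TRACE_FOO id from the trace_type enum:
 *
 *	IF_ASSIGN(var, ent, struct foo_entry, TRACE_FOO);	\
 */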
/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
	struct tracer		*trace;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b
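/*
 * Illustration only (hypothetical "foo" tracer options, not defined here):
 * a tracer typically describes its private flags with TRACER_OPT() and hands
 * their initial value to the core through a struct tracer_flags.
 */
static struct tracer_opt foo_opts[] __maybe_unused = {
	{ TRACER_OPT(foo_verbose, 0x1) },	/* shows up as "foo_verbose" in trace_options */
	{ }					/* terminating empty entry */
};

static struct tracer_flags foo_flags __maybe_unused = {
	.val	= 0,			/* all private options start cleared */
	.opts	= foo_opts,
};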
struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};
/**
 * struct tracer - a specific tracer and its callbacks to interact with tracefs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_on)
 * @stop: called when tracing is paused (echo 0 > tracing_on)
 * @update_thresh: called when tracing_thresh is updated
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	int			(*update_thresh)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*pipe_close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(struct trace_array *tr,
					    u32 old_flags, u32 bit, int set);
	/* Return 0 if OK with change, else return non-zero */
	int			(*flag_changed)(struct trace_array *tr,
						u32 mask, int set);
	struct tracer_flags	*flags;
	bool			allow_instances;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool			use_max_tr;
#endif
};
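/*
 * Illustration only (hypothetical "foo" tracer, not part of this header):
 * the minimum a tracer has to provide before it can be made selectable via
 * register_tracer(&foo_tracer) from an __init function. Error handling and
 * the tracer_flags wiring are omitted.
 */
static int foo_tracer_init(struct trace_array *tr)
{
	/* a real tracer would register its ftrace_ops / reset buffers here */
	return 0;
}

static void foo_tracer_reset(struct trace_array *tr)
{
	/* undo whatever init did */
}

static struct tracer foo_tracer __maybe_unused = {
	.name		= "foo",
	.init		= foo_tracer_init,
	.reset		= foo_tracer_reset,
	.allow_instances = true,
};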
/* Only current can touch trace_recursion */

/*
 * For function tracing recursion:
 *  The order of these bits is important.
 *
 *  When function tracing occurs, the following steps are made:
 *   If arch does not support a ftrace feature:
 *    call internal function (uses INTERNAL bits) which calls...
 *   If callback is registered to the "global" list, the list
 *    function is called and recursion checks the GLOBAL bits.
 *    then this function calls...
 *   The function callback, which can use the FTRACE bits to
 *    check for recursion.
 *
 * Now if the arch does not support a feature, and it calls
 * the global list function which calls the ftrace callback
 * all three of these steps will do a recursion protection.
 * There's no reason to do one if the previous caller already
 * did. The recursion that we are protecting against will
 * go through the same steps again.
 *
 * To prevent the multiple recursion checks, if a recursion
 * bit is set that is higher than the MAX bit of the current
 * check, then we know that the check was made by the previous
 * caller, and we can skip the current check.
 */
enum {
	TRACE_BUFFER_BIT,
	TRACE_BUFFER_NMI_BIT,
	TRACE_BUFFER_IRQ_BIT,
	TRACE_BUFFER_SIRQ_BIT,

	/* Start of function recursion bits */
	TRACE_FTRACE_BIT,
	TRACE_FTRACE_NMI_BIT,
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,

	/* INTERNAL_BITs must be greater than FTRACE_BITs */
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_IRQ_BIT,
	TRACE_INTERNAL_SIRQ_BIT,

	TRACE_BRANCH_BIT,
/*
 * Abuse of the trace_recursion.
 * As we need a way to maintain state if we are tracing the function
 * graph in irq because we want to trace a particular function that
 * was called in irq context but we have irq tracing off. Since this
 * can only be modified by current, we can reuse trace_recursion.
 */
	TRACE_IRQ_BIT,
};

#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))
#define TRACE_CONTEXT_BITS	4

#define TRACE_FTRACE_START	TRACE_FTRACE_BIT
#define TRACE_FTRACE_MAX	((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_LIST_START	TRACE_INTERNAL_BIT
#define TRACE_LIST_MAX		((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_CONTEXT_MASK	TRACE_LIST_MAX
static __always_inline int trace_get_context_bit(void)
{
	int bit;

	if (in_interrupt()) {
		if (in_nmi())
			bit = 0;
		else if (in_irq())
			bit = 1;
		else
			bit = 2;
	} else
		bit = 3;

	return bit;
}

static __always_inline int trace_test_and_set_recursion(int start, int max)
{
	unsigned int val = current->trace_recursion;
	int bit;

	/* A previous recursion check was made */
	if ((val & TRACE_CONTEXT_MASK) > max)
		return 0;

	bit = trace_get_context_bit() + start;
	if (unlikely(val & (1 << bit)))
		return -1;

	val |= 1 << bit;
	current->trace_recursion = val;

	barrier();

	return bit;
}

static __always_inline void trace_clear_recursion(int bit)
{
	unsigned int val = current->trace_recursion;

	if (!bit)
		return;

	bit = 1 << bit;
	val &= ~bit;

	barrier();
	current->trace_recursion = val;
}
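/*
 * Illustration only (hypothetical callback, not part of this header): the
 * intended usage pattern of the recursion helpers from a function-trace
 * callback. A negative return means a check for this context was already
 * made higher up, so the callback simply backs out.
 */
static inline void foo_ftrace_callback(unsigned long ip, unsigned long parent_ip,
				       struct ftrace_ops *op, struct pt_regs *regs)
{
	int bit;

	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
	if (bit < 0)
		return;			/* recursing, drop this hit */

	/* ... the actual tracing work goes here ... */

	trace_clear_recursion(bit);
}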
static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
	if (iter->buffer_iter && iter->buffer_iter[cpu])
		return iter->buffer_iter[cpu];
	return NULL;
}
int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void tracing_reset(struct trace_buffer *buf, int cpu);
void tracing_reset_online_cpus(struct trace_buffer *buf);
void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
bool tracing_is_disabled(void);
int tracer_tracing_is_on(struct trace_array *tr);
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops);

struct dentry *tracing_init_dentry(void);
struct ring_buffer_event;

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags,
			  int pc);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
					struct ring_buffer_event *event);

int trace_empty(struct trace_iterator *iter);
void *trace_find_next_entry_inc(struct trace_iterator *iter);

void trace_init_global_iter(struct trace_iterator *iter);

void tracing_iter_reset(struct trace_iterator *iter, int cpu);

void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_graph_function(struct trace_array *tr,
			  unsigned long ip,
			  unsigned long parent_ip,
			  unsigned long flags, int pc);
void trace_latency_header(struct seq_file *m);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
int trace_empty(struct trace_iterator *iter);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void set_graph_array(struct trace_array *tr);
void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
int register_tracer(struct tracer *type);
int is_tracing_stopped(void);

loff_t tracing_lseek(struct file *file, loff_t offset, int whence);

extern cpumask_var_t __read_mostly tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu(cpu, tracing_buffer_mask)

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_thresh;
bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
			     pid_t search_pid);
bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
			    struct task_struct *task);
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task);
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
int trace_pid_show(struct seq_file *m, void *v);
void trace_free_pid_list(struct trace_pid_list *pid_list);
int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt);
#ifdef CONFIG_TRACER_MAX_TRACE
void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);
#endif /* CONFIG_TRACER_MAX_TRACE */

#ifdef CONFIG_STACKTRACE
void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
			    int pc);

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc);
#else
static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
					  unsigned long flags, int pc)
{
}

static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
				 int skip, int pc)
{
}
#endif /* CONFIG_STACKTRACE */
extern u64 ftrace_now(int cpu);

extern void trace_find_cmdline(int pid, char comm[]);
extern void trace_event_follow_fork(struct trace_array *tr, bool enable);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#endif
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);

extern bool ring_buffer_expanded;
extern bool tracing_selftest_disabled;
#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
/*
 * Tracer data references selftest functions that only occur
 * on boot up. These can be __init functions. Thus, when selftests
 * are enabled, then the tracers need to reference __init functions.
 */
#define __tracer_data		__refdata
#else
/* Tracers are seldom changed. Optimize when selftests are disabled. */
#define __tracer_data		__read_mostly
#endif /* CONFIG_FTRACE_STARTUP_TEST */
extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(u64 nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
		    unsigned long ip, const char *fmt, va_list args);
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...);
int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);

extern char trace_find_mark(unsigned long long duration);
struct ftrace_hash {
	unsigned long		size_bits;
	struct hlist_head	*buckets;
	unsigned long		count;
	struct rcu_head		rcu;
};

struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip);

static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
{
	return !hash || !hash->count;
}
/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN       0x1
#define TRACE_GRAPH_PRINT_CPU           0x2
#define TRACE_GRAPH_PRINT_OVERHEAD      0x4
#define TRACE_GRAPH_PRINT_PROC          0x8
#define TRACE_GRAPH_PRINT_DURATION      0x10
#define TRACE_GRAPH_PRINT_ABS_TIME      0x20
#define TRACE_GRAPH_PRINT_IRQS          0x40
#define TRACE_GRAPH_PRINT_TAIL          0x80
#define TRACE_GRAPH_SLEEP_TIME		0x100
#define TRACE_GRAPH_GRAPH_TIME		0x200
#define TRACE_GRAPH_PRINT_FILL_SHIFT	28
#define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)

extern void ftrace_graph_sleep_time_control(bool enable);
extern void ftrace_graph_graph_time_control(bool enable);

extern enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags);
extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
extern void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
extern void graph_trace_open(struct trace_iterator *iter);
extern void graph_trace_close(struct trace_iterator *iter);
extern int __trace_graph_entry(struct trace_array *tr,
			       struct ftrace_graph_ent *trace,
			       unsigned long flags, int pc);
extern void __trace_graph_return(struct trace_array *tr,
				 struct ftrace_graph_ret *trace,
				 unsigned long flags, int pc);
#ifdef CONFIG_DYNAMIC_FTRACE
extern struct ftrace_hash *ftrace_graph_hash;
extern struct ftrace_hash *ftrace_graph_notrace_hash;

static inline int ftrace_graph_addr(unsigned long addr)
{
	int ret = 0;

	preempt_disable_notrace();

	if (ftrace_hash_empty(ftrace_graph_hash)) {
		ret = 1;
		goto out;
	}

	if (ftrace_lookup_ip(ftrace_graph_hash, addr)) {
		/*
		 * If no irqs are to be traced, but a set_graph_function
		 * is set, and called by an interrupt handler, we still
		 * want to trace it.
		 */
		if (in_irq())
			trace_recursion_set(TRACE_IRQ_BIT);
		else
			trace_recursion_clear(TRACE_IRQ_BIT);
		ret = 1;
	}

out:
	preempt_enable_notrace();
	return ret;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	int ret = 0;

	preempt_disable_notrace();

	if (ftrace_lookup_ip(ftrace_graph_notrace_hash, addr))
		ret = 1;

	preempt_enable_notrace();
	return ret;
}
#else
static inline int ftrace_graph_addr(unsigned long addr)
{
	return 1;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

extern unsigned int fgraph_max_depth;

static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace)
{
	/* trace it when it is nested in, or is, a function that is enabled */
	return !(trace->depth || ftrace_graph_addr(trace->func)) ||
		(trace->depth < 0) ||
		(fgraph_max_depth && trace->depth >= fgraph_max_depth);
}

#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
extern struct list_head ftrace_pids;
#ifdef CONFIG_FUNCTION_TRACER
extern bool ftrace_filter_param __initdata;
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return !this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid);
}
extern int ftrace_is_dead(void);
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent);
void ftrace_destroy_function_files(struct trace_array *tr);
void ftrace_init_global_array_ops(struct trace_array *tr);
void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
void ftrace_reset_array_ops(struct trace_array *tr);
int using_ftrace_ops_list_func(void);
void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
void ftrace_init_tracefs_toplevel(struct trace_array *tr,
				  struct dentry *d_tracer);
void ftrace_clear_pids(struct trace_array *tr);
#else
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
static inline int
ftrace_create_function_files(struct trace_array *tr,
			     struct dentry *parent)
{
	return 0;
}
static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
static inline __init void
ftrace_init_global_array_ops(struct trace_array *tr) { }
static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_clear_pids(struct trace_array *tr) { }
/* ftrace_func_t type is not defined, use macro instead of static inline */
#define ftrace_init_array_ops(tr, func) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER */
#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
void ftrace_create_filter_files(struct ftrace_ops *ops,
				struct dentry *parent);
void ftrace_destroy_filter_files(struct ftrace_ops *ops);
#else
/*
 * The ops parameter passed in is usually undefined.
 * This must be a macro.
 */
#define ftrace_create_filter_files(ops, parent) do { } while (0)
#define ftrace_destroy_filter_files(ops) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */

bool ftrace_event_is_function(struct trace_event_call *call);
/*
 * struct trace_parser - serves for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
	bool		cont;
	char		*buffer;
	unsigned	idx;
	unsigned	size;
};

static inline bool trace_parser_loaded(struct trace_parser *parser)
{
	return (parser->idx != 0);
}

static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}

static inline void trace_parser_clear(struct trace_parser *parser)
{
	parser->cont = false;
	parser->idx = 0;
}

extern int trace_parser_get_init(struct trace_parser *parser, int size);
extern void trace_parser_put(struct trace_parser *parser);
extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos);
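/*
 * Illustration only (hypothetical write handler, not part of this header):
 * the usual pattern for consuming space-separated words from user space
 * with a trace_parser. The 128-byte buffer size is an arbitrary choice.
 */
static inline ssize_t
foo_filter_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read;

	if (trace_parser_get_init(&parser, 128))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);
	if (read >= 0 && trace_parser_loaded(&parser) &&
	    !trace_parser_cont(&parser)) {
		/* parser.buffer now holds one complete word of length parser.idx */
		/* ... act on the word here ... */
	}

	trace_parser_put(&parser);
	return read;
}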
/*
 * Only create function graph options if function graph is configured.
 */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# define FGRAPH_FLAGS						\
		C(DISPLAY_GRAPH,	"display-graph"),
#else
# define FGRAPH_FLAGS
#endif

#ifdef CONFIG_BRANCH_TRACER
# define BRANCH_FLAGS					\
		C(BRANCH,		"branch"),
#else
# define BRANCH_FLAGS
#endif

#ifdef CONFIG_FUNCTION_TRACER
# define FUNCTION_FLAGS						\
		C(FUNCTION,		"function-trace"),
# define FUNCTION_DEFAULT_FLAGS		TRACE_ITER_FUNCTION
#else
# define FUNCTION_FLAGS
# define FUNCTION_DEFAULT_FLAGS		0UL
#endif

#ifdef CONFIG_STACKTRACE
# define STACK_FLAGS				\
		C(STACKTRACE,		"stacktrace"),
#else
# define STACK_FLAGS
#endif
/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c (this macro guarantees it).
 */
#define TRACE_FLAGS						\
		C(PRINT_PARENT,		"print-parent"),	\
		C(SYM_OFFSET,		"sym-offset"),		\
		C(SYM_ADDR,		"sym-addr"),		\
		C(VERBOSE,		"verbose"),		\
		C(RAW,			"raw"),			\
		C(HEX,			"hex"),			\
		C(BIN,			"bin"),			\
		C(BLOCK,		"block"),		\
		C(PRINTK,		"trace_printk"),	\
		C(ANNOTATE,		"annotate"),		\
		C(USERSTACKTRACE,	"userstacktrace"),	\
		C(SYM_USEROBJ,		"sym-userobj"),		\
		C(PRINTK_MSGONLY,	"printk-msg-only"),	\
		C(CONTEXT_INFO,		"context-info"),   /* Print pid/cpu/time */ \
		C(LATENCY_FMT,		"latency-format"),	\
		C(RECORD_CMD,		"record-cmd"),		\
		C(OVERWRITE,		"overwrite"),		\
		C(STOP_ON_FREE,		"disable_on_free"),	\
		C(IRQ_INFO,		"irq-info"),		\
		C(MARKERS,		"markers"),		\
		C(EVENT_FORK,		"event-fork"),		\
		FUNCTION_FLAGS					\
		FGRAPH_FLAGS					\
		STACK_FLAGS					\
		BRANCH_FLAGS

/*
 * By defining C, we can make TRACE_FLAGS a list of bit names
 * that will define the bits for the flag masks.
 */
#undef C
#define C(a, b) TRACE_ITER_##a##_BIT

enum trace_iterator_bits {
	TRACE_FLAGS
	/* Make sure we don't go more than we have bits for */
	TRACE_ITER_LAST_BIT
};

/*
 * By redefining C, we can make TRACE_FLAGS a list of masks that
 * use the bits as defined above.
 */
#undef C
#define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)

enum trace_iterator_flags { TRACE_FLAGS };

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
extern struct tracer nop_trace;

#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (tr->trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */
/* set ring buffers to default size if not already done so */
int tracing_update_buffers(void);

struct ftrace_event_field {
	struct list_head	link;
	const char		*name;
	const char		*type;
	int			filter_type;
	int			offset;
	int			size;
	int			is_signed;
};

struct event_filter {
	int			n_preds;	/* Number assigned */
	int			a_preds;	/* allocated */
	struct filter_pred	*preds;
	struct filter_pred	*root;
	char			*filter_string;
};
struct event_subsystem {
	struct list_head	list;
	struct event_filter	*filter;
};

struct trace_subsystem_dir {
	struct list_head	list;
	struct event_subsystem	*subsystem;
	struct trace_array	*tr;
	struct dentry		*entry;
};
extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event);

void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs);

static inline void trace_buffer_unlock_commit(struct trace_array *tr,
					      struct ring_buffer *buffer,
					      struct ring_buffer_event *event,
					      unsigned long flags, int pc)
{
	trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL);
}

DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
DECLARE_PER_CPU(int, trace_buffered_event_cnt);
void trace_buffered_event_disable(void);
void trace_buffered_event_enable(void);
static inline void
__trace_event_discard_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event)
{
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Simply release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
		return;
	}
	ring_buffer_discard_commit(buffer, event);
}
/*
 * Helper function for event_trigger_unlock_commit{_regs}().
 * If there are event triggers attached to this event that require
 * filtering against its fields, then they will be called as the
 * entry already holds the field information of the current event.
 *
 * It also checks if the event should be discarded or not.
 * It is to be discarded if the event is soft disabled and the
 * event was only recorded to process triggers, or if the event
 * filter is active and this event did not match the filters.
 *
 * Returns true if the event is discarded, false otherwise.
 */
static inline bool
__event_trigger_test_discard(struct trace_event_file *file,
			     struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     void *entry,
			     enum event_trigger_type *tt)
{
	unsigned long eflags = file->flags;

	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
		*tt = event_triggers_call(file, entry);

	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
	     !filter_match_preds(file->filter, entry))) {
		__trace_event_discard_commit(buffer, event);
		return true;
	}

	return false;
}
/**
 * event_trigger_unlock_commit - handle triggers and finish event commit
 * @file: The file pointer associated to the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @irq_flags: The state of the interrupts at the start of the event
 * @pc: The state of the preempt count at the start of the event.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
 * if the event is soft disabled and should be discarded.
 */
static inline void
event_trigger_unlock_commit(struct trace_event_file *file,
			    struct ring_buffer *buffer,
			    struct ring_buffer_event *event,
			    void *entry, unsigned long irq_flags, int pc)
{
	enum event_trigger_type tt = ETT_NONE;

	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
		trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);

	if (tt)
		event_triggers_post_call(file, tt, entry);
}
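/*
 * Illustration only (hypothetical probe, not part of this header): the usual
 * shape of an event probe that reserves a record, fills it in, and then lets
 * event_trigger_unlock_commit() run filters/triggers and commit or discard.
 * The existing TRACE_FN record type is reused here purely as an example.
 */
static inline void
foo_event_probe(struct trace_event_file *trace_file,
		unsigned long ip, unsigned long parent_ip,
		unsigned long irq_flags, int pc)
{
	struct ring_buffer *buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	event = trace_event_buffer_lock_reserve(&buffer, trace_file, TRACE_FN,
						sizeof(*entry), irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	event_trigger_unlock_commit(trace_file, buffer, event, entry,
				    irq_flags, pc);
}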
/**
 * event_trigger_unlock_commit_regs - handle triggers and finish event commit
 * @file: The file pointer associated to the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @irq_flags: The state of the interrupts at the start of the event
 * @pc: The state of the preempt count at the start of the event.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
 * if the event is soft disabled and should be discarded.
 *
 * Same as event_trigger_unlock_commit() but calls
 * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
 */
static inline void
event_trigger_unlock_commit_regs(struct trace_event_file *file,
				 struct ring_buffer *buffer,
				 struct ring_buffer_event *event,
				 void *entry, unsigned long irq_flags, int pc,
				 struct pt_regs *regs)
{
	enum event_trigger_type tt = ETT_NONE;

	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
		trace_buffer_unlock_commit_regs(file->tr, buffer, event,
						irq_flags, pc, regs);

	if (tt)
		event_triggers_post_call(file, tt, entry);
}
#define FILTER_PRED_INVALID	((unsigned short)-1)
#define FILTER_PRED_IS_RIGHT	(1 << 15)
#define FILTER_PRED_FOLD	(1 << 15)

/*
 * The max preds is the size of unsigned short with
 * two flags at the MSBs. One bit is used for both the IS_RIGHT
 * and FOLD flags. The other is reserved.
 *
 * 2^14 preds is way more than enough.
 */
#define MAX_FILTER_PRED		16384

struct regex;
struct filter_pred;

typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);

typedef int (*regex_match_func)(char *str, struct regex *r, int len);

struct regex {
	char			pattern[MAX_FILTER_STR_VAL];
	int			len;
	int			field_len;
	regex_match_func	match;
};

struct filter_pred {
	filter_pred_fn_t 	fn;
	unsigned short		*ops;
	struct ftrace_event_field *field;
	unsigned short		index;
	unsigned short		parent;
	unsigned short		left;
	unsigned short		right;
};
static inline bool is_string_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_DYN_STRING ||
	       field->filter_type == FILTER_STATIC_STRING ||
	       field->filter_type == FILTER_PTR_STRING ||
	       field->filter_type == FILTER_COMM;
}

static inline bool is_function_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_TRACE_FN;
}
extern enum regex_type
filter_parse_regex(char *buff, int len, char **search, int *not);
extern void print_event_filter(struct trace_event_file *file,
			       struct trace_seq *s);
extern int apply_event_filter(struct trace_event_file *file,
			      char *filter_string);
extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
					char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
					 struct trace_seq *s);
extern int filter_assign_type(const char *type);
extern int create_event_filter(struct trace_event_call *call,
			       char *filter_str, bool set_str,
			       struct event_filter **filterp);
extern void free_event_filter(struct event_filter *filter);

struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name);
extern void trace_event_enable_cmd_record(bool enable);
extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
extern int event_trace_del_tracer(struct trace_array *tr);

extern struct trace_event_file *find_event_file(struct trace_array *tr,
						const char *system,
						const char *event);

static inline void *event_file_data(struct file *filp)
{
	return ACCESS_ONCE(file_inode(filp)->i_private);
}

extern struct mutex event_mutex;
extern struct list_head ftrace_events;

extern const struct file_operations event_trigger_fops;
extern const struct file_operations event_hist_fops;

#ifdef CONFIG_HIST_TRIGGERS
extern int register_trigger_hist_cmd(void);
extern int register_trigger_hist_enable_disable_cmds(void);
#else
static inline int register_trigger_hist_cmd(void) { return 0; }
static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
#endif
extern int register_trigger_cmds(void);
extern void clear_event_triggers(struct trace_array *tr);

struct event_trigger_data {
	unsigned long			count;
	struct event_trigger_ops	*ops;
	struct event_command		*cmd_ops;
	struct event_filter __rcu	*filter;
	struct list_head		list;
	struct list_head		named_list;
	struct event_trigger_data	*named_data;
};

/* Avoid typos */
#define ENABLE_EVENT_STR	"enable_event"
#define DISABLE_EVENT_STR	"disable_event"
#define ENABLE_HIST_STR		"enable_hist"
#define DISABLE_HIST_STR	"disable_hist"

struct enable_trigger_data {
	struct trace_event_file		*file;
};
extern int event_enable_trigger_print(struct seq_file *m,
				      struct event_trigger_ops *ops,
				      struct event_trigger_data *data);
extern void event_enable_trigger_free(struct event_trigger_ops *ops,
				      struct event_trigger_data *data);
extern int event_enable_trigger_func(struct event_command *cmd_ops,
				     struct trace_event_file *file,
				     char *glob, char *cmd, char *param);
extern int event_enable_register_trigger(char *glob,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data,
					 struct trace_event_file *file);
extern void event_enable_unregister_trigger(char *glob,
					    struct event_trigger_ops *ops,
					    struct event_trigger_data *test,
					    struct trace_event_file *file);
extern void trigger_data_free(struct event_trigger_data *data);
extern int event_trigger_init(struct event_trigger_ops *ops,
			      struct event_trigger_data *data);
extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
					      int trigger_enable);
extern void update_cond_flag(struct trace_event_file *file);
extern void unregister_trigger(char *glob, struct event_trigger_ops *ops,
			       struct event_trigger_data *test,
			       struct trace_event_file *file);
extern int set_trigger_filter(char *filter_str,
			      struct event_trigger_data *trigger_data,
			      struct trace_event_file *file);
extern struct event_trigger_data *find_named_trigger(const char *name);
extern bool is_named_trigger(struct event_trigger_data *test);
extern int save_named_trigger(const char *name,
			      struct event_trigger_data *data);
extern void del_named_trigger(struct event_trigger_data *data);
extern void pause_named_trigger(struct event_trigger_data *data);
extern void unpause_named_trigger(struct event_trigger_data *data);
extern void set_named_trigger_data(struct event_trigger_data *data,
				   struct event_trigger_data *named_data);
extern int register_event_command(struct event_command *cmd);
extern int unregister_event_command(struct event_command *cmd);
extern int register_trigger_hist_enable_disable_cmds(void);
/**
 * struct event_trigger_ops - callbacks for trace event triggers
 *
 * The methods in this structure provide per-event trigger hooks for
 * various trigger operations.
 *
 * All the methods below, except for @init() and @free(), must be
 * implemented.
 *
 * @func: The trigger 'probe' function called when the triggering
 *	event occurs. The data passed into this callback is the data
 *	that was supplied to the event_command @reg() function that
 *	registered the trigger (see struct event_command) along with
 *	the trace record, rec.
 *
 * @init: An optional initialization function called for the trigger
 *	when the trigger is registered (via the event_command reg()
 *	function). This can be used to perform per-trigger
 *	initialization such as incrementing a per-trigger reference
 *	count, for instance. This is usually implemented by the
 *	generic utility function @event_trigger_init() (see
 *	trace_event_triggers.c).
 *
 * @free: An optional de-initialization function called for the
 *	trigger when the trigger is unregistered (via the
 *	event_command @unreg() function). This can be used to perform
 *	per-trigger de-initialization such as decrementing a
 *	per-trigger reference count and freeing corresponding trigger
 *	data, for instance. This is usually implemented by the
 *	generic utility function @event_trigger_free() (see
 *	trace_event_triggers.c).
 *
 * @print: The callback function invoked to have the trigger print
 *	itself. This is usually implemented by a wrapper function
 *	that calls the generic utility function @event_trigger_print()
 *	(see trace_event_triggers.c).
 */
struct event_trigger_ops {
	void			(*func)(struct event_trigger_data *data,
					void *rec);
	int			(*init)(struct event_trigger_ops *ops,
					struct event_trigger_data *data);
	void			(*free)(struct event_trigger_ops *ops,
					struct event_trigger_data *data);
	int			(*print)(struct seq_file *m,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data);
};
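/*
 * Illustration only (hypothetical "foo" trigger, not part of this header):
 * a countdown trigger wired to the generic event_trigger_init() helper
 * declared above; @free is left out since it is optional.
 */
static void foo_trigger(struct event_trigger_data *data, void *rec)
{
	if (!data->count)
		return;
	if (data->count != -1)
		(data->count)--;

	/* the trigger's actual action would go here */
}

static int foo_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
			     struct event_trigger_data *data)
{
	/* a real implementation would print "foo[:count][ if filter]" here */
	return 0;
}

static struct event_trigger_ops foo_trigger_ops __maybe_unused = {
	.func	= foo_trigger,
	.init	= event_trigger_init,
	.print	= foo_trigger_print,
};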
/**
 * struct event_command - callbacks and data members for event commands
 *
 * Event commands are invoked by users by writing the command name
 * into the 'trigger' file associated with a trace event. The
 * parameters associated with a specific invocation of an event
 * command are used to create an event trigger instance, which is
 * added to the list of trigger instances associated with that trace
 * event. When the event is hit, the set of triggers associated with
 * that event is invoked.
 *
 * The data members in this structure provide per-event command data
 * for various event commands.
 *
 * All the data members below, except for @post_trigger, must be set
 * for each event command.
 *
 * @name: The unique name that identifies the event command. This is
 *	the name used when setting triggers via trigger files.
 *
 * @trigger_type: A unique id that identifies the event command
 *	'type'. This value has two purposes, the first to ensure that
 *	only one trigger of the same type can be set at a given time
 *	for a particular event e.g. it doesn't make sense to have both
 *	a traceon and traceoff trigger attached to a single event at
 *	the same time, so traceon and traceoff have the same type
 *	though they have different names. The @trigger_type value is
 *	also used as a bit value for deferring the actual trigger
 *	action until after the current event is finished. Some
 *	commands need to do this if they themselves log to the trace
 *	buffer (see the @post_trigger() member below). @trigger_type
 *	values are defined by adding new values to the trigger_type
 *	enum in include/linux/trace_events.h.
 *
 * @flags: See the enum event_command_flags below.
 *
 * All the methods below, except for @set_filter() and @unreg_all(),
 * must be implemented.
 *
 * @func: The callback function responsible for parsing and
 *	registering the trigger written to the 'trigger' file by the
 *	user. It allocates the trigger instance and registers it with
 *	the appropriate trace event. It makes use of the other
 *	event_command callback functions to orchestrate this, and is
 *	usually implemented by the generic utility function
 *	@event_trigger_callback() (see trace_event_triggers.c).
 *
 * @reg: Adds the trigger to the list of triggers associated with the
 *	event, and enables the event trigger itself, after
 *	initializing it (via the event_trigger_ops @init() function).
 *	This is also where commands can use the @trigger_type value to
 *	make the decision as to whether or not multiple instances of
 *	the trigger should be allowed. This is usually implemented by
 *	the generic utility function @register_trigger() (see
 *	trace_event_triggers.c).
 *
 * @unreg: Removes the trigger from the list of triggers associated
 *	with the event, and disables the event trigger itself, after
 *	initializing it (via the event_trigger_ops @free() function).
 *	This is usually implemented by the generic utility function
 *	@unregister_trigger() (see trace_event_triggers.c).
 *
 * @unreg_all: An optional function called to remove all the triggers
 *	from the list of triggers associated with the event. Called
 *	when a trigger file is opened in truncate mode.
 *
 * @set_filter: An optional function called to parse and set a filter
 *	for the trigger. If no @set_filter() method is set for the
 *	event command, filters set by the user for the command will be
 *	ignored. This is usually implemented by the generic utility
 *	function @set_trigger_filter() (see trace_event_triggers.c).
 *
 * @get_trigger_ops: The callback function invoked to retrieve the
 *	event_trigger_ops implementation associated with the command.
 */
struct event_command {
	struct list_head	list;
	char			*name;
	enum event_trigger_type	trigger_type;
	int			flags;
	int			(*func)(struct event_command *cmd_ops,
					struct trace_event_file *file,
					char *glob, char *cmd, char *params);
	int			(*reg)(char *glob,
				       struct event_trigger_ops *ops,
				       struct event_trigger_data *data,
				       struct trace_event_file *file);
	void			(*unreg)(char *glob,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data,
					 struct trace_event_file *file);
	void			(*unreg_all)(struct trace_event_file *file);
	int			(*set_filter)(char *filter_str,
					      struct event_trigger_data *data,
					      struct trace_event_file *file);
	struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
};
1594 * @POST_TRIGGER: A flag that says whether or not this command needs
1595 * to have its action delayed until after the current event has
1596 * been closed. Some triggers need to avoid being invoked while
1597 * an event is currently in the process of being logged, since
1598 * the trigger may itself log data into the trace buffer. Thus
1599 * we make sure the current event is committed before invoking
1600 * those triggers. To do that, the trigger invocation is split
1601 * in two - the first part checks the filter using the current
1602 * trace record; if a command has the @post_trigger flag set, it
1603 * sets a bit for itself in the return value, otherwise it
1604 * directly invokes the trigger. Once all commands have been
1605 * either invoked or set their return flag, the current record is
1606 * either committed or discarded. At that point, if any commands
1607 * have deferred their triggers, those commands are finally
1608 * invoked following the close of the current event. In other
1609 * words, if the event_trigger_ops @func() probe implementation
1610 * itself logs to the trace buffer, this flag should be set,
1611 * otherwise it can be left unspecified.
1613 * @NEEDS_REC: A flag that says whether or not this command needs
1614 * access to the trace record in order to perform its function,
1615 * regardless of whether or not it has a filter associated with
1616 * it (filters make a trigger require access to the trace record
1617 * but are not always present).
1619 enum event_command_flags
{
1620 EVENT_CMD_FL_POST_TRIGGER
= 1,
1621 EVENT_CMD_FL_NEEDS_REC
= 2,
1624 static inline bool event_command_post_trigger(struct event_command
*cmd_ops
)
1626 return cmd_ops
->flags
& EVENT_CMD_FL_POST_TRIGGER
;
1629 static inline bool event_command_needs_rec(struct event_command
*cmd_ops
)
1631 return cmd_ops
->flags
& EVENT_CMD_FL_NEEDS_REC
;
1634 extern int trace_event_enable_disable(struct trace_event_file
*file
,
1635 int enable
, int soft_disable
);
1636 extern int tracing_alloc_snapshot(void);
extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];

extern const char *__start___tracepoint_str[];
extern const char *__stop___tracepoint_str[];

void trace_printk_control(bool enabled);
void trace_printk_init_buffers(void);
void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);

/*
 * Normal trace_printk() and friends allocate special buffers
 * to do the manipulation, as well as save the print formats
 * into sections to display. But the trace infrastructure wants
 * to use these without the added overhead at the price of being
 * a bit slower (used mainly for warnings, where we don't care
 * about performance). The internal_trace_puts() is for such
 * a purpose.
 */
#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
	extern struct trace_event_call					\
	__aligned(4) event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)
#undef FTRACE_ENTRY_PACKED
#define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print, filter) \
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)
#include "trace_entries.h"

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
int perf_ftrace_event_register(struct trace_event_call *call,
			       enum trace_reg type, void *data);
#else
#define perf_ftrace_event_register NULL
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
void init_ftrace_syscalls(void);
const char *get_syscall_name(int syscall);
#else
static inline void init_ftrace_syscalls(void) { }
static inline const char *get_syscall_name(int syscall)
{
	return NULL;
}
#endif

#ifdef CONFIG_EVENT_TRACING
void trace_event_init(void);
void trace_event_enum_update(struct trace_enum_map **map, int len);
#else
static inline void __init trace_event_init(void) { }
static inline void trace_event_enum_update(struct trace_enum_map **map, int len) { }
#endif

extern struct trace_iterator *tracepoint_print_iter;

#endif /* _LINUX_KERNEL_TRACE_H */