/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _LINUX_TRACE_EVENT_H
#define _LINUX_TRACE_EVENT_H

#include <linux/ring_buffer.h>
#include <linux/trace_seq.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/tracepoint.h>

/* Used for event string fields when they are NULL */
#define EVENT_NULL_STR		"(null)"
const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
				  unsigned long flags,
				  const struct trace_print_flags *flag_array);

const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
				    const struct trace_print_flags *symbol_array);
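/*
 * These two helpers back the __print_flags()/__print_symbolic() macros
 * used in TRACE_EVENT() print formats. An illustrative sketch (the
 * "my_flags" table below is hypothetical, not part of this header):
 *
 *	static const struct trace_print_flags my_flags[] = {
 *		{ 0x01,	"READ" },
 *		{ 0x02,	"WRITE" },
 *		{ -1,	NULL }		(terminating entry)
 *	};
 *
 *	(renders e.g. flags == 0x3 as "READ|WRITE" into the trace_seq)
 *	trace_print_flags_seq(p, "|", flags, my_flags);
 */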
#if BITS_PER_LONG == 32
const char *trace_print_flags_seq_u64(struct trace_seq *p, const char *delim,
				      unsigned long long flags,
				      const struct trace_print_flags_u64 *flag_array);

const char *trace_print_symbols_seq_u64(struct trace_seq *p,
					unsigned long long val,
					const struct trace_print_flags_u64
					*symbol_array);
#endif
const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
				    unsigned int bitmask_size);
const char *trace_print_hex_seq(struct trace_seq *p,
				const unsigned char *buf, int len,
				bool concatenate);
const char *trace_print_array_seq(struct trace_seq *p,
				  const void *buf, int count,
				  size_t el_size);
const char *
trace_print_hex_dump_seq(struct trace_seq *p, const char *prefix_str,
			 int prefix_type, int rowsize, int groupsize,
			 const void *buf, size_t len, bool ascii);
struct trace_iterator;
struct trace_event;

int trace_raw_output_prep(struct trace_iterator *iter,
			  struct trace_event *event);
__printf(2, 3)
void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...);
/* Used to find the offset and length of dynamic fields in trace events */
struct trace_dynamic_info {
#ifdef CONFIG_CPU_BIG_ENDIAN
	u16	len;
	u16	offset;
#else
	u16	offset;
	u16	len;
#endif
} __packed;
/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned short		type;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
};
#define TRACE_EVENT_TYPE_MAX						\
	((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
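/*
 * trace_entry::type is an unsigned short, so this works out to
 * (1 << 16) - 1 = 65535 possible event type IDs.
 */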
/*
 * Trace iterator - used by printout routines that present trace
 * results to users and which might sleep, etc.:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct array_buffer	*array_buffer;
	struct ring_buffer_iter	**buffer_iter;
	unsigned long		iter_flags;
	void			*temp;	/* temp holder */
	unsigned int		temp_size;
	char			*fmt;	/* modified format holder */
	unsigned int		fmt_size;

	/* trace_seq for __print_flags() and __print_symbolic() etc. */
	struct trace_seq	tmp_seq;

	cpumask_var_t		started;

	/* Set when the file is closed to prevent new waiters */
	bool			closed;

	/* it's true when current open file is snapshot */
	bool			snapshot;

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
	struct trace_entry	*ent;
	unsigned long		lost_events;

	/* All new fields here will be zeroed out in pipe_read */
};
enum trace_iter_flags {
	TRACE_FILE_LAT_FMT	= 1,
	TRACE_FILE_ANNOTATE	= 2,
	TRACE_FILE_TIME_IN_NS	= 4,
};
typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
					      int flags,
					      struct trace_event *event);
struct trace_event_functions {
	trace_print_func	trace;
	trace_print_func	raw;
	trace_print_func	hex;
	trace_print_func	binary;
};
struct trace_event {
	struct hlist_node		node;
	int				type;
	struct trace_event_functions	*funcs;
};
extern int register_trace_event(struct trace_event *event);
extern int unregister_trace_event(struct trace_event *event);
/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2,	/* Relay to other output functions */
	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
};
enum print_line_t trace_handle_return(struct trace_seq *s);
static inline void tracing_generic_entry_update(struct trace_entry *entry,
						unsigned short type,
						unsigned int trace_ctx)
{
	entry->preempt_count	= trace_ctx & 0xff;
	entry->pid		= current->pid;
	entry->type		= type;
	entry->flags		= trace_ctx >> 16;
}
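/*
 * Layout of the trace_ctx word as unpacked above: bits 0-7 hold the
 * preemption count and bits 16-23 hold the trace_flag_type bits (see
 * the enum below). For example, trace_ctx == 0x00080001 decodes to a
 * preempt_count of 1 with TRACE_FLAG_HARDIRQ set.
 */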
unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status);
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_NEED_RESCHED_LAZY	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
	TRACE_FLAG_NMI			= 0x40,
	TRACE_FLAG_BH_OFF		= 0x80,
};
static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
{
	unsigned int irq_status = irqs_disabled_flags(irqflags) ?
		TRACE_FLAG_IRQS_OFF : 0;
	return tracing_gen_ctx_irq_test(irq_status);
}
static inline unsigned int tracing_gen_ctx(void)
{
	unsigned long irqflags;

	local_save_flags(irqflags);
	return tracing_gen_ctx_flags(irqflags);
}
static inline unsigned int tracing_gen_ctx_dec(void)
{
	unsigned int trace_ctx;

	trace_ctx = tracing_gen_ctx();

	/*
	 * Subtract one from the preemption counter if preemption is enabled,
	 * see trace_event_buffer_reserve() for details.
	 */
	if (IS_ENABLED(CONFIG_PREEMPTION))
		trace_ctx--;
	return trace_ctx;
}
struct trace_event_file;
struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct trace_buffer **current_buffer,
				struct trace_event_file *trace_file,
				int type, unsigned long len,
				unsigned int trace_ctx);
#define TRACE_RECORD_CMDLINE	BIT(0)
#define TRACE_RECORD_TGID	BIT(1)
void tracing_record_taskinfo(struct task_struct *task, int flags);
void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
					  struct task_struct *next, int flags);

void tracing_record_cmdline(struct task_struct *task);
void tracing_record_tgid(struct task_struct *task);
__printf(3, 4)
int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);
enum trace_reg {
	TRACE_REG_REGISTER,
	TRACE_REG_UNREGISTER,
#ifdef CONFIG_PERF_EVENTS
	TRACE_REG_PERF_REGISTER,
	TRACE_REG_PERF_UNREGISTER,
	TRACE_REG_PERF_OPEN,
	TRACE_REG_PERF_CLOSE,

	/*
	 * These (ADD/DEL) use a 'boolean' return value, where 1 (true) means a
	 * custom action was taken and the default action is not to be
	 * performed.
	 */
	TRACE_REG_PERF_ADD,
	TRACE_REG_PERF_DEL,
#endif
};

struct trace_event_call;
#define TRACE_FUNCTION_TYPE ((const char *)~0UL)
struct trace_event_fields {
	const char *type;
	union {
		struct {
			const char	*name;
			const int	size;
			const int	align;
			const int	is_signed;
			const int	filter_type;
			const int	len;
		};
		int (*define_fields)(struct trace_event_call *);
	};
};
struct trace_event_class {
	const char		*system;
	void			*probe;
#ifdef CONFIG_PERF_EVENTS
	void			*perf_probe;
#endif
	int			(*reg)(struct trace_event_call *event,
				       enum trace_reg type, void *data);
	struct trace_event_fields *fields_array;
	struct list_head	*(*get_fields)(struct trace_event_call *);
	struct list_head	fields;
	int			(*raw_init)(struct trace_event_call *);
};
extern int trace_event_reg(struct trace_event_call *event,
			   enum trace_reg type, void *data);
struct trace_event_buffer {
	struct trace_buffer		*buffer;
	struct ring_buffer_event	*event;
	struct trace_event_file		*trace_file;
	void				*entry;
	unsigned int			trace_ctx;
	struct pt_regs			*regs;
};
void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
				 struct trace_event_file *trace_file,
				 unsigned long len);

void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);
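/*
 * The reserve/commit pair above is typically used as in this sketch
 * (the "my_event_entry" struct and its field are hypothetical):
 *
 *	struct trace_event_buffer fbuffer;
 *	struct my_event_entry *entry;
 *
 *	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
 *					   sizeof(*entry));
 *	if (!entry)
 *		return;
 *	entry->my_field = 42;
 *	trace_event_buffer_commit(&fbuffer);
 */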
enum {
	TRACE_EVENT_FL_CAP_ANY_BIT,
	TRACE_EVENT_FL_NO_SET_FILTER_BIT,
	TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
	TRACE_EVENT_FL_TRACEPOINT_BIT,
	TRACE_EVENT_FL_DYNAMIC_BIT,
	TRACE_EVENT_FL_KPROBE_BIT,
	TRACE_EVENT_FL_UPROBE_BIT,
	TRACE_EVENT_FL_EPROBE_BIT,
	TRACE_EVENT_FL_FPROBE_BIT,
	TRACE_EVENT_FL_CUSTOM_BIT,
};
/*
 * Event flags:
 *  CAP_ANY	  - Any user can enable for perf
 *  NO_SET_FILTER - Set when filter has error and is to be ignored
 *  IGNORE_ENABLE - For trace internal events, do not enable with debugfs file
 *  TRACEPOINT	  - Event is a tracepoint
 *  DYNAMIC	  - Event is a dynamic event (created at run time)
 *  KPROBE	  - Event is a kprobe
 *  UPROBE	  - Event is a uprobe
 *  EPROBE	  - Event is an event probe
 *  FPROBE	  - Event is a function probe
 *  CUSTOM	  - Event is a custom event (to be attached to an existing tracepoint)
 *		    This is set when the custom event has not been attached
 *		    to a tracepoint yet, then it is cleared when it is.
 */
enum {
	TRACE_EVENT_FL_CAP_ANY		= (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
	TRACE_EVENT_FL_NO_SET_FILTER	= (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
	TRACE_EVENT_FL_IGNORE_ENABLE	= (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
	TRACE_EVENT_FL_TRACEPOINT	= (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
	TRACE_EVENT_FL_DYNAMIC		= (1 << TRACE_EVENT_FL_DYNAMIC_BIT),
	TRACE_EVENT_FL_KPROBE		= (1 << TRACE_EVENT_FL_KPROBE_BIT),
	TRACE_EVENT_FL_UPROBE		= (1 << TRACE_EVENT_FL_UPROBE_BIT),
	TRACE_EVENT_FL_EPROBE		= (1 << TRACE_EVENT_FL_EPROBE_BIT),
	TRACE_EVENT_FL_FPROBE		= (1 << TRACE_EVENT_FL_FPROBE_BIT),
	TRACE_EVENT_FL_CUSTOM		= (1 << TRACE_EVENT_FL_CUSTOM_BIT),
};
#define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE)
struct trace_event_call {
	struct list_head	list;
	struct trace_event_class *class;
	union {
		char			*name;
		/* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
		struct tracepoint	*tp;
	};
	struct trace_event	event;
	/*
	 * Static events can disappear with modules,
	 * whereas dynamic ones need their own ref count.
	 */
	union {
		void			*module;
		atomic_t		refcnt;
	};

	/* See the TRACE_EVENT_FL_* flags above */
	int			flags; /* static flags of different events */

#ifdef CONFIG_PERF_EVENTS
	int			perf_refcount;
	struct hlist_head __percpu *perf_events;
	struct bpf_prog_array __rcu *prog_array;

	int	(*perf_perm)(struct trace_event_call *,
			     struct perf_event *);
#endif
};
#ifdef CONFIG_DYNAMIC_EVENTS
bool trace_event_dyn_try_get_ref(struct trace_event_call *call);
void trace_event_dyn_put_ref(struct trace_event_call *call);
bool trace_event_dyn_busy(struct trace_event_call *call);
#else
static inline bool trace_event_dyn_try_get_ref(struct trace_event_call *call)
{
	/* Without DYNAMIC_EVENTS configured, nothing should be calling this */
	return false;
}
static inline void trace_event_dyn_put_ref(struct trace_event_call *call)
{
}
static inline bool trace_event_dyn_busy(struct trace_event_call *call)
{
	/* Nothing should call this without DYNAMIC_EVENTS configured. */
	return true;
}
#endif
static inline bool trace_event_try_get_ref(struct trace_event_call *call)
{
	if (call->flags & TRACE_EVENT_FL_DYNAMIC)
		return trace_event_dyn_try_get_ref(call);
	else
		return try_module_get(call->module);
}

static inline void trace_event_put_ref(struct trace_event_call *call)
{
	if (call->flags & TRACE_EVENT_FL_DYNAMIC)
		trace_event_dyn_put_ref(call);
	else
		module_put(call->module);
}
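/*
 * Illustrative use of the two helpers above: pin down a (module or
 * dynamic) event while using it, then drop the reference:
 *
 *	if (!trace_event_try_get_ref(call))
 *		return -ENODEV;
 *	...
 *	trace_event_put_ref(call);
 */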
#ifdef CONFIG_PERF_EVENTS
static inline bool bpf_prog_array_valid(struct trace_event_call *call)
{
	/*
	 * This inline function checks whether call->prog_array
	 * is valid or not. The function is called in various places,
	 * outside rcu_read_lock/unlock, as a heuristic to speed up execution.
	 *
	 * If this function returns true, and later call->prog_array
	 * becomes false inside rcu_read_lock/unlock region,
	 * we bail out then. If this function returns false,
	 * there is a risk that we might miss a few events if the checking
	 * were delayed until inside rcu_read_lock/unlock region and
	 * call->prog_array happened to become non-NULL then.
	 *
	 * Here, READ_ONCE() is used instead of rcu_access_pointer().
	 * rcu_access_pointer() requires the actual definition of
	 * "struct bpf_prog_array" while READ_ONCE() only needs
	 * a declaration of the same type.
	 */
	return !!READ_ONCE(call->prog_array);
}
#endif
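/*
 * The heuristic above is typically used along these lines (compare the
 * kprobe/perf paths): check cheaply first, then let the RCU-protected
 * path revalidate:
 *
 *	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
 *		return;		(a BPF program consumed the event)
 */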
static inline const char *
trace_event_name(struct trace_event_call *call)
{
	if (call->flags & TRACE_EVENT_FL_CUSTOM)
		return call->name;
	else if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
		return call->tp ? call->tp->name : NULL;
	else
		return call->name;
}
static inline struct list_head *
trace_get_fields(struct trace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}
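/*
 * Sketch of walking an event's fields with the helper above (struct
 * ftrace_event_field lives in kernel/trace/trace.h, so this is only
 * illustrative here):
 *
 *	struct list_head *head = trace_get_fields(event_call);
 *	struct ftrace_event_field *field;
 *
 *	list_for_each_entry(field, head, link)
 *		pr_info("%s %s\n", field->type, field->name);
 */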
struct trace_subsystem_dir;
enum {
	EVENT_FILE_FL_ENABLED_BIT,
	EVENT_FILE_FL_RECORDED_CMD_BIT,
	EVENT_FILE_FL_RECORDED_TGID_BIT,
	EVENT_FILE_FL_FILTERED_BIT,
	EVENT_FILE_FL_NO_SET_FILTER_BIT,
	EVENT_FILE_FL_SOFT_MODE_BIT,
	EVENT_FILE_FL_SOFT_DISABLED_BIT,
	EVENT_FILE_FL_TRIGGER_MODE_BIT,
	EVENT_FILE_FL_TRIGGER_COND_BIT,
	EVENT_FILE_FL_PID_FILTER_BIT,
	EVENT_FILE_FL_WAS_ENABLED_BIT,
	EVENT_FILE_FL_FREED_BIT,
};
extern struct trace_event_file *trace_get_event_file(const char *instance,
						     const char *system,
						     const char *event);
extern void trace_put_event_file(struct trace_event_file *file);
#define MAX_DYNEVENT_CMD_LEN	(2048)
enum dynevent_type {
	DYNEVENT_TYPE_SYNTH = 1,
	DYNEVENT_TYPE_KPROBE,
	DYNEVENT_TYPE_NONE,
};

struct dynevent_cmd;

typedef int (*dynevent_create_fn_t)(struct dynevent_cmd *cmd);
struct dynevent_cmd {
	struct seq_buf		seq;
	const char		*event_name;
	unsigned int		n_fields;
	enum dynevent_type	type;
	dynevent_create_fn_t	run_command;
	void			*private_data;
};
extern int dynevent_create(struct dynevent_cmd *cmd);

extern int synth_event_delete(const char *name);
extern void synth_event_cmd_init(struct dynevent_cmd *cmd,
				 char *buf, int maxlen);
extern int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd,
				       const char *name,
				       struct module *mod, ...);
#define synth_event_gen_cmd_start(cmd, name, mod, ...)	\
	__synth_event_gen_cmd_start(cmd, name, mod, ## __VA_ARGS__, NULL)
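/*
 * Illustrative in-kernel generation of a synthetic event (the event and
 * field names are made up; compare the kerneldoc examples in
 * kernel/trace/trace_events_synth.c):
 *
 *	struct dynevent_cmd cmd;
 *	char *buf;
 *	int ret;
 *
 *	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
 *	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *	ret = synth_event_gen_cmd_start(&cmd, "my_synth", THIS_MODULE,
 *					"u64 lat", "pid_t pid");
 *	if (!ret)
 *		ret = synth_event_gen_cmd_end(&cmd);
 */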
struct synth_field_desc {
	const char *type;
	const char *name;
};
extern int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd,
					   const char *name,
					   struct module *mod,
					   struct synth_field_desc *fields,
					   unsigned int n_fields);
extern int synth_event_create(const char *name,
			      struct synth_field_desc *fields,
			      unsigned int n_fields, struct module *mod);
extern int synth_event_add_field(struct dynevent_cmd *cmd,
				 const char *type,
				 const char *name);
extern int synth_event_add_field_str(struct dynevent_cmd *cmd,
				     const char *type_name);
extern int synth_event_add_fields(struct dynevent_cmd *cmd,
				  struct synth_field_desc *fields,
				  unsigned int n_fields);
#define synth_event_gen_cmd_end(cmd)	\
	dynevent_create(cmd)
struct synth_event_trace_state {
	struct trace_event_buffer fbuffer;
	struct synth_trace_event *entry;
	struct trace_buffer *buffer;
	struct synth_event *event;
	unsigned int cur_field;
	unsigned int n_u64;
	bool disabled;
	bool add_next;
	bool enabled;
};
extern int synth_event_trace(struct trace_event_file *file,
			     unsigned int n_vals, ...);
extern int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
				   unsigned int n_vals);
extern int synth_event_trace_start(struct trace_event_file *file,
				   struct synth_event_trace_state *trace_state);
extern int synth_event_add_next_val(u64 val,
				    struct synth_event_trace_state *trace_state);
extern int synth_event_add_val(const char *field_name, u64 val,
			       struct synth_event_trace_state *trace_state);
extern int synth_event_trace_end(struct synth_event_trace_state *trace_state);
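/*
 * Piecewise tracing of a synthetic event's fields, as a sketch (the
 * field names follow the hypothetical "my_synth" event above, and
 * trace_end closes out the started trace):
 *
 *	struct synth_event_trace_state state;
 *
 *	ret = synth_event_trace_start(file, &state);
 *	if (ret)
 *		return ret;
 *	synth_event_add_val("lat", 1000, &state);
 *	synth_event_add_val("pid", current->pid, &state);
 *	ret = synth_event_trace_end(&state);
 */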
extern int kprobe_event_delete(const char *name);

extern void kprobe_event_cmd_init(struct dynevent_cmd *cmd,
				  char *buf, int maxlen);
#define kprobe_event_gen_cmd_start(cmd, name, loc, ...)			\
	__kprobe_event_gen_cmd_start(cmd, false, name, loc, ## __VA_ARGS__, NULL)

#define kretprobe_event_gen_cmd_start(cmd, name, loc, ...)		\
	__kprobe_event_gen_cmd_start(cmd, true, name, loc, ## __VA_ARGS__, NULL)
extern int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd,
					bool kretprobe,
					const char *name,
					const char *loc, ...);
#define kprobe_event_add_fields(cmd, ...)	\
	__kprobe_event_add_fields(cmd, ## __VA_ARGS__, NULL)

#define kprobe_event_add_field(cmd, field)	\
	__kprobe_event_add_fields(cmd, field, NULL)

extern int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...);
#define kprobe_event_gen_cmd_end(cmd)		\
	dynevent_create(cmd)

#define kretprobe_event_gen_cmd_end(cmd)	\
	dynevent_create(cmd)
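/*
 * Illustrative kprobe event generation (probe name, location and fields
 * are made up; compare the kerneldoc examples in
 * kernel/trace/trace_kprobe.c):
 *
 *	struct dynevent_cmd cmd;
 *	char *buf;
 *	int ret;
 *
 *	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
 *	kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *	ret = kprobe_event_gen_cmd_start(&cmd, "my_kprobe", "do_sys_open",
 *					 "dfd=%ax", "filename=%dx");
 *	if (!ret)
 *		ret = kprobe_event_gen_cmd_end(&cmd);
 */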
/*
 * Event file flags:
 *  ENABLED	  - The event is enabled
 *  RECORDED_CMD  - The comms should be recorded at sched_switch
 *  RECORDED_TGID - The tgids should be recorded at sched_switch
 *  FILTERED	  - The event has a filter attached
 *  NO_SET_FILTER - Set when filter has error and is to be ignored
 *  SOFT_MODE	  - The event is enabled/disabled by SOFT_DISABLED
 *  SOFT_DISABLED - When set, do not trace the event (even though its
 *		     tracepoint may be enabled)
 *  TRIGGER_MODE  - When set, invoke the triggers associated with the event
 *  TRIGGER_COND  - When set, one or more triggers has an associated filter
 *  PID_FILTER	  - When set, the event is filtered based on pid
 *  WAS_ENABLED	  - Set when enabled to know to clear trace on module removal
 *  FREED	  - File descriptor is freed, all fields should be considered invalid
 */
enum {
	EVENT_FILE_FL_ENABLED		= (1 << EVENT_FILE_FL_ENABLED_BIT),
	EVENT_FILE_FL_RECORDED_CMD	= (1 << EVENT_FILE_FL_RECORDED_CMD_BIT),
	EVENT_FILE_FL_RECORDED_TGID	= (1 << EVENT_FILE_FL_RECORDED_TGID_BIT),
	EVENT_FILE_FL_FILTERED		= (1 << EVENT_FILE_FL_FILTERED_BIT),
	EVENT_FILE_FL_NO_SET_FILTER	= (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT),
	EVENT_FILE_FL_SOFT_MODE		= (1 << EVENT_FILE_FL_SOFT_MODE_BIT),
	EVENT_FILE_FL_SOFT_DISABLED	= (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT),
	EVENT_FILE_FL_TRIGGER_MODE	= (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
	EVENT_FILE_FL_TRIGGER_COND	= (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
	EVENT_FILE_FL_PID_FILTER	= (1 << EVENT_FILE_FL_PID_FILTER_BIT),
	EVENT_FILE_FL_WAS_ENABLED	= (1 << EVENT_FILE_FL_WAS_ENABLED_BIT),
	EVENT_FILE_FL_FREED		= (1 << EVENT_FILE_FL_FREED_BIT),
};
struct trace_event_file {
	struct list_head		list;
	struct trace_event_call		*event_call;
	struct event_filter __rcu	*filter;
	struct eventfs_inode		*ei;
	struct trace_array		*tr;
	struct trace_subsystem_dir	*system;
	struct list_head		triggers;

	/*
	 * 32 bit flags:
	 *   bit 0:		enabled
	 *   bit 1:		enabled cmd record
	 *   bit 2:		enable/disable with the soft disable bit
	 *   bit 3:		soft disabled
	 *   bit 4:		trigger enabled
	 *
	 * Note: The bits must be set atomically to prevent races
	 * from other writers. Reads of flags do not need to be in
	 * sync as they occur in critical sections. But the way flags
	 * is currently used, these changes do not affect the code
	 * except that when a change is made, it may have a slight
	 * delay in propagating the changes to other CPUs due to
	 * caching and such. Which is mostly OK ;-)
	 */
	unsigned long		flags;
	refcount_t		ref;	/* ref count for opened files */
	atomic_t		sm_ref;	/* soft-mode reference counter */
	atomic_t		tm_ref;	/* trigger-mode reference counter */
};
#define __TRACE_EVENT_FLAGS(name, value)				\
	static int __init trace_init_flags_##name(void)		\
	{								\
		event_##name.flags |= value;				\
		return 0;						\
	}								\
	early_initcall(trace_init_flags_##name);
#define __TRACE_EVENT_PERF_PERM(name, expr...)				\
	static int perf_perm_##name(struct trace_event_call *tp_event, \
				    struct perf_event *p_event)		\
	{								\
		return ({ expr; });					\
	}								\
	static int __init trace_init_perf_perm_##name(void)		\
	{								\
		event_##name.perf_perm = &perf_perm_##name;		\
		return 0;						\
	}								\
	early_initcall(trace_init_perf_perm_##name);
#define PERF_MAX_TRACE_SIZE	8192

#define MAX_FILTER_STR_VAL	256U	/* Should handle KSYM_SYMBOL_LEN */
enum event_trigger_type {
	ETT_NONE		= (0),
	ETT_TRACE_ONOFF		= (1 << 0),
	ETT_SNAPSHOT		= (1 << 1),
	ETT_STACKTRACE		= (1 << 2),
	ETT_EVENT_ENABLE	= (1 << 3),
	ETT_EVENT_HIST		= (1 << 4),
	ETT_HIST_ENABLE		= (1 << 5),
	ETT_EVENT_EPROBE	= (1 << 6),
};
extern int filter_match_preds(struct event_filter *filter, void *rec);
extern enum event_trigger_type
event_triggers_call(struct trace_event_file *file,
		    struct trace_buffer *buffer, void *rec,
		    struct ring_buffer_event *event);
extern void
event_triggers_post_call(struct trace_event_file *file,
			 enum event_trigger_type tt);
bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);

bool __trace_trigger_soft_disabled(struct trace_event_file *file);
/**
 * trace_trigger_soft_disabled - do triggers and test if soft disabled
 * @file: The file pointer of the event to test
 *
 * If any triggers without filters are attached to this event, they
 * will be called here. If the event is soft disabled and has no
 * triggers that require testing the fields, it will return true,
 * otherwise false.
 */
static __always_inline bool
trace_trigger_soft_disabled(struct trace_event_file *file)
{
	unsigned long eflags = file->flags;

	if (likely(!(eflags & (EVENT_FILE_FL_TRIGGER_MODE |
			       EVENT_FILE_FL_SOFT_DISABLED |
			       EVENT_FILE_FL_PID_FILTER))))
		return false;

	if (likely(eflags & EVENT_FILE_FL_TRIGGER_COND))
		return false;

	return __trace_trigger_soft_disabled(file);
}
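/*
 * Generated TRACE_EVENT() probe functions use this as an early bail-out,
 * roughly:
 *
 *	if (trace_trigger_soft_disabled(trace_file))
 *		return;
 */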
#ifdef CONFIG_BPF_EVENTS
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx);
int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie);
void perf_event_detach_bpf_prog(struct perf_event *event);
int perf_event_query_prog_array(struct perf_event *event, void __user *info);

struct bpf_raw_tp_link;
int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link);
int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link);

struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name);
void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp);
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
			    u32 *fd_type, const char **buf,
			    u64 *probe_offset, u64 *probe_addr,
			    unsigned long *missed);
int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
#else
static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	return 1;
}

static inline int
perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie)
{
	return -EOPNOTSUPP;
}

static inline void perf_event_detach_bpf_prog(struct perf_event *event) { }

static inline int
perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	return -EOPNOTSUPP;
}

struct bpf_raw_tp_link;
static inline int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link)
{
	return -EOPNOTSUPP;
}
static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link)
{
	return -EOPNOTSUPP;
}
static inline struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
	return NULL;
}
static inline void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
}
static inline int bpf_get_perf_event_info(const struct perf_event *event,
					  u32 *prog_id, u32 *fd_type,
					  const char **buf, u64 *probe_offset,
					  u64 *probe_addr, unsigned long *missed)
{
	return -EOPNOTSUPP;
}
static inline int
bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	return -EOPNOTSUPP;
}
static inline int
bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	return -EOPNOTSUPP;
}
#endif
enum {
	FILTER_OTHER = 0,
	FILTER_STATIC_STRING,
	FILTER_DYN_STRING,
	FILTER_RDYN_STRING,
	FILTER_PTR_STRING,
	FILTER_TRACE_FN,
	FILTER_CPUMASK,
	FILTER_COMM,
	FILTER_CPU,
	FILTER_STACKTRACE,
};
extern int trace_event_raw_init(struct trace_event_call *call);
extern int trace_define_field(struct trace_event_call *call, const char *type,
			      const char *name, int offset, int size,
			      int is_signed, int filter_type);
extern int trace_add_event_call(struct trace_event_call *call);
extern int trace_remove_event_call(struct trace_event_call *call);
extern int trace_event_get_offsets(struct trace_event_call *call);
int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
int trace_set_clr_event(const char *system, const char *event, int set);
int trace_array_set_clr_event(struct trace_array *tr, const char *system,
			      const char *event, bool enable);
/*
 * The double __builtin_constant_p is because gcc will give us an error
 * if we try to allocate the static variable to fmt if it is not a
 * constant. Even with the outer if statement optimizing out.
 */
#define event_trace_printk(ip, fmt, args...)				\
do {									\
	__trace_printk_check_format(fmt, ##args);			\
	tracing_record_cmdline(current);				\
	if (__builtin_constant_p(fmt)) {				\
		static const char *trace_printk_fmt			\
		  __section("__trace_printk_fmt") =			\
			__builtin_constant_p(fmt) ? fmt : NULL;		\
									\
		__trace_bprintk(ip, trace_printk_fmt, ##args);		\
	} else								\
		__trace_printk(ip, fmt, ##args);			\
} while (0)
#ifdef CONFIG_PERF_EVENTS

struct perf_event;

DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
extern int  perf_trace_init(struct perf_event *event);
extern void perf_trace_destroy(struct perf_event *event);
extern int  perf_trace_add(struct perf_event *event, int flags);
extern void perf_trace_del(struct perf_event *event, int flags);
#ifdef CONFIG_KPROBE_EVENTS
extern int  perf_kprobe_init(struct perf_event *event, bool is_retprobe);
extern void perf_kprobe_destroy(struct perf_event *event);
extern int bpf_get_kprobe_info(const struct perf_event *event,
			       u32 *fd_type, const char **symbol,
			       u64 *probe_offset, u64 *probe_addr,
			       unsigned long *missed,
			       bool perf_type_tracepoint);
#endif
#ifdef CONFIG_UPROBE_EVENTS
extern int  perf_uprobe_init(struct perf_event *event,
			     unsigned long ref_ctr_offset, bool is_retprobe);
extern void perf_uprobe_destroy(struct perf_event *event);
extern int bpf_get_uprobe_info(const struct perf_event *event,
			       u32 *fd_type, const char **filename,
			       u64 *probe_offset, u64 *probe_addr,
			       bool perf_type_tracepoint);
#endif
extern int  ftrace_profile_set_filter(struct perf_event *event, int event_id,
				      char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
void perf_trace_buf_update(void *record, u16 type);
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp);
int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie);
void perf_event_free_bpf_prog(struct perf_event *event);
void bpf_trace_run1(struct bpf_raw_tp_link *link, u64 arg1);
void bpf_trace_run2(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2);
void bpf_trace_run3(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		    u64 arg3);
void bpf_trace_run4(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4);
void bpf_trace_run5(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4, u64 arg5);
void bpf_trace_run6(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4, u64 arg5, u64 arg6);
void bpf_trace_run7(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7);
void bpf_trace_run8(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
		    u64 arg8);
void bpf_trace_run9(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
		    u64 arg8, u64 arg9);
void bpf_trace_run10(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
		     u64 arg8, u64 arg9, u64 arg10);
void bpf_trace_run11(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
		     u64 arg8, u64 arg9, u64 arg10, u64 arg11);
void bpf_trace_run12(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
		     u64 arg8, u64 arg9, u64 arg10, u64 arg11, u64 arg12);
void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
			       struct trace_event_call *call, u64 count,
			       struct pt_regs *regs, struct hlist_head *head,
			       struct task_struct *task);
static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
		      u64 count, struct pt_regs *regs, void *head,
		      struct task_struct *task)
{
	perf_tp_event(type, count, raw_data, size, regs, head, rctx, task);
}

#endif /* CONFIG_PERF_EVENTS */
#define TRACE_EVENT_STR_MAX	512
/*
 * gcc warns that you can not use a va_list in an inlined
 * function. But lets me make it into a macro :-/
 */
#define __trace_event_vstr_len(fmt, va)			\
({							\
	va_list __ap;					\
	int __ret;					\
							\
	va_copy(__ap, *(va));				\
	__ret = vsnprintf(NULL, 0, fmt, __ap) + 1;	\
	va_end(__ap);					\
							\
	min(__ret, TRACE_EVENT_STR_MAX);		\
})
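/*
 * Illustrative caller (a hypothetical vararg tracing helper):
 *
 *	va_list ap;
 *	int len;
 *
 *	va_start(ap, fmt);
 *	len = __trace_event_vstr_len(fmt, &ap);
 *	va_end(ap);
 */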
#endif /* _LINUX_TRACE_EVENT_H */
/*
 * Note: we keep the TRACE_CUSTOM_EVENT outside the include file ifdef protection.
 *  This is due to the way trace custom events work. If a file includes two
 *  trace event headers under one "CREATE_CUSTOM_TRACE_EVENTS" the first include
 *  will override the TRACE_CUSTOM_EVENT and break the second include.
 */
#ifndef TRACE_CUSTOM_EVENT

#define DECLARE_CUSTOM_EVENT_CLASS(name, proto, args, tstruct, assign, print)
#define DEFINE_CUSTOM_EVENT(template, name, proto, args)
#define TRACE_CUSTOM_EVENT(name, proto, args, struct, assign, print)

#endif /* ifdef TRACE_CUSTOM_EVENT (see note above) */