/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _LINUX_TRACE_EVENT_H
#define _LINUX_TRACE_EVENT_H

#include <linux/ring_buffer.h>
#include <linux/trace_seq.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/tracepoint.h>
const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
				  unsigned long flags,
				  const struct trace_print_flags *flag_array);
const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
				    const struct trace_print_flags *symbol_array);
#if BITS_PER_LONG == 32
const char *trace_print_flags_seq_u64(struct trace_seq *p, const char *delim,
				      unsigned long long flags,
				      const struct trace_print_flags_u64 *flag_array);
const char *trace_print_symbols_seq_u64(struct trace_seq *p,
					unsigned long long val,
					const struct trace_print_flags_u64 *symbol_array);
#endif

const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
				    unsigned int bitmask_size);
const char *trace_print_hex_seq(struct trace_seq *p,
				const unsigned char *buf, int len,
				bool concatenate);

const char *trace_print_array_seq(struct trace_seq *p,
				  const void *buf, int count,
				  size_t el_size);
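/*
 * Illustrative note (not part of the original header): the helpers above
 * back the __print_flags()/__print_symbolic() family of TP_printk()
 * helpers used when defining static trace events.  A hypothetical event
 * could use them like:
 *
 *	TP_printk("state=%s", __print_symbolic(__entry->state,
 *					       { 0, "IDLE" },
 *					       { 1, "RUNNING" }))
 */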
struct trace_iterator;
struct trace_event;

int trace_raw_output_prep(struct trace_iterator *iter,
			  struct trace_event *event);
/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned short		type;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
};

#define TRACE_EVENT_TYPE_MAX \
	((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
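/*
 * Illustrative note (not part of the original header): with the 2-byte
 * "type" member above, TRACE_EVENT_TYPE_MAX evaluates to
 * (1 << 16) - 1 == 65535, the largest event type id that fits in
 * trace_entry::type.
 */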
/*
 * Trace iterator - used by printout routines that present trace
 * results to users; such routines might sleep, etc:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct trace_buffer	*trace_buffer;
	struct ring_buffer_iter	**buffer_iter;
	unsigned long		iter_flags;

	/* trace_seq for __print_flags() and __print_symbolic() etc. */
	struct trace_seq	tmp_seq;

	cpumask_var_t		started;

	/* it's true when current open file is snapshot */
	bool			snapshot;

	/* The below is zeroed out in pipe_read */
	struct trace_entry	*ent;
	unsigned long		lost_events;

	/* All new fields here will be zeroed out in pipe_read */
};
enum trace_iter_flags {
	TRACE_FILE_LAT_FMT	= 1,
	TRACE_FILE_ANNOTATE	= 2,
	TRACE_FILE_TIME_IN_NS	= 4,
};
typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
					      int flags,
					      struct trace_event *event);
struct trace_event_functions {
	trace_print_func	trace;
	trace_print_func	raw;
	trace_print_func	hex;
	trace_print_func	binary;
};
struct trace_event {
	struct hlist_node		node;
	struct list_head		list;
	int				type;
	struct trace_event_functions	*funcs;
};
extern int register_trace_event(struct trace_event *event);
extern int unregister_trace_event(struct trace_event *event);
/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2,	/* Relay to other output functions */
	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
};

enum print_line_t trace_handle_return(struct trace_seq *s);
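/*
 * Illustrative sketch (not part of the original header): a trace_event
 * output callback typically finishes with trace_handle_return() so that
 * an overflowed trace_seq is reported as TRACE_TYPE_PARTIAL_LINE and the
 * line is retried after flushing.  "my_event_output" and the iterator's
 * seq member are hypothetical here:
 *
 *	static enum print_line_t
 *	my_event_output(struct trace_iterator *iter, int flags,
 *			struct trace_event *event)
 *	{
 *		trace_seq_printf(&iter->seq, "...");
 *		return trace_handle_return(&iter->seq);
 *	}
 */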
void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);

struct trace_event_file;
struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
				struct trace_event_file *trace_file,
				int type, unsigned long len,
				unsigned long flags, int pc);
#define TRACE_RECORD_CMDLINE	BIT(0)
#define TRACE_RECORD_TGID	BIT(1)

void tracing_record_taskinfo(struct task_struct *task, int flags);
void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
					  struct task_struct *next, int flags);

void tracing_record_cmdline(struct task_struct *task);
void tracing_record_tgid(struct task_struct *task);

int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);
enum trace_reg {
	TRACE_REG_REGISTER,
	TRACE_REG_UNREGISTER,
#ifdef CONFIG_PERF_EVENTS
	TRACE_REG_PERF_REGISTER,
	TRACE_REG_PERF_UNREGISTER,
	TRACE_REG_PERF_OPEN,
	TRACE_REG_PERF_CLOSE,

	/*
	 * These (ADD/DEL) use a 'boolean' return value, where 1 (true) means a
	 * custom action was taken and the default action is not to be
	 * performed.
	 */
	TRACE_REG_PERF_ADD,
	TRACE_REG_PERF_DEL,
#endif
};

struct trace_event_call;
struct trace_event_class {
	const char		*system;
	void			*probe;
#ifdef CONFIG_PERF_EVENTS
	void			*perf_probe;
#endif
	int			(*reg)(struct trace_event_call *event,
				       enum trace_reg type, void *data);
	int			(*define_fields)(struct trace_event_call *);
	struct list_head	*(*get_fields)(struct trace_event_call *);
	struct list_head	fields;
	int			(*raw_init)(struct trace_event_call *);
};
extern int trace_event_reg(struct trace_event_call *event,
			   enum trace_reg type, void *data);
struct trace_event_buffer {
	struct ring_buffer		*buffer;
	struct ring_buffer_event	*event;
	struct trace_event_file		*trace_file;
	void				*entry;
	unsigned long			flags;
	int				pc;
};
void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
				 struct trace_event_file *trace_file,
				 unsigned long len);

void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);
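/*
 * Illustrative sketch (not part of the original header): the usual
 * reserve/fill/commit sequence around the two helpers above, roughly what
 * a generated static event probe does.  "struct my_entry" and its fields
 * are hypothetical:
 *
 *	struct trace_event_buffer fbuffer;
 *	struct my_entry *entry;
 *
 *	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
 *					   sizeof(*entry));
 *	if (!entry)
 *		return;
 *	entry->value = value;
 *	trace_event_buffer_commit(&fbuffer);
 */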
enum {
	TRACE_EVENT_FL_FILTERED_BIT,
	TRACE_EVENT_FL_CAP_ANY_BIT,
	TRACE_EVENT_FL_NO_SET_FILTER_BIT,
	TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
	TRACE_EVENT_FL_TRACEPOINT_BIT,
	TRACE_EVENT_FL_KPROBE_BIT,
	TRACE_EVENT_FL_UPROBE_BIT,
};
/*
 * Event flags:
 *  FILTERED	  - The event has a filter attached
 *  CAP_ANY	  - Any user can enable for perf
 *  NO_SET_FILTER - Set when filter has error and is to be ignored
 *  IGNORE_ENABLE - For trace internal events, do not enable with debugfs file
 *  TRACEPOINT    - Event is a tracepoint
 *  KPROBE        - Event is a kprobe
 *  UPROBE        - Event is a uprobe
 */
enum {
	TRACE_EVENT_FL_FILTERED		= (1 << TRACE_EVENT_FL_FILTERED_BIT),
	TRACE_EVENT_FL_CAP_ANY		= (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
	TRACE_EVENT_FL_NO_SET_FILTER	= (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
	TRACE_EVENT_FL_IGNORE_ENABLE	= (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
	TRACE_EVENT_FL_TRACEPOINT	= (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
	TRACE_EVENT_FL_KPROBE		= (1 << TRACE_EVENT_FL_KPROBE_BIT),
	TRACE_EVENT_FL_UPROBE		= (1 << TRACE_EVENT_FL_UPROBE_BIT),
};

#define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE)
struct trace_event_call {
	struct list_head	list;
	struct trace_event_class *class;
	union {
		char			*name;
		/* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
		struct tracepoint	*tp;
	};
	struct trace_event	event;
	struct event_filter	*filter;
	/*
	 *   bit 0:		filter_active
	 *   bit 1:		allow trace by non root (cap any)
	 *   bit 2:		failed to apply filter
	 *   bit 3:		trace internal event (do not enable)
	 *   bit 4:		Event was enabled by module
	 *   bit 5:		use call filter rather than file filter
	 *   bit 6:		Event is a tracepoint
	 */
	int			flags; /* static flags of different events */

#ifdef CONFIG_PERF_EVENTS
	struct hlist_head __percpu	*perf_events;
	struct bpf_prog_array __rcu	*prog_array;

	int	(*perf_perm)(struct trace_event_call *,
			     struct perf_event *);
#endif
};
#ifdef CONFIG_PERF_EVENTS
static inline bool bpf_prog_array_valid(struct trace_event_call *call)
{
	/*
	 * This inline function checks whether call->prog_array
	 * is valid or not. The function is called in various places,
	 * outside rcu_read_lock/unlock, as a heuristic to speed up execution.
	 *
	 * If this function returns true, and later call->prog_array
	 * becomes NULL inside the rcu_read_lock/unlock region,
	 * we bail out then. If this function returns false, there is
	 * a risk that we might miss a few events if the checking
	 * were delayed until inside the rcu_read_lock/unlock region and
	 * call->prog_array happened to become non-NULL then.
	 *
	 * Here, READ_ONCE() is used instead of rcu_access_pointer().
	 * rcu_access_pointer() requires the actual definition of
	 * "struct bpf_prog_array" while READ_ONCE() only needs
	 * a declaration of the same type.
	 */
	return !!READ_ONCE(call->prog_array);
}
#endif
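/*
 * Illustrative sketch (not part of the original header): the double-check
 * pattern described above, with hypothetical locals and a hypothetical
 * run_programs() helper.  The cheap check runs without the RCU read lock;
 * the authoritative check is repeated under it:
 *
 *	if (bpf_prog_array_valid(call)) {
 *		rcu_read_lock();
 *		prog_array = rcu_dereference(call->prog_array);
 *		if (prog_array)
 *			run_programs(prog_array, ctx);
 *		rcu_read_unlock();
 *	}
 */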
static inline const char *
trace_event_name(struct trace_event_call *call)
{
	if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
		return call->tp ? call->tp->name : NULL;
	else
		return call->name;
}
struct trace_subsystem_dir;
enum {
	EVENT_FILE_FL_ENABLED_BIT,
	EVENT_FILE_FL_RECORDED_CMD_BIT,
	EVENT_FILE_FL_RECORDED_TGID_BIT,
	EVENT_FILE_FL_FILTERED_BIT,
	EVENT_FILE_FL_NO_SET_FILTER_BIT,
	EVENT_FILE_FL_SOFT_MODE_BIT,
	EVENT_FILE_FL_SOFT_DISABLED_BIT,
	EVENT_FILE_FL_TRIGGER_MODE_BIT,
	EVENT_FILE_FL_TRIGGER_COND_BIT,
	EVENT_FILE_FL_PID_FILTER_BIT,
	EVENT_FILE_FL_WAS_ENABLED_BIT,
};
/*
 * Event file flags:
 *  ENABLED	  - The event is enabled
 *  RECORDED_CMD  - The comms should be recorded at sched_switch
 *  RECORDED_TGID - The tgids should be recorded at sched_switch
 *  FILTERED	  - The event has a filter attached
 *  NO_SET_FILTER - Set when filter has error and is to be ignored
 *  SOFT_MODE     - The event is enabled/disabled by SOFT_DISABLED
 *  SOFT_DISABLED - When set, do not trace the event (even though its
 *                   tracepoint may be enabled)
 *  TRIGGER_MODE  - When set, invoke the triggers associated with the event
 *  TRIGGER_COND  - When set, one or more triggers has an associated filter
 *  PID_FILTER    - When set, the event is filtered based on pid
 *  WAS_ENABLED   - Set when enabled to know to clear trace on module removal
 */
enum {
	EVENT_FILE_FL_ENABLED		= (1 << EVENT_FILE_FL_ENABLED_BIT),
	EVENT_FILE_FL_RECORDED_CMD	= (1 << EVENT_FILE_FL_RECORDED_CMD_BIT),
	EVENT_FILE_FL_RECORDED_TGID	= (1 << EVENT_FILE_FL_RECORDED_TGID_BIT),
	EVENT_FILE_FL_FILTERED		= (1 << EVENT_FILE_FL_FILTERED_BIT),
	EVENT_FILE_FL_NO_SET_FILTER	= (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT),
	EVENT_FILE_FL_SOFT_MODE		= (1 << EVENT_FILE_FL_SOFT_MODE_BIT),
	EVENT_FILE_FL_SOFT_DISABLED	= (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT),
	EVENT_FILE_FL_TRIGGER_MODE	= (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
	EVENT_FILE_FL_TRIGGER_COND	= (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
	EVENT_FILE_FL_PID_FILTER	= (1 << EVENT_FILE_FL_PID_FILTER_BIT),
	EVENT_FILE_FL_WAS_ENABLED	= (1 << EVENT_FILE_FL_WAS_ENABLED_BIT),
};
struct trace_event_file {
	struct list_head		list;
	struct trace_event_call		*event_call;
	struct event_filter __rcu	*filter;
	struct trace_array		*tr;
	struct trace_subsystem_dir	*system;
	struct list_head		triggers;

	/*
	 * 32 bit flags:
	 *   bit 0:		enabled
	 *   bit 1:		enabled cmd record
	 *   bit 2:		enable/disable with the soft disable bit
	 *   bit 3:		soft disabled
	 *   bit 4:		trigger enabled
	 *
	 * Note: The bits must be set atomically to prevent races
	 * from other writers. Reads of flags do not need to be in
	 * sync as they occur in critical sections. But the way flags
	 * is currently used, these changes do not affect the code
	 * except that when a change is made, it may have a slight
	 * delay in propagating the changes to other CPUs due to
	 * caching and such. Which is mostly OK ;-)
	 */
	unsigned long		flags;
	atomic_t		sm_ref;	/* soft-mode reference counter */
	atomic_t		tm_ref;	/* trigger-mode reference counter */
};
#define __TRACE_EVENT_FLAGS(name, value)				\
	static int __init trace_init_flags_##name(void)		\
	{								\
		event_##name.flags |= value;				\
		return 0;						\
	}								\
	early_initcall(trace_init_flags_##name);

#define __TRACE_EVENT_PERF_PERM(name, expr...)				\
	static int perf_perm_##name(struct trace_event_call *tp_event, \
				    struct perf_event *p_event)		\
	{								\
		return ({ expr; });					\
	}								\
	static int __init trace_init_perf_perm_##name(void)		\
	{								\
		event_##name.perf_perm = &perf_perm_##name;		\
		return 0;						\
	}								\
	early_initcall(trace_init_perf_perm_##name);
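/*
 * Illustrative sketch (not part of the original header): installing a perf
 * permission hook for an event with the macro above.  "my_event" and the
 * policy shown are hypothetical; the expression can refer to the p_event
 * argument declared by the macro:
 *
 *	__TRACE_EVENT_PERF_PERM(my_event,
 *		is_sampling_event(p_event) ? -EPERM : 0);
 */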
#define PERF_MAX_TRACE_SIZE	2048

#define MAX_FILTER_STR_VAL	256	/* Should handle KSYM_SYMBOL_LEN */
enum event_trigger_type {
	ETT_NONE		= (0),
	ETT_TRACE_ONOFF		= (1 << 0),
	ETT_SNAPSHOT		= (1 << 1),
	ETT_STACKTRACE		= (1 << 2),
	ETT_EVENT_ENABLE	= (1 << 3),
	ETT_EVENT_HIST		= (1 << 4),
	ETT_HIST_ENABLE		= (1 << 5),
};
extern int filter_match_preds(struct event_filter *filter, void *rec);

extern enum event_trigger_type
	event_triggers_call(struct trace_event_file *file, void *rec);
extern void event_triggers_post_call(struct trace_event_file *file,
				     enum event_trigger_type tt,
				     void *rec);

bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);
/**
 * trace_trigger_soft_disabled - do triggers and test if soft disabled
 * @file: The file pointer of the event to test
 *
 * If any triggers without filters are attached to this event, they
 * will be called here. If the event is soft disabled and has no
 * triggers that require testing the fields, it will return true,
 * otherwise false.
 */
static inline bool
trace_trigger_soft_disabled(struct trace_event_file *file)
{
	unsigned long eflags = file->flags;

	if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
		if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
			event_triggers_call(file, NULL);
		if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
			return true;
		if (eflags & EVENT_FILE_FL_PID_FILTER)
			return trace_event_ignore_this_pid(file);
	}
	return false;
}
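/*
 * Illustrative sketch (not part of the original header): a generated
 * event probe typically bails out early with this helper before
 * reserving ring buffer space, roughly:
 *
 *	if (trace_trigger_soft_disabled(trace_file))
 *		return;
 *	... reserve, fill and commit the event ...
 */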
#ifdef CONFIG_BPF_EVENTS
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx);
int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog);
void perf_event_detach_bpf_prog(struct perf_event *event);
int perf_event_query_prog_array(struct perf_event *event, void __user *info);
#else
static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	return 1;
}

static inline int
perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog)
{
	return -EOPNOTSUPP;
}

static inline void perf_event_detach_bpf_prog(struct perf_event *event) { }

static inline int
perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	return -EOPNOTSUPP;
}
#endif
enum {
	FILTER_OTHER = 0,
	FILTER_STATIC_STRING,
	FILTER_DYN_STRING,
	FILTER_PTR_STRING,
	FILTER_TRACE_FN,
	FILTER_COMM,
	FILTER_CPU,
};
extern int trace_event_raw_init(struct trace_event_call *call);
extern int trace_define_field(struct trace_event_call *call, const char *type,
			      const char *name, int offset, int size,
			      int is_signed, int filter_type);
extern int trace_add_event_call(struct trace_event_call *call);
extern int trace_remove_event_call(struct trace_event_call *call);
extern int trace_event_get_offsets(struct trace_event_call *call);
#define is_signed_type(type)	(((type)(-1)) < (type)1)
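/*
 * Illustrative note (not part of the original header): for example,
 * is_signed_type(int) compares (int)-1 < (int)1 and yields true, while
 * is_signed_type(unsigned int) wraps -1 to UINT_MAX and yields false.
 */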
int trace_set_clr_event(const char *system, const char *event, int set);
/*
 * The double __builtin_constant_p is because gcc will give us an error
 * if we try to allocate the static variable to fmt if it is not a
 * constant. Even with the outer if statement optimizing out.
 */
#define event_trace_printk(ip, fmt, args...)				\
do {									\
	__trace_printk_check_format(fmt, ##args);			\
	tracing_record_cmdline(current);				\
	if (__builtin_constant_p(fmt)) {				\
		static const char *trace_printk_fmt			\
		  __attribute__((section("__trace_printk_fmt"))) =	\
			__builtin_constant_p(fmt) ? fmt : NULL;		\
									\
		__trace_bprintk(ip, trace_printk_fmt, ##args);		\
	} else								\
		__trace_printk(ip, fmt, ##args);			\
} while (0)
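/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * caller, passing its own address as the instruction pointer:
 *
 *	event_trace_printk(_THIS_IP_, "reached %s with val=%d\n",
 *			   __func__, val);
 */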
#ifdef CONFIG_PERF_EVENTS

DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
DECLARE_PER_CPU(int, bpf_kprobe_override);

extern int  perf_trace_init(struct perf_event *event);
extern void perf_trace_destroy(struct perf_event *event);
extern int  perf_trace_add(struct perf_event *event, int flags);
extern void perf_trace_del(struct perf_event *event, int flags);
extern int  ftrace_profile_set_filter(struct perf_event *event, int event_id,
				      char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
void perf_trace_buf_update(void *record, u16 type);
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp);

void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
			       struct trace_event_call *call, u64 count,
			       struct pt_regs *regs, struct hlist_head *head,
			       struct task_struct *task);
static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
		      u64 count, struct pt_regs *regs, void *head,
		      struct task_struct *task)
{
	perf_tp_event(type, count, raw_data, size, regs, head, rctx, task);
}
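/*
 * Illustrative sketch (not part of the original header): the typical perf
 * path pairs perf_trace_buf_alloc() with perf_trace_buf_submit(), roughly
 * as the generated perf probes do.  Sizes and field names are
 * hypothetical:
 *
 *	entry = perf_trace_buf_alloc(size, &regs, &rctx);
 *	if (!entry)
 *		return;
 *	... fill *entry ...
 *	perf_trace_buf_submit(entry, size, rctx, event_type, 1, regs,
 *			      head, NULL);
 */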
#endif /* CONFIG_PERF_EVENTS */

#endif /* _LINUX_TRACE_EVENT_H */