/*
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 */
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

DEFINE_MUTEX(event_storage_mutex);
EXPORT_SYMBOL_GPL(event_storage_mutex);

char event_storage[EVENT_STORAGE_SIZE];
EXPORT_SYMBOL_GPL(event_storage);

LIST_HEAD(ftrace_events);
LIST_HEAD(ftrace_common_fields);
struct list_head *
trace_get_fields(struct ftrace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}
static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, head);

	return 0;

err:
	if (field)
		kfree(field->name);
	kfree(field);

	return -ENOMEM;
}
int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);
#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);
	__common_field(int, padding);

	return ret;
}
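/*
 * For illustration only (not part of the original code): the common fields
 * defined above are what appear at the top of every events/.../format file.
 * The offsets and sizes shown here are made up; the real values depend on
 * the layout of struct trace_entry on the running kernel:
 *
 *	field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *	field:unsigned char common_flags;	offset:2;	size:1;	signed:0;
 *	field:unsigned char common_preempt_count;	offset:3;	size:1;	signed:0;
 *	field:int common_pid;	offset:4;	size:4;	signed:1;
 */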
void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}
int trace_event_raw_init(struct ftrace_event_call *call)
{
	int id;

	id = register_ftrace_event(&call->event);
	if (!id)
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);
int ftrace_event_reg(struct ftrace_event_call *call, enum trace_reg type)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->probe,
						 call);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->probe,
					    call);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->perf_probe,
					    call);
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ftrace_event_reg);
void trace_event_enable_cmd_record(bool enable)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!(call->flags & TRACE_EVENT_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
		} else {
			tracing_stop_cmdline_record();
			call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
		}
	}
	mutex_unlock(&event_mutex);
}
static int ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{
	int ret = 0;

	switch (enable) {
	case 0:
		if (call->flags & TRACE_EVENT_FL_ENABLED) {
			call->flags &= ~TRACE_EVENT_FL_ENABLED;
			if (call->flags & TRACE_EVENT_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
			}
			call->class->reg(call, TRACE_REG_UNREGISTER);
		}
		break;
	case 1:
		if (!(call->flags & TRACE_EVENT_FL_ENABLED)) {
			if (trace_flags & TRACE_ITER_RECORD_CMD) {
				tracing_start_cmdline_record();
				call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
			}
			ret = call->class->reg(call, TRACE_REG_REGISTER);
			if (ret) {
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event %s\n",
					call->name);
				break;
			}
			call->flags |= TRACE_EVENT_FL_ENABLED;
		}
		break;
	}

	return ret;
}
static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		ftrace_event_enable_disable(call, 0);
	}
	mutex_unlock(&event_mutex);
}
static void __put_system(struct event_subsystem *system)
{
	struct event_filter *filter = system->filter;

	WARN_ON_ONCE(system->ref_count == 0);
	if (--system->ref_count)
		return;

	if (filter) {
		kfree(filter->filter_string);
		kfree(filter);
	}
	kfree(system->name);
	kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system->ref_count == 0);
	system->ref_count++;
}

static void put_system(struct event_subsystem *system)
{
	mutex_lock(&event_mutex);
	__put_system(system);
	mutex_unlock(&event_mutex);
}
/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
				  const char *event, int set)
{
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}
static int ftrace_set_clr_event(char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(match, sub, event, set);
}
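/*
 * Purely as an illustration of the parsing above (not part of the original
 * code): these are the kinds of strings the set_event file accepts.  The
 * subsystem and event names are only examples and must exist on the running
 * kernel:
 *
 *	echo 'sched:sched_switch'  > /sys/kernel/debug/tracing/set_event
 *	echo 'irq:*'               > /sys/kernel/debug/tracing/set_event
 *	echo 'kmalloc'             > /sys/kernel/debug/tracing/set_event
 *	echo '!sched:sched_switch' >> /sys/kernel/debug/tracing/set_event
 *
 * The leading '!' (handled in ftrace_event_write() below) disables the
 * matching events instead of enabling them.
 */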
/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * the recording of specific events.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	return __ftrace_set_clr_event(NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);
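/*
 * A minimal usage sketch (illustrative only, not from the original file):
 * a driver or boot-time hook could turn a specific event on and later off
 * again.  The subsystem and event names below are assumptions; use whatever
 * events exist on the system.
 *
 *	if (trace_set_clr_event("sched", "sched_switch", 1))
 *		pr_warn("could not enable sched_switch\n");
 *	...
 *	trace_set_clr_event("sched", "sched_switch", 0);
 */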
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They cannot be enabled or disabled via the event files.
		 */
		if (call->class && call->class->reg)
			return call;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = t_next(m, call, &l);
		if (!call)
			break;
	}

	return call;
}
static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		if (call->flags & TRACE_EVENT_FL_ENABLED)
			return call;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = s_next(m, call, &l);
		if (!call)
			break;
	}

	return call;
}
static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}
static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
}
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->flags & TRACE_EVENT_FL_ENABLED)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}
static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ret = ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}
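/*
 * Example (illustrative only; the event name is an assumption): the
 * per-event "enable" file handled above is what makes this work from
 * user space:
 *
 *	echo 1 > /sys/kernel/debug/tracing/events/sched/sched_switch/enable
 *	echo 0 > /sys/kernel/debug/tracing/events/sched/sched_switch/enable
 */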
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	struct event_subsystem *system = filp->private_data;
	struct ftrace_event_call *call;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * We need to find out if all the events are set,
		 * if all events are cleared, or if we have a mixture.
		 */
		set |= (1 << !!(call->flags & TRACE_EVENT_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}
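/*
 * Illustration (not in the original source): reading a subsystem's
 * "enable" file therefore yields one of three characters:
 *
 *	0 - every event in the subsystem is disabled
 *	1 - every event in the subsystem is enabled
 *	X - some events are enabled and some are disabled
 *
 * e.g.	cat /sys/kernel/debug/tracing/events/sched/enable
 */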
static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	const char *name = NULL;
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/*
	 * Opening of "enable" adds a ref count to system,
	 * so the name is safe to use.
	 */
	if (system)
		name = system->name;

	ret = __ftrace_set_clr_event(NULL, name, NULL, val);
	if (ret)
		return ret;

	*ppos += cnt;

	return cnt;
}
enum {
	FORMAT_HEADER		= 1,
	FORMAT_FIELD_SEPERATOR	= 2,
	FORMAT_PRINTFMT		= 3,
};
static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		if (unlikely(list_empty(common_head)))
			return NULL;

		field = list_entry(common_head->prev,
				   struct ftrace_event_field, link);
		return field;

	case FORMAT_FIELD_SEPERATOR:
		if (unlikely(list_empty(head)))
			return NULL;

		field = list_entry(head->prev, struct ftrace_event_field, link);
		return field;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	field = v;
	if (field->link.prev == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (field->link.prev == head)
		return (void *)FORMAT_PRINTFMT;

	field = list_entry(field->link.prev, struct ftrace_event_field, link);

	return field;
}
static void *f_start(struct seq_file *m, loff_t *pos)
{
	loff_t l = 0;
	void *p;

	/* Start by showing the header */
	if (!*pos)
		return (void *)FORMAT_HEADER;

	p = (void *)FORMAT_HEADER;
	do {
		p = f_next(m, p, &l);
	} while (p && l < *pos);

	return p;
}
static int f_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		seq_printf(m, "name: %s\n", call->name);
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_printf(m, "format:\n");
		return 0;

	case FORMAT_FIELD_SEPERATOR:
		seq_putc(m, '\n');
		return 0;

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",
			   call->print_fmt);
		return 0;
	}

	field = v;

	/*
	 * Smartly shows the array type (except dynamic arrays).
	 * Normal:
	 *	field:TYPE VAR
	 * If TYPE := TYPE[LEN], it is shown:
	 *	field:TYPE VAR[LEN]
	 */
	array_descriptor = strchr(field->type, '[');

	if (!strncmp(field->type, "__data_loc", 10))
		array_descriptor = NULL;

	if (!array_descriptor)
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   field->type, field->name, field->offset,
			   field->size, !!field->is_signed);
	else
		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   array_descriptor, field->offset,
			   field->size, !!field->is_signed);

	return 0;
}
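/*
 * Purely illustrative (not part of the original file): for a field declared
 * as "char comm[16]", the array branch above prints a line of the form
 *
 *	field:char comm[16];	offset:12;	size:16;	signed:0;
 *
 * where the offset and signedness shown here are made up; the real values
 * come from the event's field definitions.
 */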
static void f_stop(struct seq_file *m, void *p)
{
}
static const struct seq_operations trace_format_seq_ops = {
	.start		= f_start,
	.next		= f_next,
	.stop		= f_stop,
	.show		= f_show,
};
static int trace_format_open(struct inode *inode, struct file *file)
{
	struct ftrace_event_call *call = inode->i_private;
	struct seq_file *m;
	int ret;

	ret = seq_open(file, &trace_format_seq_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	m->private = call;

	return 0;
}
static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->event.type);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}
static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}
static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
static LIST_HEAD(event_subsystems);
static int subsystem_open(struct inode *inode, struct file *filp)
{
	struct event_subsystem *system = NULL;
	int ret;

	if (!inode->i_private)
		goto out;

	/* Make sure the system still exists */
	mutex_lock(&event_mutex);
	list_for_each_entry(system, &event_subsystems, list) {
		if (system == inode->i_private) {
			/* Don't open systems with no events */
			if (!system->nr_events) {
				system = NULL;
				break;
			}
			/* The subsystem is still valid, take a reference */
			__get_system(system);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	if (system != inode->i_private)
		return -ENODEV;

 out:
	ret = tracing_open_generic(inode, filp);
	if (ret < 0 && system)
		put_system(system);

	return ret;
}
static int subsystem_release(struct inode *inode, struct file *file)
{
	struct event_subsystem *system = inode->i_private;

	if (system)
		put_system(system);

	return 0;
}
static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}
static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}
static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = trace_format_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = subsystem_open,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = subsystem_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
	.llseek = default_llseek,
};
*event_trace_events_dir(void)
1054 static struct dentry
*d_tracer
;
1055 static struct dentry
*d_events
;
1060 d_tracer
= tracing_init_dentry();
1064 d_events
= debugfs_create_dir("events", d_tracer
);
1066 pr_warning("Could not create debugfs "
1067 "'events' directory\n");
static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			system->nr_events++;
			return system->entry;
		}
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->nr_events = 1;
	system->ref_count = 1;
	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem '%s'\n",
			   name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs '%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, system->entry, system,
			  &ftrace_system_enable_fops);

	return system->entry;
}
static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct list_head *head;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->class->system, d_events);

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs '%s' directory\n",
			   call->name);
		return -1;
	}

	if (call->class->reg)
		trace_create_file("enable", 0644, call->dir, call,
				  enable);

#ifdef CONFIG_PERF_EVENTS
	if (call->event.type && call->class->reg)
		trace_create_file("id", 0444, call->dir, call,
				  id);
#endif

	/*
	 * Other events may have the same class. Only update
	 * the fields if they are not already defined.
	 */
	head = trace_get_fields(call);
	if (list_empty(head)) {
		ret = call->class->define_fields(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace point events/%s\n",
				   call->name);
			return ret;
		}
	}
	trace_create_file("filter", 0644, call->dir, call,
			  filter);

	trace_create_file("format", 0444, call->dir, call,
			  format);

	return 0;
}
static int
__trace_add_event_call(struct ftrace_event_call *call, struct module *mod,
		       const struct file_operations *id,
		       const struct file_operations *enable,
		       const struct file_operations *filter,
		       const struct file_operations *format)
{
	struct dentry *d_events;
	int ret;

	/* The linker may leave blanks */
	if (!call->name)
		return -EINVAL;

	if (call->class->raw_init) {
		ret = call->class->raw_init(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace events/%s\n",
				   call->name);
			return ret;
		}
	}

	d_events = event_trace_events_dir();
	if (!d_events)
		return -ENOENT;

	ret = event_create_dir(call, d_events, id, enable, filter, format);
	if (!ret)
		list_add(&call->list, &ftrace_events);
	call->mod = mod;

	return ret;
}
/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
{
	int ret;

	mutex_lock(&event_mutex);
	ret = __trace_add_event_call(call, NULL, &ftrace_event_id_fops,
				     &ftrace_enable_fops,
				     &ftrace_event_filter_fops,
				     &ftrace_event_format_fops);
	mutex_unlock(&event_mutex);

	return ret;
}
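/*
 * Illustrative note (not from the original source): this is the entry point
 * used by code that creates events at run time.  For example, the
 * kprobe-based event tracer builds a struct ftrace_event_call and hands it
 * to trace_add_event_call(), so the new event shows up under events/ like
 * any compiled-in event.
 */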
static void remove_subsystem_dir(const char *name)
{
	struct event_subsystem *system;

	if (strcmp(name, TRACE_SYSTEM) == 0)
		return;

	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			if (!--system->nr_events) {
				debugfs_remove_recursive(system->entry);
				list_del(&system->list);
				__put_system(system);
			}
			break;
		}
	}
}
/*
 * Must be called with both event_mutex and trace_event_mutex held.
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
	ftrace_event_enable_disable(call, 0);
	if (call->event.funcs)
		__unregister_ftrace_event(&call->event);
	debugfs_remove_recursive(call->dir);
	list_del(&call->list);
	trace_destroy_fields(call);
	destroy_preds(call);
	remove_subsystem_dir(call->class->system);
}

/* Remove an event_call */
void trace_remove_event_call(struct ftrace_event_call *call)
{
	mutex_lock(&event_mutex);
	down_write(&trace_event_mutex);
	__trace_remove_event_call(call);
	up_write(&trace_event_mutex);
	mutex_unlock(&event_mutex);
}
#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)
#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};
static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}
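/*
 * Side note added for clarity (not in the original file): setting
 * .owner = mod on each copied file_operations is what ties open files in
 * the module's event directories to the module's reference count, so the
 * module cannot be unloaded while one of its event files is still open.
 */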
static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call **call, **start, **end;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	file_ops = trace_create_file_ops(mod);
	if (!file_ops)
		return;

	for_each_event(call, start, end) {
		__trace_add_event_call(*call, mod,
				       &file_ops->id, &file_ops->enable,
				       &file_ops->filter, &file_ops->format);
	}
}
static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool found = false;

	down_write(&trace_event_mutex);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			found = true;
			__trace_remove_event_call(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	if (found)
		tracing_reset_current_online_cpus();
	up_write(&trace_event_mutex);
}
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}

#endif /* CONFIG_MODULES */
static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
};
extern struct ftrace_event_call *__start_ftrace_events[];
extern struct ftrace_event_call *__stop_ftrace_events[];
static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = 1;
	tracing_selftest_disabled = 1;

	return 1;
}
__setup("trace_event=", setup_trace_event);
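/*
 * Example boot parameter (illustrative; any existing events can be listed,
 * comma separated, using the same syntax accepted by set_event):
 *
 *	trace_event=sched:sched_switch,irq:irq_handler_entry
 */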
static __init int event_trace_init(void)
{
	struct ftrace_event_call **call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;
	char *buf = bootup_event_buf;
	char *token;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  NULL, &ftrace_system_enable_fops);

	if (trace_define_common_fields())
		pr_warning("tracing: Failed to allocate common fields");

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		__trace_add_event_call(*call, NULL, &ftrace_event_id_fops,
				       &ftrace_enable_fops,
				       &ftrace_event_filter_fops,
				       &ftrace_event_format_fops);
	}

	while (true) {
		token = strsep(&buf, ",");

		if (!token)
			break;
		if (!*token)
			continue;

		ret = ftrace_set_clr_event(token, 1);
		if (ret)
			pr_warning("Failed to enable trace event: %s\n", token);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);
#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);
static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	mutex_unlock(&test_mutex);
}
static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}
/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	kthread_stop(test_thread);
}
/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a probe */
		if (!call->class || !call->class->probe)
			continue;

/*
 * Testing syscall events here is pretty useless, but
 * we still do it if configured. But this is time consuming.
 * What we really need is a user thread to perform the
 * syscalls as we test.
 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
		if (call->class->system &&
		    strcmp(call->class->system, "syscalls") == 0)
			continue;
#endif

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->flags & TRACE_EVENT_FL_ENABLED) {
			pr_warning("Enabled event during self test!\n");
			continue;
		}

		ftrace_event_enable_disable(call, 1);
		event_test_stuff();
		ftrace_event_enable_disable(call, 0);
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}
}
#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	pc = preempt_count();
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(&buffer,
						  TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);

 out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	preempt_enable_notrace();
}
static struct ftrace_ops trace_ops __initdata = {
	.func = function_test_events_call,
};
static __init void event_trace_self_test_with_function(void)
{
	int ret;

	ret = register_ftrace_function(&trace_ops);
	if (WARN_ON(ret < 0)) {
		pr_info("Failed to enable function tracer for event tests\n");
		return;
	}
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else

static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();
	}

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif /* CONFIG_FTRACE_STARTUP_TEST */