/*
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 */
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
struct list_head *
trace_get_fields(struct ftrace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}
int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct ftrace_event_field *field;
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	head = trace_get_fields(call);
	list_add(&field->link, head);

	return 0;

err:
	if (field)
		kfree(field->name);
	kfree(field);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(trace_define_field);
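
/*
 * Illustrative sketch (not part of the original file): a class's
 * define_fields() callback typically describes each member of its entry
 * structure with trace_define_field().  The structure and field names
 * below are hypothetical.
 *
 *	struct my_entry {
 *		struct trace_entry	ent;
 *		unsigned long		addr;
 *	};
 *
 *	static int my_define_fields(struct ftrace_event_call *call)
 *	{
 *		return trace_define_field(call, "unsigned long", "addr",
 *					  offsetof(struct my_entry, addr),
 *					  sizeof(unsigned long), 0,
 *					  FILTER_OTHER);
 *	}
 */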
#define __common_field(type, item)					\
	ret = trace_define_field(call, #type, "common_" #item,		\
				 offsetof(typeof(ent), item),		\
				 sizeof(ent.item),			\
				 is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;
static int trace_define_common_fields(struct ftrace_event_call *call)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);
	__common_field(int, lock_depth);

	return ret;
}
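
/*
 * These "common_" fields mirror the struct trace_entry header that
 * precedes every event record in the ring buffer, which is why they are
 * defined once here rather than by each event's define_fields() callback.
 */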
void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}
int trace_event_raw_init(struct ftrace_event_call *call)
{
	int id;

	id = register_ftrace_event(&call->event);
	if (!id)
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);
static int ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{
	int ret = 0;

	switch (enable) {
	case 0:
		if (call->flags & TRACE_EVENT_FL_ENABLED) {
			call->flags &= ~TRACE_EVENT_FL_ENABLED;
			tracing_stop_cmdline_record();
			if (call->class->reg)
				call->class->reg(call, TRACE_REG_UNREGISTER);
			else
				tracepoint_probe_unregister(call->name,
							    call->class->probe,
							    call);
		}
		break;
	case 1:
		if (!(call->flags & TRACE_EVENT_FL_ENABLED)) {
			tracing_start_cmdline_record();
			if (call->class->reg)
				ret = call->class->reg(call, TRACE_REG_REGISTER);
			else
				ret = tracepoint_probe_register(call->name,
								call->class->probe,
								call);
			if (ret) {
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event "
					"%s\n", call->name);
				break;
			}
			call->flags |= TRACE_EVENT_FL_ENABLED;
		}
		break;
	}

	return ret;
}
static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		ftrace_event_enable_disable(call, 0);
	}
	mutex_unlock(&event_mutex);
}
/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
				  const char *event, int set)
{
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->class ||
		    (!call->class->probe && !call->class->reg))
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}
static int ftrace_set_clr_event(char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(match, sub, event, set);
}
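
/*
 * Examples of strings accepted above (and thus via the "set_event" file):
 * "sched:sched_switch" names a single event, "sched:" or "sched" covers
 * every event in the sched subsystem, and "*:sched_switch" or plain
 * "sched_switch" matches that event in any subsystem.  The names are
 * only illustrative; any registered subsystem/event works.
 */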
/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * trace events.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	return __ftrace_set_clr_event(NULL, system, event, set);
}
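
/*
 * Illustrative use from elsewhere in the kernel (the event name is just
 * an example):
 *
 *	if (trace_set_clr_event("sched", "sched_switch", 1))
 *		pr_warning("could not enable sched_switch\n");
 */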
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}
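
/*
 * A token written here such as "irq:irq_handler_entry" enables that
 * event, while a leading '!' ("!irq:irq_handler_entry") disables it;
 * the "+ !set" above simply skips the '!' before parsing.  The event
 * name is only an example.
 */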
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->class && (call->class->probe || call->class->reg))
			return call;
	}

	return NULL;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = t_next(m, call, &l);
		if (!call)
			break;
	}

	return call;
}
static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		if (call->flags & TRACE_EVENT_FL_ENABLED)
			return call;
	}

	return NULL;
}
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = s_next(m, call, &l);
		if (!call)
			break;
	}

	return call;
}
static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}
static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}
static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
}
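
/*
 * Note the O_TRUNC check above: opening "set_event" for writing with
 * truncation (e.g. "echo > set_event" from user space) clears every
 * currently enabled event before any new list is written.
 */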
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->flags & TRACE_EVENT_FL_ENABLED)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}
static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&event_mutex);
	ret = ftrace_event_enable_disable(call, val);
	mutex_unlock(&event_mutex);

	*ppos += cnt;

	return ret ? ret : cnt;
}
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	const char *system = filp->private_data;
	struct ftrace_event_call *call;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->class ||
		    (!call->class->probe && !call->class->reg))
			continue;

		if (system && strcmp(call->class->system, system) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!(call->flags & TRACE_EVENT_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}
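
/*
 * Reading an "enable" file backed by this function therefore shows '0'
 * (every matching event disabled), '1' (every matching event enabled) or
 * 'X' (a mixture); '?' can only appear if no event matched at all.
 */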
static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	const char *system = filp->private_data;
	char buf[64];
	unsigned long val;
	ssize_t ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	ret = __ftrace_set_clr_event(NULL, system, NULL, val);
	if (ret)
		return ret;

	*ppos += cnt;

	return cnt;
}
static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct ftrace_event_field *field;
	struct list_head *head;
	struct trace_seq *s;
	int common_field_count = 5;
	char *buf;
	int r = 0;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->event.type);
	trace_seq_printf(s, "format:\n");

	head = trace_get_fields(call);
	list_for_each_entry_reverse(field, head, link) {
		/*
		 * Smartly shows the array type (except dynamic array).
		 * Normal:
		 *	field:TYPE VAR
		 * If TYPE := TYPE[LEN], it is shown:
		 *	field:TYPE VAR[LEN]
		 */
		const char *array_descriptor = strchr(field->type, '[');

		if (!strncmp(field->type, "__data_loc", 10))
			array_descriptor = NULL;

		if (!array_descriptor) {
			r = trace_seq_printf(s, "\tfield:%s %s;\toffset:%u;"
					"\tsize:%u;\tsigned:%d;\n",
					field->type, field->name, field->offset,
					field->size, !!field->is_signed);
		} else {
			r = trace_seq_printf(s, "\tfield:%.*s %s%s;\toffset:%u;"
					"\tsize:%u;\tsigned:%d;\n",
					(int)(array_descriptor - field->type),
					field->type, field->name,
					array_descriptor, field->offset,
					field->size, !!field->is_signed);
		}

		if (--common_field_count == 0)
			r = trace_seq_printf(s, "\n");

		if (!r)
			break;
	}

	if (r)
		r = trace_seq_printf(s, "\nprint fmt: %s\n",
				call->print_fmt);

	if (!r) {
		/*
		 * ug! The format output is bigger than a PAGE!!
		 */
		buf = "FORMAT TOO BIG\n";
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    buf, strlen(buf));
		goto out;
	}

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
 out:
	kfree(s);
	return r;
}
static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->event.type);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}
static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}
static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
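
/*
 * Filter expressions written here reference the event's own fields, e.g.
 * something like "common_pid != 0 && prev_prio < 100" for sched_switch
 * (field names are illustrative and depend on the event's format file).
 * If apply_event_filter() rejects the expression, the error is returned
 * to the writer; writing "0" is the conventional way to clear a filter.
 */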
static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}
static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}
static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = tracing_open_generic,
	.read = system_enable_read,
	.write = system_enable_write,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
};
static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}
static LIST_HEAD(event_subsystems);

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			system->nr_events++;
			return system->entry;
		}
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->nr_events = 1;
	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
			   "'%s'\n", name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, system->entry,
			  (void *)system->name,
			  &ftrace_system_enable_fops);

	return system->entry;
}
static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct list_head *head;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->class->system, d_events);

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->class->probe || call->class->reg)
		trace_create_file("enable", 0644, call->dir, call,
				  enable);

#ifdef CONFIG_PERF_EVENTS
	if (call->event.type && (call->class->perf_probe || call->class->reg))
		trace_create_file("id", 0444, call->dir, call,
				  id);
#endif

	if (call->class->define_fields) {
		/*
		 * Other events may have the same class. Only update
		 * the fields if they are not already defined.
		 */
		head = trace_get_fields(call);
		if (list_empty(head)) {
			ret = trace_define_common_fields(call);
			if (!ret)
				ret = call->class->define_fields(call);
			if (ret < 0) {
				pr_warning("Could not initialize trace point"
					   " events/%s\n", call->name);
				return ret;
			}
		}
		trace_create_file("filter", 0644, call->dir, call,
				  filter);
	}

	trace_create_file("format", 0444, call->dir, call,
			  format);

	return 0;
}
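
/*
 * Each event directory created above ends up containing "enable", "id"
 * (when perf support is built in), "filter" (when the class defines
 * fields) and "format" files, backed by the file_operations handed in
 * by the caller.
 */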
static int __trace_add_event_call(struct ftrace_event_call *call)
{
	struct dentry *d_events;
	int ret;

	if (call->class->raw_init) {
		ret = call->class->raw_init(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace "
				   "events/%s\n", call->name);
			return ret;
		}
	}

	d_events = event_trace_events_dir();
	if (!d_events)
		return -ENOENT;

	ret = event_create_dir(call, d_events, &ftrace_event_id_fops,
			       &ftrace_enable_fops, &ftrace_event_filter_fops,
			       &ftrace_event_format_fops);
	if (!ret)
		list_add(&call->list, &ftrace_events);

	return ret;
}
/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
{
	int ret;

	mutex_lock(&event_mutex);
	ret = __trace_add_event_call(call);
	mutex_unlock(&event_mutex);

	return ret;
}
static void remove_subsystem_dir(const char *name)
{
	struct event_subsystem *system;

	if (strcmp(name, TRACE_SYSTEM) == 0)
		return;

	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			if (!--system->nr_events) {
				struct event_filter *filter = system->filter;

				debugfs_remove_recursive(system->entry);
				list_del(&system->list);
				if (filter) {
					kfree(filter->filter_string);
					kfree(filter);
				}
				kfree(system->name);
				kfree(system);
			}
			break;
		}
	}
}
/*
 * Must be called with both event_mutex and trace_event_mutex held.
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
	ftrace_event_enable_disable(call, 0);
	if (call->event.funcs)
		__unregister_ftrace_event(&call->event);
	debugfs_remove_recursive(call->dir);
	list_del(&call->list);
	trace_destroy_fields(call);
	destroy_preds(call);
	remove_subsystem_dir(call->class->system);
}
/* Remove an event_call */
void trace_remove_event_call(struct ftrace_event_call *call)
{
	mutex_lock(&event_mutex);
	down_write(&trace_event_mutex);
	__trace_remove_event_call(call);
	up_write(&trace_event_mutex);
	mutex_unlock(&event_mutex);
}
#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)
#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};
static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}
static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call, *start, *end;
	struct dentry *d_events;
	int ret;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	d_events = event_trace_events_dir();
	if (!d_events)
		return;

	for_each_event(call, start, end) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		if (call->class->raw_init) {
			ret = call->class->raw_init(call);
			if (ret < 0) {
				pr_warning("Could not initialize trace "
					   "point events/%s\n", call->name);
				continue;
			}
		}
		/*
		 * This module has events, create file ops for this module
		 * if not already done.
		 */
		if (!file_ops) {
			file_ops = trace_create_file_ops(mod);
			if (!file_ops)
				return;
		}
		call->mod = mod;
		ret = event_create_dir(call, d_events,
				       &file_ops->id, &file_ops->enable,
				       &file_ops->filter, &file_ops->format);
		if (!ret)
			list_add(&call->list, &ftrace_events);
	}
}
static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool found = false;

	down_write(&trace_event_mutex);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			found = true;
			__trace_remove_event_call(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	if (found)
		tracing_reset_current_online_cpus();
	up_write(&trace_event_mutex);
}
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */
static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
};
extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = 1;
	tracing_selftest_disabled = 1;

	return 1;
}
__setup("trace_event=", setup_trace_event);
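
/*
 * Booting with, say, "trace_event=sched:sched_switch,sched:sched_wakeup"
 * (any list of tokens that ftrace_set_clr_event() accepts; these names
 * are only examples) stores the list here so that event_trace_init()
 * below can enable those events before user space is up.
 */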
static __init int event_trace_init(void)
{
	struct ftrace_event_call *call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	char *buf = bootup_event_buf;
	char *token;
	int ret;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  NULL, &ftrace_system_enable_fops);

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		if (call->class->raw_init) {
			ret = call->class->raw_init(call);
			if (ret < 0) {
				pr_warning("Could not initialize trace "
					   "point events/%s\n", call->name);
				continue;
			}
		}
		ret = event_create_dir(call, d_events, &ftrace_event_id_fops,
				       &ftrace_enable_fops,
				       &ftrace_event_filter_fops,
				       &ftrace_event_format_fops);
		if (!ret)
			list_add(&call->list, &ftrace_events);
	}

	while (1) {
		token = strsep(&buf, ",");

		if (!token)
			break;
		if (!*token)
			continue;

		ret = ftrace_set_clr_event(token, 1);
		if (ret)
			pr_warning("Failed to enable trace event: %s\n", token);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);
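
/*
 * fs_initcall() is used here presumably so that debugfs and the tracing
 * directory are already set up by the time the event files are created.
 */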
#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);
static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}
static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}
/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}
/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a probe */
		if (!call->class || !call->class->probe)
			continue;

/*
 * Testing syscall events here is pretty useless, but
 * we still do it if configured. But this is time consuming.
 * What we really need is a user thread to perform the
 * syscalls as we test.
 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
		if (call->class->system &&
		    strcmp(call->class->system, "syscalls") == 0)
			continue;
#endif

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->flags & TRACE_EVENT_FL_ENABLED) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(call, 1);
		event_test_stuff();
		ftrace_event_enable_disable(call, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}
#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(&buffer,
						  TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);

 out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
};
static __init void event_trace_self_test_with_function(void)
{
	register_ftrace_function(&trace_ops);
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();
	}

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif /* CONFIG_FTRACE_STARTUP_TEST */