/*
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 */
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, &call->fields);

	return 0;

err:
	if (field) {
		kfree(field->name);
		kfree(field->type);
	}
	kfree(field);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(trace_define_field);
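/*
 * Example (illustrative only, not part of this file): an event's
 * define_fields() callback would register each of its fields roughly
 * like this, where "struct my_entry" and its "prio" member are
 * hypothetical:
 *
 *	ret = trace_define_field(call, "int", "prio",
 *				 offsetof(struct my_entry, prio),
 *				 sizeof(int), is_signed_type(int),
 *				 FILTER_OTHER);
 */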
#define __common_field(type, item)					\
	ret = trace_define_field(call, #type, "common_" #item,		\
				 offsetof(typeof(ent), item),		\
				 sizeof(ent.item),			\
				 is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;
int trace_define_common_fields(struct ftrace_event_call *call)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);
	__common_field(int, tgid);

	return 0;
}
EXPORT_SYMBOL_GPL(trace_define_common_fields);
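/*
 * The common fields registered above are what appear at the top of
 * every event's "format" file, e.g. (offsets and sizes are
 * illustrative; they depend on the architecture):
 *
 *	field:unsigned short common_type;	offset:0;	size:2;
 *	field:unsigned char common_flags;	offset:2;	size:1;
 *	field:unsigned char common_preempt_count;	offset:3;	size:1;
 *	field:int common_pid;	offset:4;	size:4;
 *	field:int common_tgid;	offset:8;	size:4;
 */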
#ifdef CONFIG_MODULES

static void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;

	list_for_each_entry_safe(field, next, &call->fields, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}

#endif /* CONFIG_MODULES */
static void ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{
	switch (enable) {
	case 0:
		if (call->enabled) {
			call->enabled = 0;
			tracing_stop_cmdline_record();
			call->unregfunc(call->data);
		}
		break;
	case 1:
		if (!call->enabled) {
			call->enabled = 1;
			tracing_start_cmdline_record();
			call->regfunc(call->data);
		}
		break;
	}
}
static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		ftrace_event_enable_disable(call, 0);
	}
	mutex_unlock(&event_mutex);
}
/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
				  const char *event, int set)
{
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->regfunc)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->system) != 0)
			continue;

		if (sub && strcmp(sub, call->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}
static int ftrace_set_clr_event(char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(match, sub, event, set);
}
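/*
 * Example (illustrative): this parsing backs writes to the set_event
 * file, e.g. from a shell (assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	# echo 'sched:sched_switch' > /sys/kernel/debug/tracing/set_event
 *	# echo 'irq:*' >> /sys/kernel/debug/tracing/set_event
 *	# echo '!sched:sched_switch' >> /sys/kernel/debug/tracing/set_event
 *
 * (The leading '!', stripped in ftrace_event_write() below, selects
 * clearing instead of setting.)
 */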
/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	return __ftrace_set_clr_event(NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);
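/*
 * Example (illustrative, in-kernel use): another subsystem could turn
 * a single event on with:
 *
 *	ret = trace_set_clr_event("sched", "sched_switch", 1);
 */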
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127
static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	size_t read = 0;
	int i, set = 1;
	int ret;
	char *buf;
	char ch;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = get_user(ch, ubuf++);
	if (ret)
		return ret;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			return ret;
		read++;
		cnt--;
	}

	/* Only white space found? */
	if (isspace(ch)) {
		file->f_pos += read;
		return read;
	}

	buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (cnt > EVENT_BUF_SIZE)
		cnt = EVENT_BUF_SIZE;

	/* a leading '!' means "clear" rather than "set" */
	i = 0;
	while (cnt && !isspace(ch)) {
		if (!i && ch == '!')
			set = 0;
		else
			buf[i++] = ch;

		ret = get_user(ch, ubuf++);
		if (ret)
			goto out_free;
		read++;
		cnt--;
	}
	buf[i] = 0;

	file->f_pos += read;

	ret = ftrace_set_clr_event(buf, set);
	if (ret)
		goto out_free;

	ret = read;

 out_free:
	kfree(buf);

	return ret;
}
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

 retry:
	if (list == &ftrace_events)
		return NULL;

	call = list_entry(list, struct ftrace_event_call, list);

	/*
	 * The ftrace subsystem is for showing formats only.
	 * They can not be enabled or disabled via the event files.
	 */
	if (!call->regfunc) {
		list = list->next;
		goto retry;
	}

	m->private = list->next;

	return call;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call = NULL;
	loff_t l;

	mutex_lock(&event_mutex);

	m->private = ftrace_events.next;
	for (l = 0; l <= *pos; ) {
		call = t_next(m, NULL, &l);
		if (!call)
			break;
	}
	return call;
}
static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

 retry:
	if (list == &ftrace_events)
		return NULL;

	call = list_entry(list, struct ftrace_event_call, list);

	if (!call->enabled) {
		list = list->next;
		goto retry;
	}

	m->private = list->next;

	return call;
}
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call = NULL;
	loff_t l;

	mutex_lock(&event_mutex);

	m->private = ftrace_events.next;
	for (l = 0; l <= *pos; ) {
		call = s_next(m, NULL, &l);
		if (!call)
			break;
	}
	return call;
}
static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}
static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}
static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
}
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->enabled)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}
static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return cnt;
}
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	const char *system = filp->private_data;
	struct ftrace_event_call *call;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->regfunc)
			continue;

		if (system && strcmp(call->system, system) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!call->enabled);

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}
static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	const char *system = filp->private_data;
	unsigned long val;
	char buf[64];
	ssize_t ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	ret = __ftrace_set_clr_event(NULL, system, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}
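/*
 * Example (illustrative): these two handlers back the per-system
 * "enable" file, e.g.:
 *
 *	# echo 1 > /sys/kernel/debug/tracing/events/sched/enable
 *	# cat /sys/kernel/debug/tracing/events/sched/enable
 *	1
 *
 * Reading yields '0' (all events in the system off), '1' (all on) or
 * 'X' (a mixture), per the set_to_char[] mapping above.
 */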
extern char *__bad_type_size(void);

#undef FIELD
#define FIELD(type, name)						\
	sizeof(type) != sizeof(field.name) ? __bad_type_size() :	\
	#type, "common_" #name, offsetof(typeof(field), name),		\
		sizeof(field.name)
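/*
 * Note: __bad_type_size() is declared but deliberately never defined.
 * If the sizeof check in FIELD() ever selects it, the build fails at
 * link time, turning a type-size mismatch into a build error rather
 * than a silently wrong format file.
 */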
static int trace_write_header(struct trace_seq *s)
{
	struct trace_entry field;

	/* struct trace_entry */
	return trace_seq_printf(s,
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\n",
				FIELD(unsigned short, type),
				FIELD(unsigned char, flags),
				FIELD(unsigned char, preempt_count),
				FIELD(int, pid),
				FIELD(int, tgid));
}
static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	char *buf;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	/* If any of the first writes fail, so will the show_format. */

	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->id);
	trace_seq_printf(s, "format:\n");
	trace_write_header(s);

	r = call->show_format(call, s);
	if (!r) {
		/*
		 * ug! The format output is bigger than a PAGE!!
		 */
		buf = "FORMAT TOO BIG\n";
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    buf, strlen(buf));
		goto out;
	}

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
 out:
	kfree(s);
	return r;
}
static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->id);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}
static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}
static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}
static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}
static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};
static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = tracing_open_generic,
	.read = system_enable_read,
	.write = system_enable_write,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
};
static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}
static LIST_HEAD(event_subsystems);
static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			system->nr_events++;
			return system->entry;
		}
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->nr_events = 1;
	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
			   "'%s'\n", name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	}

	entry = trace_create_file("enable", 0644, system->entry,
				  (void *)system->name,
				  &ftrace_system_enable_fops);

	return system->entry;
}
static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct dentry *entry;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->system, d_events);

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->regfunc)
		entry = trace_create_file("enable", 0644, call->dir, call,
					  enable);

	if (call->id && call->profile_enable)
		entry = trace_create_file("id", 0444, call->dir, call,
					  id);

	if (call->define_fields) {
		ret = call->define_fields(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
		entry = trace_create_file("filter", 0644, call->dir, call,
					  filter);
	}

	/* A trace may not want to export its format */
	if (!call->show_format)
		return 0;

	entry = trace_create_file("format", 0444, call->dir, call,
				  format);

	return 0;
}
#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)
#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);
/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head	list;
	struct module		*mod;
	struct file_operations	id;
	struct file_operations	enable;
	struct file_operations	format;
	struct file_operations	filter;
};
static void remove_subsystem_dir(const char *name)
{
	struct event_subsystem *system;

	if (strcmp(name, TRACE_SYSTEM) == 0)
		return;

	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			if (!--system->nr_events) {
				struct event_filter *filter = system->filter;

				debugfs_remove_recursive(system->entry);
				list_del(&system->list);
				if (filter) {
					kfree(filter->filter_string);
					kfree(filter);
				}
				kfree(system->name);
				kfree(system);
			}
			break;
		}
	}
}
static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}
static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call, *start, *end;
	struct dentry *d_events;
	int ret;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	d_events = event_trace_events_dir();
	if (!d_events)
		return;

	for_each_event(call, start, end) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;

		if (call->raw_init) {
			ret = call->raw_init();
			if (ret < 0) {
				if (ret != -ENOSYS)
					pr_warning("Could not initialize trace "
						   "point events/%s\n", call->name);
				continue;
			}
		}

		/*
		 * This module has events, create file ops for this module
		 * if not already done.
		 */
		if (!file_ops) {
			file_ops = trace_create_file_ops(mod);
			if (!file_ops)
				return;
		}
		call->mod = mod;
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events,
				 &file_ops->id, &file_ops->enable,
				 &file_ops->filter, &file_ops->format);
	}
}
static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool found = false;

	down_write(&trace_event_mutex);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			found = true;
			ftrace_event_enable_disable(call, 0);
			if (call->event)
				__unregister_ftrace_event(call->event);
			debugfs_remove_recursive(call->dir);
			list_del(&call->list);
			trace_destroy_fields(call);
			destroy_preds(call);
			remove_subsystem_dir(call->system);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	if (found)
		tracing_reset_current_online_cpus();
	up_write(&trace_event_mutex);
}
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}

#endif /* CONFIG_MODULES */
struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = 1;
	tracing_selftest_disabled = 1;

	return 1;
}
__setup("trace_event=", setup_trace_event);
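/*
 * Example (illustrative): events can be enabled from the kernel
 * command line before the tracing directory exists; the buffer is a
 * comma-separated list parsed by event_trace_init() below, e.g.:
 *
 *	trace_event=sched:sched_switch,irq:irq_handler_entry
 */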
static __init int event_trace_init(void)
{
	struct ftrace_event_call *call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;
	char *buf = bootup_event_buf;
	char *token;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  NULL, &ftrace_system_enable_fops);

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		if (call->raw_init) {
			ret = call->raw_init();
			if (ret < 0) {
				if (ret != -ENOSYS)
					pr_warning("Could not initialize trace "
						   "point events/%s\n", call->name);
				continue;
			}
		}
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events, &ftrace_event_id_fops,
				 &ftrace_enable_fops, &ftrace_event_filter_fops,
				 &ftrace_event_format_fops);
	}

	while (true) {
		token = strsep(&buf, ",");

		if (!token)
			break;
		if (!*token)
			continue;

		ret = ftrace_set_clr_event(token, 1);
		if (ret)
			pr_warning("Failed to enable trace event: %s\n", token);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);
#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);
static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}
static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}
/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}
/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a regfunc */
		if (!call->regfunc)
			continue;

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->enabled) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(call, 1);
		event_test_stuff();
		ftrace_event_enable_disable(call, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}
#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(&buffer,
						  TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);

 out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	ftrace_preempt_enable(resched);
}
static struct ftrace_ops trace_ops __initdata = {
	.func = function_test_events_call,
};
static __init void event_trace_self_test_with_function(void)
{
	register_ftrace_function(&trace_ops);
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else

static __init void event_trace_self_test_with_function(void)
{
}

#endif
static __init int event_trace_self_tests_init(void)
{
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();
	}

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif /* CONFIG_FTRACE_STARTUP_TEST */