Linux 4.9.243
[linux/fpc-iii.git] / kernel / trace / trace_events.c
blob 5bf072e437c413536e0314ce1ac718c113e69eda
/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */
#define pr_fmt(fmt) fmt

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <trace/events/sched.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_generic_fields);
static LIST_HEAD(ftrace_common_fields);

#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)

static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;
static inline int system_refcount(struct event_subsystem *system)
{
	return system->ref_count;
}

static int system_refcount_inc(struct event_subsystem *system)
{
	return system->ref_count++;
}

static int system_refcount_dec(struct event_subsystem *system)
{
	return --system->ref_count;
}
/* Double loops, do not use break, only goto's work */
#define do_for_each_event_file(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		struct trace_event_file *___n;			\
		list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()		\
	}
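/*
 * Usage sketch (illustrative, not part of the original file): each
 * do_for_each_event_file() expands to a nested loop and must be closed
 * by while_for_each_event_file(), as done later in this file, e.g.:
 *
 *	do_for_each_event_file(tr, file) {
 *		if (file->event_call != call)
 *			continue;
 *		remove_event_file_dir(file);
 *		break;	(exits only the inner loop, moving to the next tr)
 *	} while_for_each_event_file();
 */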
static struct list_head *
trace_get_fields(struct trace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}

static struct ftrace_event_field *
__find_event_field(struct list_head *head, char *name)
{
	struct ftrace_event_field *field;

	list_for_each_entry(field, head, link) {
		if (!strcmp(field->name, name))
			return field;
	}

	return NULL;
}

struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name)
{
	struct ftrace_event_field *field;
	struct list_head *head;

	head = trace_get_fields(call);
	field = __find_event_field(head, name);
	if (field)
		return field;

	field = __find_event_field(&ftrace_generic_fields, name);
	if (field)
		return field;

	return __find_event_field(&ftrace_common_fields, name);
}
static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct ftrace_event_field *field;

	field = kmem_cache_alloc(field_cachep, GFP_TRACE);
	if (!field)
		return -ENOMEM;

	field->name = name;
	field->type = type;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, head);

	return 0;
}

int trace_define_field(struct trace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);
#define __generic_field(type, item, filter_type)			\
	ret = __trace_define_field(&ftrace_generic_fields, #type,	\
				   #item, 0, 0, is_signed_type(type),	\
				   filter_type);			\
	if (ret)							\
		return ret;

#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;
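/*
 * Expansion sketch (illustrative only): inside trace_define_common_fields()
 * below, __common_field(int, pid) expands roughly to:
 *
 *	ret = __trace_define_field(&ftrace_common_fields, "int",
 *				   "common_pid",
 *				   offsetof(typeof(ent), pid),
 *				   sizeof(ent.pid),
 *				   is_signed_type(int), FILTER_OTHER);
 *	if (ret)
 *		return ret;
 */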
static int trace_define_generic_fields(void)
{
	int ret;

	__generic_field(int, CPU, FILTER_CPU);
	__generic_field(int, cpu, FILTER_CPU);
	__generic_field(char *, COMM, FILTER_COMM);
	__generic_field(char *, comm, FILTER_COMM);

	return ret;
}

static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);

	return ret;
}

static void trace_destroy_fields(struct trace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kmem_cache_free(field_cachep, field);
	}
}
/*
 * run-time version of trace_event_get_offsets_<call>() that returns the last
 * accessible offset of trace fields excluding __dynamic_array bytes
 */
int trace_event_get_offsets(struct trace_event_call *call)
{
	struct ftrace_event_field *tail;
	struct list_head *head;

	head = trace_get_fields(call);
	/*
	 * head->next points to the last field with the largest offset,
	 * since it was added last by trace_define_field()
	 */
	tail = list_first_entry(head, struct ftrace_event_field, link);
	return tail->offset + tail->size;
}
int trace_event_raw_init(struct trace_event_call *call)
{
	int id;

	id = register_trace_event(&call->event);
	if (!id)
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

bool trace_event_ignore_this_pid(struct trace_event_file *trace_file)
{
	struct trace_array *tr = trace_file->tr;
	struct trace_array_cpu *data;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	if (!pid_list)
		return false;

	data = this_cpu_ptr(tr->trace_buffer.data);

	return data->ignore_pid;
}
EXPORT_SYMBOL_GPL(trace_event_ignore_this_pid);
void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
				 struct trace_event_file *trace_file,
				 unsigned long len)
{
	struct trace_event_call *event_call = trace_file->event_call;

	if ((trace_file->flags & EVENT_FILE_FL_PID_FILTER) &&
	    trace_event_ignore_this_pid(trace_file))
		return NULL;

	local_save_flags(fbuffer->flags);
	fbuffer->pc = preempt_count();
	/*
	 * If CONFIG_PREEMPT is enabled, then the tracepoint itself disables
	 * preemption (adding one to the preempt_count). Since we are
	 * interested in the preempt_count at the time the tracepoint was
	 * hit, we need to subtract one to offset the increment.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT))
		fbuffer->pc--;
	fbuffer->trace_file = trace_file;

	fbuffer->event =
		trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file,
						event_call->event.type, len,
						fbuffer->flags, fbuffer->pc);
	if (!fbuffer->event)
		return NULL;

	fbuffer->entry = ring_buffer_event_data(fbuffer->event);
	return fbuffer->entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_reserve);
static DEFINE_SPINLOCK(tracepoint_iter_lock);

static void output_printk(struct trace_event_buffer *fbuffer)
{
	struct trace_event_call *event_call;
	struct trace_event *event;
	unsigned long flags;
	struct trace_iterator *iter = tracepoint_print_iter;

	if (!iter)
		return;

	event_call = fbuffer->trace_file->event_call;
	if (!event_call || !event_call->event.funcs ||
	    !event_call->event.funcs->trace)
		return;

	event = &fbuffer->trace_file->event_call->event;

	spin_lock_irqsave(&tracepoint_iter_lock, flags);
	trace_seq_init(&iter->seq);
	iter->ent = fbuffer->entry;
	event_call->event.funcs->trace(iter, 0, event);
	trace_seq_putc(&iter->seq, 0);
	printk("%s", iter->seq.buffer);

	spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
}

void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
{
	if (tracepoint_printk)
		output_printk(fbuffer);

	event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
				    fbuffer->event, fbuffer->entry,
				    fbuffer->flags, fbuffer->pc);
}
EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
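/*
 * Usage sketch (illustrative; it roughly mirrors what the generated probe
 * code in include/trace/trace_events.h does): reserve an entry, fill it,
 * then commit it:
 *
 *	struct trace_event_buffer fbuffer;
 *	struct trace_event_raw_<call> *entry;
 *
 *	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
 *					   sizeof(*entry));
 *	if (!entry)
 *		return;
 *	...assign entry fields...
 *	trace_event_buffer_commit(&fbuffer);
 */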
int trace_event_reg(struct trace_event_call *call,
		    enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->probe,
						 file);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->probe,
					    file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->perf_probe,
					    call);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_reg);
void trace_event_enable_cmd_record(bool enable)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	mutex_lock(&event_mutex);
	do_for_each_event_file(tr, file) {

		if (!(file->flags & EVENT_FILE_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		} else {
			tracing_stop_cmdline_record();
			clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		}
	} while_for_each_event_file();
	mutex_unlock(&event_mutex);
}
static int __ftrace_event_enable_disable(struct trace_event_file *file,
					 int enable, int soft_disable)
{
	struct trace_event_call *call = file->event_call;
	struct trace_array *tr = file->tr;
	unsigned long file_flags = file->flags;
	int ret = 0;
	int disable;

	switch (enable) {
	case 0:
		/*
		 * When soft_disable is set and enable is cleared, the sm_ref
		 * reference counter is decremented. If it reaches 0, we want
		 * to clear the SOFT_DISABLED flag but leave the event in the
		 * state that it was. That is, if the event was enabled and
		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
		 * is set we do not want the event to be enabled before we
		 * clear the bit.
		 *
		 * When soft_disable is not set but the SOFT_MODE flag is,
		 * we do nothing. Do not disable the tracepoint, otherwise
		 * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
		 */
		if (soft_disable) {
			if (atomic_dec_return(&file->sm_ref) > 0)
				break;
			disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
			clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
		} else
			disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE);

		if (disable && (file->flags & EVENT_FILE_FL_ENABLED)) {
			clear_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);
			if (file->flags & EVENT_FILE_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}
			call->class->reg(call, TRACE_REG_UNREGISTER, file);
		}
		/* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */
		if (file->flags & EVENT_FILE_FL_SOFT_MODE)
			set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		break;
	case 1:
		/*
		 * When soft_disable is set and enable is set, we want to
		 * register the tracepoint for the event, but leave the event
		 * as is. That means, if the event was already enabled, we do
		 * nothing (but set SOFT_MODE). If the event is disabled, we
		 * set SOFT_DISABLED before enabling the event tracepoint, so
		 * it still seems to be disabled.
		 */
		if (!soft_disable)
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else {
			if (atomic_inc_return(&file->sm_ref) > 1)
				break;
			set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
		}

		if (!(file->flags & EVENT_FILE_FL_ENABLED)) {

			/* Keep the event disabled, when going to SOFT_MODE. */
			if (soft_disable)
				set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);

			if (tr->trace_flags & TRACE_ITER_RECORD_CMD) {
				tracing_start_cmdline_record();
				set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}
			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
			if (ret) {
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event %s\n",
					trace_event_name(call));
				break;
			}
			set_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);

			/* WAS_ENABLED gets set but never cleared. */
			call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
		}
		break;
	}

	/* Enable or disable use of trace_buffered_event */
	if ((file_flags & EVENT_FILE_FL_SOFT_DISABLED) !=
	    (file->flags & EVENT_FILE_FL_SOFT_DISABLED)) {
		if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
			trace_buffered_event_enable();
		else
			trace_buffered_event_disable();
	}

	return ret;
}
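/*
 * Summary sketch of the soft-disable states handled above (derived from the
 * comments in that function, not authoritative): SOFT_MODE means at least
 * one soft user holds sm_ref, so the tracepoint stays registered while
 * SOFT_DISABLED suppresses recording; a "soft enable" then only needs to
 * clear the SOFT_DISABLED bit, without touching the tracepoint itself.
 */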
int trace_event_enable_disable(struct trace_event_file *file,
			       int enable, int soft_disable)
{
	return __ftrace_event_enable_disable(file, enable, soft_disable);
}

static int ftrace_event_enable_disable(struct trace_event_file *file,
				       int enable)
{
	return __ftrace_event_enable_disable(file, enable, 0);
}

static void ftrace_clear_events(struct trace_array *tr)
{
	struct trace_event_file *file;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		ftrace_event_enable_disable(file, 0);
	}
	mutex_unlock(&event_mutex);
}
static void
event_filter_pid_sched_process_exit(void *data, struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = data;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	trace_filter_add_remove_task(pid_list, NULL, task);
}

static void
event_filter_pid_sched_process_fork(void *data,
				    struct task_struct *self,
				    struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = data;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	trace_filter_add_remove_task(pid_list, self, task);
}

void trace_event_follow_fork(struct trace_array *tr, bool enable)
{
	if (enable) {
		register_trace_prio_sched_process_fork(event_filter_pid_sched_process_fork,
						       tr, INT_MIN);
		register_trace_prio_sched_process_exit(event_filter_pid_sched_process_exit,
						       tr, INT_MAX);
	} else {
		unregister_trace_sched_process_fork(event_filter_pid_sched_process_fork,
						    tr);
		unregister_trace_sched_process_exit(event_filter_pid_sched_process_exit,
						    tr);
	}
}
static void
event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
		    struct task_struct *prev, struct task_struct *next)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_sched(tr->filtered_pids);

	this_cpu_write(tr->trace_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, prev) &&
		       trace_ignore_this_task(pid_list, next));
}

static void
event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
		    struct task_struct *prev, struct task_struct *next)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_sched(tr->filtered_pids);

	this_cpu_write(tr->trace_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, next));
}

static void
event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	/* Nothing to do if we are already tracing */
	if (!this_cpu_read(tr->trace_buffer.data->ignore_pid))
		return;

	pid_list = rcu_dereference_sched(tr->filtered_pids);

	this_cpu_write(tr->trace_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, task));
}

static void
event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	/* Nothing to do if we are not tracing */
	if (this_cpu_read(tr->trace_buffer.data->ignore_pid))
		return;

	pid_list = rcu_dereference_sched(tr->filtered_pids);

	/* Set tracing if current is enabled */
	this_cpu_write(tr->trace_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, current));
}
static void __ftrace_clear_event_pids(struct trace_array *tr)
{
	struct trace_pid_list *pid_list;
	struct trace_event_file *file;
	int cpu;

	pid_list = rcu_dereference_protected(tr->filtered_pids,
					     lockdep_is_held(&event_mutex));
	if (!pid_list)
		return;

	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr);
	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr);

	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr);

	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, tr);

	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr);

	list_for_each_entry(file, &tr->events, list) {
		clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
	}

	for_each_possible_cpu(cpu)
		per_cpu_ptr(tr->trace_buffer.data, cpu)->ignore_pid = false;

	rcu_assign_pointer(tr->filtered_pids, NULL);

	/* Wait till all users are no longer using pid filtering */
	synchronize_sched();

	trace_free_pid_list(pid_list);
}

static void ftrace_clear_event_pids(struct trace_array *tr)
{
	mutex_lock(&event_mutex);
	__ftrace_clear_event_pids(tr);
	mutex_unlock(&event_mutex);
}
static void __put_system(struct event_subsystem *system)
{
	struct event_filter *filter = system->filter;

	WARN_ON_ONCE(system_refcount(system) == 0);
	if (system_refcount_dec(system))
		return;

	list_del(&system->list);

	if (filter) {
		kfree(filter->filter_string);
		kfree(filter);
	}
	kfree_const(system->name);
	kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system_refcount(system) == 0);
	system_refcount_inc(system);
}

static void __get_system_dir(struct trace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	dir->ref_count++;
	__get_system(dir->subsystem);
}

static void __put_system_dir(struct trace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	/* If the subsystem is about to be freed, the dir must be too */
	WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);

	__put_system(dir->subsystem);
	if (!--dir->ref_count)
		kfree(dir);
}

static void put_system(struct trace_subsystem_dir *dir)
{
	mutex_lock(&event_mutex);
	__put_system_dir(dir);
	mutex_unlock(&event_mutex);
}
static void remove_subsystem(struct trace_subsystem_dir *dir)
{
	if (!dir)
		return;

	if (!--dir->nr_events) {
		tracefs_remove_recursive(dir->entry);
		list_del(&dir->list);
		__put_system_dir(dir);
	}
}

static void remove_event_file_dir(struct trace_event_file *file)
{
	struct dentry *dir = file->dir;
	struct dentry *child;

	if (dir) {
		spin_lock(&dir->d_lock);	/* probably unneeded */
		list_for_each_entry(child, &dir->d_subdirs, d_child) {
			if (d_really_is_positive(child))	/* probably unneeded */
				d_inode(child)->i_private = NULL;
		}
		spin_unlock(&dir->d_lock);

		tracefs_remove_recursive(dir);
	}

	list_del(&file->list);
	remove_subsystem(file->system);
	free_event_filter(file->filter);
	kmem_cache_free(file_cachep, file);
}
/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int
__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
			      const char *sub, const char *event, int set)
{
	struct trace_event_file *file;
	struct trace_event_call *call;
	const char *name;
	int ret = -EINVAL;

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;
		name = trace_event_name(call);

		if (!name || !call->class || !call->class->reg)
			continue;

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
			continue;

		if (match &&
		    strcmp(match, name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, name) != 0)
			continue;

		ftrace_event_enable_disable(file, set);

		ret = 0;
	}

	return ret;
}

static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
				  const char *sub, const char *event, int set)
{
	int ret;

	mutex_lock(&event_mutex);
	ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
	mutex_unlock(&event_mutex);

	return ret;
}
static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;
	int ret;

	if (!tr)
		return -ENOENT;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	ret = __ftrace_set_clr_event(tr, match, sub, event, set);

	/* Put back the colon to allow this to be called again */
	if (buf)
		*(buf - 1) = ':';

	return ret;
}
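/*
 * Examples of accepted strings (illustrative, matching the format comment
 * above; typically written from the shell to tracefs' set_event file):
 *
 *	echo 'sched:sched_switch' > set_event
 *	echo 'sched:' > set_event		(all events in sched)
 *	echo 'sched_switch' > set_event		(match by name or subsystem)
 *	echo '!sched:sched_switch' > set_event	(a leading '!' clears; see
 *						 ftrace_event_write() below)
 */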
/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	struct trace_array *tr = top_trace_array();

	if (!tr)
		return -ENODEV;

	return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);
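/*
 * In-kernel usage sketch (illustrative): enable sched:sched_switch in the
 * top-level trace instance:
 *
 *	ret = trace_set_clr_event("sched", "sched_switch", 1);
 *	if (ret)
 *		pr_warn("could not enable sched_switch\n");
 */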
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	struct seq_file *m = file->private_data;
	struct trace_array *tr = m->private;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		call = file->event_call;
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->class && call->class->reg &&
		    !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
			return file;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct trace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = t_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_file *file = v;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		if (file->flags & EVENT_FILE_FL_ENABLED)
			return file;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct trace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = s_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static int t_show(struct seq_file *m, void *v)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call = file->event_call;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", trace_event_name(call));

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}
static void *
p_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->filtered_pids);

	return trace_pid_next(pid_list, v, pos);
}

static void *p_start(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = m->private;

	/*
	 * Grab the mutex, to keep calls to p_next() having the same
	 * tr->filtered_pids as p_start() has.
	 * If we just passed the tr->filtered_pids around, then RCU would
	 * have been enough, but doing that makes things more complex.
	 */
	mutex_lock(&event_mutex);
	rcu_read_lock_sched();

	pid_list = rcu_dereference_sched(tr->filtered_pids);

	if (!pid_list)
		return NULL;

	return trace_pid_start(pid_list, pos);
}

static void p_stop(struct seq_file *m, void *p)
	__releases(RCU)
{
	rcu_read_unlock_sched();
	mutex_unlock(&event_mutex);
}
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct trace_event_file *file;
	unsigned long flags;
	char buf[4] = "0";

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (likely(file))
		flags = file->flags;
	mutex_unlock(&event_mutex);

	if (!file)
		return -ENODEV;

	if (flags & EVENT_FILE_FL_ENABLED &&
	    !(flags & EVENT_FILE_FL_SOFT_DISABLED))
		strcpy(buf, "1");

	if (flags & EVENT_FILE_FL_SOFT_DISABLED ||
	    flags & EVENT_FILE_FL_SOFT_MODE)
		strcat(buf, "*");

	strcat(buf, "\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
}
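/*
 * Resulting "enable" file contents (derived from the flag tests above):
 * "0" when disabled, "1" when enabled, with a trailing '*' ("0*"/"1*")
 * when the event is in SOFT_MODE or soft-disabled, e.g. held by a trigger.
 */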
static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_event_file *file;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		ret = -ENODEV;
		mutex_lock(&event_mutex);
		file = event_file_data(filp);
		if (likely(file))
			ret = ftrace_event_enable_disable(file, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_event_call *call;
	struct trace_event_file *file;
	struct trace_array *tr = dir->tr;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		call = file->event_call;
		if (!trace_event_name(call) || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!(file->flags & EVENT_FILE_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}
static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	const char *name = NULL;
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/*
	 * Opening of "enable" adds a ref count to system,
	 * so the name is safe to use.
	 */
	if (system)
		name = system->name;

	ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}
enum {
	FORMAT_HEADER		= 1,
	FORMAT_FIELD_SEPERATOR	= 2,
	FORMAT_PRINTFMT		= 3,
};

static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_call *call = event_file_data(m->private);
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);
	struct list_head *node = v;

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		node = common_head;
		break;

	case FORMAT_FIELD_SEPERATOR:
		node = head;
		break;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	node = node->prev;
	if (node == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (node == head)
		return (void *)FORMAT_PRINTFMT;
	else
		return node;
}
static int f_show(struct seq_file *m, void *v)
{
	struct trace_event_call *call = event_file_data(m->private);
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		seq_printf(m, "name: %s\n", trace_event_name(call));
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_puts(m, "format:\n");
		return 0;

	case FORMAT_FIELD_SEPERATOR:
		seq_putc(m, '\n');
		return 0;

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",
			   call->print_fmt);
		return 0;
	}

	field = list_entry(v, struct ftrace_event_field, link);
	/*
	 * Smartly shows the array type (except dynamic array).
	 * Normal:
	 *	field:TYPE VAR
	 * If TYPE := TYPE[LEN], it is shown:
	 *	field:TYPE VAR[LEN]
	 */
	array_descriptor = strchr(field->type, '[');

	if (!strncmp(field->type, "__data_loc", 10))
		array_descriptor = NULL;

	if (!array_descriptor)
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   field->type, field->name, field->offset,
			   field->size, !!field->is_signed);
	else
		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   array_descriptor, field->offset,
			   field->size, !!field->is_signed);

	return 0;
}
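/*
 * Sample "format" file output (illustrative; the common_* lines follow
 * the fields defined in trace_define_common_fields() above):
 *
 *	name: sched_switch
 *	ID: <id>
 *	format:
 *		field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *		field:unsigned char common_flags;	offset:2;	size:1;	signed:0;
 *		...
 *
 *	print fmt: ...
 */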
static void *f_start(struct seq_file *m, loff_t *pos)
{
	void *p = (void *)FORMAT_HEADER;
	loff_t l = 0;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	if (!event_file_data(m->private))
		return ERR_PTR(-ENODEV);

	while (l < *pos && p)
		p = f_next(m, p, &l);

	return p;
}

static void f_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static const struct seq_operations trace_format_seq_ops = {
	.start		= f_start,
	.next		= f_next,
	.stop		= f_stop,
	.show		= f_show,
};

static int trace_format_open(struct inode *inode, struct file *file)
{
	struct seq_file *m;
	int ret;

	ret = seq_open(file, &trace_format_seq_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	m->private = file;

	return 0;
}
static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int id = (long)event_file_data(filp);
	char buf[32];
	int len;

	if (unlikely(!id))
		return -ENODEV;

	len = sprintf(buf, "%d\n", id);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct trace_event_file *file;
	struct trace_seq *s;
	int r = -ENODEV;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (file)
		print_event_filter(file, s);
	mutex_unlock(&event_mutex);

	if (file)
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    s->buffer, trace_seq_used(s));

	kfree(s);

	return r;
}
static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_event_file *file;
	char *buf;
	int err = -ENODEV;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (file)
		err = apply_event_filter(file, buf);
	mutex_unlock(&event_mutex);

	kfree(buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
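/*
 * Usage sketch (illustrative; the predicate syntax itself is implemented
 * in trace_events_filter.c, not here):
 *
 *	echo 'common_pid != 0' > events/sched/sched_switch/filter
 *	echo 0 > events/sched/sched_switch/filter	(clears the filter)
 */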
static LIST_HEAD(event_subsystems);

static int subsystem_open(struct inode *inode, struct file *filp)
{
	struct event_subsystem *system = NULL;
	struct trace_subsystem_dir *dir = NULL; /* Initialize for gcc */
	struct trace_array *tr;
	int ret;

	if (tracing_is_disabled())
		return -ENODEV;

	/* Make sure the system still exists */
	mutex_lock(&trace_types_lock);
	mutex_lock(&event_mutex);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		list_for_each_entry(dir, &tr->systems, list) {
			if (dir == inode->i_private) {
				/* Don't open systems with no events */
				if (dir->nr_events) {
					__get_system_dir(dir);
					system = dir->subsystem;
				}
				goto exit_loop;
			}
		}
	}
 exit_loop:
	mutex_unlock(&event_mutex);
	mutex_unlock(&trace_types_lock);

	if (!system)
		return -ENODEV;

	/* Some versions of gcc think dir can be uninitialized here */
	WARN_ON(!dir);

	/* Still need to increment the ref count of the system */
	if (trace_array_get(tr) < 0) {
		put_system(dir);
		return -ENODEV;
	}

	ret = tracing_open_generic(inode, filp);
	if (ret < 0) {
		trace_array_put(tr);
		put_system(dir);
	}

	return ret;
}
static int system_tr_open(struct inode *inode, struct file *filp)
{
	struct trace_subsystem_dir *dir;
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_is_disabled())
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* Make a temporary dir that has no system but points to tr */
	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	dir->tr = tr;

	ret = tracing_open_generic(inode, filp);
	if (ret < 0) {
		trace_array_put(tr);
		kfree(dir);
		return ret;
	}

	filp->private_data = dir;

	return 0;
}

static int subsystem_release(struct inode *inode, struct file *file)
{
	struct trace_subsystem_dir *dir = file->private_data;

	trace_array_put(dir->tr);

	/*
	 * If dir->subsystem is NULL, then this is a temporary
	 * descriptor that was made for a trace_array to enable
	 * all subsystems.
	 */
	if (dir->subsystem)
		put_system(dir);
	else
		kfree(dir);

	return 0;
}
static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, trace_seq_used(s));

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct trace_subsystem_dir *dir = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	err = apply_subsystem_event_filter(dir, buf);
	kfree(buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, trace_seq_used(s));

	kfree(s);

	return r;
}

static void ignore_task_cpu(void *data)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	/*
	 * This function is called by on_each_cpu() while the
	 * event_mutex is held.
	 */
	pid_list = rcu_dereference_protected(tr->filtered_pids,
					     mutex_is_locked(&event_mutex));

	this_cpu_write(tr->trace_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, current));
}
static ssize_t
ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	struct trace_pid_list *filtered_pids = NULL;
	struct trace_pid_list *pid_list;
	struct trace_event_file *file;
	ssize_t ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	mutex_lock(&event_mutex);

	filtered_pids = rcu_dereference_protected(tr->filtered_pids,
					     lockdep_is_held(&event_mutex));

	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
	if (ret < 0)
		goto out;

	rcu_assign_pointer(tr->filtered_pids, pid_list);

	list_for_each_entry(file, &tr->events, list) {
		set_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
	}

	if (filtered_pids) {
		synchronize_sched();
		trace_free_pid_list(filtered_pids);
	} else if (pid_list) {
		/*
		 * Register a probe that is called before all other probes
		 * to set ignore_pid if next or prev do not match.
		 * Register a probe that is called after all other probes
		 * to only keep ignore_pid set if next pid matches.
		 */
		register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,
						 tr, INT_MAX);
		register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,
						 tr, 0);

		register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,
						 tr, INT_MAX);
		register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
						 tr, 0);

		register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
						     tr, INT_MAX);
		register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
						     tr, 0);

		register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
						 tr, INT_MAX);
		register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
						 tr, 0);
	}

	/*
	 * Ignoring of pids is done at task switch. But we have to
	 * check for those tasks that are currently running.
	 * Always do this in case a pid was appended or removed.
	 */
	on_each_cpu(ignore_task_cpu, tr, 1);

 out:
	mutex_unlock(&event_mutex);

	if (ret > 0)
		*ppos += ret;

	return ret;
}
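/*
 * Usage sketch (illustrative): this handler backs the tracefs
 * "set_event_pid" file (see ftrace_set_event_pid_fops below):
 *
 *	echo 1234 > set_event_pid	(trace events only for PID 1234)
 *	echo 5678 >> set_event_pid	(append another PID to the list)
 *
 * Opening the file with O_TRUNC clears the list; see
 * ftrace_event_set_pid_open() below.
 */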
static int ftrace_event_avail_open(struct inode *inode, struct file *file);
static int ftrace_event_set_open(struct inode *inode, struct file *file);
static int ftrace_event_set_pid_open(struct inode *inode, struct file *file);
static int ftrace_event_release(struct inode *inode, struct file *file);

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_pid_seq_ops = {
	.start = p_start,
	.next = p_next,
	.show = trace_pid_show,
	.stop = p_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_set_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = ftrace_event_release,
};

static const struct file_operations ftrace_set_event_pid_fops = {
	.open = ftrace_event_set_pid_open,
	.read = seq_read,
	.write = ftrace_event_pid_write,
	.llseek = seq_lseek,
	.release = ftrace_event_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = trace_format_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_event_id_fops = {
	.read = event_id_read,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = subsystem_open,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = subsystem_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_tr_enable_fops = {
	.open = system_tr_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
	.llseek = default_llseek,
};
static int
ftrace_event_open(struct inode *inode, struct file *file,
		  const struct seq_operations *seq_ops)
{
	struct seq_file *m;
	int ret;

	ret = seq_open(file, seq_ops);
	if (ret < 0)
		return ret;
	m = file->private_data;
	/* copy tr over to seq ops */
	m->private = inode->i_private;

	return ret;
}

static int ftrace_event_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return seq_release(inode, file);
}

static int
ftrace_event_avail_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_event_seq_ops;

	return ftrace_event_open(inode, file, seq_ops);
}

static int
ftrace_event_set_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_set_event_seq_ops;
	struct trace_array *tr = inode->i_private;
	int ret;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events(tr);

	ret = ftrace_event_open(inode, file, seq_ops);
	if (ret < 0)
		trace_array_put(tr);
	return ret;
}

static int
ftrace_event_set_pid_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_set_pid_seq_ops;
	struct trace_array *tr = inode->i_private;
	int ret;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_event_pids(tr);

	ret = ftrace_event_open(inode, file, seq_ops);
	if (ret < 0)
		trace_array_put(tr);
	return ret;
}
static struct event_subsystem *
create_new_subsystem(const char *name)
{
	struct event_subsystem *system;

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system)
		return NULL;

	system->ref_count = 1;

	/* Only allocate if dynamic (kprobes and modules) */
	system->name = kstrdup_const(name, GFP_KERNEL);
	if (!system->name)
		goto out_free;

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter)
		goto out_free;

	list_add(&system->list, &event_subsystems);

	return system;

 out_free:
	kfree_const(system->name);
	kfree(system);
	return NULL;
}
static struct dentry *
event_subsystem_dir(struct trace_array *tr, const char *name,
		    struct trace_event_file *file, struct dentry *parent)
{
	struct trace_subsystem_dir *dir;
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(dir, &tr->systems, list) {
		system = dir->subsystem;
		if (strcmp(system->name, name) == 0) {
			dir->nr_events++;
			file->system = dir;
			return dir->entry;
		}
	}

	/* Now see if the system itself exists. */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			break;
	}
	/* Reset system variable when not found */
	if (&system->list == &event_subsystems)
		system = NULL;

	dir = kmalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		goto out_fail;

	if (!system) {
		system = create_new_subsystem(name);
		if (!system)
			goto out_free;
	} else
		__get_system(system);

	dir->entry = tracefs_create_dir(name, parent);
	if (!dir->entry) {
		pr_warn("Failed to create system directory %s\n", name);
		__put_system(system);
		goto out_free;
	}

	dir->tr = tr;
	dir->ref_count = 1;
	dir->nr_events = 1;
	dir->subsystem = system;
	file->system = dir;

	entry = tracefs_create_file("filter", 0644, dir->entry, dir,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warn("Could not create tracefs '%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, dir->entry, dir,
			  &ftrace_system_enable_fops);

	list_add(&dir->list, &tr->systems);

	return dir->entry;

 out_free:
	kfree(dir);
 out_fail:
	/* Only print this message if failed on memory allocation */
	if (!dir || !system)
		pr_warn("No memory to create event subsystem %s\n", name);
	return NULL;
}
static int
event_create_dir(struct dentry *parent, struct trace_event_file *file)
{
	struct trace_event_call *call = file->event_call;
	struct trace_array *tr = file->tr;
	struct list_head *head;
	struct dentry *d_events;
	const char *name;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
		d_events = event_subsystem_dir(tr, call->class->system, file, parent);
		if (!d_events)
			return -ENOMEM;
	} else
		d_events = parent;

	name = trace_event_name(call);
	file->dir = tracefs_create_dir(name, d_events);
	if (!file->dir) {
		pr_warn("Could not create tracefs '%s' directory\n", name);
		return -1;
	}

	if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
		trace_create_file("enable", 0644, file->dir, file,
				  &ftrace_enable_fops);

#ifdef CONFIG_PERF_EVENTS
	if (call->event.type && call->class->reg)
		trace_create_file("id", 0444, file->dir,
				  (void *)(long)call->event.type,
				  &ftrace_event_id_fops);
#endif

	/*
	 * Other events may have the same class. Only update
	 * the fields if they are not already defined.
	 */
	head = trace_get_fields(call);
	if (list_empty(head)) {
		ret = call->class->define_fields(call);
		if (ret < 0) {
			pr_warn("Could not initialize trace point events/%s\n",
				name);
			return -1;
		}
	}
	trace_create_file("filter", 0644, file->dir, file,
			  &ftrace_event_filter_fops);

	/*
	 * Only event directories that can be enabled should have
	 * triggers.
	 */
	if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
		trace_create_file("trigger", 0644, file->dir, file,
				  &event_trigger_fops);

#ifdef CONFIG_HIST_TRIGGERS
	trace_create_file("hist", 0444, file->dir, file,
			  &event_hist_fops);
#endif
	trace_create_file("format", 0444, file->dir, call,
			  &ftrace_event_format_fops);

	return 0;
}
static void remove_event_from_tracers(struct trace_event_call *call)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	do_for_each_event_file_safe(tr, file) {
		if (file->event_call != call)
			continue;

		remove_event_file_dir(file);
		/*
		 * The do_for_each_event_file_safe() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();
}

static void event_remove(struct trace_event_call *call)
{
	struct trace_array *tr;
	struct trace_event_file *file;

	do_for_each_event_file(tr, file) {
		if (file->event_call != call)
			continue;
		ftrace_event_enable_disable(file, 0);
		/*
		 * The do_for_each_event_file() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();

	if (call->event.funcs)
		__unregister_trace_event(&call->event);
	remove_event_from_tracers(call);
	list_del(&call->list);
}
static int event_init(struct trace_event_call *call)
{
	int ret = 0;
	const char *name;

	name = trace_event_name(call);
	if (WARN_ON(!name))
		return -EINVAL;

	if (call->class->raw_init) {
		ret = call->class->raw_init(call);
		if (ret < 0 && ret != -ENOSYS)
			pr_warn("Could not initialize trace events/%s\n", name);
	}

	return ret;
}

static int
__register_event(struct trace_event_call *call, struct module *mod)
{
	int ret;

	ret = event_init(call);
	if (ret < 0)
		return ret;

	list_add(&call->list, &ftrace_events);
	call->mod = mod;

	return 0;
}
static char *enum_replace(char *ptr, struct trace_enum_map *map, int len)
{
	int rlen;
	int elen;

	/* Find the length of the enum value as a string */
	elen = snprintf(ptr, 0, "%ld", map->enum_value);
	/* Make sure there's enough room to replace the string with the value */
	if (len < elen)
		return NULL;

	snprintf(ptr, elen + 1, "%ld", map->enum_value);

	/* Get the rest of the string of ptr */
	rlen = strlen(ptr + len);
	memmove(ptr + elen, ptr + len, rlen);
	/* Make sure we end the new string */
	ptr[elen + rlen] = 0;

	return ptr + elen;
}
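/*
 * Worked example (illustrative): with map->enum_string = "MY_ENUM"
 * (len = 7) and map->enum_value = 3, a print_fmt tail of "MY_ENUM: %d"
 * becomes "3: %d". elen is 1, the tail ": %d" is moved down over the
 * remaining six characters, and the returned pointer lands on the ':'
 * just past the inserted value.
 */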
static void update_event_printk(struct trace_event_call *call,
				struct trace_enum_map *map)
{
	char *ptr;
	int quote = 0;
	int len = strlen(map->enum_string);

	for (ptr = call->print_fmt; *ptr; ptr++) {
		if (*ptr == '\\') {
			ptr++;
			/* paranoid */
			if (!*ptr)
				break;
			continue;
		}
		if (*ptr == '"') {
			quote ^= 1;
			continue;
		}
		if (quote)
			continue;
		if (isdigit(*ptr)) {
			/* skip numbers */
			do {
				ptr++;
				/* Check for alpha chars like ULL */
			} while (isalnum(*ptr));
			if (!*ptr)
				break;
			/*
			 * A number must have some kind of delimiter after
			 * it, and we can ignore that too.
			 */
			continue;
		}
		if (isalpha(*ptr) || *ptr == '_') {
			if (strncmp(map->enum_string, ptr, len) == 0 &&
			    !isalnum(ptr[len]) && ptr[len] != '_') {
				ptr = enum_replace(ptr, map, len);
				/* Hmm, enum string smaller than value */
				if (WARN_ON_ONCE(!ptr))
					return;
				/*
				 * No need to decrement here, as enum_replace()
				 * returns the pointer to the character past
				 * the enum, and two enums can not be placed
				 * back to back without something in between.
				 * We can skip that something in between.
				 */
				continue;
			}
		skip_more:
			do {
				ptr++;
			} while (isalnum(*ptr) || *ptr == '_');
			if (!*ptr)
				break;
			/*
			 * If what comes after this variable is a '.' or
			 * '->' then we can continue to ignore that string.
			 */
			if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) {
				ptr += *ptr == '.' ? 1 : 2;
				if (!*ptr)
					break;
				goto skip_more;
			}
			/*
			 * Once again, we can skip the delimiter that came
			 * after the string.
			 */
			continue;
		}
	}
}
void trace_event_enum_update(struct trace_enum_map **map, int len)
{
	struct trace_event_call *call, *p;
	const char *last_system = NULL;
	bool first = false;
	int last_i;
	int i;

	down_write(&trace_event_sem);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		/* events are usually grouped together with systems */
		if (!last_system || call->class->system != last_system) {
			first = true;
			last_i = 0;
			last_system = call->class->system;
		}

		/*
		 * Since calls are grouped by systems, the likelihood that the
		 * next call in the iteration belongs to the same system as the
		 * previous call is high. As an optimization, we skip searching
		 * for a map[] that matches the call's system if the last call
		 * was from the same system. That's what last_i is for. If the
		 * call has the same system as the previous call, then last_i
		 * will be the index of the first map[] that has a matching
		 * system.
		 */
		for (i = last_i; i < len; i++) {
			if (call->class->system == map[i]->system) {
				/* Save the first system if need be */
				if (first) {
					last_i = i;
					first = false;
				}
				update_event_printk(call, map[i]);
			}
		}
	}
	up_write(&trace_event_sem);
}
static struct trace_event_file *
trace_create_new_event(struct trace_event_call *call,
		       struct trace_array *tr)
{
	struct trace_event_file *file;

	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
	if (!file)
		return NULL;

	file->event_call = call;
	file->tr = tr;
	atomic_set(&file->sm_ref, 0);
	atomic_set(&file->tm_ref, 0);
	INIT_LIST_HEAD(&file->triggers);
	list_add(&file->list, &tr->events);

	return file;
}

/* Add an event to a trace directory */
static int
__trace_add_new_event(struct trace_event_call *call, struct trace_array *tr)
{
	struct trace_event_file *file;

	file = trace_create_new_event(call, tr);
	if (!file)
		return -ENOMEM;

	return event_create_dir(tr->event_dir, file);
}
2273 * Just create a decriptor for early init. A descriptor is required
2274 * for enabling events at boot. We want to enable events before
2275 * the filesystem is initialized.
2277 static __init int
2278 __trace_early_add_new_event(struct trace_event_call *call,
2279 struct trace_array *tr)
2281 struct trace_event_file *file;
2283 file = trace_create_new_event(call, tr);
2284 if (!file)
2285 return -ENOMEM;
2287 return 0;
2290 struct ftrace_module_file_ops;
2291 static void __add_event_to_tracers(struct trace_event_call *call);
2293 /* Add an additional event_call dynamically */
2294 int trace_add_event_call(struct trace_event_call *call)
2296 int ret;
2297 mutex_lock(&trace_types_lock);
2298 mutex_lock(&event_mutex);
2300 ret = __register_event(call, NULL);
2301 if (ret >= 0)
2302 __add_event_to_tracers(call);
2304 mutex_unlock(&event_mutex);
2305 mutex_unlock(&trace_types_lock);
2306 return ret;
2307 }
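/*
 * Usage sketch (modelled on dynamic event creators such as kprobe
 * events; details vary by caller): once a trace_event_call is fully
 * initialized (class, name, print format), registering it and creating
 * its per-instance files is a single call:
 *
 *	ret = trace_add_event_call(call);
 *	if (ret < 0)
 *		pr_warn("Failed to register event %s\n",
 *			trace_event_name(call));
 *
 * Tear-down mirrors this via trace_remove_event_call(), which returns
 * -EBUSY while the event is still enabled or held by perf.
 */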
2309 /*
2310 * Must be called with trace_types_lock, event_mutex and
2311 * trace_event_sem all held.
2312 */
2313 static void __trace_remove_event_call(struct trace_event_call *call)
2315 event_remove(call);
2316 trace_destroy_fields(call);
2317 free_event_filter(call->filter);
2318 call->filter = NULL;
2321 static int probe_remove_event_call(struct trace_event_call *call)
2323 struct trace_array *tr;
2324 struct trace_event_file *file;
2326 #ifdef CONFIG_PERF_EVENTS
2327 if (call->perf_refcount)
2328 return -EBUSY;
2329 #endif
2330 do_for_each_event_file(tr, file) {
2331 if (file->event_call != call)
2332 continue;
2333 /*
2334 * We can't rely on the ftrace_event_enable_disable(enable => 0)
2335 * call we are about to make, because EVENT_FILE_FL_SOFT_MODE can
2336 * suppress TRACE_REG_UNREGISTER.
2337 */
2338 if (file->flags & EVENT_FILE_FL_ENABLED)
2339 return -EBUSY;
2340 /*
2341 * The do_for_each_event_file() above is
2342 * a double loop. After finding the call for this
2343 * trace_array, we use break to jump to the next
2344 * trace_array.
2345 */
2346 break;
2347 } while_for_each_event_file();
2349 __trace_remove_event_call(call);
2351 return 0;
2354 /* Remove an event_call */
2355 int trace_remove_event_call(struct trace_event_call *call)
2357 int ret;
2359 mutex_lock(&trace_types_lock);
2360 mutex_lock(&event_mutex);
2361 down_write(&trace_event_sem);
2362 ret = probe_remove_event_call(call);
2363 up_write(&trace_event_sem);
2364 mutex_unlock(&event_mutex);
2365 mutex_unlock(&trace_types_lock);
2367 return ret;
2370 #define for_each_event(event, start, end) \
2371 for (event = start; \
2372 (unsigned long)event < (unsigned long)end; \
2373 event++)
2375 #ifdef CONFIG_MODULES
2377 static void trace_module_add_events(struct module *mod)
2379 struct trace_event_call **call, **start, **end;
2381 if (!mod->num_trace_events)
2382 return;
2384 /* Don't add infrastructure for mods whose tracepoints are unusable (bad taint) */
2385 if (trace_module_has_bad_taint(mod)) {
2386 pr_err("%s: module has bad taint, not creating trace events\n",
2387 mod->name);
2388 return;
2391 start = mod->trace_events;
2392 end = mod->trace_events + mod->num_trace_events;
2394 for_each_event(call, start, end) {
2395 __register_event(*call, mod);
2396 __add_event_to_tracers(*call);
2400 static void trace_module_remove_events(struct module *mod)
2402 struct trace_event_call *call, *p;
2403 bool clear_trace = false;
2405 down_write(&trace_event_sem);
2406 list_for_each_entry_safe(call, p, &ftrace_events, list) {
2407 if (call->mod == mod) {
2408 if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
2409 clear_trace = true;
2410 __trace_remove_event_call(call);
2411 }
2412 }
2413 up_write(&trace_event_sem);
2415 /*
2416 * It is safest to reset the ring buffer if the module being unloaded
2417 * registered any events that were used. The only worry is if
2418 * a new module gets loaded and takes on the same ids as the events
2419 * of this module. When printing out the buffer, traced events left
2420 * over from this module may be passed to the new module events and
2421 * unexpected results may occur.
2422 */
2423 if (clear_trace)
2424 tracing_reset_all_online_cpus();
2427 static int trace_module_notify(struct notifier_block *self,
2428 unsigned long val, void *data)
2430 struct module *mod = data;
2432 mutex_lock(&trace_types_lock);
2433 mutex_lock(&event_mutex);
2434 switch (val) {
2435 case MODULE_STATE_COMING:
2436 trace_module_add_events(mod);
2437 break;
2438 case MODULE_STATE_GOING:
2439 trace_module_remove_events(mod);
2440 break;
2442 mutex_unlock(&event_mutex);
2443 mutex_unlock(&trace_types_lock);
2445 return 0;
2448 static struct notifier_block trace_module_nb = {
2449 .notifier_call = trace_module_notify,
2450 .priority = 1, /* higher than trace.c module notify */
2451 };
2452 #endif /* CONFIG_MODULES */
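/*
 * Illustrative notifier flow (summary of the CONFIG_MODULES block
 * above): loading "foo.ko" raises MODULE_STATE_COMING, and
 * trace_module_add_events() registers each entry of the module's
 * trace_events[] array with every tracer; unloading raises
 * MODULE_STATE_GOING, and trace_module_remove_events() removes those
 * calls and, if any had ever been enabled, resets the online-CPU ring
 * buffers so a later module reusing the same event ids cannot cause
 * stale entries to be mis-printed.
 */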
2454 /* Create a new event directory structure for a trace directory. */
2455 static void
2456 __trace_add_event_dirs(struct trace_array *tr)
2458 struct trace_event_call *call;
2459 int ret;
2461 list_for_each_entry(call, &ftrace_events, list) {
2462 ret = __trace_add_new_event(call, tr);
2463 if (ret < 0)
2464 pr_warn("Could not create directory for event %s\n",
2465 trace_event_name(call));
2469 struct trace_event_file *
2470 find_event_file(struct trace_array *tr, const char *system, const char *event)
2472 struct trace_event_file *file;
2473 struct trace_event_call *call;
2474 const char *name;
2476 list_for_each_entry(file, &tr->events, list) {
2478 call = file->event_call;
2479 name = trace_event_name(call);
2481 if (!name || !call->class || !call->class->reg)
2482 continue;
2484 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
2485 continue;
2487 if (strcmp(event, name) == 0 &&
2488 strcmp(system, call->class->system) == 0)
2489 return file;
2491 return NULL;
2492 }
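/*
 * Example lookup (sketch): find_event_file(tr, "sched", "sched_switch")
 * returns the trace_event_file for sched:sched_switch in @tr, or NULL
 * if the event does not exist, cannot be registered, or is marked
 * TRACE_EVENT_FL_IGNORE_ENABLE. Callers such as event_enable_func()
 * below hold event_mutex across the lookup and use of the file.
 */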
2494 #ifdef CONFIG_DYNAMIC_FTRACE
2496 /* Avoid typos */
2497 #define ENABLE_EVENT_STR "enable_event"
2498 #define DISABLE_EVENT_STR "disable_event"
2500 struct event_probe_data {
2501 struct trace_event_file *file;
2502 unsigned long count;
2503 int ref;
2504 bool enable;
2507 static void
2508 event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
2510 struct event_probe_data **pdata = (struct event_probe_data **)_data;
2511 struct event_probe_data *data = *pdata;
2513 if (!data)
2514 return;
2516 if (data->enable)
2517 clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
2518 else
2519 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
2522 static void
2523 event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data)
2525 struct event_probe_data **pdata = (struct event_probe_data **)_data;
2526 struct event_probe_data *data = *pdata;
2528 if (!data)
2529 return;
2531 if (!data->count)
2532 return;
2534 /* Skip if the event is already in the state we want to switch to */
2535 if (data->enable == !(data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
2536 return;
2538 if (data->count != -1)
2539 (data->count)--;
2541 event_enable_probe(ip, parent_ip, _data);
2544 static int
2545 event_enable_print(struct seq_file *m, unsigned long ip,
2546 struct ftrace_probe_ops *ops, void *_data)
2548 struct event_probe_data *data = _data;
2550 seq_printf(m, "%ps:", (void *)ip);
2552 seq_printf(m, "%s:%s:%s",
2553 data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
2554 data->file->event_call->class->system,
2555 trace_event_name(data->file->event_call));
2557 if (data->count == -1)
2558 seq_puts(m, ":unlimited\n");
2559 else
2560 seq_printf(m, ":count=%ld\n", data->count);
2562 return 0;
2563 }
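/*
 * Example of the lines the above produces when reading
 * set_ftrace_filter (function and event names are illustrative):
 *
 *	schedule:enable_event:sched:sched_switch:unlimited
 *	schedule:disable_event:sched:sched_switch:count=2
 */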
2565 static int
2566 event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip,
2567 void **_data)
2569 struct event_probe_data **pdata = (struct event_probe_data **)_data;
2570 struct event_probe_data *data = *pdata;
2572 data->ref++;
2573 return 0;
2576 static void
2577 event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip,
2578 void **_data)
2580 struct event_probe_data **pdata = (struct event_probe_data **)_data;
2581 struct event_probe_data *data = *pdata;
2583 if (WARN_ON_ONCE(data->ref <= 0))
2584 return;
2586 data->ref--;
2587 if (!data->ref) {
2588 /* Remove the SOFT_MODE flag */
2589 __ftrace_event_enable_disable(data->file, 0, 1);
2590 module_put(data->file->event_call->mod);
2591 kfree(data);
2593 *pdata = NULL;
2596 static struct ftrace_probe_ops event_enable_probe_ops = {
2597 .func = event_enable_probe,
2598 .print = event_enable_print,
2599 .init = event_enable_init,
2600 .free = event_enable_free,
2603 static struct ftrace_probe_ops event_enable_count_probe_ops = {
2604 .func = event_enable_count_probe,
2605 .print = event_enable_print,
2606 .init = event_enable_init,
2607 .free = event_enable_free,
2610 static struct ftrace_probe_ops event_disable_probe_ops = {
2611 .func = event_enable_probe,
2612 .print = event_enable_print,
2613 .init = event_enable_init,
2614 .free = event_enable_free,
2617 static struct ftrace_probe_ops event_disable_count_probe_ops = {
2618 .func = event_enable_count_probe,
2619 .print = event_enable_print,
2620 .init = event_enable_init,
2621 .free = event_enable_free,
2624 static int
2625 event_enable_func(struct ftrace_hash *hash,
2626 char *glob, char *cmd, char *param, int enabled)
2628 struct trace_array *tr = top_trace_array();
2629 struct trace_event_file *file;
2630 struct ftrace_probe_ops *ops;
2631 struct event_probe_data *data;
2632 const char *system;
2633 const char *event;
2634 char *number;
2635 bool enable;
2636 int ret;
2638 if (!tr)
2639 return -ENODEV;
2641 /* hash funcs only work with set_ftrace_filter */
2642 if (!enabled || !param)
2643 return -EINVAL;
2645 system = strsep(&param, ":");
2646 if (!param)
2647 return -EINVAL;
2649 event = strsep(&param, ":");
2651 mutex_lock(&event_mutex);
2653 ret = -EINVAL;
2654 file = find_event_file(tr, system, event);
2655 if (!file)
2656 goto out;
2658 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
2660 if (enable)
2661 ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
2662 else
2663 ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
2665 if (glob[0] == '!') {
2666 unregister_ftrace_function_probe_func(glob+1, ops);
2667 ret = 0;
2668 goto out;
2671 ret = -ENOMEM;
2672 data = kzalloc(sizeof(*data), GFP_KERNEL);
2673 if (!data)
2674 goto out;
2676 data->enable = enable;
2677 data->count = -1;
2678 data->file = file;
2680 if (!param)
2681 goto out_reg;
2683 number = strsep(&param, ":");
2685 ret = -EINVAL;
2686 if (!strlen(number))
2687 goto out_free;
2689 /*
2690 * We use the callback data field (which is a pointer)
2691 * as our counter.
2692 */
2693 ret = kstrtoul(number, 0, &data->count);
2694 if (ret)
2695 goto out_free;
2697 out_reg:
2698 /* Don't let event modules unload while probe registered */
2699 ret = try_module_get(file->event_call->mod);
2700 if (!ret) {
2701 ret = -EBUSY;
2702 goto out_free;
2705 ret = __ftrace_event_enable_disable(file, 1, 1);
2706 if (ret < 0)
2707 goto out_put;
2708 ret = register_ftrace_function_probe(glob, ops, data);
2709 /*
2710 * On success the above returns the number of functions enabled,
2711 * but if it didn't find any functions it returns zero.
2712 * Consider finding no functions a failure too.
2713 */
2714 if (!ret) {
2715 ret = -ENOENT;
2716 goto out_disable;
2717 } else if (ret < 0)
2718 goto out_disable;
2719 /* Just return zero, not the number of enabled functions */
2720 ret = 0;
2721 out:
2722 mutex_unlock(&event_mutex);
2723 return ret;
2725 out_disable:
2726 __ftrace_event_enable_disable(file, 0, 1);
2727 out_put:
2728 module_put(file->event_call->mod);
2729 out_free:
2730 kfree(data);
2731 goto out;
2732 }
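/*
 * Usage sketch for the command implemented above (see also
 * Documentation/trace/ftrace.txt; names are illustrative):
 *
 *	echo 'schedule:enable_event:sched:sched_switch:2' > set_ftrace_filter
 *
 * arms a probe on schedule() that enables sched:sched_switch for at
 * most two state changes, while
 *
 *	echo '!schedule:enable_event:sched:sched_switch' > set_ftrace_filter
 *
 * takes the '!' branch above and unregisters the probe again.
 */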
2734 static struct ftrace_func_command event_enable_cmd = {
2735 .name = ENABLE_EVENT_STR,
2736 .func = event_enable_func,
2739 static struct ftrace_func_command event_disable_cmd = {
2740 .name = DISABLE_EVENT_STR,
2741 .func = event_enable_func,
2744 static __init int register_event_cmds(void)
2746 int ret;
2748 ret = register_ftrace_command(&event_enable_cmd);
2749 if (WARN_ON(ret < 0))
2750 return ret;
2751 ret = register_ftrace_command(&event_disable_cmd);
2752 if (WARN_ON(ret < 0))
2753 unregister_ftrace_command(&event_enable_cmd);
2754 return ret;
2756 #else
2757 static inline int register_event_cmds(void) { return 0; }
2758 #endif /* CONFIG_DYNAMIC_FTRACE */
2760 /*
2761 * The top level array has already had its trace_event_file
2762 * descriptors created in order to allow for early events to
2763 * be recorded. This function is called after tracefs has been
2764 * initialized, and we now have to create the files associated
2765 * with the events.
2766 */
2767 static __init void
2768 __trace_early_add_event_dirs(struct trace_array *tr)
2770 struct trace_event_file *file;
2771 int ret;
2774 list_for_each_entry(file, &tr->events, list) {
2775 ret = event_create_dir(tr->event_dir, file);
2776 if (ret < 0)
2777 pr_warn("Could not create directory for event %s\n",
2778 trace_event_name(file->event_call));
2779 }
2780 }
2782 /*
2783 * For early boot up, the top trace array requires a list of
2784 * events that can be enabled. This must be done before
2785 * the filesystem is set up in order to allow events to be traced
2786 * early.
2787 */
2788 static __init void
2789 __trace_early_add_events(struct trace_array *tr)
2791 struct trace_event_call *call;
2792 int ret;
2794 list_for_each_entry(call, &ftrace_events, list) {
2795 /* Early boot up should not have any modules loaded */
2796 if (WARN_ON_ONCE(call->mod))
2797 continue;
2799 ret = __trace_early_add_new_event(call, tr);
2800 if (ret < 0)
2801 pr_warn("Could not create early event %s\n",
2802 trace_event_name(call));
2806 /* Remove the event directory structure for a trace directory. */
2807 static void
2808 __trace_remove_event_dirs(struct trace_array *tr)
2810 struct trace_event_file *file, *next;
2812 list_for_each_entry_safe(file, next, &tr->events, list)
2813 remove_event_file_dir(file);
2816 static void __add_event_to_tracers(struct trace_event_call *call)
2818 struct trace_array *tr;
2820 list_for_each_entry(tr, &ftrace_trace_arrays, list)
2821 __trace_add_new_event(call, tr);
2824 extern struct trace_event_call *__start_ftrace_events[];
2825 extern struct trace_event_call *__stop_ftrace_events[];
2827 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
2829 static __init int setup_trace_event(char *str)
2831 strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
2832 ring_buffer_expanded = true;
2833 tracing_selftest_disabled = true;
2835 return 1;
2837 __setup("trace_event=", setup_trace_event);
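/*
 * Example (illustrative) kernel command line:
 *
 *	trace_event=sched:sched_switch,sched:sched_wakeup
 *
 * The list is stashed in bootup_event_buf here and consumed by
 * early_enable_events(), so these events are recorded long before
 * tracefs is even mounted.
 */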
2839 /* Expects to have event_mutex held when called */
2840 static int
2841 create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
2843 struct dentry *d_events;
2844 struct dentry *entry;
2846 entry = tracefs_create_file("set_event", 0644, parent,
2847 tr, &ftrace_set_event_fops);
2848 if (!entry) {
2849 pr_warn("Could not create tracefs 'set_event' entry\n");
2850 return -ENOMEM;
2853 d_events = tracefs_create_dir("events", parent);
2854 if (!d_events) {
2855 pr_warn("Could not create tracefs 'events' directory\n");
2856 return -ENOMEM;
2859 entry = tracefs_create_file("set_event_pid", 0644, parent,
2860 tr, &ftrace_set_event_pid_fops);
2862 /* ring buffer internal formats */
2863 trace_create_file("header_page", 0444, d_events,
2864 ring_buffer_print_page_header,
2865 &ftrace_show_header_fops);
2867 trace_create_file("header_event", 0444, d_events,
2868 ring_buffer_print_entry_header,
2869 &ftrace_show_header_fops);
2871 trace_create_file("enable", 0644, d_events,
2872 tr, &ftrace_tr_enable_fops);
2874 tr->event_dir = d_events;
2876 return 0;
2877 }
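/*
 * Resulting layout under @parent (the top level tracefs directory or
 * an instance directory):
 *
 *	set_event		- enable/disable individual events
 *	set_event_pid		- limit event tracing to the listed pids
 *	events/enable		- enable/disable all events at once
 *	events/header_page	- ring buffer page header format
 *	events/header_event	- ring buffer event header format
 *
 * The per-system and per-event subdirectories of events/ are created
 * later by __trace_add_event_dirs() or __trace_early_add_event_dirs().
 */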
2879 /**
2880 * event_trace_add_tracer - add an instance of a trace_array to events
2881 * @parent: The parent dentry to place the files/directories for events in
2882 * @tr: The trace array associated with these events
2883 *
2884 * When a new instance is created, it needs to set up its events
2885 * directory, as well as other files associated with events. It also
2886 * creates the event hierarchy in the @parent/events directory.
2887 *
2888 * Returns 0 on success.
2889 */
2890 int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
2892 int ret;
2894 mutex_lock(&event_mutex);
2896 ret = create_event_toplevel_files(parent, tr);
2897 if (ret)
2898 goto out_unlock;
2900 down_write(&trace_event_sem);
2901 __trace_add_event_dirs(tr);
2902 up_write(&trace_event_sem);
2904 out_unlock:
2905 mutex_unlock(&event_mutex);
2907 return ret;
2908 }
2910 /*
2911 * The top trace array already had its file descriptors created.
2912 * Now the files themselves need to be created.
2913 */
2914 static __init int
2915 early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
2917 int ret;
2919 mutex_lock(&event_mutex);
2921 ret = create_event_toplevel_files(parent, tr);
2922 if (ret)
2923 goto out_unlock;
2925 down_write(&trace_event_sem);
2926 __trace_early_add_event_dirs(tr);
2927 up_write(&trace_event_sem);
2929 out_unlock:
2930 mutex_unlock(&event_mutex);
2932 return ret;
2935 int event_trace_del_tracer(struct trace_array *tr)
2937 mutex_lock(&event_mutex);
2939 /* Disable any event triggers and associated soft-disabled events */
2940 clear_event_triggers(tr);
2942 /* Clear the pid list */
2943 __ftrace_clear_event_pids(tr);
2945 /* Disable any running events */
2946 __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
2948 /* Accesses to events are done within rcu_read_lock_sched() */
2949 synchronize_sched();
2951 down_write(&trace_event_sem);
2952 __trace_remove_event_dirs(tr);
2953 tracefs_remove_recursive(tr->event_dir);
2954 up_write(&trace_event_sem);
2956 tr->event_dir = NULL;
2958 mutex_unlock(&event_mutex);
2960 return 0;
2963 static __init int event_trace_memsetup(void)
2965 field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
2966 file_cachep = KMEM_CACHE(trace_event_file, SLAB_PANIC);
2967 return 0;
2970 static __init void
2971 early_enable_events(struct trace_array *tr, bool disable_first)
2973 char *buf = bootup_event_buf;
2974 char *token;
2975 int ret;
2977 while (true) {
2978 token = strsep(&buf, ",");
2980 if (!token)
2981 break;
2983 if (*token) {
2984 /* Restarting syscalls requires that we stop them first */
2985 if (disable_first)
2986 ftrace_set_clr_event(tr, token, 0);
2988 ret = ftrace_set_clr_event(tr, token, 1);
2989 if (ret)
2990 pr_warn("Failed to enable trace event: %s\n", token);
2991 }
2993 /* Put back the comma to allow this to be called again */
2994 if (buf)
2995 *(buf - 1) = ',';
2996 }
2997 }
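/*
 * Worked example (hypothetical buffer): with
 * bootup_event_buf = "sched:sched_switch,bogus", strsep() overwrites
 * the ',' with '\0' to hand out "sched:sched_switch"; writing the ','
 * back afterwards restores the buffer so event_trace_enable_again()
 * can re-parse the very same string for events (such as syscall
 * events) that could not be enabled on the first pass.
 */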
2999 static __init int event_trace_enable(void)
3001 struct trace_array *tr = top_trace_array();
3002 struct trace_event_call **iter, *call;
3003 int ret;
3005 if (!tr)
3006 return -ENODEV;
3008 for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
3010 call = *iter;
3011 ret = event_init(call);
3012 if (!ret)
3013 list_add(&call->list, &ftrace_events);
3014 }
3016 /*
3017 * We need the top trace array to have a working set of trace
3018 * points at early init, before the debug files and directories
3019 * are created. Create the file entries now, and attach them
3020 * to the actual file dentries later.
3021 */
3022 __trace_early_add_events(tr);
3024 early_enable_events(tr, false);
3026 trace_printk_start_comm();
3028 register_event_cmds();
3030 register_trigger_cmds();
3032 return 0;
3033 }
3035 /*
3036 * event_trace_enable() is called from trace_event_init() first to
3037 * initialize events and perhaps start any events that are on the
3038 * command line. Unfortunately, some events will not start this
3039 * early, like the system call tracepoints, which need the
3040 * TIF_SYSCALL_TRACEPOINT flag set on pid 1. But event_trace_enable()
3041 * runs before pid 1 starts, so the flag is never set, the syscall
3042 * tracepoints are never reached, and the events are enabled
3043 * regardless (recording nothing); hence the second pass below.
3044 */
3045 static __init int event_trace_enable_again(void)
3047 struct trace_array *tr;
3049 tr = top_trace_array();
3050 if (!tr)
3051 return -ENODEV;
3053 early_enable_events(tr, true);
3055 return 0;
3058 early_initcall(event_trace_enable_again);
3060 static __init int event_trace_init(void)
3062 struct trace_array *tr;
3063 struct dentry *d_tracer;
3064 struct dentry *entry;
3065 int ret;
3067 tr = top_trace_array();
3068 if (!tr)
3069 return -ENODEV;
3071 d_tracer = tracing_init_dentry();
3072 if (IS_ERR(d_tracer))
3073 return 0;
3075 entry = tracefs_create_file("available_events", 0444, d_tracer,
3076 tr, &ftrace_avail_fops);
3077 if (!entry)
3078 pr_warn("Could not create tracefs 'available_events' entry\n");
3080 if (trace_define_generic_fields())
3081 pr_warn("tracing: Failed to allocate generic fields\n");
3083 if (trace_define_common_fields())
3084 pr_warn("tracing: Failed to allocate common fields\n");
3086 ret = early_event_add_tracer(d_tracer, tr);
3087 if (ret)
3088 return ret;
3090 #ifdef CONFIG_MODULES
3091 ret = register_module_notifier(&trace_module_nb);
3092 if (ret)
3093 pr_warn("Failed to register trace events module notifier\n");
3094 #endif
3095 return 0;
3098 void __init trace_event_init(void)
3100 event_trace_memsetup();
3101 init_ftrace_syscalls();
3102 event_trace_enable();
3105 fs_initcall(event_trace_init);
3107 #ifdef CONFIG_FTRACE_STARTUP_TEST
3109 static DEFINE_SPINLOCK(test_spinlock);
3110 static DEFINE_SPINLOCK(test_spinlock_irq);
3111 static DEFINE_MUTEX(test_mutex);
3113 static __init void test_work(struct work_struct *dummy)
3115 spin_lock(&test_spinlock);
3116 spin_lock_irq(&test_spinlock_irq);
3117 udelay(1);
3118 spin_unlock_irq(&test_spinlock_irq);
3119 spin_unlock(&test_spinlock);
3121 mutex_lock(&test_mutex);
3122 msleep(1);
3123 mutex_unlock(&test_mutex);
3126 static __init int event_test_thread(void *unused)
3128 void *test_malloc;
3130 test_malloc = kmalloc(1234, GFP_KERNEL);
3131 if (!test_malloc)
3132 pr_info("failed to kmalloc\n");
3134 schedule_on_each_cpu(test_work);
3136 kfree(test_malloc);
3138 set_current_state(TASK_INTERRUPTIBLE);
3139 while (!kthread_should_stop()) {
3140 schedule();
3141 set_current_state(TASK_INTERRUPTIBLE);
3143 __set_current_state(TASK_RUNNING);
3145 return 0;
3146 }
3148 /*
3149 * Do various things that may trigger events.
3150 */
3151 static __init void event_test_stuff(void)
3153 struct task_struct *test_thread;
3155 test_thread = kthread_run(event_test_thread, NULL, "test-events");
3156 msleep(1);
3157 kthread_stop(test_thread);
3158 }
3160 /*
3161 * For every trace event defined, we will test each trace point
3162 * separately, then by groups, and finally all trace points together.
3163 */
3164 static __init void event_trace_self_tests(void)
3166 struct trace_subsystem_dir *dir;
3167 struct trace_event_file *file;
3168 struct trace_event_call *call;
3169 struct event_subsystem *system;
3170 struct trace_array *tr;
3171 int ret;
3173 tr = top_trace_array();
3174 if (!tr)
3175 return;
3177 pr_info("Running tests on trace events:\n");
3179 list_for_each_entry(file, &tr->events, list) {
3181 call = file->event_call;
3183 /* Only test those that have a probe */
3184 if (!call->class || !call->class->probe)
3185 continue;
3187 /*
3188 * Testing syscall events here is pretty useless, but
3189 * we still do it if configured. It is time consuming, though:
3190 * what we really need is a user thread to perform the
3191 * syscalls as we test.
3192 */
3193 #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
3194 if (call->class->system &&
3195 strcmp(call->class->system, "syscalls") == 0)
3196 continue;
3197 #endif
3199 pr_info("Testing event %s: ", trace_event_name(call));
3201 /*
3202 * If an event is already enabled, someone is using
3203 * it and the self test should not be run on it.
3204 */
3205 if (file->flags & EVENT_FILE_FL_ENABLED) {
3206 pr_warn("Enabled event during self test!\n");
3207 WARN_ON_ONCE(1);
3208 continue;
3211 ftrace_event_enable_disable(file, 1);
3212 event_test_stuff();
3213 ftrace_event_enable_disable(file, 0);
3215 pr_cont("OK\n");
3218 /* Now test at the sub system level */
3220 pr_info("Running tests on trace event systems:\n");
3222 list_for_each_entry(dir, &tr->systems, list) {
3224 system = dir->subsystem;
3226 /* the ftrace system is special, skip it */
3227 if (strcmp(system->name, "ftrace") == 0)
3228 continue;
3230 pr_info("Testing event system %s: ", system->name);
3232 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
3233 if (WARN_ON_ONCE(ret)) {
3234 pr_warn("error enabling system %s\n",
3235 system->name);
3236 continue;
3239 event_test_stuff();
3241 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
3242 if (WARN_ON_ONCE(ret)) {
3243 pr_warn("error disabling system %s\n",
3244 system->name);
3245 continue;
3248 pr_cont("OK\n");
3251 /* Test with all events enabled */
3253 pr_info("Running tests on all trace events:\n");
3254 pr_info("Testing all events: ");
3256 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
3257 if (WARN_ON_ONCE(ret)) {
3258 pr_warn("error enabling all events\n");
3259 return;
3262 event_test_stuff();
3264 /* Reset: disable all events again */
3265 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
3266 if (WARN_ON_ONCE(ret)) {
3267 pr_warn("error disabling all events\n");
3268 return;
3271 pr_cont("OK\n");
3274 #ifdef CONFIG_FUNCTION_TRACER
3276 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
3278 static struct trace_event_file event_trace_file __initdata;
3280 static void __init
3281 function_test_events_call(unsigned long ip, unsigned long parent_ip,
3282 struct ftrace_ops *op, struct pt_regs *pt_regs)
3284 struct ring_buffer_event *event;
3285 struct ring_buffer *buffer;
3286 struct ftrace_entry *entry;
3287 unsigned long flags;
3288 long disabled;
3289 int cpu;
3290 int pc;
3292 pc = preempt_count();
3293 preempt_disable_notrace();
3294 cpu = raw_smp_processor_id();
3295 disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
3297 if (disabled != 1)
3298 goto out;
3300 local_save_flags(flags);
3302 event = trace_event_buffer_lock_reserve(&buffer, &event_trace_file,
3303 TRACE_FN, sizeof(*entry),
3304 flags, pc);
3305 if (!event)
3306 goto out;
3307 entry = ring_buffer_event_data(event);
3308 entry->ip = ip;
3309 entry->parent_ip = parent_ip;
3311 event_trigger_unlock_commit(&event_trace_file, buffer, event,
3312 entry, flags, pc);
3313 out:
3314 atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
3315 preempt_enable_notrace();
3318 static struct ftrace_ops trace_ops __initdata =
3320 .func = function_test_events_call,
3321 .flags = FTRACE_OPS_FL_RECURSION_SAFE,
3324 static __init void event_trace_self_test_with_function(void)
3326 int ret;
3328 event_trace_file.tr = top_trace_array();
3329 if (WARN_ON(!event_trace_file.tr))
3330 return;
3332 ret = register_ftrace_function(&trace_ops);
3333 if (WARN_ON(ret < 0)) {
3334 pr_info("Failed to enable function tracer for event tests\n");
3335 return;
3337 pr_info("Running tests again, along with the function tracer\n");
3338 event_trace_self_tests();
3339 unregister_ftrace_function(&trace_ops);
3341 #else
3342 static __init void event_trace_self_test_with_function(void)
3345 #endif
3347 static __init int event_trace_self_tests_init(void)
3349 if (!tracing_selftest_disabled) {
3350 event_trace_self_tests();
3351 event_trace_self_test_with_function();
3354 return 0;
3357 late_initcall(event_trace_self_tests_init);
3359 #endif