// SPDX-License-Identifier: GPL-2.0
/*
 * trace_events_trigger - trace event triggers
 *
 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
 */

#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/rculist.h>

#include "trace.h"
static LIST_HEAD(trigger_commands);
static DEFINE_MUTEX(trigger_cmd_mutex);
void trigger_data_free(struct event_trigger_data *data)
{
	if (data->cmd_ops->set_filter)
		data->cmd_ops->set_filter(NULL, data, NULL);

	/* make sure current triggers exit before free */
	tracepoint_synchronize_unregister();

	kfree(data);
}
/**
 * event_triggers_call - Call triggers associated with a trace event
 * @file: The trace_event_file associated with the event
 * @rec: The trace entry for the event, NULL for unconditional invocation
 * @event: The ring buffer event associated with @rec, may be NULL
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command.  If rec is
 * non-NULL, it means that the trigger requires further processing and
 * shouldn't be unconditionally invoked.  If rec is non-NULL and the
 * trigger has a filter associated with it, rec will be checked against
 * the filter and if the record matches the trigger will be invoked.
 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
 * in any case until the current event is written, the trigger
 * function isn't invoked but the bit associated with the deferred
 * trigger is set in the return value.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 *
 * Return: an enum event_trigger_type value containing a set bit for
 * any trigger that should be deferred, ETT_NONE if nothing to defer.
 */
enum event_trigger_type
event_triggers_call(struct trace_event_file *file, void *rec,
		    struct ring_buffer_event *event)
{
	struct event_trigger_data *data;
	enum event_trigger_type tt = ETT_NONE;
	struct event_filter *filter;

	if (list_empty(&file->triggers))
		return tt;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)
			continue;
		if (!rec) {
			data->ops->func(data, rec, event);
			continue;
		}
		filter = rcu_dereference_sched(data->filter);
		if (filter && !filter_match_preds(filter, rec))
			continue;
		if (event_command_post_trigger(data->cmd_ops)) {
			tt |= data->cmd_ops->trigger_type;
			continue;
		}
		data->ops->func(data, rec, event);
	}
	return tt;
}
EXPORT_SYMBOL_GPL(event_triggers_call);
/**
 * event_triggers_post_call - Call 'post_triggers' for a trace event
 * @file: The trace_event_file associated with the event
 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command, if the
 * corresponding bit is set in the tt enum passed into this function.
 * See @event_triggers_call for details on how those bits are set.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 */
void
event_triggers_post_call(struct trace_event_file *file,
			 enum event_trigger_type tt)
{
	struct event_trigger_data *data;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)
			continue;
		if (data->cmd_ops->trigger_type & tt)
			data->ops->func(data, NULL, NULL);
	}
}
EXPORT_SYMBOL_GPL(event_triggers_post_call);
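
/*
 * Illustrative sketch (not part of this file): a typical event commit
 * path drives the two phases above roughly as follows; names are
 * simplified and the real callers live elsewhere in the tracing code.
 *
 *	tt = event_triggers_call(file, entry, event);
 *	...write the record...
 *	if (tt)
 *		event_triggers_post_call(file, tt);
 */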
#define SHOW_AVAILABLE_TRIGGERS (void *)(1UL)
static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
{
	struct trace_event_file *event_file = event_file_data(m->private);

	if (t == SHOW_AVAILABLE_TRIGGERS)
		return NULL;

	return seq_list_next(t, &event_file->triggers, pos);
}
static void *trigger_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *event_file;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	event_file = event_file_data(m->private);
	if (unlikely(!event_file))
		return ERR_PTR(-ENODEV);

	if (list_empty(&event_file->triggers))
		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;

	return seq_list_start(&event_file->triggers, *pos);
}
static void trigger_stop(struct seq_file *m, void *t)
{
	mutex_unlock(&event_mutex);
}
static int trigger_show(struct seq_file *m, void *v)
{
	struct event_trigger_data *data;
	struct event_command *p;

	if (v == SHOW_AVAILABLE_TRIGGERS) {
		seq_puts(m, "# Available triggers:\n");
		seq_putc(m, '#');
		mutex_lock(&trigger_cmd_mutex);
		list_for_each_entry_reverse(p, &trigger_commands, list)
			seq_printf(m, " %s", p->name);
		seq_putc(m, '\n');
		mutex_unlock(&trigger_cmd_mutex);
		return 0;
	}

	data = list_entry(v, struct event_trigger_data, list);
	data->ops->print(m, data->ops, data);

	return 0;
}
static const struct seq_operations event_triggers_seq_ops = {
	.start = trigger_start,
	.next = trigger_next,
	.stop = trigger_stop,
	.show = trigger_show,
};
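
/*
 * Example (illustrative): with no triggers set, reading a per-event
 * 'trigger' file goes through trigger_start()/trigger_show() above and
 * prints something like:
 *
 *	# cat events/sched/sched_switch/trigger
 *	# Available triggers:
 *	# traceon traceoff snapshot stacktrace enable_event disable_event
 *
 * (path relative to the tracefs mount point)
 */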
static int event_trigger_regex_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	mutex_lock(&event_mutex);

	if (unlikely(!event_file_data(file))) {
		mutex_unlock(&event_mutex);
		return -ENODEV;
	}

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		struct trace_event_file *event_file;
		struct event_command *p;

		event_file = event_file_data(file);

		list_for_each_entry(p, &trigger_commands, list) {
			if (p->unreg_all)
				p->unreg_all(event_file);
		}
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &event_triggers_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = file;
		}
	}

	mutex_unlock(&event_mutex);

	return ret;
}
static int trigger_process_regex(struct trace_event_file *file, char *buff)
{
	char *command, *next = buff;
	struct event_command *p;
	int ret = -EINVAL;

	command = strsep(&next, ": \t");
	command = (command[0] != '!') ? command : command + 1;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry(p, &trigger_commands, list) {
		if (strcmp(p->name, command) == 0) {
			ret = p->func(p, file, buff, command, next);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}
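
/*
 * Example (illustrative): a write of
 *
 *	traceoff:5 if prev_comm == "bash"
 *
 * reaches trigger_process_regex() as one string; strsep() above yields
 * command = "traceoff" and the matching event_command's func() gets the
 * remainder ("5 if ...") as its param.  A leading '!' ("!traceoff")
 * selects the same command but requests removal of the trigger.
 */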
static ssize_t event_trigger_regex_write(struct file *file,
					 const char __user *ubuf,
					 size_t cnt, loff_t *ppos)
{
	struct trace_event_file *event_file;
	ssize_t ret;
	char *buf;

	if (!cnt)
		return 0;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	strim(buf);

	mutex_lock(&event_mutex);
	event_file = event_file_data(file);
	if (unlikely(!event_file)) {
		mutex_unlock(&event_mutex);
		kfree(buf);
		return -ENODEV;
	}
	ret = trigger_process_regex(event_file, buf);
	mutex_unlock(&event_mutex);

	kfree(buf);
	if (ret < 0)
		goto out;

	*ppos += cnt;
	ret = cnt;
 out:
	return ret;
}
static int event_trigger_regex_release(struct inode *inode, struct file *file)
{
	mutex_lock(&event_mutex);

	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	mutex_unlock(&event_mutex);

	return 0;
}
static ssize_t
event_trigger_write(struct file *filp, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
}
static int
event_trigger_open(struct inode *inode, struct file *filp)
{
	return event_trigger_regex_open(inode, filp);
}
static int
event_trigger_release(struct inode *inode, struct file *file)
{
	return event_trigger_regex_release(inode, file);
}
const struct file_operations event_trigger_fops = {
	.open = event_trigger_open,
	.read = seq_read,
	.write = event_trigger_write,
	.llseek = tracing_lseek,
	.release = event_trigger_release,
};
/*
 * Currently we only register event commands from __init, so mark this
 * __init too.
 */
__init int register_event_command(struct event_command *cmd)
{
	struct event_command *p;
	int ret = 0;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry(p, &trigger_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}
	list_add(&cmd->list, &trigger_commands);
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}
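
/*
 * Illustrative sketch (hypothetical, not part of this file): a new
 * trigger command would be wired up by filling in an event_command and
 * registering it from an __init function, e.g.:
 *
 *	static struct event_command trigger_foo_cmd = {
 *		.name			= "foo",
 *		.trigger_type		= ETT_FOO,
 *		.func			= event_trigger_callback,
 *		.reg			= register_trigger,
 *		.unreg			= unregister_trigger,
 *		.get_trigger_ops	= foo_get_trigger_ops,
 *		.set_filter		= set_trigger_filter,
 *	};
 *
 *	ret = register_event_command(&trigger_foo_cmd);
 *
 * "foo", ETT_FOO and foo_get_trigger_ops() are made-up names used only
 * for illustration.
 */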
/*
 * Currently we only unregister event commands from __init, so mark
 * this __init too.
 */
__init int unregister_event_command(struct event_command *cmd)
{
	struct event_command *p, *n;
	int ret = -ENODEV;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry_safe(p, n, &trigger_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = 0;
			list_del_init(&p->list);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}
/**
 * event_trigger_print - Generic event_trigger_ops @print implementation
 * @name: The name of the event trigger
 * @m: The seq_file being printed to
 * @data: Trigger-specific data
 * @filter_str: filter_str to print, if present
 *
 * Common implementation for event triggers to print themselves.
 *
 * Usually wrapped by a function that simply sets the @name of the
 * trigger command and then invokes this.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_print(const char *name, struct seq_file *m,
		    void *data, char *filter_str)
{
	long count = (long)data;

	seq_puts(m, name);

	if (count == -1)
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", count);

	if (filter_str)
		seq_printf(m, " if %s\n", filter_str);
	else
		seq_putc(m, '\n');

	return 0;
}
/**
 * event_trigger_init - Generic event_trigger_ops @init implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger initialization.
 *
 * Usually used directly as the @init method in event trigger
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
int event_trigger_init(struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	data->ref++;
	return 0;
}
/**
 * event_trigger_free - Generic event_trigger_ops @free implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger de-initialization.
 *
 * Usually used directly as the @free method in event trigger
 * implementations.
 */
static void
event_trigger_free(struct event_trigger_ops *ops,
		   struct event_trigger_data *data)
{
	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref)
		trigger_data_free(data);
}
int trace_event_trigger_enable_disable(struct trace_event_file *file,
				       int trigger_enable)
{
	int ret = 0;

	if (trigger_enable) {
		if (atomic_inc_return(&file->tm_ref) > 1)
			return ret;
		set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 1, 1);
	} else {
		if (atomic_dec_return(&file->tm_ref) > 0)
			return ret;
		clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 0, 1);
	}

	return ret;
}
/**
 * clear_event_triggers - Clear all triggers associated with a trace array
 * @tr: The trace array to clear
 *
 * For each trigger, the triggering event has its tm_ref decremented
 * via trace_event_trigger_enable_disable(), and any associated event
 * (in the case of enable/disable_event triggers) will have its sm_ref
 * decremented via free()->trace_event_enable_disable().  That
 * combination effectively reverses the soft-mode/trigger state added
 * by trigger registration.
 *
 * Must be called with event_mutex held.
 */
void
clear_event_triggers(struct trace_array *tr)
{
	struct trace_event_file *file;

	list_for_each_entry(file, &tr->events, list) {
		struct event_trigger_data *data, *n;
		list_for_each_entry_safe(data, n, &file->triggers, list) {
			trace_event_trigger_enable_disable(file, 0);
			list_del_rcu(&data->list);
			if (data->ops->free)
				data->ops->free(data->ops, data);
		}
	}
}
/**
 * update_cond_flag - Set or reset the TRIGGER_COND bit
 * @file: The trace_event_file associated with the event
 *
 * If an event has triggers and any of those triggers has a filter or
 * a post_trigger, trigger invocation needs to be deferred until after
 * the current event has logged its data, and the event should have
 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
 * cleared.
 */
void update_cond_flag(struct trace_event_file *file)
{
	struct event_trigger_data *data;
	bool set_cond = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->filter || event_command_post_trigger(data->cmd_ops) ||
		    event_command_needs_rec(data->cmd_ops)) {
			set_cond = true;
			break;
		}
	}

	if (set_cond)
		set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
	else
		clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
}
/**
 * register_trigger - Generic event_command @reg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data to associate with the trigger
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger registration.
 *
 * Usually used directly as the @reg method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int register_trigger(char *glob, struct event_trigger_ops *ops,
			    struct event_trigger_data *data,
			    struct trace_event_file *file)
{
	struct event_trigger_data *test;
	int ret = 0;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	update_cond_flag(file);
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
out:
	return ret;
}
/**
 * unregister_trigger - Generic event_command @unreg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @test: Trigger-specific data used to find the trigger to remove
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger unregistration.
 *
 * Usually used directly as the @unreg method in event command
 * implementations.
 */
static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
			       struct event_trigger_data *test,
			       struct trace_event_file *file)
{
	struct event_trigger_data *data;
	bool unregistered = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}
/**
 * event_trigger_callback - Generic event_command @func implementation
 * @cmd_ops: The command ops, used for trigger registration
 * @file: The trace_event_file associated with the event
 * @glob: The raw string used to register the trigger
 * @cmd: The cmd portion of the string used to register the trigger
 * @param: The params portion of the string used to register the trigger
 *
 * Common implementation for event command parsing and trigger
 * instantiation.
 *
 * Usually used directly as the @func method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_callback(struct event_command *cmd_ops,
		       struct trace_event_file *file,
		       char *glob, char *cmd, char *param)
{
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	char *trigger = NULL;
	char *number;
	int ret;

	/* separate the trigger from the filter (t:n [if filter]) */
	if (param && isdigit(param[0]))
		trigger = strsep(&param, " \t");

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	trigger_data->private_data = file;
	INIT_LIST_HEAD(&trigger_data->list);
	INIT_LIST_HEAD(&trigger_data->named_list);

	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		ret = 0;
		goto out;
	}

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	/* Up the trigger_data count to make sure reg doesn't free it on failure */
	event_trigger_init(trigger_ops, trigger_data);
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
		ret = -ENOENT;
	} else if (ret > 0)
		ret = 0;

	/* Down the counter of trigger_data or free it if not used anymore */
	event_trigger_free(trigger_ops, trigger_data);
 out:
	return ret;

 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	kfree(trigger_data);
	goto out;
}
/**
 * set_trigger_filter - Generic event_command @set_filter implementation
 * @filter_str: The filter string for the trigger, NULL to remove filter
 * @trigger_data: Trigger-specific data
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event command filter parsing and filter
 * instantiation.
 *
 * Usually used directly as the @set_filter method in event command
 * implementations.
 *
 * Also used to remove a filter (if filter_str = NULL).
 *
 * Return: 0 on success, errno otherwise
 */
int set_trigger_filter(char *filter_str,
		       struct event_trigger_data *trigger_data,
		       struct trace_event_file *file)
{
	struct event_trigger_data *data = trigger_data;
	struct event_filter *filter = NULL, *tmp;
	int ret = -EINVAL;
	char *s;

	if (!filter_str) /* clear the current filter */
		goto assign;

	s = strsep(&filter_str, " \t");

	if (!strlen(s) || strcmp(s, "if") != 0)
		goto out;

	if (!filter_str)
		goto out;

	/* The filter is for the 'trigger' event, not the triggered event */
	ret = create_event_filter(file->tr, file->event_call,
				  filter_str, false, &filter);
	/*
	 * If create_event_filter() fails, filter still needs to be freed.
	 * Which the calling code will do with data->filter.
	 */
 assign:
	tmp = rcu_access_pointer(data->filter);

	rcu_assign_pointer(data->filter, filter);

	if (tmp) {
		/* Make sure the call is done with the filter */
		tracepoint_synchronize_unregister();
		free_event_filter(tmp);
	}

	kfree(data->filter_str);
	data->filter_str = NULL;

	if (filter_str) {
		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
		if (!data->filter_str) {
			free_event_filter(rcu_access_pointer(data->filter));
			ret = -ENOMEM;
		}
	}
 out:
	return ret;
}
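
/*
 * Example (illustrative): for a write like
 *
 *	echo 'stacktrace:3 if bytes_req >= 1024' > events/kmem/kmalloc/trigger
 *
 * set_trigger_filter() receives "if bytes_req >= 1024", strips the "if",
 * builds an event filter against the triggering event's fields and
 * publishes it via rcu_assign_pointer(); writing the same trigger
 * without an "if" clause (filter_str == NULL) clears any existing filter.
 */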
static LIST_HEAD(named_triggers);
/**
 * find_named_trigger - Find the common named trigger associated with @name
 * @name: The name of the set of named triggers to find the common data for
 *
 * Named triggers are sets of triggers that share a common set of
 * trigger data.  The first named trigger registered with a given name
 * owns the common trigger data that the others subsequently
 * registered with the same name will reference.  This function
 * returns the common trigger data associated with that first
 * registered instance.
 *
 * Return: the common trigger data for the given named trigger on
 * success, NULL otherwise.
 */
struct event_trigger_data *find_named_trigger(const char *name)
{
	struct event_trigger_data *data;

	if (!name)
		return NULL;

	list_for_each_entry(data, &named_triggers, named_list) {
		if (data->named_data)
			continue;
		if (strcmp(data->name, name) == 0)
			return data;
	}

	return NULL;
}
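
/*
 * Example (illustrative): named triggers are currently created by hist
 * triggers, e.g. two events sharing one histogram via
 *
 *	hist:keys=call_site:name=foo
 *
 * The first "foo" registration owns the shared event_trigger_data; later
 * registrations find it through find_named_trigger("foo") and point
 * their named_data at it.
 */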
/**
 * is_named_trigger - determine if a given trigger is a named trigger
 * @test: The trigger data to test
 *
 * Return: true if 'test' is a named trigger, false otherwise.
 */
bool is_named_trigger(struct event_trigger_data *test)
{
	struct event_trigger_data *data;

	list_for_each_entry(data, &named_triggers, named_list) {
		if (test == data)
			return true;
	}

	return false;
}
/**
 * save_named_trigger - save the trigger in the named trigger list
 * @name: The name of the named trigger set
 * @data: The trigger data to save
 *
 * Return: 0 if successful, negative error otherwise.
 */
int save_named_trigger(const char *name, struct event_trigger_data *data)
{
	data->name = kstrdup(name, GFP_KERNEL);
	if (!data->name)
		return -ENOMEM;

	list_add(&data->named_list, &named_triggers);

	return 0;
}
/**
 * del_named_trigger - delete a trigger from the named trigger list
 * @data: The trigger data to delete
 */
void del_named_trigger(struct event_trigger_data *data)
{
	kfree(data->name);
	data->name = NULL;

	list_del(&data->named_list);
}
static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
{
	struct event_trigger_data *test;

	list_for_each_entry(test, &named_triggers, named_list) {
		if (strcmp(test->name, data->name) == 0) {
			if (pause) {
				test->paused_tmp = test->paused;
				test->paused = true;
			} else
				test->paused = test->paused_tmp;
			break;
		}
	}
}
/**
 * pause_named_trigger - Pause all named triggers with the same name
 * @data: The trigger data of a named trigger to pause
 *
 * Pauses a named trigger along with all other triggers having the
 * same name.  Because named triggers share a common set of data,
 * pausing only one is meaningless, so pausing one named trigger needs
 * to pause all triggers with the same name.
 */
void pause_named_trigger(struct event_trigger_data *data)
{
	__pause_named_trigger(data, true);
}
/**
 * unpause_named_trigger - Un-pause all named triggers with the same name
 * @data: The trigger data of a named trigger to unpause
 *
 * Un-pauses a named trigger along with all other triggers having the
 * same name.  Because named triggers share a common set of data,
 * unpausing only one is meaningless, so unpausing one named trigger
 * needs to unpause all triggers with the same name.
 */
void unpause_named_trigger(struct event_trigger_data *data)
{
	__pause_named_trigger(data, false);
}
/**
 * set_named_trigger_data - Associate common named trigger data
 * @data: The trigger data to associate
 * @named_data: The common named trigger to be associated
 *
 * Named triggers are sets of triggers that share a common set of
 * trigger data.  The first named trigger registered with a given name
 * owns the common trigger data that the others subsequently
 * registered with the same name will reference.  This function
 * associates the common trigger data from the first trigger with the
 * given trigger.
 */
void set_named_trigger_data(struct event_trigger_data *data,
			    struct event_trigger_data *named_data)
{
	data->named_data = named_data;
}
struct event_trigger_data *
get_named_trigger_data(struct event_trigger_data *data)
{
	return data->named_data;
}
static void
traceon_trigger(struct event_trigger_data *data, void *rec,
		struct ring_buffer_event *event)
{
	if (tracing_is_on())
		return;

	tracing_on();
}
static void
traceon_count_trigger(struct event_trigger_data *data, void *rec,
		      struct ring_buffer_event *event)
{
	if (tracing_is_on())
		return;

	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	tracing_on();
}
static void
traceoff_trigger(struct event_trigger_data *data, void *rec,
		 struct ring_buffer_event *event)
{
	if (!tracing_is_on())
		return;

	tracing_off();
}
static void
traceoff_count_trigger(struct event_trigger_data *data, void *rec,
		       struct ring_buffer_event *event)
{
	if (!tracing_is_on())
		return;

	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	tracing_off();
}
static int
traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		      struct event_trigger_data *data)
{
	return event_trigger_print("traceon", m, (void *)data->count,
				   data->filter_str);
}
static int
traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("traceoff", m, (void *)data->count,
				   data->filter_str);
}
static struct event_trigger_ops traceon_trigger_ops = {
	.func			= traceon_trigger,
	.print			= traceon_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

static struct event_trigger_ops traceon_count_trigger_ops = {
	.func			= traceon_count_trigger,
	.print			= traceon_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

static struct event_trigger_ops traceoff_trigger_ops = {
	.func			= traceoff_trigger,
	.print			= traceoff_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

static struct event_trigger_ops traceoff_count_trigger_ops = {
	.func			= traceoff_count_trigger,
	.print			= traceoff_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};
static struct event_trigger_ops *
onoff_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_trigger_ops :
			&traceon_trigger_ops;
	else
		ops = param ? &traceoff_count_trigger_ops :
			&traceoff_trigger_ops;

	return ops;
}
static struct event_command trigger_traceon_cmd = {
	.name			= "traceon",
	.trigger_type		= ETT_TRACE_ONOFF,
	.func			= event_trigger_callback,
	.reg			= register_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= onoff_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
static struct event_command trigger_traceoff_cmd = {
	.name			= "traceoff",
	.trigger_type		= ETT_TRACE_ONOFF,
	.flags			= EVENT_CMD_FL_POST_TRIGGER,
	.func			= event_trigger_callback,
	.reg			= register_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= onoff_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
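
/*
 * Example (illustrative) usage of the two commands above:
 *
 *	echo 'traceoff' > events/sched/sched_switch/trigger
 *	echo 'traceoff:5 if prev_comm == "bash"' > events/sched/sched_switch/trigger
 *	echo '!traceoff' > events/sched/sched_switch/trigger
 *
 * (paths relative to the tracefs mount point)
 */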
#ifdef CONFIG_TRACER_SNAPSHOT
static void
snapshot_trigger(struct event_trigger_data *data, void *rec,
		 struct ring_buffer_event *event)
{
	struct trace_event_file *file = data->private_data;

	if (file)
		tracing_snapshot_instance(file->tr);
	else
		tracing_snapshot();
}
static void
snapshot_count_trigger(struct event_trigger_data *data, void *rec,
		       struct ring_buffer_event *event)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	snapshot_trigger(data, rec, event);
}
static int
register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
			  struct event_trigger_data *data,
			  struct trace_event_file *file)
{
	int ret = register_trigger(glob, ops, data, file);

	if (ret > 0 && tracing_alloc_snapshot_instance(file->tr) != 0) {
		unregister_trigger(glob, ops, data, file);
		ret = 0;
	}

	return ret;
}
static int
snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("snapshot", m, (void *)data->count,
				   data->filter_str);
}
static struct event_trigger_ops snapshot_trigger_ops = {
	.func			= snapshot_trigger,
	.print			= snapshot_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

static struct event_trigger_ops snapshot_count_trigger_ops = {
	.func			= snapshot_count_trigger,
	.print			= snapshot_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};
static struct event_trigger_ops *
snapshot_get_trigger_ops(char *cmd, char *param)
{
	return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
}
static struct event_command trigger_snapshot_cmd = {
	.name			= "snapshot",
	.trigger_type		= ETT_SNAPSHOT,
	.func			= event_trigger_callback,
	.reg			= register_snapshot_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= snapshot_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
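
/*
 * Example (illustrative): the snapshot command above is typically used as
 *
 *	echo 'snapshot:1' > events/block/block_unplug/trigger
 *
 * register_snapshot_trigger() makes sure the per-instance snapshot
 * buffer is allocated before the trigger can fire.
 */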
static __init int register_trigger_snapshot_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_snapshot_cmd);
	WARN_ON(ret < 0);

	return ret;
}
#else
static __init int register_trigger_snapshot_cmd(void) { return 0; }
#endif /* CONFIG_TRACER_SNAPSHOT */
#ifdef CONFIG_STACKTRACE
#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 2:
 *   event_triggers_post_call()
 *   trace_event_raw_event_xxx()
 */
# define STACK_SKIP 2
#else
/*
 * Skip 4:
 *   stacktrace_trigger()
 *   event_triggers_post_call()
 *   trace_event_buffer_commit()
 *   trace_event_raw_event_xxx()
 */
#define STACK_SKIP 4
#endif
static void
stacktrace_trigger(struct event_trigger_data *data, void *rec,
		   struct ring_buffer_event *event)
{
	trace_dump_stack(STACK_SKIP);
}
static void
stacktrace_count_trigger(struct event_trigger_data *data, void *rec,
			 struct ring_buffer_event *event)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	stacktrace_trigger(data, rec, event);
}
static int
stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
			 struct event_trigger_data *data)
{
	return event_trigger_print("stacktrace", m, (void *)data->count,
				   data->filter_str);
}
static struct event_trigger_ops stacktrace_trigger_ops = {
	.func			= stacktrace_trigger,
	.print			= stacktrace_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

static struct event_trigger_ops stacktrace_count_trigger_ops = {
	.func			= stacktrace_count_trigger,
	.print			= stacktrace_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};
static struct event_trigger_ops *
stacktrace_get_trigger_ops(char *cmd, char *param)
{
	return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
}
static struct event_command trigger_stacktrace_cmd = {
	.name			= "stacktrace",
	.trigger_type		= ETT_STACKTRACE,
	.flags			= EVENT_CMD_FL_POST_TRIGGER,
	.func			= event_trigger_callback,
	.reg			= register_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= stacktrace_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
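
/*
 * Example (illustrative) usage of the stacktrace command above:
 *
 *	echo 'stacktrace' > events/kmem/kmalloc/trigger
 *	echo 'stacktrace:5 if bytes_req >= 65536' > events/kmem/kmalloc/trigger
 */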
static __init int register_trigger_stacktrace_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_stacktrace_cmd);
	WARN_ON(ret < 0);

	return ret;
}
#else
static __init int register_trigger_stacktrace_cmd(void) { return 0; }
#endif /* CONFIG_STACKTRACE */
static __init void unregister_trigger_traceon_traceoff_cmds(void)
{
	unregister_event_command(&trigger_traceon_cmd);
	unregister_event_command(&trigger_traceoff_cmd);
}
static void
event_enable_trigger(struct event_trigger_data *data, void *rec,
		     struct ring_buffer_event *event)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (enable_data->enable)
		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
	else
		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
}
static void
event_enable_count_trigger(struct event_trigger_data *data, void *rec,
			   struct ring_buffer_event *event)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (!data->count)
		return;

	/* Skip if the event is in a state we want to switch to */
	if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
		return;

	if (data->count != -1)
		(data->count)--;

	event_enable_trigger(data, rec, event);
}
int event_enable_trigger_print(struct seq_file *m,
			       struct event_trigger_ops *ops,
			       struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	seq_printf(m, "%s:%s:%s",
		   enable_data->hist ?
		   (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
		   (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
		   enable_data->file->event_call->class->system,
		   trace_event_name(enable_data->file->event_call));

	if (data->count == -1)
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", data->count);

	if (data->filter_str)
		seq_printf(m, " if %s\n", data->filter_str);
	else
		seq_putc(m, '\n');

	return 0;
}
void event_enable_trigger_free(struct event_trigger_ops *ops,
			       struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		/* Remove the SOFT_MODE flag */
		trace_event_enable_disable(enable_data->file, 0, 1);
		module_put(enable_data->file->event_call->mod);
		trigger_data_free(data);
		kfree(enable_data);
	}
}
static struct event_trigger_ops event_enable_trigger_ops = {
	.func			= event_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static struct event_trigger_ops event_enable_count_trigger_ops = {
	.func			= event_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static struct event_trigger_ops event_disable_trigger_ops = {
	.func			= event_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static struct event_trigger_ops event_disable_count_trigger_ops = {
	.func			= event_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};
int event_enable_trigger_func(struct event_command *cmd_ops,
			      struct trace_event_file *file,
			      char *glob, char *cmd, char *param)
{
	struct trace_event_file *event_enable_file;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	struct trace_array *tr = file->tr;
	const char *system;
	const char *event;
	bool hist = false;
	char *trigger;
	char *number;
	bool enable;
	int ret;

	if (!param)
		return -EINVAL;

	/* separate the trigger from the filter (s:e:n [if filter]) */
	trigger = strsep(&param, " \t");
	if (!trigger)
		return -EINVAL;

	system = strsep(&trigger, ":");
	if (!trigger)
		return -EINVAL;

	event = strsep(&trigger, ":");

	ret = -EINVAL;
	event_enable_file = find_event_file(tr, system, event);
	if (!event_enable_file)
		goto out;

#ifdef CONFIG_HIST_TRIGGERS
	hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
		(strcmp(cmd, DISABLE_HIST_STR) == 0));

	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
#else
	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
	if (!enable_data) {
		kfree(trigger_data);
		goto out;
	}

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	INIT_LIST_HEAD(&trigger_data->list);
	RCU_INIT_POINTER(trigger_data->filter, NULL);

	enable_data->hist = hist;
	enable_data->enable = enable;
	enable_data->file = event_enable_file;
	trigger_data->private_data = enable_data;

	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		kfree(enable_data);
		ret = 0;
		goto out;
	}

	/* Up the trigger_data count to make sure nothing frees it on failure */
	event_trigger_init(trigger_ops, trigger_data);

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	/* Don't let event modules unload while probe registered */
	ret = try_module_get(event_enable_file->event_call->mod);
	if (!ret) {
		ret = -EBUSY;
		goto out_free;
	}

	ret = trace_event_enable_disable(event_enable_file, 1, 1);
	if (ret < 0)
		goto out_put;

	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_disable;
	} else if (ret < 0)
		goto out_disable;
	/* Just return zero, not the number of enabled functions */
	ret = 0;
	event_trigger_free(trigger_ops, trigger_data);
 out:
	return ret;

 out_disable:
	trace_event_enable_disable(event_enable_file, 0, 1);
 out_put:
	module_put(event_enable_file->event_call->mod);
 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	event_trigger_free(trigger_ops, trigger_data);
	kfree(enable_data);
	goto out;
}
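
/*
 * Example (illustrative): the enable_event/disable_event syntax parsed
 * by event_enable_trigger_func() looks like
 *
 *	echo 'enable_event:kmem:kmalloc:1' > events/syscalls/sys_enter_read/trigger
 *
 * i.e. system:event plus an optional count, with an optional trailing
 * "if <filter>" applied to the triggering event.
 */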
int event_enable_register_trigger(char *glob,
				  struct event_trigger_ops *ops,
				  struct event_trigger_data *data,
				  struct trace_event_file *file)
{
	struct enable_trigger_data *enable_data = data->private_data;
	struct enable_trigger_data *test_enable_data;
	struct event_trigger_data *test;
	int ret = 0;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		test_enable_data = test->private_data;
		if (test_enable_data &&
		    (test->cmd_ops->trigger_type ==
		     data->cmd_ops->trigger_type) &&
		    (test_enable_data->file == enable_data->file)) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	update_cond_flag(file);
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
out:
	return ret;
}
void event_enable_unregister_trigger(char *glob,
				     struct event_trigger_ops *ops,
				     struct event_trigger_data *test,
				     struct trace_event_file *file)
{
	struct enable_trigger_data *test_enable_data = test->private_data;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *data;
	bool unregistered = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		enable_data = data->private_data;
		if (enable_data &&
		    (data->cmd_ops->trigger_type ==
		     test->cmd_ops->trigger_type) &&
		    (enable_data->file == test_enable_data->file)) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}
static struct event_trigger_ops *
event_enable_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;
	bool enable;

#ifdef CONFIG_HIST_TRIGGERS
	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
#else
	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
	if (enable)
		ops = param ? &event_enable_count_trigger_ops :
			&event_enable_trigger_ops;
	else
		ops = param ? &event_disable_count_trigger_ops :
			&event_disable_trigger_ops;

	return ops;
}
static struct event_command trigger_enable_cmd = {
	.name			= ENABLE_EVENT_STR,
	.trigger_type		= ETT_EVENT_ENABLE,
	.func			= event_enable_trigger_func,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.get_trigger_ops	= event_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

static struct event_command trigger_disable_cmd = {
	.name			= DISABLE_EVENT_STR,
	.trigger_type		= ETT_EVENT_ENABLE,
	.func			= event_enable_trigger_func,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.get_trigger_ops	= event_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
static __init void unregister_trigger_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_enable_cmd);
	unregister_event_command(&trigger_disable_cmd);
}
static __init int register_trigger_enable_disable_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_enable_disable_cmds();

	return ret;
}
static __init int register_trigger_traceon_traceoff_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_traceon_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_traceoff_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_traceon_traceoff_cmds();

	return ret;
}
__init int register_trigger_cmds(void)
{
	register_trigger_traceon_traceoff_cmds();
	register_trigger_snapshot_cmd();
	register_trigger_stacktrace_cmd();
	register_trigger_enable_disable_cmds();
	register_trigger_hist_enable_disable_cmds();
	register_trigger_hist_cmd();

	return 0;
}