// SPDX-License-Identifier: GPL-2.0
/*
 * trace_events_trigger - trace event triggers
 *
 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
 */

#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/rculist.h>

#include "trace.h"

static LIST_HEAD(trigger_commands);
static DEFINE_MUTEX(trigger_cmd_mutex);

void trigger_data_free(struct event_trigger_data *data)
{
	if (data->cmd_ops->set_filter)
		data->cmd_ops->set_filter(NULL, data, NULL);

	/* make sure current triggers exit before free */
	tracepoint_synchronize_unregister();

	kfree(data);
}

/**
 * event_triggers_call - Call triggers associated with a trace event
 * @file: The trace_event_file associated with the event
 * @rec: The trace entry for the event, NULL for unconditional invocation
 * @event: The ring buffer event for the trace entry, if available
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command.  If rec is
 * non-NULL, it means that the trigger requires further processing and
 * shouldn't be unconditionally invoked.  If rec is non-NULL and the
 * trigger has a filter associated with it, rec will be checked against
 * the filter and if the record matches the trigger will be invoked.
 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
 * in any case until the current event is written, the trigger
 * function isn't invoked but the bit associated with the deferred
 * trigger is set in the return value.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 *
 * Return: an enum event_trigger_type value containing a set bit for
 * any trigger that should be deferred, ETT_NONE if nothing to defer.
 */
enum event_trigger_type
event_triggers_call(struct trace_event_file *file, void *rec,
		    struct ring_buffer_event *event)
{
	struct event_trigger_data *data;
	enum event_trigger_type tt = ETT_NONE;
	struct event_filter *filter;

	if (list_empty(&file->triggers))
		return tt;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)
			continue;
		if (!rec) {
			data->ops->func(data, rec, event);
			continue;
		}
		filter = rcu_dereference_sched(data->filter);
		if (filter && !filter_match_preds(filter, rec))
			continue;
		if (event_command_post_trigger(data->cmd_ops)) {
			tt |= data->cmd_ops->trigger_type;
			continue;
		}
		data->ops->func(data, rec, event);
	}
	return tt;
}
EXPORT_SYMBOL_GPL(event_triggers_call);

/**
 * event_triggers_post_call - Call 'post_triggers' for a trace event
 * @file: The trace_event_file associated with the event
 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command, if the
 * corresponding bit is set in the tt enum passed into this function.
 * See @event_triggers_call for details on how those bits are set.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 */
void
event_triggers_post_call(struct trace_event_file *file,
			 enum event_trigger_type tt)
{
	struct event_trigger_data *data;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)
			continue;
		if (data->cmd_ops->trigger_type & tt)
			data->ops->func(data, NULL, NULL);
	}
}
EXPORT_SYMBOL_GPL(event_triggers_post_call);
#define SHOW_AVAILABLE_TRIGGERS (void *)(1UL)

static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
{
	struct trace_event_file *event_file = event_file_data(m->private);

	if (t == SHOW_AVAILABLE_TRIGGERS)
		return NULL;

	return seq_list_next(t, &event_file->triggers, pos);
}

static void *trigger_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *event_file;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	event_file = event_file_data(m->private);
	if (unlikely(!event_file))
		return ERR_PTR(-ENODEV);

	if (list_empty(&event_file->triggers))
		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;

	return seq_list_start(&event_file->triggers, *pos);
}

static void trigger_stop(struct seq_file *m, void *t)
{
	mutex_unlock(&event_mutex);
}
static int trigger_show(struct seq_file *m, void *v)
{
	struct event_trigger_data *data;
	struct event_command *p;

	if (v == SHOW_AVAILABLE_TRIGGERS) {
		seq_puts(m, "# Available triggers:\n");
		seq_putc(m, '#');
		mutex_lock(&trigger_cmd_mutex);
		list_for_each_entry_reverse(p, &trigger_commands, list)
			seq_printf(m, " %s", p->name);
		seq_putc(m, '\n');
		mutex_unlock(&trigger_cmd_mutex);
		return 0;
	}

	data = list_entry(v, struct event_trigger_data, list);
	data->ops->print(m, data->ops, data);

	return 0;
}

static const struct seq_operations event_triggers_seq_ops = {
	.start = trigger_start,
	.next = trigger_next,
	.stop = trigger_stop,
	.show = trigger_show,
};
static int event_trigger_regex_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	mutex_lock(&event_mutex);

	if (unlikely(!event_file_data(file))) {
		mutex_unlock(&event_mutex);
		return -ENODEV;
	}

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		struct trace_event_file *event_file;
		struct event_command *p;

		event_file = event_file_data(file);

		list_for_each_entry(p, &trigger_commands, list) {
			if (p->unreg_all)
				p->unreg_all(event_file);
		}
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &event_triggers_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = file;
		}
	}

	mutex_unlock(&event_mutex);

	return ret;
}

static int trigger_process_regex(struct trace_event_file *file, char *buff)
{
	char *command, *next = buff;
	struct event_command *p;
	int ret = -EINVAL;

	command = strsep(&next, ": \t");
	command = (command[0] != '!') ? command : command + 1;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry(p, &trigger_commands, list) {
		if (strcmp(p->name, command) == 0) {
			ret = p->func(p, file, buff, command, next);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}

static ssize_t event_trigger_regex_write(struct file *file,
					 const char __user *ubuf,
					 size_t cnt, loff_t *ppos)
{
	struct trace_event_file *event_file;
	ssize_t ret;
	char *buf;

	if (!cnt)
		return 0;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	strim(buf);

	mutex_lock(&event_mutex);
	event_file = event_file_data(file);
	if (unlikely(!event_file)) {
		mutex_unlock(&event_mutex);
		kfree(buf);
		return -ENODEV;
	}
	ret = trigger_process_regex(event_file, buf);
	mutex_unlock(&event_mutex);

	kfree(buf);
	if (ret < 0)
		goto out;

	*ppos += cnt;
	ret = cnt;
 out:
	return ret;
}
static int event_trigger_regex_release(struct inode *inode, struct file *file)
{
	mutex_lock(&event_mutex);

	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	mutex_unlock(&event_mutex);

	return 0;
}

static ssize_t
event_trigger_write(struct file *filp, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
}

static int
event_trigger_open(struct inode *inode, struct file *filp)
{
	return event_trigger_regex_open(inode, filp);
}

static int
event_trigger_release(struct inode *inode, struct file *file)
{
	return event_trigger_regex_release(inode, file);
}
const struct file_operations event_trigger_fops = {
	.open = event_trigger_open,
	.read = seq_read,
	.write = event_trigger_write,
	.llseek = tracing_lseek,
	.release = event_trigger_release,
};
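/*
 * event_trigger_fops backs the per-event 'trigger' file in tracefs.
 * Typical (illustrative) shell usage, as described in the events
 * documentation under Documentation/trace/:
 *
 *   echo 'traceoff:5 if prev_comm == "bash"' > \
 *     /sys/kernel/tracing/events/sched/sched_switch/trigger
 *   echo '!traceoff' > \
 *     /sys/kernel/tracing/events/sched/sched_switch/trigger
 *
 * Reading the file shows the currently active triggers, or the list of
 * available trigger commands if none are set.
 */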
/*
 * Currently we only register event commands from __init, so mark this
 * __init too.
 */
__init int register_event_command(struct event_command *cmd)
{
	struct event_command *p;
	int ret = 0;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry(p, &trigger_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}
	list_add(&cmd->list, &trigger_commands);
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}

/*
 * Currently we only unregister event commands from __init, so mark
 * this __init too.
 */
__init int unregister_event_command(struct event_command *cmd)
{
	struct event_command *p, *n;
	int ret = -ENODEV;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry_safe(p, n, &trigger_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = 0;
			list_del_init(&p->list);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}

/**
 * event_trigger_print - Generic event_trigger_ops @print implementation
 * @name: The name of the event trigger
 * @m: The seq_file being printed to
 * @data: Trigger-specific data
 * @filter_str: filter_str to print, if present
 *
 * Common implementation for event triggers to print themselves.
 *
 * Usually wrapped by a function that simply sets the @name of the
 * trigger command and then invokes this.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_print(const char *name, struct seq_file *m,
		    void *data, char *filter_str)
{
	long count = (long)data;

	seq_puts(m, name);

	if (count == -1)
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", count);

	if (filter_str)
		seq_printf(m, " if %s\n", filter_str);
	else
		seq_putc(m, '\n');

	return 0;
}
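/*
 * For example, a trigger registered as 'traceoff:5 if pid == 128' would
 * be printed via this helper as:
 *
 *   traceoff:count=5 if pid == 128
 *
 * while an uncounted trigger prints as 'traceoff:unlimited'.
 */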
/**
 * event_trigger_init - Generic event_trigger_ops @init implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger initialization.
 *
 * Usually used directly as the @init method in event trigger
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
int event_trigger_init(struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	data->ref++;
	return 0;
}

/**
 * event_trigger_free - Generic event_trigger_ops @free implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger de-initialization.
 *
 * Usually used directly as the @free method in event trigger
 * implementations.
 */
static void
event_trigger_free(struct event_trigger_ops *ops,
		   struct event_trigger_data *data)
{
	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref)
		trigger_data_free(data);
}

int trace_event_trigger_enable_disable(struct trace_event_file *file,
					int trigger_enable)
{
	int ret = 0;

	if (trigger_enable) {
		if (atomic_inc_return(&file->tm_ref) > 1)
			return ret;
		set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 1, 1);
	} else {
		if (atomic_dec_return(&file->tm_ref) > 0)
			return ret;
		clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 0, 1);
	}

	return ret;
}
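/*
 * Note: file->tm_ref counts the triggers attached to the event, so
 * TRIGGER_MODE is set when the first trigger is added and cleared only
 * when the last one is removed; the event itself is soft-enabled or
 * soft-disabled accordingly via trace_event_enable_disable().
 */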
/**
 * clear_event_triggers - Clear all triggers associated with a trace array
 * @tr: The trace array to clear
 *
 * For each trigger, the triggering event has its tm_ref decremented
 * via trace_event_trigger_enable_disable(), and any associated event
 * (in the case of enable/disable_event triggers) will have its sm_ref
 * decremented via free()->trace_event_enable_disable().  That
 * combination effectively reverses the soft-mode/trigger state added
 * by trigger registration.
 *
 * Must be called with event_mutex held.
 */
void
clear_event_triggers(struct trace_array *tr)
{
	struct trace_event_file *file;

	list_for_each_entry(file, &tr->events, list) {
		struct event_trigger_data *data, *n;
		list_for_each_entry_safe(data, n, &file->triggers, list) {
			trace_event_trigger_enable_disable(file, 0);
			list_del_rcu(&data->list);
			if (data->ops->free)
				data->ops->free(data->ops, data);
		}
	}
}

/**
 * update_cond_flag - Set or reset the TRIGGER_COND bit
 * @file: The trace_event_file associated with the event
 *
 * If an event has triggers and any of those triggers has a filter or
 * a post_trigger, trigger invocation needs to be deferred until after
 * the current event has logged its data, and the event should have
 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
 * cleared.
 */
void update_cond_flag(struct trace_event_file *file)
{
	struct event_trigger_data *data;
	bool set_cond = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->filter || event_command_post_trigger(data->cmd_ops) ||
		    event_command_needs_rec(data->cmd_ops)) {
			set_cond = true;
			break;
		}
	}

	if (set_cond)
		set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
	else
		clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
}

/**
 * register_trigger - Generic event_command @reg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data to associate with the trigger
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger registration.
 *
 * Usually used directly as the @reg method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int register_trigger(char *glob, struct event_trigger_ops *ops,
			    struct event_trigger_data *data,
			    struct trace_event_file *file)
{
	struct event_trigger_data *test;
	int ret = 0;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	update_cond_flag(file);
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
out:
	return ret;
}
/**
 * unregister_trigger - Generic event_command @unreg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @test: Trigger-specific data used to find the trigger to remove
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger unregistration.
 *
 * Usually used directly as the @unreg method in event command
 * implementations.
 */
static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
			       struct event_trigger_data *test,
			       struct trace_event_file *file)
{
	struct event_trigger_data *data;
	bool unregistered = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}

/**
 * event_trigger_callback - Generic event_command @func implementation
 * @cmd_ops: The command ops, used for trigger registration
 * @file: The trace_event_file associated with the event
 * @glob: The raw string used to register the trigger
 * @cmd: The cmd portion of the string used to register the trigger
 * @param: The params portion of the string used to register the trigger
 *
 * Common implementation for event command parsing and trigger
 * instantiation.
 *
 * Usually used directly as the @func method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_callback(struct event_command *cmd_ops,
		       struct trace_event_file *file,
		       char *glob, char *cmd, char *param)
{
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	char *trigger = NULL;
	char *number;
	int ret;

	/* separate the trigger from the filter (t:n [if filter]) */
	if (param && isdigit(param[0]))
		trigger = strsep(&param, " \t");

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	trigger_data->private_data = file;
	INIT_LIST_HEAD(&trigger_data->list);
	INIT_LIST_HEAD(&trigger_data->named_list);

	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		ret = 0;
		goto out;
	}

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	/* Up the trigger_data count to make sure reg doesn't free it on failure */
	event_trigger_init(trigger_ops, trigger_data);
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
		ret = -ENOENT;
	} else if (ret > 0)
		ret = 0;

	/* Down the counter of trigger_data or free it if not used anymore */
	event_trigger_free(trigger_ops, trigger_data);
 out:
	return ret;

 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	kfree(trigger_data);
	goto out;
}
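/*
 * Worked example (illustrative): for the written string
 * 'traceoff:5 if pid == 128', trigger_process_regex() passes
 * glob = "traceoff", cmd = "traceoff" and param = "5 if pid == 128".
 * event_trigger_callback() then splits off trigger = "5" (becoming
 * trigger_data->count) and hands the remaining "if pid == 128" to
 * cmd_ops->set_filter().
 */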
/**
 * set_trigger_filter - Generic event_command @set_filter implementation
 * @filter_str: The filter string for the trigger, NULL to remove filter
 * @trigger_data: Trigger-specific data
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event command filter parsing and filter
 * instantiation.
 *
 * Usually used directly as the @set_filter method in event command
 * implementations.
 *
 * Also used to remove a filter (if filter_str = NULL).
 *
 * Return: 0 on success, errno otherwise
 */
int set_trigger_filter(char *filter_str,
		       struct event_trigger_data *trigger_data,
		       struct trace_event_file *file)
{
	struct event_trigger_data *data = trigger_data;
	struct event_filter *filter = NULL, *tmp;
	int ret = -EINVAL;
	char *s;

	if (!filter_str) /* clear the current filter */
		goto assign;

	s = strsep(&filter_str, " \t");

	if (!strlen(s) || strcmp(s, "if") != 0)
		goto out;

	if (!filter_str)
		goto out;

	/* The filter is for the 'trigger' event, not the triggered event */
	ret = create_event_filter(file->event_call, filter_str, false, &filter);
	/*
	 * If create_event_filter() fails, filter still needs to be freed.
	 * Which the calling code will do with data->filter.
	 */
 assign:
	tmp = rcu_access_pointer(data->filter);

	rcu_assign_pointer(data->filter, filter);

	if (tmp) {
		/* Make sure the call is done with the filter */
		tracepoint_synchronize_unregister();
		free_event_filter(tmp);
	}

	kfree(data->filter_str);
	data->filter_str = NULL;

	if (filter_str) {
		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
		if (!data->filter_str) {
			free_event_filter(rcu_access_pointer(data->filter));
			return -ENOMEM;
		}
	}
 out:
	return ret;
}
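/*
 * For example, given 'traceoff if pid == 128' the "if" token is consumed
 * here and 'pid == 128' is compiled into a filter against the triggering
 * event's fields; calling this with a NULL filter_str simply clears any
 * existing filter.
 */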
static LIST_HEAD(named_triggers);

/**
 * find_named_trigger - Find the common named trigger associated with @name
 * @name: The name of the set of named triggers to find the common data for
 *
 * Named triggers are sets of triggers that share a common set of
 * trigger data.  The first named trigger registered with a given name
 * owns the common trigger data that the others subsequently
 * registered with the same name will reference.  This function
 * returns the common trigger data associated with that first
 * registered instance.
 *
 * Return: the common trigger data for the given named trigger on
 * success, NULL otherwise.
 */
struct event_trigger_data *find_named_trigger(const char *name)
{
	struct event_trigger_data *data;

	if (!name)
		return NULL;

	list_for_each_entry(data, &named_triggers, named_list) {
		if (data->named_data)
			continue;
		if (strcmp(data->name, name) == 0)
			return data;
	}

	return NULL;
}

/**
 * is_named_trigger - determine if a given trigger is a named trigger
 * @test: The trigger data to test
 *
 * Return: true if 'test' is a named trigger, false otherwise.
 */
bool is_named_trigger(struct event_trigger_data *test)
{
	struct event_trigger_data *data;

	list_for_each_entry(data, &named_triggers, named_list) {
		if (test == data)
			return true;
	}

	return false;
}

/**
 * save_named_trigger - save the trigger in the named trigger list
 * @name: The name of the named trigger set
 * @data: The trigger data to save
 *
 * Return: 0 if successful, negative error otherwise.
 */
int save_named_trigger(const char *name, struct event_trigger_data *data)
{
	data->name = kstrdup(name, GFP_KERNEL);
	if (!data->name)
		return -ENOMEM;

	list_add(&data->named_list, &named_triggers);

	return 0;
}

/**
 * del_named_trigger - delete a trigger from the named trigger list
 * @data: The trigger data to delete
 */
void del_named_trigger(struct event_trigger_data *data)
{
	kfree(data->name);
	data->name = NULL;

	list_del(&data->named_list);
}

static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
{
	struct event_trigger_data *test;

	list_for_each_entry(test, &named_triggers, named_list) {
		if (strcmp(test->name, data->name) == 0) {
			if (pause) {
				test->paused_tmp = test->paused;
				test->paused = true;
			} else
				test->paused = test->paused_tmp;
		}
	}
}

/**
 * pause_named_trigger - Pause all named triggers with the same name
 * @data: The trigger data of a named trigger to pause
 *
 * Pauses a named trigger along with all other triggers having the
 * same name.  Because named triggers share a common set of data,
 * pausing only one is meaningless, so pausing one named trigger needs
 * to pause all triggers with the same name.
 */
void pause_named_trigger(struct event_trigger_data *data)
{
	__pause_named_trigger(data, true);
}

/**
 * unpause_named_trigger - Un-pause all named triggers with the same name
 * @data: The trigger data of a named trigger to unpause
 *
 * Un-pauses a named trigger along with all other triggers having the
 * same name.  Because named triggers share a common set of data,
 * unpausing only one is meaningless, so unpausing one named trigger
 * needs to unpause all triggers with the same name.
 */
void unpause_named_trigger(struct event_trigger_data *data)
{
	__pause_named_trigger(data, false);
}

/**
 * set_named_trigger_data - Associate common named trigger data
 * @data: The trigger data to associate
 * @named_data: The common named trigger to be associated
 *
 * Named triggers are sets of triggers that share a common set of
 * trigger data.  The first named trigger registered with a given name
 * owns the common trigger data that the others subsequently
 * registered with the same name will reference.  This function
 * associates the common trigger data from the first trigger with the
 * given trigger.
 */
void set_named_trigger_data(struct event_trigger_data *data,
			    struct event_trigger_data *named_data)
{
	data->named_data = named_data;
}

struct event_trigger_data *
get_named_trigger_data(struct event_trigger_data *data)
{
	return data->named_data;
}
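/*
 * Named triggers are currently used by hist triggers: registering e.g.
 * 'hist:keys=call_site:name=foo' on two different events makes both
 * events update the same shared histogram, with the pause/cont state
 * kept in sync through the helpers above.  (Illustrative; the hist
 * command itself lives in trace_events_hist.c.)
 */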
static void
traceon_trigger(struct event_trigger_data *data, void *rec,
		struct ring_buffer_event *event)
{
	if (tracing_is_on())
		return;

	tracing_on();
}

static void
traceon_count_trigger(struct event_trigger_data *data, void *rec,
		      struct ring_buffer_event *event)
{
	if (tracing_is_on())
		return;

	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	tracing_on();
}

static void
traceoff_trigger(struct event_trigger_data *data, void *rec,
		 struct ring_buffer_event *event)
{
	if (!tracing_is_on())
		return;

	tracing_off();
}

static void
traceoff_count_trigger(struct event_trigger_data *data, void *rec,
		       struct ring_buffer_event *event)
{
	if (!tracing_is_on())
		return;

	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	tracing_off();
}

static int
traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		      struct event_trigger_data *data)
{
	return event_trigger_print("traceon", m, (void *)data->count,
				   data->filter_str);
}

static int
traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("traceoff", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops traceon_trigger_ops = {
	.func			= traceon_trigger,
	.print			= traceon_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

static struct event_trigger_ops traceon_count_trigger_ops = {
	.func			= traceon_count_trigger,
	.print			= traceon_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

static struct event_trigger_ops traceoff_trigger_ops = {
	.func			= traceoff_trigger,
	.print			= traceoff_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

static struct event_trigger_ops traceoff_count_trigger_ops = {
	.func			= traceoff_count_trigger,
	.print			= traceoff_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

static struct event_trigger_ops *
onoff_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_trigger_ops :
			&traceon_trigger_ops;
	else
		ops = param ? &traceoff_count_trigger_ops :
			&traceoff_trigger_ops;

	return ops;
}

static struct event_command trigger_traceon_cmd = {
	.name			= "traceon",
	.trigger_type		= ETT_TRACE_ONOFF,
	.func			= event_trigger_callback,
	.reg			= register_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= onoff_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

static struct event_command trigger_traceoff_cmd = {
	.name			= "traceoff",
	.trigger_type		= ETT_TRACE_ONOFF,
	.flags			= EVENT_CMD_FL_POST_TRIGGER,
	.func			= event_trigger_callback,
	.reg			= register_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= onoff_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
#ifdef CONFIG_TRACER_SNAPSHOT
static void
snapshot_trigger(struct event_trigger_data *data, void *rec,
		 struct ring_buffer_event *event)
{
	struct trace_event_file *file = data->private_data;

	if (file)
		tracing_snapshot_instance(file->tr);
	else
		tracing_snapshot();
}

static void
snapshot_count_trigger(struct event_trigger_data *data, void *rec,
		       struct ring_buffer_event *event)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	snapshot_trigger(data, rec, event);
}

static int
register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
			  struct event_trigger_data *data,
			  struct trace_event_file *file)
{
	int ret = register_trigger(glob, ops, data, file);

	if (ret > 0 && tracing_alloc_snapshot_instance(file->tr) != 0) {
		unregister_trigger(glob, ops, data, file);
		ret = 0;
	}

	return ret;
}

static int
snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("snapshot", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops snapshot_trigger_ops = {
	.func			= snapshot_trigger,
	.print			= snapshot_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

static struct event_trigger_ops snapshot_count_trigger_ops = {
	.func			= snapshot_count_trigger,
	.print			= snapshot_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

static struct event_trigger_ops *
snapshot_get_trigger_ops(char *cmd, char *param)
{
	return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
}

static struct event_command trigger_snapshot_cmd = {
	.name			= "snapshot",
	.trigger_type		= ETT_SNAPSHOT,
	.func			= event_trigger_callback,
	.reg			= register_snapshot_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= snapshot_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
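/*
 * Illustrative usage: 'echo snapshot:1 > events/block/block_unplug/trigger'
 * captures a snapshot of the trace buffer the first time that event fires;
 * the result can then be read back from the instance's 'snapshot' file.
 */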
static __init int register_trigger_snapshot_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_snapshot_cmd);
	WARN_ON(ret < 0);

	return ret;
}
#else
static __init int register_trigger_snapshot_cmd(void) { return 0; }
#endif /* CONFIG_TRACER_SNAPSHOT */

#ifdef CONFIG_STACKTRACE
#ifdef CONFIG_UNWINDER_ORC
/* Skip 2:
 *   event_triggers_post_call()
 *   trace_event_raw_event_xxx()
 */
# define STACK_SKIP 2
#else
/*
 * Skip 4:
 *   stacktrace_trigger()
 *   event_triggers_post_call()
 *   trace_event_buffer_commit()
 *   trace_event_raw_event_xxx()
 */
#define STACK_SKIP 4
#endif

static void
stacktrace_trigger(struct event_trigger_data *data, void *rec,
		   struct ring_buffer_event *event)
{
	trace_dump_stack(STACK_SKIP);
}

static void
stacktrace_count_trigger(struct event_trigger_data *data, void *rec,
			 struct ring_buffer_event *event)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	stacktrace_trigger(data, rec, event);
}

static int
stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
			 struct event_trigger_data *data)
{
	return event_trigger_print("stacktrace", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops stacktrace_trigger_ops = {
	.func			= stacktrace_trigger,
	.print			= stacktrace_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

static struct event_trigger_ops stacktrace_count_trigger_ops = {
	.func			= stacktrace_count_trigger,
	.print			= stacktrace_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

static struct event_trigger_ops *
stacktrace_get_trigger_ops(char *cmd, char *param)
{
	return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
}

static struct event_command trigger_stacktrace_cmd = {
	.name			= "stacktrace",
	.trigger_type		= ETT_STACKTRACE,
	.flags			= EVENT_CMD_FL_POST_TRIGGER,
	.func			= event_trigger_callback,
	.reg			= register_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= stacktrace_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
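/*
 * Illustrative usage:
 *
 *   echo 'stacktrace:5 if bytes_req > 4096' > \
 *     /sys/kernel/tracing/events/kmem/kmalloc/trigger
 *
 * dumps a kernel stack trace into the trace buffer for the first five
 * kmalloc events that match the filter.
 */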
static __init int register_trigger_stacktrace_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_stacktrace_cmd);
	WARN_ON(ret < 0);

	return ret;
}
#else
static __init int register_trigger_stacktrace_cmd(void) { return 0; }
#endif /* CONFIG_STACKTRACE */

static __init void unregister_trigger_traceon_traceoff_cmds(void)
{
	unregister_event_command(&trigger_traceon_cmd);
	unregister_event_command(&trigger_traceoff_cmd);
}

static void
event_enable_trigger(struct event_trigger_data *data, void *rec,
		     struct ring_buffer_event *event)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (enable_data->enable)
		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
	else
		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
}

static void
event_enable_count_trigger(struct event_trigger_data *data, void *rec,
			   struct ring_buffer_event *event)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (!data->count)
		return;

	/* Skip if the event is in a state we want to switch to */
	if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
		return;

	if (data->count != -1)
		(data->count)--;

	event_enable_trigger(data, rec, event);
}

int event_enable_trigger_print(struct seq_file *m,
			       struct event_trigger_ops *ops,
			       struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	seq_printf(m, "%s:%s:%s",
		   enable_data->hist ?
		   (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
		   (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
		   enable_data->file->event_call->class->system,
		   trace_event_name(enable_data->file->event_call));

	if (data->count == -1)
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", data->count);

	if (data->filter_str)
		seq_printf(m, " if %s\n", data->filter_str);
	else
		seq_putc(m, '\n');

	return 0;
}

void event_enable_trigger_free(struct event_trigger_ops *ops,
			       struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		/* Remove the SOFT_MODE flag */
		trace_event_enable_disable(enable_data->file, 0, 1);
		module_put(enable_data->file->event_call->mod);
		trigger_data_free(data);
		kfree(enable_data);
	}
}

static struct event_trigger_ops event_enable_trigger_ops = {
	.func			= event_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static struct event_trigger_ops event_enable_count_trigger_ops = {
	.func			= event_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static struct event_trigger_ops event_disable_trigger_ops = {
	.func			= event_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static struct event_trigger_ops event_disable_count_trigger_ops = {
	.func			= event_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};
int event_enable_trigger_func(struct event_command *cmd_ops,
			      struct trace_event_file *file,
			      char *glob, char *cmd, char *param)
{
	struct trace_event_file *event_enable_file;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	struct trace_array *tr = file->tr;
	const char *system;
	const char *event;
	bool hist = false;
	char *trigger;
	char *number;
	bool enable;
	int ret;

	if (!param)
		return -EINVAL;

	/* separate the trigger from the filter (s:e:n [if filter]) */
	trigger = strsep(&param, " \t");
	if (!trigger)
		return -EINVAL;

	system = strsep(&trigger, ":");
	if (!trigger)
		return -EINVAL;

	event = strsep(&trigger, ":");

	ret = -EINVAL;
	event_enable_file = find_event_file(tr, system, event);
	if (!event_enable_file)
		goto out;

#ifdef CONFIG_HIST_TRIGGERS
	hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
		(strcmp(cmd, DISABLE_HIST_STR) == 0));

	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
#else
	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
	if (!enable_data) {
		kfree(trigger_data);
		goto out;
	}

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	INIT_LIST_HEAD(&trigger_data->list);
	RCU_INIT_POINTER(trigger_data->filter, NULL);

	enable_data->hist = hist;
	enable_data->enable = enable;
	enable_data->file = event_enable_file;
	trigger_data->private_data = enable_data;

	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		kfree(enable_data);
		ret = 0;
		goto out;
	}

	/* Up the trigger_data count to make sure nothing frees it on failure */
	event_trigger_init(trigger_ops, trigger_data);

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	/* Don't let event modules unload while probe registered */
	ret = try_module_get(event_enable_file->event_call->mod);
	if (!ret) {
		ret = -EBUSY;
		goto out_free;
	}

	ret = trace_event_enable_disable(event_enable_file, 1, 1);
	if (ret < 0)
		goto out_put;
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_disable;
	} else if (ret < 0)
		goto out_disable;
	/* Just return zero, not the number of enabled functions */
	ret = 0;
	event_trigger_free(trigger_ops, trigger_data);
 out:
	return ret;

 out_disable:
	trace_event_enable_disable(event_enable_file, 0, 1);
 out_put:
	module_put(event_enable_file->event_call->mod);
 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	event_trigger_free(trigger_ops, trigger_data);
	kfree(enable_data);
	goto out;
}
int event_enable_register_trigger(char *glob,
				  struct event_trigger_ops *ops,
				  struct event_trigger_data *data,
				  struct trace_event_file *file)
{
	struct enable_trigger_data *enable_data = data->private_data;
	struct enable_trigger_data *test_enable_data;
	struct event_trigger_data *test;
	int ret = 0;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		test_enable_data = test->private_data;
		if (test_enable_data &&
		    (test->cmd_ops->trigger_type ==
		     data->cmd_ops->trigger_type) &&
		    (test_enable_data->file == enable_data->file)) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	update_cond_flag(file);
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
out:
	return ret;
}

void event_enable_unregister_trigger(char *glob,
				     struct event_trigger_ops *ops,
				     struct event_trigger_data *test,
				     struct trace_event_file *file)
{
	struct enable_trigger_data *test_enable_data = test->private_data;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *data;
	bool unregistered = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		enable_data = data->private_data;
		if (enable_data &&
		    (data->cmd_ops->trigger_type ==
		     test->cmd_ops->trigger_type) &&
		    (enable_data->file == test_enable_data->file)) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}

static struct event_trigger_ops *
event_enable_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;
	bool enable;

#ifdef CONFIG_HIST_TRIGGERS
	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
#else
	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
	if (enable)
		ops = param ? &event_enable_count_trigger_ops :
			&event_enable_trigger_ops;
	else
		ops = param ? &event_disable_count_trigger_ops :
			&event_disable_trigger_ops;

	return ops;
}

static struct event_command trigger_enable_cmd = {
	.name			= ENABLE_EVENT_STR,
	.trigger_type		= ETT_EVENT_ENABLE,
	.func			= event_enable_trigger_func,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.get_trigger_ops	= event_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

static struct event_command trigger_disable_cmd = {
	.name			= DISABLE_EVENT_STR,
	.trigger_type		= ETT_EVENT_ENABLE,
	.func			= event_enable_trigger_func,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.get_trigger_ops	= event_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
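/*
 * Illustrative usage (from the tracing documentation):
 *
 *   echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \
 *     /sys/kernel/tracing/events/block/block_unplug/trigger
 *
 * soft-enables the kmem:kmalloc event for the next three block_unplug
 * events whose nr_rq field is greater than 1; 'disable_event' works the
 * same way in the opposite direction.
 */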
static __init void unregister_trigger_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_enable_cmd);
	unregister_event_command(&trigger_disable_cmd);
}

static __init int register_trigger_enable_disable_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_enable_disable_cmds();

	return ret;
}

static __init int register_trigger_traceon_traceoff_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_traceon_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_traceoff_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_traceon_traceoff_cmds();

	return ret;
}

__init int register_trigger_cmds(void)
{
	register_trigger_traceon_traceoff_cmds();
	register_trigger_snapshot_cmd();
	register_trigger_stacktrace_cmd();
	register_trigger_enable_disable_cmds();
	register_trigger_hist_enable_disable_cmds();
	register_trigger_hist_cmd();

	return 0;
}