// SPDX-License-Identifier: GPL-2.0
/*
 * trace_events_trigger - trace event triggers
 *
 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
 */

#include <linux/security.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/rculist.h>

#include "trace.h"

static LIST_HEAD(trigger_commands);
static DEFINE_MUTEX(trigger_cmd_mutex);
void trigger_data_free(struct event_trigger_data *data)
{
        if (data->cmd_ops->set_filter)
                data->cmd_ops->set_filter(NULL, data, NULL);

        /* make sure current triggers exit before free */
        tracepoint_synchronize_unregister();

        kfree(data);
}
/**
 * event_triggers_call - Call triggers associated with a trace event
 * @file: The trace_event_file associated with the event
 * @rec: The trace entry for the event, NULL for unconditional invocation
 * @event: The ring buffer event associated with @rec, if any
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command.  If rec is
 * non-NULL, it means that the trigger requires further processing and
 * shouldn't be unconditionally invoked.  If rec is non-NULL and the
 * trigger has a filter associated with it, rec will be checked against
 * the filter, and if the record matches, the trigger will be invoked.
 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
 * in any case until the current event is written, the trigger
 * function isn't invoked but the bit associated with the deferred
 * trigger is set in the return value.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 *
 * Return: an enum event_trigger_type value containing a set bit for
 * any trigger that should be deferred, ETT_NONE if nothing to defer.
 */
enum event_trigger_type
event_triggers_call(struct trace_event_file *file, void *rec,
                    struct ring_buffer_event *event)
{
        struct event_trigger_data *data;
        enum event_trigger_type tt = ETT_NONE;
        struct event_filter *filter;

        if (list_empty(&file->triggers))
                return tt;

        list_for_each_entry_rcu(data, &file->triggers, list) {
                if (data->paused)
                        continue;
                if (!rec) {
                        data->ops->func(data, rec, event);
                        continue;
                }
                filter = rcu_dereference_sched(data->filter);
                if (filter && !filter_match_preds(filter, rec))
                        continue;
                if (event_command_post_trigger(data->cmd_ops)) {
                        tt |= data->cmd_ops->trigger_type;
                        continue;
                }
                data->ops->func(data, rec, event);
        }
        return tt;
}
EXPORT_SYMBOL_GPL(event_triggers_call);
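
/*
 * Illustrative sketch only (not part of the original file): a
 * hypothetical caller on the event-commit path is assumed to pair the
 * two phases roughly as:
 *
 *	enum event_trigger_type tt;
 *
 *	tt = event_triggers_call(file, entry, event);
 *	// ... write the event record to the ring buffer ...
 *	if (tt)
 *		event_triggers_post_call(file, tt);
 *
 * Non-deferred triggers fire inside event_triggers_call(); 'post'
 * triggers, which need the record written first, are only flagged in
 * the returned bitmask and fired later by event_triggers_post_call().
 */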
/**
 * event_triggers_post_call - Call 'post_triggers' for a trace event
 * @file: The trace_event_file associated with the event
 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command, if the
 * corresponding bit is set in the tt enum passed into this function.
 * See @event_triggers_call for details on how those bits are set.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 */
void
event_triggers_post_call(struct trace_event_file *file,
                         enum event_trigger_type tt)
{
        struct event_trigger_data *data;

        list_for_each_entry_rcu(data, &file->triggers, list) {
                if (data->paused)
                        continue;
                if (data->cmd_ops->trigger_type & tt)
                        data->ops->func(data, NULL, NULL);
        }
}
EXPORT_SYMBOL_GPL(event_triggers_post_call);
#define SHOW_AVAILABLE_TRIGGERS	(void *)(1UL)

static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
{
        struct trace_event_file *event_file = event_file_data(m->private);

        if (t == SHOW_AVAILABLE_TRIGGERS) {
                (*pos)++;
                return NULL;
        }
        return seq_list_next(t, &event_file->triggers, pos);
}
static void *trigger_start(struct seq_file *m, loff_t *pos)
{
        struct trace_event_file *event_file;

        /* ->stop() is called even if ->start() fails */
        mutex_lock(&event_mutex);
        event_file = event_file_data(m->private);
        if (unlikely(!event_file))
                return ERR_PTR(-ENODEV);

        if (list_empty(&event_file->triggers))
                return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;

        return seq_list_start(&event_file->triggers, *pos);
}

static void trigger_stop(struct seq_file *m, void *t)
{
        mutex_unlock(&event_mutex);
}
static int trigger_show(struct seq_file *m, void *v)
{
        struct event_trigger_data *data;
        struct event_command *p;

        if (v == SHOW_AVAILABLE_TRIGGERS) {
                seq_puts(m, "# Available triggers:\n");
                seq_putc(m, '#');
                mutex_lock(&trigger_cmd_mutex);
                list_for_each_entry_reverse(p, &trigger_commands, list)
                        seq_printf(m, " %s", p->name);
                seq_putc(m, '\n');
                mutex_unlock(&trigger_cmd_mutex);
                return 0;
        }

        data = list_entry(v, struct event_trigger_data, list);
        data->ops->print(m, data->ops, data);

        return 0;
}
static const struct seq_operations event_triggers_seq_ops = {
        .start = trigger_start,
        .next = trigger_next,
        .stop = trigger_stop,
        .show = trigger_show,
};
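
/*
 * Illustrative only: with no triggers set, reading an event's 'trigger'
 * file goes through the SHOW_AVAILABLE_TRIGGERS path above and lists
 * the registered commands, e.g. (exact set depends on kernel config):
 *
 *	# cat /sys/kernel/tracing/events/sched/sched_switch/trigger
 *	# Available triggers:
 *	# traceon traceoff snapshot stacktrace enable_event disable_event
 */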
static int event_trigger_regex_open(struct inode *inode, struct file *file)
{
        int ret;

        ret = security_locked_down(LOCKDOWN_TRACEFS);
        if (ret)
                return ret;

        mutex_lock(&event_mutex);

        if (unlikely(!event_file_data(file))) {
                mutex_unlock(&event_mutex);
                return -ENODEV;
        }

        if ((file->f_mode & FMODE_WRITE) &&
            (file->f_flags & O_TRUNC)) {
                struct trace_event_file *event_file;
                struct event_command *p;

                event_file = event_file_data(file);

                list_for_each_entry(p, &trigger_commands, list) {
                        if (p->unreg_all)
                                p->unreg_all(event_file);
                }
        }

        if (file->f_mode & FMODE_READ) {
                ret = seq_open(file, &event_triggers_seq_ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = file;
                }
        }

        mutex_unlock(&event_mutex);

        return ret;
}
int trigger_process_regex(struct trace_event_file *file, char *buff)
{
        char *command, *next = buff;
        struct event_command *p;
        int ret = -EINVAL;

        command = strsep(&next, ": \t");
        command = (command[0] != '!') ? command : command + 1;

        mutex_lock(&trigger_cmd_mutex);
        list_for_each_entry(p, &trigger_commands, list) {
                if (strcmp(p->name, command) == 0) {
                        ret = p->func(p, file, buff, command, next);
                        goto out_unlock;
                }
        }
 out_unlock:
        mutex_unlock(&trigger_cmd_mutex);

        return ret;
}
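
/*
 * Illustrative only: the strings parsed above come from writes to an
 * event's 'trigger' file; a leading '!' selects removal, e.g.:
 *
 *	echo 'stacktrace' > /sys/kernel/tracing/events/sched/sched_switch/trigger
 *	echo '!stacktrace' > /sys/kernel/tracing/events/sched/sched_switch/trigger
 */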
static ssize_t event_trigger_regex_write(struct file *file,
                                         const char __user *ubuf,
                                         size_t cnt, loff_t *ppos)
{
        struct trace_event_file *event_file;
        ssize_t ret;
        char *buf;

        if (!cnt)
                return 0;

        if (cnt >= PAGE_SIZE)
                return -EINVAL;

        buf = memdup_user_nul(ubuf, cnt);
        if (IS_ERR(buf))
                return PTR_ERR(buf);

        strim(buf);

        mutex_lock(&event_mutex);
        event_file = event_file_data(file);
        if (unlikely(!event_file)) {
                mutex_unlock(&event_mutex);
                kfree(buf);
                return -ENODEV;
        }
        ret = trigger_process_regex(event_file, buf);
        mutex_unlock(&event_mutex);

        kfree(buf);
        if (ret < 0)
                goto out;

        *ppos += cnt;
        ret = cnt;
 out:
        return ret;
}
static int event_trigger_regex_release(struct inode *inode, struct file *file)
{
        mutex_lock(&event_mutex);

        if (file->f_mode & FMODE_READ)
                seq_release(inode, file);

        mutex_unlock(&event_mutex);

        return 0;
}

static ssize_t
event_trigger_write(struct file *filp, const char __user *ubuf,
                    size_t cnt, loff_t *ppos)
{
        return event_trigger_regex_write(filp, ubuf, cnt, ppos);
}

static int
event_trigger_open(struct inode *inode, struct file *filp)
{
        /* Checks for tracefs lockdown */
        return event_trigger_regex_open(inode, filp);
}

static int
event_trigger_release(struct inode *inode, struct file *file)
{
        return event_trigger_regex_release(inode, file);
}
const struct file_operations event_trigger_fops = {
        .open = event_trigger_open,
        .read = seq_read,
        .write = event_trigger_write,
        .llseek = tracing_lseek,
        .release = event_trigger_release,
};
/*
 * Currently we only register event commands from __init, so mark this
 * __init too.
 */
__init int register_event_command(struct event_command *cmd)
{
        struct event_command *p;
        int ret = 0;

        mutex_lock(&trigger_cmd_mutex);
        list_for_each_entry(p, &trigger_commands, list) {
                if (strcmp(cmd->name, p->name) == 0) {
                        ret = -EBUSY;
                        goto out_unlock;
                }
        }
        list_add(&cmd->list, &trigger_commands);
 out_unlock:
        mutex_unlock(&trigger_cmd_mutex);

        return ret;
}
/*
 * Currently we only unregister event commands from __init, so mark
 * this __init too.
 */
__init int unregister_event_command(struct event_command *cmd)
{
        struct event_command *p, *n;
        int ret = -ENODEV;

        mutex_lock(&trigger_cmd_mutex);
        list_for_each_entry_safe(p, n, &trigger_commands, list) {
                if (strcmp(cmd->name, p->name) == 0) {
                        ret = 0;
                        list_del_init(&p->list);
                        goto out_unlock;
                }
        }
 out_unlock:
        mutex_unlock(&trigger_cmd_mutex);

        return ret;
}
/**
 * event_trigger_print - Generic event_trigger_ops @print implementation
 * @name: The name of the event trigger
 * @m: The seq_file being printed to
 * @data: Trigger-specific data
 * @filter_str: filter_str to print, if present
 *
 * Common implementation for event triggers to print themselves.
 *
 * Usually wrapped by a function that simply sets the @name of the
 * trigger command and then invokes this.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_print(const char *name, struct seq_file *m,
                    void *data, char *filter_str)
{
        long count = (long)data;

        seq_puts(m, name);

        if (count == -1)
                seq_puts(m, ":unlimited");
        else
                seq_printf(m, ":count=%ld", count);

        if (filter_str)
                seq_printf(m, " if %s\n", filter_str);
        else
                seq_putc(m, '\n');

        return 0;
}
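
/*
 * Illustrative only: event_trigger_print() is what makes a set trigger
 * read back as, for example:
 *
 *	traceoff:unlimited
 *	traceoff:count=5 if prev_pid == 0
 *
 * (the event field used in the filter is just an assumed example).
 */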
/**
 * event_trigger_init - Generic event_trigger_ops @init implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger initialization.
 *
 * Usually used directly as the @init method in event trigger
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
int event_trigger_init(struct event_trigger_ops *ops,
                       struct event_trigger_data *data)
{
        data->ref++;
        return 0;
}
/**
 * event_trigger_free - Generic event_trigger_ops @free implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger de-initialization.
 *
 * Usually used directly as the @free method in event trigger
 * implementations.
 */
static void
event_trigger_free(struct event_trigger_ops *ops,
                   struct event_trigger_data *data)
{
        if (WARN_ON_ONCE(data->ref <= 0))
                return;

        data->ref--;
        if (!data->ref)
                trigger_data_free(data);
}
int trace_event_trigger_enable_disable(struct trace_event_file *file,
                                       int trigger_enable)
{
        int ret = 0;

        if (trigger_enable) {
                if (atomic_inc_return(&file->tm_ref) > 1)
                        return ret;
                set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
                ret = trace_event_enable_disable(file, 1, 1);
        } else {
                if (atomic_dec_return(&file->tm_ref) > 0)
                        return ret;
                clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
                ret = trace_event_enable_disable(file, 0, 1);
        }

        return ret;
}
/**
 * clear_event_triggers - Clear all triggers associated with a trace array
 * @tr: The trace array to clear
 *
 * For each trigger, the triggering event has its tm_ref decremented
 * via trace_event_trigger_enable_disable(), and any associated event
 * (in the case of enable/disable_event triggers) will have its sm_ref
 * decremented via free()->trace_event_enable_disable().  That
 * combination effectively reverses the soft-mode/trigger state added
 * by trigger registration.
 *
 * Must be called with event_mutex held.
 */
void
clear_event_triggers(struct trace_array *tr)
{
        struct trace_event_file *file;

        list_for_each_entry(file, &tr->events, list) {
                struct event_trigger_data *data, *n;

                list_for_each_entry_safe(data, n, &file->triggers, list) {
                        trace_event_trigger_enable_disable(file, 0);
                        list_del_rcu(&data->list);
                        if (data->ops->free)
                                data->ops->free(data->ops, data);
                }
        }
}
/**
 * update_cond_flag - Set or reset the TRIGGER_COND bit
 * @file: The trace_event_file associated with the event
 *
 * If an event has triggers and any of those triggers has a filter or
 * a post_trigger, trigger invocation needs to be deferred until after
 * the current event has logged its data, and the event should have
 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
 * cleared.
 */
void update_cond_flag(struct trace_event_file *file)
{
        struct event_trigger_data *data;
        bool set_cond = false;

        lockdep_assert_held(&event_mutex);

        list_for_each_entry(data, &file->triggers, list) {
                if (data->filter || event_command_post_trigger(data->cmd_ops) ||
                    event_command_needs_rec(data->cmd_ops)) {
                        set_cond = true;
                        break;
                }
        }

        if (set_cond)
                set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
        else
                clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
}
/**
 * register_trigger - Generic event_command @reg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data to associate with the trigger
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger registration.
 *
 * Usually used directly as the @reg method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int register_trigger(char *glob, struct event_trigger_ops *ops,
                            struct event_trigger_data *data,
                            struct trace_event_file *file)
{
        struct event_trigger_data *test;
        int ret = 0;

        lockdep_assert_held(&event_mutex);

        list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
                        ret = -EEXIST;
                        goto out;
                }
        }

        if (data->ops->init) {
                ret = data->ops->init(data->ops, data);
                if (ret < 0)
                        goto out;
        }

        list_add_rcu(&data->list, &file->triggers);
        ret++;

        update_cond_flag(file);
        if (trace_event_trigger_enable_disable(file, 1) < 0) {
                list_del_rcu(&data->list);
                update_cond_flag(file);
                ret--;
        }
out:
        return ret;
}
/**
 * unregister_trigger - Generic event_command @unreg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @test: Trigger-specific data used to find the trigger to remove
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger unregistration.
 *
 * Usually used directly as the @unreg method in event command
 * implementations.
 */
static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
                               struct event_trigger_data *test,
                               struct trace_event_file *file)
{
        struct event_trigger_data *data;
        bool unregistered = false;

        lockdep_assert_held(&event_mutex);

        list_for_each_entry(data, &file->triggers, list) {
                if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
                        unregistered = true;
                        list_del_rcu(&data->list);
                        trace_event_trigger_enable_disable(file, 0);
                        update_cond_flag(file);
                        break;
                }
        }

        if (unregistered && data->ops->free)
                data->ops->free(data->ops, data);
}
/**
 * event_trigger_callback - Generic event_command @func implementation
 * @cmd_ops: The command ops, used for trigger registration
 * @file: The trace_event_file associated with the event
 * @glob: The raw string used to register the trigger
 * @cmd: The cmd portion of the string used to register the trigger
 * @param: The params portion of the string used to register the trigger
 *
 * Common implementation for event command parsing and trigger
 * instantiation.
 *
 * Usually used directly as the @func method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_callback(struct event_command *cmd_ops,
                       struct trace_event_file *file,
                       char *glob, char *cmd, char *param)
{
        struct event_trigger_data *trigger_data;
        struct event_trigger_ops *trigger_ops;
        char *trigger = NULL;
        char *number;
        int ret;

        /* separate the trigger from the filter (t:n [if filter]) */
        if (param && isdigit(param[0]))
                trigger = strsep(&param, " \t");

        trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

        ret = -ENOMEM;
        trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
        if (!trigger_data)
                goto out;

        trigger_data->count = -1;
        trigger_data->ops = trigger_ops;
        trigger_data->cmd_ops = cmd_ops;
        trigger_data->private_data = file;
        INIT_LIST_HEAD(&trigger_data->list);
        INIT_LIST_HEAD(&trigger_data->named_list);

        if (glob[0] == '!') {
                cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
                kfree(trigger_data);
                ret = 0;
                goto out;
        }

        if (trigger) {
                number = strsep(&trigger, ":");

                ret = -EINVAL;
                if (!strlen(number))
                        goto out_free;

                /*
                 * We use the callback data field (which is a pointer)
                 * as our counter.
                 */
                ret = kstrtoul(number, 0, &trigger_data->count);
                if (ret)
                        goto out_free;
        }

        if (!param) /* if param is non-empty, it's supposed to be a filter */
                goto out_reg;

        if (!cmd_ops->set_filter)
                goto out_reg;

        ret = cmd_ops->set_filter(param, trigger_data, file);
        if (ret < 0)
                goto out_free;

 out_reg:
        /* Up the trigger_data count to make sure reg doesn't free it on failure */
        event_trigger_init(trigger_ops, trigger_data);
        ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
        /*
         * The above returns on success the # of functions enabled,
         * but if it didn't find any functions it returns zero.
         * Consider no functions a failure too.
         */
        if (!ret) {
                cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
                ret = -ENOENT;
        } else if (ret > 0)
                ret = 0;

        /* Down the counter of trigger_data or free it if not used anymore */
        event_trigger_free(trigger_ops, trigger_data);
 out:
        return ret;

 out_free:
        if (cmd_ops->set_filter)
                cmd_ops->set_filter(NULL, trigger_data, NULL);
        kfree(trigger_data);
        goto out;
}
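
/*
 * Illustrative only: for a write of 'traceoff:5 if prev_pid == 0',
 * event_trigger_callback() sees cmd = "traceoff" and
 * param = "5 if prev_pid == 0"; it splits off trigger = "5" as the
 * count and hands "if prev_pid == 0" to @set_filter.  The event and
 * field names here are assumed examples, not taken from this file.
 */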
/**
 * set_trigger_filter - Generic event_command @set_filter implementation
 * @filter_str: The filter string for the trigger, NULL to remove filter
 * @trigger_data: Trigger-specific data
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event command filter parsing and filter
 * instantiation.
 *
 * Usually used directly as the @set_filter method in event command
 * implementations.
 *
 * Also used to remove a filter (if filter_str = NULL).
 *
 * Return: 0 on success, errno otherwise
 */
int set_trigger_filter(char *filter_str,
                       struct event_trigger_data *trigger_data,
                       struct trace_event_file *file)
{
        struct event_trigger_data *data = trigger_data;
        struct event_filter *filter = NULL, *tmp;
        int ret = -EINVAL;
        char *s;

        if (!filter_str) /* clear the current filter */
                goto assign;

        s = strsep(&filter_str, " \t");

        if (!strlen(s) || strcmp(s, "if") != 0)
                goto out;

        if (!filter_str)
                goto out;

        /* The filter is for the 'trigger' event, not the triggered event */
        ret = create_event_filter(file->tr, file->event_call,
                                  filter_str, false, &filter);
        /*
         * If create_event_filter() fails, filter still needs to be freed.
         * Which the calling code will do with data->filter.
         */
 assign:
        tmp = rcu_access_pointer(data->filter);

        rcu_assign_pointer(data->filter, filter);

        if (tmp) {
                /* Make sure the call is done with the filter */
                tracepoint_synchronize_unregister();
                free_event_filter(tmp);
        }

        kfree(data->filter_str);
        data->filter_str = NULL;

        if (filter_str) {
                data->filter_str = kstrdup(filter_str, GFP_KERNEL);
                if (!data->filter_str) {
                        free_event_filter(rcu_access_pointer(data->filter));
                        data->filter = NULL;
                        ret = -ENOMEM;
                }
        }
 out:
        return ret;
}
static LIST_HEAD(named_triggers);

/**
 * find_named_trigger - Find the common named trigger associated with @name
 * @name: The name of the set of named triggers to find the common data for
 *
 * Named triggers are sets of triggers that share a common set of
 * trigger data.  The first named trigger registered with a given name
 * owns the common trigger data that the others subsequently
 * registered with the same name will reference.  This function
 * returns the common trigger data associated with that first
 * registered instance.
 *
 * Return: the common trigger data for the given named trigger on
 * success, NULL otherwise.
 */
struct event_trigger_data *find_named_trigger(const char *name)
{
        struct event_trigger_data *data;

        if (!name)
                return NULL;

        list_for_each_entry(data, &named_triggers, named_list) {
                if (data->named_data)
                        continue;
                if (strcmp(data->name, name) == 0)
                        return data;
        }

        return NULL;
}
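
/*
 * Illustrative only: named triggers are currently used by hist
 * triggers, where ':name=' ties several events to one shared set of
 * trigger data, e.g. (assumed example; see the hist trigger code and
 * Documentation/trace/histogram.rst for the authoritative syntax):
 *
 *	echo 'hist:name=foo:keys=common_pid' > events/sched/sched_waking/trigger
 *	echo 'hist:name=foo:keys=common_pid' > events/sched/sched_switch/trigger
 */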
/**
 * is_named_trigger - determine if a given trigger is a named trigger
 * @test: The trigger data to test
 *
 * Return: true if 'test' is a named trigger, false otherwise.
 */
bool is_named_trigger(struct event_trigger_data *test)
{
        struct event_trigger_data *data;

        list_for_each_entry(data, &named_triggers, named_list) {
                if (test == data)
                        return true;
        }

        return false;
}

/**
 * save_named_trigger - save the trigger in the named trigger list
 * @name: The name of the named trigger set
 * @data: The trigger data to save
 *
 * Return: 0 if successful, negative error otherwise.
 */
int save_named_trigger(const char *name, struct event_trigger_data *data)
{
        data->name = kstrdup(name, GFP_KERNEL);
        if (!data->name)
                return -ENOMEM;

        list_add(&data->named_list, &named_triggers);

        return 0;
}

/**
 * del_named_trigger - delete a trigger from the named trigger list
 * @data: The trigger data to delete
 */
void del_named_trigger(struct event_trigger_data *data)
{
        kfree(data->name);
        data->name = NULL;

        list_del(&data->named_list);
}
static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
{
        struct event_trigger_data *test;

        list_for_each_entry(test, &named_triggers, named_list) {
                if (strcmp(test->name, data->name) == 0) {
                        if (pause) {
                                test->paused_tmp = test->paused;
                                test->paused = true;
                        } else {
                                test->paused = test->paused_tmp;
                        }
                }
        }
}
/**
 * pause_named_trigger - Pause all named triggers with the same name
 * @data: The trigger data of a named trigger to pause
 *
 * Pauses a named trigger along with all other triggers having the
 * same name.  Because named triggers share a common set of data,
 * pausing only one is meaningless, so pausing one named trigger needs
 * to pause all triggers with the same name.
 */
void pause_named_trigger(struct event_trigger_data *data)
{
        __pause_named_trigger(data, true);
}

/**
 * unpause_named_trigger - Un-pause all named triggers with the same name
 * @data: The trigger data of a named trigger to unpause
 *
 * Un-pauses a named trigger along with all other triggers having the
 * same name.  Because named triggers share a common set of data,
 * unpausing only one is meaningless, so unpausing one named trigger
 * needs to unpause all triggers with the same name.
 */
void unpause_named_trigger(struct event_trigger_data *data)
{
        __pause_named_trigger(data, false);
}
/**
 * set_named_trigger_data - Associate common named trigger data
 * @data: The trigger data to associate
 * @named_data: The common named trigger to be associated
 *
 * Named triggers are sets of triggers that share a common set of
 * trigger data.  The first named trigger registered with a given name
 * owns the common trigger data that the others subsequently
 * registered with the same name will reference.  This function
 * associates the common trigger data from the first trigger with the
 * given trigger.
 */
void set_named_trigger_data(struct event_trigger_data *data,
                            struct event_trigger_data *named_data)
{
        data->named_data = named_data;
}

struct event_trigger_data *
get_named_trigger_data(struct event_trigger_data *data)
{
        return data->named_data;
}
static void
traceon_trigger(struct event_trigger_data *data, void *rec,
                struct ring_buffer_event *event)
{
        if (tracing_is_on())
                return;

        tracing_on();
}

static void
traceon_count_trigger(struct event_trigger_data *data, void *rec,
                      struct ring_buffer_event *event)
{
        if (tracing_is_on())
                return;

        if (!data->count)
                return;

        if (data->count != -1)
                (data->count)--;

        tracing_on();
}

static void
traceoff_trigger(struct event_trigger_data *data, void *rec,
                 struct ring_buffer_event *event)
{
        if (!tracing_is_on())
                return;

        tracing_off();
}

static void
traceoff_count_trigger(struct event_trigger_data *data, void *rec,
                       struct ring_buffer_event *event)
{
        if (!tracing_is_on())
                return;

        if (!data->count)
                return;

        if (data->count != -1)
                (data->count)--;

        tracing_off();
}
static int
traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
                      struct event_trigger_data *data)
{
        return event_trigger_print("traceon", m, (void *)data->count,
                                   data->filter_str);
}

static int
traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
                       struct event_trigger_data *data)
{
        return event_trigger_print("traceoff", m, (void *)data->count,
                                   data->filter_str);
}
static struct event_trigger_ops traceon_trigger_ops = {
        .func                   = traceon_trigger,
        .print                  = traceon_trigger_print,
        .init                   = event_trigger_init,
        .free                   = event_trigger_free,
};

static struct event_trigger_ops traceon_count_trigger_ops = {
        .func                   = traceon_count_trigger,
        .print                  = traceon_trigger_print,
        .init                   = event_trigger_init,
        .free                   = event_trigger_free,
};

static struct event_trigger_ops traceoff_trigger_ops = {
        .func                   = traceoff_trigger,
        .print                  = traceoff_trigger_print,
        .init                   = event_trigger_init,
        .free                   = event_trigger_free,
};

static struct event_trigger_ops traceoff_count_trigger_ops = {
        .func                   = traceoff_count_trigger,
        .print                  = traceoff_trigger_print,
        .init                   = event_trigger_init,
        .free                   = event_trigger_free,
};
static struct event_trigger_ops *
onoff_get_trigger_ops(char *cmd, char *param)
{
        struct event_trigger_ops *ops;

        /* we register both traceon and traceoff to this callback */
        if (strcmp(cmd, "traceon") == 0)
                ops = param ? &traceon_count_trigger_ops :
                        &traceon_trigger_ops;
        else
                ops = param ? &traceoff_count_trigger_ops :
                        &traceoff_trigger_ops;

        return ops;
}
static struct event_command trigger_traceon_cmd = {
        .name                   = "traceon",
        .trigger_type           = ETT_TRACE_ONOFF,
        .func                   = event_trigger_callback,
        .reg                    = register_trigger,
        .unreg                  = unregister_trigger,
        .get_trigger_ops        = onoff_get_trigger_ops,
        .set_filter             = set_trigger_filter,
};

static struct event_command trigger_traceoff_cmd = {
        .name                   = "traceoff",
        .trigger_type           = ETT_TRACE_ONOFF,
        .flags                  = EVENT_CMD_FL_POST_TRIGGER,
        .func                   = event_trigger_callback,
        .reg                    = register_trigger,
        .unreg                  = unregister_trigger,
        .get_trigger_ops        = onoff_get_trigger_ops,
        .set_filter             = set_trigger_filter,
};
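
/*
 * Illustrative only: typical use of the two commands defined above
 * (the event and filter are assumed examples):
 *
 *	echo 'traceoff:5 if prev_pid == 0' > \
 *		/sys/kernel/tracing/events/sched/sched_switch/trigger
 */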
#ifdef CONFIG_TRACER_SNAPSHOT
static void
snapshot_trigger(struct event_trigger_data *data, void *rec,
                 struct ring_buffer_event *event)
{
        struct trace_event_file *file = data->private_data;

        if (file)
                tracing_snapshot_instance(file->tr);
        else
                tracing_snapshot();
}

static void
snapshot_count_trigger(struct event_trigger_data *data, void *rec,
                       struct ring_buffer_event *event)
{
        if (!data->count)
                return;

        if (data->count != -1)
                (data->count)--;

        snapshot_trigger(data, rec, event);
}
static int
register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
                          struct event_trigger_data *data,
                          struct trace_event_file *file)
{
        int ret = register_trigger(glob, ops, data, file);

        if (ret > 0 && tracing_alloc_snapshot_instance(file->tr) != 0) {
                unregister_trigger(glob, ops, data, file);
                ret = 0;
        }

        return ret;
}

static int
snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
                       struct event_trigger_data *data)
{
        return event_trigger_print("snapshot", m, (void *)data->count,
                                   data->filter_str);
}
static struct event_trigger_ops snapshot_trigger_ops = {
        .func                   = snapshot_trigger,
        .print                  = snapshot_trigger_print,
        .init                   = event_trigger_init,
        .free                   = event_trigger_free,
};

static struct event_trigger_ops snapshot_count_trigger_ops = {
        .func                   = snapshot_count_trigger,
        .print                  = snapshot_trigger_print,
        .init                   = event_trigger_init,
        .free                   = event_trigger_free,
};

static struct event_trigger_ops *
snapshot_get_trigger_ops(char *cmd, char *param)
{
        return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
}
static struct event_command trigger_snapshot_cmd = {
        .name                   = "snapshot",
        .trigger_type           = ETT_SNAPSHOT,
        .func                   = event_trigger_callback,
        .reg                    = register_snapshot_trigger,
        .unreg                  = unregister_trigger,
        .get_trigger_ops        = snapshot_get_trigger_ops,
        .set_filter             = set_trigger_filter,
};
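
/*
 * Illustrative only: a typical snapshot trigger (the event name is an
 * assumed example); the first registration also allocates the snapshot
 * buffer via register_snapshot_trigger() above:
 *
 *	echo 'snapshot:1' > /sys/kernel/tracing/events/block/block_unplug/trigger
 */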
static __init int register_trigger_snapshot_cmd(void)
{
        int ret;

        ret = register_event_command(&trigger_snapshot_cmd);
        WARN_ON(ret < 0);

        return ret;
}
#else
static __init int register_trigger_snapshot_cmd(void) { return 0; }
#endif /* CONFIG_TRACER_SNAPSHOT */
#ifdef CONFIG_STACKTRACE
#ifdef CONFIG_UNWINDER_ORC
/* Skip 2:
 *   event_triggers_post_call()
 *   trace_event_raw_event_xxx()
 */
# define STACK_SKIP 2
#else
/*
 * Skip 4:
 *   stacktrace_trigger()
 *   event_triggers_post_call()
 *   trace_event_buffer_commit()
 *   trace_event_raw_event_xxx()
 */
#define STACK_SKIP 4
#endif
static void
stacktrace_trigger(struct event_trigger_data *data, void *rec,
                   struct ring_buffer_event *event)
{
        trace_dump_stack(STACK_SKIP);
}

static void
stacktrace_count_trigger(struct event_trigger_data *data, void *rec,
                         struct ring_buffer_event *event)
{
        if (!data->count)
                return;

        if (data->count != -1)
                (data->count)--;

        stacktrace_trigger(data, rec, event);
}

static int
stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
                         struct event_trigger_data *data)
{
        return event_trigger_print("stacktrace", m, (void *)data->count,
                                   data->filter_str);
}
static struct event_trigger_ops stacktrace_trigger_ops = {
        .func                   = stacktrace_trigger,
        .print                  = stacktrace_trigger_print,
        .init                   = event_trigger_init,
        .free                   = event_trigger_free,
};

static struct event_trigger_ops stacktrace_count_trigger_ops = {
        .func                   = stacktrace_count_trigger,
        .print                  = stacktrace_trigger_print,
        .init                   = event_trigger_init,
        .free                   = event_trigger_free,
};

static struct event_trigger_ops *
stacktrace_get_trigger_ops(char *cmd, char *param)
{
        return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
}
static struct event_command trigger_stacktrace_cmd = {
        .name                   = "stacktrace",
        .trigger_type           = ETT_STACKTRACE,
        .flags                  = EVENT_CMD_FL_POST_TRIGGER,
        .func                   = event_trigger_callback,
        .reg                    = register_trigger,
        .unreg                  = unregister_trigger,
        .get_trigger_ops        = stacktrace_get_trigger_ops,
        .set_filter             = set_trigger_filter,
};
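
/*
 * Illustrative only: a typical stacktrace trigger (the event and field
 * are assumed examples); note EVENT_CMD_FL_POST_TRIGGER above, so the
 * stack is dumped after the triggering event is written:
 *
 *	echo 'stacktrace:5 if bytes_req >= 65536' > \
 *		/sys/kernel/tracing/events/kmem/kmalloc/trigger
 */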
static __init int register_trigger_stacktrace_cmd(void)
{
        int ret;

        ret = register_event_command(&trigger_stacktrace_cmd);
        WARN_ON(ret < 0);

        return ret;
}
#else
static __init int register_trigger_stacktrace_cmd(void) { return 0; }
#endif /* CONFIG_STACKTRACE */

static __init void unregister_trigger_traceon_traceoff_cmds(void)
{
        unregister_event_command(&trigger_traceon_cmd);
        unregister_event_command(&trigger_traceoff_cmd);
}
static void
event_enable_trigger(struct event_trigger_data *data, void *rec,
                     struct ring_buffer_event *event)
{
        struct enable_trigger_data *enable_data = data->private_data;

        if (enable_data->enable)
                clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
        else
                set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
}

static void
event_enable_count_trigger(struct event_trigger_data *data, void *rec,
                           struct ring_buffer_event *event)
{
        struct enable_trigger_data *enable_data = data->private_data;

        if (!data->count)
                return;

        /* Skip if the event is in a state we want to switch to */
        if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
                return;

        if (data->count != -1)
                (data->count)--;

        event_enable_trigger(data, rec, event);
}
int event_enable_trigger_print(struct seq_file *m,
                               struct event_trigger_ops *ops,
                               struct event_trigger_data *data)
{
        struct enable_trigger_data *enable_data = data->private_data;

        seq_printf(m, "%s:%s:%s",
                   enable_data->hist ?
                   (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
                   (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
                   enable_data->file->event_call->class->system,
                   trace_event_name(enable_data->file->event_call));

        if (data->count == -1)
                seq_puts(m, ":unlimited");
        else
                seq_printf(m, ":count=%ld", data->count);

        if (data->filter_str)
                seq_printf(m, " if %s\n", data->filter_str);
        else
                seq_putc(m, '\n');

        return 0;
}
void event_enable_trigger_free(struct event_trigger_ops *ops,
                               struct event_trigger_data *data)
{
        struct enable_trigger_data *enable_data = data->private_data;

        if (WARN_ON_ONCE(data->ref <= 0))
                return;

        data->ref--;
        if (!data->ref) {
                /* Remove the SOFT_MODE flag */
                trace_event_enable_disable(enable_data->file, 0, 1);
                module_put(enable_data->file->event_call->mod);
                trigger_data_free(data);
                kfree(enable_data);
        }
}
static struct event_trigger_ops event_enable_trigger_ops = {
        .func                   = event_enable_trigger,
        .print                  = event_enable_trigger_print,
        .init                   = event_trigger_init,
        .free                   = event_enable_trigger_free,
};

static struct event_trigger_ops event_enable_count_trigger_ops = {
        .func                   = event_enable_count_trigger,
        .print                  = event_enable_trigger_print,
        .init                   = event_trigger_init,
        .free                   = event_enable_trigger_free,
};

static struct event_trigger_ops event_disable_trigger_ops = {
        .func                   = event_enable_trigger,
        .print                  = event_enable_trigger_print,
        .init                   = event_trigger_init,
        .free                   = event_enable_trigger_free,
};

static struct event_trigger_ops event_disable_count_trigger_ops = {
        .func                   = event_enable_count_trigger,
        .print                  = event_enable_trigger_print,
        .init                   = event_trigger_init,
        .free                   = event_enable_trigger_free,
};
int event_enable_trigger_func(struct event_command *cmd_ops,
                              struct trace_event_file *file,
                              char *glob, char *cmd, char *param)
{
        struct trace_event_file *event_enable_file;
        struct enable_trigger_data *enable_data;
        struct event_trigger_data *trigger_data;
        struct event_trigger_ops *trigger_ops;
        struct trace_array *tr = file->tr;
        const char *system;
        const char *event;
        bool hist = false;
        char *trigger;
        char *number;
        bool enable;
        int ret;

        if (!param)
                return -EINVAL;

        /* separate the trigger from the filter (s:e:n [if filter]) */
        trigger = strsep(&param, " \t");
        if (!trigger)
                return -EINVAL;

        system = strsep(&trigger, ":");
        if (!trigger)
                return -EINVAL;

        event = strsep(&trigger, ":");

        ret = -EINVAL;
        event_enable_file = find_event_file(tr, system, event);
        if (!event_enable_file)
                goto out;

#ifdef CONFIG_HIST_TRIGGERS
        hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
                (strcmp(cmd, DISABLE_HIST_STR) == 0));

        enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
                  (strcmp(cmd, ENABLE_HIST_STR) == 0));
#else
        enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
        trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

        ret = -ENOMEM;
        trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
        if (!trigger_data)
                goto out;

        enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
        if (!enable_data) {
                kfree(trigger_data);
                goto out;
        }

        trigger_data->count = -1;
        trigger_data->ops = trigger_ops;
        trigger_data->cmd_ops = cmd_ops;
        INIT_LIST_HEAD(&trigger_data->list);
        RCU_INIT_POINTER(trigger_data->filter, NULL);

        enable_data->hist = hist;
        enable_data->enable = enable;
        enable_data->file = event_enable_file;
        trigger_data->private_data = enable_data;

        if (glob[0] == '!') {
                cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
                kfree(trigger_data);
                kfree(enable_data);
                ret = 0;
                goto out;
        }

        /* Up the trigger_data count to make sure nothing frees it on failure */
        event_trigger_init(trigger_ops, trigger_data);

        if (trigger) {
                number = strsep(&trigger, ":");

                ret = -EINVAL;
                if (!strlen(number))
                        goto out_free;

                /*
                 * We use the callback data field (which is a pointer)
                 * as our counter.
                 */
                ret = kstrtoul(number, 0, &trigger_data->count);
                if (ret)
                        goto out_free;
        }

        if (!param) /* if param is non-empty, it's supposed to be a filter */
                goto out_reg;

        if (!cmd_ops->set_filter)
                goto out_reg;

        ret = cmd_ops->set_filter(param, trigger_data, file);
        if (ret < 0)
                goto out_free;

 out_reg:
        /* Don't let event modules unload while probe registered */
        ret = try_module_get(event_enable_file->event_call->mod);
        if (!ret) {
                ret = -EBUSY;
                goto out_free;
        }

        ret = trace_event_enable_disable(event_enable_file, 1, 1);
        if (ret < 0)
                goto out_put;

        ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
        /*
         * The above returns on success the # of functions enabled,
         * but if it didn't find any functions it returns zero.
         * Consider no functions a failure too.
         */
        if (!ret) {
                ret = -ENOENT;
                goto out_disable;
        } else if (ret < 0)
                goto out_disable;
        /* Just return zero, not the number of enabled functions */
        ret = 0;
        event_trigger_free(trigger_ops, trigger_data);
 out:
        return ret;

 out_disable:
        trace_event_enable_disable(event_enable_file, 0, 1);
 out_put:
        module_put(event_enable_file->event_call->mod);
 out_free:
        if (cmd_ops->set_filter)
                cmd_ops->set_filter(NULL, trigger_data, NULL);
        event_trigger_free(trigger_ops, trigger_data);
        kfree(enable_data);
        goto out;
}
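
/*
 * Illustrative only: enable_event/disable_event triggers name a second
 * event as 'system:event[:count]' (the events here are assumed
 * examples, not taken from this file):
 *
 *	echo 'enable_event:kmem:kmalloc:20' > \
 *		/sys/kernel/tracing/events/sched/sched_switch/trigger
 *	echo '!enable_event:kmem:kmalloc:20' > \
 *		/sys/kernel/tracing/events/sched/sched_switch/trigger
 */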
int event_enable_register_trigger(char *glob,
                                  struct event_trigger_ops *ops,
                                  struct event_trigger_data *data,
                                  struct trace_event_file *file)
{
        struct enable_trigger_data *enable_data = data->private_data;
        struct enable_trigger_data *test_enable_data;
        struct event_trigger_data *test;
        int ret = 0;

        lockdep_assert_held(&event_mutex);

        list_for_each_entry(test, &file->triggers, list) {
                test_enable_data = test->private_data;
                if (test_enable_data &&
                    (test->cmd_ops->trigger_type ==
                     data->cmd_ops->trigger_type) &&
                    (test_enable_data->file == enable_data->file)) {
                        ret = -EEXIST;
                        goto out;
                }
        }

        if (data->ops->init) {
                ret = data->ops->init(data->ops, data);
                if (ret < 0)
                        goto out;
        }

        list_add_rcu(&data->list, &file->triggers);
        ret++;

        update_cond_flag(file);
        if (trace_event_trigger_enable_disable(file, 1) < 0) {
                list_del_rcu(&data->list);
                update_cond_flag(file);
                ret--;
        }
out:
        return ret;
}
void event_enable_unregister_trigger(char *glob,
                                     struct event_trigger_ops *ops,
                                     struct event_trigger_data *test,
                                     struct trace_event_file *file)
{
        struct enable_trigger_data *test_enable_data = test->private_data;
        struct enable_trigger_data *enable_data;
        struct event_trigger_data *data;
        bool unregistered = false;

        lockdep_assert_held(&event_mutex);

        list_for_each_entry(data, &file->triggers, list) {
                enable_data = data->private_data;
                if (enable_data &&
                    (data->cmd_ops->trigger_type ==
                     test->cmd_ops->trigger_type) &&
                    (enable_data->file == test_enable_data->file)) {
                        unregistered = true;
                        list_del_rcu(&data->list);
                        trace_event_trigger_enable_disable(file, 0);
                        update_cond_flag(file);
                        break;
                }
        }

        if (unregistered && data->ops->free)
                data->ops->free(data->ops, data);
}
static struct event_trigger_ops *
event_enable_get_trigger_ops(char *cmd, char *param)
{
        struct event_trigger_ops *ops;
        bool enable;

#ifdef CONFIG_HIST_TRIGGERS
        enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
                  (strcmp(cmd, ENABLE_HIST_STR) == 0));
#else
        enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
        if (enable)
                ops = param ? &event_enable_count_trigger_ops :
                        &event_enable_trigger_ops;
        else
                ops = param ? &event_disable_count_trigger_ops :
                        &event_disable_trigger_ops;

        return ops;
}
static struct event_command trigger_enable_cmd = {
        .name                   = ENABLE_EVENT_STR,
        .trigger_type           = ETT_EVENT_ENABLE,
        .func                   = event_enable_trigger_func,
        .reg                    = event_enable_register_trigger,
        .unreg                  = event_enable_unregister_trigger,
        .get_trigger_ops        = event_enable_get_trigger_ops,
        .set_filter             = set_trigger_filter,
};

static struct event_command trigger_disable_cmd = {
        .name                   = DISABLE_EVENT_STR,
        .trigger_type           = ETT_EVENT_ENABLE,
        .func                   = event_enable_trigger_func,
        .reg                    = event_enable_register_trigger,
        .unreg                  = event_enable_unregister_trigger,
        .get_trigger_ops        = event_enable_get_trigger_ops,
        .set_filter             = set_trigger_filter,
};
static __init void unregister_trigger_enable_disable_cmds(void)
{
        unregister_event_command(&trigger_enable_cmd);
        unregister_event_command(&trigger_disable_cmd);
}

static __init int register_trigger_enable_disable_cmds(void)
{
        int ret;

        ret = register_event_command(&trigger_enable_cmd);
        if (WARN_ON(ret < 0))
                return ret;
        ret = register_event_command(&trigger_disable_cmd);
        if (WARN_ON(ret < 0))
                unregister_trigger_enable_disable_cmds();

        return ret;
}
static __init int register_trigger_traceon_traceoff_cmds(void)
{
        int ret;

        ret = register_event_command(&trigger_traceon_cmd);
        if (WARN_ON(ret < 0))
                return ret;
        ret = register_event_command(&trigger_traceoff_cmd);
        if (WARN_ON(ret < 0))
                unregister_trigger_traceon_traceoff_cmds();

        return ret;
}

__init int register_trigger_cmds(void)
{
        register_trigger_traceon_traceoff_cmds();
        register_trigger_snapshot_cmd();
        register_trigger_stacktrace_cmd();
        register_trigger_enable_disable_cmds();
        register_trigger_hist_enable_disable_cmds();
        register_trigger_hist_cmd();

        return 0;
}