// SPDX-License-Identifier: GPL-2.0
/*
 * trace_events_trigger - trace event triggers
 *
 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
 */

#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/rculist.h>

#include "trace.h"

static LIST_HEAD(trigger_commands);
static DEFINE_MUTEX(trigger_cmd_mutex);

void trigger_data_free(struct event_trigger_data *data)
{
	if (data->cmd_ops->set_filter)
		data->cmd_ops->set_filter(NULL, data, NULL);

	/* make sure current triggers exit before free */
	tracepoint_synchronize_unregister();

	kfree(data);
}

/**
 * event_triggers_call - Call triggers associated with a trace event
 * @file: The trace_event_file associated with the event
 * @rec: The trace entry for the event, NULL for unconditional invocation
 * @event: The ring buffer event, NULL for unconditional invocation
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command.  If rec is
 * non-NULL, it means that the trigger requires further processing and
 * shouldn't be unconditionally invoked.  If rec is non-NULL and the
 * trigger has a filter associated with it, rec will be checked against
 * the filter and if the record matches the trigger will be invoked.
 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
 * in any case until the current event is written, the trigger
 * function isn't invoked but the bit associated with the deferred
 * trigger is set in the return value.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 *
 * Return: an enum event_trigger_type value containing a set bit for
 * any trigger that should be deferred, ETT_NONE if nothing to defer.
 */
enum event_trigger_type
event_triggers_call(struct trace_event_file *file, void *rec,
		    struct ring_buffer_event *event)
{
	struct event_trigger_data *data;
	enum event_trigger_type tt = ETT_NONE;
	struct event_filter *filter;

	if (list_empty(&file->triggers))
		return tt;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)
			continue;
		if (!rec) {
			data->ops->func(data, rec, event);
			continue;
		}
		filter = rcu_dereference_sched(data->filter);
		if (filter && !filter_match_preds(filter, rec))
			continue;
		if (event_command_post_trigger(data->cmd_ops)) {
			tt |= data->cmd_ops->trigger_type;
			continue;
		}
		data->ops->func(data, rec, event);
	}
	return tt;
}
EXPORT_SYMBOL_GPL(event_triggers_call);

/**
 * event_triggers_post_call - Call 'post_triggers' for a trace event
 * @file: The trace_event_file associated with the event
 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command, if the
 * corresponding bit is set in the tt enum passed into this function.
 * See @event_triggers_call for details on how those bits are set.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 */
void
event_triggers_post_call(struct trace_event_file *file,
			 enum event_trigger_type tt)
{
	struct event_trigger_data *data;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)
			continue;
		if (data->cmd_ops->trigger_type & tt)
			data->ops->func(data, NULL, NULL);
	}
}
EXPORT_SYMBOL_GPL(event_triggers_post_call);

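/*
 * Sketch of the intended call sequence (illustrative, not taken from
 * this file): an event submission path first runs the pre-commit
 * triggers, writes the event, then runs any deferred 'post' triggers:
 *
 *	enum event_trigger_type tt;
 *
 *	tt = event_triggers_call(file, entry, event);
 *	... commit the event to the ring buffer ...
 *	if (tt != ETT_NONE)
 *		event_triggers_post_call(file, tt);
 */
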
#define SHOW_AVAILABLE_TRIGGERS (void *)(1UL)

static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
{
	struct trace_event_file *event_file = event_file_data(m->private);

	if (t == SHOW_AVAILABLE_TRIGGERS)
		return NULL;

	return seq_list_next(t, &event_file->triggers, pos);
}

static void *trigger_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *event_file;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	event_file = event_file_data(m->private);
	if (unlikely(!event_file))
		return ERR_PTR(-ENODEV);

	if (list_empty(&event_file->triggers))
		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;

	return seq_list_start(&event_file->triggers, *pos);
}

static void trigger_stop(struct seq_file *m, void *t)
{
	mutex_unlock(&event_mutex);
}

static int trigger_show(struct seq_file *m, void *v)
{
	struct event_trigger_data *data;
	struct event_command *p;

	if (v == SHOW_AVAILABLE_TRIGGERS) {
		seq_puts(m, "# Available triggers:\n");
		seq_putc(m, '#');
		mutex_lock(&trigger_cmd_mutex);
		list_for_each_entry_reverse(p, &trigger_commands, list)
			seq_printf(m, " %s", p->name);
		seq_putc(m, '\n');
		mutex_unlock(&trigger_cmd_mutex);
		return 0;
	}

	data = list_entry(v, struct event_trigger_data, list);
	data->ops->print(m, data->ops, data);

	return 0;
}

static const struct seq_operations event_triggers_seq_ops = {
	.start = trigger_start,
	.next = trigger_next,
	.stop = trigger_stop,
	.show = trigger_show,
};

static int event_trigger_regex_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	mutex_lock(&event_mutex);

	if (unlikely(!event_file_data(file))) {
		mutex_unlock(&event_mutex);
		return -ENODEV;
	}

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		struct trace_event_file *event_file;
		struct event_command *p;

		event_file = event_file_data(file);

		list_for_each_entry(p, &trigger_commands, list) {
			if (p->unreg_all)
				p->unreg_all(event_file);
		}
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &event_triggers_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = file;
		}
	}

	mutex_unlock(&event_mutex);

	return ret;
}

static int trigger_process_regex(struct trace_event_file *file, char *buff)
{
	char *command, *next = buff;
	struct event_command *p;
	int ret = -EINVAL;

	command = strsep(&next, ": \t");
	command = (command[0] != '!') ? command : command + 1;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry(p, &trigger_commands, list) {
		if (strcmp(p->name, command) == 0) {
			ret = p->func(p, file, buff, command, next);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}

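/*
 * Example strings accepted by the parser above, as written to a
 * per-event 'trigger' file (path illustrative):
 *
 *	echo 'traceoff:5 if next_pid == 0' > \
 *		/sys/kernel/debug/tracing/events/sched/sched_switch/trigger
 *	echo '!traceoff' > \
 *		/sys/kernel/debug/tracing/events/sched/sched_switch/trigger
 *
 * The command name ("traceoff") selects the event_command; everything
 * after the first ':' or whitespace is passed to its ->func() as the
 * parameter string.  A leading '!' removes an existing trigger.
 */
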
static ssize_t event_trigger_regex_write(struct file *file,
					 const char __user *ubuf,
					 size_t cnt, loff_t *ppos)
{
	struct trace_event_file *event_file;
	ssize_t ret;
	char *buf;

	if (!cnt)
		return 0;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	strim(buf);

	mutex_lock(&event_mutex);
	event_file = event_file_data(file);
	if (unlikely(!event_file)) {
		mutex_unlock(&event_mutex);
		kfree(buf);
		return -ENODEV;
	}
	ret = trigger_process_regex(event_file, buf);
	mutex_unlock(&event_mutex);

	kfree(buf);
	if (ret < 0)
		goto out;

	*ppos += cnt;
	ret = cnt;
 out:
	return ret;
}

static int event_trigger_regex_release(struct inode *inode, struct file *file)
{
	mutex_lock(&event_mutex);

	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	mutex_unlock(&event_mutex);

	return 0;
}

static ssize_t
event_trigger_write(struct file *filp, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
}

static int
event_trigger_open(struct inode *inode, struct file *filp)
{
	return event_trigger_regex_open(inode, filp);
}

static int
event_trigger_release(struct inode *inode, struct file *file)
{
	return event_trigger_regex_release(inode, file);
}

const struct file_operations event_trigger_fops = {
	.open = event_trigger_open,
	.read = seq_read,
	.write = event_trigger_write,
	.llseek = tracing_lseek,
	.release = event_trigger_release,
};

/*
 * Currently we only register event commands from __init, so mark this
 * __init too.
 */
__init int register_event_command(struct event_command *cmd)
{
	struct event_command *p;
	int ret = 0;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry(p, &trigger_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}
	list_add(&cmd->list, &trigger_commands);
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}

/*
 * Currently we only unregister event commands from __init, so mark
 * this __init too.
 */
__init int unregister_event_command(struct event_command *cmd)
{
	struct event_command *p, *n;
	int ret = -ENODEV;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry_safe(p, n, &trigger_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = 0;
			list_del_init(&p->list);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}

/**
 * event_trigger_print - Generic event_trigger_ops @print implementation
 * @name: The name of the event trigger
 * @m: The seq_file being printed to
 * @data: Trigger-specific data
 * @filter_str: filter_str to print, if present
 *
 * Common implementation for event triggers to print themselves.
 *
 * Usually wrapped by a function that simply sets the @name of the
 * trigger command and then invokes this.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_print(const char *name, struct seq_file *m,
		    void *data, char *filter_str)
{
	long count = (long)data;

	seq_puts(m, name);

	if (count == -1)
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", count);

	if (filter_str)
		seq_printf(m, " if %s\n", filter_str);
	else
		seq_putc(m, '\n');

	return 0;
}

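/*
 * Example of the lines this helper produces when a 'trigger' file is
 * read back (values illustrative):
 *
 *	traceoff:count=5 if next_pid == 0
 *	stacktrace:unlimited
 */
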
/**
 * event_trigger_init - Generic event_trigger_ops @init implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger initialization.
 *
 * Usually used directly as the @init method in event trigger
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
int event_trigger_init(struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	data->ref++;
	return 0;
}

/**
 * event_trigger_free - Generic event_trigger_ops @free implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger de-initialization.
 *
 * Usually used directly as the @free method in event trigger
 * implementations.
 */
static void
event_trigger_free(struct event_trigger_ops *ops,
		   struct event_trigger_data *data)
{
	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref)
		trigger_data_free(data);
}

int trace_event_trigger_enable_disable(struct trace_event_file *file,
				       int trigger_enable)
{
	int ret = 0;

	if (trigger_enable) {
		if (atomic_inc_return(&file->tm_ref) > 1)
			return ret;
		set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 1, 1);
	} else {
		if (atomic_dec_return(&file->tm_ref) > 0)
			return ret;
		clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 0, 1);
	}

	return ret;
}

/**
 * clear_event_triggers - Clear all triggers associated with a trace array
 * @tr: The trace array to clear
 *
 * For each trigger, the triggering event has its tm_ref decremented
 * via trace_event_trigger_enable_disable(), and any associated event
 * (in the case of enable/disable_event triggers) will have its sm_ref
 * decremented via free()->trace_event_enable_disable().  That
 * combination effectively reverses the soft-mode/trigger state added
 * by trigger registration.
 *
 * Must be called with event_mutex held.
 */
void
clear_event_triggers(struct trace_array *tr)
{
	struct trace_event_file *file;

	list_for_each_entry(file, &tr->events, list) {
		struct event_trigger_data *data, *n;
		list_for_each_entry_safe(data, n, &file->triggers, list) {
			trace_event_trigger_enable_disable(file, 0);
			list_del_rcu(&data->list);
			if (data->ops->free)
				data->ops->free(data->ops, data);
		}
	}
}

/**
 * update_cond_flag - Set or reset the TRIGGER_COND bit
 * @file: The trace_event_file associated with the event
 *
 * If an event has triggers and any of those triggers has a filter or
 * a post_trigger, trigger invocation needs to be deferred until after
 * the current event has logged its data, and the event should have
 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
 * cleared.
 */
void update_cond_flag(struct trace_event_file *file)
{
	struct event_trigger_data *data;
	bool set_cond = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->filter || event_command_post_trigger(data->cmd_ops) ||
		    event_command_needs_rec(data->cmd_ops)) {
			set_cond = true;
			break;
		}
	}

	if (set_cond)
		set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
	else
		clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
}

/**
 * register_trigger - Generic event_command @reg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data to associate with the trigger
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger registration.
 *
 * Usually used directly as the @reg method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int register_trigger(char *glob, struct event_trigger_ops *ops,
			    struct event_trigger_data *data,
			    struct trace_event_file *file)
{
	struct event_trigger_data *test;
	int ret = 0;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	update_cond_flag(file);
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
out:
	return ret;
}

/**
 * unregister_trigger - Generic event_command @unreg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @test: Trigger-specific data used to find the trigger to remove
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger unregistration.
 *
 * Usually used directly as the @unreg method in event command
 * implementations.
 */
static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
			       struct event_trigger_data *test,
			       struct trace_event_file *file)
{
	struct event_trigger_data *data;
	bool unregistered = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}

/**
 * event_trigger_callback - Generic event_command @func implementation
 * @cmd_ops: The command ops, used for trigger registration
 * @file: The trace_event_file associated with the event
 * @glob: The raw string used to register the trigger
 * @cmd: The cmd portion of the string used to register the trigger
 * @param: The params portion of the string used to register the trigger
 *
 * Common implementation for event command parsing and trigger
 * instantiation.
 *
 * Usually used directly as the @func method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_callback(struct event_command *cmd_ops,
		       struct trace_event_file *file,
		       char *glob, char *cmd, char *param)
{
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	char *trigger = NULL;
	char *number;
	int ret;

	/* separate the trigger from the filter (t:n [if filter]) */
	if (param && isdigit(param[0]))
		trigger = strsep(&param, " \t");

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	trigger_data->private_data = file;
	INIT_LIST_HEAD(&trigger_data->list);
	INIT_LIST_HEAD(&trigger_data->named_list);

	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		ret = 0;
		goto out;
	}

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	/* Up the trigger_data count to make sure reg doesn't free it on failure */
	event_trigger_init(trigger_ops, trigger_data);
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
		ret = -ENOENT;
	} else if (ret > 0)
		ret = 0;

	/* Down the counter of trigger_data or free it if not used anymore */
	event_trigger_free(trigger_ops, trigger_data);
 out:
	return ret;

 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	kfree(trigger_data);
	goto out;
}

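/*
 * Worked example (illustrative): for a write of
 * "traceoff:5 if next_pid == 0" to an event's 'trigger' file, this
 * callback sees cmd = "traceoff" and param = "5 if next_pid == 0";
 * "5" is split off as the trigger count and "if next_pid == 0" is
 * handed to ->set_filter() before ->reg() links the trigger into the
 * event's trigger list.
 */
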
/**
 * set_trigger_filter - Generic event_command @set_filter implementation
 * @filter_str: The filter string for the trigger, NULL to remove filter
 * @trigger_data: Trigger-specific data
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event command filter parsing and filter
 * instantiation.
 *
 * Usually used directly as the @set_filter method in event command
 * implementations.
 *
 * Also used to remove a filter (if filter_str = NULL).
 *
 * Return: 0 on success, errno otherwise
 */
int set_trigger_filter(char *filter_str,
		       struct event_trigger_data *trigger_data,
		       struct trace_event_file *file)
{
	struct event_trigger_data *data = trigger_data;
	struct event_filter *filter = NULL, *tmp;
	int ret = -EINVAL;
	char *s;

	if (!filter_str) /* clear the current filter */
		goto assign;

	s = strsep(&filter_str, " \t");

	if (!strlen(s) || strcmp(s, "if") != 0)
		goto out;

	if (!filter_str)
		goto out;

	/* The filter is for the 'trigger' event, not the triggered event */
	ret = create_event_filter(file->event_call, filter_str, false, &filter);
	/*
	 * If create_event_filter() fails, filter still needs to be freed.
	 * Which the calling code will do with data->filter.
	 */
 assign:
	tmp = rcu_access_pointer(data->filter);

	rcu_assign_pointer(data->filter, filter);

	if (tmp) {
		/* Make sure the call is done with the filter */
		tracepoint_synchronize_unregister();
		free_event_filter(tmp);
	}

	kfree(data->filter_str);
	data->filter_str = NULL;

	if (filter_str) {
		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
		if (!data->filter_str) {
			free_event_filter(rcu_access_pointer(data->filter));
			data->filter = NULL;
			ret = -ENOMEM;
		}
	}
 out:
	return ret;
}

static LIST_HEAD(named_triggers);

/**
 * find_named_trigger - Find the common named trigger associated with @name
 * @name: The name of the set of named triggers to find the common data for
 *
 * Named triggers are sets of triggers that share a common set of
 * trigger data.  The first named trigger registered with a given name
 * owns the common trigger data that the others subsequently
 * registered with the same name will reference.  This function
 * returns the common trigger data associated with that first
 * registered instance.
 *
 * Return: the common trigger data for the given named trigger on
 * success, NULL otherwise.
 */
struct event_trigger_data *find_named_trigger(const char *name)
{
	struct event_trigger_data *data;

	if (!name)
		return NULL;

	list_for_each_entry(data, &named_triggers, named_list) {
		if (data->named_data)
			continue;
		if (strcmp(data->name, name) == 0)
			return data;
	}

	return NULL;
}

/**
 * is_named_trigger - determine if a given trigger is a named trigger
 * @test: The trigger data to test
 *
 * Return: true if 'test' is a named trigger, false otherwise.
 */
bool is_named_trigger(struct event_trigger_data *test)
{
	struct event_trigger_data *data;

	list_for_each_entry(data, &named_triggers, named_list) {
		if (test == data)
			return true;
	}

	return false;
}

/**
 * save_named_trigger - save the trigger in the named trigger list
 * @name: The name of the named trigger set
 * @data: The trigger data to save
 *
 * Return: 0 if successful, negative error otherwise.
 */
int save_named_trigger(const char *name, struct event_trigger_data *data)
{
	data->name = kstrdup(name, GFP_KERNEL);
	if (!data->name)
		return -ENOMEM;

	list_add(&data->named_list, &named_triggers);

	return 0;
}

/**
 * del_named_trigger - delete a trigger from the named trigger list
 * @data: The trigger data to delete
 */
void del_named_trigger(struct event_trigger_data *data)
{
	kfree(data->name);
	data->name = NULL;

	list_del(&data->named_list);
}

static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
{
	struct event_trigger_data *test;

	list_for_each_entry(test, &named_triggers, named_list) {
		if (strcmp(test->name, data->name) == 0) {
			if (pause) {
				test->paused_tmp = test->paused;
				test->paused = true;
			} else {
				test->paused = test->paused_tmp;
			}
		}
	}
}

/**
 * pause_named_trigger - Pause all named triggers with the same name
 * @data: The trigger data of a named trigger to pause
 *
 * Pauses a named trigger along with all other triggers having the
 * same name.  Because named triggers share a common set of data,
 * pausing only one is meaningless, so pausing one named trigger needs
 * to pause all triggers with the same name.
 */
void pause_named_trigger(struct event_trigger_data *data)
{
	__pause_named_trigger(data, true);
}

/**
 * unpause_named_trigger - Un-pause all named triggers with the same name
 * @data: The trigger data of a named trigger to unpause
 *
 * Un-pauses a named trigger along with all other triggers having the
 * same name.  Because named triggers share a common set of data,
 * unpausing only one is meaningless, so unpausing one named trigger
 * needs to unpause all triggers with the same name.
 */
void unpause_named_trigger(struct event_trigger_data *data)
{
	__pause_named_trigger(data, false);
}

/**
 * set_named_trigger_data - Associate common named trigger data
 * @data: The trigger data to associate
 * @named_data: The common named trigger data @data will reference
 *
 * Named triggers are sets of triggers that share a common set of
 * trigger data.  The first named trigger registered with a given name
 * owns the common trigger data that the others subsequently
 * registered with the same name will reference.  This function
 * associates the common trigger data from the first trigger with the
 * given trigger.
 */
void set_named_trigger_data(struct event_trigger_data *data,
			    struct event_trigger_data *named_data)
{
	data->named_data = named_data;
}

struct event_trigger_data *
get_named_trigger_data(struct event_trigger_data *data)
{
	return data->named_data;
}

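/*
 * Named triggers are currently used by hist triggers carrying a
 * ':name=' modifier, e.g. (illustrative, via the hist trigger command
 * implemented elsewhere):
 *
 *	echo 'hist:keys=call_site:vals=bytes_req:name=foo' > \
 *		/sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
 *
 * A later 'hist:...:name=foo' registration on another event shares the
 * trigger data owned by the first one via the helpers above.
 */
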
static void
traceon_trigger(struct event_trigger_data *data, void *rec,
		struct ring_buffer_event *event)
{
	if (tracing_is_on())
		return;

	tracing_on();
}

static void
traceon_count_trigger(struct event_trigger_data *data, void *rec,
		      struct ring_buffer_event *event)
{
	if (tracing_is_on())
		return;

	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	tracing_on();
}

static void
traceoff_trigger(struct event_trigger_data *data, void *rec,
		 struct ring_buffer_event *event)
{
	if (!tracing_is_on())
		return;

	tracing_off();
}

static void
traceoff_count_trigger(struct event_trigger_data *data, void *rec,
		       struct ring_buffer_event *event)
{
	if (!tracing_is_on())
		return;

	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	tracing_off();
}

static int
traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		      struct event_trigger_data *data)
{
	return event_trigger_print("traceon", m, (void *)data->count,
				   data->filter_str);
}

static int
traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("traceoff", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops traceon_trigger_ops = {
	.func = traceon_trigger,
	.print = traceon_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceon_count_trigger_ops = {
	.func = traceon_count_trigger,
	.print = traceon_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceoff_trigger_ops = {
	.func = traceoff_trigger,
	.print = traceoff_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceoff_count_trigger_ops = {
	.func = traceoff_count_trigger,
	.print = traceoff_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops *
onoff_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_trigger_ops :
			&traceon_trigger_ops;
	else
		ops = param ? &traceoff_count_trigger_ops :
			&traceoff_trigger_ops;

	return ops;
}

static struct event_command trigger_traceon_cmd = {
	.name = "traceon",
	.trigger_type = ETT_TRACE_ONOFF,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = onoff_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static struct event_command trigger_traceoff_cmd = {
	.name = "traceoff",
	.trigger_type = ETT_TRACE_ONOFF,
	.flags = EVENT_CMD_FL_POST_TRIGGER,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = onoff_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

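/*
 * Example usage of the commands defined above (path illustrative):
 *
 *	# re-enable tracing (at most 3 times) when bash is switched in
 *	echo 'traceon:3 if next_comm == "bash"' > \
 *		/sys/kernel/debug/tracing/events/sched/sched_switch/trigger
 */
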
#ifdef CONFIG_TRACER_SNAPSHOT
static void
snapshot_trigger(struct event_trigger_data *data, void *rec,
		 struct ring_buffer_event *event)
{
	struct trace_event_file *file = data->private_data;

	if (file)
		tracing_snapshot_instance(file->tr);
	else
		tracing_snapshot();
}

static void
snapshot_count_trigger(struct event_trigger_data *data, void *rec,
		       struct ring_buffer_event *event)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	snapshot_trigger(data, rec, event);
}

static int
register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
			  struct event_trigger_data *data,
			  struct trace_event_file *file)
{
	int ret = register_trigger(glob, ops, data, file);

	if (ret > 0 && tracing_alloc_snapshot_instance(file->tr) != 0) {
		unregister_trigger(glob, ops, data, file);
		ret = 0;
	}

	return ret;
}

static int
snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("snapshot", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops snapshot_trigger_ops = {
	.func = snapshot_trigger,
	.print = snapshot_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops snapshot_count_trigger_ops = {
	.func = snapshot_count_trigger,
	.print = snapshot_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops *
snapshot_get_trigger_ops(char *cmd, char *param)
{
	return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
}

static struct event_command trigger_snapshot_cmd = {
	.name = "snapshot",
	.trigger_type = ETT_SNAPSHOT,
	.func = event_trigger_callback,
	.reg = register_snapshot_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = snapshot_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static __init int register_trigger_snapshot_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_snapshot_cmd);
	WARN_ON(ret < 0);

	return ret;
}
#else
static __init int register_trigger_snapshot_cmd(void) { return 0; }
#endif /* CONFIG_TRACER_SNAPSHOT */

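/*
 * Example usage (path illustrative):
 *
 *	# snapshot the trace buffer once, the first time a block request
 *	# completes with more than one sector
 *	echo 'snapshot:1 if nr_sector > 1' > \
 *		/sys/kernel/debug/tracing/events/block/block_rq_complete/trigger
 */
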
#ifdef CONFIG_STACKTRACE
#ifdef CONFIG_UNWINDER_ORC
/* Skip 2:
 *   event_triggers_post_call()
 *   trace_event_raw_event_xxx()
 */
# define STACK_SKIP 2
#else
/*
 * Skip 4:
 *   stacktrace_trigger()
 *   event_triggers_post_call()
 *   trace_event_buffer_commit()
 *   trace_event_raw_event_xxx()
 */
#define STACK_SKIP 4
#endif

static void
stacktrace_trigger(struct event_trigger_data *data, void *rec,
		   struct ring_buffer_event *event)
{
	trace_dump_stack(STACK_SKIP);
}

static void
stacktrace_count_trigger(struct event_trigger_data *data, void *rec,
			 struct ring_buffer_event *event)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	stacktrace_trigger(data, rec, event);
}

static int
stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
			 struct event_trigger_data *data)
{
	return event_trigger_print("stacktrace", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops stacktrace_trigger_ops = {
	.func = stacktrace_trigger,
	.print = stacktrace_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops stacktrace_count_trigger_ops = {
	.func = stacktrace_count_trigger,
	.print = stacktrace_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops *
stacktrace_get_trigger_ops(char *cmd, char *param)
{
	return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
}

static struct event_command trigger_stacktrace_cmd = {
	.name = "stacktrace",
	.trigger_type = ETT_STACKTRACE,
	.flags = EVENT_CMD_FL_POST_TRIGGER,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = stacktrace_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static __init int register_trigger_stacktrace_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_stacktrace_cmd);
	WARN_ON(ret < 0);

	return ret;
}
#else
static __init int register_trigger_stacktrace_cmd(void) { return 0; }
#endif /* CONFIG_STACKTRACE */

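/*
 * Example usage (path illustrative):
 *
 *	# dump a kernel stack trace for kmalloc calls requesting 64KiB or more
 *	echo 'stacktrace if bytes_req >= 65536' > \
 *		/sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
 */
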
static __init void unregister_trigger_traceon_traceoff_cmds(void)
{
	unregister_event_command(&trigger_traceon_cmd);
	unregister_event_command(&trigger_traceoff_cmd);
}

static void
event_enable_trigger(struct event_trigger_data *data, void *rec,
		     struct ring_buffer_event *event)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (enable_data->enable)
		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
	else
		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
}

static void
event_enable_count_trigger(struct event_trigger_data *data, void *rec,
			   struct ring_buffer_event *event)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (!data->count)
		return;

	/* Skip if the event is in a state we want to switch to */
	if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
		return;

	if (data->count != -1)
		(data->count)--;

	event_enable_trigger(data, rec, event);
}

int event_enable_trigger_print(struct seq_file *m,
			       struct event_trigger_ops *ops,
			       struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	seq_printf(m, "%s:%s:%s",
		   enable_data->hist ?
		   (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
		   (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
		   enable_data->file->event_call->class->system,
		   trace_event_name(enable_data->file->event_call));

	if (data->count == -1)
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", data->count);

	if (data->filter_str)
		seq_printf(m, " if %s\n", data->filter_str);
	else
		seq_putc(m, '\n');

	return 0;
}

void event_enable_trigger_free(struct event_trigger_ops *ops,
			       struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		/* Remove the SOFT_MODE flag */
		trace_event_enable_disable(enable_data->file, 0, 1);
		module_put(enable_data->file->event_call->mod);
		trigger_data_free(data);
		kfree(enable_data);
	}
}

static struct event_trigger_ops event_enable_trigger_ops = {
	.func = event_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops event_enable_count_trigger_ops = {
	.func = event_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops event_disable_trigger_ops = {
	.func = event_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops event_disable_count_trigger_ops = {
	.func = event_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

int event_enable_trigger_func(struct event_command *cmd_ops,
			      struct trace_event_file *file,
			      char *glob, char *cmd, char *param)
{
	struct trace_event_file *event_enable_file;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	struct trace_array *tr = file->tr;
	const char *system;
	const char *event;
	bool hist = false;
	char *trigger;
	char *number;
	bool enable;
	int ret;

	if (!param)
		return -EINVAL;

	/* separate the trigger from the filter (s:e:n [if filter]) */
	trigger = strsep(&param, " \t");
	if (!trigger)
		return -EINVAL;

	system = strsep(&trigger, ":");
	if (!trigger)
		return -EINVAL;

	event = strsep(&trigger, ":");

	ret = -EINVAL;
	event_enable_file = find_event_file(tr, system, event);
	if (!event_enable_file)
		goto out;

#ifdef CONFIG_HIST_TRIGGERS
	hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
		(strcmp(cmd, DISABLE_HIST_STR) == 0));

	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
#else
	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
	if (!enable_data) {
		kfree(trigger_data);
		goto out;
	}

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	INIT_LIST_HEAD(&trigger_data->list);
	RCU_INIT_POINTER(trigger_data->filter, NULL);

	enable_data->hist = hist;
	enable_data->enable = enable;
	enable_data->file = event_enable_file;
	trigger_data->private_data = enable_data;

	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		kfree(enable_data);
		ret = 0;
		goto out;
	}

	/* Up the trigger_data count to make sure nothing frees it on failure */
	event_trigger_init(trigger_ops, trigger_data);

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	/* Don't let event modules unload while probe registered */
	ret = try_module_get(event_enable_file->event_call->mod);
	if (!ret) {
		ret = -EBUSY;
		goto out_free;
	}

	ret = trace_event_enable_disable(event_enable_file, 1, 1);
	if (ret < 0)
		goto out_put;
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_disable;
	} else if (ret < 0)
		goto out_disable;
	/* Just return zero, not the number of enabled functions */
	ret = 0;
	event_trigger_free(trigger_ops, trigger_data);
 out:
	return ret;

 out_disable:
	trace_event_enable_disable(event_enable_file, 0, 1);
 out_put:
	module_put(event_enable_file->event_call->mod);
 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	event_trigger_free(trigger_ops, trigger_data);
	kfree(enable_data);
	goto out;
}

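/*
 * Worked example (illustrative): writing
 *
 *	echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \
 *		/sys/kernel/debug/tracing/events/block/block_unplug/trigger
 *
 * reaches this function with cmd = "enable_event" and
 * param = "kmem:kmalloc:3 if nr_rq > 1"; "kmem" and "kmalloc" name the
 * event to be soft-enabled, "3" becomes the trigger count and the
 * remainder is passed to ->set_filter().
 */
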
int event_enable_register_trigger(char *glob,
				  struct event_trigger_ops *ops,
				  struct event_trigger_data *data,
				  struct trace_event_file *file)
{
	struct enable_trigger_data *enable_data = data->private_data;
	struct enable_trigger_data *test_enable_data;
	struct event_trigger_data *test;
	int ret = 0;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		test_enable_data = test->private_data;
		if (test_enable_data &&
		    (test->cmd_ops->trigger_type ==
		     data->cmd_ops->trigger_type) &&
		    (test_enable_data->file == enable_data->file)) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	update_cond_flag(file);
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
out:
	return ret;
}

void event_enable_unregister_trigger(char *glob,
				     struct event_trigger_ops *ops,
				     struct event_trigger_data *test,
				     struct trace_event_file *file)
{
	struct enable_trigger_data *test_enable_data = test->private_data;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *data;
	bool unregistered = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		enable_data = data->private_data;
		if (enable_data &&
		    (data->cmd_ops->trigger_type ==
		     test->cmd_ops->trigger_type) &&
		    (enable_data->file == test_enable_data->file)) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}

static struct event_trigger_ops *
event_enable_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;
	bool enable;

#ifdef CONFIG_HIST_TRIGGERS
	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
#else
	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
	if (enable)
		ops = param ? &event_enable_count_trigger_ops :
			&event_enable_trigger_ops;
	else
		ops = param ? &event_disable_count_trigger_ops :
			&event_disable_trigger_ops;

	return ops;
}

static struct event_command trigger_enable_cmd = {
	.name = ENABLE_EVENT_STR,
	.trigger_type = ETT_EVENT_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.get_trigger_ops = event_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static struct event_command trigger_disable_cmd = {
	.name = DISABLE_EVENT_STR,
	.trigger_type = ETT_EVENT_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.get_trigger_ops = event_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static __init void unregister_trigger_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_enable_cmd);
	unregister_event_command(&trigger_disable_cmd);
}

static __init int register_trigger_enable_disable_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_enable_disable_cmds();

	return ret;
}

static __init int register_trigger_traceon_traceoff_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_traceon_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_traceoff_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_traceon_traceoff_cmds();

	return ret;
}

__init int register_trigger_cmds(void)
{
	register_trigger_traceon_traceoff_cmds();
	register_trigger_snapshot_cmd();
	register_trigger_stacktrace_cmd();
	register_trigger_enable_disable_cmds();
	register_trigger_hist_enable_disable_cmds();
	register_trigger_hist_cmd();

	return 0;
}