/*
 * trace_events_hist - trace event hist triggers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
 */
17 #include <linux/module.h>
18 #include <linux/kallsyms.h>
19 #include <linux/mutex.h>
20 #include <linux/slab.h>
21 #include <linux/stacktrace.h>
22 #include <linux/rculist.h>
23 #include <linux/tracefs.h>
25 #include "tracing_map.h"
28 #define SYNTH_SYSTEM "synthetic"
29 #define SYNTH_FIELDS_MAX 16
31 #define STR_VAR_LEN_MAX 32 /* must be multiple of sizeof(u64) */
35 typedef u64 (*hist_field_fn_t
) (struct hist_field
*field
,
36 struct tracing_map_elt
*elt
,
37 struct ring_buffer_event
*rbe
,
40 #define HIST_FIELD_OPERANDS_MAX 2
41 #define HIST_FIELDS_MAX (TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX)
42 #define HIST_ACTIONS_MAX 8
53 struct hist_trigger_data
*hist_data
;
58 struct ftrace_event_field
*field
;
63 unsigned int is_signed
;
65 struct hist_field
*operands
[HIST_FIELD_OPERANDS_MAX
];
66 struct hist_trigger_data
*hist_data
;
68 enum field_op_id
operator;
73 unsigned int var_ref_idx
;
77 static u64
hist_field_none(struct hist_field
*field
,
78 struct tracing_map_elt
*elt
,
79 struct ring_buffer_event
*rbe
,
85 static u64
hist_field_counter(struct hist_field
*field
,
86 struct tracing_map_elt
*elt
,
87 struct ring_buffer_event
*rbe
,
93 static u64
hist_field_string(struct hist_field
*hist_field
,
94 struct tracing_map_elt
*elt
,
95 struct ring_buffer_event
*rbe
,
98 char *addr
= (char *)(event
+ hist_field
->field
->offset
);
100 return (u64
)(unsigned long)addr
;
103 static u64
hist_field_dynstring(struct hist_field
*hist_field
,
104 struct tracing_map_elt
*elt
,
105 struct ring_buffer_event
*rbe
,
108 u32 str_item
= *(u32
*)(event
+ hist_field
->field
->offset
);
109 int str_loc
= str_item
& 0xffff;
110 char *addr
= (char *)(event
+ str_loc
);
112 return (u64
)(unsigned long)addr
;
115 static u64
hist_field_pstring(struct hist_field
*hist_field
,
116 struct tracing_map_elt
*elt
,
117 struct ring_buffer_event
*rbe
,
120 char **addr
= (char **)(event
+ hist_field
->field
->offset
);
122 return (u64
)(unsigned long)*addr
;
125 static u64
hist_field_log2(struct hist_field
*hist_field
,
126 struct tracing_map_elt
*elt
,
127 struct ring_buffer_event
*rbe
,
130 struct hist_field
*operand
= hist_field
->operands
[0];
132 u64 val
= operand
->fn(operand
, elt
, rbe
, event
);
134 return (u64
) ilog2(roundup_pow_of_two(val
));
137 static u64
hist_field_plus(struct hist_field
*hist_field
,
138 struct tracing_map_elt
*elt
,
139 struct ring_buffer_event
*rbe
,
142 struct hist_field
*operand1
= hist_field
->operands
[0];
143 struct hist_field
*operand2
= hist_field
->operands
[1];
145 u64 val1
= operand1
->fn(operand1
, elt
, rbe
, event
);
146 u64 val2
= operand2
->fn(operand2
, elt
, rbe
, event
);
151 static u64
hist_field_minus(struct hist_field
*hist_field
,
152 struct tracing_map_elt
*elt
,
153 struct ring_buffer_event
*rbe
,
156 struct hist_field
*operand1
= hist_field
->operands
[0];
157 struct hist_field
*operand2
= hist_field
->operands
[1];
159 u64 val1
= operand1
->fn(operand1
, elt
, rbe
, event
);
160 u64 val2
= operand2
->fn(operand2
, elt
, rbe
, event
);
165 static u64
hist_field_unary_minus(struct hist_field
*hist_field
,
166 struct tracing_map_elt
*elt
,
167 struct ring_buffer_event
*rbe
,
170 struct hist_field
*operand
= hist_field
->operands
[0];
172 s64 sval
= (s64
)operand
->fn(operand
, elt
, rbe
, event
);
173 u64 val
= (u64
)-sval
;
178 #define DEFINE_HIST_FIELD_FN(type) \
179 static u64 hist_field_##type(struct hist_field *hist_field, \
180 struct tracing_map_elt *elt, \
181 struct ring_buffer_event *rbe, \
184 type *addr = (type *)(event + hist_field->field->offset); \
186 return (u64)(unsigned long)*addr; \
189 DEFINE_HIST_FIELD_FN(s64
);
190 DEFINE_HIST_FIELD_FN(u64
);
191 DEFINE_HIST_FIELD_FN(s32
);
192 DEFINE_HIST_FIELD_FN(u32
);
193 DEFINE_HIST_FIELD_FN(s16
);
194 DEFINE_HIST_FIELD_FN(u16
);
195 DEFINE_HIST_FIELD_FN(s8
);
196 DEFINE_HIST_FIELD_FN(u8
);
198 #define for_each_hist_field(i, hist_data) \
199 for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)
201 #define for_each_hist_val_field(i, hist_data) \
202 for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)
204 #define for_each_hist_key_field(i, hist_data) \
205 for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)
207 #define HIST_STACKTRACE_DEPTH 16
208 #define HIST_STACKTRACE_SIZE (HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
209 #define HIST_STACKTRACE_SKIP 5
211 #define HITCOUNT_IDX 0
212 #define HIST_KEY_SIZE_MAX (MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
214 enum hist_field_flags
{
215 HIST_FIELD_FL_HITCOUNT
= 1 << 0,
216 HIST_FIELD_FL_KEY
= 1 << 1,
217 HIST_FIELD_FL_STRING
= 1 << 2,
218 HIST_FIELD_FL_HEX
= 1 << 3,
219 HIST_FIELD_FL_SYM
= 1 << 4,
220 HIST_FIELD_FL_SYM_OFFSET
= 1 << 5,
221 HIST_FIELD_FL_EXECNAME
= 1 << 6,
222 HIST_FIELD_FL_SYSCALL
= 1 << 7,
223 HIST_FIELD_FL_STACKTRACE
= 1 << 8,
224 HIST_FIELD_FL_LOG2
= 1 << 9,
225 HIST_FIELD_FL_TIMESTAMP
= 1 << 10,
226 HIST_FIELD_FL_TIMESTAMP_USECS
= 1 << 11,
227 HIST_FIELD_FL_VAR
= 1 << 12,
228 HIST_FIELD_FL_EXPR
= 1 << 13,
229 HIST_FIELD_FL_VAR_REF
= 1 << 14,
230 HIST_FIELD_FL_CPU
= 1 << 15,
231 HIST_FIELD_FL_ALIAS
= 1 << 16,
236 char *name
[TRACING_MAP_VARS_MAX
];
237 char *expr
[TRACING_MAP_VARS_MAX
];
240 struct hist_trigger_attrs
{
250 unsigned int map_bits
;
252 char *assignment_str
[TRACING_MAP_VARS_MAX
];
253 unsigned int n_assignments
;
255 char *action_str
[HIST_ACTIONS_MAX
];
256 unsigned int n_actions
;
258 struct var_defs var_defs
;
262 struct hist_field
*var
;
263 struct hist_field
*val
;
266 struct field_var_hist
{
267 struct hist_trigger_data
*hist_data
;
271 struct hist_trigger_data
{
272 struct hist_field
*fields
[HIST_FIELDS_MAX
];
275 unsigned int n_fields
;
277 unsigned int key_size
;
278 struct tracing_map_sort_key sort_keys
[TRACING_MAP_SORT_KEYS_MAX
];
279 unsigned int n_sort_keys
;
280 struct trace_event_file
*event_file
;
281 struct hist_trigger_attrs
*attrs
;
282 struct tracing_map
*map
;
283 bool enable_timestamps
;
285 struct hist_field
*var_refs
[TRACING_MAP_VARS_MAX
];
286 unsigned int n_var_refs
;
288 struct action_data
*actions
[HIST_ACTIONS_MAX
];
289 unsigned int n_actions
;
291 struct hist_field
*synth_var_refs
[SYNTH_FIELDS_MAX
];
292 unsigned int n_synth_var_refs
;
293 struct field_var
*field_vars
[SYNTH_FIELDS_MAX
];
294 unsigned int n_field_vars
;
295 unsigned int n_field_var_str
;
296 struct field_var_hist
*field_var_hists
[SYNTH_FIELDS_MAX
];
297 unsigned int n_field_var_hists
;
299 struct field_var
*max_vars
[SYNTH_FIELDS_MAX
];
300 unsigned int n_max_vars
;
301 unsigned int n_max_var_str
;
313 struct list_head list
;
316 struct synth_field
**fields
;
317 unsigned int n_fields
;
319 struct trace_event_class
class;
320 struct trace_event_call call
;
321 struct tracepoint
*tp
;
326 typedef void (*action_fn_t
) (struct hist_trigger_data
*hist_data
,
327 struct tracing_map_elt
*elt
, void *rec
,
328 struct ring_buffer_event
*rbe
,
329 struct action_data
*data
, u64
*var_ref_vals
);
333 unsigned int n_params
;
334 char *params
[SYNTH_FIELDS_MAX
];
338 unsigned int var_ref_idx
;
340 char *match_event_system
;
341 char *synth_event_name
;
342 struct synth_event
*synth_event
;
348 unsigned int max_var_ref_idx
;
349 struct hist_field
*max_var
;
350 struct hist_field
*var
;
356 static char last_hist_cmd
[MAX_FILTER_STR_VAL
];
357 static char hist_err_str
[MAX_FILTER_STR_VAL
];
359 static void last_cmd_set(char *str
)
364 strncpy(last_hist_cmd
, str
, MAX_FILTER_STR_VAL
- 1);
367 static void hist_err(char *str
, char *var
)
369 int maxlen
= MAX_FILTER_STR_VAL
- 1;
374 if (strlen(hist_err_str
))
380 if (strlen(hist_err_str
) + strlen(str
) + strlen(var
) > maxlen
)
383 strcat(hist_err_str
, str
);
384 strcat(hist_err_str
, var
);
387 static void hist_err_event(char *str
, char *system
, char *event
, char *var
)
389 char err
[MAX_FILTER_STR_VAL
];
392 snprintf(err
, MAX_FILTER_STR_VAL
, "%s.%s.%s", system
, event
, var
);
394 snprintf(err
, MAX_FILTER_STR_VAL
, "%s.%s", system
, event
);
396 strncpy(err
, var
, MAX_FILTER_STR_VAL
);
401 static void hist_err_clear(void)
403 hist_err_str
[0] = '\0';
406 static bool have_hist_err(void)
408 if (strlen(hist_err_str
))
414 static LIST_HEAD(synth_event_list
);
415 static DEFINE_MUTEX(synth_event_mutex
);
417 struct synth_trace_event
{
418 struct trace_entry ent
;
422 static int synth_event_define_fields(struct trace_event_call
*call
)
424 struct synth_trace_event trace
;
425 int offset
= offsetof(typeof(trace
), fields
);
426 struct synth_event
*event
= call
->data
;
427 unsigned int i
, size
, n_u64
;
432 for (i
= 0, n_u64
= 0; i
< event
->n_fields
; i
++) {
433 size
= event
->fields
[i
]->size
;
434 is_signed
= event
->fields
[i
]->is_signed
;
435 type
= event
->fields
[i
]->type
;
436 name
= event
->fields
[i
]->name
;
437 ret
= trace_define_field(call
, type
, name
, offset
, size
,
438 is_signed
, FILTER_OTHER
);
442 if (event
->fields
[i
]->is_string
) {
443 offset
+= STR_VAR_LEN_MAX
;
444 n_u64
+= STR_VAR_LEN_MAX
/ sizeof(u64
);
446 offset
+= sizeof(u64
);
451 event
->n_u64
= n_u64
;
/*
 * A synthetic-event field type is treated as unsigned iff its type name
 * starts with 'u' ("u64", "u32", ..., and also "unsigned ...").
 * Return statements restored (dropped by extraction).
 */
static bool synth_field_signed(char *type)
{
	if (strncmp(type, "u", 1) == 0)
		return false;

	return true;
}
/*
 * A synthetic-event field is a string iff its type contains "char["
 * (e.g. "char[32]").  Return statements restored (dropped by
 * extraction).
 */
static int synth_field_is_string(char *type)
{
	if (strstr(type, "char[") != NULL)
		return true;

	return false;
}
472 static int synth_field_string_size(char *type
)
474 char buf
[4], *end
, *start
;
478 start
= strstr(type
, "char[");
481 start
+= strlen("char[");
483 end
= strchr(type
, ']');
484 if (!end
|| end
< start
)
491 strncpy(buf
, start
, len
);
494 err
= kstrtouint(buf
, 0, &size
);
498 if (size
> STR_VAR_LEN_MAX
)
504 static int synth_field_size(char *type
)
508 if (strcmp(type
, "s64") == 0)
510 else if (strcmp(type
, "u64") == 0)
512 else if (strcmp(type
, "s32") == 0)
514 else if (strcmp(type
, "u32") == 0)
516 else if (strcmp(type
, "s16") == 0)
518 else if (strcmp(type
, "u16") == 0)
520 else if (strcmp(type
, "s8") == 0)
522 else if (strcmp(type
, "u8") == 0)
524 else if (strcmp(type
, "char") == 0)
526 else if (strcmp(type
, "unsigned char") == 0)
527 size
= sizeof(unsigned char);
528 else if (strcmp(type
, "int") == 0)
530 else if (strcmp(type
, "unsigned int") == 0)
531 size
= sizeof(unsigned int);
532 else if (strcmp(type
, "long") == 0)
534 else if (strcmp(type
, "unsigned long") == 0)
535 size
= sizeof(unsigned long);
536 else if (strcmp(type
, "pid_t") == 0)
537 size
= sizeof(pid_t
);
538 else if (synth_field_is_string(type
))
539 size
= synth_field_string_size(type
);
/*
 * Map a synthetic-event field type name to a printf format specifier
 * for printing its value; defaults to "%llu" for unknown types.  The
 * 'fmt = ...' assignments and final return were dropped by extraction;
 * restored — NOTE(review): confirm exact specifiers against upstream.
 */
static const char *synth_field_fmt(char *type)
{
	const char *fmt = "%llu";

	if (strcmp(type, "s64") == 0)
		fmt = "%lld";
	else if (strcmp(type, "u64") == 0)
		fmt = "%llu";
	else if (strcmp(type, "s32") == 0)
		fmt = "%d";
	else if (strcmp(type, "u32") == 0)
		fmt = "%u";
	else if (strcmp(type, "s16") == 0)
		fmt = "%d";
	else if (strcmp(type, "u16") == 0)
		fmt = "%u";
	else if (strcmp(type, "s8") == 0)
		fmt = "%d";
	else if (strcmp(type, "u8") == 0)
		fmt = "%u";
	else if (strcmp(type, "char") == 0)
		fmt = "%d";
	else if (strcmp(type, "unsigned char") == 0)
		fmt = "%u";
	else if (strcmp(type, "int") == 0)
		fmt = "%d";
	else if (strcmp(type, "unsigned int") == 0)
		fmt = "%u";
	else if (strcmp(type, "long") == 0)
		fmt = "%ld";
	else if (strcmp(type, "unsigned long") == 0)
		fmt = "%lu";
	else if (strcmp(type, "pid_t") == 0)
		fmt = "%d";
	else if (synth_field_is_string(type))
		fmt = "%s";

	return fmt;
}
584 static enum print_line_t
print_synth_event(struct trace_iterator
*iter
,
586 struct trace_event
*event
)
588 struct trace_array
*tr
= iter
->tr
;
589 struct trace_seq
*s
= &iter
->seq
;
590 struct synth_trace_event
*entry
;
591 struct synth_event
*se
;
592 unsigned int i
, n_u64
;
596 entry
= (struct synth_trace_event
*)iter
->ent
;
597 se
= container_of(event
, struct synth_event
, call
.event
);
599 trace_seq_printf(s
, "%s: ", se
->name
);
601 for (i
= 0, n_u64
= 0; i
< se
->n_fields
; i
++) {
602 if (trace_seq_has_overflowed(s
))
605 fmt
= synth_field_fmt(se
->fields
[i
]->type
);
607 /* parameter types */
608 if (tr
->trace_flags
& TRACE_ITER_VERBOSE
)
609 trace_seq_printf(s
, "%s ", fmt
);
611 snprintf(print_fmt
, sizeof(print_fmt
), "%%s=%s%%s", fmt
);
613 /* parameter values */
614 if (se
->fields
[i
]->is_string
) {
615 trace_seq_printf(s
, print_fmt
, se
->fields
[i
]->name
,
616 (char *)&entry
->fields
[n_u64
],
617 i
== se
->n_fields
- 1 ? "" : " ");
618 n_u64
+= STR_VAR_LEN_MAX
/ sizeof(u64
);
620 trace_seq_printf(s
, print_fmt
, se
->fields
[i
]->name
,
621 entry
->fields
[n_u64
],
622 i
== se
->n_fields
- 1 ? "" : " ");
627 trace_seq_putc(s
, '\n');
629 return trace_handle_return(s
);
632 static struct trace_event_functions synth_event_funcs
= {
633 .trace
= print_synth_event
636 static notrace
void trace_event_raw_event_synth(void *__data
,
638 unsigned int var_ref_idx
)
640 struct trace_event_file
*trace_file
= __data
;
641 struct synth_trace_event
*entry
;
642 struct trace_event_buffer fbuffer
;
643 struct ring_buffer
*buffer
;
644 struct synth_event
*event
;
645 unsigned int i
, n_u64
;
648 event
= trace_file
->event_call
->data
;
650 if (trace_trigger_soft_disabled(trace_file
))
653 fields_size
= event
->n_u64
* sizeof(u64
);
656 * Avoid ring buffer recursion detection, as this event
657 * is being performed within another event.
659 buffer
= trace_file
->tr
->trace_buffer
.buffer
;
660 ring_buffer_nest_start(buffer
);
662 entry
= trace_event_buffer_reserve(&fbuffer
, trace_file
,
663 sizeof(*entry
) + fields_size
);
667 for (i
= 0, n_u64
= 0; i
< event
->n_fields
; i
++) {
668 if (event
->fields
[i
]->is_string
) {
669 char *str_val
= (char *)(long)var_ref_vals
[var_ref_idx
+ i
];
670 char *str_field
= (char *)&entry
->fields
[n_u64
];
672 strscpy(str_field
, str_val
, STR_VAR_LEN_MAX
);
673 n_u64
+= STR_VAR_LEN_MAX
/ sizeof(u64
);
675 entry
->fields
[n_u64
] = var_ref_vals
[var_ref_idx
+ i
];
680 trace_event_buffer_commit(&fbuffer
);
682 ring_buffer_nest_end(buffer
);
685 static void free_synth_event_print_fmt(struct trace_event_call
*call
)
688 kfree(call
->print_fmt
);
689 call
->print_fmt
= NULL
;
693 static int __set_synth_event_print_fmt(struct synth_event
*event
,
700 /* When len=0, we just calculate the needed length */
701 #define LEN_OR_ZERO (len ? len - pos : 0)
703 pos
+= snprintf(buf
+ pos
, LEN_OR_ZERO
, "\"");
704 for (i
= 0; i
< event
->n_fields
; i
++) {
705 fmt
= synth_field_fmt(event
->fields
[i
]->type
);
706 pos
+= snprintf(buf
+ pos
, LEN_OR_ZERO
, "%s=%s%s",
707 event
->fields
[i
]->name
, fmt
,
708 i
== event
->n_fields
- 1 ? "" : ", ");
710 pos
+= snprintf(buf
+ pos
, LEN_OR_ZERO
, "\"");
712 for (i
= 0; i
< event
->n_fields
; i
++) {
713 pos
+= snprintf(buf
+ pos
, LEN_OR_ZERO
,
714 ", REC->%s", event
->fields
[i
]->name
);
719 /* return the length of print_fmt */
723 static int set_synth_event_print_fmt(struct trace_event_call
*call
)
725 struct synth_event
*event
= call
->data
;
729 /* First: called with 0 length to calculate the needed length */
730 len
= __set_synth_event_print_fmt(event
, NULL
, 0);
732 print_fmt
= kmalloc(len
+ 1, GFP_KERNEL
);
736 /* Second: actually write the @print_fmt */
737 __set_synth_event_print_fmt(event
, print_fmt
, len
+ 1);
738 call
->print_fmt
= print_fmt
;
743 static void free_synth_field(struct synth_field
*field
)
750 static struct synth_field
*parse_synth_field(char *field_type
,
753 struct synth_field
*field
;
757 if (field_type
[0] == ';')
760 len
= strlen(field_name
);
761 if (field_name
[len
- 1] == ';')
762 field_name
[len
- 1] = '\0';
764 field
= kzalloc(sizeof(*field
), GFP_KERNEL
);
766 return ERR_PTR(-ENOMEM
);
768 len
= strlen(field_type
) + 1;
769 array
= strchr(field_name
, '[');
771 len
+= strlen(array
);
772 field
->type
= kzalloc(len
, GFP_KERNEL
);
777 strcat(field
->type
, field_type
);
779 strcat(field
->type
, array
);
783 field
->size
= synth_field_size(field
->type
);
789 if (synth_field_is_string(field
->type
))
790 field
->is_string
= true;
792 field
->is_signed
= synth_field_signed(field
->type
);
794 field
->name
= kstrdup(field_name
, GFP_KERNEL
);
802 free_synth_field(field
);
803 field
= ERR_PTR(ret
);
807 static void free_synth_tracepoint(struct tracepoint
*tp
)
816 static struct tracepoint
*alloc_synth_tracepoint(char *name
)
818 struct tracepoint
*tp
;
820 tp
= kzalloc(sizeof(*tp
), GFP_KERNEL
);
822 return ERR_PTR(-ENOMEM
);
824 tp
->name
= kstrdup(name
, GFP_KERNEL
);
827 return ERR_PTR(-ENOMEM
);
833 typedef void (*synth_probe_func_t
) (void *__data
, u64
*var_ref_vals
,
834 unsigned int var_ref_idx
);
836 static inline void trace_synth(struct synth_event
*event
, u64
*var_ref_vals
,
837 unsigned int var_ref_idx
)
839 struct tracepoint
*tp
= event
->tp
;
841 if (unlikely(atomic_read(&tp
->key
.enabled
) > 0)) {
842 struct tracepoint_func
*probe_func_ptr
;
843 synth_probe_func_t probe_func
;
846 if (!(cpu_online(raw_smp_processor_id())))
849 probe_func_ptr
= rcu_dereference_sched((tp
)->funcs
);
850 if (probe_func_ptr
) {
852 probe_func
= probe_func_ptr
->func
;
853 __data
= probe_func_ptr
->data
;
854 probe_func(__data
, var_ref_vals
, var_ref_idx
);
855 } while ((++probe_func_ptr
)->func
);
860 static struct synth_event
*find_synth_event(const char *name
)
862 struct synth_event
*event
;
864 list_for_each_entry(event
, &synth_event_list
, list
) {
865 if (strcmp(event
->name
, name
) == 0)
872 static int register_synth_event(struct synth_event
*event
)
874 struct trace_event_call
*call
= &event
->call
;
877 event
->call
.class = &event
->class;
878 event
->class.system
= kstrdup(SYNTH_SYSTEM
, GFP_KERNEL
);
879 if (!event
->class.system
) {
884 event
->tp
= alloc_synth_tracepoint(event
->name
);
885 if (IS_ERR(event
->tp
)) {
886 ret
= PTR_ERR(event
->tp
);
891 INIT_LIST_HEAD(&call
->class->fields
);
892 call
->event
.funcs
= &synth_event_funcs
;
893 call
->class->define_fields
= synth_event_define_fields
;
895 ret
= register_trace_event(&call
->event
);
900 call
->flags
= TRACE_EVENT_FL_TRACEPOINT
;
901 call
->class->reg
= trace_event_reg
;
902 call
->class->probe
= trace_event_raw_event_synth
;
904 call
->tp
= event
->tp
;
906 ret
= trace_add_event_call(call
);
908 pr_warn("Failed to register synthetic event: %s\n",
909 trace_event_name(call
));
913 ret
= set_synth_event_print_fmt(call
);
915 trace_remove_event_call(call
);
921 unregister_trace_event(&call
->event
);
925 static int unregister_synth_event(struct synth_event
*event
)
927 struct trace_event_call
*call
= &event
->call
;
930 ret
= trace_remove_event_call(call
);
935 static void free_synth_event(struct synth_event
*event
)
942 for (i
= 0; i
< event
->n_fields
; i
++)
943 free_synth_field(event
->fields
[i
]);
945 kfree(event
->fields
);
947 kfree(event
->class.system
);
948 free_synth_tracepoint(event
->tp
);
949 free_synth_event_print_fmt(&event
->call
);
953 static struct synth_event
*alloc_synth_event(char *event_name
, int n_fields
,
954 struct synth_field
**fields
)
956 struct synth_event
*event
;
959 event
= kzalloc(sizeof(*event
), GFP_KERNEL
);
961 event
= ERR_PTR(-ENOMEM
);
965 event
->name
= kstrdup(event_name
, GFP_KERNEL
);
968 event
= ERR_PTR(-ENOMEM
);
972 event
->fields
= kcalloc(n_fields
, sizeof(*event
->fields
), GFP_KERNEL
);
973 if (!event
->fields
) {
974 free_synth_event(event
);
975 event
= ERR_PTR(-ENOMEM
);
979 for (i
= 0; i
< n_fields
; i
++)
980 event
->fields
[i
] = fields
[i
];
982 event
->n_fields
= n_fields
;
987 static void action_trace(struct hist_trigger_data
*hist_data
,
988 struct tracing_map_elt
*elt
, void *rec
,
989 struct ring_buffer_event
*rbe
,
990 struct action_data
*data
, u64
*var_ref_vals
)
992 struct synth_event
*event
= data
->onmatch
.synth_event
;
994 trace_synth(event
, var_ref_vals
, data
->onmatch
.var_ref_idx
);
997 struct hist_var_data
{
998 struct list_head list
;
999 struct hist_trigger_data
*hist_data
;
1002 static void add_or_delete_synth_event(struct synth_event
*event
, int delete)
1005 free_synth_event(event
);
1007 mutex_lock(&synth_event_mutex
);
1008 if (!find_synth_event(event
->name
))
1009 list_add(&event
->list
, &synth_event_list
);
1011 free_synth_event(event
);
1012 mutex_unlock(&synth_event_mutex
);
1016 static int create_synth_event(int argc
, char **argv
)
1018 struct synth_field
*field
, *fields
[SYNTH_FIELDS_MAX
];
1019 struct synth_event
*event
= NULL
;
1020 bool delete_event
= false;
1021 int i
, n_fields
= 0, ret
= 0;
1024 mutex_lock(&synth_event_mutex
);
1028 * - Add synthetic event: <event_name> field[;field] ...
1029 * - Remove synthetic event: !<event_name> field[;field] ...
1030 * where 'field' = type field_name
1038 if (name
[0] == '!') {
1039 delete_event
= true;
1043 event
= find_synth_event(name
);
1051 list_del(&event
->list
);
1057 } else if (delete_event
)
1065 for (i
= 1; i
< argc
- 1; i
++) {
1066 if (strcmp(argv
[i
], ";") == 0)
1068 if (n_fields
== SYNTH_FIELDS_MAX
) {
1073 field
= parse_synth_field(argv
[i
], argv
[i
+ 1]);
1074 if (IS_ERR(field
)) {
1075 ret
= PTR_ERR(field
);
1078 fields
[n_fields
] = field
;
1087 event
= alloc_synth_event(name
, n_fields
, fields
);
1088 if (IS_ERR(event
)) {
1089 ret
= PTR_ERR(event
);
1094 mutex_unlock(&synth_event_mutex
);
1098 ret
= unregister_synth_event(event
);
1099 add_or_delete_synth_event(event
, !ret
);
1101 ret
= register_synth_event(event
);
1102 add_or_delete_synth_event(event
, ret
);
1108 mutex_unlock(&synth_event_mutex
);
1110 for (i
= 0; i
< n_fields
; i
++)
1111 free_synth_field(fields
[i
]);
1112 free_synth_event(event
);
1117 static int release_all_synth_events(void)
1119 struct list_head release_events
;
1120 struct synth_event
*event
, *e
;
1123 INIT_LIST_HEAD(&release_events
);
1125 mutex_lock(&synth_event_mutex
);
1127 list_for_each_entry(event
, &synth_event_list
, list
) {
1129 mutex_unlock(&synth_event_mutex
);
1134 list_splice_init(&event
->list
, &release_events
);
1136 mutex_unlock(&synth_event_mutex
);
1138 list_for_each_entry_safe(event
, e
, &release_events
, list
) {
1139 list_del(&event
->list
);
1141 ret
= unregister_synth_event(event
);
1142 add_or_delete_synth_event(event
, !ret
);
1149 static void *synth_events_seq_start(struct seq_file
*m
, loff_t
*pos
)
1151 mutex_lock(&synth_event_mutex
);
1153 return seq_list_start(&synth_event_list
, *pos
);
1156 static void *synth_events_seq_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
1158 return seq_list_next(v
, &synth_event_list
, pos
);
1161 static void synth_events_seq_stop(struct seq_file
*m
, void *v
)
1163 mutex_unlock(&synth_event_mutex
);
1166 static int synth_events_seq_show(struct seq_file
*m
, void *v
)
1168 struct synth_field
*field
;
1169 struct synth_event
*event
= v
;
1172 seq_printf(m
, "%s\t", event
->name
);
1174 for (i
= 0; i
< event
->n_fields
; i
++) {
1175 field
= event
->fields
[i
];
1177 /* parameter values */
1178 seq_printf(m
, "%s %s%s", field
->type
, field
->name
,
1179 i
== event
->n_fields
- 1 ? "" : "; ");
1187 static const struct seq_operations synth_events_seq_op
= {
1188 .start
= synth_events_seq_start
,
1189 .next
= synth_events_seq_next
,
1190 .stop
= synth_events_seq_stop
,
1191 .show
= synth_events_seq_show
1194 static int synth_events_open(struct inode
*inode
, struct file
*file
)
1198 if ((file
->f_mode
& FMODE_WRITE
) && (file
->f_flags
& O_TRUNC
)) {
1199 ret
= release_all_synth_events();
1204 return seq_open(file
, &synth_events_seq_op
);
1207 static ssize_t
synth_events_write(struct file
*file
,
1208 const char __user
*buffer
,
1209 size_t count
, loff_t
*ppos
)
1211 return trace_parse_run_command(file
, buffer
, count
, ppos
,
1212 create_synth_event
);
1215 static const struct file_operations synth_events_fops
= {
1216 .open
= synth_events_open
,
1217 .write
= synth_events_write
,
1219 .llseek
= seq_lseek
,
1220 .release
= seq_release
,
1223 static u64
hist_field_timestamp(struct hist_field
*hist_field
,
1224 struct tracing_map_elt
*elt
,
1225 struct ring_buffer_event
*rbe
,
1228 struct hist_trigger_data
*hist_data
= hist_field
->hist_data
;
1229 struct trace_array
*tr
= hist_data
->event_file
->tr
;
1231 u64 ts
= ring_buffer_event_time_stamp(rbe
);
1233 if (hist_data
->attrs
->ts_in_usecs
&& trace_clock_in_ns(tr
))
1239 static u64
hist_field_cpu(struct hist_field
*hist_field
,
1240 struct tracing_map_elt
*elt
,
1241 struct ring_buffer_event
*rbe
,
1244 int cpu
= smp_processor_id();
1249 static struct hist_field
*
1250 check_field_for_var_ref(struct hist_field
*hist_field
,
1251 struct hist_trigger_data
*var_data
,
1252 unsigned int var_idx
)
1254 struct hist_field
*found
= NULL
;
1256 if (hist_field
&& hist_field
->flags
& HIST_FIELD_FL_VAR_REF
) {
1257 if (hist_field
->var
.idx
== var_idx
&&
1258 hist_field
->var
.hist_data
== var_data
) {
1266 static struct hist_field
*
1267 check_field_for_var_refs(struct hist_trigger_data
*hist_data
,
1268 struct hist_field
*hist_field
,
1269 struct hist_trigger_data
*var_data
,
1270 unsigned int var_idx
,
1273 struct hist_field
*found
= NULL
;
1282 found
= check_field_for_var_ref(hist_field
, var_data
, var_idx
);
1286 for (i
= 0; i
< HIST_FIELD_OPERANDS_MAX
; i
++) {
1287 struct hist_field
*operand
;
1289 operand
= hist_field
->operands
[i
];
1290 found
= check_field_for_var_refs(hist_data
, operand
, var_data
,
1291 var_idx
, level
+ 1);
1299 static struct hist_field
*find_var_ref(struct hist_trigger_data
*hist_data
,
1300 struct hist_trigger_data
*var_data
,
1301 unsigned int var_idx
)
1303 struct hist_field
*hist_field
, *found
= NULL
;
1306 for_each_hist_field(i
, hist_data
) {
1307 hist_field
= hist_data
->fields
[i
];
1308 found
= check_field_for_var_refs(hist_data
, hist_field
,
1309 var_data
, var_idx
, 0);
1314 for (i
= 0; i
< hist_data
->n_synth_var_refs
; i
++) {
1315 hist_field
= hist_data
->synth_var_refs
[i
];
1316 found
= check_field_for_var_refs(hist_data
, hist_field
,
1317 var_data
, var_idx
, 0);
1325 static struct hist_field
*find_any_var_ref(struct hist_trigger_data
*hist_data
,
1326 unsigned int var_idx
)
1328 struct trace_array
*tr
= hist_data
->event_file
->tr
;
1329 struct hist_field
*found
= NULL
;
1330 struct hist_var_data
*var_data
;
1332 list_for_each_entry(var_data
, &tr
->hist_vars
, list
) {
1333 if (var_data
->hist_data
== hist_data
)
1335 found
= find_var_ref(var_data
->hist_data
, hist_data
, var_idx
);
1343 static bool check_var_refs(struct hist_trigger_data
*hist_data
)
1345 struct hist_field
*field
;
1349 for_each_hist_field(i
, hist_data
) {
1350 field
= hist_data
->fields
[i
];
1351 if (field
&& field
->flags
& HIST_FIELD_FL_VAR
) {
1352 if (find_any_var_ref(hist_data
, field
->var
.idx
)) {
1362 static struct hist_var_data
*find_hist_vars(struct hist_trigger_data
*hist_data
)
1364 struct trace_array
*tr
= hist_data
->event_file
->tr
;
1365 struct hist_var_data
*var_data
, *found
= NULL
;
1367 list_for_each_entry(var_data
, &tr
->hist_vars
, list
) {
1368 if (var_data
->hist_data
== hist_data
) {
1377 static bool field_has_hist_vars(struct hist_field
*hist_field
,
1388 if (hist_field
->flags
& HIST_FIELD_FL_VAR
||
1389 hist_field
->flags
& HIST_FIELD_FL_VAR_REF
)
1392 for (i
= 0; i
< HIST_FIELD_OPERANDS_MAX
; i
++) {
1393 struct hist_field
*operand
;
1395 operand
= hist_field
->operands
[i
];
1396 if (field_has_hist_vars(operand
, level
+ 1))
1403 static bool has_hist_vars(struct hist_trigger_data
*hist_data
)
1405 struct hist_field
*hist_field
;
1408 for_each_hist_field(i
, hist_data
) {
1409 hist_field
= hist_data
->fields
[i
];
1410 if (field_has_hist_vars(hist_field
, 0))
1417 static int save_hist_vars(struct hist_trigger_data
*hist_data
)
1419 struct trace_array
*tr
= hist_data
->event_file
->tr
;
1420 struct hist_var_data
*var_data
;
1422 var_data
= find_hist_vars(hist_data
);
1426 if (trace_array_get(tr
) < 0)
1429 var_data
= kzalloc(sizeof(*var_data
), GFP_KERNEL
);
1431 trace_array_put(tr
);
1435 var_data
->hist_data
= hist_data
;
1436 list_add(&var_data
->list
, &tr
->hist_vars
);
1441 static void remove_hist_vars(struct hist_trigger_data
*hist_data
)
1443 struct trace_array
*tr
= hist_data
->event_file
->tr
;
1444 struct hist_var_data
*var_data
;
1446 var_data
= find_hist_vars(hist_data
);
1450 if (WARN_ON(check_var_refs(hist_data
)))
1453 list_del(&var_data
->list
);
1457 trace_array_put(tr
);
1460 static struct hist_field
*find_var_field(struct hist_trigger_data
*hist_data
,
1461 const char *var_name
)
1463 struct hist_field
*hist_field
, *found
= NULL
;
1466 for_each_hist_field(i
, hist_data
) {
1467 hist_field
= hist_data
->fields
[i
];
1468 if (hist_field
&& hist_field
->flags
& HIST_FIELD_FL_VAR
&&
1469 strcmp(hist_field
->var
.name
, var_name
) == 0) {
1478 static struct hist_field
*find_var(struct hist_trigger_data
*hist_data
,
1479 struct trace_event_file
*file
,
1480 const char *var_name
)
1482 struct hist_trigger_data
*test_data
;
1483 struct event_trigger_data
*test
;
1484 struct hist_field
*hist_field
;
1486 hist_field
= find_var_field(hist_data
, var_name
);
1490 list_for_each_entry_rcu(test
, &file
->triggers
, list
) {
1491 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
1492 test_data
= test
->private_data
;
1493 hist_field
= find_var_field(test_data
, var_name
);
1502 static struct trace_event_file
*find_var_file(struct trace_array
*tr
,
1507 struct hist_trigger_data
*var_hist_data
;
1508 struct hist_var_data
*var_data
;
1509 struct trace_event_file
*file
, *found
= NULL
;
1512 return find_event_file(tr
, system
, event_name
);
1514 list_for_each_entry(var_data
, &tr
->hist_vars
, list
) {
1515 var_hist_data
= var_data
->hist_data
;
1516 file
= var_hist_data
->event_file
;
1520 if (find_var_field(var_hist_data
, var_name
)) {
1522 hist_err_event("Variable name not unique, need to use fully qualified name (subsys.event.var) for variable: ", system
, event_name
, var_name
);
1533 static struct hist_field
*find_file_var(struct trace_event_file
*file
,
1534 const char *var_name
)
1536 struct hist_trigger_data
*test_data
;
1537 struct event_trigger_data
*test
;
1538 struct hist_field
*hist_field
;
1540 list_for_each_entry_rcu(test
, &file
->triggers
, list
) {
1541 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
1542 test_data
= test
->private_data
;
1543 hist_field
= find_var_field(test_data
, var_name
);
1552 static struct hist_field
*
1553 find_match_var(struct hist_trigger_data
*hist_data
, char *var_name
)
1555 struct trace_array
*tr
= hist_data
->event_file
->tr
;
1556 struct hist_field
*hist_field
, *found
= NULL
;
1557 struct trace_event_file
*file
;
1560 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
1561 struct action_data
*data
= hist_data
->actions
[i
];
1563 if (data
->fn
== action_trace
) {
1564 char *system
= data
->onmatch
.match_event_system
;
1565 char *event_name
= data
->onmatch
.match_event
;
1567 file
= find_var_file(tr
, system
, event_name
, var_name
);
1570 hist_field
= find_file_var(file
, var_name
);
1573 hist_err_event("Variable name not unique, need to use fully qualified name (subsys.event.var) for variable: ", system
, event_name
, var_name
);
1574 return ERR_PTR(-EINVAL
);
1584 static struct hist_field
*find_event_var(struct hist_trigger_data
*hist_data
,
1589 struct trace_array
*tr
= hist_data
->event_file
->tr
;
1590 struct hist_field
*hist_field
= NULL
;
1591 struct trace_event_file
*file
;
1593 if (!system
|| !event_name
) {
1594 hist_field
= find_match_var(hist_data
, var_name
);
1595 if (IS_ERR(hist_field
))
1601 file
= find_var_file(tr
, system
, event_name
, var_name
);
1605 hist_field
= find_file_var(file
, var_name
);
1610 struct hist_elt_data
{
1613 char *field_var_str
[SYNTH_FIELDS_MAX
];
1616 static u64
hist_field_var_ref(struct hist_field
*hist_field
,
1617 struct tracing_map_elt
*elt
,
1618 struct ring_buffer_event
*rbe
,
1621 struct hist_elt_data
*elt_data
;
1624 elt_data
= elt
->private_data
;
1625 var_val
= elt_data
->var_ref_vals
[hist_field
->var_ref_idx
];
1630 static bool resolve_var_refs(struct hist_trigger_data
*hist_data
, void *key
,
1631 u64
*var_ref_vals
, bool self
)
1633 struct hist_trigger_data
*var_data
;
1634 struct tracing_map_elt
*var_elt
;
1635 struct hist_field
*hist_field
;
1636 unsigned int i
, var_idx
;
1637 bool resolved
= true;
1640 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
1641 hist_field
= hist_data
->var_refs
[i
];
1642 var_idx
= hist_field
->var
.idx
;
1643 var_data
= hist_field
->var
.hist_data
;
1645 if (var_data
== NULL
) {
1650 if ((self
&& var_data
!= hist_data
) ||
1651 (!self
&& var_data
== hist_data
))
1654 var_elt
= tracing_map_lookup(var_data
->map
, key
);
1660 if (!tracing_map_var_set(var_elt
, var_idx
)) {
1665 if (self
|| !hist_field
->read_once
)
1666 var_val
= tracing_map_read_var(var_elt
, var_idx
);
1668 var_val
= tracing_map_read_var_once(var_elt
, var_idx
);
1670 var_ref_vals
[i
] = var_val
;
1676 static const char *hist_field_name(struct hist_field
*field
,
1679 const char *field_name
= "";
1685 field_name
= field
->field
->name
;
1686 else if (field
->flags
& HIST_FIELD_FL_LOG2
||
1687 field
->flags
& HIST_FIELD_FL_ALIAS
)
1688 field_name
= hist_field_name(field
->operands
[0], ++level
);
1689 else if (field
->flags
& HIST_FIELD_FL_CPU
)
1691 else if (field
->flags
& HIST_FIELD_FL_EXPR
||
1692 field
->flags
& HIST_FIELD_FL_VAR_REF
) {
1693 if (field
->system
) {
1694 static char full_name
[MAX_FILTER_STR_VAL
];
1696 strcat(full_name
, field
->system
);
1697 strcat(full_name
, ".");
1698 strcat(full_name
, field
->event_name
);
1699 strcat(full_name
, ".");
1700 strcat(full_name
, field
->name
);
1701 field_name
= full_name
;
1703 field_name
= field
->name
;
1704 } else if (field
->flags
& HIST_FIELD_FL_TIMESTAMP
)
1705 field_name
= "common_timestamp";
1707 if (field_name
== NULL
)
1713 static hist_field_fn_t
select_value_fn(int field_size
, int field_is_signed
)
1715 hist_field_fn_t fn
= NULL
;
1717 switch (field_size
) {
1719 if (field_is_signed
)
1720 fn
= hist_field_s64
;
1722 fn
= hist_field_u64
;
1725 if (field_is_signed
)
1726 fn
= hist_field_s32
;
1728 fn
= hist_field_u32
;
1731 if (field_is_signed
)
1732 fn
= hist_field_s16
;
1734 fn
= hist_field_u16
;
1737 if (field_is_signed
)
1747 static int parse_map_size(char *str
)
1749 unsigned long size
, map_bits
;
1758 ret
= kstrtoul(str
, 0, &size
);
1762 map_bits
= ilog2(roundup_pow_of_two(size
));
1763 if (map_bits
< TRACING_MAP_BITS_MIN
||
1764 map_bits
> TRACING_MAP_BITS_MAX
)
1772 static void destroy_hist_trigger_attrs(struct hist_trigger_attrs
*attrs
)
1779 for (i
= 0; i
< attrs
->n_assignments
; i
++)
1780 kfree(attrs
->assignment_str
[i
]);
1782 for (i
= 0; i
< attrs
->n_actions
; i
++)
1783 kfree(attrs
->action_str
[i
]);
1786 kfree(attrs
->sort_key_str
);
1787 kfree(attrs
->keys_str
);
1788 kfree(attrs
->vals_str
);
1789 kfree(attrs
->clock
);
1793 static int parse_action(char *str
, struct hist_trigger_attrs
*attrs
)
1797 if (attrs
->n_actions
>= HIST_ACTIONS_MAX
)
1800 if ((strncmp(str
, "onmatch(", strlen("onmatch(")) == 0) ||
1801 (strncmp(str
, "onmax(", strlen("onmax(")) == 0)) {
1802 attrs
->action_str
[attrs
->n_actions
] = kstrdup(str
, GFP_KERNEL
);
1803 if (!attrs
->action_str
[attrs
->n_actions
]) {
1814 static int parse_assignment(char *str
, struct hist_trigger_attrs
*attrs
)
1818 if ((strncmp(str
, "key=", strlen("key=")) == 0) ||
1819 (strncmp(str
, "keys=", strlen("keys=")) == 0)) {
1820 attrs
->keys_str
= kstrdup(str
, GFP_KERNEL
);
1821 if (!attrs
->keys_str
) {
1825 } else if ((strncmp(str
, "val=", strlen("val=")) == 0) ||
1826 (strncmp(str
, "vals=", strlen("vals=")) == 0) ||
1827 (strncmp(str
, "values=", strlen("values=")) == 0)) {
1828 attrs
->vals_str
= kstrdup(str
, GFP_KERNEL
);
1829 if (!attrs
->vals_str
) {
1833 } else if (strncmp(str
, "sort=", strlen("sort=")) == 0) {
1834 attrs
->sort_key_str
= kstrdup(str
, GFP_KERNEL
);
1835 if (!attrs
->sort_key_str
) {
1839 } else if (strncmp(str
, "name=", strlen("name=")) == 0) {
1840 attrs
->name
= kstrdup(str
, GFP_KERNEL
);
1845 } else if (strncmp(str
, "clock=", strlen("clock=")) == 0) {
1852 str
= strstrip(str
);
1853 attrs
->clock
= kstrdup(str
, GFP_KERNEL
);
1854 if (!attrs
->clock
) {
1858 } else if (strncmp(str
, "size=", strlen("size=")) == 0) {
1859 int map_bits
= parse_map_size(str
);
1865 attrs
->map_bits
= map_bits
;
1869 if (attrs
->n_assignments
== TRACING_MAP_VARS_MAX
) {
1870 hist_err("Too many variables defined: ", str
);
1875 assignment
= kstrdup(str
, GFP_KERNEL
);
1881 attrs
->assignment_str
[attrs
->n_assignments
++] = assignment
;
1887 static struct hist_trigger_attrs
*parse_hist_trigger_attrs(char *trigger_str
)
1889 struct hist_trigger_attrs
*attrs
;
1892 attrs
= kzalloc(sizeof(*attrs
), GFP_KERNEL
);
1894 return ERR_PTR(-ENOMEM
);
1896 while (trigger_str
) {
1897 char *str
= strsep(&trigger_str
, ":");
1899 if (strchr(str
, '=')) {
1900 ret
= parse_assignment(str
, attrs
);
1903 } else if (strcmp(str
, "pause") == 0)
1904 attrs
->pause
= true;
1905 else if ((strcmp(str
, "cont") == 0) ||
1906 (strcmp(str
, "continue") == 0))
1908 else if (strcmp(str
, "clear") == 0)
1909 attrs
->clear
= true;
1911 ret
= parse_action(str
, attrs
);
1917 if (!attrs
->keys_str
) {
1922 if (!attrs
->clock
) {
1923 attrs
->clock
= kstrdup("global", GFP_KERNEL
);
1924 if (!attrs
->clock
) {
1932 destroy_hist_trigger_attrs(attrs
);
1934 return ERR_PTR(ret
);
1937 static inline void save_comm(char *comm
, struct task_struct
*task
)
1940 strcpy(comm
, "<idle>");
1944 if (WARN_ON_ONCE(task
->pid
< 0)) {
1945 strcpy(comm
, "<XXX>");
1949 memcpy(comm
, task
->comm
, TASK_COMM_LEN
);
1952 static void hist_elt_data_free(struct hist_elt_data
*elt_data
)
1956 for (i
= 0; i
< SYNTH_FIELDS_MAX
; i
++)
1957 kfree(elt_data
->field_var_str
[i
]);
1959 kfree(elt_data
->comm
);
1963 static void hist_trigger_elt_data_free(struct tracing_map_elt
*elt
)
1965 struct hist_elt_data
*elt_data
= elt
->private_data
;
1967 hist_elt_data_free(elt_data
);
1970 static int hist_trigger_elt_data_alloc(struct tracing_map_elt
*elt
)
1972 struct hist_trigger_data
*hist_data
= elt
->map
->private_data
;
1973 unsigned int size
= TASK_COMM_LEN
;
1974 struct hist_elt_data
*elt_data
;
1975 struct hist_field
*key_field
;
1976 unsigned int i
, n_str
;
1978 elt_data
= kzalloc(sizeof(*elt_data
), GFP_KERNEL
);
1982 for_each_hist_key_field(i
, hist_data
) {
1983 key_field
= hist_data
->fields
[i
];
1985 if (key_field
->flags
& HIST_FIELD_FL_EXECNAME
) {
1986 elt_data
->comm
= kzalloc(size
, GFP_KERNEL
);
1987 if (!elt_data
->comm
) {
1995 n_str
= hist_data
->n_field_var_str
+ hist_data
->n_max_var_str
;
1997 size
= STR_VAR_LEN_MAX
;
1999 for (i
= 0; i
< n_str
; i
++) {
2000 elt_data
->field_var_str
[i
] = kzalloc(size
, GFP_KERNEL
);
2001 if (!elt_data
->field_var_str
[i
]) {
2002 hist_elt_data_free(elt_data
);
2007 elt
->private_data
= elt_data
;
2012 static void hist_trigger_elt_data_init(struct tracing_map_elt
*elt
)
2014 struct hist_elt_data
*elt_data
= elt
->private_data
;
2017 save_comm(elt_data
->comm
, current
);
2020 static const struct tracing_map_ops hist_trigger_elt_data_ops
= {
2021 .elt_alloc
= hist_trigger_elt_data_alloc
,
2022 .elt_free
= hist_trigger_elt_data_free
,
2023 .elt_init
= hist_trigger_elt_data_init
,
2026 static const char *get_hist_field_flags(struct hist_field
*hist_field
)
2028 const char *flags_str
= NULL
;
2030 if (hist_field
->flags
& HIST_FIELD_FL_HEX
)
2032 else if (hist_field
->flags
& HIST_FIELD_FL_SYM
)
2034 else if (hist_field
->flags
& HIST_FIELD_FL_SYM_OFFSET
)
2035 flags_str
= "sym-offset";
2036 else if (hist_field
->flags
& HIST_FIELD_FL_EXECNAME
)
2037 flags_str
= "execname";
2038 else if (hist_field
->flags
& HIST_FIELD_FL_SYSCALL
)
2039 flags_str
= "syscall";
2040 else if (hist_field
->flags
& HIST_FIELD_FL_LOG2
)
2042 else if (hist_field
->flags
& HIST_FIELD_FL_TIMESTAMP_USECS
)
2043 flags_str
= "usecs";
2048 static void expr_field_str(struct hist_field
*field
, char *expr
)
2050 if (field
->flags
& HIST_FIELD_FL_VAR_REF
)
2053 strcat(expr
, hist_field_name(field
, 0));
2055 if (field
->flags
&& !(field
->flags
& HIST_FIELD_FL_VAR_REF
)) {
2056 const char *flags_str
= get_hist_field_flags(field
);
2060 strcat(expr
, flags_str
);
2065 static char *expr_str(struct hist_field
*field
, unsigned int level
)
2072 expr
= kzalloc(MAX_FILTER_STR_VAL
, GFP_KERNEL
);
2076 if (!field
->operands
[0]) {
2077 expr_field_str(field
, expr
);
2081 if (field
->operator == FIELD_OP_UNARY_MINUS
) {
2085 subexpr
= expr_str(field
->operands
[0], ++level
);
2090 strcat(expr
, subexpr
);
2098 expr_field_str(field
->operands
[0], expr
);
2100 switch (field
->operator) {
2101 case FIELD_OP_MINUS
:
2112 expr_field_str(field
->operands
[1], expr
);
2117 static int contains_operator(char *str
)
2119 enum field_op_id field_op
= FIELD_OP_NONE
;
2122 op
= strpbrk(str
, "+-");
2124 return FIELD_OP_NONE
;
2129 field_op
= FIELD_OP_UNARY_MINUS
;
2131 field_op
= FIELD_OP_MINUS
;
2134 field_op
= FIELD_OP_PLUS
;
2143 static void destroy_hist_field(struct hist_field
*hist_field
,
2154 for (i
= 0; i
< HIST_FIELD_OPERANDS_MAX
; i
++)
2155 destroy_hist_field(hist_field
->operands
[i
], level
+ 1);
2157 kfree(hist_field
->var
.name
);
2158 kfree(hist_field
->name
);
2159 kfree(hist_field
->type
);
2164 static struct hist_field
*create_hist_field(struct hist_trigger_data
*hist_data
,
2165 struct ftrace_event_field
*field
,
2166 unsigned long flags
,
2169 struct hist_field
*hist_field
;
2171 if (field
&& is_function_field(field
))
2174 hist_field
= kzalloc(sizeof(struct hist_field
), GFP_KERNEL
);
2178 hist_field
->hist_data
= hist_data
;
2180 if (flags
& HIST_FIELD_FL_EXPR
|| flags
& HIST_FIELD_FL_ALIAS
)
2181 goto out
; /* caller will populate */
2183 if (flags
& HIST_FIELD_FL_VAR_REF
) {
2184 hist_field
->fn
= hist_field_var_ref
;
2188 if (flags
& HIST_FIELD_FL_HITCOUNT
) {
2189 hist_field
->fn
= hist_field_counter
;
2190 hist_field
->size
= sizeof(u64
);
2191 hist_field
->type
= kstrdup("u64", GFP_KERNEL
);
2192 if (!hist_field
->type
)
2197 if (flags
& HIST_FIELD_FL_STACKTRACE
) {
2198 hist_field
->fn
= hist_field_none
;
2202 if (flags
& HIST_FIELD_FL_LOG2
) {
2203 unsigned long fl
= flags
& ~HIST_FIELD_FL_LOG2
;
2204 hist_field
->fn
= hist_field_log2
;
2205 hist_field
->operands
[0] = create_hist_field(hist_data
, field
, fl
, NULL
);
2206 hist_field
->size
= hist_field
->operands
[0]->size
;
2207 hist_field
->type
= kstrdup(hist_field
->operands
[0]->type
, GFP_KERNEL
);
2208 if (!hist_field
->type
)
2213 if (flags
& HIST_FIELD_FL_TIMESTAMP
) {
2214 hist_field
->fn
= hist_field_timestamp
;
2215 hist_field
->size
= sizeof(u64
);
2216 hist_field
->type
= kstrdup("u64", GFP_KERNEL
);
2217 if (!hist_field
->type
)
2222 if (flags
& HIST_FIELD_FL_CPU
) {
2223 hist_field
->fn
= hist_field_cpu
;
2224 hist_field
->size
= sizeof(int);
2225 hist_field
->type
= kstrdup("unsigned int", GFP_KERNEL
);
2226 if (!hist_field
->type
)
2231 if (WARN_ON_ONCE(!field
))
2234 if (is_string_field(field
)) {
2235 flags
|= HIST_FIELD_FL_STRING
;
2237 hist_field
->size
= MAX_FILTER_STR_VAL
;
2238 hist_field
->type
= kstrdup(field
->type
, GFP_KERNEL
);
2239 if (!hist_field
->type
)
2242 if (field
->filter_type
== FILTER_STATIC_STRING
)
2243 hist_field
->fn
= hist_field_string
;
2244 else if (field
->filter_type
== FILTER_DYN_STRING
)
2245 hist_field
->fn
= hist_field_dynstring
;
2247 hist_field
->fn
= hist_field_pstring
;
2249 hist_field
->size
= field
->size
;
2250 hist_field
->is_signed
= field
->is_signed
;
2251 hist_field
->type
= kstrdup(field
->type
, GFP_KERNEL
);
2252 if (!hist_field
->type
)
2255 hist_field
->fn
= select_value_fn(field
->size
,
2257 if (!hist_field
->fn
) {
2258 destroy_hist_field(hist_field
, 0);
2263 hist_field
->field
= field
;
2264 hist_field
->flags
= flags
;
2267 hist_field
->var
.name
= kstrdup(var_name
, GFP_KERNEL
);
2268 if (!hist_field
->var
.name
)
2274 destroy_hist_field(hist_field
, 0);
2278 static void destroy_hist_fields(struct hist_trigger_data
*hist_data
)
2282 for (i
= 0; i
< HIST_FIELDS_MAX
; i
++) {
2283 if (hist_data
->fields
[i
]) {
2284 destroy_hist_field(hist_data
->fields
[i
], 0);
2285 hist_data
->fields
[i
] = NULL
;
2290 static int init_var_ref(struct hist_field
*ref_field
,
2291 struct hist_field
*var_field
,
2292 char *system
, char *event_name
)
2296 ref_field
->var
.idx
= var_field
->var
.idx
;
2297 ref_field
->var
.hist_data
= var_field
->hist_data
;
2298 ref_field
->size
= var_field
->size
;
2299 ref_field
->is_signed
= var_field
->is_signed
;
2300 ref_field
->flags
|= var_field
->flags
&
2301 (HIST_FIELD_FL_TIMESTAMP
| HIST_FIELD_FL_TIMESTAMP_USECS
);
2304 ref_field
->system
= kstrdup(system
, GFP_KERNEL
);
2305 if (!ref_field
->system
)
2310 ref_field
->event_name
= kstrdup(event_name
, GFP_KERNEL
);
2311 if (!ref_field
->event_name
) {
2317 if (var_field
->var
.name
) {
2318 ref_field
->name
= kstrdup(var_field
->var
.name
, GFP_KERNEL
);
2319 if (!ref_field
->name
) {
2323 } else if (var_field
->name
) {
2324 ref_field
->name
= kstrdup(var_field
->name
, GFP_KERNEL
);
2325 if (!ref_field
->name
) {
2331 ref_field
->type
= kstrdup(var_field
->type
, GFP_KERNEL
);
2332 if (!ref_field
->type
) {
2339 kfree(ref_field
->system
);
2340 kfree(ref_field
->event_name
);
2341 kfree(ref_field
->name
);
2346 static struct hist_field
*create_var_ref(struct hist_field
*var_field
,
2347 char *system
, char *event_name
)
2349 unsigned long flags
= HIST_FIELD_FL_VAR_REF
;
2350 struct hist_field
*ref_field
;
2352 ref_field
= create_hist_field(var_field
->hist_data
, NULL
, flags
, NULL
);
2354 if (init_var_ref(ref_field
, var_field
, system
, event_name
)) {
2355 destroy_hist_field(ref_field
, 0);
2363 static bool is_var_ref(char *var_name
)
2365 if (!var_name
|| strlen(var_name
) < 2 || var_name
[0] != '$')
2371 static char *field_name_from_var(struct hist_trigger_data
*hist_data
,
2377 for (i
= 0; i
< hist_data
->attrs
->var_defs
.n_vars
; i
++) {
2378 name
= hist_data
->attrs
->var_defs
.name
[i
];
2380 if (strcmp(var_name
, name
) == 0) {
2381 field
= hist_data
->attrs
->var_defs
.expr
[i
];
2382 if (contains_operator(field
) || is_var_ref(field
))
2391 static char *local_field_var_ref(struct hist_trigger_data
*hist_data
,
2392 char *system
, char *event_name
,
2395 struct trace_event_call
*call
;
2397 if (system
&& event_name
) {
2398 call
= hist_data
->event_file
->event_call
;
2400 if (strcmp(system
, call
->class->system
) != 0)
2403 if (strcmp(event_name
, trace_event_name(call
)) != 0)
2407 if (!!system
!= !!event_name
)
2410 if (!is_var_ref(var_name
))
2415 return field_name_from_var(hist_data
, var_name
);
2418 static struct hist_field
*parse_var_ref(struct hist_trigger_data
*hist_data
,
2419 char *system
, char *event_name
,
2422 struct hist_field
*var_field
= NULL
, *ref_field
= NULL
;
2424 if (!is_var_ref(var_name
))
2429 var_field
= find_event_var(hist_data
, system
, event_name
, var_name
);
2431 ref_field
= create_var_ref(var_field
, system
, event_name
);
2434 hist_err_event("Couldn't find variable: $",
2435 system
, event_name
, var_name
);
2440 static struct ftrace_event_field
*
2441 parse_field(struct hist_trigger_data
*hist_data
, struct trace_event_file
*file
,
2442 char *field_str
, unsigned long *flags
)
2444 struct ftrace_event_field
*field
= NULL
;
2445 char *field_name
, *modifier
, *str
;
2447 modifier
= str
= kstrdup(field_str
, GFP_KERNEL
);
2449 return ERR_PTR(-ENOMEM
);
2451 field_name
= strsep(&modifier
, ".");
2453 if (strcmp(modifier
, "hex") == 0)
2454 *flags
|= HIST_FIELD_FL_HEX
;
2455 else if (strcmp(modifier
, "sym") == 0)
2456 *flags
|= HIST_FIELD_FL_SYM
;
2457 else if (strcmp(modifier
, "sym-offset") == 0)
2458 *flags
|= HIST_FIELD_FL_SYM_OFFSET
;
2459 else if ((strcmp(modifier
, "execname") == 0) &&
2460 (strcmp(field_name
, "common_pid") == 0))
2461 *flags
|= HIST_FIELD_FL_EXECNAME
;
2462 else if (strcmp(modifier
, "syscall") == 0)
2463 *flags
|= HIST_FIELD_FL_SYSCALL
;
2464 else if (strcmp(modifier
, "log2") == 0)
2465 *flags
|= HIST_FIELD_FL_LOG2
;
2466 else if (strcmp(modifier
, "usecs") == 0)
2467 *flags
|= HIST_FIELD_FL_TIMESTAMP_USECS
;
2469 hist_err("Invalid field modifier: ", modifier
);
2470 field
= ERR_PTR(-EINVAL
);
2475 if (strcmp(field_name
, "common_timestamp") == 0) {
2476 *flags
|= HIST_FIELD_FL_TIMESTAMP
;
2477 hist_data
->enable_timestamps
= true;
2478 if (*flags
& HIST_FIELD_FL_TIMESTAMP_USECS
)
2479 hist_data
->attrs
->ts_in_usecs
= true;
2480 } else if (strcmp(field_name
, "cpu") == 0)
2481 *flags
|= HIST_FIELD_FL_CPU
;
2483 field
= trace_find_event_field(file
->event_call
, field_name
);
2484 if (!field
|| !field
->size
) {
2485 hist_err("Couldn't find field: ", field_name
);
2486 field
= ERR_PTR(-EINVAL
);
2496 static struct hist_field
*create_alias(struct hist_trigger_data
*hist_data
,
2497 struct hist_field
*var_ref
,
2500 struct hist_field
*alias
= NULL
;
2501 unsigned long flags
= HIST_FIELD_FL_ALIAS
| HIST_FIELD_FL_VAR
;
2503 alias
= create_hist_field(hist_data
, NULL
, flags
, var_name
);
2507 alias
->fn
= var_ref
->fn
;
2508 alias
->operands
[0] = var_ref
;
2510 if (init_var_ref(alias
, var_ref
, var_ref
->system
, var_ref
->event_name
)) {
2511 destroy_hist_field(alias
, 0);
2518 static struct hist_field
*parse_atom(struct hist_trigger_data
*hist_data
,
2519 struct trace_event_file
*file
, char *str
,
2520 unsigned long *flags
, char *var_name
)
2522 char *s
, *ref_system
= NULL
, *ref_event
= NULL
, *ref_var
= str
;
2523 struct ftrace_event_field
*field
= NULL
;
2524 struct hist_field
*hist_field
= NULL
;
2527 s
= strchr(str
, '.');
2529 s
= strchr(++s
, '.');
2531 ref_system
= strsep(&str
, ".");
2536 ref_event
= strsep(&str
, ".");
2545 s
= local_field_var_ref(hist_data
, ref_system
, ref_event
, ref_var
);
2547 hist_field
= parse_var_ref(hist_data
, ref_system
, ref_event
, ref_var
);
2549 hist_data
->var_refs
[hist_data
->n_var_refs
] = hist_field
;
2550 hist_field
->var_ref_idx
= hist_data
->n_var_refs
++;
2552 hist_field
= create_alias(hist_data
, hist_field
, var_name
);
2563 field
= parse_field(hist_data
, file
, str
, flags
);
2564 if (IS_ERR(field
)) {
2565 ret
= PTR_ERR(field
);
2569 hist_field
= create_hist_field(hist_data
, field
, *flags
, var_name
);
2577 return ERR_PTR(ret
);
2580 static struct hist_field
*parse_expr(struct hist_trigger_data
*hist_data
,
2581 struct trace_event_file
*file
,
2582 char *str
, unsigned long flags
,
2583 char *var_name
, unsigned int level
);
2585 static struct hist_field
*parse_unary(struct hist_trigger_data
*hist_data
,
2586 struct trace_event_file
*file
,
2587 char *str
, unsigned long flags
,
2588 char *var_name
, unsigned int level
)
2590 struct hist_field
*operand1
, *expr
= NULL
;
2591 unsigned long operand_flags
;
2595 /* we support only -(xxx) i.e. explicit parens required */
2598 hist_err("Too many subexpressions (3 max): ", str
);
2603 str
++; /* skip leading '-' */
2605 s
= strchr(str
, '(');
2613 s
= strrchr(str
, ')');
2617 ret
= -EINVAL
; /* no closing ')' */
2621 flags
|= HIST_FIELD_FL_EXPR
;
2622 expr
= create_hist_field(hist_data
, NULL
, flags
, var_name
);
2629 operand1
= parse_expr(hist_data
, file
, str
, operand_flags
, NULL
, ++level
);
2630 if (IS_ERR(operand1
)) {
2631 ret
= PTR_ERR(operand1
);
2635 expr
->flags
|= operand1
->flags
&
2636 (HIST_FIELD_FL_TIMESTAMP
| HIST_FIELD_FL_TIMESTAMP_USECS
);
2637 expr
->fn
= hist_field_unary_minus
;
2638 expr
->operands
[0] = operand1
;
2639 expr
->operator = FIELD_OP_UNARY_MINUS
;
2640 expr
->name
= expr_str(expr
, 0);
2641 expr
->type
= kstrdup(operand1
->type
, GFP_KERNEL
);
2649 destroy_hist_field(expr
, 0);
2650 return ERR_PTR(ret
);
2653 static int check_expr_operands(struct hist_field
*operand1
,
2654 struct hist_field
*operand2
)
2656 unsigned long operand1_flags
= operand1
->flags
;
2657 unsigned long operand2_flags
= operand2
->flags
;
2659 if ((operand1_flags
& HIST_FIELD_FL_VAR_REF
) ||
2660 (operand1_flags
& HIST_FIELD_FL_ALIAS
)) {
2661 struct hist_field
*var
;
2663 var
= find_var_field(operand1
->var
.hist_data
, operand1
->name
);
2666 operand1_flags
= var
->flags
;
2669 if ((operand2_flags
& HIST_FIELD_FL_VAR_REF
) ||
2670 (operand2_flags
& HIST_FIELD_FL_ALIAS
)) {
2671 struct hist_field
*var
;
2673 var
= find_var_field(operand2
->var
.hist_data
, operand2
->name
);
2676 operand2_flags
= var
->flags
;
2679 if ((operand1_flags
& HIST_FIELD_FL_TIMESTAMP_USECS
) !=
2680 (operand2_flags
& HIST_FIELD_FL_TIMESTAMP_USECS
)) {
2681 hist_err("Timestamp units in expression don't match", NULL
);
2688 static struct hist_field
*parse_expr(struct hist_trigger_data
*hist_data
,
2689 struct trace_event_file
*file
,
2690 char *str
, unsigned long flags
,
2691 char *var_name
, unsigned int level
)
2693 struct hist_field
*operand1
= NULL
, *operand2
= NULL
, *expr
= NULL
;
2694 unsigned long operand_flags
;
2695 int field_op
, ret
= -EINVAL
;
2696 char *sep
, *operand1_str
;
2699 hist_err("Too many subexpressions (3 max): ", str
);
2700 return ERR_PTR(-EINVAL
);
2703 field_op
= contains_operator(str
);
2705 if (field_op
== FIELD_OP_NONE
)
2706 return parse_atom(hist_data
, file
, str
, &flags
, var_name
);
2708 if (field_op
== FIELD_OP_UNARY_MINUS
)
2709 return parse_unary(hist_data
, file
, str
, flags
, var_name
, ++level
);
2712 case FIELD_OP_MINUS
:
2722 operand1_str
= strsep(&str
, sep
);
2723 if (!operand1_str
|| !str
)
2727 operand1
= parse_atom(hist_data
, file
, operand1_str
,
2728 &operand_flags
, NULL
);
2729 if (IS_ERR(operand1
)) {
2730 ret
= PTR_ERR(operand1
);
2735 /* rest of string could be another expression e.g. b+c in a+b+c */
2737 operand2
= parse_expr(hist_data
, file
, str
, operand_flags
, NULL
, ++level
);
2738 if (IS_ERR(operand2
)) {
2739 ret
= PTR_ERR(operand2
);
2744 ret
= check_expr_operands(operand1
, operand2
);
2748 flags
|= HIST_FIELD_FL_EXPR
;
2750 flags
|= operand1
->flags
&
2751 (HIST_FIELD_FL_TIMESTAMP
| HIST_FIELD_FL_TIMESTAMP_USECS
);
2753 expr
= create_hist_field(hist_data
, NULL
, flags
, var_name
);
2759 operand1
->read_once
= true;
2760 operand2
->read_once
= true;
2762 expr
->operands
[0] = operand1
;
2763 expr
->operands
[1] = operand2
;
2764 expr
->operator = field_op
;
2765 expr
->name
= expr_str(expr
, 0);
2766 expr
->type
= kstrdup(operand1
->type
, GFP_KERNEL
);
2773 case FIELD_OP_MINUS
:
2774 expr
->fn
= hist_field_minus
;
2777 expr
->fn
= hist_field_plus
;
2786 destroy_hist_field(operand1
, 0);
2787 destroy_hist_field(operand2
, 0);
2788 destroy_hist_field(expr
, 0);
2790 return ERR_PTR(ret
);
2793 static char *find_trigger_filter(struct hist_trigger_data
*hist_data
,
2794 struct trace_event_file
*file
)
2796 struct event_trigger_data
*test
;
2798 list_for_each_entry_rcu(test
, &file
->triggers
, list
) {
2799 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
2800 if (test
->private_data
== hist_data
)
2801 return test
->filter_str
;
2808 static struct event_command trigger_hist_cmd
;
2809 static int event_hist_trigger_func(struct event_command
*cmd_ops
,
2810 struct trace_event_file
*file
,
2811 char *glob
, char *cmd
, char *param
);
2813 static bool compatible_keys(struct hist_trigger_data
*target_hist_data
,
2814 struct hist_trigger_data
*hist_data
,
2815 unsigned int n_keys
)
2817 struct hist_field
*target_hist_field
, *hist_field
;
2818 unsigned int n
, i
, j
;
2820 if (hist_data
->n_fields
- hist_data
->n_vals
!= n_keys
)
2823 i
= hist_data
->n_vals
;
2824 j
= target_hist_data
->n_vals
;
2826 for (n
= 0; n
< n_keys
; n
++) {
2827 hist_field
= hist_data
->fields
[i
+ n
];
2828 target_hist_field
= target_hist_data
->fields
[j
+ n
];
2830 if (strcmp(hist_field
->type
, target_hist_field
->type
) != 0)
2832 if (hist_field
->size
!= target_hist_field
->size
)
2834 if (hist_field
->is_signed
!= target_hist_field
->is_signed
)
2841 static struct hist_trigger_data
*
2842 find_compatible_hist(struct hist_trigger_data
*target_hist_data
,
2843 struct trace_event_file
*file
)
2845 struct hist_trigger_data
*hist_data
;
2846 struct event_trigger_data
*test
;
2847 unsigned int n_keys
;
2849 n_keys
= target_hist_data
->n_fields
- target_hist_data
->n_vals
;
2851 list_for_each_entry_rcu(test
, &file
->triggers
, list
) {
2852 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
2853 hist_data
= test
->private_data
;
2855 if (compatible_keys(target_hist_data
, hist_data
, n_keys
))
2863 static struct trace_event_file
*event_file(struct trace_array
*tr
,
2864 char *system
, char *event_name
)
2866 struct trace_event_file
*file
;
2868 file
= find_event_file(tr
, system
, event_name
);
2870 return ERR_PTR(-EINVAL
);
2875 static struct hist_field
*
2876 find_synthetic_field_var(struct hist_trigger_data
*target_hist_data
,
2877 char *system
, char *event_name
, char *field_name
)
2879 struct hist_field
*event_var
;
2880 char *synthetic_name
;
2882 synthetic_name
= kzalloc(MAX_FILTER_STR_VAL
, GFP_KERNEL
);
2883 if (!synthetic_name
)
2884 return ERR_PTR(-ENOMEM
);
2886 strcpy(synthetic_name
, "synthetic_");
2887 strcat(synthetic_name
, field_name
);
2889 event_var
= find_event_var(target_hist_data
, system
, event_name
, synthetic_name
);
2891 kfree(synthetic_name
);
2897 * create_field_var_hist - Automatically create a histogram and var for a field
2898 * @target_hist_data: The target hist trigger
2899 * @subsys_name: Optional subsystem name
2900 * @event_name: Optional event name
2901 * @field_name: The name of the field (and the resulting variable)
2903 * Hist trigger actions fetch data from variables, not directly from
2904 * events. However, for convenience, users are allowed to directly
2905 * specify an event field in an action, which will be automatically
2906 * converted into a variable on their behalf.
2908 * If a user specifies a field on an event that isn't the event the
2909 * histogram currently being defined (the target event histogram), the
2910 * only way that can be accomplished is if a new hist trigger is
2911 * created and the field variable defined on that.
2913 * This function creates a new histogram compatible with the target
2914 * event (meaning a histogram with the same key as the target
2915 * histogram), and creates a variable for the specified field, but
2916 * with 'synthetic_' prepended to the variable name in order to avoid
2917 * collision with normal field variables.
2919 * Return: The variable created for the field.
2921 static struct hist_field
*
2922 create_field_var_hist(struct hist_trigger_data
*target_hist_data
,
2923 char *subsys_name
, char *event_name
, char *field_name
)
2925 struct trace_array
*tr
= target_hist_data
->event_file
->tr
;
2926 struct hist_field
*event_var
= ERR_PTR(-EINVAL
);
2927 struct hist_trigger_data
*hist_data
;
2928 unsigned int i
, n
, first
= true;
2929 struct field_var_hist
*var_hist
;
2930 struct trace_event_file
*file
;
2931 struct hist_field
*key_field
;
2936 if (target_hist_data
->n_field_var_hists
>= SYNTH_FIELDS_MAX
) {
2937 hist_err_event("onmatch: Too many field variables defined: ",
2938 subsys_name
, event_name
, field_name
);
2939 return ERR_PTR(-EINVAL
);
2942 file
= event_file(tr
, subsys_name
, event_name
);
2945 hist_err_event("onmatch: Event file not found: ",
2946 subsys_name
, event_name
, field_name
);
2947 ret
= PTR_ERR(file
);
2948 return ERR_PTR(ret
);
2952 * Look for a histogram compatible with target. We'll use the
2953 * found histogram specification to create a new matching
2954 * histogram with our variable on it. target_hist_data is not
2955 * yet a registered histogram so we can't use that.
2957 hist_data
= find_compatible_hist(target_hist_data
, file
);
2959 hist_err_event("onmatch: Matching event histogram not found: ",
2960 subsys_name
, event_name
, field_name
);
2961 return ERR_PTR(-EINVAL
);
2964 /* See if a synthetic field variable has already been created */
2965 event_var
= find_synthetic_field_var(target_hist_data
, subsys_name
,
2966 event_name
, field_name
);
2967 if (!IS_ERR_OR_NULL(event_var
))
2970 var_hist
= kzalloc(sizeof(*var_hist
), GFP_KERNEL
);
2972 return ERR_PTR(-ENOMEM
);
2974 cmd
= kzalloc(MAX_FILTER_STR_VAL
, GFP_KERNEL
);
2977 return ERR_PTR(-ENOMEM
);
2980 /* Use the same keys as the compatible histogram */
2981 strcat(cmd
, "keys=");
2983 for_each_hist_key_field(i
, hist_data
) {
2984 key_field
= hist_data
->fields
[i
];
2987 strcat(cmd
, key_field
->field
->name
);
2991 /* Create the synthetic field variable specification */
2992 strcat(cmd
, ":synthetic_");
2993 strcat(cmd
, field_name
);
2995 strcat(cmd
, field_name
);
2997 /* Use the same filter as the compatible histogram */
2998 saved_filter
= find_trigger_filter(hist_data
, file
);
3000 strcat(cmd
, " if ");
3001 strcat(cmd
, saved_filter
);
3004 var_hist
->cmd
= kstrdup(cmd
, GFP_KERNEL
);
3005 if (!var_hist
->cmd
) {
3008 return ERR_PTR(-ENOMEM
);
3011 /* Save the compatible histogram information */
3012 var_hist
->hist_data
= hist_data
;
3014 /* Create the new histogram with our variable */
3015 ret
= event_hist_trigger_func(&trigger_hist_cmd
, file
,
3019 kfree(var_hist
->cmd
);
3021 hist_err_event("onmatch: Couldn't create histogram for field: ",
3022 subsys_name
, event_name
, field_name
);
3023 return ERR_PTR(ret
);
3028 /* If we can't find the variable, something went wrong */
3029 event_var
= find_synthetic_field_var(target_hist_data
, subsys_name
,
3030 event_name
, field_name
);
3031 if (IS_ERR_OR_NULL(event_var
)) {
3032 kfree(var_hist
->cmd
);
3034 hist_err_event("onmatch: Couldn't find synthetic variable: ",
3035 subsys_name
, event_name
, field_name
);
3036 return ERR_PTR(-EINVAL
);
3039 n
= target_hist_data
->n_field_var_hists
;
3040 target_hist_data
->field_var_hists
[n
] = var_hist
;
3041 target_hist_data
->n_field_var_hists
++;
3046 static struct hist_field
*
3047 find_target_event_var(struct hist_trigger_data
*hist_data
,
3048 char *subsys_name
, char *event_name
, char *var_name
)
3050 struct trace_event_file
*file
= hist_data
->event_file
;
3051 struct hist_field
*hist_field
= NULL
;
3054 struct trace_event_call
*call
;
3059 call
= file
->event_call
;
3061 if (strcmp(subsys_name
, call
->class->system
) != 0)
3064 if (strcmp(event_name
, trace_event_name(call
)) != 0)
3068 hist_field
= find_var_field(hist_data
, var_name
);
3073 static inline void __update_field_vars(struct tracing_map_elt
*elt
,
3074 struct ring_buffer_event
*rbe
,
3076 struct field_var
**field_vars
,
3077 unsigned int n_field_vars
,
3078 unsigned int field_var_str_start
)
3080 struct hist_elt_data
*elt_data
= elt
->private_data
;
3081 unsigned int i
, j
, var_idx
;
3084 for (i
= 0, j
= field_var_str_start
; i
< n_field_vars
; i
++) {
3085 struct field_var
*field_var
= field_vars
[i
];
3086 struct hist_field
*var
= field_var
->var
;
3087 struct hist_field
*val
= field_var
->val
;
3089 var_val
= val
->fn(val
, elt
, rbe
, rec
);
3090 var_idx
= var
->var
.idx
;
3092 if (val
->flags
& HIST_FIELD_FL_STRING
) {
3093 char *str
= elt_data
->field_var_str
[j
++];
3094 char *val_str
= (char *)(uintptr_t)var_val
;
3096 strscpy(str
, val_str
, STR_VAR_LEN_MAX
);
3097 var_val
= (u64
)(uintptr_t)str
;
3099 tracing_map_set_var(elt
, var_idx
, var_val
);
3103 static void update_field_vars(struct hist_trigger_data
*hist_data
,
3104 struct tracing_map_elt
*elt
,
3105 struct ring_buffer_event
*rbe
,
3108 __update_field_vars(elt
, rbe
, rec
, hist_data
->field_vars
,
3109 hist_data
->n_field_vars
, 0);
3112 static void update_max_vars(struct hist_trigger_data
*hist_data
,
3113 struct tracing_map_elt
*elt
,
3114 struct ring_buffer_event
*rbe
,
3117 __update_field_vars(elt
, rbe
, rec
, hist_data
->max_vars
,
3118 hist_data
->n_max_vars
, hist_data
->n_field_var_str
);
3121 static struct hist_field
*create_var(struct hist_trigger_data
*hist_data
,
3122 struct trace_event_file
*file
,
3123 char *name
, int size
, const char *type
)
3125 struct hist_field
*var
;
3128 if (find_var(hist_data
, file
, name
) && !hist_data
->remove
) {
3129 var
= ERR_PTR(-EINVAL
);
3133 var
= kzalloc(sizeof(struct hist_field
), GFP_KERNEL
);
3135 var
= ERR_PTR(-ENOMEM
);
3139 idx
= tracing_map_add_var(hist_data
->map
);
3142 var
= ERR_PTR(-EINVAL
);
3146 var
->flags
= HIST_FIELD_FL_VAR
;
3148 var
->var
.hist_data
= var
->hist_data
= hist_data
;
3150 var
->var
.name
= kstrdup(name
, GFP_KERNEL
);
3151 var
->type
= kstrdup(type
, GFP_KERNEL
);
3152 if (!var
->var
.name
|| !var
->type
) {
3153 kfree(var
->var
.name
);
3156 var
= ERR_PTR(-ENOMEM
);
3162 static struct field_var
*create_field_var(struct hist_trigger_data
*hist_data
,
3163 struct trace_event_file
*file
,
3166 struct hist_field
*val
= NULL
, *var
= NULL
;
3167 unsigned long flags
= HIST_FIELD_FL_VAR
;
3168 struct field_var
*field_var
;
3171 if (hist_data
->n_field_vars
>= SYNTH_FIELDS_MAX
) {
3172 hist_err("Too many field variables defined: ", field_name
);
3177 val
= parse_atom(hist_data
, file
, field_name
, &flags
, NULL
);
3179 hist_err("Couldn't parse field variable: ", field_name
);
3184 var
= create_var(hist_data
, file
, field_name
, val
->size
, val
->type
);
3186 hist_err("Couldn't create or find variable: ", field_name
);
3192 field_var
= kzalloc(sizeof(struct field_var
), GFP_KERNEL
);
3200 field_var
->var
= var
;
3201 field_var
->val
= val
;
3205 field_var
= ERR_PTR(ret
);
3210 * create_target_field_var - Automatically create a variable for a field
3211 * @target_hist_data: The target hist trigger
3212 * @subsys_name: Optional subsystem name
3213 * @event_name: Optional event name
3214 * @var_name: The name of the field (and the resulting variable)
3216 * Hist trigger actions fetch data from variables, not directly from
3217 * events. However, for convenience, users are allowed to directly
3218 * specify an event field in an action, which will be automatically
3219 * converted into a variable on their behalf.
3221 * This function creates a field variable with the name var_name on
3222 * the hist trigger currently being defined on the target event. If
3223 * subsys_name and event_name are specified, this function simply
3224 * verifies that they do in fact match the target event subsystem and
3227 * Return: The variable created for the field.
3229 static struct field_var
*
3230 create_target_field_var(struct hist_trigger_data
*target_hist_data
,
3231 char *subsys_name
, char *event_name
, char *var_name
)
3233 struct trace_event_file
*file
= target_hist_data
->event_file
;
3236 struct trace_event_call
*call
;
3241 call
= file
->event_call
;
3243 if (strcmp(subsys_name
, call
->class->system
) != 0)
3246 if (strcmp(event_name
, trace_event_name(call
)) != 0)
3250 return create_field_var(target_hist_data
, file
, var_name
);
3253 static void onmax_print(struct seq_file
*m
,
3254 struct hist_trigger_data
*hist_data
,
3255 struct tracing_map_elt
*elt
,
3256 struct action_data
*data
)
3258 unsigned int i
, save_var_idx
, max_idx
= data
->onmax
.max_var
->var
.idx
;
3260 seq_printf(m
, "\n\tmax: %10llu", tracing_map_read_var(elt
, max_idx
));
3262 for (i
= 0; i
< hist_data
->n_max_vars
; i
++) {
3263 struct hist_field
*save_val
= hist_data
->max_vars
[i
]->val
;
3264 struct hist_field
*save_var
= hist_data
->max_vars
[i
]->var
;
3267 save_var_idx
= save_var
->var
.idx
;
3269 val
= tracing_map_read_var(elt
, save_var_idx
);
3271 if (save_val
->flags
& HIST_FIELD_FL_STRING
) {
3272 seq_printf(m
, " %s: %-32s", save_var
->var
.name
,
3273 (char *)(uintptr_t)(val
));
3275 seq_printf(m
, " %s: %10llu", save_var
->var
.name
, val
);
3279 static void onmax_save(struct hist_trigger_data
*hist_data
,
3280 struct tracing_map_elt
*elt
, void *rec
,
3281 struct ring_buffer_event
*rbe
,
3282 struct action_data
*data
, u64
*var_ref_vals
)
3284 unsigned int max_idx
= data
->onmax
.max_var
->var
.idx
;
3285 unsigned int max_var_ref_idx
= data
->onmax
.max_var_ref_idx
;
3287 u64 var_val
, max_val
;
3289 var_val
= var_ref_vals
[max_var_ref_idx
];
3290 max_val
= tracing_map_read_var(elt
, max_idx
);
3292 if (var_val
<= max_val
)
3295 tracing_map_set_var(elt
, max_idx
, var_val
);
3297 update_max_vars(hist_data
, elt
, rbe
, rec
);
3300 static void onmax_destroy(struct action_data
*data
)
3304 destroy_hist_field(data
->onmax
.max_var
, 0);
3305 destroy_hist_field(data
->onmax
.var
, 0);
3307 kfree(data
->onmax
.var_str
);
3308 kfree(data
->onmax
.fn_name
);
3310 for (i
= 0; i
< data
->n_params
; i
++)
3311 kfree(data
->params
[i
]);
3316 static int onmax_create(struct hist_trigger_data
*hist_data
,
3317 struct action_data
*data
)
3319 struct trace_event_file
*file
= hist_data
->event_file
;
3320 struct hist_field
*var_field
, *ref_field
, *max_var
;
3321 unsigned int var_ref_idx
= hist_data
->n_var_refs
;
3322 struct field_var
*field_var
;
3323 char *onmax_var_str
, *param
;
3324 unsigned long flags
;
3328 onmax_var_str
= data
->onmax
.var_str
;
3329 if (onmax_var_str
[0] != '$') {
3330 hist_err("onmax: For onmax(x), x must be a variable: ", onmax_var_str
);
3335 var_field
= find_target_event_var(hist_data
, NULL
, NULL
, onmax_var_str
);
3337 hist_err("onmax: Couldn't find onmax variable: ", onmax_var_str
);
3341 flags
= HIST_FIELD_FL_VAR_REF
;
3342 ref_field
= create_hist_field(hist_data
, NULL
, flags
, NULL
);
3346 if (init_var_ref(ref_field
, var_field
, NULL
, NULL
)) {
3347 destroy_hist_field(ref_field
, 0);
3351 hist_data
->var_refs
[hist_data
->n_var_refs
] = ref_field
;
3352 ref_field
->var_ref_idx
= hist_data
->n_var_refs
++;
3353 data
->onmax
.var
= ref_field
;
3355 data
->fn
= onmax_save
;
3356 data
->onmax
.max_var_ref_idx
= var_ref_idx
;
3357 max_var
= create_var(hist_data
, file
, "max", sizeof(u64
), "u64");
3358 if (IS_ERR(max_var
)) {
3359 hist_err("onmax: Couldn't create onmax variable: ", "max");
3360 ret
= PTR_ERR(max_var
);
3363 data
->onmax
.max_var
= max_var
;
3365 for (i
= 0; i
< data
->n_params
; i
++) {
3366 param
= kstrdup(data
->params
[i
], GFP_KERNEL
);
3372 field_var
= create_target_field_var(hist_data
, NULL
, NULL
, param
);
3373 if (IS_ERR(field_var
)) {
3374 hist_err("onmax: Couldn't create field variable: ", param
);
3375 ret
= PTR_ERR(field_var
);
3380 hist_data
->max_vars
[hist_data
->n_max_vars
++] = field_var
;
3381 if (field_var
->val
->flags
& HIST_FIELD_FL_STRING
)
3382 hist_data
->n_max_var_str
++;
3390 static int parse_action_params(char *params
, struct action_data
*data
)
3392 char *param
, *saved_param
;
3396 if (data
->n_params
>= SYNTH_FIELDS_MAX
)
3399 param
= strsep(¶ms
, ",");
3405 param
= strstrip(param
);
3406 if (strlen(param
) < 2) {
3407 hist_err("Invalid action param: ", param
);
3412 saved_param
= kstrdup(param
, GFP_KERNEL
);
3418 data
->params
[data
->n_params
++] = saved_param
;
3424 static struct action_data
*onmax_parse(char *str
)
3426 char *onmax_fn_name
, *onmax_var_str
;
3427 struct action_data
*data
;
3430 data
= kzalloc(sizeof(*data
), GFP_KERNEL
);
3432 return ERR_PTR(-ENOMEM
);
3434 onmax_var_str
= strsep(&str
, ")");
3435 if (!onmax_var_str
|| !str
) {
3440 data
->onmax
.var_str
= kstrdup(onmax_var_str
, GFP_KERNEL
);
3441 if (!data
->onmax
.var_str
) {
3450 onmax_fn_name
= strsep(&str
, "(");
3451 if (!onmax_fn_name
|| !str
)
3454 if (strncmp(onmax_fn_name
, "save", strlen("save")) == 0) {
3455 char *params
= strsep(&str
, ")");
3462 ret
= parse_action_params(params
, data
);
3468 data
->onmax
.fn_name
= kstrdup(onmax_fn_name
, GFP_KERNEL
);
3469 if (!data
->onmax
.fn_name
) {
3476 onmax_destroy(data
);
3477 data
= ERR_PTR(ret
);
3481 static void onmatch_destroy(struct action_data
*data
)
3485 mutex_lock(&synth_event_mutex
);
3487 kfree(data
->onmatch
.match_event
);
3488 kfree(data
->onmatch
.match_event_system
);
3489 kfree(data
->onmatch
.synth_event_name
);
3491 for (i
= 0; i
< data
->n_params
; i
++)
3492 kfree(data
->params
[i
]);
3494 if (data
->onmatch
.synth_event
)
3495 data
->onmatch
.synth_event
->ref
--;
3499 mutex_unlock(&synth_event_mutex
);
3502 static void destroy_field_var(struct field_var
*field_var
)
3507 destroy_hist_field(field_var
->var
, 0);
3508 destroy_hist_field(field_var
->val
, 0);
3513 static void destroy_field_vars(struct hist_trigger_data
*hist_data
)
3517 for (i
= 0; i
< hist_data
->n_field_vars
; i
++)
3518 destroy_field_var(hist_data
->field_vars
[i
]);
3521 static void save_field_var(struct hist_trigger_data
*hist_data
,
3522 struct field_var
*field_var
)
3524 hist_data
->field_vars
[hist_data
->n_field_vars
++] = field_var
;
3526 if (field_var
->val
->flags
& HIST_FIELD_FL_STRING
)
3527 hist_data
->n_field_var_str
++;
3531 static void destroy_synth_var_refs(struct hist_trigger_data
*hist_data
)
3535 for (i
= 0; i
< hist_data
->n_synth_var_refs
; i
++)
3536 destroy_hist_field(hist_data
->synth_var_refs
[i
], 0);
3539 static void save_synth_var_ref(struct hist_trigger_data
*hist_data
,
3540 struct hist_field
*var_ref
)
3542 hist_data
->synth_var_refs
[hist_data
->n_synth_var_refs
++] = var_ref
;
3544 hist_data
->var_refs
[hist_data
->n_var_refs
] = var_ref
;
3545 var_ref
->var_ref_idx
= hist_data
->n_var_refs
++;
3548 static int check_synth_field(struct synth_event
*event
,
3549 struct hist_field
*hist_field
,
3550 unsigned int field_pos
)
3552 struct synth_field
*field
;
3554 if (field_pos
>= event
->n_fields
)
3557 field
= event
->fields
[field_pos
];
3559 if (strcmp(field
->type
, hist_field
->type
) != 0)
3565 static struct hist_field
*
3566 onmatch_find_var(struct hist_trigger_data
*hist_data
, struct action_data
*data
,
3567 char *system
, char *event
, char *var
)
3569 struct hist_field
*hist_field
;
3571 var
++; /* skip '$' */
3573 hist_field
= find_target_event_var(hist_data
, system
, event
, var
);
3576 system
= data
->onmatch
.match_event_system
;
3577 event
= data
->onmatch
.match_event
;
3580 hist_field
= find_event_var(hist_data
, system
, event
, var
);
3584 hist_err_event("onmatch: Couldn't find onmatch param: $", system
, event
, var
);
3589 static struct hist_field
*
3590 onmatch_create_field_var(struct hist_trigger_data
*hist_data
,
3591 struct action_data
*data
, char *system
,
3592 char *event
, char *var
)
3594 struct hist_field
*hist_field
= NULL
;
3595 struct field_var
*field_var
;
3598 * First try to create a field var on the target event (the
3599 * currently being defined). This will create a variable for
3600 * unqualified fields on the target event, or if qualified,
3601 * target fields that have qualified names matching the target.
3603 field_var
= create_target_field_var(hist_data
, system
, event
, var
);
3605 if (field_var
&& !IS_ERR(field_var
)) {
3606 save_field_var(hist_data
, field_var
);
3607 hist_field
= field_var
->var
;
3611 * If no explicit system.event is specfied, default to
3612 * looking for fields on the onmatch(system.event.xxx)
3616 system
= data
->onmatch
.match_event_system
;
3617 event
= data
->onmatch
.match_event
;
3621 * At this point, we're looking at a field on another
3622 * event. Because we can't modify a hist trigger on
3623 * another event to add a variable for a field, we need
3624 * to create a new trigger on that event and create the
3625 * variable at the same time.
3627 hist_field
= create_field_var_hist(hist_data
, system
, event
, var
);
3628 if (IS_ERR(hist_field
))
3634 destroy_field_var(field_var
);
3639 static int onmatch_create(struct hist_trigger_data
*hist_data
,
3640 struct trace_event_file
*file
,
3641 struct action_data
*data
)
3643 char *event_name
, *param
, *system
= NULL
;
3644 struct hist_field
*hist_field
, *var_ref
;
3645 unsigned int i
, var_ref_idx
;
3646 unsigned int field_pos
= 0;
3647 struct synth_event
*event
;
3650 mutex_lock(&synth_event_mutex
);
3651 event
= find_synth_event(data
->onmatch
.synth_event_name
);
3653 hist_err("onmatch: Couldn't find synthetic event: ", data
->onmatch
.synth_event_name
);
3654 mutex_unlock(&synth_event_mutex
);
3658 mutex_unlock(&synth_event_mutex
);
3660 var_ref_idx
= hist_data
->n_var_refs
;
3662 for (i
= 0; i
< data
->n_params
; i
++) {
3665 p
= param
= kstrdup(data
->params
[i
], GFP_KERNEL
);
3671 system
= strsep(¶m
, ".");
3673 param
= (char *)system
;
3674 system
= event_name
= NULL
;
3676 event_name
= strsep(¶m
, ".");
3684 if (param
[0] == '$')
3685 hist_field
= onmatch_find_var(hist_data
, data
, system
,
3688 hist_field
= onmatch_create_field_var(hist_data
, data
,
3699 if (check_synth_field(event
, hist_field
, field_pos
) == 0) {
3700 var_ref
= create_var_ref(hist_field
, system
, event_name
);
3707 save_synth_var_ref(hist_data
, var_ref
);
3713 hist_err_event("onmatch: Param type doesn't match synthetic event field type: ",
3714 system
, event_name
, param
);
3720 if (field_pos
!= event
->n_fields
) {
3721 hist_err("onmatch: Param count doesn't match synthetic event field count: ", event
->name
);
3726 data
->fn
= action_trace
;
3727 data
->onmatch
.synth_event
= event
;
3728 data
->onmatch
.var_ref_idx
= var_ref_idx
;
3732 mutex_lock(&synth_event_mutex
);
3734 mutex_unlock(&synth_event_mutex
);
3739 static struct action_data
*onmatch_parse(struct trace_array
*tr
, char *str
)
3741 char *match_event
, *match_event_system
;
3742 char *synth_event_name
, *params
;
3743 struct action_data
*data
;
3746 data
= kzalloc(sizeof(*data
), GFP_KERNEL
);
3748 return ERR_PTR(-ENOMEM
);
3750 match_event
= strsep(&str
, ")");
3751 if (!match_event
|| !str
) {
3752 hist_err("onmatch: Missing closing paren: ", match_event
);
3756 match_event_system
= strsep(&match_event
, ".");
3758 hist_err("onmatch: Missing subsystem for match event: ", match_event_system
);
3762 if (IS_ERR(event_file(tr
, match_event_system
, match_event
))) {
3763 hist_err_event("onmatch: Invalid subsystem or event name: ",
3764 match_event_system
, match_event
, NULL
);
3768 data
->onmatch
.match_event
= kstrdup(match_event
, GFP_KERNEL
);
3769 if (!data
->onmatch
.match_event
) {
3774 data
->onmatch
.match_event_system
= kstrdup(match_event_system
, GFP_KERNEL
);
3775 if (!data
->onmatch
.match_event_system
) {
3782 hist_err("onmatch: Missing . after onmatch(): ", str
);
3786 synth_event_name
= strsep(&str
, "(");
3787 if (!synth_event_name
|| !str
) {
3788 hist_err("onmatch: Missing opening paramlist paren: ", synth_event_name
);
3792 data
->onmatch
.synth_event_name
= kstrdup(synth_event_name
, GFP_KERNEL
);
3793 if (!data
->onmatch
.synth_event_name
) {
3798 params
= strsep(&str
, ")");
3799 if (!params
|| !str
|| (str
&& strlen(str
))) {
3800 hist_err("onmatch: Missing closing paramlist paren: ", params
);
3804 ret
= parse_action_params(params
, data
);
3810 onmatch_destroy(data
);
3811 data
= ERR_PTR(ret
);
3815 static int create_hitcount_val(struct hist_trigger_data
*hist_data
)
3817 hist_data
->fields
[HITCOUNT_IDX
] =
3818 create_hist_field(hist_data
, NULL
, HIST_FIELD_FL_HITCOUNT
, NULL
);
3819 if (!hist_data
->fields
[HITCOUNT_IDX
])
3822 hist_data
->n_vals
++;
3823 hist_data
->n_fields
++;
3825 if (WARN_ON(hist_data
->n_vals
> TRACING_MAP_VALS_MAX
))
3831 static int __create_val_field(struct hist_trigger_data
*hist_data
,
3832 unsigned int val_idx
,
3833 struct trace_event_file
*file
,
3834 char *var_name
, char *field_str
,
3835 unsigned long flags
)
3837 struct hist_field
*hist_field
;
3840 hist_field
= parse_expr(hist_data
, file
, field_str
, flags
, var_name
, 0);
3841 if (IS_ERR(hist_field
)) {
3842 ret
= PTR_ERR(hist_field
);
3846 hist_data
->fields
[val_idx
] = hist_field
;
3848 ++hist_data
->n_vals
;
3849 ++hist_data
->n_fields
;
3851 if (WARN_ON(hist_data
->n_vals
> TRACING_MAP_VALS_MAX
+ TRACING_MAP_VARS_MAX
))
3857 static int create_val_field(struct hist_trigger_data
*hist_data
,
3858 unsigned int val_idx
,
3859 struct trace_event_file
*file
,
3862 if (WARN_ON(val_idx
>= TRACING_MAP_VALS_MAX
))
3865 return __create_val_field(hist_data
, val_idx
, file
, NULL
, field_str
, 0);
3868 static int create_var_field(struct hist_trigger_data
*hist_data
,
3869 unsigned int val_idx
,
3870 struct trace_event_file
*file
,
3871 char *var_name
, char *expr_str
)
3873 unsigned long flags
= 0;
3875 if (WARN_ON(val_idx
>= TRACING_MAP_VALS_MAX
+ TRACING_MAP_VARS_MAX
))
3878 if (find_var(hist_data
, file
, var_name
) && !hist_data
->remove
) {
3879 hist_err("Variable already defined: ", var_name
);
3883 flags
|= HIST_FIELD_FL_VAR
;
3884 hist_data
->n_vars
++;
3885 if (WARN_ON(hist_data
->n_vars
> TRACING_MAP_VARS_MAX
))
3888 return __create_val_field(hist_data
, val_idx
, file
, var_name
, expr_str
, flags
);
3891 static int create_val_fields(struct hist_trigger_data
*hist_data
,
3892 struct trace_event_file
*file
)
3894 char *fields_str
, *field_str
;
3895 unsigned int i
, j
= 1;
3898 ret
= create_hitcount_val(hist_data
);
3902 fields_str
= hist_data
->attrs
->vals_str
;
3906 strsep(&fields_str
, "=");
3910 for (i
= 0, j
= 1; i
< TRACING_MAP_VALS_MAX
&&
3911 j
< TRACING_MAP_VALS_MAX
; i
++) {
3912 field_str
= strsep(&fields_str
, ",");
3916 if (strcmp(field_str
, "hitcount") == 0)
3919 ret
= create_val_field(hist_data
, j
++, file
, field_str
);
3924 if (fields_str
&& (strcmp(fields_str
, "hitcount") != 0))
3930 static int create_key_field(struct hist_trigger_data
*hist_data
,
3931 unsigned int key_idx
,
3932 unsigned int key_offset
,
3933 struct trace_event_file
*file
,
3936 struct hist_field
*hist_field
= NULL
;
3938 unsigned long flags
= 0;
3939 unsigned int key_size
;
3942 if (WARN_ON(key_idx
>= HIST_FIELDS_MAX
))
3945 flags
|= HIST_FIELD_FL_KEY
;
3947 if (strcmp(field_str
, "stacktrace") == 0) {
3948 flags
|= HIST_FIELD_FL_STACKTRACE
;
3949 key_size
= sizeof(unsigned long) * HIST_STACKTRACE_DEPTH
;
3950 hist_field
= create_hist_field(hist_data
, NULL
, flags
, NULL
);
3952 hist_field
= parse_expr(hist_data
, file
, field_str
, flags
,
3954 if (IS_ERR(hist_field
)) {
3955 ret
= PTR_ERR(hist_field
);
3959 if (hist_field
->flags
& HIST_FIELD_FL_VAR_REF
) {
3960 hist_err("Using variable references as keys not supported: ", field_str
);
3961 destroy_hist_field(hist_field
, 0);
3966 key_size
= hist_field
->size
;
3969 hist_data
->fields
[key_idx
] = hist_field
;
3971 key_size
= ALIGN(key_size
, sizeof(u64
));
3972 hist_data
->fields
[key_idx
]->size
= key_size
;
3973 hist_data
->fields
[key_idx
]->offset
= key_offset
;
3975 hist_data
->key_size
+= key_size
;
3977 if (hist_data
->key_size
> HIST_KEY_SIZE_MAX
) {
3982 hist_data
->n_keys
++;
3983 hist_data
->n_fields
++;
3985 if (WARN_ON(hist_data
->n_keys
> TRACING_MAP_KEYS_MAX
))
3993 static int create_key_fields(struct hist_trigger_data
*hist_data
,
3994 struct trace_event_file
*file
)
3996 unsigned int i
, key_offset
= 0, n_vals
= hist_data
->n_vals
;
3997 char *fields_str
, *field_str
;
4000 fields_str
= hist_data
->attrs
->keys_str
;
4004 strsep(&fields_str
, "=");
4008 for (i
= n_vals
; i
< n_vals
+ TRACING_MAP_KEYS_MAX
; i
++) {
4009 field_str
= strsep(&fields_str
, ",");
4012 ret
= create_key_field(hist_data
, i
, key_offset
,
4027 static int create_var_fields(struct hist_trigger_data
*hist_data
,
4028 struct trace_event_file
*file
)
4030 unsigned int i
, j
= hist_data
->n_vals
;
4033 unsigned int n_vars
= hist_data
->attrs
->var_defs
.n_vars
;
4035 for (i
= 0; i
< n_vars
; i
++) {
4036 char *var_name
= hist_data
->attrs
->var_defs
.name
[i
];
4037 char *expr
= hist_data
->attrs
->var_defs
.expr
[i
];
4039 ret
= create_var_field(hist_data
, j
++, file
, var_name
, expr
);
4047 static void free_var_defs(struct hist_trigger_data
*hist_data
)
4051 for (i
= 0; i
< hist_data
->attrs
->var_defs
.n_vars
; i
++) {
4052 kfree(hist_data
->attrs
->var_defs
.name
[i
]);
4053 kfree(hist_data
->attrs
->var_defs
.expr
[i
]);
4056 hist_data
->attrs
->var_defs
.n_vars
= 0;
4059 static int parse_var_defs(struct hist_trigger_data
*hist_data
)
4061 char *s
, *str
, *var_name
, *field_str
;
4062 unsigned int i
, j
, n_vars
= 0;
4065 for (i
= 0; i
< hist_data
->attrs
->n_assignments
; i
++) {
4066 str
= hist_data
->attrs
->assignment_str
[i
];
4067 for (j
= 0; j
< TRACING_MAP_VARS_MAX
; j
++) {
4068 field_str
= strsep(&str
, ",");
4072 var_name
= strsep(&field_str
, "=");
4073 if (!var_name
|| !field_str
) {
4074 hist_err("Malformed assignment: ", var_name
);
4079 if (n_vars
== TRACING_MAP_VARS_MAX
) {
4080 hist_err("Too many variables defined: ", var_name
);
4085 s
= kstrdup(var_name
, GFP_KERNEL
);
4090 hist_data
->attrs
->var_defs
.name
[n_vars
] = s
;
4092 s
= kstrdup(field_str
, GFP_KERNEL
);
4094 kfree(hist_data
->attrs
->var_defs
.name
[n_vars
]);
4098 hist_data
->attrs
->var_defs
.expr
[n_vars
++] = s
;
4100 hist_data
->attrs
->var_defs
.n_vars
= n_vars
;
4106 free_var_defs(hist_data
);
/*
 * Create all histogram fields: values, variables, then keys.  The
 * temporary var_defs strings are freed whether or not we succeed.
 */
static int create_hist_fields(struct hist_trigger_data *hist_data,
			      struct trace_event_file *file)
{
	int ret;

	ret = parse_var_defs(hist_data);
	if (ret)
		goto out;

	ret = create_val_fields(hist_data, file);
	if (ret)
		goto out;

	ret = create_var_fields(hist_data, file);
	if (ret)
		goto out;

	ret = create_key_fields(hist_data, file);
 out:
	free_var_defs(hist_data);

	return ret;
}
/*
 * Map a sort-direction modifier to a flag: 1 for "descending", 0 for
 * "ascending" or no modifier (NULL), -EINVAL for anything else.
 */
static int is_descending(const char *str)
{
	if (!str)
		return 0;

	if (strcmp(str, "descending") == 0)
		return 1;

	if (strcmp(str, "ascending") == 0)
		return 0;

	return -EINVAL;
}
4151 static int create_sort_keys(struct hist_trigger_data
*hist_data
)
4153 char *fields_str
= hist_data
->attrs
->sort_key_str
;
4154 struct tracing_map_sort_key
*sort_key
;
4155 int descending
, ret
= 0;
4156 unsigned int i
, j
, k
;
4158 hist_data
->n_sort_keys
= 1; /* we always have at least one, hitcount */
4163 strsep(&fields_str
, "=");
4169 for (i
= 0; i
< TRACING_MAP_SORT_KEYS_MAX
; i
++) {
4170 struct hist_field
*hist_field
;
4171 char *field_str
, *field_name
;
4172 const char *test_name
;
4174 sort_key
= &hist_data
->sort_keys
[i
];
4176 field_str
= strsep(&fields_str
, ",");
4183 if ((i
== TRACING_MAP_SORT_KEYS_MAX
- 1) && fields_str
) {
4188 field_name
= strsep(&field_str
, ".");
4194 if (strcmp(field_name
, "hitcount") == 0) {
4195 descending
= is_descending(field_str
);
4196 if (descending
< 0) {
4200 sort_key
->descending
= descending
;
4204 for (j
= 1, k
= 1; j
< hist_data
->n_fields
; j
++) {
4207 hist_field
= hist_data
->fields
[j
];
4208 if (hist_field
->flags
& HIST_FIELD_FL_VAR
)
4213 test_name
= hist_field_name(hist_field
, 0);
4215 if (strcmp(field_name
, test_name
) == 0) {
4216 sort_key
->field_idx
= idx
;
4217 descending
= is_descending(field_str
);
4218 if (descending
< 0) {
4222 sort_key
->descending
= descending
;
4226 if (j
== hist_data
->n_fields
) {
4232 hist_data
->n_sort_keys
= i
;
4237 static void destroy_actions(struct hist_trigger_data
*hist_data
)
4241 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
4242 struct action_data
*data
= hist_data
->actions
[i
];
4244 if (data
->fn
== action_trace
)
4245 onmatch_destroy(data
);
4246 else if (data
->fn
== onmax_save
)
4247 onmax_destroy(data
);
4253 static int parse_actions(struct hist_trigger_data
*hist_data
)
4255 struct trace_array
*tr
= hist_data
->event_file
->tr
;
4256 struct action_data
*data
;
4261 for (i
= 0; i
< hist_data
->attrs
->n_actions
; i
++) {
4262 str
= hist_data
->attrs
->action_str
[i
];
4264 if (strncmp(str
, "onmatch(", strlen("onmatch(")) == 0) {
4265 char *action_str
= str
+ strlen("onmatch(");
4267 data
= onmatch_parse(tr
, action_str
);
4269 ret
= PTR_ERR(data
);
4272 data
->fn
= action_trace
;
4273 } else if (strncmp(str
, "onmax(", strlen("onmax(")) == 0) {
4274 char *action_str
= str
+ strlen("onmax(");
4276 data
= onmax_parse(action_str
);
4278 ret
= PTR_ERR(data
);
4281 data
->fn
= onmax_save
;
4287 hist_data
->actions
[hist_data
->n_actions
++] = data
;
4293 static int create_actions(struct hist_trigger_data
*hist_data
,
4294 struct trace_event_file
*file
)
4296 struct action_data
*data
;
4300 for (i
= 0; i
< hist_data
->attrs
->n_actions
; i
++) {
4301 data
= hist_data
->actions
[i
];
4303 if (data
->fn
== action_trace
) {
4304 ret
= onmatch_create(hist_data
, file
, data
);
4307 } else if (data
->fn
== onmax_save
) {
4308 ret
= onmax_create(hist_data
, data
);
4317 static void print_actions(struct seq_file
*m
,
4318 struct hist_trigger_data
*hist_data
,
4319 struct tracing_map_elt
*elt
)
4323 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
4324 struct action_data
*data
= hist_data
->actions
[i
];
4326 if (data
->fn
== onmax_save
)
4327 onmax_print(m
, hist_data
, elt
, data
);
4331 static void print_onmax_spec(struct seq_file
*m
,
4332 struct hist_trigger_data
*hist_data
,
4333 struct action_data
*data
)
4337 seq_puts(m
, ":onmax(");
4338 seq_printf(m
, "%s", data
->onmax
.var_str
);
4339 seq_printf(m
, ").%s(", data
->onmax
.fn_name
);
4341 for (i
= 0; i
< hist_data
->n_max_vars
; i
++) {
4342 seq_printf(m
, "%s", hist_data
->max_vars
[i
]->var
->var
.name
);
4343 if (i
< hist_data
->n_max_vars
- 1)
4349 static void print_onmatch_spec(struct seq_file
*m
,
4350 struct hist_trigger_data
*hist_data
,
4351 struct action_data
*data
)
4355 seq_printf(m
, ":onmatch(%s.%s).", data
->onmatch
.match_event_system
,
4356 data
->onmatch
.match_event
);
4358 seq_printf(m
, "%s(", data
->onmatch
.synth_event
->name
);
4360 for (i
= 0; i
< data
->n_params
; i
++) {
4363 seq_printf(m
, "%s", data
->params
[i
]);
4369 static bool actions_match(struct hist_trigger_data
*hist_data
,
4370 struct hist_trigger_data
*hist_data_test
)
4374 if (hist_data
->n_actions
!= hist_data_test
->n_actions
)
4377 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
4378 struct action_data
*data
= hist_data
->actions
[i
];
4379 struct action_data
*data_test
= hist_data_test
->actions
[i
];
4381 if (data
->fn
!= data_test
->fn
)
4384 if (data
->n_params
!= data_test
->n_params
)
4387 for (j
= 0; j
< data
->n_params
; j
++) {
4388 if (strcmp(data
->params
[j
], data_test
->params
[j
]) != 0)
4392 if (data
->fn
== action_trace
) {
4393 if (strcmp(data
->onmatch
.synth_event_name
,
4394 data_test
->onmatch
.synth_event_name
) != 0)
4396 if (strcmp(data
->onmatch
.match_event_system
,
4397 data_test
->onmatch
.match_event_system
) != 0)
4399 if (strcmp(data
->onmatch
.match_event
,
4400 data_test
->onmatch
.match_event
) != 0)
4402 } else if (data
->fn
== onmax_save
) {
4403 if (strcmp(data
->onmax
.var_str
,
4404 data_test
->onmax
.var_str
) != 0)
4406 if (strcmp(data
->onmax
.fn_name
,
4407 data_test
->onmax
.fn_name
) != 0)
4416 static void print_actions_spec(struct seq_file
*m
,
4417 struct hist_trigger_data
*hist_data
)
4421 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
4422 struct action_data
*data
= hist_data
->actions
[i
];
4424 if (data
->fn
== action_trace
)
4425 print_onmatch_spec(m
, hist_data
, data
);
4426 else if (data
->fn
== onmax_save
)
4427 print_onmax_spec(m
, hist_data
, data
);
4431 static void destroy_field_var_hists(struct hist_trigger_data
*hist_data
)
4435 for (i
= 0; i
< hist_data
->n_field_var_hists
; i
++) {
4436 kfree(hist_data
->field_var_hists
[i
]->cmd
);
4437 kfree(hist_data
->field_var_hists
[i
]);
4441 static void destroy_hist_data(struct hist_trigger_data
*hist_data
)
4446 destroy_hist_trigger_attrs(hist_data
->attrs
);
4447 destroy_hist_fields(hist_data
);
4448 tracing_map_destroy(hist_data
->map
);
4450 destroy_actions(hist_data
);
4451 destroy_field_vars(hist_data
);
4452 destroy_field_var_hists(hist_data
);
4453 destroy_synth_var_refs(hist_data
);
4458 static int create_tracing_map_fields(struct hist_trigger_data
*hist_data
)
4460 struct tracing_map
*map
= hist_data
->map
;
4461 struct ftrace_event_field
*field
;
4462 struct hist_field
*hist_field
;
4465 for_each_hist_field(i
, hist_data
) {
4466 hist_field
= hist_data
->fields
[i
];
4467 if (hist_field
->flags
& HIST_FIELD_FL_KEY
) {
4468 tracing_map_cmp_fn_t cmp_fn
;
4470 field
= hist_field
->field
;
4472 if (hist_field
->flags
& HIST_FIELD_FL_STACKTRACE
)
4473 cmp_fn
= tracing_map_cmp_none
;
4475 cmp_fn
= tracing_map_cmp_num(hist_field
->size
,
4476 hist_field
->is_signed
);
4477 else if (is_string_field(field
))
4478 cmp_fn
= tracing_map_cmp_string
;
4480 cmp_fn
= tracing_map_cmp_num(field
->size
,
4482 idx
= tracing_map_add_key_field(map
,
4485 } else if (!(hist_field
->flags
& HIST_FIELD_FL_VAR
))
4486 idx
= tracing_map_add_sum_field(map
);
4491 if (hist_field
->flags
& HIST_FIELD_FL_VAR
) {
4492 idx
= tracing_map_add_var(map
);
4495 hist_field
->var
.idx
= idx
;
4496 hist_field
->var
.hist_data
= hist_data
;
4503 static struct hist_trigger_data
*
4504 create_hist_data(unsigned int map_bits
,
4505 struct hist_trigger_attrs
*attrs
,
4506 struct trace_event_file
*file
,
4509 const struct tracing_map_ops
*map_ops
= NULL
;
4510 struct hist_trigger_data
*hist_data
;
4513 hist_data
= kzalloc(sizeof(*hist_data
), GFP_KERNEL
);
4515 return ERR_PTR(-ENOMEM
);
4517 hist_data
->attrs
= attrs
;
4518 hist_data
->remove
= remove
;
4519 hist_data
->event_file
= file
;
4521 ret
= parse_actions(hist_data
);
4525 ret
= create_hist_fields(hist_data
, file
);
4529 ret
= create_sort_keys(hist_data
);
4533 map_ops
= &hist_trigger_elt_data_ops
;
4535 hist_data
->map
= tracing_map_create(map_bits
, hist_data
->key_size
,
4536 map_ops
, hist_data
);
4537 if (IS_ERR(hist_data
->map
)) {
4538 ret
= PTR_ERR(hist_data
->map
);
4539 hist_data
->map
= NULL
;
4543 ret
= create_tracing_map_fields(hist_data
);
4549 hist_data
->attrs
= NULL
;
4551 destroy_hist_data(hist_data
);
4553 hist_data
= ERR_PTR(ret
);
4558 static void hist_trigger_elt_update(struct hist_trigger_data
*hist_data
,
4559 struct tracing_map_elt
*elt
, void *rec
,
4560 struct ring_buffer_event
*rbe
,
4563 struct hist_elt_data
*elt_data
;
4564 struct hist_field
*hist_field
;
4565 unsigned int i
, var_idx
;
4568 elt_data
= elt
->private_data
;
4569 elt_data
->var_ref_vals
= var_ref_vals
;
4571 for_each_hist_val_field(i
, hist_data
) {
4572 hist_field
= hist_data
->fields
[i
];
4573 hist_val
= hist_field
->fn(hist_field
, elt
, rbe
, rec
);
4574 if (hist_field
->flags
& HIST_FIELD_FL_VAR
) {
4575 var_idx
= hist_field
->var
.idx
;
4576 tracing_map_set_var(elt
, var_idx
, hist_val
);
4579 tracing_map_update_sum(elt
, i
, hist_val
);
4582 for_each_hist_key_field(i
, hist_data
) {
4583 hist_field
= hist_data
->fields
[i
];
4584 if (hist_field
->flags
& HIST_FIELD_FL_VAR
) {
4585 hist_val
= hist_field
->fn(hist_field
, elt
, rbe
, rec
);
4586 var_idx
= hist_field
->var
.idx
;
4587 tracing_map_set_var(elt
, var_idx
, hist_val
);
4591 update_field_vars(hist_data
, elt
, rbe
, rec
);
4594 static inline void add_to_key(char *compound_key
, void *key
,
4595 struct hist_field
*key_field
, void *rec
)
4597 size_t size
= key_field
->size
;
4599 if (key_field
->flags
& HIST_FIELD_FL_STRING
) {
4600 struct ftrace_event_field
*field
;
4602 field
= key_field
->field
;
4603 if (field
->filter_type
== FILTER_DYN_STRING
)
4604 size
= *(u32
*)(rec
+ field
->offset
) >> 16;
4605 else if (field
->filter_type
== FILTER_PTR_STRING
)
4607 else if (field
->filter_type
== FILTER_STATIC_STRING
)
4610 /* ensure NULL-termination */
4611 if (size
> key_field
->size
- 1)
4612 size
= key_field
->size
- 1;
4615 memcpy(compound_key
+ key_field
->offset
, key
, size
);
4619 hist_trigger_actions(struct hist_trigger_data
*hist_data
,
4620 struct tracing_map_elt
*elt
, void *rec
,
4621 struct ring_buffer_event
*rbe
, u64
*var_ref_vals
)
4623 struct action_data
*data
;
4626 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
4627 data
= hist_data
->actions
[i
];
4628 data
->fn(hist_data
, elt
, rec
, rbe
, data
, var_ref_vals
);
4632 static void event_hist_trigger(struct event_trigger_data
*data
, void *rec
,
4633 struct ring_buffer_event
*rbe
)
4635 struct hist_trigger_data
*hist_data
= data
->private_data
;
4636 bool use_compound_key
= (hist_data
->n_keys
> 1);
4637 unsigned long entries
[HIST_STACKTRACE_DEPTH
];
4638 u64 var_ref_vals
[TRACING_MAP_VARS_MAX
];
4639 char compound_key
[HIST_KEY_SIZE_MAX
];
4640 struct tracing_map_elt
*elt
= NULL
;
4641 struct stack_trace stacktrace
;
4642 struct hist_field
*key_field
;
4647 memset(compound_key
, 0, hist_data
->key_size
);
4649 for_each_hist_key_field(i
, hist_data
) {
4650 key_field
= hist_data
->fields
[i
];
4652 if (key_field
->flags
& HIST_FIELD_FL_STACKTRACE
) {
4653 stacktrace
.max_entries
= HIST_STACKTRACE_DEPTH
;
4654 stacktrace
.entries
= entries
;
4655 stacktrace
.nr_entries
= 0;
4656 stacktrace
.skip
= HIST_STACKTRACE_SKIP
;
4658 memset(stacktrace
.entries
, 0, HIST_STACKTRACE_SIZE
);
4659 save_stack_trace(&stacktrace
);
4663 field_contents
= key_field
->fn(key_field
, elt
, rbe
, rec
);
4664 if (key_field
->flags
& HIST_FIELD_FL_STRING
) {
4665 key
= (void *)(unsigned long)field_contents
;
4666 use_compound_key
= true;
4668 key
= (void *)&field_contents
;
4671 if (use_compound_key
)
4672 add_to_key(compound_key
, key
, key_field
, rec
);
4675 if (use_compound_key
)
4678 if (hist_data
->n_var_refs
&&
4679 !resolve_var_refs(hist_data
, key
, var_ref_vals
, false))
4682 elt
= tracing_map_insert(hist_data
->map
, key
);
4686 hist_trigger_elt_update(hist_data
, elt
, rec
, rbe
, var_ref_vals
);
4688 if (resolve_var_refs(hist_data
, key
, var_ref_vals
, true))
4689 hist_trigger_actions(hist_data
, elt
, rec
, rbe
, var_ref_vals
);
4692 static void hist_trigger_stacktrace_print(struct seq_file
*m
,
4693 unsigned long *stacktrace_entries
,
4694 unsigned int max_entries
)
4696 char str
[KSYM_SYMBOL_LEN
];
4697 unsigned int spaces
= 8;
4700 for (i
= 0; i
< max_entries
; i
++) {
4701 if (stacktrace_entries
[i
] == ULONG_MAX
)
4704 seq_printf(m
, "%*c", 1 + spaces
, ' ');
4705 sprint_symbol(str
, stacktrace_entries
[i
]);
4706 seq_printf(m
, "%s\n", str
);
4711 hist_trigger_entry_print(struct seq_file
*m
,
4712 struct hist_trigger_data
*hist_data
, void *key
,
4713 struct tracing_map_elt
*elt
)
4715 struct hist_field
*key_field
;
4716 char str
[KSYM_SYMBOL_LEN
];
4717 bool multiline
= false;
4718 const char *field_name
;
4724 for_each_hist_key_field(i
, hist_data
) {
4725 key_field
= hist_data
->fields
[i
];
4727 if (i
> hist_data
->n_vals
)
4730 field_name
= hist_field_name(key_field
, 0);
4732 if (key_field
->flags
& HIST_FIELD_FL_HEX
) {
4733 uval
= *(u64
*)(key
+ key_field
->offset
);
4734 seq_printf(m
, "%s: %llx", field_name
, uval
);
4735 } else if (key_field
->flags
& HIST_FIELD_FL_SYM
) {
4736 uval
= *(u64
*)(key
+ key_field
->offset
);
4737 sprint_symbol_no_offset(str
, uval
);
4738 seq_printf(m
, "%s: [%llx] %-45s", field_name
,
4740 } else if (key_field
->flags
& HIST_FIELD_FL_SYM_OFFSET
) {
4741 uval
= *(u64
*)(key
+ key_field
->offset
);
4742 sprint_symbol(str
, uval
);
4743 seq_printf(m
, "%s: [%llx] %-55s", field_name
,
4745 } else if (key_field
->flags
& HIST_FIELD_FL_EXECNAME
) {
4746 struct hist_elt_data
*elt_data
= elt
->private_data
;
4749 if (WARN_ON_ONCE(!elt_data
))
4752 comm
= elt_data
->comm
;
4754 uval
= *(u64
*)(key
+ key_field
->offset
);
4755 seq_printf(m
, "%s: %-16s[%10llu]", field_name
,
4757 } else if (key_field
->flags
& HIST_FIELD_FL_SYSCALL
) {
4758 const char *syscall_name
;
4760 uval
= *(u64
*)(key
+ key_field
->offset
);
4761 syscall_name
= get_syscall_name(uval
);
4763 syscall_name
= "unknown_syscall";
4765 seq_printf(m
, "%s: %-30s[%3llu]", field_name
,
4766 syscall_name
, uval
);
4767 } else if (key_field
->flags
& HIST_FIELD_FL_STACKTRACE
) {
4768 seq_puts(m
, "stacktrace:\n");
4769 hist_trigger_stacktrace_print(m
,
4770 key
+ key_field
->offset
,
4771 HIST_STACKTRACE_DEPTH
);
4773 } else if (key_field
->flags
& HIST_FIELD_FL_LOG2
) {
4774 seq_printf(m
, "%s: ~ 2^%-2llu", field_name
,
4775 *(u64
*)(key
+ key_field
->offset
));
4776 } else if (key_field
->flags
& HIST_FIELD_FL_STRING
) {
4777 seq_printf(m
, "%s: %-50s", field_name
,
4778 (char *)(key
+ key_field
->offset
));
4780 uval
= *(u64
*)(key
+ key_field
->offset
);
4781 seq_printf(m
, "%s: %10llu", field_name
, uval
);
4790 seq_printf(m
, " hitcount: %10llu",
4791 tracing_map_read_sum(elt
, HITCOUNT_IDX
));
4793 for (i
= 1; i
< hist_data
->n_vals
; i
++) {
4794 field_name
= hist_field_name(hist_data
->fields
[i
], 0);
4796 if (hist_data
->fields
[i
]->flags
& HIST_FIELD_FL_VAR
||
4797 hist_data
->fields
[i
]->flags
& HIST_FIELD_FL_EXPR
)
4800 if (hist_data
->fields
[i
]->flags
& HIST_FIELD_FL_HEX
) {
4801 seq_printf(m
, " %s: %10llx", field_name
,
4802 tracing_map_read_sum(elt
, i
));
4804 seq_printf(m
, " %s: %10llu", field_name
,
4805 tracing_map_read_sum(elt
, i
));
4809 print_actions(m
, hist_data
, elt
);
4814 static int print_entries(struct seq_file
*m
,
4815 struct hist_trigger_data
*hist_data
)
4817 struct tracing_map_sort_entry
**sort_entries
= NULL
;
4818 struct tracing_map
*map
= hist_data
->map
;
4821 n_entries
= tracing_map_sort_entries(map
, hist_data
->sort_keys
,
4822 hist_data
->n_sort_keys
,
4827 for (i
= 0; i
< n_entries
; i
++)
4828 hist_trigger_entry_print(m
, hist_data
,
4829 sort_entries
[i
]->key
,
4830 sort_entries
[i
]->elt
);
4832 tracing_map_destroy_sort_entries(sort_entries
, n_entries
);
4837 static void hist_trigger_show(struct seq_file
*m
,
4838 struct event_trigger_data
*data
, int n
)
4840 struct hist_trigger_data
*hist_data
;
4844 seq_puts(m
, "\n\n");
4846 seq_puts(m
, "# event histogram\n#\n# trigger info: ");
4847 data
->ops
->print(m
, data
->ops
, data
);
4848 seq_puts(m
, "#\n\n");
4850 hist_data
= data
->private_data
;
4851 n_entries
= print_entries(m
, hist_data
);
4855 seq_printf(m
, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n",
4856 (u64
)atomic64_read(&hist_data
->map
->hits
),
4857 n_entries
, (u64
)atomic64_read(&hist_data
->map
->drops
));
4860 static int hist_show(struct seq_file
*m
, void *v
)
4862 struct event_trigger_data
*data
;
4863 struct trace_event_file
*event_file
;
4866 mutex_lock(&event_mutex
);
4868 event_file
= event_file_data(m
->private);
4869 if (unlikely(!event_file
)) {
4874 list_for_each_entry_rcu(data
, &event_file
->triggers
, list
) {
4875 if (data
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
)
4876 hist_trigger_show(m
, data
, n
++);
4879 if (have_hist_err()) {
4880 seq_printf(m
, "\nERROR: %s\n", hist_err_str
);
4881 seq_printf(m
, " Last command: %s\n", last_hist_cmd
);
4885 mutex_unlock(&event_mutex
);
4890 static int event_hist_open(struct inode
*inode
, struct file
*file
)
4892 return single_open(file
, hist_show
, file
);
4895 const struct file_operations event_hist_fops
= {
4896 .open
= event_hist_open
,
4898 .llseek
= seq_lseek
,
4899 .release
= single_release
,
4902 static void hist_field_print(struct seq_file
*m
, struct hist_field
*hist_field
)
4904 const char *field_name
= hist_field_name(hist_field
, 0);
4906 if (hist_field
->var
.name
)
4907 seq_printf(m
, "%s=", hist_field
->var
.name
);
4909 if (hist_field
->flags
& HIST_FIELD_FL_CPU
)
4911 else if (field_name
) {
4912 if (hist_field
->flags
& HIST_FIELD_FL_VAR_REF
||
4913 hist_field
->flags
& HIST_FIELD_FL_ALIAS
)
4915 seq_printf(m
, "%s", field_name
);
4916 } else if (hist_field
->flags
& HIST_FIELD_FL_TIMESTAMP
)
4917 seq_puts(m
, "common_timestamp");
4919 if (hist_field
->flags
) {
4920 if (!(hist_field
->flags
& HIST_FIELD_FL_VAR_REF
) &&
4921 !(hist_field
->flags
& HIST_FIELD_FL_EXPR
)) {
4922 const char *flags
= get_hist_field_flags(hist_field
);
4925 seq_printf(m
, ".%s", flags
);
4930 static int event_hist_trigger_print(struct seq_file
*m
,
4931 struct event_trigger_ops
*ops
,
4932 struct event_trigger_data
*data
)
4934 struct hist_trigger_data
*hist_data
= data
->private_data
;
4935 struct hist_field
*field
;
4936 bool have_var
= false;
4939 seq_puts(m
, "hist:");
4942 seq_printf(m
, "%s:", data
->name
);
4944 seq_puts(m
, "keys=");
4946 for_each_hist_key_field(i
, hist_data
) {
4947 field
= hist_data
->fields
[i
];
4949 if (i
> hist_data
->n_vals
)
4952 if (field
->flags
& HIST_FIELD_FL_STACKTRACE
)
4953 seq_puts(m
, "stacktrace");
4955 hist_field_print(m
, field
);
4958 seq_puts(m
, ":vals=");
4960 for_each_hist_val_field(i
, hist_data
) {
4961 field
= hist_data
->fields
[i
];
4962 if (field
->flags
& HIST_FIELD_FL_VAR
) {
4967 if (i
== HITCOUNT_IDX
)
4968 seq_puts(m
, "hitcount");
4971 hist_field_print(m
, field
);
4980 for_each_hist_val_field(i
, hist_data
) {
4981 field
= hist_data
->fields
[i
];
4983 if (field
->flags
& HIST_FIELD_FL_VAR
) {
4986 hist_field_print(m
, field
);
4991 seq_puts(m
, ":sort=");
4993 for (i
= 0; i
< hist_data
->n_sort_keys
; i
++) {
4994 struct tracing_map_sort_key
*sort_key
;
4995 unsigned int idx
, first_key_idx
;
4998 first_key_idx
= hist_data
->n_vals
- hist_data
->n_vars
;
5000 sort_key
= &hist_data
->sort_keys
[i
];
5001 idx
= sort_key
->field_idx
;
5003 if (WARN_ON(idx
>= HIST_FIELDS_MAX
))
5009 if (idx
== HITCOUNT_IDX
)
5010 seq_puts(m
, "hitcount");
5012 if (idx
>= first_key_idx
)
5013 idx
+= hist_data
->n_vars
;
5014 hist_field_print(m
, hist_data
->fields
[idx
]);
5017 if (sort_key
->descending
)
5018 seq_puts(m
, ".descending");
5020 seq_printf(m
, ":size=%u", (1 << hist_data
->map
->map_bits
));
5021 if (hist_data
->enable_timestamps
)
5022 seq_printf(m
, ":clock=%s", hist_data
->attrs
->clock
);
5024 print_actions_spec(m
, hist_data
);
5026 if (data
->filter_str
)
5027 seq_printf(m
, " if %s", data
->filter_str
);
5030 seq_puts(m
, " [paused]");
5032 seq_puts(m
, " [active]");
5039 static int event_hist_trigger_init(struct event_trigger_ops
*ops
,
5040 struct event_trigger_data
*data
)
5042 struct hist_trigger_data
*hist_data
= data
->private_data
;
5044 if (!data
->ref
&& hist_data
->attrs
->name
)
5045 save_named_trigger(hist_data
->attrs
->name
, data
);
5052 static void unregister_field_var_hists(struct hist_trigger_data
*hist_data
)
5054 struct trace_event_file
*file
;
5059 for (i
= 0; i
< hist_data
->n_field_var_hists
; i
++) {
5060 file
= hist_data
->field_var_hists
[i
]->hist_data
->event_file
;
5061 cmd
= hist_data
->field_var_hists
[i
]->cmd
;
5062 ret
= event_hist_trigger_func(&trigger_hist_cmd
, file
,
5063 "!hist", "hist", cmd
);
5067 static void event_hist_trigger_free(struct event_trigger_ops
*ops
,
5068 struct event_trigger_data
*data
)
5070 struct hist_trigger_data
*hist_data
= data
->private_data
;
5072 if (WARN_ON_ONCE(data
->ref
<= 0))
5078 del_named_trigger(data
);
5080 trigger_data_free(data
);
5082 remove_hist_vars(hist_data
);
5084 unregister_field_var_hists(hist_data
);
5086 destroy_hist_data(hist_data
);
5090 static struct event_trigger_ops event_hist_trigger_ops
= {
5091 .func
= event_hist_trigger
,
5092 .print
= event_hist_trigger_print
,
5093 .init
= event_hist_trigger_init
,
5094 .free
= event_hist_trigger_free
,
5097 static int event_hist_trigger_named_init(struct event_trigger_ops
*ops
,
5098 struct event_trigger_data
*data
)
5102 save_named_trigger(data
->named_data
->name
, data
);
5104 event_hist_trigger_init(ops
, data
->named_data
);
5109 static void event_hist_trigger_named_free(struct event_trigger_ops
*ops
,
5110 struct event_trigger_data
*data
)
5112 if (WARN_ON_ONCE(data
->ref
<= 0))
5115 event_hist_trigger_free(ops
, data
->named_data
);
5119 del_named_trigger(data
);
5120 trigger_data_free(data
);
5124 static struct event_trigger_ops event_hist_trigger_named_ops
= {
5125 .func
= event_hist_trigger
,
5126 .print
= event_hist_trigger_print
,
5127 .init
= event_hist_trigger_named_init
,
5128 .free
= event_hist_trigger_named_free
,
5131 static struct event_trigger_ops
*event_hist_get_trigger_ops(char *cmd
,
5134 return &event_hist_trigger_ops
;
5137 static void hist_clear(struct event_trigger_data
*data
)
5139 struct hist_trigger_data
*hist_data
= data
->private_data
;
5142 pause_named_trigger(data
);
5144 synchronize_sched();
5146 tracing_map_clear(hist_data
->map
);
5149 unpause_named_trigger(data
);
5152 static bool compatible_field(struct ftrace_event_field
*field
,
5153 struct ftrace_event_field
*test_field
)
5155 if (field
== test_field
)
5157 if (field
== NULL
|| test_field
== NULL
)
5159 if (strcmp(field
->name
, test_field
->name
) != 0)
5161 if (strcmp(field
->type
, test_field
->type
) != 0)
5163 if (field
->size
!= test_field
->size
)
5165 if (field
->is_signed
!= test_field
->is_signed
)
5171 static bool hist_trigger_match(struct event_trigger_data
*data
,
5172 struct event_trigger_data
*data_test
,
5173 struct event_trigger_data
*named_data
,
5176 struct tracing_map_sort_key
*sort_key
, *sort_key_test
;
5177 struct hist_trigger_data
*hist_data
, *hist_data_test
;
5178 struct hist_field
*key_field
, *key_field_test
;
5181 if (named_data
&& (named_data
!= data_test
) &&
5182 (named_data
!= data_test
->named_data
))
5185 if (!named_data
&& is_named_trigger(data_test
))
5188 hist_data
= data
->private_data
;
5189 hist_data_test
= data_test
->private_data
;
5191 if (hist_data
->n_vals
!= hist_data_test
->n_vals
||
5192 hist_data
->n_fields
!= hist_data_test
->n_fields
||
5193 hist_data
->n_sort_keys
!= hist_data_test
->n_sort_keys
)
5196 if (!ignore_filter
) {
5197 if ((data
->filter_str
&& !data_test
->filter_str
) ||
5198 (!data
->filter_str
&& data_test
->filter_str
))
5202 for_each_hist_field(i
, hist_data
) {
5203 key_field
= hist_data
->fields
[i
];
5204 key_field_test
= hist_data_test
->fields
[i
];
5206 if (key_field
->flags
!= key_field_test
->flags
)
5208 if (!compatible_field(key_field
->field
, key_field_test
->field
))
5210 if (key_field
->offset
!= key_field_test
->offset
)
5212 if (key_field
->size
!= key_field_test
->size
)
5214 if (key_field
->is_signed
!= key_field_test
->is_signed
)
5216 if (!!key_field
->var
.name
!= !!key_field_test
->var
.name
)
5218 if (key_field
->var
.name
&&
5219 strcmp(key_field
->var
.name
, key_field_test
->var
.name
) != 0)
5223 for (i
= 0; i
< hist_data
->n_sort_keys
; i
++) {
5224 sort_key
= &hist_data
->sort_keys
[i
];
5225 sort_key_test
= &hist_data_test
->sort_keys
[i
];
5227 if (sort_key
->field_idx
!= sort_key_test
->field_idx
||
5228 sort_key
->descending
!= sort_key_test
->descending
)
5232 if (!ignore_filter
&& data
->filter_str
&&
5233 (strcmp(data
->filter_str
, data_test
->filter_str
) != 0))
5236 if (!actions_match(hist_data
, hist_data_test
))
5242 static int hist_register_trigger(char *glob
, struct event_trigger_ops
*ops
,
5243 struct event_trigger_data
*data
,
5244 struct trace_event_file
*file
)
5246 struct hist_trigger_data
*hist_data
= data
->private_data
;
5247 struct event_trigger_data
*test
, *named_data
= NULL
;
5250 if (hist_data
->attrs
->name
) {
5251 named_data
= find_named_trigger(hist_data
->attrs
->name
);
5253 if (!hist_trigger_match(data
, named_data
, named_data
,
5255 hist_err("Named hist trigger doesn't match existing named trigger (includes variables): ", hist_data
->attrs
->name
);
5262 if (hist_data
->attrs
->name
&& !named_data
)
5265 list_for_each_entry_rcu(test
, &file
->triggers
, list
) {
5266 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
5267 if (!hist_trigger_match(data
, test
, named_data
, false))
5269 if (hist_data
->attrs
->pause
)
5270 test
->paused
= true;
5271 else if (hist_data
->attrs
->cont
)
5272 test
->paused
= false;
5273 else if (hist_data
->attrs
->clear
)
5276 hist_err("Hist trigger already exists", NULL
);
5283 if (hist_data
->attrs
->cont
|| hist_data
->attrs
->clear
) {
5284 hist_err("Can't clear or continue a nonexistent hist trigger", NULL
);
5289 if (hist_data
->attrs
->pause
)
5290 data
->paused
= true;
5293 data
->private_data
= named_data
->private_data
;
5294 set_named_trigger_data(data
, named_data
);
5295 data
->ops
= &event_hist_trigger_named_ops
;
5298 if (data
->ops
->init
) {
5299 ret
= data
->ops
->init(data
->ops
, data
);
5304 if (hist_data
->enable_timestamps
) {
5305 char *clock
= hist_data
->attrs
->clock
;
5307 ret
= tracing_set_clock(file
->tr
, hist_data
->attrs
->clock
);
5309 hist_err("Couldn't set trace_clock: ", clock
);
5313 tracing_set_time_stamp_abs(file
->tr
, true);
5317 destroy_hist_data(hist_data
);
5324 static int hist_trigger_enable(struct event_trigger_data
*data
,
5325 struct trace_event_file
*file
)
5329 list_add_tail_rcu(&data
->list
, &file
->triggers
);
5331 update_cond_flag(file
);
5333 if (trace_event_trigger_enable_disable(file
, 1) < 0) {
5334 list_del_rcu(&data
->list
);
5335 update_cond_flag(file
);
5342 static bool have_hist_trigger_match(struct event_trigger_data
*data
,
5343 struct trace_event_file
*file
)
5345 struct hist_trigger_data
*hist_data
= data
->private_data
;
5346 struct event_trigger_data
*test
, *named_data
= NULL
;
5349 if (hist_data
->attrs
->name
)
5350 named_data
= find_named_trigger(hist_data
->attrs
->name
);
5352 list_for_each_entry_rcu(test
, &file
->triggers
, list
) {
5353 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
5354 if (hist_trigger_match(data
, test
, named_data
, false)) {
5364 static bool hist_trigger_check_refs(struct event_trigger_data
*data
,
5365 struct trace_event_file
*file
)
5367 struct hist_trigger_data
*hist_data
= data
->private_data
;
5368 struct event_trigger_data
*test
, *named_data
= NULL
;
5370 if (hist_data
->attrs
->name
)
5371 named_data
= find_named_trigger(hist_data
->attrs
->name
);
5373 list_for_each_entry_rcu(test
, &file
->triggers
, list
) {
5374 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
5375 if (!hist_trigger_match(data
, test
, named_data
, false))
5377 hist_data
= test
->private_data
;
5378 if (check_var_refs(hist_data
))
5387 static void hist_unregister_trigger(char *glob
, struct event_trigger_ops
*ops
,
5388 struct event_trigger_data
*data
,
5389 struct trace_event_file
*file
)
5391 struct hist_trigger_data
*hist_data
= data
->private_data
;
5392 struct event_trigger_data
*test
, *named_data
= NULL
;
5393 bool unregistered
= false;
5395 if (hist_data
->attrs
->name
)
5396 named_data
= find_named_trigger(hist_data
->attrs
->name
);
5398 list_for_each_entry_rcu(test
, &file
->triggers
, list
) {
5399 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
5400 if (!hist_trigger_match(data
, test
, named_data
, false))
5402 unregistered
= true;
5403 list_del_rcu(&test
->list
);
5404 trace_event_trigger_enable_disable(file
, 0);
5405 update_cond_flag(file
);
5410 if (unregistered
&& test
->ops
->free
)
5411 test
->ops
->free(test
->ops
, test
);
5413 if (hist_data
->enable_timestamps
) {
5414 if (!hist_data
->remove
|| unregistered
)
5415 tracing_set_time_stamp_abs(file
->tr
, false);
5419 static bool hist_file_check_refs(struct trace_event_file
*file
)
5421 struct hist_trigger_data
*hist_data
;
5422 struct event_trigger_data
*test
;
5424 list_for_each_entry_rcu(test
, &file
->triggers
, list
) {
5425 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
5426 hist_data
= test
->private_data
;
5427 if (check_var_refs(hist_data
))
5435 static void hist_unreg_all(struct trace_event_file
*file
)
5437 struct event_trigger_data
*test
, *n
;
5438 struct hist_trigger_data
*hist_data
;
5439 struct synth_event
*se
;
5440 const char *se_name
;
5442 if (hist_file_check_refs(file
))
5445 list_for_each_entry_safe(test
, n
, &file
->triggers
, list
) {
5446 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
5447 hist_data
= test
->private_data
;
5448 list_del_rcu(&test
->list
);
5449 trace_event_trigger_enable_disable(file
, 0);
5451 mutex_lock(&synth_event_mutex
);
5452 se_name
= trace_event_name(file
->event_call
);
5453 se
= find_synth_event(se_name
);
5456 mutex_unlock(&synth_event_mutex
);
5458 update_cond_flag(file
);
5459 if (hist_data
->enable_timestamps
)
5460 tracing_set_time_stamp_abs(file
->tr
, false);
5461 if (test
->ops
->free
)
5462 test
->ops
->free(test
->ops
, test
);
5467 static int event_hist_trigger_func(struct event_command
*cmd_ops
,
5468 struct trace_event_file
*file
,
5469 char *glob
, char *cmd
, char *param
)
5471 unsigned int hist_trigger_bits
= TRACING_MAP_BITS_DEFAULT
;
5472 struct event_trigger_data
*trigger_data
;
5473 struct hist_trigger_attrs
*attrs
;
5474 struct event_trigger_ops
*trigger_ops
;
5475 struct hist_trigger_data
*hist_data
;
5476 struct synth_event
*se
;
5477 const char *se_name
;
5478 bool remove
= false;
5482 if (glob
&& strlen(glob
)) {
5483 last_cmd_set(param
);
5494 * separate the trigger from the filter (k:v [if filter])
5495 * allowing for whitespace in the trigger
5497 p
= trigger
= param
;
5499 p
= strstr(p
, "if");
5504 if (*(p
- 1) != ' ' && *(p
- 1) != '\t') {
5508 if (p
>= param
+ strlen(param
) - strlen("if") - 1)
5510 if (*(p
+ strlen("if")) != ' ' && *(p
+ strlen("if")) != '\t') {
5521 param
= strstrip(p
);
5522 trigger
= strstrip(trigger
);
5525 attrs
= parse_hist_trigger_attrs(trigger
);
5527 return PTR_ERR(attrs
);
5529 if (attrs
->map_bits
)
5530 hist_trigger_bits
= attrs
->map_bits
;
5532 hist_data
= create_hist_data(hist_trigger_bits
, attrs
, file
, remove
);
5533 if (IS_ERR(hist_data
)) {
5534 destroy_hist_trigger_attrs(attrs
);
5535 return PTR_ERR(hist_data
);
5538 trigger_ops
= cmd_ops
->get_trigger_ops(cmd
, trigger
);
5540 trigger_data
= kzalloc(sizeof(*trigger_data
), GFP_KERNEL
);
5541 if (!trigger_data
) {
5546 trigger_data
->count
= -1;
5547 trigger_data
->ops
= trigger_ops
;
5548 trigger_data
->cmd_ops
= cmd_ops
;
5550 INIT_LIST_HEAD(&trigger_data
->list
);
5551 RCU_INIT_POINTER(trigger_data
->filter
, NULL
);
5553 trigger_data
->private_data
= hist_data
;
5555 /* if param is non-empty, it's supposed to be a filter */
5556 if (param
&& cmd_ops
->set_filter
) {
5557 ret
= cmd_ops
->set_filter(param
, trigger_data
, file
);
5563 if (!have_hist_trigger_match(trigger_data
, file
))
5566 if (hist_trigger_check_refs(trigger_data
, file
)) {
5571 cmd_ops
->unreg(glob
+1, trigger_ops
, trigger_data
, file
);
5573 mutex_lock(&synth_event_mutex
);
5574 se_name
= trace_event_name(file
->event_call
);
5575 se
= find_synth_event(se_name
);
5578 mutex_unlock(&synth_event_mutex
);
5584 ret
= cmd_ops
->reg(glob
, trigger_ops
, trigger_data
, file
);
5586 * The above returns on success the # of triggers registered,
5587 * but if it didn't register any it returns zero. Consider no
5588 * triggers registered a failure too.
5591 if (!(attrs
->pause
|| attrs
->cont
|| attrs
->clear
))
5597 if (get_named_trigger_data(trigger_data
))
5600 if (has_hist_vars(hist_data
))
5601 save_hist_vars(hist_data
);
5603 ret
= create_actions(hist_data
, file
);
5607 ret
= tracing_map_init(hist_data
->map
);
5611 ret
= hist_trigger_enable(trigger_data
, file
);
5615 mutex_lock(&synth_event_mutex
);
5616 se_name
= trace_event_name(file
->event_call
);
5617 se
= find_synth_event(se_name
);
5620 mutex_unlock(&synth_event_mutex
);
5622 /* Just return zero, not the number of registered triggers */
5630 cmd_ops
->unreg(glob
+1, trigger_ops
, trigger_data
, file
);
5632 if (cmd_ops
->set_filter
)
5633 cmd_ops
->set_filter(NULL
, trigger_data
, NULL
);
5635 remove_hist_vars(hist_data
);
5637 kfree(trigger_data
);
5639 destroy_hist_data(hist_data
);
5643 static struct event_command trigger_hist_cmd
= {
5645 .trigger_type
= ETT_EVENT_HIST
,
5646 .flags
= EVENT_CMD_FL_NEEDS_REC
,
5647 .func
= event_hist_trigger_func
,
5648 .reg
= hist_register_trigger
,
5649 .unreg
= hist_unregister_trigger
,
5650 .unreg_all
= hist_unreg_all
,
5651 .get_trigger_ops
= event_hist_get_trigger_ops
,
5652 .set_filter
= set_trigger_filter
,
5655 __init
int register_trigger_hist_cmd(void)
5659 ret
= register_event_command(&trigger_hist_cmd
);
5666 hist_enable_trigger(struct event_trigger_data
*data
, void *rec
,
5667 struct ring_buffer_event
*event
)
5669 struct enable_trigger_data
*enable_data
= data
->private_data
;
5670 struct event_trigger_data
*test
;
5672 list_for_each_entry_rcu(test
, &enable_data
->file
->triggers
, list
) {
5673 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
5674 if (enable_data
->enable
)
5675 test
->paused
= false;
5677 test
->paused
= true;
5683 hist_enable_count_trigger(struct event_trigger_data
*data
, void *rec
,
5684 struct ring_buffer_event
*event
)
5689 if (data
->count
!= -1)
5692 hist_enable_trigger(data
, rec
, event
);
5695 static struct event_trigger_ops hist_enable_trigger_ops
= {
5696 .func
= hist_enable_trigger
,
5697 .print
= event_enable_trigger_print
,
5698 .init
= event_trigger_init
,
5699 .free
= event_enable_trigger_free
,
5702 static struct event_trigger_ops hist_enable_count_trigger_ops
= {
5703 .func
= hist_enable_count_trigger
,
5704 .print
= event_enable_trigger_print
,
5705 .init
= event_trigger_init
,
5706 .free
= event_enable_trigger_free
,
5709 static struct event_trigger_ops hist_disable_trigger_ops
= {
5710 .func
= hist_enable_trigger
,
5711 .print
= event_enable_trigger_print
,
5712 .init
= event_trigger_init
,
5713 .free
= event_enable_trigger_free
,
5716 static struct event_trigger_ops hist_disable_count_trigger_ops
= {
5717 .func
= hist_enable_count_trigger
,
5718 .print
= event_enable_trigger_print
,
5719 .init
= event_trigger_init
,
5720 .free
= event_enable_trigger_free
,
5723 static struct event_trigger_ops
*
5724 hist_enable_get_trigger_ops(char *cmd
, char *param
)
5726 struct event_trigger_ops
*ops
;
5729 enable
= (strcmp(cmd
, ENABLE_HIST_STR
) == 0);
5732 ops
= param
? &hist_enable_count_trigger_ops
:
5733 &hist_enable_trigger_ops
;
5735 ops
= param
? &hist_disable_count_trigger_ops
:
5736 &hist_disable_trigger_ops
;
5741 static void hist_enable_unreg_all(struct trace_event_file
*file
)
5743 struct event_trigger_data
*test
, *n
;
5745 list_for_each_entry_safe(test
, n
, &file
->triggers
, list
) {
5746 if (test
->cmd_ops
->trigger_type
== ETT_HIST_ENABLE
) {
5747 list_del_rcu(&test
->list
);
5748 update_cond_flag(file
);
5749 trace_event_trigger_enable_disable(file
, 0);
5750 if (test
->ops
->free
)
5751 test
->ops
->free(test
->ops
, test
);
5756 static struct event_command trigger_hist_enable_cmd
= {
5757 .name
= ENABLE_HIST_STR
,
5758 .trigger_type
= ETT_HIST_ENABLE
,
5759 .func
= event_enable_trigger_func
,
5760 .reg
= event_enable_register_trigger
,
5761 .unreg
= event_enable_unregister_trigger
,
5762 .unreg_all
= hist_enable_unreg_all
,
5763 .get_trigger_ops
= hist_enable_get_trigger_ops
,
5764 .set_filter
= set_trigger_filter
,
5767 static struct event_command trigger_hist_disable_cmd
= {
5768 .name
= DISABLE_HIST_STR
,
5769 .trigger_type
= ETT_HIST_ENABLE
,
5770 .func
= event_enable_trigger_func
,
5771 .reg
= event_enable_register_trigger
,
5772 .unreg
= event_enable_unregister_trigger
,
5773 .unreg_all
= hist_enable_unreg_all
,
5774 .get_trigger_ops
= hist_enable_get_trigger_ops
,
5775 .set_filter
= set_trigger_filter
,
5778 static __init
void unregister_trigger_hist_enable_disable_cmds(void)
5780 unregister_event_command(&trigger_hist_enable_cmd
);
5781 unregister_event_command(&trigger_hist_disable_cmd
);
5784 __init
int register_trigger_hist_enable_disable_cmds(void)
5788 ret
= register_event_command(&trigger_hist_enable_cmd
);
5789 if (WARN_ON(ret
< 0))
5791 ret
= register_event_command(&trigger_hist_disable_cmd
);
5792 if (WARN_ON(ret
< 0))
5793 unregister_trigger_hist_enable_disable_cmds();
5798 static __init
int trace_events_hist_init(void)
5800 struct dentry
*entry
= NULL
;
5801 struct dentry
*d_tracer
;
5804 d_tracer
= tracing_init_dentry();
5805 if (IS_ERR(d_tracer
)) {
5806 err
= PTR_ERR(d_tracer
);
5810 entry
= tracefs_create_file("synthetic_events", 0644, d_tracer
,
5811 NULL
, &synth_events_fops
);
5819 pr_warn("Could not create tracefs 'synthetic_events' entry\n");
5824 fs_initcall(trace_events_hist_init
);