/*
 * trace_events_hist - trace event hist triggers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
 */
17 #include <linux/module.h>
18 #include <linux/kallsyms.h>
19 #include <linux/mutex.h>
20 #include <linux/slab.h>
21 #include <linux/stacktrace.h>
22 #include <linux/rculist.h>
23 #include <linux/tracefs.h>
25 #include "tracing_map.h"
28 #define SYNTH_SYSTEM "synthetic"
29 #define SYNTH_FIELDS_MAX 16
31 #define STR_VAR_LEN_MAX 32 /* must be multiple of sizeof(u64) */
35 typedef u64 (*hist_field_fn_t
) (struct hist_field
*field
,
36 struct tracing_map_elt
*elt
,
37 struct ring_buffer_event
*rbe
,
40 #define HIST_FIELD_OPERANDS_MAX 2
41 #define HIST_FIELDS_MAX (TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX)
42 #define HIST_ACTIONS_MAX 8
53 struct hist_trigger_data
*hist_data
;
58 struct ftrace_event_field
*field
;
63 unsigned int is_signed
;
65 struct hist_field
*operands
[HIST_FIELD_OPERANDS_MAX
];
66 struct hist_trigger_data
*hist_data
;
68 enum field_op_id
operator;
73 unsigned int var_ref_idx
;
77 static u64
hist_field_none(struct hist_field
*field
,
78 struct tracing_map_elt
*elt
,
79 struct ring_buffer_event
*rbe
,
85 static u64
hist_field_counter(struct hist_field
*field
,
86 struct tracing_map_elt
*elt
,
87 struct ring_buffer_event
*rbe
,
93 static u64
hist_field_string(struct hist_field
*hist_field
,
94 struct tracing_map_elt
*elt
,
95 struct ring_buffer_event
*rbe
,
98 char *addr
= (char *)(event
+ hist_field
->field
->offset
);
100 return (u64
)(unsigned long)addr
;
103 static u64
hist_field_dynstring(struct hist_field
*hist_field
,
104 struct tracing_map_elt
*elt
,
105 struct ring_buffer_event
*rbe
,
108 u32 str_item
= *(u32
*)(event
+ hist_field
->field
->offset
);
109 int str_loc
= str_item
& 0xffff;
110 char *addr
= (char *)(event
+ str_loc
);
112 return (u64
)(unsigned long)addr
;
115 static u64
hist_field_pstring(struct hist_field
*hist_field
,
116 struct tracing_map_elt
*elt
,
117 struct ring_buffer_event
*rbe
,
120 char **addr
= (char **)(event
+ hist_field
->field
->offset
);
122 return (u64
)(unsigned long)*addr
;
125 static u64
hist_field_log2(struct hist_field
*hist_field
,
126 struct tracing_map_elt
*elt
,
127 struct ring_buffer_event
*rbe
,
130 struct hist_field
*operand
= hist_field
->operands
[0];
132 u64 val
= operand
->fn(operand
, elt
, rbe
, event
);
134 return (u64
) ilog2(roundup_pow_of_two(val
));
137 static u64
hist_field_plus(struct hist_field
*hist_field
,
138 struct tracing_map_elt
*elt
,
139 struct ring_buffer_event
*rbe
,
142 struct hist_field
*operand1
= hist_field
->operands
[0];
143 struct hist_field
*operand2
= hist_field
->operands
[1];
145 u64 val1
= operand1
->fn(operand1
, elt
, rbe
, event
);
146 u64 val2
= operand2
->fn(operand2
, elt
, rbe
, event
);
151 static u64
hist_field_minus(struct hist_field
*hist_field
,
152 struct tracing_map_elt
*elt
,
153 struct ring_buffer_event
*rbe
,
156 struct hist_field
*operand1
= hist_field
->operands
[0];
157 struct hist_field
*operand2
= hist_field
->operands
[1];
159 u64 val1
= operand1
->fn(operand1
, elt
, rbe
, event
);
160 u64 val2
= operand2
->fn(operand2
, elt
, rbe
, event
);
165 static u64
hist_field_unary_minus(struct hist_field
*hist_field
,
166 struct tracing_map_elt
*elt
,
167 struct ring_buffer_event
*rbe
,
170 struct hist_field
*operand
= hist_field
->operands
[0];
172 s64 sval
= (s64
)operand
->fn(operand
, elt
, rbe
, event
);
173 u64 val
= (u64
)-sval
;
/*
 * Generate a resolver for a plain numeric field of the given C type:
 * read a 'type' at the field's offset in the record and widen it to
 * u64.  (Braces and the 'void *event' parameter line were lost in
 * extraction; reconstructed to match the hand-written resolvers above.)
 */
#define DEFINE_HIST_FIELD_FN(type)					\
	static u64 hist_field_##type(struct hist_field *hist_field,	\
				     struct tracing_map_elt *elt,	\
				     struct ring_buffer_event *rbe,	\
				     void *event)			\
{									\
	type *addr = (type *)(event + hist_field->field->offset);	\
									\
	return (u64)(unsigned long)*addr;				\
}
189 DEFINE_HIST_FIELD_FN(s64
);
190 DEFINE_HIST_FIELD_FN(u64
);
191 DEFINE_HIST_FIELD_FN(s32
);
192 DEFINE_HIST_FIELD_FN(u32
);
193 DEFINE_HIST_FIELD_FN(s16
);
194 DEFINE_HIST_FIELD_FN(u16
);
195 DEFINE_HIST_FIELD_FN(s8
);
196 DEFINE_HIST_FIELD_FN(u8
);
198 #define for_each_hist_field(i, hist_data) \
199 for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)
201 #define for_each_hist_val_field(i, hist_data) \
202 for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)
204 #define for_each_hist_key_field(i, hist_data) \
205 for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)
207 #define HIST_STACKTRACE_DEPTH 16
208 #define HIST_STACKTRACE_SIZE (HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
209 #define HIST_STACKTRACE_SKIP 5
211 #define HITCOUNT_IDX 0
212 #define HIST_KEY_SIZE_MAX (MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
214 enum hist_field_flags
{
215 HIST_FIELD_FL_HITCOUNT
= 1 << 0,
216 HIST_FIELD_FL_KEY
= 1 << 1,
217 HIST_FIELD_FL_STRING
= 1 << 2,
218 HIST_FIELD_FL_HEX
= 1 << 3,
219 HIST_FIELD_FL_SYM
= 1 << 4,
220 HIST_FIELD_FL_SYM_OFFSET
= 1 << 5,
221 HIST_FIELD_FL_EXECNAME
= 1 << 6,
222 HIST_FIELD_FL_SYSCALL
= 1 << 7,
223 HIST_FIELD_FL_STACKTRACE
= 1 << 8,
224 HIST_FIELD_FL_LOG2
= 1 << 9,
225 HIST_FIELD_FL_TIMESTAMP
= 1 << 10,
226 HIST_FIELD_FL_TIMESTAMP_USECS
= 1 << 11,
227 HIST_FIELD_FL_VAR
= 1 << 12,
228 HIST_FIELD_FL_EXPR
= 1 << 13,
229 HIST_FIELD_FL_VAR_REF
= 1 << 14,
230 HIST_FIELD_FL_CPU
= 1 << 15,
231 HIST_FIELD_FL_ALIAS
= 1 << 16,
236 char *name
[TRACING_MAP_VARS_MAX
];
237 char *expr
[TRACING_MAP_VARS_MAX
];
240 struct hist_trigger_attrs
{
250 unsigned int map_bits
;
252 char *assignment_str
[TRACING_MAP_VARS_MAX
];
253 unsigned int n_assignments
;
255 char *action_str
[HIST_ACTIONS_MAX
];
256 unsigned int n_actions
;
258 struct var_defs var_defs
;
262 struct hist_field
*var
;
263 struct hist_field
*val
;
266 struct field_var_hist
{
267 struct hist_trigger_data
*hist_data
;
271 struct hist_trigger_data
{
272 struct hist_field
*fields
[HIST_FIELDS_MAX
];
275 unsigned int n_fields
;
277 unsigned int key_size
;
278 struct tracing_map_sort_key sort_keys
[TRACING_MAP_SORT_KEYS_MAX
];
279 unsigned int n_sort_keys
;
280 struct trace_event_file
*event_file
;
281 struct hist_trigger_attrs
*attrs
;
282 struct tracing_map
*map
;
283 bool enable_timestamps
;
285 struct hist_field
*var_refs
[TRACING_MAP_VARS_MAX
];
286 unsigned int n_var_refs
;
288 struct action_data
*actions
[HIST_ACTIONS_MAX
];
289 unsigned int n_actions
;
291 struct hist_field
*synth_var_refs
[SYNTH_FIELDS_MAX
];
292 unsigned int n_synth_var_refs
;
293 struct field_var
*field_vars
[SYNTH_FIELDS_MAX
];
294 unsigned int n_field_vars
;
295 unsigned int n_field_var_str
;
296 struct field_var_hist
*field_var_hists
[SYNTH_FIELDS_MAX
];
297 unsigned int n_field_var_hists
;
299 struct field_var
*max_vars
[SYNTH_FIELDS_MAX
];
300 unsigned int n_max_vars
;
301 unsigned int n_max_var_str
;
313 struct list_head list
;
316 struct synth_field
**fields
;
317 unsigned int n_fields
;
319 struct trace_event_class
class;
320 struct trace_event_call call
;
321 struct tracepoint
*tp
;
326 typedef void (*action_fn_t
) (struct hist_trigger_data
*hist_data
,
327 struct tracing_map_elt
*elt
, void *rec
,
328 struct ring_buffer_event
*rbe
,
329 struct action_data
*data
, u64
*var_ref_vals
);
333 unsigned int n_params
;
334 char *params
[SYNTH_FIELDS_MAX
];
338 unsigned int var_ref_idx
;
340 char *match_event_system
;
341 char *synth_event_name
;
342 struct synth_event
*synth_event
;
348 unsigned int max_var_ref_idx
;
349 struct hist_field
*max_var
;
350 struct hist_field
*var
;
356 static char last_hist_cmd
[MAX_FILTER_STR_VAL
];
357 static char hist_err_str
[MAX_FILTER_STR_VAL
];
359 static void last_cmd_set(char *str
)
364 strncpy(last_hist_cmd
, str
, MAX_FILTER_STR_VAL
- 1);
367 static void hist_err(char *str
, char *var
)
369 int maxlen
= MAX_FILTER_STR_VAL
- 1;
374 if (strlen(hist_err_str
))
380 if (strlen(hist_err_str
) + strlen(str
) + strlen(var
) > maxlen
)
383 strcat(hist_err_str
, str
);
384 strcat(hist_err_str
, var
);
387 static void hist_err_event(char *str
, char *system
, char *event
, char *var
)
389 char err
[MAX_FILTER_STR_VAL
];
392 snprintf(err
, MAX_FILTER_STR_VAL
, "%s.%s.%s", system
, event
, var
);
394 snprintf(err
, MAX_FILTER_STR_VAL
, "%s.%s", system
, event
);
396 strncpy(err
, var
, MAX_FILTER_STR_VAL
);
401 static void hist_err_clear(void)
403 hist_err_str
[0] = '\0';
406 static bool have_hist_err(void)
408 if (strlen(hist_err_str
))
414 static LIST_HEAD(synth_event_list
);
415 static DEFINE_MUTEX(synth_event_mutex
);
417 struct synth_trace_event
{
418 struct trace_entry ent
;
422 static int synth_event_define_fields(struct trace_event_call
*call
)
424 struct synth_trace_event trace
;
425 int offset
= offsetof(typeof(trace
), fields
);
426 struct synth_event
*event
= call
->data
;
427 unsigned int i
, size
, n_u64
;
432 for (i
= 0, n_u64
= 0; i
< event
->n_fields
; i
++) {
433 size
= event
->fields
[i
]->size
;
434 is_signed
= event
->fields
[i
]->is_signed
;
435 type
= event
->fields
[i
]->type
;
436 name
= event
->fields
[i
]->name
;
437 ret
= trace_define_field(call
, type
, name
, offset
, size
,
438 is_signed
, FILTER_OTHER
);
442 if (event
->fields
[i
]->is_string
) {
443 offset
+= STR_VAR_LEN_MAX
;
444 n_u64
+= STR_VAR_LEN_MAX
/ sizeof(u64
);
446 offset
+= sizeof(u64
);
451 event
->n_u64
= n_u64
;
/*
 * A synthetic event field type is considered signed unless its type
 * name starts with 'u' (u8/u16/u32/u64, unsigned*, ...).
 */
static bool synth_field_signed(char *type)
{
	if (strncmp(type, "u", 1) == 0)
		return false;

	return true;
}
/*
 * Return nonzero (1) if the synthetic field type denotes a string,
 * i.e. its type name contains a "char[" array specifier.
 */
static int synth_field_is_string(char *type)
{
	if (strstr(type, "char[") != NULL)
		return 1;

	return 0;
}
472 static int synth_field_string_size(char *type
)
474 char buf
[4], *end
, *start
;
478 start
= strstr(type
, "char[");
481 start
+= strlen("char[");
483 end
= strchr(type
, ']');
484 if (!end
|| end
< start
)
491 strncpy(buf
, start
, len
);
494 err
= kstrtouint(buf
, 0, &size
);
498 if (size
> STR_VAR_LEN_MAX
)
504 static int synth_field_size(char *type
)
508 if (strcmp(type
, "s64") == 0)
510 else if (strcmp(type
, "u64") == 0)
512 else if (strcmp(type
, "s32") == 0)
514 else if (strcmp(type
, "u32") == 0)
516 else if (strcmp(type
, "s16") == 0)
518 else if (strcmp(type
, "u16") == 0)
520 else if (strcmp(type
, "s8") == 0)
522 else if (strcmp(type
, "u8") == 0)
524 else if (strcmp(type
, "char") == 0)
526 else if (strcmp(type
, "unsigned char") == 0)
527 size
= sizeof(unsigned char);
528 else if (strcmp(type
, "int") == 0)
530 else if (strcmp(type
, "unsigned int") == 0)
531 size
= sizeof(unsigned int);
532 else if (strcmp(type
, "long") == 0)
534 else if (strcmp(type
, "unsigned long") == 0)
535 size
= sizeof(unsigned long);
536 else if (strcmp(type
, "pid_t") == 0)
537 size
= sizeof(pid_t
);
538 else if (synth_field_is_string(type
))
539 size
= synth_field_string_size(type
);
544 static const char *synth_field_fmt(char *type
)
546 const char *fmt
= "%llu";
548 if (strcmp(type
, "s64") == 0)
550 else if (strcmp(type
, "u64") == 0)
552 else if (strcmp(type
, "s32") == 0)
554 else if (strcmp(type
, "u32") == 0)
556 else if (strcmp(type
, "s16") == 0)
558 else if (strcmp(type
, "u16") == 0)
560 else if (strcmp(type
, "s8") == 0)
562 else if (strcmp(type
, "u8") == 0)
564 else if (strcmp(type
, "char") == 0)
566 else if (strcmp(type
, "unsigned char") == 0)
568 else if (strcmp(type
, "int") == 0)
570 else if (strcmp(type
, "unsigned int") == 0)
572 else if (strcmp(type
, "long") == 0)
574 else if (strcmp(type
, "unsigned long") == 0)
576 else if (strcmp(type
, "pid_t") == 0)
578 else if (synth_field_is_string(type
))
584 static enum print_line_t
print_synth_event(struct trace_iterator
*iter
,
586 struct trace_event
*event
)
588 struct trace_array
*tr
= iter
->tr
;
589 struct trace_seq
*s
= &iter
->seq
;
590 struct synth_trace_event
*entry
;
591 struct synth_event
*se
;
592 unsigned int i
, n_u64
;
596 entry
= (struct synth_trace_event
*)iter
->ent
;
597 se
= container_of(event
, struct synth_event
, call
.event
);
599 trace_seq_printf(s
, "%s: ", se
->name
);
601 for (i
= 0, n_u64
= 0; i
< se
->n_fields
; i
++) {
602 if (trace_seq_has_overflowed(s
))
605 fmt
= synth_field_fmt(se
->fields
[i
]->type
);
607 /* parameter types */
608 if (tr
->trace_flags
& TRACE_ITER_VERBOSE
)
609 trace_seq_printf(s
, "%s ", fmt
);
611 snprintf(print_fmt
, sizeof(print_fmt
), "%%s=%s%%s", fmt
);
613 /* parameter values */
614 if (se
->fields
[i
]->is_string
) {
615 trace_seq_printf(s
, print_fmt
, se
->fields
[i
]->name
,
616 (char *)&entry
->fields
[n_u64
],
617 i
== se
->n_fields
- 1 ? "" : " ");
618 n_u64
+= STR_VAR_LEN_MAX
/ sizeof(u64
);
620 trace_seq_printf(s
, print_fmt
, se
->fields
[i
]->name
,
621 entry
->fields
[n_u64
],
622 i
== se
->n_fields
- 1 ? "" : " ");
627 trace_seq_putc(s
, '\n');
629 return trace_handle_return(s
);
632 static struct trace_event_functions synth_event_funcs
= {
633 .trace
= print_synth_event
636 static notrace
void trace_event_raw_event_synth(void *__data
,
638 unsigned int var_ref_idx
)
640 struct trace_event_file
*trace_file
= __data
;
641 struct synth_trace_event
*entry
;
642 struct trace_event_buffer fbuffer
;
643 struct ring_buffer
*buffer
;
644 struct synth_event
*event
;
645 unsigned int i
, n_u64
;
648 event
= trace_file
->event_call
->data
;
650 if (trace_trigger_soft_disabled(trace_file
))
653 fields_size
= event
->n_u64
* sizeof(u64
);
656 * Avoid ring buffer recursion detection, as this event
657 * is being performed within another event.
659 buffer
= trace_file
->tr
->trace_buffer
.buffer
;
660 ring_buffer_nest_start(buffer
);
662 entry
= trace_event_buffer_reserve(&fbuffer
, trace_file
,
663 sizeof(*entry
) + fields_size
);
667 for (i
= 0, n_u64
= 0; i
< event
->n_fields
; i
++) {
668 if (event
->fields
[i
]->is_string
) {
669 char *str_val
= (char *)(long)var_ref_vals
[var_ref_idx
+ i
];
670 char *str_field
= (char *)&entry
->fields
[n_u64
];
672 strscpy(str_field
, str_val
, STR_VAR_LEN_MAX
);
673 n_u64
+= STR_VAR_LEN_MAX
/ sizeof(u64
);
675 entry
->fields
[n_u64
] = var_ref_vals
[var_ref_idx
+ i
];
680 trace_event_buffer_commit(&fbuffer
);
682 ring_buffer_nest_end(buffer
);
685 static void free_synth_event_print_fmt(struct trace_event_call
*call
)
688 kfree(call
->print_fmt
);
689 call
->print_fmt
= NULL
;
693 static int __set_synth_event_print_fmt(struct synth_event
*event
,
700 /* When len=0, we just calculate the needed length */
701 #define LEN_OR_ZERO (len ? len - pos : 0)
703 pos
+= snprintf(buf
+ pos
, LEN_OR_ZERO
, "\"");
704 for (i
= 0; i
< event
->n_fields
; i
++) {
705 fmt
= synth_field_fmt(event
->fields
[i
]->type
);
706 pos
+= snprintf(buf
+ pos
, LEN_OR_ZERO
, "%s=%s%s",
707 event
->fields
[i
]->name
, fmt
,
708 i
== event
->n_fields
- 1 ? "" : ", ");
710 pos
+= snprintf(buf
+ pos
, LEN_OR_ZERO
, "\"");
712 for (i
= 0; i
< event
->n_fields
; i
++) {
713 pos
+= snprintf(buf
+ pos
, LEN_OR_ZERO
,
714 ", REC->%s", event
->fields
[i
]->name
);
719 /* return the length of print_fmt */
723 static int set_synth_event_print_fmt(struct trace_event_call
*call
)
725 struct synth_event
*event
= call
->data
;
729 /* First: called with 0 length to calculate the needed length */
730 len
= __set_synth_event_print_fmt(event
, NULL
, 0);
732 print_fmt
= kmalloc(len
+ 1, GFP_KERNEL
);
736 /* Second: actually write the @print_fmt */
737 __set_synth_event_print_fmt(event
, print_fmt
, len
+ 1);
738 call
->print_fmt
= print_fmt
;
743 static void free_synth_field(struct synth_field
*field
)
750 static struct synth_field
*parse_synth_field(char *field_type
,
753 struct synth_field
*field
;
757 if (field_type
[0] == ';')
760 len
= strlen(field_name
);
761 if (field_name
[len
- 1] == ';')
762 field_name
[len
- 1] = '\0';
764 field
= kzalloc(sizeof(*field
), GFP_KERNEL
);
766 return ERR_PTR(-ENOMEM
);
768 len
= strlen(field_type
) + 1;
769 array
= strchr(field_name
, '[');
771 len
+= strlen(array
);
772 field
->type
= kzalloc(len
, GFP_KERNEL
);
777 strcat(field
->type
, field_type
);
779 strcat(field
->type
, array
);
783 field
->size
= synth_field_size(field
->type
);
789 if (synth_field_is_string(field
->type
))
790 field
->is_string
= true;
792 field
->is_signed
= synth_field_signed(field
->type
);
794 field
->name
= kstrdup(field_name
, GFP_KERNEL
);
802 free_synth_field(field
);
803 field
= ERR_PTR(ret
);
807 static void free_synth_tracepoint(struct tracepoint
*tp
)
816 static struct tracepoint
*alloc_synth_tracepoint(char *name
)
818 struct tracepoint
*tp
;
820 tp
= kzalloc(sizeof(*tp
), GFP_KERNEL
);
822 return ERR_PTR(-ENOMEM
);
824 tp
->name
= kstrdup(name
, GFP_KERNEL
);
827 return ERR_PTR(-ENOMEM
);
833 typedef void (*synth_probe_func_t
) (void *__data
, u64
*var_ref_vals
,
834 unsigned int var_ref_idx
);
836 static inline void trace_synth(struct synth_event
*event
, u64
*var_ref_vals
,
837 unsigned int var_ref_idx
)
839 struct tracepoint
*tp
= event
->tp
;
841 if (unlikely(atomic_read(&tp
->key
.enabled
) > 0)) {
842 struct tracepoint_func
*probe_func_ptr
;
843 synth_probe_func_t probe_func
;
846 if (!(cpu_online(raw_smp_processor_id())))
849 probe_func_ptr
= rcu_dereference_sched((tp
)->funcs
);
850 if (probe_func_ptr
) {
852 probe_func
= probe_func_ptr
->func
;
853 __data
= probe_func_ptr
->data
;
854 probe_func(__data
, var_ref_vals
, var_ref_idx
);
855 } while ((++probe_func_ptr
)->func
);
860 static struct synth_event
*find_synth_event(const char *name
)
862 struct synth_event
*event
;
864 list_for_each_entry(event
, &synth_event_list
, list
) {
865 if (strcmp(event
->name
, name
) == 0)
872 static int register_synth_event(struct synth_event
*event
)
874 struct trace_event_call
*call
= &event
->call
;
877 event
->call
.class = &event
->class;
878 event
->class.system
= kstrdup(SYNTH_SYSTEM
, GFP_KERNEL
);
879 if (!event
->class.system
) {
884 event
->tp
= alloc_synth_tracepoint(event
->name
);
885 if (IS_ERR(event
->tp
)) {
886 ret
= PTR_ERR(event
->tp
);
891 INIT_LIST_HEAD(&call
->class->fields
);
892 call
->event
.funcs
= &synth_event_funcs
;
893 call
->class->define_fields
= synth_event_define_fields
;
895 ret
= register_trace_event(&call
->event
);
900 call
->flags
= TRACE_EVENT_FL_TRACEPOINT
;
901 call
->class->reg
= trace_event_reg
;
902 call
->class->probe
= trace_event_raw_event_synth
;
904 call
->tp
= event
->tp
;
906 ret
= trace_add_event_call(call
);
908 pr_warn("Failed to register synthetic event: %s\n",
909 trace_event_name(call
));
913 ret
= set_synth_event_print_fmt(call
);
915 trace_remove_event_call(call
);
921 unregister_trace_event(&call
->event
);
925 static int unregister_synth_event(struct synth_event
*event
)
927 struct trace_event_call
*call
= &event
->call
;
930 ret
= trace_remove_event_call(call
);
935 static void free_synth_event(struct synth_event
*event
)
942 for (i
= 0; i
< event
->n_fields
; i
++)
943 free_synth_field(event
->fields
[i
]);
945 kfree(event
->fields
);
947 kfree(event
->class.system
);
948 free_synth_tracepoint(event
->tp
);
949 free_synth_event_print_fmt(&event
->call
);
953 static struct synth_event
*alloc_synth_event(char *event_name
, int n_fields
,
954 struct synth_field
**fields
)
956 struct synth_event
*event
;
959 event
= kzalloc(sizeof(*event
), GFP_KERNEL
);
961 event
= ERR_PTR(-ENOMEM
);
965 event
->name
= kstrdup(event_name
, GFP_KERNEL
);
968 event
= ERR_PTR(-ENOMEM
);
972 event
->fields
= kcalloc(n_fields
, sizeof(*event
->fields
), GFP_KERNEL
);
973 if (!event
->fields
) {
974 free_synth_event(event
);
975 event
= ERR_PTR(-ENOMEM
);
979 for (i
= 0; i
< n_fields
; i
++)
980 event
->fields
[i
] = fields
[i
];
982 event
->n_fields
= n_fields
;
987 static void action_trace(struct hist_trigger_data
*hist_data
,
988 struct tracing_map_elt
*elt
, void *rec
,
989 struct ring_buffer_event
*rbe
,
990 struct action_data
*data
, u64
*var_ref_vals
)
992 struct synth_event
*event
= data
->onmatch
.synth_event
;
994 trace_synth(event
, var_ref_vals
, data
->onmatch
.var_ref_idx
);
997 struct hist_var_data
{
998 struct list_head list
;
999 struct hist_trigger_data
*hist_data
;
1002 static void add_or_delete_synth_event(struct synth_event
*event
, int delete)
1005 free_synth_event(event
);
1007 mutex_lock(&synth_event_mutex
);
1008 if (!find_synth_event(event
->name
))
1009 list_add(&event
->list
, &synth_event_list
);
1011 free_synth_event(event
);
1012 mutex_unlock(&synth_event_mutex
);
1016 static int create_synth_event(int argc
, char **argv
)
1018 struct synth_field
*field
, *fields
[SYNTH_FIELDS_MAX
];
1019 struct synth_event
*event
= NULL
;
1020 bool delete_event
= false;
1021 int i
, n_fields
= 0, ret
= 0;
1024 mutex_lock(&synth_event_mutex
);
1028 * - Add synthetic event: <event_name> field[;field] ...
1029 * - Remove synthetic event: !<event_name> field[;field] ...
1030 * where 'field' = type field_name
1038 if (name
[0] == '!') {
1039 delete_event
= true;
1043 event
= find_synth_event(name
);
1051 list_del(&event
->list
);
1057 } else if (delete_event
)
1065 for (i
= 1; i
< argc
- 1; i
++) {
1066 if (strcmp(argv
[i
], ";") == 0)
1068 if (n_fields
== SYNTH_FIELDS_MAX
) {
1073 field
= parse_synth_field(argv
[i
], argv
[i
+ 1]);
1074 if (IS_ERR(field
)) {
1075 ret
= PTR_ERR(field
);
1078 fields
[n_fields
] = field
;
1087 event
= alloc_synth_event(name
, n_fields
, fields
);
1088 if (IS_ERR(event
)) {
1089 ret
= PTR_ERR(event
);
1094 mutex_unlock(&synth_event_mutex
);
1098 ret
= unregister_synth_event(event
);
1099 add_or_delete_synth_event(event
, !ret
);
1101 ret
= register_synth_event(event
);
1102 add_or_delete_synth_event(event
, ret
);
1108 mutex_unlock(&synth_event_mutex
);
1110 for (i
= 0; i
< n_fields
; i
++)
1111 free_synth_field(fields
[i
]);
1112 free_synth_event(event
);
1117 static int release_all_synth_events(void)
1119 struct list_head release_events
;
1120 struct synth_event
*event
, *e
;
1123 INIT_LIST_HEAD(&release_events
);
1125 mutex_lock(&synth_event_mutex
);
1127 list_for_each_entry(event
, &synth_event_list
, list
) {
1129 mutex_unlock(&synth_event_mutex
);
1134 list_splice_init(&event
->list
, &release_events
);
1136 mutex_unlock(&synth_event_mutex
);
1138 list_for_each_entry_safe(event
, e
, &release_events
, list
) {
1139 list_del(&event
->list
);
1141 ret
= unregister_synth_event(event
);
1142 add_or_delete_synth_event(event
, !ret
);
1149 static void *synth_events_seq_start(struct seq_file
*m
, loff_t
*pos
)
1151 mutex_lock(&synth_event_mutex
);
1153 return seq_list_start(&synth_event_list
, *pos
);
1156 static void *synth_events_seq_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
1158 return seq_list_next(v
, &synth_event_list
, pos
);
1161 static void synth_events_seq_stop(struct seq_file
*m
, void *v
)
1163 mutex_unlock(&synth_event_mutex
);
1166 static int synth_events_seq_show(struct seq_file
*m
, void *v
)
1168 struct synth_field
*field
;
1169 struct synth_event
*event
= v
;
1172 seq_printf(m
, "%s\t", event
->name
);
1174 for (i
= 0; i
< event
->n_fields
; i
++) {
1175 field
= event
->fields
[i
];
1177 /* parameter values */
1178 seq_printf(m
, "%s %s%s", field
->type
, field
->name
,
1179 i
== event
->n_fields
- 1 ? "" : "; ");
1187 static const struct seq_operations synth_events_seq_op
= {
1188 .start
= synth_events_seq_start
,
1189 .next
= synth_events_seq_next
,
1190 .stop
= synth_events_seq_stop
,
1191 .show
= synth_events_seq_show
1194 static int synth_events_open(struct inode
*inode
, struct file
*file
)
1198 if ((file
->f_mode
& FMODE_WRITE
) && (file
->f_flags
& O_TRUNC
)) {
1199 ret
= release_all_synth_events();
1204 return seq_open(file
, &synth_events_seq_op
);
1207 static ssize_t
synth_events_write(struct file
*file
,
1208 const char __user
*buffer
,
1209 size_t count
, loff_t
*ppos
)
1211 return trace_parse_run_command(file
, buffer
, count
, ppos
,
1212 create_synth_event
);
1215 static const struct file_operations synth_events_fops
= {
1216 .open
= synth_events_open
,
1217 .write
= synth_events_write
,
1219 .llseek
= seq_lseek
,
1220 .release
= seq_release
,
1223 static u64
hist_field_timestamp(struct hist_field
*hist_field
,
1224 struct tracing_map_elt
*elt
,
1225 struct ring_buffer_event
*rbe
,
1228 struct hist_trigger_data
*hist_data
= hist_field
->hist_data
;
1229 struct trace_array
*tr
= hist_data
->event_file
->tr
;
1231 u64 ts
= ring_buffer_event_time_stamp(rbe
);
1233 if (hist_data
->attrs
->ts_in_usecs
&& trace_clock_in_ns(tr
))
1239 static u64
hist_field_cpu(struct hist_field
*hist_field
,
1240 struct tracing_map_elt
*elt
,
1241 struct ring_buffer_event
*rbe
,
1244 int cpu
= smp_processor_id();
1249 static struct hist_field
*
1250 check_field_for_var_ref(struct hist_field
*hist_field
,
1251 struct hist_trigger_data
*var_data
,
1252 unsigned int var_idx
)
1254 struct hist_field
*found
= NULL
;
1256 if (hist_field
&& hist_field
->flags
& HIST_FIELD_FL_VAR_REF
) {
1257 if (hist_field
->var
.idx
== var_idx
&&
1258 hist_field
->var
.hist_data
== var_data
) {
1266 static struct hist_field
*
1267 check_field_for_var_refs(struct hist_trigger_data
*hist_data
,
1268 struct hist_field
*hist_field
,
1269 struct hist_trigger_data
*var_data
,
1270 unsigned int var_idx
,
1273 struct hist_field
*found
= NULL
;
1282 found
= check_field_for_var_ref(hist_field
, var_data
, var_idx
);
1286 for (i
= 0; i
< HIST_FIELD_OPERANDS_MAX
; i
++) {
1287 struct hist_field
*operand
;
1289 operand
= hist_field
->operands
[i
];
1290 found
= check_field_for_var_refs(hist_data
, operand
, var_data
,
1291 var_idx
, level
+ 1);
1299 static struct hist_field
*find_var_ref(struct hist_trigger_data
*hist_data
,
1300 struct hist_trigger_data
*var_data
,
1301 unsigned int var_idx
)
1303 struct hist_field
*hist_field
, *found
= NULL
;
1306 for_each_hist_field(i
, hist_data
) {
1307 hist_field
= hist_data
->fields
[i
];
1308 found
= check_field_for_var_refs(hist_data
, hist_field
,
1309 var_data
, var_idx
, 0);
1314 for (i
= 0; i
< hist_data
->n_synth_var_refs
; i
++) {
1315 hist_field
= hist_data
->synth_var_refs
[i
];
1316 found
= check_field_for_var_refs(hist_data
, hist_field
,
1317 var_data
, var_idx
, 0);
1325 static struct hist_field
*find_any_var_ref(struct hist_trigger_data
*hist_data
,
1326 unsigned int var_idx
)
1328 struct trace_array
*tr
= hist_data
->event_file
->tr
;
1329 struct hist_field
*found
= NULL
;
1330 struct hist_var_data
*var_data
;
1332 list_for_each_entry(var_data
, &tr
->hist_vars
, list
) {
1333 if (var_data
->hist_data
== hist_data
)
1335 found
= find_var_ref(var_data
->hist_data
, hist_data
, var_idx
);
1343 static bool check_var_refs(struct hist_trigger_data
*hist_data
)
1345 struct hist_field
*field
;
1349 for_each_hist_field(i
, hist_data
) {
1350 field
= hist_data
->fields
[i
];
1351 if (field
&& field
->flags
& HIST_FIELD_FL_VAR
) {
1352 if (find_any_var_ref(hist_data
, field
->var
.idx
)) {
1362 static struct hist_var_data
*find_hist_vars(struct hist_trigger_data
*hist_data
)
1364 struct trace_array
*tr
= hist_data
->event_file
->tr
;
1365 struct hist_var_data
*var_data
, *found
= NULL
;
1367 list_for_each_entry(var_data
, &tr
->hist_vars
, list
) {
1368 if (var_data
->hist_data
== hist_data
) {
1377 static bool field_has_hist_vars(struct hist_field
*hist_field
,
1388 if (hist_field
->flags
& HIST_FIELD_FL_VAR
||
1389 hist_field
->flags
& HIST_FIELD_FL_VAR_REF
)
1392 for (i
= 0; i
< HIST_FIELD_OPERANDS_MAX
; i
++) {
1393 struct hist_field
*operand
;
1395 operand
= hist_field
->operands
[i
];
1396 if (field_has_hist_vars(operand
, level
+ 1))
1403 static bool has_hist_vars(struct hist_trigger_data
*hist_data
)
1405 struct hist_field
*hist_field
;
1408 for_each_hist_field(i
, hist_data
) {
1409 hist_field
= hist_data
->fields
[i
];
1410 if (field_has_hist_vars(hist_field
, 0))
1417 static int save_hist_vars(struct hist_trigger_data
*hist_data
)
1419 struct trace_array
*tr
= hist_data
->event_file
->tr
;
1420 struct hist_var_data
*var_data
;
1422 var_data
= find_hist_vars(hist_data
);
1426 if (trace_array_get(tr
) < 0)
1429 var_data
= kzalloc(sizeof(*var_data
), GFP_KERNEL
);
1431 trace_array_put(tr
);
1435 var_data
->hist_data
= hist_data
;
1436 list_add(&var_data
->list
, &tr
->hist_vars
);
1441 static void remove_hist_vars(struct hist_trigger_data
*hist_data
)
1443 struct trace_array
*tr
= hist_data
->event_file
->tr
;
1444 struct hist_var_data
*var_data
;
1446 var_data
= find_hist_vars(hist_data
);
1450 if (WARN_ON(check_var_refs(hist_data
)))
1453 list_del(&var_data
->list
);
1457 trace_array_put(tr
);
1460 static struct hist_field
*find_var_field(struct hist_trigger_data
*hist_data
,
1461 const char *var_name
)
1463 struct hist_field
*hist_field
, *found
= NULL
;
1466 for_each_hist_field(i
, hist_data
) {
1467 hist_field
= hist_data
->fields
[i
];
1468 if (hist_field
&& hist_field
->flags
& HIST_FIELD_FL_VAR
&&
1469 strcmp(hist_field
->var
.name
, var_name
) == 0) {
1478 static struct hist_field
*find_var(struct hist_trigger_data
*hist_data
,
1479 struct trace_event_file
*file
,
1480 const char *var_name
)
1482 struct hist_trigger_data
*test_data
;
1483 struct event_trigger_data
*test
;
1484 struct hist_field
*hist_field
;
1486 hist_field
= find_var_field(hist_data
, var_name
);
1490 list_for_each_entry_rcu(test
, &file
->triggers
, list
) {
1491 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
1492 test_data
= test
->private_data
;
1493 hist_field
= find_var_field(test_data
, var_name
);
1502 static struct trace_event_file
*find_var_file(struct trace_array
*tr
,
1507 struct hist_trigger_data
*var_hist_data
;
1508 struct hist_var_data
*var_data
;
1509 struct trace_event_file
*file
, *found
= NULL
;
1512 return find_event_file(tr
, system
, event_name
);
1514 list_for_each_entry(var_data
, &tr
->hist_vars
, list
) {
1515 var_hist_data
= var_data
->hist_data
;
1516 file
= var_hist_data
->event_file
;
1520 if (find_var_field(var_hist_data
, var_name
)) {
1522 hist_err_event("Variable name not unique, need to use fully qualified name (subsys.event.var) for variable: ", system
, event_name
, var_name
);
1533 static struct hist_field
*find_file_var(struct trace_event_file
*file
,
1534 const char *var_name
)
1536 struct hist_trigger_data
*test_data
;
1537 struct event_trigger_data
*test
;
1538 struct hist_field
*hist_field
;
1540 list_for_each_entry_rcu(test
, &file
->triggers
, list
) {
1541 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
1542 test_data
= test
->private_data
;
1543 hist_field
= find_var_field(test_data
, var_name
);
1552 static struct hist_field
*
1553 find_match_var(struct hist_trigger_data
*hist_data
, char *var_name
)
1555 struct trace_array
*tr
= hist_data
->event_file
->tr
;
1556 struct hist_field
*hist_field
, *found
= NULL
;
1557 struct trace_event_file
*file
;
1560 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
1561 struct action_data
*data
= hist_data
->actions
[i
];
1563 if (data
->fn
== action_trace
) {
1564 char *system
= data
->onmatch
.match_event_system
;
1565 char *event_name
= data
->onmatch
.match_event
;
1567 file
= find_var_file(tr
, system
, event_name
, var_name
);
1570 hist_field
= find_file_var(file
, var_name
);
1573 hist_err_event("Variable name not unique, need to use fully qualified name (subsys.event.var) for variable: ", system
, event_name
, var_name
);
1574 return ERR_PTR(-EINVAL
);
1584 static struct hist_field
*find_event_var(struct hist_trigger_data
*hist_data
,
1589 struct trace_array
*tr
= hist_data
->event_file
->tr
;
1590 struct hist_field
*hist_field
= NULL
;
1591 struct trace_event_file
*file
;
1593 if (!system
|| !event_name
) {
1594 hist_field
= find_match_var(hist_data
, var_name
);
1595 if (IS_ERR(hist_field
))
1601 file
= find_var_file(tr
, system
, event_name
, var_name
);
1605 hist_field
= find_file_var(file
, var_name
);
1610 struct hist_elt_data
{
1613 char *field_var_str
[SYNTH_FIELDS_MAX
];
1616 static u64
hist_field_var_ref(struct hist_field
*hist_field
,
1617 struct tracing_map_elt
*elt
,
1618 struct ring_buffer_event
*rbe
,
1621 struct hist_elt_data
*elt_data
;
1624 elt_data
= elt
->private_data
;
1625 var_val
= elt_data
->var_ref_vals
[hist_field
->var_ref_idx
];
1630 static bool resolve_var_refs(struct hist_trigger_data
*hist_data
, void *key
,
1631 u64
*var_ref_vals
, bool self
)
1633 struct hist_trigger_data
*var_data
;
1634 struct tracing_map_elt
*var_elt
;
1635 struct hist_field
*hist_field
;
1636 unsigned int i
, var_idx
;
1637 bool resolved
= true;
1640 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
1641 hist_field
= hist_data
->var_refs
[i
];
1642 var_idx
= hist_field
->var
.idx
;
1643 var_data
= hist_field
->var
.hist_data
;
1645 if (var_data
== NULL
) {
1650 if ((self
&& var_data
!= hist_data
) ||
1651 (!self
&& var_data
== hist_data
))
1654 var_elt
= tracing_map_lookup(var_data
->map
, key
);
1660 if (!tracing_map_var_set(var_elt
, var_idx
)) {
1665 if (self
|| !hist_field
->read_once
)
1666 var_val
= tracing_map_read_var(var_elt
, var_idx
);
1668 var_val
= tracing_map_read_var_once(var_elt
, var_idx
);
1670 var_ref_vals
[i
] = var_val
;
1676 static const char *hist_field_name(struct hist_field
*field
,
1679 const char *field_name
= "";
1685 field_name
= field
->field
->name
;
1686 else if (field
->flags
& HIST_FIELD_FL_LOG2
||
1687 field
->flags
& HIST_FIELD_FL_ALIAS
)
1688 field_name
= hist_field_name(field
->operands
[0], ++level
);
1689 else if (field
->flags
& HIST_FIELD_FL_CPU
)
1691 else if (field
->flags
& HIST_FIELD_FL_EXPR
||
1692 field
->flags
& HIST_FIELD_FL_VAR_REF
) {
1693 if (field
->system
) {
1694 static char full_name
[MAX_FILTER_STR_VAL
];
1696 strcat(full_name
, field
->system
);
1697 strcat(full_name
, ".");
1698 strcat(full_name
, field
->event_name
);
1699 strcat(full_name
, ".");
1700 strcat(full_name
, field
->name
);
1701 field_name
= full_name
;
1703 field_name
= field
->name
;
1704 } else if (field
->flags
& HIST_FIELD_FL_TIMESTAMP
)
1705 field_name
= "common_timestamp";
1707 if (field_name
== NULL
)
1713 static hist_field_fn_t
select_value_fn(int field_size
, int field_is_signed
)
1715 hist_field_fn_t fn
= NULL
;
1717 switch (field_size
) {
1719 if (field_is_signed
)
1720 fn
= hist_field_s64
;
1722 fn
= hist_field_u64
;
1725 if (field_is_signed
)
1726 fn
= hist_field_s32
;
1728 fn
= hist_field_u32
;
1731 if (field_is_signed
)
1732 fn
= hist_field_s16
;
1734 fn
= hist_field_u16
;
1737 if (field_is_signed
)
1747 static int parse_map_size(char *str
)
1749 unsigned long size
, map_bits
;
1758 ret
= kstrtoul(str
, 0, &size
);
1762 map_bits
= ilog2(roundup_pow_of_two(size
));
1763 if (map_bits
< TRACING_MAP_BITS_MIN
||
1764 map_bits
> TRACING_MAP_BITS_MAX
)
1772 static void destroy_hist_trigger_attrs(struct hist_trigger_attrs
*attrs
)
1779 for (i
= 0; i
< attrs
->n_assignments
; i
++)
1780 kfree(attrs
->assignment_str
[i
]);
1782 for (i
= 0; i
< attrs
->n_actions
; i
++)
1783 kfree(attrs
->action_str
[i
]);
1786 kfree(attrs
->sort_key_str
);
1787 kfree(attrs
->keys_str
);
1788 kfree(attrs
->vals_str
);
1789 kfree(attrs
->clock
);
1793 static int parse_action(char *str
, struct hist_trigger_attrs
*attrs
)
1797 if (attrs
->n_actions
>= HIST_ACTIONS_MAX
)
1800 if ((strncmp(str
, "onmatch(", strlen("onmatch(")) == 0) ||
1801 (strncmp(str
, "onmax(", strlen("onmax(")) == 0)) {
1802 attrs
->action_str
[attrs
->n_actions
] = kstrdup(str
, GFP_KERNEL
);
1803 if (!attrs
->action_str
[attrs
->n_actions
]) {
1814 static int parse_assignment(char *str
, struct hist_trigger_attrs
*attrs
)
1818 if ((strncmp(str
, "key=", strlen("key=")) == 0) ||
1819 (strncmp(str
, "keys=", strlen("keys=")) == 0)) {
1820 attrs
->keys_str
= kstrdup(str
, GFP_KERNEL
);
1821 if (!attrs
->keys_str
) {
1825 } else if ((strncmp(str
, "val=", strlen("val=")) == 0) ||
1826 (strncmp(str
, "vals=", strlen("vals=")) == 0) ||
1827 (strncmp(str
, "values=", strlen("values=")) == 0)) {
1828 attrs
->vals_str
= kstrdup(str
, GFP_KERNEL
);
1829 if (!attrs
->vals_str
) {
1833 } else if (strncmp(str
, "sort=", strlen("sort=")) == 0) {
1834 attrs
->sort_key_str
= kstrdup(str
, GFP_KERNEL
);
1835 if (!attrs
->sort_key_str
) {
1839 } else if (strncmp(str
, "name=", strlen("name=")) == 0) {
1840 attrs
->name
= kstrdup(str
, GFP_KERNEL
);
1845 } else if (strncmp(str
, "clock=", strlen("clock=")) == 0) {
1852 str
= strstrip(str
);
1853 attrs
->clock
= kstrdup(str
, GFP_KERNEL
);
1854 if (!attrs
->clock
) {
1858 } else if (strncmp(str
, "size=", strlen("size=")) == 0) {
1859 int map_bits
= parse_map_size(str
);
1865 attrs
->map_bits
= map_bits
;
1869 if (attrs
->n_assignments
== TRACING_MAP_VARS_MAX
) {
1870 hist_err("Too many variables defined: ", str
);
1875 assignment
= kstrdup(str
, GFP_KERNEL
);
1881 attrs
->assignment_str
[attrs
->n_assignments
++] = assignment
;
1887 static struct hist_trigger_attrs
*parse_hist_trigger_attrs(char *trigger_str
)
1889 struct hist_trigger_attrs
*attrs
;
1892 attrs
= kzalloc(sizeof(*attrs
), GFP_KERNEL
);
1894 return ERR_PTR(-ENOMEM
);
1896 while (trigger_str
) {
1897 char *str
= strsep(&trigger_str
, ":");
1899 if (strchr(str
, '=')) {
1900 ret
= parse_assignment(str
, attrs
);
1903 } else if (strcmp(str
, "pause") == 0)
1904 attrs
->pause
= true;
1905 else if ((strcmp(str
, "cont") == 0) ||
1906 (strcmp(str
, "continue") == 0))
1908 else if (strcmp(str
, "clear") == 0)
1909 attrs
->clear
= true;
1911 ret
= parse_action(str
, attrs
);
1917 if (!attrs
->keys_str
) {
1922 if (!attrs
->clock
) {
1923 attrs
->clock
= kstrdup("global", GFP_KERNEL
);
1924 if (!attrs
->clock
) {
1932 destroy_hist_trigger_attrs(attrs
);
1934 return ERR_PTR(ret
);
1937 static inline void save_comm(char *comm
, struct task_struct
*task
)
1940 strcpy(comm
, "<idle>");
1944 if (WARN_ON_ONCE(task
->pid
< 0)) {
1945 strcpy(comm
, "<XXX>");
1949 memcpy(comm
, task
->comm
, TASK_COMM_LEN
);
1952 static void hist_elt_data_free(struct hist_elt_data
*elt_data
)
1956 for (i
= 0; i
< SYNTH_FIELDS_MAX
; i
++)
1957 kfree(elt_data
->field_var_str
[i
]);
1959 kfree(elt_data
->comm
);
1963 static void hist_trigger_elt_data_free(struct tracing_map_elt
*elt
)
1965 struct hist_elt_data
*elt_data
= elt
->private_data
;
1967 hist_elt_data_free(elt_data
);
1970 static int hist_trigger_elt_data_alloc(struct tracing_map_elt
*elt
)
1972 struct hist_trigger_data
*hist_data
= elt
->map
->private_data
;
1973 unsigned int size
= TASK_COMM_LEN
;
1974 struct hist_elt_data
*elt_data
;
1975 struct hist_field
*key_field
;
1976 unsigned int i
, n_str
;
1978 elt_data
= kzalloc(sizeof(*elt_data
), GFP_KERNEL
);
1982 for_each_hist_key_field(i
, hist_data
) {
1983 key_field
= hist_data
->fields
[i
];
1985 if (key_field
->flags
& HIST_FIELD_FL_EXECNAME
) {
1986 elt_data
->comm
= kzalloc(size
, GFP_KERNEL
);
1987 if (!elt_data
->comm
) {
1995 n_str
= hist_data
->n_field_var_str
+ hist_data
->n_max_var_str
;
1997 size
= STR_VAR_LEN_MAX
;
1999 for (i
= 0; i
< n_str
; i
++) {
2000 elt_data
->field_var_str
[i
] = kzalloc(size
, GFP_KERNEL
);
2001 if (!elt_data
->field_var_str
[i
]) {
2002 hist_elt_data_free(elt_data
);
2007 elt
->private_data
= elt_data
;
2012 static void hist_trigger_elt_data_init(struct tracing_map_elt
*elt
)
2014 struct hist_elt_data
*elt_data
= elt
->private_data
;
2017 save_comm(elt_data
->comm
, current
);
2020 static const struct tracing_map_ops hist_trigger_elt_data_ops
= {
2021 .elt_alloc
= hist_trigger_elt_data_alloc
,
2022 .elt_free
= hist_trigger_elt_data_free
,
2023 .elt_init
= hist_trigger_elt_data_init
,
2026 static const char *get_hist_field_flags(struct hist_field
*hist_field
)
2028 const char *flags_str
= NULL
;
2030 if (hist_field
->flags
& HIST_FIELD_FL_HEX
)
2032 else if (hist_field
->flags
& HIST_FIELD_FL_SYM
)
2034 else if (hist_field
->flags
& HIST_FIELD_FL_SYM_OFFSET
)
2035 flags_str
= "sym-offset";
2036 else if (hist_field
->flags
& HIST_FIELD_FL_EXECNAME
)
2037 flags_str
= "execname";
2038 else if (hist_field
->flags
& HIST_FIELD_FL_SYSCALL
)
2039 flags_str
= "syscall";
2040 else if (hist_field
->flags
& HIST_FIELD_FL_LOG2
)
2042 else if (hist_field
->flags
& HIST_FIELD_FL_TIMESTAMP_USECS
)
2043 flags_str
= "usecs";
2048 static void expr_field_str(struct hist_field
*field
, char *expr
)
2050 if (field
->flags
& HIST_FIELD_FL_VAR_REF
)
2053 strcat(expr
, hist_field_name(field
, 0));
2055 if (field
->flags
&& !(field
->flags
& HIST_FIELD_FL_VAR_REF
)) {
2056 const char *flags_str
= get_hist_field_flags(field
);
2060 strcat(expr
, flags_str
);
2065 static char *expr_str(struct hist_field
*field
, unsigned int level
)
2072 expr
= kzalloc(MAX_FILTER_STR_VAL
, GFP_KERNEL
);
2076 if (!field
->operands
[0]) {
2077 expr_field_str(field
, expr
);
2081 if (field
->operator == FIELD_OP_UNARY_MINUS
) {
2085 subexpr
= expr_str(field
->operands
[0], ++level
);
2090 strcat(expr
, subexpr
);
2098 expr_field_str(field
->operands
[0], expr
);
2100 switch (field
->operator) {
2101 case FIELD_OP_MINUS
:
2112 expr_field_str(field
->operands
[1], expr
);
2117 static int contains_operator(char *str
)
2119 enum field_op_id field_op
= FIELD_OP_NONE
;
2122 op
= strpbrk(str
, "+-");
2124 return FIELD_OP_NONE
;
2129 field_op
= FIELD_OP_UNARY_MINUS
;
2131 field_op
= FIELD_OP_MINUS
;
2134 field_op
= FIELD_OP_PLUS
;
2143 static void destroy_hist_field(struct hist_field
*hist_field
,
2154 for (i
= 0; i
< HIST_FIELD_OPERANDS_MAX
; i
++)
2155 destroy_hist_field(hist_field
->operands
[i
], level
+ 1);
2157 kfree(hist_field
->var
.name
);
2158 kfree(hist_field
->name
);
2159 kfree(hist_field
->type
);
2164 static struct hist_field
*create_hist_field(struct hist_trigger_data
*hist_data
,
2165 struct ftrace_event_field
*field
,
2166 unsigned long flags
,
2169 struct hist_field
*hist_field
;
2171 if (field
&& is_function_field(field
))
2174 hist_field
= kzalloc(sizeof(struct hist_field
), GFP_KERNEL
);
2178 hist_field
->hist_data
= hist_data
;
2180 if (flags
& HIST_FIELD_FL_EXPR
|| flags
& HIST_FIELD_FL_ALIAS
)
2181 goto out
; /* caller will populate */
2183 if (flags
& HIST_FIELD_FL_VAR_REF
) {
2184 hist_field
->fn
= hist_field_var_ref
;
2188 if (flags
& HIST_FIELD_FL_HITCOUNT
) {
2189 hist_field
->fn
= hist_field_counter
;
2190 hist_field
->size
= sizeof(u64
);
2191 hist_field
->type
= kstrdup("u64", GFP_KERNEL
);
2192 if (!hist_field
->type
)
2197 if (flags
& HIST_FIELD_FL_STACKTRACE
) {
2198 hist_field
->fn
= hist_field_none
;
2202 if (flags
& HIST_FIELD_FL_LOG2
) {
2203 unsigned long fl
= flags
& ~HIST_FIELD_FL_LOG2
;
2204 hist_field
->fn
= hist_field_log2
;
2205 hist_field
->operands
[0] = create_hist_field(hist_data
, field
, fl
, NULL
);
2206 hist_field
->size
= hist_field
->operands
[0]->size
;
2207 hist_field
->type
= kstrdup(hist_field
->operands
[0]->type
, GFP_KERNEL
);
2208 if (!hist_field
->type
)
2213 if (flags
& HIST_FIELD_FL_TIMESTAMP
) {
2214 hist_field
->fn
= hist_field_timestamp
;
2215 hist_field
->size
= sizeof(u64
);
2216 hist_field
->type
= kstrdup("u64", GFP_KERNEL
);
2217 if (!hist_field
->type
)
2222 if (flags
& HIST_FIELD_FL_CPU
) {
2223 hist_field
->fn
= hist_field_cpu
;
2224 hist_field
->size
= sizeof(int);
2225 hist_field
->type
= kstrdup("unsigned int", GFP_KERNEL
);
2226 if (!hist_field
->type
)
2231 if (WARN_ON_ONCE(!field
))
2234 if (is_string_field(field
)) {
2235 flags
|= HIST_FIELD_FL_STRING
;
2237 hist_field
->size
= MAX_FILTER_STR_VAL
;
2238 hist_field
->type
= kstrdup(field
->type
, GFP_KERNEL
);
2239 if (!hist_field
->type
)
2242 if (field
->filter_type
== FILTER_STATIC_STRING
)
2243 hist_field
->fn
= hist_field_string
;
2244 else if (field
->filter_type
== FILTER_DYN_STRING
)
2245 hist_field
->fn
= hist_field_dynstring
;
2247 hist_field
->fn
= hist_field_pstring
;
2249 hist_field
->size
= field
->size
;
2250 hist_field
->is_signed
= field
->is_signed
;
2251 hist_field
->type
= kstrdup(field
->type
, GFP_KERNEL
);
2252 if (!hist_field
->type
)
2255 hist_field
->fn
= select_value_fn(field
->size
,
2257 if (!hist_field
->fn
) {
2258 destroy_hist_field(hist_field
, 0);
2263 hist_field
->field
= field
;
2264 hist_field
->flags
= flags
;
2267 hist_field
->var
.name
= kstrdup(var_name
, GFP_KERNEL
);
2268 if (!hist_field
->var
.name
)
2274 destroy_hist_field(hist_field
, 0);
2278 static void destroy_hist_fields(struct hist_trigger_data
*hist_data
)
2282 for (i
= 0; i
< HIST_FIELDS_MAX
; i
++) {
2283 if (hist_data
->fields
[i
]) {
2284 destroy_hist_field(hist_data
->fields
[i
], 0);
2285 hist_data
->fields
[i
] = NULL
;
2290 static int init_var_ref(struct hist_field
*ref_field
,
2291 struct hist_field
*var_field
,
2292 char *system
, char *event_name
)
2296 ref_field
->var
.idx
= var_field
->var
.idx
;
2297 ref_field
->var
.hist_data
= var_field
->hist_data
;
2298 ref_field
->size
= var_field
->size
;
2299 ref_field
->is_signed
= var_field
->is_signed
;
2300 ref_field
->flags
|= var_field
->flags
&
2301 (HIST_FIELD_FL_TIMESTAMP
| HIST_FIELD_FL_TIMESTAMP_USECS
);
2304 ref_field
->system
= kstrdup(system
, GFP_KERNEL
);
2305 if (!ref_field
->system
)
2310 ref_field
->event_name
= kstrdup(event_name
, GFP_KERNEL
);
2311 if (!ref_field
->event_name
) {
2317 if (var_field
->var
.name
) {
2318 ref_field
->name
= kstrdup(var_field
->var
.name
, GFP_KERNEL
);
2319 if (!ref_field
->name
) {
2323 } else if (var_field
->name
) {
2324 ref_field
->name
= kstrdup(var_field
->name
, GFP_KERNEL
);
2325 if (!ref_field
->name
) {
2331 ref_field
->type
= kstrdup(var_field
->type
, GFP_KERNEL
);
2332 if (!ref_field
->type
) {
2339 kfree(ref_field
->system
);
2340 kfree(ref_field
->event_name
);
2341 kfree(ref_field
->name
);
2346 static struct hist_field
*create_var_ref(struct hist_field
*var_field
,
2347 char *system
, char *event_name
)
2349 unsigned long flags
= HIST_FIELD_FL_VAR_REF
;
2350 struct hist_field
*ref_field
;
2352 ref_field
= create_hist_field(var_field
->hist_data
, NULL
, flags
, NULL
);
2354 if (init_var_ref(ref_field
, var_field
, system
, event_name
)) {
2355 destroy_hist_field(ref_field
, 0);
/*
 * is_var_ref - true if @var_name looks like a variable reference,
 * i.e. non-NULL, at least "$x" long, starting with '$'.
 */
static bool is_var_ref(char *var_name)
{
	if (!var_name || strlen(var_name) < 2 || var_name[0] != '$')
		return false;

	return true;
}
2371 static char *field_name_from_var(struct hist_trigger_data
*hist_data
,
2377 for (i
= 0; i
< hist_data
->attrs
->var_defs
.n_vars
; i
++) {
2378 name
= hist_data
->attrs
->var_defs
.name
[i
];
2380 if (strcmp(var_name
, name
) == 0) {
2381 field
= hist_data
->attrs
->var_defs
.expr
[i
];
2382 if (contains_operator(field
) || is_var_ref(field
))
2391 static char *local_field_var_ref(struct hist_trigger_data
*hist_data
,
2392 char *system
, char *event_name
,
2395 struct trace_event_call
*call
;
2397 if (system
&& event_name
) {
2398 call
= hist_data
->event_file
->event_call
;
2400 if (strcmp(system
, call
->class->system
) != 0)
2403 if (strcmp(event_name
, trace_event_name(call
)) != 0)
2407 if (!!system
!= !!event_name
)
2410 if (!is_var_ref(var_name
))
2415 return field_name_from_var(hist_data
, var_name
);
2418 static struct hist_field
*parse_var_ref(struct hist_trigger_data
*hist_data
,
2419 char *system
, char *event_name
,
2422 struct hist_field
*var_field
= NULL
, *ref_field
= NULL
;
2424 if (!is_var_ref(var_name
))
2429 var_field
= find_event_var(hist_data
, system
, event_name
, var_name
);
2431 ref_field
= create_var_ref(var_field
, system
, event_name
);
2434 hist_err_event("Couldn't find variable: $",
2435 system
, event_name
, var_name
);
2440 static struct ftrace_event_field
*
2441 parse_field(struct hist_trigger_data
*hist_data
, struct trace_event_file
*file
,
2442 char *field_str
, unsigned long *flags
)
2444 struct ftrace_event_field
*field
= NULL
;
2445 char *field_name
, *modifier
, *str
;
2447 modifier
= str
= kstrdup(field_str
, GFP_KERNEL
);
2449 return ERR_PTR(-ENOMEM
);
2451 field_name
= strsep(&modifier
, ".");
2453 if (strcmp(modifier
, "hex") == 0)
2454 *flags
|= HIST_FIELD_FL_HEX
;
2455 else if (strcmp(modifier
, "sym") == 0)
2456 *flags
|= HIST_FIELD_FL_SYM
;
2457 else if (strcmp(modifier
, "sym-offset") == 0)
2458 *flags
|= HIST_FIELD_FL_SYM_OFFSET
;
2459 else if ((strcmp(modifier
, "execname") == 0) &&
2460 (strcmp(field_name
, "common_pid") == 0))
2461 *flags
|= HIST_FIELD_FL_EXECNAME
;
2462 else if (strcmp(modifier
, "syscall") == 0)
2463 *flags
|= HIST_FIELD_FL_SYSCALL
;
2464 else if (strcmp(modifier
, "log2") == 0)
2465 *flags
|= HIST_FIELD_FL_LOG2
;
2466 else if (strcmp(modifier
, "usecs") == 0)
2467 *flags
|= HIST_FIELD_FL_TIMESTAMP_USECS
;
2469 field
= ERR_PTR(-EINVAL
);
2474 if (strcmp(field_name
, "common_timestamp") == 0) {
2475 *flags
|= HIST_FIELD_FL_TIMESTAMP
;
2476 hist_data
->enable_timestamps
= true;
2477 if (*flags
& HIST_FIELD_FL_TIMESTAMP_USECS
)
2478 hist_data
->attrs
->ts_in_usecs
= true;
2479 } else if (strcmp(field_name
, "cpu") == 0)
2480 *flags
|= HIST_FIELD_FL_CPU
;
2482 field
= trace_find_event_field(file
->event_call
, field_name
);
2483 if (!field
|| !field
->size
) {
2484 field
= ERR_PTR(-EINVAL
);
2494 static struct hist_field
*create_alias(struct hist_trigger_data
*hist_data
,
2495 struct hist_field
*var_ref
,
2498 struct hist_field
*alias
= NULL
;
2499 unsigned long flags
= HIST_FIELD_FL_ALIAS
| HIST_FIELD_FL_VAR
;
2501 alias
= create_hist_field(hist_data
, NULL
, flags
, var_name
);
2505 alias
->fn
= var_ref
->fn
;
2506 alias
->operands
[0] = var_ref
;
2508 if (init_var_ref(alias
, var_ref
, var_ref
->system
, var_ref
->event_name
)) {
2509 destroy_hist_field(alias
, 0);
2516 static struct hist_field
*parse_atom(struct hist_trigger_data
*hist_data
,
2517 struct trace_event_file
*file
, char *str
,
2518 unsigned long *flags
, char *var_name
)
2520 char *s
, *ref_system
= NULL
, *ref_event
= NULL
, *ref_var
= str
;
2521 struct ftrace_event_field
*field
= NULL
;
2522 struct hist_field
*hist_field
= NULL
;
2525 s
= strchr(str
, '.');
2527 s
= strchr(++s
, '.');
2529 ref_system
= strsep(&str
, ".");
2534 ref_event
= strsep(&str
, ".");
2543 s
= local_field_var_ref(hist_data
, ref_system
, ref_event
, ref_var
);
2545 hist_field
= parse_var_ref(hist_data
, ref_system
, ref_event
, ref_var
);
2547 hist_data
->var_refs
[hist_data
->n_var_refs
] = hist_field
;
2548 hist_field
->var_ref_idx
= hist_data
->n_var_refs
++;
2550 hist_field
= create_alias(hist_data
, hist_field
, var_name
);
2561 field
= parse_field(hist_data
, file
, str
, flags
);
2562 if (IS_ERR(field
)) {
2563 ret
= PTR_ERR(field
);
2567 hist_field
= create_hist_field(hist_data
, field
, *flags
, var_name
);
2575 return ERR_PTR(ret
);
2578 static struct hist_field
*parse_expr(struct hist_trigger_data
*hist_data
,
2579 struct trace_event_file
*file
,
2580 char *str
, unsigned long flags
,
2581 char *var_name
, unsigned int level
);
2583 static struct hist_field
*parse_unary(struct hist_trigger_data
*hist_data
,
2584 struct trace_event_file
*file
,
2585 char *str
, unsigned long flags
,
2586 char *var_name
, unsigned int level
)
2588 struct hist_field
*operand1
, *expr
= NULL
;
2589 unsigned long operand_flags
;
2593 /* we support only -(xxx) i.e. explicit parens required */
2596 hist_err("Too many subexpressions (3 max): ", str
);
2601 str
++; /* skip leading '-' */
2603 s
= strchr(str
, '(');
2611 s
= strrchr(str
, ')');
2615 ret
= -EINVAL
; /* no closing ')' */
2619 flags
|= HIST_FIELD_FL_EXPR
;
2620 expr
= create_hist_field(hist_data
, NULL
, flags
, var_name
);
2627 operand1
= parse_expr(hist_data
, file
, str
, operand_flags
, NULL
, ++level
);
2628 if (IS_ERR(operand1
)) {
2629 ret
= PTR_ERR(operand1
);
2633 expr
->flags
|= operand1
->flags
&
2634 (HIST_FIELD_FL_TIMESTAMP
| HIST_FIELD_FL_TIMESTAMP_USECS
);
2635 expr
->fn
= hist_field_unary_minus
;
2636 expr
->operands
[0] = operand1
;
2637 expr
->operator = FIELD_OP_UNARY_MINUS
;
2638 expr
->name
= expr_str(expr
, 0);
2639 expr
->type
= kstrdup(operand1
->type
, GFP_KERNEL
);
2647 destroy_hist_field(expr
, 0);
2648 return ERR_PTR(ret
);
2651 static int check_expr_operands(struct hist_field
*operand1
,
2652 struct hist_field
*operand2
)
2654 unsigned long operand1_flags
= operand1
->flags
;
2655 unsigned long operand2_flags
= operand2
->flags
;
2657 if ((operand1_flags
& HIST_FIELD_FL_VAR_REF
) ||
2658 (operand1_flags
& HIST_FIELD_FL_ALIAS
)) {
2659 struct hist_field
*var
;
2661 var
= find_var_field(operand1
->var
.hist_data
, operand1
->name
);
2664 operand1_flags
= var
->flags
;
2667 if ((operand2_flags
& HIST_FIELD_FL_VAR_REF
) ||
2668 (operand2_flags
& HIST_FIELD_FL_ALIAS
)) {
2669 struct hist_field
*var
;
2671 var
= find_var_field(operand2
->var
.hist_data
, operand2
->name
);
2674 operand2_flags
= var
->flags
;
2677 if ((operand1_flags
& HIST_FIELD_FL_TIMESTAMP_USECS
) !=
2678 (operand2_flags
& HIST_FIELD_FL_TIMESTAMP_USECS
)) {
2679 hist_err("Timestamp units in expression don't match", NULL
);
2686 static struct hist_field
*parse_expr(struct hist_trigger_data
*hist_data
,
2687 struct trace_event_file
*file
,
2688 char *str
, unsigned long flags
,
2689 char *var_name
, unsigned int level
)
2691 struct hist_field
*operand1
= NULL
, *operand2
= NULL
, *expr
= NULL
;
2692 unsigned long operand_flags
;
2693 int field_op
, ret
= -EINVAL
;
2694 char *sep
, *operand1_str
;
2697 hist_err("Too many subexpressions (3 max): ", str
);
2698 return ERR_PTR(-EINVAL
);
2701 field_op
= contains_operator(str
);
2703 if (field_op
== FIELD_OP_NONE
)
2704 return parse_atom(hist_data
, file
, str
, &flags
, var_name
);
2706 if (field_op
== FIELD_OP_UNARY_MINUS
)
2707 return parse_unary(hist_data
, file
, str
, flags
, var_name
, ++level
);
2710 case FIELD_OP_MINUS
:
2720 operand1_str
= strsep(&str
, sep
);
2721 if (!operand1_str
|| !str
)
2725 operand1
= parse_atom(hist_data
, file
, operand1_str
,
2726 &operand_flags
, NULL
);
2727 if (IS_ERR(operand1
)) {
2728 ret
= PTR_ERR(operand1
);
2733 /* rest of string could be another expression e.g. b+c in a+b+c */
2735 operand2
= parse_expr(hist_data
, file
, str
, operand_flags
, NULL
, ++level
);
2736 if (IS_ERR(operand2
)) {
2737 ret
= PTR_ERR(operand2
);
2742 ret
= check_expr_operands(operand1
, operand2
);
2746 flags
|= HIST_FIELD_FL_EXPR
;
2748 flags
|= operand1
->flags
&
2749 (HIST_FIELD_FL_TIMESTAMP
| HIST_FIELD_FL_TIMESTAMP_USECS
);
2751 expr
= create_hist_field(hist_data
, NULL
, flags
, var_name
);
2757 operand1
->read_once
= true;
2758 operand2
->read_once
= true;
2760 expr
->operands
[0] = operand1
;
2761 expr
->operands
[1] = operand2
;
2762 expr
->operator = field_op
;
2763 expr
->name
= expr_str(expr
, 0);
2764 expr
->type
= kstrdup(operand1
->type
, GFP_KERNEL
);
2771 case FIELD_OP_MINUS
:
2772 expr
->fn
= hist_field_minus
;
2775 expr
->fn
= hist_field_plus
;
2784 destroy_hist_field(operand1
, 0);
2785 destroy_hist_field(operand2
, 0);
2786 destroy_hist_field(expr
, 0);
2788 return ERR_PTR(ret
);
2791 static char *find_trigger_filter(struct hist_trigger_data
*hist_data
,
2792 struct trace_event_file
*file
)
2794 struct event_trigger_data
*test
;
2796 list_for_each_entry_rcu(test
, &file
->triggers
, list
) {
2797 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
2798 if (test
->private_data
== hist_data
)
2799 return test
->filter_str
;
2806 static struct event_command trigger_hist_cmd
;
2807 static int event_hist_trigger_func(struct event_command
*cmd_ops
,
2808 struct trace_event_file
*file
,
2809 char *glob
, char *cmd
, char *param
);
2811 static bool compatible_keys(struct hist_trigger_data
*target_hist_data
,
2812 struct hist_trigger_data
*hist_data
,
2813 unsigned int n_keys
)
2815 struct hist_field
*target_hist_field
, *hist_field
;
2816 unsigned int n
, i
, j
;
2818 if (hist_data
->n_fields
- hist_data
->n_vals
!= n_keys
)
2821 i
= hist_data
->n_vals
;
2822 j
= target_hist_data
->n_vals
;
2824 for (n
= 0; n
< n_keys
; n
++) {
2825 hist_field
= hist_data
->fields
[i
+ n
];
2826 target_hist_field
= target_hist_data
->fields
[j
+ n
];
2828 if (strcmp(hist_field
->type
, target_hist_field
->type
) != 0)
2830 if (hist_field
->size
!= target_hist_field
->size
)
2832 if (hist_field
->is_signed
!= target_hist_field
->is_signed
)
2839 static struct hist_trigger_data
*
2840 find_compatible_hist(struct hist_trigger_data
*target_hist_data
,
2841 struct trace_event_file
*file
)
2843 struct hist_trigger_data
*hist_data
;
2844 struct event_trigger_data
*test
;
2845 unsigned int n_keys
;
2847 n_keys
= target_hist_data
->n_fields
- target_hist_data
->n_vals
;
2849 list_for_each_entry_rcu(test
, &file
->triggers
, list
) {
2850 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
2851 hist_data
= test
->private_data
;
2853 if (compatible_keys(target_hist_data
, hist_data
, n_keys
))
2861 static struct trace_event_file
*event_file(struct trace_array
*tr
,
2862 char *system
, char *event_name
)
2864 struct trace_event_file
*file
;
2866 file
= find_event_file(tr
, system
, event_name
);
2868 return ERR_PTR(-EINVAL
);
2873 static struct hist_field
*
2874 find_synthetic_field_var(struct hist_trigger_data
*target_hist_data
,
2875 char *system
, char *event_name
, char *field_name
)
2877 struct hist_field
*event_var
;
2878 char *synthetic_name
;
2880 synthetic_name
= kzalloc(MAX_FILTER_STR_VAL
, GFP_KERNEL
);
2881 if (!synthetic_name
)
2882 return ERR_PTR(-ENOMEM
);
2884 strcpy(synthetic_name
, "synthetic_");
2885 strcat(synthetic_name
, field_name
);
2887 event_var
= find_event_var(target_hist_data
, system
, event_name
, synthetic_name
);
2889 kfree(synthetic_name
);
2895 * create_field_var_hist - Automatically create a histogram and var for a field
2896 * @target_hist_data: The target hist trigger
2897 * @subsys_name: Optional subsystem name
2898 * @event_name: Optional event name
2899 * @field_name: The name of the field (and the resulting variable)
2901 * Hist trigger actions fetch data from variables, not directly from
2902 * events. However, for convenience, users are allowed to directly
2903 * specify an event field in an action, which will be automatically
2904 * converted into a variable on their behalf.
2906 * If a user specifies a field on an event that isn't the event the
2907 * histogram currently being defined (the target event histogram), the
2908 * only way that can be accomplished is if a new hist trigger is
2909 * created and the field variable defined on that.
2911 * This function creates a new histogram compatible with the target
2912 * event (meaning a histogram with the same key as the target
2913 * histogram), and creates a variable for the specified field, but
2914 * with 'synthetic_' prepended to the variable name in order to avoid
2915 * collision with normal field variables.
2917 * Return: The variable created for the field.
2919 static struct hist_field
*
2920 create_field_var_hist(struct hist_trigger_data
*target_hist_data
,
2921 char *subsys_name
, char *event_name
, char *field_name
)
2923 struct trace_array
*tr
= target_hist_data
->event_file
->tr
;
2924 struct hist_field
*event_var
= ERR_PTR(-EINVAL
);
2925 struct hist_trigger_data
*hist_data
;
2926 unsigned int i
, n
, first
= true;
2927 struct field_var_hist
*var_hist
;
2928 struct trace_event_file
*file
;
2929 struct hist_field
*key_field
;
2934 if (target_hist_data
->n_field_var_hists
>= SYNTH_FIELDS_MAX
) {
2935 hist_err_event("onmatch: Too many field variables defined: ",
2936 subsys_name
, event_name
, field_name
);
2937 return ERR_PTR(-EINVAL
);
2940 file
= event_file(tr
, subsys_name
, event_name
);
2943 hist_err_event("onmatch: Event file not found: ",
2944 subsys_name
, event_name
, field_name
);
2945 ret
= PTR_ERR(file
);
2946 return ERR_PTR(ret
);
2950 * Look for a histogram compatible with target. We'll use the
2951 * found histogram specification to create a new matching
2952 * histogram with our variable on it. target_hist_data is not
2953 * yet a registered histogram so we can't use that.
2955 hist_data
= find_compatible_hist(target_hist_data
, file
);
2957 hist_err_event("onmatch: Matching event histogram not found: ",
2958 subsys_name
, event_name
, field_name
);
2959 return ERR_PTR(-EINVAL
);
2962 /* See if a synthetic field variable has already been created */
2963 event_var
= find_synthetic_field_var(target_hist_data
, subsys_name
,
2964 event_name
, field_name
);
2965 if (!IS_ERR_OR_NULL(event_var
))
2968 var_hist
= kzalloc(sizeof(*var_hist
), GFP_KERNEL
);
2970 return ERR_PTR(-ENOMEM
);
2972 cmd
= kzalloc(MAX_FILTER_STR_VAL
, GFP_KERNEL
);
2975 return ERR_PTR(-ENOMEM
);
2978 /* Use the same keys as the compatible histogram */
2979 strcat(cmd
, "keys=");
2981 for_each_hist_key_field(i
, hist_data
) {
2982 key_field
= hist_data
->fields
[i
];
2985 strcat(cmd
, key_field
->field
->name
);
2989 /* Create the synthetic field variable specification */
2990 strcat(cmd
, ":synthetic_");
2991 strcat(cmd
, field_name
);
2993 strcat(cmd
, field_name
);
2995 /* Use the same filter as the compatible histogram */
2996 saved_filter
= find_trigger_filter(hist_data
, file
);
2998 strcat(cmd
, " if ");
2999 strcat(cmd
, saved_filter
);
3002 var_hist
->cmd
= kstrdup(cmd
, GFP_KERNEL
);
3003 if (!var_hist
->cmd
) {
3006 return ERR_PTR(-ENOMEM
);
3009 /* Save the compatible histogram information */
3010 var_hist
->hist_data
= hist_data
;
3012 /* Create the new histogram with our variable */
3013 ret
= event_hist_trigger_func(&trigger_hist_cmd
, file
,
3017 kfree(var_hist
->cmd
);
3019 hist_err_event("onmatch: Couldn't create histogram for field: ",
3020 subsys_name
, event_name
, field_name
);
3021 return ERR_PTR(ret
);
3026 /* If we can't find the variable, something went wrong */
3027 event_var
= find_synthetic_field_var(target_hist_data
, subsys_name
,
3028 event_name
, field_name
);
3029 if (IS_ERR_OR_NULL(event_var
)) {
3030 kfree(var_hist
->cmd
);
3032 hist_err_event("onmatch: Couldn't find synthetic variable: ",
3033 subsys_name
, event_name
, field_name
);
3034 return ERR_PTR(-EINVAL
);
3037 n
= target_hist_data
->n_field_var_hists
;
3038 target_hist_data
->field_var_hists
[n
] = var_hist
;
3039 target_hist_data
->n_field_var_hists
++;
3044 static struct hist_field
*
3045 find_target_event_var(struct hist_trigger_data
*hist_data
,
3046 char *subsys_name
, char *event_name
, char *var_name
)
3048 struct trace_event_file
*file
= hist_data
->event_file
;
3049 struct hist_field
*hist_field
= NULL
;
3052 struct trace_event_call
*call
;
3057 call
= file
->event_call
;
3059 if (strcmp(subsys_name
, call
->class->system
) != 0)
3062 if (strcmp(event_name
, trace_event_name(call
)) != 0)
3066 hist_field
= find_var_field(hist_data
, var_name
);
3071 static inline void __update_field_vars(struct tracing_map_elt
*elt
,
3072 struct ring_buffer_event
*rbe
,
3074 struct field_var
**field_vars
,
3075 unsigned int n_field_vars
,
3076 unsigned int field_var_str_start
)
3078 struct hist_elt_data
*elt_data
= elt
->private_data
;
3079 unsigned int i
, j
, var_idx
;
3082 for (i
= 0, j
= field_var_str_start
; i
< n_field_vars
; i
++) {
3083 struct field_var
*field_var
= field_vars
[i
];
3084 struct hist_field
*var
= field_var
->var
;
3085 struct hist_field
*val
= field_var
->val
;
3087 var_val
= val
->fn(val
, elt
, rbe
, rec
);
3088 var_idx
= var
->var
.idx
;
3090 if (val
->flags
& HIST_FIELD_FL_STRING
) {
3091 char *str
= elt_data
->field_var_str
[j
++];
3092 char *val_str
= (char *)(uintptr_t)var_val
;
3094 strscpy(str
, val_str
, STR_VAR_LEN_MAX
);
3095 var_val
= (u64
)(uintptr_t)str
;
3097 tracing_map_set_var(elt
, var_idx
, var_val
);
3101 static void update_field_vars(struct hist_trigger_data
*hist_data
,
3102 struct tracing_map_elt
*elt
,
3103 struct ring_buffer_event
*rbe
,
3106 __update_field_vars(elt
, rbe
, rec
, hist_data
->field_vars
,
3107 hist_data
->n_field_vars
, 0);
3110 static void update_max_vars(struct hist_trigger_data
*hist_data
,
3111 struct tracing_map_elt
*elt
,
3112 struct ring_buffer_event
*rbe
,
3115 __update_field_vars(elt
, rbe
, rec
, hist_data
->max_vars
,
3116 hist_data
->n_max_vars
, hist_data
->n_field_var_str
);
3119 static struct hist_field
*create_var(struct hist_trigger_data
*hist_data
,
3120 struct trace_event_file
*file
,
3121 char *name
, int size
, const char *type
)
3123 struct hist_field
*var
;
3126 if (find_var(hist_data
, file
, name
) && !hist_data
->remove
) {
3127 var
= ERR_PTR(-EINVAL
);
3131 var
= kzalloc(sizeof(struct hist_field
), GFP_KERNEL
);
3133 var
= ERR_PTR(-ENOMEM
);
3137 idx
= tracing_map_add_var(hist_data
->map
);
3140 var
= ERR_PTR(-EINVAL
);
3144 var
->flags
= HIST_FIELD_FL_VAR
;
3146 var
->var
.hist_data
= var
->hist_data
= hist_data
;
3148 var
->var
.name
= kstrdup(name
, GFP_KERNEL
);
3149 var
->type
= kstrdup(type
, GFP_KERNEL
);
3150 if (!var
->var
.name
|| !var
->type
) {
3151 kfree(var
->var
.name
);
3154 var
= ERR_PTR(-ENOMEM
);
3160 static struct field_var
*create_field_var(struct hist_trigger_data
*hist_data
,
3161 struct trace_event_file
*file
,
3164 struct hist_field
*val
= NULL
, *var
= NULL
;
3165 unsigned long flags
= HIST_FIELD_FL_VAR
;
3166 struct field_var
*field_var
;
3169 if (hist_data
->n_field_vars
>= SYNTH_FIELDS_MAX
) {
3170 hist_err("Too many field variables defined: ", field_name
);
3175 val
= parse_atom(hist_data
, file
, field_name
, &flags
, NULL
);
3177 hist_err("Couldn't parse field variable: ", field_name
);
3182 var
= create_var(hist_data
, file
, field_name
, val
->size
, val
->type
);
3184 hist_err("Couldn't create or find variable: ", field_name
);
3190 field_var
= kzalloc(sizeof(struct field_var
), GFP_KERNEL
);
3198 field_var
->var
= var
;
3199 field_var
->val
= val
;
3203 field_var
= ERR_PTR(ret
);
3208 * create_target_field_var - Automatically create a variable for a field
3209 * @target_hist_data: The target hist trigger
3210 * @subsys_name: Optional subsystem name
3211 * @event_name: Optional event name
3212 * @var_name: The name of the field (and the resulting variable)
3214 * Hist trigger actions fetch data from variables, not directly from
3215 * events. However, for convenience, users are allowed to directly
3216 * specify an event field in an action, which will be automatically
3217 * converted into a variable on their behalf.
3219 * This function creates a field variable with the name var_name on
3220 * the hist trigger currently being defined on the target event. If
3221 * subsys_name and event_name are specified, this function simply
3222 * verifies that they do in fact match the target event subsystem and
3225 * Return: The variable created for the field.
3227 static struct field_var
*
3228 create_target_field_var(struct hist_trigger_data
*target_hist_data
,
3229 char *subsys_name
, char *event_name
, char *var_name
)
3231 struct trace_event_file
*file
= target_hist_data
->event_file
;
3234 struct trace_event_call
*call
;
3239 call
= file
->event_call
;
3241 if (strcmp(subsys_name
, call
->class->system
) != 0)
3244 if (strcmp(event_name
, trace_event_name(call
)) != 0)
3248 return create_field_var(target_hist_data
, file
, var_name
);
3251 static void onmax_print(struct seq_file
*m
,
3252 struct hist_trigger_data
*hist_data
,
3253 struct tracing_map_elt
*elt
,
3254 struct action_data
*data
)
3256 unsigned int i
, save_var_idx
, max_idx
= data
->onmax
.max_var
->var
.idx
;
3258 seq_printf(m
, "\n\tmax: %10llu", tracing_map_read_var(elt
, max_idx
));
3260 for (i
= 0; i
< hist_data
->n_max_vars
; i
++) {
3261 struct hist_field
*save_val
= hist_data
->max_vars
[i
]->val
;
3262 struct hist_field
*save_var
= hist_data
->max_vars
[i
]->var
;
3265 save_var_idx
= save_var
->var
.idx
;
3267 val
= tracing_map_read_var(elt
, save_var_idx
);
3269 if (save_val
->flags
& HIST_FIELD_FL_STRING
) {
3270 seq_printf(m
, " %s: %-32s", save_var
->var
.name
,
3271 (char *)(uintptr_t)(val
));
3273 seq_printf(m
, " %s: %10llu", save_var
->var
.name
, val
);
3277 static void onmax_save(struct hist_trigger_data
*hist_data
,
3278 struct tracing_map_elt
*elt
, void *rec
,
3279 struct ring_buffer_event
*rbe
,
3280 struct action_data
*data
, u64
*var_ref_vals
)
3282 unsigned int max_idx
= data
->onmax
.max_var
->var
.idx
;
3283 unsigned int max_var_ref_idx
= data
->onmax
.max_var_ref_idx
;
3285 u64 var_val
, max_val
;
3287 var_val
= var_ref_vals
[max_var_ref_idx
];
3288 max_val
= tracing_map_read_var(elt
, max_idx
);
3290 if (var_val
<= max_val
)
3293 tracing_map_set_var(elt
, max_idx
, var_val
);
3295 update_max_vars(hist_data
, elt
, rbe
, rec
);
3298 static void onmax_destroy(struct action_data
*data
)
3302 destroy_hist_field(data
->onmax
.max_var
, 0);
3303 destroy_hist_field(data
->onmax
.var
, 0);
3305 kfree(data
->onmax
.var_str
);
3306 kfree(data
->onmax
.fn_name
);
3308 for (i
= 0; i
< data
->n_params
; i
++)
3309 kfree(data
->params
[i
]);
3314 static int onmax_create(struct hist_trigger_data
*hist_data
,
3315 struct action_data
*data
)
3317 struct trace_event_file
*file
= hist_data
->event_file
;
3318 struct hist_field
*var_field
, *ref_field
, *max_var
;
3319 unsigned int var_ref_idx
= hist_data
->n_var_refs
;
3320 struct field_var
*field_var
;
3321 char *onmax_var_str
, *param
;
3322 unsigned long flags
;
3326 onmax_var_str
= data
->onmax
.var_str
;
3327 if (onmax_var_str
[0] != '$') {
3328 hist_err("onmax: For onmax(x), x must be a variable: ", onmax_var_str
);
3333 var_field
= find_target_event_var(hist_data
, NULL
, NULL
, onmax_var_str
);
3335 hist_err("onmax: Couldn't find onmax variable: ", onmax_var_str
);
3339 flags
= HIST_FIELD_FL_VAR_REF
;
3340 ref_field
= create_hist_field(hist_data
, NULL
, flags
, NULL
);
3344 if (init_var_ref(ref_field
, var_field
, NULL
, NULL
)) {
3345 destroy_hist_field(ref_field
, 0);
3349 hist_data
->var_refs
[hist_data
->n_var_refs
] = ref_field
;
3350 ref_field
->var_ref_idx
= hist_data
->n_var_refs
++;
3351 data
->onmax
.var
= ref_field
;
3353 data
->fn
= onmax_save
;
3354 data
->onmax
.max_var_ref_idx
= var_ref_idx
;
3355 max_var
= create_var(hist_data
, file
, "max", sizeof(u64
), "u64");
3356 if (IS_ERR(max_var
)) {
3357 hist_err("onmax: Couldn't create onmax variable: ", "max");
3358 ret
= PTR_ERR(max_var
);
3361 data
->onmax
.max_var
= max_var
;
3363 for (i
= 0; i
< data
->n_params
; i
++) {
3364 param
= kstrdup(data
->params
[i
], GFP_KERNEL
);
3370 field_var
= create_target_field_var(hist_data
, NULL
, NULL
, param
);
3371 if (IS_ERR(field_var
)) {
3372 hist_err("onmax: Couldn't create field variable: ", param
);
3373 ret
= PTR_ERR(field_var
);
3378 hist_data
->max_vars
[hist_data
->n_max_vars
++] = field_var
;
3379 if (field_var
->val
->flags
& HIST_FIELD_FL_STRING
)
3380 hist_data
->n_max_var_str
++;
3388 static int parse_action_params(char *params
, struct action_data
*data
)
3390 char *param
, *saved_param
;
3394 if (data
->n_params
>= SYNTH_FIELDS_MAX
)
3397 param
= strsep(¶ms
, ",");
3403 param
= strstrip(param
);
3404 if (strlen(param
) < 2) {
3405 hist_err("Invalid action param: ", param
);
3410 saved_param
= kstrdup(param
, GFP_KERNEL
);
3416 data
->params
[data
->n_params
++] = saved_param
;
3422 static struct action_data
*onmax_parse(char *str
)
3424 char *onmax_fn_name
, *onmax_var_str
;
3425 struct action_data
*data
;
3428 data
= kzalloc(sizeof(*data
), GFP_KERNEL
);
3430 return ERR_PTR(-ENOMEM
);
3432 onmax_var_str
= strsep(&str
, ")");
3433 if (!onmax_var_str
|| !str
) {
3438 data
->onmax
.var_str
= kstrdup(onmax_var_str
, GFP_KERNEL
);
3439 if (!data
->onmax
.var_str
) {
3448 onmax_fn_name
= strsep(&str
, "(");
3449 if (!onmax_fn_name
|| !str
)
3452 if (strncmp(onmax_fn_name
, "save", strlen("save")) == 0) {
3453 char *params
= strsep(&str
, ")");
3460 ret
= parse_action_params(params
, data
);
3466 data
->onmax
.fn_name
= kstrdup(onmax_fn_name
, GFP_KERNEL
);
3467 if (!data
->onmax
.fn_name
) {
3474 onmax_destroy(data
);
3475 data
= ERR_PTR(ret
);
3479 static void onmatch_destroy(struct action_data
*data
)
3483 mutex_lock(&synth_event_mutex
);
3485 kfree(data
->onmatch
.match_event
);
3486 kfree(data
->onmatch
.match_event_system
);
3487 kfree(data
->onmatch
.synth_event_name
);
3489 for (i
= 0; i
< data
->n_params
; i
++)
3490 kfree(data
->params
[i
]);
3492 if (data
->onmatch
.synth_event
)
3493 data
->onmatch
.synth_event
->ref
--;
3497 mutex_unlock(&synth_event_mutex
);
3500 static void destroy_field_var(struct field_var
*field_var
)
3505 destroy_hist_field(field_var
->var
, 0);
3506 destroy_hist_field(field_var
->val
, 0);
3511 static void destroy_field_vars(struct hist_trigger_data
*hist_data
)
3515 for (i
= 0; i
< hist_data
->n_field_vars
; i
++)
3516 destroy_field_var(hist_data
->field_vars
[i
]);
3519 static void save_field_var(struct hist_trigger_data
*hist_data
,
3520 struct field_var
*field_var
)
3522 hist_data
->field_vars
[hist_data
->n_field_vars
++] = field_var
;
3524 if (field_var
->val
->flags
& HIST_FIELD_FL_STRING
)
3525 hist_data
->n_field_var_str
++;
3529 static void destroy_synth_var_refs(struct hist_trigger_data
*hist_data
)
3533 for (i
= 0; i
< hist_data
->n_synth_var_refs
; i
++)
3534 destroy_hist_field(hist_data
->synth_var_refs
[i
], 0);
3537 static void save_synth_var_ref(struct hist_trigger_data
*hist_data
,
3538 struct hist_field
*var_ref
)
3540 hist_data
->synth_var_refs
[hist_data
->n_synth_var_refs
++] = var_ref
;
3542 hist_data
->var_refs
[hist_data
->n_var_refs
] = var_ref
;
3543 var_ref
->var_ref_idx
= hist_data
->n_var_refs
++;
3546 static int check_synth_field(struct synth_event
*event
,
3547 struct hist_field
*hist_field
,
3548 unsigned int field_pos
)
3550 struct synth_field
*field
;
3552 if (field_pos
>= event
->n_fields
)
3555 field
= event
->fields
[field_pos
];
3557 if (strcmp(field
->type
, hist_field
->type
) != 0)
3563 static struct hist_field
*
3564 onmatch_find_var(struct hist_trigger_data
*hist_data
, struct action_data
*data
,
3565 char *system
, char *event
, char *var
)
3567 struct hist_field
*hist_field
;
3569 var
++; /* skip '$' */
3571 hist_field
= find_target_event_var(hist_data
, system
, event
, var
);
3574 system
= data
->onmatch
.match_event_system
;
3575 event
= data
->onmatch
.match_event
;
3578 hist_field
= find_event_var(hist_data
, system
, event
, var
);
3582 hist_err_event("onmatch: Couldn't find onmatch param: $", system
, event
, var
);
3587 static struct hist_field
*
3588 onmatch_create_field_var(struct hist_trigger_data
*hist_data
,
3589 struct action_data
*data
, char *system
,
3590 char *event
, char *var
)
3592 struct hist_field
*hist_field
= NULL
;
3593 struct field_var
*field_var
;
3596 * First try to create a field var on the target event (the
3597 * currently being defined). This will create a variable for
3598 * unqualified fields on the target event, or if qualified,
3599 * target fields that have qualified names matching the target.
3601 field_var
= create_target_field_var(hist_data
, system
, event
, var
);
3603 if (field_var
&& !IS_ERR(field_var
)) {
3604 save_field_var(hist_data
, field_var
);
3605 hist_field
= field_var
->var
;
3609 * If no explicit system.event is specfied, default to
3610 * looking for fields on the onmatch(system.event.xxx)
3614 system
= data
->onmatch
.match_event_system
;
3615 event
= data
->onmatch
.match_event
;
3619 * At this point, we're looking at a field on another
3620 * event. Because we can't modify a hist trigger on
3621 * another event to add a variable for a field, we need
3622 * to create a new trigger on that event and create the
3623 * variable at the same time.
3625 hist_field
= create_field_var_hist(hist_data
, system
, event
, var
);
3626 if (IS_ERR(hist_field
))
3632 destroy_field_var(field_var
);
3637 static int onmatch_create(struct hist_trigger_data
*hist_data
,
3638 struct trace_event_file
*file
,
3639 struct action_data
*data
)
3641 char *event_name
, *param
, *system
= NULL
;
3642 struct hist_field
*hist_field
, *var_ref
;
3643 unsigned int i
, var_ref_idx
;
3644 unsigned int field_pos
= 0;
3645 struct synth_event
*event
;
3648 mutex_lock(&synth_event_mutex
);
3649 event
= find_synth_event(data
->onmatch
.synth_event_name
);
3651 hist_err("onmatch: Couldn't find synthetic event: ", data
->onmatch
.synth_event_name
);
3652 mutex_unlock(&synth_event_mutex
);
3656 mutex_unlock(&synth_event_mutex
);
3658 var_ref_idx
= hist_data
->n_var_refs
;
3660 for (i
= 0; i
< data
->n_params
; i
++) {
3663 p
= param
= kstrdup(data
->params
[i
], GFP_KERNEL
);
3669 system
= strsep(¶m
, ".");
3671 param
= (char *)system
;
3672 system
= event_name
= NULL
;
3674 event_name
= strsep(¶m
, ".");
3682 if (param
[0] == '$')
3683 hist_field
= onmatch_find_var(hist_data
, data
, system
,
3686 hist_field
= onmatch_create_field_var(hist_data
, data
,
3697 if (check_synth_field(event
, hist_field
, field_pos
) == 0) {
3698 var_ref
= create_var_ref(hist_field
, system
, event_name
);
3705 save_synth_var_ref(hist_data
, var_ref
);
3711 hist_err_event("onmatch: Param type doesn't match synthetic event field type: ",
3712 system
, event_name
, param
);
3718 if (field_pos
!= event
->n_fields
) {
3719 hist_err("onmatch: Param count doesn't match synthetic event field count: ", event
->name
);
3724 data
->fn
= action_trace
;
3725 data
->onmatch
.synth_event
= event
;
3726 data
->onmatch
.var_ref_idx
= var_ref_idx
;
3730 mutex_lock(&synth_event_mutex
);
3732 mutex_unlock(&synth_event_mutex
);
3737 static struct action_data
*onmatch_parse(struct trace_array
*tr
, char *str
)
3739 char *match_event
, *match_event_system
;
3740 char *synth_event_name
, *params
;
3741 struct action_data
*data
;
3744 data
= kzalloc(sizeof(*data
), GFP_KERNEL
);
3746 return ERR_PTR(-ENOMEM
);
3748 match_event
= strsep(&str
, ")");
3749 if (!match_event
|| !str
) {
3750 hist_err("onmatch: Missing closing paren: ", match_event
);
3754 match_event_system
= strsep(&match_event
, ".");
3756 hist_err("onmatch: Missing subsystem for match event: ", match_event_system
);
3760 if (IS_ERR(event_file(tr
, match_event_system
, match_event
))) {
3761 hist_err_event("onmatch: Invalid subsystem or event name: ",
3762 match_event_system
, match_event
, NULL
);
3766 data
->onmatch
.match_event
= kstrdup(match_event
, GFP_KERNEL
);
3767 if (!data
->onmatch
.match_event
) {
3772 data
->onmatch
.match_event_system
= kstrdup(match_event_system
, GFP_KERNEL
);
3773 if (!data
->onmatch
.match_event_system
) {
3780 hist_err("onmatch: Missing . after onmatch(): ", str
);
3784 synth_event_name
= strsep(&str
, "(");
3785 if (!synth_event_name
|| !str
) {
3786 hist_err("onmatch: Missing opening paramlist paren: ", synth_event_name
);
3790 data
->onmatch
.synth_event_name
= kstrdup(synth_event_name
, GFP_KERNEL
);
3791 if (!data
->onmatch
.synth_event_name
) {
3796 params
= strsep(&str
, ")");
3797 if (!params
|| !str
|| (str
&& strlen(str
))) {
3798 hist_err("onmatch: Missing closing paramlist paren: ", params
);
3802 ret
= parse_action_params(params
, data
);
3808 onmatch_destroy(data
);
3809 data
= ERR_PTR(ret
);
3813 static int create_hitcount_val(struct hist_trigger_data
*hist_data
)
3815 hist_data
->fields
[HITCOUNT_IDX
] =
3816 create_hist_field(hist_data
, NULL
, HIST_FIELD_FL_HITCOUNT
, NULL
);
3817 if (!hist_data
->fields
[HITCOUNT_IDX
])
3820 hist_data
->n_vals
++;
3821 hist_data
->n_fields
++;
3823 if (WARN_ON(hist_data
->n_vals
> TRACING_MAP_VALS_MAX
))
3829 static int __create_val_field(struct hist_trigger_data
*hist_data
,
3830 unsigned int val_idx
,
3831 struct trace_event_file
*file
,
3832 char *var_name
, char *field_str
,
3833 unsigned long flags
)
3835 struct hist_field
*hist_field
;
3838 hist_field
= parse_expr(hist_data
, file
, field_str
, flags
, var_name
, 0);
3839 if (IS_ERR(hist_field
)) {
3840 ret
= PTR_ERR(hist_field
);
3844 hist_data
->fields
[val_idx
] = hist_field
;
3846 ++hist_data
->n_vals
;
3847 ++hist_data
->n_fields
;
3849 if (WARN_ON(hist_data
->n_vals
> TRACING_MAP_VALS_MAX
+ TRACING_MAP_VARS_MAX
))
3855 static int create_val_field(struct hist_trigger_data
*hist_data
,
3856 unsigned int val_idx
,
3857 struct trace_event_file
*file
,
3860 if (WARN_ON(val_idx
>= TRACING_MAP_VALS_MAX
))
3863 return __create_val_field(hist_data
, val_idx
, file
, NULL
, field_str
, 0);
3866 static int create_var_field(struct hist_trigger_data
*hist_data
,
3867 unsigned int val_idx
,
3868 struct trace_event_file
*file
,
3869 char *var_name
, char *expr_str
)
3871 unsigned long flags
= 0;
3873 if (WARN_ON(val_idx
>= TRACING_MAP_VALS_MAX
+ TRACING_MAP_VARS_MAX
))
3876 if (find_var(hist_data
, file
, var_name
) && !hist_data
->remove
) {
3877 hist_err("Variable already defined: ", var_name
);
3881 flags
|= HIST_FIELD_FL_VAR
;
3882 hist_data
->n_vars
++;
3883 if (WARN_ON(hist_data
->n_vars
> TRACING_MAP_VARS_MAX
))
3886 return __create_val_field(hist_data
, val_idx
, file
, var_name
, expr_str
, flags
);
3889 static int create_val_fields(struct hist_trigger_data
*hist_data
,
3890 struct trace_event_file
*file
)
3892 char *fields_str
, *field_str
;
3893 unsigned int i
, j
= 1;
3896 ret
= create_hitcount_val(hist_data
);
3900 fields_str
= hist_data
->attrs
->vals_str
;
3904 strsep(&fields_str
, "=");
3908 for (i
= 0, j
= 1; i
< TRACING_MAP_VALS_MAX
&&
3909 j
< TRACING_MAP_VALS_MAX
; i
++) {
3910 field_str
= strsep(&fields_str
, ",");
3914 if (strcmp(field_str
, "hitcount") == 0)
3917 ret
= create_val_field(hist_data
, j
++, file
, field_str
);
3922 if (fields_str
&& (strcmp(fields_str
, "hitcount") != 0))
3928 static int create_key_field(struct hist_trigger_data
*hist_data
,
3929 unsigned int key_idx
,
3930 unsigned int key_offset
,
3931 struct trace_event_file
*file
,
3934 struct hist_field
*hist_field
= NULL
;
3936 unsigned long flags
= 0;
3937 unsigned int key_size
;
3940 if (WARN_ON(key_idx
>= HIST_FIELDS_MAX
))
3943 flags
|= HIST_FIELD_FL_KEY
;
3945 if (strcmp(field_str
, "stacktrace") == 0) {
3946 flags
|= HIST_FIELD_FL_STACKTRACE
;
3947 key_size
= sizeof(unsigned long) * HIST_STACKTRACE_DEPTH
;
3948 hist_field
= create_hist_field(hist_data
, NULL
, flags
, NULL
);
3950 hist_field
= parse_expr(hist_data
, file
, field_str
, flags
,
3952 if (IS_ERR(hist_field
)) {
3953 ret
= PTR_ERR(hist_field
);
3957 if (hist_field
->flags
& HIST_FIELD_FL_VAR_REF
) {
3958 hist_err("Using variable references as keys not supported: ", field_str
);
3959 destroy_hist_field(hist_field
, 0);
3964 key_size
= hist_field
->size
;
3967 hist_data
->fields
[key_idx
] = hist_field
;
3969 key_size
= ALIGN(key_size
, sizeof(u64
));
3970 hist_data
->fields
[key_idx
]->size
= key_size
;
3971 hist_data
->fields
[key_idx
]->offset
= key_offset
;
3973 hist_data
->key_size
+= key_size
;
3975 if (hist_data
->key_size
> HIST_KEY_SIZE_MAX
) {
3980 hist_data
->n_keys
++;
3981 hist_data
->n_fields
++;
3983 if (WARN_ON(hist_data
->n_keys
> TRACING_MAP_KEYS_MAX
))
3991 static int create_key_fields(struct hist_trigger_data
*hist_data
,
3992 struct trace_event_file
*file
)
3994 unsigned int i
, key_offset
= 0, n_vals
= hist_data
->n_vals
;
3995 char *fields_str
, *field_str
;
3998 fields_str
= hist_data
->attrs
->keys_str
;
4002 strsep(&fields_str
, "=");
4006 for (i
= n_vals
; i
< n_vals
+ TRACING_MAP_KEYS_MAX
; i
++) {
4007 field_str
= strsep(&fields_str
, ",");
4010 ret
= create_key_field(hist_data
, i
, key_offset
,
4025 static int create_var_fields(struct hist_trigger_data
*hist_data
,
4026 struct trace_event_file
*file
)
4028 unsigned int i
, j
= hist_data
->n_vals
;
4031 unsigned int n_vars
= hist_data
->attrs
->var_defs
.n_vars
;
4033 for (i
= 0; i
< n_vars
; i
++) {
4034 char *var_name
= hist_data
->attrs
->var_defs
.name
[i
];
4035 char *expr
= hist_data
->attrs
->var_defs
.expr
[i
];
4037 ret
= create_var_field(hist_data
, j
++, file
, var_name
, expr
);
4045 static void free_var_defs(struct hist_trigger_data
*hist_data
)
4049 for (i
= 0; i
< hist_data
->attrs
->var_defs
.n_vars
; i
++) {
4050 kfree(hist_data
->attrs
->var_defs
.name
[i
]);
4051 kfree(hist_data
->attrs
->var_defs
.expr
[i
]);
4054 hist_data
->attrs
->var_defs
.n_vars
= 0;
4057 static int parse_var_defs(struct hist_trigger_data
*hist_data
)
4059 char *s
, *str
, *var_name
, *field_str
;
4060 unsigned int i
, j
, n_vars
= 0;
4063 for (i
= 0; i
< hist_data
->attrs
->n_assignments
; i
++) {
4064 str
= hist_data
->attrs
->assignment_str
[i
];
4065 for (j
= 0; j
< TRACING_MAP_VARS_MAX
; j
++) {
4066 field_str
= strsep(&str
, ",");
4070 var_name
= strsep(&field_str
, "=");
4071 if (!var_name
|| !field_str
) {
4072 hist_err("Malformed assignment: ", var_name
);
4077 if (n_vars
== TRACING_MAP_VARS_MAX
) {
4078 hist_err("Too many variables defined: ", var_name
);
4083 s
= kstrdup(var_name
, GFP_KERNEL
);
4088 hist_data
->attrs
->var_defs
.name
[n_vars
] = s
;
4090 s
= kstrdup(field_str
, GFP_KERNEL
);
4092 kfree(hist_data
->attrs
->var_defs
.name
[n_vars
]);
4096 hist_data
->attrs
->var_defs
.expr
[n_vars
++] = s
;
4098 hist_data
->attrs
->var_defs
.n_vars
= n_vars
;
4104 free_var_defs(hist_data
);
/*
 * Build all of the trigger's fields in order: variable definitions,
 * values, variables, then keys.  The parsed variable-definition
 * strings are freed regardless of outcome.
 */
static int create_hist_fields(struct hist_trigger_data *hist_data,
			      struct trace_event_file *file)
{
	int ret;

	ret = parse_var_defs(hist_data);
	if (ret)
		goto out;

	ret = create_val_fields(hist_data, file);
	if (ret)
		goto out;

	ret = create_var_fields(hist_data, file);
	if (ret)
		goto out;

	ret = create_key_fields(hist_data, file);
	if (ret)
		goto out;
 out:
	free_var_defs(hist_data);

	return ret;
}
/*
 * Interpret a sort-direction suffix: 1 for "descending", 0 for
 * "ascending" or no suffix at all, -EINVAL for anything else.
 */
static int is_descending(const char *str)
{
	if (!str)
		return 0;

	if (strcmp(str, "descending") == 0)
		return 1;

	if (strcmp(str, "ascending") == 0)
		return 0;

	return -EINVAL;
}
4149 static int create_sort_keys(struct hist_trigger_data
*hist_data
)
4151 char *fields_str
= hist_data
->attrs
->sort_key_str
;
4152 struct tracing_map_sort_key
*sort_key
;
4153 int descending
, ret
= 0;
4154 unsigned int i
, j
, k
;
4156 hist_data
->n_sort_keys
= 1; /* we always have at least one, hitcount */
4161 strsep(&fields_str
, "=");
4167 for (i
= 0; i
< TRACING_MAP_SORT_KEYS_MAX
; i
++) {
4168 struct hist_field
*hist_field
;
4169 char *field_str
, *field_name
;
4170 const char *test_name
;
4172 sort_key
= &hist_data
->sort_keys
[i
];
4174 field_str
= strsep(&fields_str
, ",");
4181 if ((i
== TRACING_MAP_SORT_KEYS_MAX
- 1) && fields_str
) {
4186 field_name
= strsep(&field_str
, ".");
4192 if (strcmp(field_name
, "hitcount") == 0) {
4193 descending
= is_descending(field_str
);
4194 if (descending
< 0) {
4198 sort_key
->descending
= descending
;
4202 for (j
= 1, k
= 1; j
< hist_data
->n_fields
; j
++) {
4205 hist_field
= hist_data
->fields
[j
];
4206 if (hist_field
->flags
& HIST_FIELD_FL_VAR
)
4211 test_name
= hist_field_name(hist_field
, 0);
4213 if (strcmp(field_name
, test_name
) == 0) {
4214 sort_key
->field_idx
= idx
;
4215 descending
= is_descending(field_str
);
4216 if (descending
< 0) {
4220 sort_key
->descending
= descending
;
4224 if (j
== hist_data
->n_fields
) {
4230 hist_data
->n_sort_keys
= i
;
4235 static void destroy_actions(struct hist_trigger_data
*hist_data
)
4239 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
4240 struct action_data
*data
= hist_data
->actions
[i
];
4242 if (data
->fn
== action_trace
)
4243 onmatch_destroy(data
);
4244 else if (data
->fn
== onmax_save
)
4245 onmax_destroy(data
);
4251 static int parse_actions(struct hist_trigger_data
*hist_data
)
4253 struct trace_array
*tr
= hist_data
->event_file
->tr
;
4254 struct action_data
*data
;
4259 for (i
= 0; i
< hist_data
->attrs
->n_actions
; i
++) {
4260 str
= hist_data
->attrs
->action_str
[i
];
4262 if (strncmp(str
, "onmatch(", strlen("onmatch(")) == 0) {
4263 char *action_str
= str
+ strlen("onmatch(");
4265 data
= onmatch_parse(tr
, action_str
);
4267 ret
= PTR_ERR(data
);
4270 data
->fn
= action_trace
;
4271 } else if (strncmp(str
, "onmax(", strlen("onmax(")) == 0) {
4272 char *action_str
= str
+ strlen("onmax(");
4274 data
= onmax_parse(action_str
);
4276 ret
= PTR_ERR(data
);
4279 data
->fn
= onmax_save
;
4285 hist_data
->actions
[hist_data
->n_actions
++] = data
;
4291 static int create_actions(struct hist_trigger_data
*hist_data
,
4292 struct trace_event_file
*file
)
4294 struct action_data
*data
;
4298 for (i
= 0; i
< hist_data
->attrs
->n_actions
; i
++) {
4299 data
= hist_data
->actions
[i
];
4301 if (data
->fn
== action_trace
) {
4302 ret
= onmatch_create(hist_data
, file
, data
);
4305 } else if (data
->fn
== onmax_save
) {
4306 ret
= onmax_create(hist_data
, data
);
4315 static void print_actions(struct seq_file
*m
,
4316 struct hist_trigger_data
*hist_data
,
4317 struct tracing_map_elt
*elt
)
4321 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
4322 struct action_data
*data
= hist_data
->actions
[i
];
4324 if (data
->fn
== onmax_save
)
4325 onmax_print(m
, hist_data
, elt
, data
);
4329 static void print_onmax_spec(struct seq_file
*m
,
4330 struct hist_trigger_data
*hist_data
,
4331 struct action_data
*data
)
4335 seq_puts(m
, ":onmax(");
4336 seq_printf(m
, "%s", data
->onmax
.var_str
);
4337 seq_printf(m
, ").%s(", data
->onmax
.fn_name
);
4339 for (i
= 0; i
< hist_data
->n_max_vars
; i
++) {
4340 seq_printf(m
, "%s", hist_data
->max_vars
[i
]->var
->var
.name
);
4341 if (i
< hist_data
->n_max_vars
- 1)
4347 static void print_onmatch_spec(struct seq_file
*m
,
4348 struct hist_trigger_data
*hist_data
,
4349 struct action_data
*data
)
4353 seq_printf(m
, ":onmatch(%s.%s).", data
->onmatch
.match_event_system
,
4354 data
->onmatch
.match_event
);
4356 seq_printf(m
, "%s(", data
->onmatch
.synth_event
->name
);
4358 for (i
= 0; i
< data
->n_params
; i
++) {
4361 seq_printf(m
, "%s", data
->params
[i
]);
4367 static bool actions_match(struct hist_trigger_data
*hist_data
,
4368 struct hist_trigger_data
*hist_data_test
)
4372 if (hist_data
->n_actions
!= hist_data_test
->n_actions
)
4375 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
4376 struct action_data
*data
= hist_data
->actions
[i
];
4377 struct action_data
*data_test
= hist_data_test
->actions
[i
];
4379 if (data
->fn
!= data_test
->fn
)
4382 if (data
->n_params
!= data_test
->n_params
)
4385 for (j
= 0; j
< data
->n_params
; j
++) {
4386 if (strcmp(data
->params
[j
], data_test
->params
[j
]) != 0)
4390 if (data
->fn
== action_trace
) {
4391 if (strcmp(data
->onmatch
.synth_event_name
,
4392 data_test
->onmatch
.synth_event_name
) != 0)
4394 if (strcmp(data
->onmatch
.match_event_system
,
4395 data_test
->onmatch
.match_event_system
) != 0)
4397 if (strcmp(data
->onmatch
.match_event
,
4398 data_test
->onmatch
.match_event
) != 0)
4400 } else if (data
->fn
== onmax_save
) {
4401 if (strcmp(data
->onmax
.var_str
,
4402 data_test
->onmax
.var_str
) != 0)
4404 if (strcmp(data
->onmax
.fn_name
,
4405 data_test
->onmax
.fn_name
) != 0)
4414 static void print_actions_spec(struct seq_file
*m
,
4415 struct hist_trigger_data
*hist_data
)
4419 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
4420 struct action_data
*data
= hist_data
->actions
[i
];
4422 if (data
->fn
== action_trace
)
4423 print_onmatch_spec(m
, hist_data
, data
);
4424 else if (data
->fn
== onmax_save
)
4425 print_onmax_spec(m
, hist_data
, data
);
4429 static void destroy_field_var_hists(struct hist_trigger_data
*hist_data
)
4433 for (i
= 0; i
< hist_data
->n_field_var_hists
; i
++) {
4434 kfree(hist_data
->field_var_hists
[i
]->cmd
);
4435 kfree(hist_data
->field_var_hists
[i
]);
4439 static void destroy_hist_data(struct hist_trigger_data
*hist_data
)
4444 destroy_hist_trigger_attrs(hist_data
->attrs
);
4445 destroy_hist_fields(hist_data
);
4446 tracing_map_destroy(hist_data
->map
);
4448 destroy_actions(hist_data
);
4449 destroy_field_vars(hist_data
);
4450 destroy_field_var_hists(hist_data
);
4451 destroy_synth_var_refs(hist_data
);
4456 static int create_tracing_map_fields(struct hist_trigger_data
*hist_data
)
4458 struct tracing_map
*map
= hist_data
->map
;
4459 struct ftrace_event_field
*field
;
4460 struct hist_field
*hist_field
;
4463 for_each_hist_field(i
, hist_data
) {
4464 hist_field
= hist_data
->fields
[i
];
4465 if (hist_field
->flags
& HIST_FIELD_FL_KEY
) {
4466 tracing_map_cmp_fn_t cmp_fn
;
4468 field
= hist_field
->field
;
4470 if (hist_field
->flags
& HIST_FIELD_FL_STACKTRACE
)
4471 cmp_fn
= tracing_map_cmp_none
;
4473 cmp_fn
= tracing_map_cmp_num(hist_field
->size
,
4474 hist_field
->is_signed
);
4475 else if (is_string_field(field
))
4476 cmp_fn
= tracing_map_cmp_string
;
4478 cmp_fn
= tracing_map_cmp_num(field
->size
,
4480 idx
= tracing_map_add_key_field(map
,
4483 } else if (!(hist_field
->flags
& HIST_FIELD_FL_VAR
))
4484 idx
= tracing_map_add_sum_field(map
);
4489 if (hist_field
->flags
& HIST_FIELD_FL_VAR
) {
4490 idx
= tracing_map_add_var(map
);
4493 hist_field
->var
.idx
= idx
;
4494 hist_field
->var
.hist_data
= hist_data
;
4501 static struct hist_trigger_data
*
4502 create_hist_data(unsigned int map_bits
,
4503 struct hist_trigger_attrs
*attrs
,
4504 struct trace_event_file
*file
,
4507 const struct tracing_map_ops
*map_ops
= NULL
;
4508 struct hist_trigger_data
*hist_data
;
4511 hist_data
= kzalloc(sizeof(*hist_data
), GFP_KERNEL
);
4513 return ERR_PTR(-ENOMEM
);
4515 hist_data
->attrs
= attrs
;
4516 hist_data
->remove
= remove
;
4517 hist_data
->event_file
= file
;
4519 ret
= parse_actions(hist_data
);
4523 ret
= create_hist_fields(hist_data
, file
);
4527 ret
= create_sort_keys(hist_data
);
4531 map_ops
= &hist_trigger_elt_data_ops
;
4533 hist_data
->map
= tracing_map_create(map_bits
, hist_data
->key_size
,
4534 map_ops
, hist_data
);
4535 if (IS_ERR(hist_data
->map
)) {
4536 ret
= PTR_ERR(hist_data
->map
);
4537 hist_data
->map
= NULL
;
4541 ret
= create_tracing_map_fields(hist_data
);
4547 hist_data
->attrs
= NULL
;
4549 destroy_hist_data(hist_data
);
4551 hist_data
= ERR_PTR(ret
);
4556 static void hist_trigger_elt_update(struct hist_trigger_data
*hist_data
,
4557 struct tracing_map_elt
*elt
, void *rec
,
4558 struct ring_buffer_event
*rbe
,
4561 struct hist_elt_data
*elt_data
;
4562 struct hist_field
*hist_field
;
4563 unsigned int i
, var_idx
;
4566 elt_data
= elt
->private_data
;
4567 elt_data
->var_ref_vals
= var_ref_vals
;
4569 for_each_hist_val_field(i
, hist_data
) {
4570 hist_field
= hist_data
->fields
[i
];
4571 hist_val
= hist_field
->fn(hist_field
, elt
, rbe
, rec
);
4572 if (hist_field
->flags
& HIST_FIELD_FL_VAR
) {
4573 var_idx
= hist_field
->var
.idx
;
4574 tracing_map_set_var(elt
, var_idx
, hist_val
);
4577 tracing_map_update_sum(elt
, i
, hist_val
);
4580 for_each_hist_key_field(i
, hist_data
) {
4581 hist_field
= hist_data
->fields
[i
];
4582 if (hist_field
->flags
& HIST_FIELD_FL_VAR
) {
4583 hist_val
= hist_field
->fn(hist_field
, elt
, rbe
, rec
);
4584 var_idx
= hist_field
->var
.idx
;
4585 tracing_map_set_var(elt
, var_idx
, hist_val
);
4589 update_field_vars(hist_data
, elt
, rbe
, rec
);
4592 static inline void add_to_key(char *compound_key
, void *key
,
4593 struct hist_field
*key_field
, void *rec
)
4595 size_t size
= key_field
->size
;
4597 if (key_field
->flags
& HIST_FIELD_FL_STRING
) {
4598 struct ftrace_event_field
*field
;
4600 field
= key_field
->field
;
4601 if (field
->filter_type
== FILTER_DYN_STRING
)
4602 size
= *(u32
*)(rec
+ field
->offset
) >> 16;
4603 else if (field
->filter_type
== FILTER_PTR_STRING
)
4605 else if (field
->filter_type
== FILTER_STATIC_STRING
)
4608 /* ensure NULL-termination */
4609 if (size
> key_field
->size
- 1)
4610 size
= key_field
->size
- 1;
4613 memcpy(compound_key
+ key_field
->offset
, key
, size
);
4617 hist_trigger_actions(struct hist_trigger_data
*hist_data
,
4618 struct tracing_map_elt
*elt
, void *rec
,
4619 struct ring_buffer_event
*rbe
, u64
*var_ref_vals
)
4621 struct action_data
*data
;
4624 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
4625 data
= hist_data
->actions
[i
];
4626 data
->fn(hist_data
, elt
, rec
, rbe
, data
, var_ref_vals
);
4630 static void event_hist_trigger(struct event_trigger_data
*data
, void *rec
,
4631 struct ring_buffer_event
*rbe
)
4633 struct hist_trigger_data
*hist_data
= data
->private_data
;
4634 bool use_compound_key
= (hist_data
->n_keys
> 1);
4635 unsigned long entries
[HIST_STACKTRACE_DEPTH
];
4636 u64 var_ref_vals
[TRACING_MAP_VARS_MAX
];
4637 char compound_key
[HIST_KEY_SIZE_MAX
];
4638 struct tracing_map_elt
*elt
= NULL
;
4639 struct stack_trace stacktrace
;
4640 struct hist_field
*key_field
;
4645 memset(compound_key
, 0, hist_data
->key_size
);
4647 for_each_hist_key_field(i
, hist_data
) {
4648 key_field
= hist_data
->fields
[i
];
4650 if (key_field
->flags
& HIST_FIELD_FL_STACKTRACE
) {
4651 stacktrace
.max_entries
= HIST_STACKTRACE_DEPTH
;
4652 stacktrace
.entries
= entries
;
4653 stacktrace
.nr_entries
= 0;
4654 stacktrace
.skip
= HIST_STACKTRACE_SKIP
;
4656 memset(stacktrace
.entries
, 0, HIST_STACKTRACE_SIZE
);
4657 save_stack_trace(&stacktrace
);
4661 field_contents
= key_field
->fn(key_field
, elt
, rbe
, rec
);
4662 if (key_field
->flags
& HIST_FIELD_FL_STRING
) {
4663 key
= (void *)(unsigned long)field_contents
;
4664 use_compound_key
= true;
4666 key
= (void *)&field_contents
;
4669 if (use_compound_key
)
4670 add_to_key(compound_key
, key
, key_field
, rec
);
4673 if (use_compound_key
)
4676 if (hist_data
->n_var_refs
&&
4677 !resolve_var_refs(hist_data
, key
, var_ref_vals
, false))
4680 elt
= tracing_map_insert(hist_data
->map
, key
);
4684 hist_trigger_elt_update(hist_data
, elt
, rec
, rbe
, var_ref_vals
);
4686 if (resolve_var_refs(hist_data
, key
, var_ref_vals
, true))
4687 hist_trigger_actions(hist_data
, elt
, rec
, rbe
, var_ref_vals
);
4690 static void hist_trigger_stacktrace_print(struct seq_file
*m
,
4691 unsigned long *stacktrace_entries
,
4692 unsigned int max_entries
)
4694 char str
[KSYM_SYMBOL_LEN
];
4695 unsigned int spaces
= 8;
4698 for (i
= 0; i
< max_entries
; i
++) {
4699 if (stacktrace_entries
[i
] == ULONG_MAX
)
4702 seq_printf(m
, "%*c", 1 + spaces
, ' ');
4703 sprint_symbol(str
, stacktrace_entries
[i
]);
4704 seq_printf(m
, "%s\n", str
);
4709 hist_trigger_entry_print(struct seq_file
*m
,
4710 struct hist_trigger_data
*hist_data
, void *key
,
4711 struct tracing_map_elt
*elt
)
4713 struct hist_field
*key_field
;
4714 char str
[KSYM_SYMBOL_LEN
];
4715 bool multiline
= false;
4716 const char *field_name
;
4722 for_each_hist_key_field(i
, hist_data
) {
4723 key_field
= hist_data
->fields
[i
];
4725 if (i
> hist_data
->n_vals
)
4728 field_name
= hist_field_name(key_field
, 0);
4730 if (key_field
->flags
& HIST_FIELD_FL_HEX
) {
4731 uval
= *(u64
*)(key
+ key_field
->offset
);
4732 seq_printf(m
, "%s: %llx", field_name
, uval
);
4733 } else if (key_field
->flags
& HIST_FIELD_FL_SYM
) {
4734 uval
= *(u64
*)(key
+ key_field
->offset
);
4735 sprint_symbol_no_offset(str
, uval
);
4736 seq_printf(m
, "%s: [%llx] %-45s", field_name
,
4738 } else if (key_field
->flags
& HIST_FIELD_FL_SYM_OFFSET
) {
4739 uval
= *(u64
*)(key
+ key_field
->offset
);
4740 sprint_symbol(str
, uval
);
4741 seq_printf(m
, "%s: [%llx] %-55s", field_name
,
4743 } else if (key_field
->flags
& HIST_FIELD_FL_EXECNAME
) {
4744 struct hist_elt_data
*elt_data
= elt
->private_data
;
4747 if (WARN_ON_ONCE(!elt_data
))
4750 comm
= elt_data
->comm
;
4752 uval
= *(u64
*)(key
+ key_field
->offset
);
4753 seq_printf(m
, "%s: %-16s[%10llu]", field_name
,
4755 } else if (key_field
->flags
& HIST_FIELD_FL_SYSCALL
) {
4756 const char *syscall_name
;
4758 uval
= *(u64
*)(key
+ key_field
->offset
);
4759 syscall_name
= get_syscall_name(uval
);
4761 syscall_name
= "unknown_syscall";
4763 seq_printf(m
, "%s: %-30s[%3llu]", field_name
,
4764 syscall_name
, uval
);
4765 } else if (key_field
->flags
& HIST_FIELD_FL_STACKTRACE
) {
4766 seq_puts(m
, "stacktrace:\n");
4767 hist_trigger_stacktrace_print(m
,
4768 key
+ key_field
->offset
,
4769 HIST_STACKTRACE_DEPTH
);
4771 } else if (key_field
->flags
& HIST_FIELD_FL_LOG2
) {
4772 seq_printf(m
, "%s: ~ 2^%-2llu", field_name
,
4773 *(u64
*)(key
+ key_field
->offset
));
4774 } else if (key_field
->flags
& HIST_FIELD_FL_STRING
) {
4775 seq_printf(m
, "%s: %-50s", field_name
,
4776 (char *)(key
+ key_field
->offset
));
4778 uval
= *(u64
*)(key
+ key_field
->offset
);
4779 seq_printf(m
, "%s: %10llu", field_name
, uval
);
4788 seq_printf(m
, " hitcount: %10llu",
4789 tracing_map_read_sum(elt
, HITCOUNT_IDX
));
4791 for (i
= 1; i
< hist_data
->n_vals
; i
++) {
4792 field_name
= hist_field_name(hist_data
->fields
[i
], 0);
4794 if (hist_data
->fields
[i
]->flags
& HIST_FIELD_FL_VAR
||
4795 hist_data
->fields
[i
]->flags
& HIST_FIELD_FL_EXPR
)
4798 if (hist_data
->fields
[i
]->flags
& HIST_FIELD_FL_HEX
) {
4799 seq_printf(m
, " %s: %10llx", field_name
,
4800 tracing_map_read_sum(elt
, i
));
4802 seq_printf(m
, " %s: %10llu", field_name
,
4803 tracing_map_read_sum(elt
, i
));
4807 print_actions(m
, hist_data
, elt
);
4812 static int print_entries(struct seq_file
*m
,
4813 struct hist_trigger_data
*hist_data
)
4815 struct tracing_map_sort_entry
**sort_entries
= NULL
;
4816 struct tracing_map
*map
= hist_data
->map
;
4819 n_entries
= tracing_map_sort_entries(map
, hist_data
->sort_keys
,
4820 hist_data
->n_sort_keys
,
4825 for (i
= 0; i
< n_entries
; i
++)
4826 hist_trigger_entry_print(m
, hist_data
,
4827 sort_entries
[i
]->key
,
4828 sort_entries
[i
]->elt
);
4830 tracing_map_destroy_sort_entries(sort_entries
, n_entries
);
4835 static void hist_trigger_show(struct seq_file
*m
,
4836 struct event_trigger_data
*data
, int n
)
4838 struct hist_trigger_data
*hist_data
;
4842 seq_puts(m
, "\n\n");
4844 seq_puts(m
, "# event histogram\n#\n# trigger info: ");
4845 data
->ops
->print(m
, data
->ops
, data
);
4846 seq_puts(m
, "#\n\n");
4848 hist_data
= data
->private_data
;
4849 n_entries
= print_entries(m
, hist_data
);
4853 seq_printf(m
, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n",
4854 (u64
)atomic64_read(&hist_data
->map
->hits
),
4855 n_entries
, (u64
)atomic64_read(&hist_data
->map
->drops
));
4858 static int hist_show(struct seq_file
*m
, void *v
)
4860 struct event_trigger_data
*data
;
4861 struct trace_event_file
*event_file
;
4864 mutex_lock(&event_mutex
);
4866 event_file
= event_file_data(m
->private);
4867 if (unlikely(!event_file
)) {
4872 list_for_each_entry_rcu(data
, &event_file
->triggers
, list
) {
4873 if (data
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
)
4874 hist_trigger_show(m
, data
, n
++);
4877 if (have_hist_err()) {
4878 seq_printf(m
, "\nERROR: %s\n", hist_err_str
);
4879 seq_printf(m
, " Last command: %s\n", last_hist_cmd
);
4883 mutex_unlock(&event_mutex
);
4888 static int event_hist_open(struct inode
*inode
, struct file
*file
)
4890 return single_open(file
, hist_show
, file
);
4893 const struct file_operations event_hist_fops
= {
4894 .open
= event_hist_open
,
4896 .llseek
= seq_lseek
,
4897 .release
= single_release
,
4900 static void hist_field_print(struct seq_file
*m
, struct hist_field
*hist_field
)
4902 const char *field_name
= hist_field_name(hist_field
, 0);
4904 if (hist_field
->var
.name
)
4905 seq_printf(m
, "%s=", hist_field
->var
.name
);
4907 if (hist_field
->flags
& HIST_FIELD_FL_CPU
)
4909 else if (field_name
) {
4910 if (hist_field
->flags
& HIST_FIELD_FL_VAR_REF
||
4911 hist_field
->flags
& HIST_FIELD_FL_ALIAS
)
4913 seq_printf(m
, "%s", field_name
);
4914 } else if (hist_field
->flags
& HIST_FIELD_FL_TIMESTAMP
)
4915 seq_puts(m
, "common_timestamp");
4918 static int event_hist_trigger_print(struct seq_file
*m
,
4919 struct event_trigger_ops
*ops
,
4920 struct event_trigger_data
*data
)
4922 struct hist_trigger_data
*hist_data
= data
->private_data
;
4923 struct hist_field
*field
;
4924 bool have_var
= false;
4927 seq_puts(m
, "hist:");
4930 seq_printf(m
, "%s:", data
->name
);
4932 seq_puts(m
, "keys=");
4934 for_each_hist_key_field(i
, hist_data
) {
4935 field
= hist_data
->fields
[i
];
4937 if (i
> hist_data
->n_vals
)
4940 if (field
->flags
& HIST_FIELD_FL_STACKTRACE
)
4941 seq_puts(m
, "stacktrace");
4943 hist_field_print(m
, field
);
4946 seq_puts(m
, ":vals=");
4948 for_each_hist_val_field(i
, hist_data
) {
4949 field
= hist_data
->fields
[i
];
4950 if (field
->flags
& HIST_FIELD_FL_VAR
) {
4955 if (i
== HITCOUNT_IDX
)
4956 seq_puts(m
, "hitcount");
4959 hist_field_print(m
, field
);
4968 for_each_hist_val_field(i
, hist_data
) {
4969 field
= hist_data
->fields
[i
];
4971 if (field
->flags
& HIST_FIELD_FL_VAR
) {
4974 hist_field_print(m
, field
);
4979 seq_puts(m
, ":sort=");
4981 for (i
= 0; i
< hist_data
->n_sort_keys
; i
++) {
4982 struct tracing_map_sort_key
*sort_key
;
4983 unsigned int idx
, first_key_idx
;
4986 first_key_idx
= hist_data
->n_vals
- hist_data
->n_vars
;
4988 sort_key
= &hist_data
->sort_keys
[i
];
4989 idx
= sort_key
->field_idx
;
4991 if (WARN_ON(idx
>= HIST_FIELDS_MAX
))
4997 if (idx
== HITCOUNT_IDX
)
4998 seq_puts(m
, "hitcount");
5000 if (idx
>= first_key_idx
)
5001 idx
+= hist_data
->n_vars
;
5002 hist_field_print(m
, hist_data
->fields
[idx
]);
5005 if (sort_key
->descending
)
5006 seq_puts(m
, ".descending");
5008 seq_printf(m
, ":size=%u", (1 << hist_data
->map
->map_bits
));
5009 if (hist_data
->enable_timestamps
)
5010 seq_printf(m
, ":clock=%s", hist_data
->attrs
->clock
);
5012 print_actions_spec(m
, hist_data
);
5014 if (data
->filter_str
)
5015 seq_printf(m
, " if %s", data
->filter_str
);
5018 seq_puts(m
, " [paused]");
5020 seq_puts(m
, " [active]");
5027 static int event_hist_trigger_init(struct event_trigger_ops
*ops
,
5028 struct event_trigger_data
*data
)
5030 struct hist_trigger_data
*hist_data
= data
->private_data
;
5032 if (!data
->ref
&& hist_data
->attrs
->name
)
5033 save_named_trigger(hist_data
->attrs
->name
, data
);
5040 static void unregister_field_var_hists(struct hist_trigger_data
*hist_data
)
5042 struct trace_event_file
*file
;
5047 for (i
= 0; i
< hist_data
->n_field_var_hists
; i
++) {
5048 file
= hist_data
->field_var_hists
[i
]->hist_data
->event_file
;
5049 cmd
= hist_data
->field_var_hists
[i
]->cmd
;
5050 ret
= event_hist_trigger_func(&trigger_hist_cmd
, file
,
5051 "!hist", "hist", cmd
);
5055 static void event_hist_trigger_free(struct event_trigger_ops
*ops
,
5056 struct event_trigger_data
*data
)
5058 struct hist_trigger_data
*hist_data
= data
->private_data
;
5060 if (WARN_ON_ONCE(data
->ref
<= 0))
5066 del_named_trigger(data
);
5068 trigger_data_free(data
);
5070 remove_hist_vars(hist_data
);
5072 unregister_field_var_hists(hist_data
);
5074 destroy_hist_data(hist_data
);
5078 static struct event_trigger_ops event_hist_trigger_ops
= {
5079 .func
= event_hist_trigger
,
5080 .print
= event_hist_trigger_print
,
5081 .init
= event_hist_trigger_init
,
5082 .free
= event_hist_trigger_free
,
5085 static int event_hist_trigger_named_init(struct event_trigger_ops
*ops
,
5086 struct event_trigger_data
*data
)
5090 save_named_trigger(data
->named_data
->name
, data
);
5092 event_hist_trigger_init(ops
, data
->named_data
);
5097 static void event_hist_trigger_named_free(struct event_trigger_ops
*ops
,
5098 struct event_trigger_data
*data
)
5100 if (WARN_ON_ONCE(data
->ref
<= 0))
5103 event_hist_trigger_free(ops
, data
->named_data
);
5107 del_named_trigger(data
);
5108 trigger_data_free(data
);
5112 static struct event_trigger_ops event_hist_trigger_named_ops
= {
5113 .func
= event_hist_trigger
,
5114 .print
= event_hist_trigger_print
,
5115 .init
= event_hist_trigger_named_init
,
5116 .free
= event_hist_trigger_named_free
,
5119 static struct event_trigger_ops
*event_hist_get_trigger_ops(char *cmd
,
5122 return &event_hist_trigger_ops
;
5125 static void hist_clear(struct event_trigger_data
*data
)
5127 struct hist_trigger_data
*hist_data
= data
->private_data
;
5130 pause_named_trigger(data
);
5132 synchronize_sched();
5134 tracing_map_clear(hist_data
->map
);
5137 unpause_named_trigger(data
);
5140 static bool compatible_field(struct ftrace_event_field
*field
,
5141 struct ftrace_event_field
*test_field
)
5143 if (field
== test_field
)
5145 if (field
== NULL
|| test_field
== NULL
)
5147 if (strcmp(field
->name
, test_field
->name
) != 0)
5149 if (strcmp(field
->type
, test_field
->type
) != 0)
5151 if (field
->size
!= test_field
->size
)
5153 if (field
->is_signed
!= test_field
->is_signed
)
5159 static bool hist_trigger_match(struct event_trigger_data
*data
,
5160 struct event_trigger_data
*data_test
,
5161 struct event_trigger_data
*named_data
,
5164 struct tracing_map_sort_key
*sort_key
, *sort_key_test
;
5165 struct hist_trigger_data
*hist_data
, *hist_data_test
;
5166 struct hist_field
*key_field
, *key_field_test
;
5169 if (named_data
&& (named_data
!= data_test
) &&
5170 (named_data
!= data_test
->named_data
))
5173 if (!named_data
&& is_named_trigger(data_test
))
5176 hist_data
= data
->private_data
;
5177 hist_data_test
= data_test
->private_data
;
5179 if (hist_data
->n_vals
!= hist_data_test
->n_vals
||
5180 hist_data
->n_fields
!= hist_data_test
->n_fields
||
5181 hist_data
->n_sort_keys
!= hist_data_test
->n_sort_keys
)
5184 if (!ignore_filter
) {
5185 if ((data
->filter_str
&& !data_test
->filter_str
) ||
5186 (!data
->filter_str
&& data_test
->filter_str
))
5190 for_each_hist_field(i
, hist_data
) {
5191 key_field
= hist_data
->fields
[i
];
5192 key_field_test
= hist_data_test
->fields
[i
];
5194 if (key_field
->flags
!= key_field_test
->flags
)
5196 if (!compatible_field(key_field
->field
, key_field_test
->field
))
5198 if (key_field
->offset
!= key_field_test
->offset
)
5200 if (key_field
->size
!= key_field_test
->size
)
5202 if (key_field
->is_signed
!= key_field_test
->is_signed
)
5204 if (!!key_field
->var
.name
!= !!key_field_test
->var
.name
)
5206 if (key_field
->var
.name
&&
5207 strcmp(key_field
->var
.name
, key_field_test
->var
.name
) != 0)
5211 for (i
= 0; i
< hist_data
->n_sort_keys
; i
++) {
5212 sort_key
= &hist_data
->sort_keys
[i
];
5213 sort_key_test
= &hist_data_test
->sort_keys
[i
];
5215 if (sort_key
->field_idx
!= sort_key_test
->field_idx
||
5216 sort_key
->descending
!= sort_key_test
->descending
)
5220 if (!ignore_filter
&& data
->filter_str
&&
5221 (strcmp(data
->filter_str
, data_test
->filter_str
) != 0))
5224 if (!actions_match(hist_data
, hist_data_test
))
5230 static int hist_register_trigger(char *glob
, struct event_trigger_ops
*ops
,
5231 struct event_trigger_data
*data
,
5232 struct trace_event_file
*file
)
5234 struct hist_trigger_data
*hist_data
= data
->private_data
;
5235 struct event_trigger_data
*test
, *named_data
= NULL
;
5238 if (hist_data
->attrs
->name
) {
5239 named_data
= find_named_trigger(hist_data
->attrs
->name
);
5241 if (!hist_trigger_match(data
, named_data
, named_data
,
5243 hist_err("Named hist trigger doesn't match existing named trigger (includes variables): ", hist_data
->attrs
->name
);
5250 if (hist_data
->attrs
->name
&& !named_data
)
5253 list_for_each_entry_rcu(test
, &file
->triggers
, list
) {
5254 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
5255 if (!hist_trigger_match(data
, test
, named_data
, false))
5257 if (hist_data
->attrs
->pause
)
5258 test
->paused
= true;
5259 else if (hist_data
->attrs
->cont
)
5260 test
->paused
= false;
5261 else if (hist_data
->attrs
->clear
)
5264 hist_err("Hist trigger already exists", NULL
);
5271 if (hist_data
->attrs
->cont
|| hist_data
->attrs
->clear
) {
5272 hist_err("Can't clear or continue a nonexistent hist trigger", NULL
);
5277 if (hist_data
->attrs
->pause
)
5278 data
->paused
= true;
5281 data
->private_data
= named_data
->private_data
;
5282 set_named_trigger_data(data
, named_data
);
5283 data
->ops
= &event_hist_trigger_named_ops
;
5286 if (data
->ops
->init
) {
5287 ret
= data
->ops
->init(data
->ops
, data
);
5292 if (hist_data
->enable_timestamps
) {
5293 char *clock
= hist_data
->attrs
->clock
;
5295 ret
= tracing_set_clock(file
->tr
, hist_data
->attrs
->clock
);
5297 hist_err("Couldn't set trace_clock: ", clock
);
5301 tracing_set_time_stamp_abs(file
->tr
, true);
5305 destroy_hist_data(hist_data
);
5312 static int hist_trigger_enable(struct event_trigger_data
*data
,
5313 struct trace_event_file
*file
)
5317 list_add_tail_rcu(&data
->list
, &file
->triggers
);
5319 update_cond_flag(file
);
5321 if (trace_event_trigger_enable_disable(file
, 1) < 0) {
5322 list_del_rcu(&data
->list
);
5323 update_cond_flag(file
);
5330 static bool have_hist_trigger_match(struct event_trigger_data
*data
,
5331 struct trace_event_file
*file
)
5333 struct hist_trigger_data
*hist_data
= data
->private_data
;
5334 struct event_trigger_data
*test
, *named_data
= NULL
;
5337 if (hist_data
->attrs
->name
)
5338 named_data
= find_named_trigger(hist_data
->attrs
->name
);
5340 list_for_each_entry_rcu(test
, &file
->triggers
, list
) {
5341 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
5342 if (hist_trigger_match(data
, test
, named_data
, false)) {
5352 static bool hist_trigger_check_refs(struct event_trigger_data
*data
,
5353 struct trace_event_file
*file
)
5355 struct hist_trigger_data
*hist_data
= data
->private_data
;
5356 struct event_trigger_data
*test
, *named_data
= NULL
;
5358 if (hist_data
->attrs
->name
)
5359 named_data
= find_named_trigger(hist_data
->attrs
->name
);
5361 list_for_each_entry_rcu(test
, &file
->triggers
, list
) {
5362 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
5363 if (!hist_trigger_match(data
, test
, named_data
, false))
5365 hist_data
= test
->private_data
;
5366 if (check_var_refs(hist_data
))
5375 static void hist_unregister_trigger(char *glob
, struct event_trigger_ops
*ops
,
5376 struct event_trigger_data
*data
,
5377 struct trace_event_file
*file
)
5379 struct hist_trigger_data
*hist_data
= data
->private_data
;
5380 struct event_trigger_data
*test
, *named_data
= NULL
;
5381 bool unregistered
= false;
5383 if (hist_data
->attrs
->name
)
5384 named_data
= find_named_trigger(hist_data
->attrs
->name
);
5386 list_for_each_entry_rcu(test
, &file
->triggers
, list
) {
5387 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
5388 if (!hist_trigger_match(data
, test
, named_data
, false))
5390 unregistered
= true;
5391 list_del_rcu(&test
->list
);
5392 trace_event_trigger_enable_disable(file
, 0);
5393 update_cond_flag(file
);
5398 if (unregistered
&& test
->ops
->free
)
5399 test
->ops
->free(test
->ops
, test
);
5401 if (hist_data
->enable_timestamps
) {
5402 if (!hist_data
->remove
|| unregistered
)
5403 tracing_set_time_stamp_abs(file
->tr
, false);
5407 static bool hist_file_check_refs(struct trace_event_file
*file
)
5409 struct hist_trigger_data
*hist_data
;
5410 struct event_trigger_data
*test
;
5412 list_for_each_entry_rcu(test
, &file
->triggers
, list
) {
5413 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
5414 hist_data
= test
->private_data
;
5415 if (check_var_refs(hist_data
))
5423 static void hist_unreg_all(struct trace_event_file
*file
)
5425 struct event_trigger_data
*test
, *n
;
5426 struct hist_trigger_data
*hist_data
;
5427 struct synth_event
*se
;
5428 const char *se_name
;
5430 if (hist_file_check_refs(file
))
5433 list_for_each_entry_safe(test
, n
, &file
->triggers
, list
) {
5434 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
5435 hist_data
= test
->private_data
;
5436 list_del_rcu(&test
->list
);
5437 trace_event_trigger_enable_disable(file
, 0);
5439 mutex_lock(&synth_event_mutex
);
5440 se_name
= trace_event_name(file
->event_call
);
5441 se
= find_synth_event(se_name
);
5444 mutex_unlock(&synth_event_mutex
);
5446 update_cond_flag(file
);
5447 if (hist_data
->enable_timestamps
)
5448 tracing_set_time_stamp_abs(file
->tr
, false);
5449 if (test
->ops
->free
)
5450 test
->ops
->free(test
->ops
, test
);
5455 static int event_hist_trigger_func(struct event_command
*cmd_ops
,
5456 struct trace_event_file
*file
,
5457 char *glob
, char *cmd
, char *param
)
5459 unsigned int hist_trigger_bits
= TRACING_MAP_BITS_DEFAULT
;
5460 struct event_trigger_data
*trigger_data
;
5461 struct hist_trigger_attrs
*attrs
;
5462 struct event_trigger_ops
*trigger_ops
;
5463 struct hist_trigger_data
*hist_data
;
5464 struct synth_event
*se
;
5465 const char *se_name
;
5466 bool remove
= false;
5470 if (glob
&& strlen(glob
)) {
5471 last_cmd_set(param
);
5482 * separate the trigger from the filter (k:v [if filter])
5483 * allowing for whitespace in the trigger
5485 p
= trigger
= param
;
5487 p
= strstr(p
, "if");
5492 if (*(p
- 1) != ' ' && *(p
- 1) != '\t') {
5496 if (p
>= param
+ strlen(param
) - strlen("if") - 1)
5498 if (*(p
+ strlen("if")) != ' ' && *(p
+ strlen("if")) != '\t') {
5509 param
= strstrip(p
);
5510 trigger
= strstrip(trigger
);
5513 attrs
= parse_hist_trigger_attrs(trigger
);
5515 return PTR_ERR(attrs
);
5517 if (attrs
->map_bits
)
5518 hist_trigger_bits
= attrs
->map_bits
;
5520 hist_data
= create_hist_data(hist_trigger_bits
, attrs
, file
, remove
);
5521 if (IS_ERR(hist_data
)) {
5522 destroy_hist_trigger_attrs(attrs
);
5523 return PTR_ERR(hist_data
);
5526 trigger_ops
= cmd_ops
->get_trigger_ops(cmd
, trigger
);
5528 trigger_data
= kzalloc(sizeof(*trigger_data
), GFP_KERNEL
);
5529 if (!trigger_data
) {
5534 trigger_data
->count
= -1;
5535 trigger_data
->ops
= trigger_ops
;
5536 trigger_data
->cmd_ops
= cmd_ops
;
5538 INIT_LIST_HEAD(&trigger_data
->list
);
5539 RCU_INIT_POINTER(trigger_data
->filter
, NULL
);
5541 trigger_data
->private_data
= hist_data
;
5543 /* if param is non-empty, it's supposed to be a filter */
5544 if (param
&& cmd_ops
->set_filter
) {
5545 ret
= cmd_ops
->set_filter(param
, trigger_data
, file
);
5551 if (!have_hist_trigger_match(trigger_data
, file
))
5554 if (hist_trigger_check_refs(trigger_data
, file
)) {
5559 cmd_ops
->unreg(glob
+1, trigger_ops
, trigger_data
, file
);
5561 mutex_lock(&synth_event_mutex
);
5562 se_name
= trace_event_name(file
->event_call
);
5563 se
= find_synth_event(se_name
);
5566 mutex_unlock(&synth_event_mutex
);
5572 ret
= cmd_ops
->reg(glob
, trigger_ops
, trigger_data
, file
);
5574 * The above returns on success the # of triggers registered,
5575 * but if it didn't register any it returns zero. Consider no
5576 * triggers registered a failure too.
5579 if (!(attrs
->pause
|| attrs
->cont
|| attrs
->clear
))
5585 if (get_named_trigger_data(trigger_data
))
5588 if (has_hist_vars(hist_data
))
5589 save_hist_vars(hist_data
);
5591 ret
= create_actions(hist_data
, file
);
5595 ret
= tracing_map_init(hist_data
->map
);
5599 ret
= hist_trigger_enable(trigger_data
, file
);
5603 mutex_lock(&synth_event_mutex
);
5604 se_name
= trace_event_name(file
->event_call
);
5605 se
= find_synth_event(se_name
);
5608 mutex_unlock(&synth_event_mutex
);
5610 /* Just return zero, not the number of registered triggers */
5618 cmd_ops
->unreg(glob
+1, trigger_ops
, trigger_data
, file
);
5620 if (cmd_ops
->set_filter
)
5621 cmd_ops
->set_filter(NULL
, trigger_data
, NULL
);
5623 remove_hist_vars(hist_data
);
5625 kfree(trigger_data
);
5627 destroy_hist_data(hist_data
);
5631 static struct event_command trigger_hist_cmd
= {
5633 .trigger_type
= ETT_EVENT_HIST
,
5634 .flags
= EVENT_CMD_FL_NEEDS_REC
,
5635 .func
= event_hist_trigger_func
,
5636 .reg
= hist_register_trigger
,
5637 .unreg
= hist_unregister_trigger
,
5638 .unreg_all
= hist_unreg_all
,
5639 .get_trigger_ops
= event_hist_get_trigger_ops
,
5640 .set_filter
= set_trigger_filter
,
5643 __init
int register_trigger_hist_cmd(void)
5647 ret
= register_event_command(&trigger_hist_cmd
);
5654 hist_enable_trigger(struct event_trigger_data
*data
, void *rec
,
5655 struct ring_buffer_event
*event
)
5657 struct enable_trigger_data
*enable_data
= data
->private_data
;
5658 struct event_trigger_data
*test
;
5660 list_for_each_entry_rcu(test
, &enable_data
->file
->triggers
, list
) {
5661 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
5662 if (enable_data
->enable
)
5663 test
->paused
= false;
5665 test
->paused
= true;
5671 hist_enable_count_trigger(struct event_trigger_data
*data
, void *rec
,
5672 struct ring_buffer_event
*event
)
5677 if (data
->count
!= -1)
5680 hist_enable_trigger(data
, rec
, event
);
5683 static struct event_trigger_ops hist_enable_trigger_ops
= {
5684 .func
= hist_enable_trigger
,
5685 .print
= event_enable_trigger_print
,
5686 .init
= event_trigger_init
,
5687 .free
= event_enable_trigger_free
,
5690 static struct event_trigger_ops hist_enable_count_trigger_ops
= {
5691 .func
= hist_enable_count_trigger
,
5692 .print
= event_enable_trigger_print
,
5693 .init
= event_trigger_init
,
5694 .free
= event_enable_trigger_free
,
5697 static struct event_trigger_ops hist_disable_trigger_ops
= {
5698 .func
= hist_enable_trigger
,
5699 .print
= event_enable_trigger_print
,
5700 .init
= event_trigger_init
,
5701 .free
= event_enable_trigger_free
,
5704 static struct event_trigger_ops hist_disable_count_trigger_ops
= {
5705 .func
= hist_enable_count_trigger
,
5706 .print
= event_enable_trigger_print
,
5707 .init
= event_trigger_init
,
5708 .free
= event_enable_trigger_free
,
5711 static struct event_trigger_ops
*
5712 hist_enable_get_trigger_ops(char *cmd
, char *param
)
5714 struct event_trigger_ops
*ops
;
5717 enable
= (strcmp(cmd
, ENABLE_HIST_STR
) == 0);
5720 ops
= param
? &hist_enable_count_trigger_ops
:
5721 &hist_enable_trigger_ops
;
5723 ops
= param
? &hist_disable_count_trigger_ops
:
5724 &hist_disable_trigger_ops
;
5729 static void hist_enable_unreg_all(struct trace_event_file
*file
)
5731 struct event_trigger_data
*test
, *n
;
5733 list_for_each_entry_safe(test
, n
, &file
->triggers
, list
) {
5734 if (test
->cmd_ops
->trigger_type
== ETT_HIST_ENABLE
) {
5735 list_del_rcu(&test
->list
);
5736 update_cond_flag(file
);
5737 trace_event_trigger_enable_disable(file
, 0);
5738 if (test
->ops
->free
)
5739 test
->ops
->free(test
->ops
, test
);
5744 static struct event_command trigger_hist_enable_cmd
= {
5745 .name
= ENABLE_HIST_STR
,
5746 .trigger_type
= ETT_HIST_ENABLE
,
5747 .func
= event_enable_trigger_func
,
5748 .reg
= event_enable_register_trigger
,
5749 .unreg
= event_enable_unregister_trigger
,
5750 .unreg_all
= hist_enable_unreg_all
,
5751 .get_trigger_ops
= hist_enable_get_trigger_ops
,
5752 .set_filter
= set_trigger_filter
,
5755 static struct event_command trigger_hist_disable_cmd
= {
5756 .name
= DISABLE_HIST_STR
,
5757 .trigger_type
= ETT_HIST_ENABLE
,
5758 .func
= event_enable_trigger_func
,
5759 .reg
= event_enable_register_trigger
,
5760 .unreg
= event_enable_unregister_trigger
,
5761 .unreg_all
= hist_enable_unreg_all
,
5762 .get_trigger_ops
= hist_enable_get_trigger_ops
,
5763 .set_filter
= set_trigger_filter
,
5766 static __init
void unregister_trigger_hist_enable_disable_cmds(void)
5768 unregister_event_command(&trigger_hist_enable_cmd
);
5769 unregister_event_command(&trigger_hist_disable_cmd
);
5772 __init
int register_trigger_hist_enable_disable_cmds(void)
5776 ret
= register_event_command(&trigger_hist_enable_cmd
);
5777 if (WARN_ON(ret
< 0))
5779 ret
= register_event_command(&trigger_hist_disable_cmd
);
5780 if (WARN_ON(ret
< 0))
5781 unregister_trigger_hist_enable_disable_cmds();
5786 static __init
int trace_events_hist_init(void)
5788 struct dentry
*entry
= NULL
;
5789 struct dentry
*d_tracer
;
5792 d_tracer
= tracing_init_dentry();
5793 if (IS_ERR(d_tracer
)) {
5794 err
= PTR_ERR(d_tracer
);
5798 entry
= tracefs_create_file("synthetic_events", 0644, d_tracer
,
5799 NULL
, &synth_events_fops
);
5807 pr_warn("Could not create tracefs 'synthetic_events' entry\n");
5812 fs_initcall(trace_events_hist_init
);