// SPDX-License-Identifier: GPL-2.0
/*
 * trace_events_hist - trace event hist triggers
 *
 * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
 */

#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/rculist.h>
#include <linux/tracefs.h>

#include "tracing_map.h"
#include "trace_dynevent.h"

#define SYNTH_SYSTEM		"synthetic"
#define SYNTH_FIELDS_MAX	16

#define STR_VAR_LEN_MAX		32 /* must be multiple of sizeof(u64) */
#define ERRORS \
	C(NONE,			"No error"), \
	C(DUPLICATE_VAR,	"Variable already defined"), \
	C(VAR_NOT_UNIQUE,	"Variable name not unique, need to use fully qualified name (subsys.event.var) for variable"), \
	C(TOO_MANY_VARS,	"Too many variables defined"), \
	C(MALFORMED_ASSIGNMENT,	"Malformed assignment"), \
	C(NAMED_MISMATCH,	"Named hist trigger doesn't match existing named trigger (includes variables)"), \
	C(TRIGGER_EEXIST,	"Hist trigger already exists"), \
	C(TRIGGER_ENOENT_CLEAR,	"Can't clear or continue a nonexistent hist trigger"), \
	C(SET_CLOCK_FAIL,	"Couldn't set trace_clock"), \
	C(BAD_FIELD_MODIFIER,	"Invalid field modifier"), \
	C(TOO_MANY_SUBEXPR,	"Too many subexpressions (3 max)"), \
	C(TIMESTAMP_MISMATCH,	"Timestamp units in expression don't match"), \
	C(TOO_MANY_FIELD_VARS,	"Too many field variables defined"), \
	C(EVENT_FILE_NOT_FOUND,	"Event file not found"), \
	C(HIST_NOT_FOUND,	"Matching event histogram not found"), \
	C(HIST_CREATE_FAIL,	"Couldn't create histogram for field"), \
	C(SYNTH_VAR_NOT_FOUND,	"Couldn't find synthetic variable"), \
	C(SYNTH_EVENT_NOT_FOUND,"Couldn't find synthetic event"), \
	C(SYNTH_TYPE_MISMATCH,	"Param type doesn't match synthetic event field type"), \
	C(SYNTH_COUNT_MISMATCH,	"Param count doesn't match synthetic event field count"), \
	C(FIELD_VAR_PARSE_FAIL,	"Couldn't parse field variable"), \
	C(VAR_CREATE_FIND_FAIL,	"Couldn't create or find variable"), \
	C(ONX_NOT_VAR,		"For onmax(x) or onchange(x), x must be a variable"), \
	C(ONX_VAR_NOT_FOUND,	"Couldn't find onmax or onchange variable"), \
	C(ONX_VAR_CREATE_FAIL,	"Couldn't create onmax or onchange variable"), \
	C(FIELD_VAR_CREATE_FAIL,"Couldn't create field variable"), \
	C(TOO_MANY_PARAMS,	"Too many action params"), \
	C(PARAM_NOT_FOUND,	"Couldn't find param"), \
	C(INVALID_PARAM,	"Invalid action param"), \
	C(ACTION_NOT_FOUND,	"No action found"), \
	C(NO_SAVE_PARAMS,	"No params found for save()"), \
	C(TOO_MANY_SAVE_ACTIONS,"Can't have more than one save() action per hist"), \
	C(ACTION_MISMATCH,	"Handler doesn't support action"), \
	C(NO_CLOSING_PAREN,	"No closing paren found"), \
	C(SUBSYS_NOT_FOUND,	"Missing subsystem"), \
	C(INVALID_SUBSYS_EVENT,	"Invalid subsystem or event name"), \
	C(INVALID_REF_KEY,	"Using variable references in keys not supported"), \
	C(VAR_NOT_FOUND,	"Couldn't find variable"), \
	C(FIELD_NOT_FOUND,	"Couldn't find field"),

#undef C
#define C(a, b)		HIST_ERR_##a

enum { ERRORS };

#undef C
#define C(a, b)		b

static const char *err_text[] = { ERRORS };
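/*
 * Sketch of how the ERRORS/C() pairing above is used (the list is
 * expanded twice - once for the HIST_ERR_* enum, once for err_text[],
 * so the enum value doubles as the index of its message):
 *
 *	hist_err(tr, HIST_ERR_DUPLICATE_VAR, errpos(var_name));
 *	// logs err_text[HIST_ERR_DUPLICATE_VAR]: "Variable already defined"
 */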
struct hist_field;

typedef u64 (*hist_field_fn_t) (struct hist_field *field,
				struct tracing_map_elt *elt,
				struct ring_buffer_event *rbe,
				void *event);

#define HIST_FIELD_OPERANDS_MAX	2
#define HIST_FIELDS_MAX		(TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX)
#define HIST_ACTIONS_MAX	8
/*
 * A hist_var (histogram variable) contains variable information for
 * hist_fields having the HIST_FIELD_FL_VAR or HIST_FIELD_FL_VAR_REF
 * flag set.  A hist_var has a variable name e.g. ts0, and is
 * associated with a given histogram trigger, as specified by
 * hist_data.  The hist_var idx is the unique index assigned to the
 * variable by the hist trigger's tracing_map.  The idx is what is
 * used to set a variable's value and, by a variable reference, to
 * retrieve it.
 */
struct hist_var {
	char				*name;
	struct hist_trigger_data	*hist_data;
	unsigned int			idx;
};

struct hist_field {
	struct ftrace_event_field	*field;
	unsigned long			flags;
	hist_field_fn_t			fn;
	unsigned int			size;
	unsigned int			offset;
	unsigned int			is_signed;
	char				*type;
	struct hist_field		*operands[HIST_FIELD_OPERANDS_MAX];
	struct hist_trigger_data	*hist_data;

	/*
	 * Variable fields contain variable-specific info in var.
	 */
	struct hist_var			var;
	enum field_op_id		operator;
	char				*system;
	char				*event_name;

	/*
	 * The name field is used for EXPR and VAR_REF fields.  VAR
	 * fields contain the variable name in var.name.
	 */
	char				*name;

	/*
	 * When a histogram trigger is hit, if it has any references
	 * to variables, the values of those variables are collected
	 * into a var_ref_vals array by resolve_var_refs().  The
	 * current value of each variable is read from the tracing_map
	 * using the hist field's hist_var.idx and entered into the
	 * var_ref_idx entry i.e. var_ref_vals[var_ref_idx].
	 */
	unsigned int			var_ref_idx;
	bool				read_once;
};
static u64 hist_field_none(struct hist_field *field,
			   struct tracing_map_elt *elt,
			   struct ring_buffer_event *rbe,
			   void *event)
{
	return 0;
}

static u64 hist_field_counter(struct hist_field *field,
			      struct tracing_map_elt *elt,
			      struct ring_buffer_event *rbe,
			      void *event)
{
	return 1;
}

static u64 hist_field_string(struct hist_field *hist_field,
			     struct tracing_map_elt *elt,
			     struct ring_buffer_event *rbe,
			     void *event)
{
	char *addr = (char *)(event + hist_field->field->offset);

	return (u64)(unsigned long)addr;
}
static u64 hist_field_dynstring(struct hist_field *hist_field,
				struct tracing_map_elt *elt,
				struct ring_buffer_event *rbe,
				void *event)
{
	u32 str_item = *(u32 *)(event + hist_field->field->offset);
	int str_loc = str_item & 0xffff;
	char *addr = (char *)(event + str_loc);

	return (u64)(unsigned long)addr;
}
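/*
 * Added note on the decoding above: a dynamic string is stored in the
 * record as a 32-bit __data_loc word whose low 16 bits hold the offset
 * of the string data from the start of the entry (the high 16 bits hold
 * its length), hence the '& 0xffff' before computing the address.
 */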
static u64 hist_field_pstring(struct hist_field *hist_field,
			      struct tracing_map_elt *elt,
			      struct ring_buffer_event *rbe,
			      void *event)
{
	char **addr = (char **)(event + hist_field->field->offset);

	return (u64)(unsigned long)*addr;
}
static u64 hist_field_log2(struct hist_field *hist_field,
			   struct tracing_map_elt *elt,
			   struct ring_buffer_event *rbe,
			   void *event)
{
	struct hist_field *operand = hist_field->operands[0];

	u64 val = operand->fn(operand, elt, rbe, event);

	return (u64) ilog2(roundup_pow_of_two(val));
}
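/*
 * Example of the .log2 bucketing performed above: a sampled value of
 * 1000 is rounded up to the next power of two (1024) and ilog2() then
 * yields bucket 10, so all values in the range 513..1024 land in the
 * same histogram bucket.
 */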
static u64 hist_field_plus(struct hist_field *hist_field,
			   struct tracing_map_elt *elt,
			   struct ring_buffer_event *rbe,
			   void *event)
{
	struct hist_field *operand1 = hist_field->operands[0];
	struct hist_field *operand2 = hist_field->operands[1];

	u64 val1 = operand1->fn(operand1, elt, rbe, event);
	u64 val2 = operand2->fn(operand2, elt, rbe, event);

	return val1 + val2;
}
static u64 hist_field_minus(struct hist_field *hist_field,
			    struct tracing_map_elt *elt,
			    struct ring_buffer_event *rbe,
			    void *event)
{
	struct hist_field *operand1 = hist_field->operands[0];
	struct hist_field *operand2 = hist_field->operands[1];

	u64 val1 = operand1->fn(operand1, elt, rbe, event);
	u64 val2 = operand2->fn(operand2, elt, rbe, event);

	return val1 - val2;
}
static u64 hist_field_unary_minus(struct hist_field *hist_field,
				  struct tracing_map_elt *elt,
				  struct ring_buffer_event *rbe,
				  void *event)
{
	struct hist_field *operand = hist_field->operands[0];

	s64 sval = (s64)operand->fn(operand, elt, rbe, event);
	u64 val = (u64)-sval;

	return val;
}
#define DEFINE_HIST_FIELD_FN(type)					\
	static u64 hist_field_##type(struct hist_field *hist_field,	\
				     struct tracing_map_elt *elt,	\
				     struct ring_buffer_event *rbe,	\
				     void *event)			\
{									\
	type *addr = (type *)(event + hist_field->field->offset);	\
									\
	return (u64)(unsigned long)*addr;				\
}

DEFINE_HIST_FIELD_FN(s64);
DEFINE_HIST_FIELD_FN(u64);
DEFINE_HIST_FIELD_FN(s32);
DEFINE_HIST_FIELD_FN(u32);
DEFINE_HIST_FIELD_FN(s16);
DEFINE_HIST_FIELD_FN(u16);
DEFINE_HIST_FIELD_FN(s8);
DEFINE_HIST_FIELD_FN(u8);
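/*
 * For reference, DEFINE_HIST_FIELD_FN(u32) above expands to roughly:
 *
 *	static u64 hist_field_u32(struct hist_field *hist_field,
 *				  struct tracing_map_elt *elt,
 *				  struct ring_buffer_event *rbe,
 *				  void *event)
 *	{
 *		u32 *addr = (u32 *)(event + hist_field->field->offset);
 *
 *		return (u64)(unsigned long)*addr;
 *	}
 *
 * i.e. one fetch helper per fixed-size field type, later selected by
 * select_value_fn().
 */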
#define for_each_hist_field(i, hist_data)	\
	for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)

#define for_each_hist_val_field(i, hist_data)	\
	for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)

#define for_each_hist_key_field(i, hist_data)	\
	for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)

#define HIST_STACKTRACE_DEPTH	16
#define HIST_STACKTRACE_SIZE	(HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
#define HIST_STACKTRACE_SKIP	5

#define HITCOUNT_IDX		0
#define HIST_KEY_SIZE_MAX	(MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
enum hist_field_flags {
	HIST_FIELD_FL_HITCOUNT		= 1 << 0,
	HIST_FIELD_FL_KEY		= 1 << 1,
	HIST_FIELD_FL_STRING		= 1 << 2,
	HIST_FIELD_FL_HEX		= 1 << 3,
	HIST_FIELD_FL_SYM		= 1 << 4,
	HIST_FIELD_FL_SYM_OFFSET	= 1 << 5,
	HIST_FIELD_FL_EXECNAME		= 1 << 6,
	HIST_FIELD_FL_SYSCALL		= 1 << 7,
	HIST_FIELD_FL_STACKTRACE	= 1 << 8,
	HIST_FIELD_FL_LOG2		= 1 << 9,
	HIST_FIELD_FL_TIMESTAMP		= 1 << 10,
	HIST_FIELD_FL_TIMESTAMP_USECS	= 1 << 11,
	HIST_FIELD_FL_VAR		= 1 << 12,
	HIST_FIELD_FL_EXPR		= 1 << 13,
	HIST_FIELD_FL_VAR_REF		= 1 << 14,
	HIST_FIELD_FL_CPU		= 1 << 15,
	HIST_FIELD_FL_ALIAS		= 1 << 16,
};

struct var_defs {
	unsigned int	n_vars;
	char		*name[TRACING_MAP_VARS_MAX];
	char		*expr[TRACING_MAP_VARS_MAX];
};
struct hist_trigger_attrs {
	char		*keys_str;
	char		*vals_str;
	char		*sort_key_str;
	char		*name;
	char		*clock;
	bool		pause;
	bool		cont;
	bool		clear;
	bool		ts_in_usecs;
	unsigned int	map_bits;

	char		*assignment_str[TRACING_MAP_VARS_MAX];
	unsigned int	n_assignments;

	char		*action_str[HIST_ACTIONS_MAX];
	unsigned int	n_actions;

	struct var_defs	var_defs;
};

struct field_var {
	struct hist_field	*var;
	struct hist_field	*val;
};

struct field_var_hist {
	struct hist_trigger_data	*hist_data;
};
struct hist_trigger_data {
	struct hist_field		*fields[HIST_FIELDS_MAX];
	unsigned int			n_vals;
	unsigned int			n_keys;
	unsigned int			n_fields;
	unsigned int			key_size;
	struct tracing_map_sort_key	sort_keys[TRACING_MAP_SORT_KEYS_MAX];
	unsigned int			n_sort_keys;
	struct trace_event_file		*event_file;
	struct hist_trigger_attrs	*attrs;
	struct tracing_map		*map;
	bool				enable_timestamps;

	struct hist_field		*var_refs[TRACING_MAP_VARS_MAX];
	unsigned int			n_var_refs;

	struct action_data		*actions[HIST_ACTIONS_MAX];
	unsigned int			n_actions;

	struct field_var		*field_vars[SYNTH_FIELDS_MAX];
	unsigned int			n_field_vars;
	unsigned int			n_field_var_str;
	struct field_var_hist		*field_var_hists[SYNTH_FIELDS_MAX];
	unsigned int			n_field_var_hists;

	struct field_var		*save_vars[SYNTH_FIELDS_MAX];
	unsigned int			n_save_vars;
	unsigned int			n_save_var_str;
};
static int synth_event_create(int argc, const char **argv);
static int synth_event_show(struct seq_file *m, struct dyn_event *ev);
static int synth_event_release(struct dyn_event *ev);
static bool synth_event_is_busy(struct dyn_event *ev);
static bool synth_event_match(const char *system, const char *event,
			      struct dyn_event *ev);

static struct dyn_event_operations synth_event_ops = {
	.create = synth_event_create,
	.show = synth_event_show,
	.is_busy = synth_event_is_busy,
	.free = synth_event_release,
	.match = synth_event_match,
};
struct synth_field {
	char *type;
	char *name;
	size_t size;
	bool is_signed;
	bool is_string;
};

struct synth_event {
	struct dyn_event			devent;
	int					ref;
	char					*name;
	struct synth_field			**fields;
	unsigned int				n_fields;
	unsigned int				n_u64;
	struct trace_event_class		class;
	struct trace_event_call			call;
	struct tracepoint			*tp;
};
static bool is_synth_event(struct dyn_event *ev)
{
	return ev->ops == &synth_event_ops;
}

static struct synth_event *to_synth_event(struct dyn_event *ev)
{
	return container_of(ev, struct synth_event, devent);
}

static bool synth_event_is_busy(struct dyn_event *ev)
{
	struct synth_event *event = to_synth_event(ev);

	return event->ref != 0;
}

static bool synth_event_match(const char *system, const char *event,
			      struct dyn_event *ev)
{
	struct synth_event *sev = to_synth_event(ev);

	return strcmp(sev->name, event) == 0 &&
		(!system || strcmp(system, SYNTH_SYSTEM) == 0);
}
struct action_data;

typedef void (*action_fn_t) (struct hist_trigger_data *hist_data,
			     struct tracing_map_elt *elt, void *rec,
			     struct ring_buffer_event *rbe, void *key,
			     struct action_data *data, u64 *var_ref_vals);

typedef bool (*check_track_val_fn_t) (u64 track_val, u64 var_val);
struct action_data {
	enum handler_id		handler;
	enum action_id		action;

	unsigned int		n_params;
	char			*params[SYNTH_FIELDS_MAX];

	/*
	 * When a histogram trigger is hit, the values of any
	 * references to variables, including variables being passed
	 * as parameters to synthetic events, are collected into a
	 * var_ref_vals array.  This var_ref_idx is the index of the
	 * first param in the array to be passed to the synthetic
	 * event invocation.
	 */
	unsigned int		var_ref_idx;
	struct synth_event	*synth_event;
	bool			use_trace_keyword;
	char			*synth_event_name;

	union {
		struct {
			char			*event;
			char			*event_system;
		} match_data;

		struct {
			/*
			 * var_str contains the $-unstripped variable
			 * name referenced by var_ref, and used when
			 * printing the action.  Because var_ref
			 * creation is deferred to create_actions(),
			 * we need a per-action way to save it until
			 * then, thus var_str.
			 */
			char			*var_str;

			/*
			 * var_ref refers to the variable being
			 * tracked e.g onmax($var).
			 */
			struct hist_field	*var_ref;

			/*
			 * track_var contains the 'invisible' tracking
			 * variable created to keep the current
			 * tracked value.
			 */
			struct hist_field	*track_var;

			check_track_val_fn_t	check_val;
			action_fn_t		save_data;
		} track_data;
	};
};
struct track_data {
	unsigned int			key_len;
	void				*key;
	struct tracing_map_elt		elt;

	struct action_data		*action_data;
	struct hist_trigger_data	*hist_data;
};

struct hist_elt_data {
	char *comm;
	u64 *var_ref_vals;
	char *field_var_str[SYNTH_FIELDS_MAX];
};

struct snapshot_context {
	struct tracing_map_elt	*elt;
	void			*key;
};
static void track_data_free(struct track_data *track_data)
{
	struct hist_elt_data *elt_data;

	if (!track_data)
		return;

	kfree(track_data->key);

	elt_data = track_data->elt.private_data;
	if (elt_data) {
		kfree(elt_data->comm);
		kfree(elt_data);
	}

	kfree(track_data);
}
static struct track_data *track_data_alloc(unsigned int key_len,
					   struct action_data *action_data,
					   struct hist_trigger_data *hist_data)
{
	struct track_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
	struct hist_elt_data *elt_data;

	if (!data)
		return ERR_PTR(-ENOMEM);

	data->key = kzalloc(key_len, GFP_KERNEL);
	if (!data->key) {
		track_data_free(data);
		return ERR_PTR(-ENOMEM);
	}

	data->key_len = key_len;
	data->action_data = action_data;
	data->hist_data = hist_data;

	elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
	if (!elt_data) {
		track_data_free(data);
		return ERR_PTR(-ENOMEM);
	}
	data->elt.private_data = elt_data;

	elt_data->comm = kzalloc(TASK_COMM_LEN, GFP_KERNEL);
	if (!elt_data->comm) {
		track_data_free(data);
		return ERR_PTR(-ENOMEM);
	}

	return data;
}
static char last_cmd[MAX_FILTER_STR_VAL];
static char last_cmd_loc[MAX_FILTER_STR_VAL];

static int errpos(char *str)
{
	return err_pos(last_cmd, str);
}
static void last_cmd_set(struct trace_event_file *file, char *str)
{
	const char *system = NULL, *name = NULL;
	struct trace_event_call *call;

	if (!str)
		return;

	strncpy(last_cmd, str, MAX_FILTER_STR_VAL - 1);

	if (file) {
		call = file->event_call;

		system = call->class->system;
		if (system) {
			name = trace_event_name(call);
			if (!name)
				system = NULL;
		}
	}

	if (system)
		snprintf(last_cmd_loc, MAX_FILTER_STR_VAL, "hist:%s:%s", system, name);
}
static void hist_err(struct trace_array *tr, u8 err_type, u8 err_pos)
{
	tracing_log_err(tr, last_cmd_loc, last_cmd, err_text,
			err_type, err_pos);
}

static void hist_err_clear(void)
{
	last_cmd[0] = '\0';
	last_cmd_loc[0] = '\0';
}
struct synth_trace_event {
	struct trace_entry	ent;
	u64			fields[];
};
static int synth_event_define_fields(struct trace_event_call *call)
{
	struct synth_trace_event trace;
	int offset = offsetof(typeof(trace), fields);
	struct synth_event *event = call->data;
	unsigned int i, size, n_u64;
	char *name, *type;
	bool is_signed;
	int ret = 0;

	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
		size = event->fields[i]->size;
		is_signed = event->fields[i]->is_signed;
		type = event->fields[i]->type;
		name = event->fields[i]->name;
		ret = trace_define_field(call, type, name, offset, size,
					 is_signed, FILTER_OTHER);
		if (ret)
			break;

		if (event->fields[i]->is_string) {
			offset += STR_VAR_LEN_MAX;
			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
		} else {
			offset += sizeof(u64);
			n_u64++;
		}
	}

	event->n_u64 = n_u64;

	return ret;
}
static bool synth_field_signed(char *type)
{
	if (str_has_prefix(type, "u"))
		return false;

	return true;
}

static int synth_field_is_string(char *type)
{
	if (strstr(type, "char[") != NULL)
		return true;

	return false;
}
static int synth_field_string_size(char *type)
{
	char buf[4], *end, *start;
	unsigned int len;
	int size, err;

	start = strstr(type, "char[");
	if (start == NULL)
		return -EINVAL;

	start += sizeof("char[") - 1;

	end = strchr(type, ']');
	if (!end || end < start)
		return -EINVAL;

	len = end - start;
	if (len > 3)
		return -EINVAL;

	strncpy(buf, start, len);
	buf[len] = '\0';

	err = kstrtouint(buf, 0, &size);
	if (err)
		return err;

	if (size > STR_VAR_LEN_MAX)
		return -EINVAL;

	return size;
}
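/*
 * Example: for a synthetic field declared as 'char[16] comm',
 * synth_field_string_size("char[16]") parses the bracketed length and
 * returns 16; anything larger than STR_VAR_LEN_MAX (32) is rejected.
 */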
static int synth_field_size(char *type)
{
	int size = 0;

	if (strcmp(type, "s64") == 0)
		size = sizeof(s64);
	else if (strcmp(type, "u64") == 0)
		size = sizeof(u64);
	else if (strcmp(type, "s32") == 0)
		size = sizeof(s32);
	else if (strcmp(type, "u32") == 0)
		size = sizeof(u32);
	else if (strcmp(type, "s16") == 0)
		size = sizeof(s16);
	else if (strcmp(type, "u16") == 0)
		size = sizeof(u16);
	else if (strcmp(type, "s8") == 0)
		size = sizeof(s8);
	else if (strcmp(type, "u8") == 0)
		size = sizeof(u8);
	else if (strcmp(type, "char") == 0)
		size = sizeof(char);
	else if (strcmp(type, "unsigned char") == 0)
		size = sizeof(unsigned char);
	else if (strcmp(type, "int") == 0)
		size = sizeof(int);
	else if (strcmp(type, "unsigned int") == 0)
		size = sizeof(unsigned int);
	else if (strcmp(type, "long") == 0)
		size = sizeof(long);
	else if (strcmp(type, "unsigned long") == 0)
		size = sizeof(unsigned long);
	else if (strcmp(type, "pid_t") == 0)
		size = sizeof(pid_t);
	else if (synth_field_is_string(type))
		size = synth_field_string_size(type);

	return size;
}
static const char *synth_field_fmt(char *type)
{
	const char *fmt = "%llu";

	if (strcmp(type, "s64") == 0)
		fmt = "%lld";
	else if (strcmp(type, "u64") == 0)
		fmt = "%llu";
	else if (strcmp(type, "s32") == 0)
		fmt = "%d";
	else if (strcmp(type, "u32") == 0)
		fmt = "%u";
	else if (strcmp(type, "s16") == 0)
		fmt = "%d";
	else if (strcmp(type, "u16") == 0)
		fmt = "%u";
	else if (strcmp(type, "s8") == 0)
		fmt = "%d";
	else if (strcmp(type, "u8") == 0)
		fmt = "%u";
	else if (strcmp(type, "char") == 0)
		fmt = "%d";
	else if (strcmp(type, "unsigned char") == 0)
		fmt = "%u";
	else if (strcmp(type, "int") == 0)
		fmt = "%d";
	else if (strcmp(type, "unsigned int") == 0)
		fmt = "%u";
	else if (strcmp(type, "long") == 0)
		fmt = "%ld";
	else if (strcmp(type, "unsigned long") == 0)
		fmt = "%lu";
	else if (strcmp(type, "pid_t") == 0)
		fmt = "%d";
	else if (synth_field_is_string(type))
		fmt = "%s";

	return fmt;
}
static enum print_line_t print_synth_event(struct trace_iterator *iter,
					   int flags,
					   struct trace_event *event)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct synth_trace_event *entry;
	struct synth_event *se;
	unsigned int i, n_u64;
	char print_fmt[32];
	const char *fmt;

	entry = (struct synth_trace_event *)iter->ent;
	se = container_of(event, struct synth_event, call.event);

	trace_seq_printf(s, "%s: ", se->name);

	for (i = 0, n_u64 = 0; i < se->n_fields; i++) {
		if (trace_seq_has_overflowed(s))
			goto end;

		fmt = synth_field_fmt(se->fields[i]->type);

		/* parameter types */
		if (tr->trace_flags & TRACE_ITER_VERBOSE)
			trace_seq_printf(s, "%s ", fmt);

		snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt);

		/* parameter values */
		if (se->fields[i]->is_string) {
			trace_seq_printf(s, print_fmt, se->fields[i]->name,
					 (char *)&entry->fields[n_u64],
					 i == se->n_fields - 1 ? "" : " ");
			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
		} else {
			trace_seq_printf(s, print_fmt, se->fields[i]->name,
					 entry->fields[n_u64],
					 i == se->n_fields - 1 ? "" : " ");
			n_u64++;
		}
	}
end:
	trace_seq_putc(s, '\n');

	return trace_handle_return(s);
}

static struct trace_event_functions synth_event_funcs = {
	.trace		= print_synth_event
};
static notrace void trace_event_raw_event_synth(void *__data,
						u64 *var_ref_vals,
						unsigned int var_ref_idx)
{
	struct trace_event_file *trace_file = __data;
	struct synth_trace_event *entry;
	struct trace_event_buffer fbuffer;
	struct ring_buffer *buffer;
	struct synth_event *event;
	unsigned int i, n_u64;
	int fields_size = 0;

	event = trace_file->event_call->data;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	fields_size = event->n_u64 * sizeof(u64);

	/*
	 * Avoid ring buffer recursion detection, as this event
	 * is being performed within another event.
	 */
	buffer = trace_file->tr->trace_buffer.buffer;
	ring_buffer_nest_start(buffer);

	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
					   sizeof(*entry) + fields_size);
	if (!entry)
		goto out;

	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
		if (event->fields[i]->is_string) {
			char *str_val = (char *)(long)var_ref_vals[var_ref_idx + i];
			char *str_field = (char *)&entry->fields[n_u64];

			strscpy(str_field, str_val, STR_VAR_LEN_MAX);
			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
		} else {
			entry->fields[n_u64] = var_ref_vals[var_ref_idx + i];
			n_u64++;
		}
	}

	trace_event_buffer_commit(&fbuffer);
out:
	ring_buffer_nest_end(buffer);
}

static void free_synth_event_print_fmt(struct trace_event_call *call)
{
	if (call) {
		kfree(call->print_fmt);
		call->print_fmt = NULL;
	}
}
static int __set_synth_event_print_fmt(struct synth_event *event,
					char *buf, int len)
{
	const char *fmt;
	int pos = 0;
	int i;

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
	for (i = 0; i < event->n_fields; i++) {
		fmt = synth_field_fmt(event->fields[i]->type);
		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s",
				event->fields[i]->name, fmt,
				i == event->n_fields - 1 ? "" : ", ");
	}
	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	for (i = 0; i < event->n_fields; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO,
				", REC->%s", event->fields[i]->name);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}

static int set_synth_event_print_fmt(struct trace_event_call *call)
{
	struct synth_event *event = call->data;
	char *print_fmt;
	int len;

	/* First: called with 0 length to calculate the needed length */
	len = __set_synth_event_print_fmt(event, NULL, 0);

	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_synth_event_print_fmt(event, print_fmt, len + 1);
	call->print_fmt = print_fmt;

	return 0;
}
static void free_synth_field(struct synth_field *field)
{
	kfree(field->type);
	kfree(field->name);
	kfree(field);
}
static struct synth_field *parse_synth_field(int argc, const char **argv,
					     int *consumed)
{
	struct synth_field *field;
	const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
	int len, ret = 0;

	if (field_type[0] == ';')
		field_type++;

	if (!strcmp(field_type, "unsigned")) {
		if (argc < 3)
			return ERR_PTR(-EINVAL);
		prefix = "unsigned ";
		field_type = argv[1];
		field_name = argv[2];
		*consumed = 3;
	} else {
		field_name = argv[1];
		*consumed = 2;
	}

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		return ERR_PTR(-ENOMEM);

	len = strlen(field_name);
	array = strchr(field_name, '[');
	if (array)
		len -= strlen(array);
	else if (field_name[len - 1] == ';')
		len--;

	field->name = kmemdup_nul(field_name, len, GFP_KERNEL);
	if (!field->name) {
		ret = -ENOMEM;
		goto free;
	}

	if (field_type[0] == ';')
		field_type++;
	len = strlen(field_type) + 1;
	if (array)
		len += strlen(array);
	if (prefix)
		len += strlen(prefix);

	field->type = kzalloc(len, GFP_KERNEL);
	if (!field->type) {
		ret = -ENOMEM;
		goto free;
	}
	if (prefix)
		strcat(field->type, prefix);
	strcat(field->type, field_type);
	if (array) {
		strcat(field->type, array);
		if (field->type[len - 1] == ';')
			field->type[len - 1] = '\0';
	}

	field->size = synth_field_size(field->type);
	if (!field->size) {
		ret = -EINVAL;
		goto free;
	}

	if (synth_field_is_string(field->type))
		field->is_string = true;

	field->is_signed = synth_field_signed(field->type);

 out:
	return field;
 free:
	free_synth_field(field);
	field = ERR_PTR(ret);
	goto out;
}
static void free_synth_tracepoint(struct tracepoint *tp)
{
	if (!tp)
		return;

	kfree(tp->name);
	kfree(tp);
}

static struct tracepoint *alloc_synth_tracepoint(char *name)
{
	struct tracepoint *tp;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOMEM);

	tp->name = kstrdup(name, GFP_KERNEL);
	if (!tp->name) {
		kfree(tp);
		return ERR_PTR(-ENOMEM);
	}

	return tp;
}

typedef void (*synth_probe_func_t) (void *__data, u64 *var_ref_vals,
				    unsigned int var_ref_idx);
static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals,
			       unsigned int var_ref_idx)
{
	struct tracepoint *tp = event->tp;

	if (unlikely(atomic_read(&tp->key.enabled) > 0)) {
		struct tracepoint_func *probe_func_ptr;
		synth_probe_func_t probe_func;
		void *__data;

		if (!(cpu_online(raw_smp_processor_id())))
			return;

		probe_func_ptr = rcu_dereference_sched((tp)->funcs);
		if (probe_func_ptr) {
			do {
				probe_func = probe_func_ptr->func;
				__data = probe_func_ptr->data;
				probe_func(__data, var_ref_vals, var_ref_idx);
			} while ((++probe_func_ptr)->func);
		}
	}
}

static struct synth_event *find_synth_event(const char *name)
{
	struct dyn_event *pos;
	struct synth_event *event;

	for_each_dyn_event(pos) {
		if (!is_synth_event(pos))
			continue;
		event = to_synth_event(pos);
		if (strcmp(event->name, name) == 0)
			return event;
	}

	return NULL;
}
static int register_synth_event(struct synth_event *event)
{
	struct trace_event_call *call = &event->call;
	int ret = 0;

	event->call.class = &event->class;
	event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL);
	if (!event->class.system) {
		ret = -ENOMEM;
		goto out;
	}

	event->tp = alloc_synth_tracepoint(event->name);
	if (IS_ERR(event->tp)) {
		ret = PTR_ERR(event->tp);
		event->tp = NULL;
		goto out;
	}

	INIT_LIST_HEAD(&call->class->fields);
	call->event.funcs = &synth_event_funcs;
	call->class->define_fields = synth_event_define_fields;

	ret = register_trace_event(&call->event);
	if (!ret) {
		ret = -ENODEV;
		goto out;
	}
	call->flags = TRACE_EVENT_FL_TRACEPOINT;
	call->class->reg = trace_event_reg;
	call->class->probe = trace_event_raw_event_synth;
	call->data = event;
	call->tp = event->tp;

	ret = trace_add_event_call(call);
	if (ret) {
		pr_warn("Failed to register synthetic event: %s\n",
			trace_event_name(call));
		goto err;
	}

	ret = set_synth_event_print_fmt(call);
	if (ret < 0) {
		trace_remove_event_call(call);
		goto err;
	}
 out:
	return ret;
 err:
	unregister_trace_event(&call->event);
	goto out;
}

static int unregister_synth_event(struct synth_event *event)
{
	struct trace_event_call *call = &event->call;
	int ret;

	ret = trace_remove_event_call(call);

	return ret;
}
static void free_synth_event(struct synth_event *event)
{
	unsigned int i;

	if (!event)
		return;

	for (i = 0; i < event->n_fields; i++)
		free_synth_field(event->fields[i]);

	kfree(event->fields);
	kfree(event->name);
	kfree(event->class.system);
	free_synth_tracepoint(event->tp);
	free_synth_event_print_fmt(&event->call);
	kfree(event);
}

static struct synth_event *alloc_synth_event(const char *name, int n_fields,
					     struct synth_field **fields)
{
	struct synth_event *event;
	unsigned int i;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event) {
		event = ERR_PTR(-ENOMEM);
		goto out;
	}

	event->name = kstrdup(name, GFP_KERNEL);
	if (!event->name) {
		kfree(event);
		event = ERR_PTR(-ENOMEM);
		goto out;
	}

	event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL);
	if (!event->fields) {
		free_synth_event(event);
		event = ERR_PTR(-ENOMEM);
		goto out;
	}

	dyn_event_init(&event->devent, &synth_event_ops);

	for (i = 0; i < n_fields; i++)
		event->fields[i] = fields[i];

	event->n_fields = n_fields;
 out:
	return event;
}
static void action_trace(struct hist_trigger_data *hist_data,
			 struct tracing_map_elt *elt, void *rec,
			 struct ring_buffer_event *rbe, void *key,
			 struct action_data *data, u64 *var_ref_vals)
{
	struct synth_event *event = data->synth_event;

	trace_synth(event, var_ref_vals, data->var_ref_idx);
}

struct hist_var_data {
	struct list_head list;
	struct hist_trigger_data *hist_data;
};
static int __create_synth_event(int argc, const char *name, const char **argv)
{
	struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
	struct synth_event *event = NULL;
	int i, consumed = 0, n_fields = 0, ret = 0;

	/*
	 * Argument syntax:
	 *  - Add synthetic event: <event_name> field[;field] ...
	 *  - Remove synthetic event: !<event_name> field[;field] ...
	 *      where 'field' = type field_name
	 */

	if (name[0] == '\0' || argc < 1)
		return -EINVAL;

	mutex_lock(&event_mutex);

	event = find_synth_event(name);
	if (event) {
		ret = -EEXIST;
		goto out;
	}

	for (i = 0; i < argc - 1; i++) {
		if (strcmp(argv[i], ";") == 0)
			continue;
		if (n_fields == SYNTH_FIELDS_MAX) {
			ret = -EINVAL;
			goto err;
		}

		field = parse_synth_field(argc - i, &argv[i], &consumed);
		if (IS_ERR(field)) {
			ret = PTR_ERR(field);
			goto err;
		}
		fields[n_fields++] = field;
		i += consumed - 1;
	}

	if (i < argc && strcmp(argv[i], ";") != 0) {
		ret = -EINVAL;
		goto err;
	}

	event = alloc_synth_event(name, n_fields, fields);
	if (IS_ERR(event)) {
		ret = PTR_ERR(event);
		event = NULL;
		goto err;
	}
	ret = register_synth_event(event);
	if (!ret)
		dyn_event_add(&event->devent);
	else
		free_synth_event(event);
 out:
	mutex_unlock(&event_mutex);

	return ret;
 err:
	for (i = 0; i < n_fields; i++)
		free_synth_field(fields[i]);

	goto out;
}
static int create_or_delete_synth_event(int argc, char **argv)
{
	const char *name = argv[0];
	struct synth_event *event = NULL;
	int ret = -EINVAL;

	/* trace_run_command() ensures argc != 0 */
	if (name[0] == '!') {
		mutex_lock(&event_mutex);
		event = find_synth_event(name + 1);
		if (event) {
			if (event->ref)
				ret = -EBUSY;
			else {
				ret = unregister_synth_event(event);
				if (!ret) {
					dyn_event_remove(&event->devent);
					free_synth_event(event);
				}
			}
		} else
			ret = -ENOENT;
		mutex_unlock(&event_mutex);
		return ret;
	}

	ret = __create_synth_event(argc - 1, name, (const char **)argv + 1);
	return ret == -ECANCELED ? -EINVAL : ret;
}
static int synth_event_create(int argc, const char **argv)
{
	const char *name = argv[0];
	int len;

	if (name[0] != 's' || name[1] != ':')
		return -ECANCELED;
	name += 2;

	/* This interface accepts group name prefix */
	if (strchr(name, '/')) {
		len = str_has_prefix(name, SYNTH_SYSTEM "/");
		if (len == 0)
			return -EINVAL;
		name += len;
	}
	return __create_synth_event(argc - 1, name, argv + 1);
}

static int synth_event_release(struct dyn_event *ev)
{
	struct synth_event *event = to_synth_event(ev);
	int ret;

	if (event->ref)
		return -EBUSY;

	ret = unregister_synth_event(event);
	if (ret)
		return ret;

	dyn_event_remove(ev);
	free_synth_event(event);
	return 0;
}
static int __synth_event_show(struct seq_file *m, struct synth_event *event)
{
	struct synth_field *field;
	unsigned int i;

	seq_printf(m, "%s\t", event->name);

	for (i = 0; i < event->n_fields; i++) {
		field = event->fields[i];

		/* parameter values */
		seq_printf(m, "%s %s%s", field->type, field->name,
			   i == event->n_fields - 1 ? "" : "; ");
	}

	seq_putc(m, '\n');

	return 0;
}

static int synth_event_show(struct seq_file *m, struct dyn_event *ev)
{
	struct synth_event *event = to_synth_event(ev);

	seq_printf(m, "s:%s/", event->class.system);

	return __synth_event_show(m, event);
}

static int synth_events_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;

	if (!is_synth_event(ev))
		return 0;

	return __synth_event_show(m, to_synth_event(ev));
}
static const struct seq_operations synth_events_seq_op = {
	.start	= dyn_event_seq_start,
	.next	= dyn_event_seq_next,
	.stop	= dyn_event_seq_stop,
	.show	= synth_events_seq_show,
};

static int synth_events_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = dyn_events_release_all(&synth_event_ops);
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &synth_events_seq_op);
}
static ssize_t synth_events_write(struct file *file,
				  const char __user *buffer,
				  size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       create_or_delete_synth_event);
}

static const struct file_operations synth_events_fops = {
	.open		= synth_events_open,
	.write		= synth_events_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
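/*
 * The synth_events_fops above back the tracefs 'synthetic_events' file.
 * Typical usage from user space (see Documentation/trace/histogram.rst):
 *
 *	# echo 'wakeup_latency u64 lat; pid_t pid; int prio' >> \
 *		/sys/kernel/tracing/synthetic_events
 *	# echo '!wakeup_latency u64 lat; pid_t pid; int prio' >> \
 *		/sys/kernel/tracing/synthetic_events
 *
 * Each write is parsed by create_or_delete_synth_event() via
 * trace_parse_run_command().
 */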
static u64 hist_field_timestamp(struct hist_field *hist_field,
				struct tracing_map_elt *elt,
				struct ring_buffer_event *rbe,
				void *event)
{
	struct hist_trigger_data *hist_data = hist_field->hist_data;
	struct trace_array *tr = hist_data->event_file->tr;

	u64 ts = ring_buffer_event_time_stamp(rbe);

	if (hist_data->attrs->ts_in_usecs && trace_clock_in_ns(tr))
		ts = ns2usecs(ts);

	return ts;
}
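/*
 * Example: with a trigger that uses 'common_timestamp.usecs', the raw
 * ring-buffer timestamp (nanoseconds when the trace clock counts in ns)
 * is converted here, so a ts of 1234567890 ns is reported as 1234567 us.
 */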
static u64 hist_field_cpu(struct hist_field *hist_field,
			  struct tracing_map_elt *elt,
			  struct ring_buffer_event *rbe,
			  void *event)
{
	int cpu = smp_processor_id();

	return cpu;
}
/**
 * check_field_for_var_ref - Check if a VAR_REF field references a variable
 * @hist_field: The VAR_REF field to check
 * @var_data: The hist trigger that owns the variable
 * @var_idx: The trigger variable identifier
 *
 * Check the given VAR_REF field to see whether or not it references
 * the given variable associated with the given trigger.
 *
 * Return: The VAR_REF field if it does reference the variable, NULL if not
 */
static struct hist_field *
check_field_for_var_ref(struct hist_field *hist_field,
			struct hist_trigger_data *var_data,
			unsigned int var_idx)
{
	WARN_ON(!(hist_field && hist_field->flags & HIST_FIELD_FL_VAR_REF));

	if (hist_field && hist_field->var.idx == var_idx &&
	    hist_field->var.hist_data == var_data)
		return hist_field;

	return NULL;
}

/**
 * find_var_ref - Check if a trigger has a reference to a trigger variable
 * @hist_data: The hist trigger that might have a reference to the variable
 * @var_data: The hist trigger that owns the variable
 * @var_idx: The trigger variable identifier
 *
 * Check the list of var_refs[] on the first hist trigger to see
 * whether any of them are references to the variable on the second
 * trigger.
 *
 * Return: The VAR_REF field referencing the variable if so, NULL if not
 */
static struct hist_field *find_var_ref(struct hist_trigger_data *hist_data,
				       struct hist_trigger_data *var_data,
				       unsigned int var_idx)
{
	struct hist_field *hist_field;
	unsigned int i;

	for (i = 0; i < hist_data->n_var_refs; i++) {
		hist_field = hist_data->var_refs[i];
		if (check_field_for_var_ref(hist_field, var_data, var_idx))
			return hist_field;
	}

	return NULL;
}
/**
 * find_any_var_ref - Check if there is a reference to a given trigger variable
 * @hist_data: The hist trigger
 * @var_idx: The trigger variable identifier
 *
 * Check to see whether the given variable is currently referenced by
 * any other trigger.
 *
 * The trigger the variable is defined on is explicitly excluded - the
 * assumption being that a self-reference doesn't prevent a trigger
 * from being removed.
 *
 * Return: The VAR_REF field referencing the variable if so, NULL if not
 */
static struct hist_field *find_any_var_ref(struct hist_trigger_data *hist_data,
					   unsigned int var_idx)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_field *found = NULL;
	struct hist_var_data *var_data;

	list_for_each_entry(var_data, &tr->hist_vars, list) {
		if (var_data->hist_data == hist_data)
			continue;
		found = find_var_ref(var_data->hist_data, hist_data, var_idx);
		if (found)
			break;
	}

	return found;
}

/**
 * check_var_refs - Check if there is a reference to any of trigger's variables
 * @hist_data: The hist trigger
 *
 * A trigger can define one or more variables.  If any one of them is
 * currently referenced by any other trigger, this function will
 * determine that.
 *
 * Typically used to determine whether or not a trigger can be removed
 * - if there are any references to a trigger's variables, it cannot.
 *
 * Return: True if there is a reference to any of trigger's variables
 */
static bool check_var_refs(struct hist_trigger_data *hist_data)
{
	struct hist_field *field;
	bool found = false;
	int i;

	for_each_hist_field(i, hist_data) {
		field = hist_data->fields[i];
		if (field && field->flags & HIST_FIELD_FL_VAR) {
			if (find_any_var_ref(hist_data, field->var.idx)) {
				found = true;
				break;
			}
		}
	}

	return found;
}
static struct hist_var_data *find_hist_vars(struct hist_trigger_data *hist_data)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_var_data *var_data, *found = NULL;

	list_for_each_entry(var_data, &tr->hist_vars, list) {
		if (var_data->hist_data == hist_data) {
			found = var_data;
			break;
		}
	}

	return found;
}

static bool field_has_hist_vars(struct hist_field *hist_field,
				unsigned int level)
{
	int i;

	if (level > 3)
		return false;

	if (!hist_field)
		return false;

	if (hist_field->flags & HIST_FIELD_FL_VAR ||
	    hist_field->flags & HIST_FIELD_FL_VAR_REF)
		return true;

	for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++) {
		struct hist_field *operand;

		operand = hist_field->operands[i];
		if (field_has_hist_vars(operand, level + 1))
			return true;
	}

	return false;
}

static bool has_hist_vars(struct hist_trigger_data *hist_data)
{
	struct hist_field *hist_field;
	int i;

	for_each_hist_field(i, hist_data) {
		hist_field = hist_data->fields[i];
		if (field_has_hist_vars(hist_field, 0))
			return true;
	}

	return false;
}
static int save_hist_vars(struct hist_trigger_data *hist_data)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_var_data *var_data;

	var_data = find_hist_vars(hist_data);
	if (var_data)
		return 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	var_data = kzalloc(sizeof(*var_data), GFP_KERNEL);
	if (!var_data) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	var_data->hist_data = hist_data;
	list_add(&var_data->list, &tr->hist_vars);

	return 0;
}

static void remove_hist_vars(struct hist_trigger_data *hist_data)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_var_data *var_data;

	var_data = find_hist_vars(hist_data);
	if (!var_data)
		return;

	if (WARN_ON(check_var_refs(hist_data)))
		return;

	list_del(&var_data->list);

	kfree(var_data);

	trace_array_put(tr);
}
static struct hist_field *find_var_field(struct hist_trigger_data *hist_data,
					 const char *var_name)
{
	struct hist_field *hist_field, *found = NULL;
	int i;

	for_each_hist_field(i, hist_data) {
		hist_field = hist_data->fields[i];
		if (hist_field && hist_field->flags & HIST_FIELD_FL_VAR &&
		    strcmp(hist_field->var.name, var_name) == 0) {
			found = hist_field;
			break;
		}
	}

	return found;
}

static struct hist_field *find_var(struct hist_trigger_data *hist_data,
				   struct trace_event_file *file,
				   const char *var_name)
{
	struct hist_trigger_data *test_data;
	struct event_trigger_data *test;
	struct hist_field *hist_field;

	hist_field = find_var_field(hist_data, var_name);
	if (hist_field)
		return hist_field;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			test_data = test->private_data;
			hist_field = find_var_field(test_data, var_name);
			if (hist_field)
				return hist_field;
		}
	}

	return NULL;
}
static struct trace_event_file *find_var_file(struct trace_array *tr,
					      char *system,
					      char *event_name,
					      char *var_name)
{
	struct hist_trigger_data *var_hist_data;
	struct hist_var_data *var_data;
	struct trace_event_file *file, *found = NULL;

	if (system)
		return find_event_file(tr, system, event_name);

	list_for_each_entry(var_data, &tr->hist_vars, list) {
		var_hist_data = var_data->hist_data;
		file = var_hist_data->event_file;
		if (file == found)
			continue;

		if (find_var_field(var_hist_data, var_name)) {
			if (found) {
				hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE, errpos(var_name));
				return NULL;
			}

			found = file;
		}
	}

	return found;
}

static struct hist_field *find_file_var(struct trace_event_file *file,
					const char *var_name)
{
	struct hist_trigger_data *test_data;
	struct event_trigger_data *test;
	struct hist_field *hist_field;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			test_data = test->private_data;
			hist_field = find_var_field(test_data, var_name);
			if (hist_field)
				return hist_field;
		}
	}

	return NULL;
}
static struct hist_field *
find_match_var(struct hist_trigger_data *hist_data, char *var_name)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_field *hist_field, *found = NULL;
	struct trace_event_file *file;
	unsigned int i;

	for (i = 0; i < hist_data->n_actions; i++) {
		struct action_data *data = hist_data->actions[i];

		if (data->handler == HANDLER_ONMATCH) {
			char *system = data->match_data.event_system;
			char *event_name = data->match_data.event;

			file = find_var_file(tr, system, event_name, var_name);
			if (!file)
				continue;
			hist_field = find_file_var(file, var_name);
			if (hist_field) {
				if (found) {
					hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE,
						 errpos(var_name));
					return ERR_PTR(-EINVAL);
				}

				found = hist_field;
			}
		}
	}
	return found;
}

static struct hist_field *find_event_var(struct hist_trigger_data *hist_data,
					 char *system,
					 char *event_name,
					 char *var_name)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_field *hist_field = NULL;
	struct trace_event_file *file;

	if (!system || !event_name) {
		hist_field = find_match_var(hist_data, var_name);
		if (IS_ERR(hist_field))
			return NULL;
		if (hist_field)
			return hist_field;
	}

	file = find_var_file(tr, system, event_name, var_name);
	if (!file)
		return NULL;

	hist_field = find_file_var(file, var_name);

	return hist_field;
}
static u64 hist_field_var_ref(struct hist_field *hist_field,
			      struct tracing_map_elt *elt,
			      struct ring_buffer_event *rbe,
			      void *event)
{
	struct hist_elt_data *elt_data;
	u64 var_val = 0;

	if (WARN_ON_ONCE(!elt))
		return var_val;

	elt_data = elt->private_data;
	var_val = elt_data->var_ref_vals[hist_field->var_ref_idx];

	return var_val;
}
static bool resolve_var_refs(struct hist_trigger_data *hist_data, void *key,
			     u64 *var_ref_vals, bool self)
{
	struct hist_trigger_data *var_data;
	struct tracing_map_elt *var_elt;
	struct hist_field *hist_field;
	unsigned int i, var_idx;
	bool resolved = true;
	u64 var_val = 0;

	for (i = 0; i < hist_data->n_var_refs; i++) {
		hist_field = hist_data->var_refs[i];
		var_idx = hist_field->var.idx;
		var_data = hist_field->var.hist_data;

		if (var_data == NULL) {
			resolved = false;
			break;
		}

		if ((self && var_data != hist_data) ||
		    (!self && var_data == hist_data))
			continue;

		var_elt = tracing_map_lookup(var_data->map, key);
		if (!var_elt) {
			resolved = false;
			break;
		}

		if (!tracing_map_var_set(var_elt, var_idx)) {
			resolved = false;
			break;
		}

		if (self || !hist_field->read_once)
			var_val = tracing_map_read_var(var_elt, var_idx);
		else
			var_val = tracing_map_read_var_once(var_elt, var_idx);

		var_ref_vals[i] = var_val;
	}

	return resolved;
}
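/*
 * Context for the resolution above: variable references are what make
 * inter-event latencies possible.  For example (adapted from
 * Documentation/trace/histogram.rst):
 *
 *	# echo 'hist:keys=pid:ts0=common_timestamp.usecs' >> \
 *		events/sched/sched_waking/trigger
 *	# echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0' >> \
 *		events/sched/sched_switch/trigger
 *
 * When sched_switch is hit, resolve_var_refs() looks up $ts0 in the
 * sched_waking histogram's tracing_map using the shared key value.
 */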
static const char *hist_field_name(struct hist_field *field,
				   unsigned int level)
{
	const char *field_name = "";

	if (level > 1)
		return field_name;

	if (field->field)
		field_name = field->field->name;
	else if (field->flags & HIST_FIELD_FL_LOG2 ||
		 field->flags & HIST_FIELD_FL_ALIAS)
		field_name = hist_field_name(field->operands[0], ++level);
	else if (field->flags & HIST_FIELD_FL_CPU)
		field_name = "cpu";
	else if (field->flags & HIST_FIELD_FL_EXPR ||
		 field->flags & HIST_FIELD_FL_VAR_REF) {
		if (field->system) {
			static char full_name[MAX_FILTER_STR_VAL];

			strcat(full_name, field->system);
			strcat(full_name, ".");
			strcat(full_name, field->event_name);
			strcat(full_name, ".");
			strcat(full_name, field->name);
			field_name = full_name;
		} else
			field_name = field->name;
	} else if (field->flags & HIST_FIELD_FL_TIMESTAMP)
		field_name = "common_timestamp";

	if (field_name == NULL)
		field_name = "";

	return field_name;
}
static hist_field_fn_t select_value_fn(int field_size, int field_is_signed)
{
	hist_field_fn_t fn = NULL;

	switch (field_size) {
	case 8:
		if (field_is_signed)
			fn = hist_field_s64;
		else
			fn = hist_field_u64;
		break;
	case 4:
		if (field_is_signed)
			fn = hist_field_s32;
		else
			fn = hist_field_u32;
		break;
	case 2:
		if (field_is_signed)
			fn = hist_field_s16;
		else
			fn = hist_field_u16;
		break;
	case 1:
		if (field_is_signed)
			fn = hist_field_s8;
		else
			fn = hist_field_u8;
		break;
	}

	return fn;
}
static int parse_map_size(char *str)
{
	unsigned long size, map_bits;
	int ret;

	strsep(&str, "=");
	if (!str) {
		ret = -EINVAL;
		goto out;
	}

	ret = kstrtoul(str, 0, &size);
	if (ret)
		goto out;

	map_bits = ilog2(roundup_pow_of_two(size));
	if (map_bits < TRACING_MAP_BITS_MIN ||
	    map_bits > TRACING_MAP_BITS_MAX)
		ret = -EINVAL;
	else
		ret = map_bits;
 out:
	return ret;
}
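/*
 * Example: 'size=2048' yields map_bits = ilog2(roundup_pow_of_two(2048))
 * = 11, while 'size=3000' is rounded up to 4096 and yields 12; results
 * outside [TRACING_MAP_BITS_MIN, TRACING_MAP_BITS_MAX] are rejected.
 */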
static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs)
{
	unsigned int i;

	if (!attrs)
		return;

	for (i = 0; i < attrs->n_assignments; i++)
		kfree(attrs->assignment_str[i]);

	for (i = 0; i < attrs->n_actions; i++)
		kfree(attrs->action_str[i]);

	kfree(attrs->name);
	kfree(attrs->sort_key_str);
	kfree(attrs->keys_str);
	kfree(attrs->vals_str);
	kfree(attrs->clock);
	kfree(attrs);
}

static int parse_action(char *str, struct hist_trigger_attrs *attrs)
{
	int ret = 0;

	if (attrs->n_actions >= HIST_ACTIONS_MAX)
		return ret;

	if ((str_has_prefix(str, "onmatch(")) ||
	    (str_has_prefix(str, "onmax(")) ||
	    (str_has_prefix(str, "onchange("))) {
		attrs->action_str[attrs->n_actions] = kstrdup(str, GFP_KERNEL);
		if (!attrs->action_str[attrs->n_actions]) {
			ret = -ENOMEM;
			return ret;
		}
		attrs->n_actions++;
	}
	return ret;
}
static int parse_assignment(struct trace_array *tr,
			    char *str, struct hist_trigger_attrs *attrs)
{
	int ret = 0;

	if ((str_has_prefix(str, "key=")) ||
	    (str_has_prefix(str, "keys="))) {
		attrs->keys_str = kstrdup(str, GFP_KERNEL);
		if (!attrs->keys_str) {
			ret = -ENOMEM;
			goto out;
		}
	} else if ((str_has_prefix(str, "val=")) ||
		   (str_has_prefix(str, "vals=")) ||
		   (str_has_prefix(str, "values="))) {
		attrs->vals_str = kstrdup(str, GFP_KERNEL);
		if (!attrs->vals_str) {
			ret = -ENOMEM;
			goto out;
		}
	} else if (str_has_prefix(str, "sort=")) {
		attrs->sort_key_str = kstrdup(str, GFP_KERNEL);
		if (!attrs->sort_key_str) {
			ret = -ENOMEM;
			goto out;
		}
	} else if (str_has_prefix(str, "name=")) {
		attrs->name = kstrdup(str, GFP_KERNEL);
		if (!attrs->name) {
			ret = -ENOMEM;
			goto out;
		}
	} else if (str_has_prefix(str, "clock=")) {
		strsep(&str, "=");
		if (!str) {
			ret = -EINVAL;
			goto out;
		}

		str = strstrip(str);
		attrs->clock = kstrdup(str, GFP_KERNEL);
		if (!attrs->clock) {
			ret = -ENOMEM;
			goto out;
		}
	} else if (str_has_prefix(str, "size=")) {
		int map_bits = parse_map_size(str);

		if (map_bits < 0) {
			ret = map_bits;
			goto out;
		}
		attrs->map_bits = map_bits;
	} else {
		char *assignment;

		if (attrs->n_assignments == TRACING_MAP_VARS_MAX) {
			hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(str));
			ret = -EINVAL;
			goto out;
		}

		assignment = kstrdup(str, GFP_KERNEL);
		if (!assignment) {
			ret = -ENOMEM;
			goto out;
		}

		attrs->assignment_str[attrs->n_assignments++] = assignment;
	}
 out:
	return ret;
}
static struct hist_trigger_attrs *
parse_hist_trigger_attrs(struct trace_array *tr, char *trigger_str)
{
	struct hist_trigger_attrs *attrs;
	int ret = 0;

	attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return ERR_PTR(-ENOMEM);

	while (trigger_str) {
		char *str = strsep(&trigger_str, ":");

		if (strchr(str, '=')) {
			ret = parse_assignment(tr, str, attrs);
			if (ret)
				goto free;
		} else if (strcmp(str, "pause") == 0)
			attrs->pause = true;
		else if ((strcmp(str, "cont") == 0) ||
			 (strcmp(str, "continue") == 0))
			attrs->cont = true;
		else if (strcmp(str, "clear") == 0)
			attrs->clear = true;
		else {
			ret = parse_action(str, attrs);
			if (ret)
				goto free;
		}
	}

	if (!attrs->keys_str) {
		ret = -EINVAL;
		goto free;
	}

	if (!attrs->clock) {
		attrs->clock = kstrdup("global", GFP_KERNEL);
		if (!attrs->clock) {
			ret = -ENOMEM;
			goto free;
		}
	}

	return attrs;
 free:
	destroy_hist_trigger_attrs(attrs);

	return ERR_PTR(ret);
}
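/*
 * Example of what the parser above consumes: a trigger string such as
 *
 *	keys=next_pid:vals=hitcount:sort=hitcount:size=2048:pause
 *
 * is split on ':'; each 'name=value' token goes through
 * parse_assignment(), 'pause'/'cont'/'clear' set the corresponding
 * flags, and onmatch()/onmax()/onchange() strings are saved by
 * parse_action() for later processing.
 */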
static inline void save_comm(char *comm, struct task_struct *task)
{
	if (!task->pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(task->pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	strncpy(comm, task->comm, TASK_COMM_LEN);
}
static void hist_elt_data_free(struct hist_elt_data *elt_data)
{
	unsigned int i;

	for (i = 0; i < SYNTH_FIELDS_MAX; i++)
		kfree(elt_data->field_var_str[i]);

	kfree(elt_data->comm);
	kfree(elt_data);
}

static void hist_trigger_elt_data_free(struct tracing_map_elt *elt)
{
	struct hist_elt_data *elt_data = elt->private_data;

	hist_elt_data_free(elt_data);
}
static int hist_trigger_elt_data_alloc(struct tracing_map_elt *elt)
{
	struct hist_trigger_data *hist_data = elt->map->private_data;
	unsigned int size = TASK_COMM_LEN;
	struct hist_elt_data *elt_data;
	struct hist_field *key_field;
	unsigned int i, n_str;

	elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
	if (!elt_data)
		return -ENOMEM;

	for_each_hist_key_field(i, hist_data) {
		key_field = hist_data->fields[i];

		if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
			elt_data->comm = kzalloc(size, GFP_KERNEL);
			if (!elt_data->comm) {
				kfree(elt_data);
				return -ENOMEM;
			}
			break;
		}
	}

	n_str = hist_data->n_field_var_str + hist_data->n_save_var_str;

	size = STR_VAR_LEN_MAX;

	for (i = 0; i < n_str; i++) {
		elt_data->field_var_str[i] = kzalloc(size, GFP_KERNEL);
		if (!elt_data->field_var_str[i]) {
			hist_elt_data_free(elt_data);
			return -ENOMEM;
		}
	}

	elt->private_data = elt_data;

	return 0;
}

static void hist_trigger_elt_data_init(struct tracing_map_elt *elt)
{
	struct hist_elt_data *elt_data = elt->private_data;

	if (elt_data->comm)
		save_comm(elt_data->comm, current);
}

static const struct tracing_map_ops hist_trigger_elt_data_ops = {
	.elt_alloc	= hist_trigger_elt_data_alloc,
	.elt_free	= hist_trigger_elt_data_free,
	.elt_init	= hist_trigger_elt_data_init,
};
static const char *get_hist_field_flags(struct hist_field *hist_field)
{
	const char *flags_str = NULL;

	if (hist_field->flags & HIST_FIELD_FL_HEX)
		flags_str = "hex";
	else if (hist_field->flags & HIST_FIELD_FL_SYM)
		flags_str = "sym";
	else if (hist_field->flags & HIST_FIELD_FL_SYM_OFFSET)
		flags_str = "sym-offset";
	else if (hist_field->flags & HIST_FIELD_FL_EXECNAME)
		flags_str = "execname";
	else if (hist_field->flags & HIST_FIELD_FL_SYSCALL)
		flags_str = "syscall";
	else if (hist_field->flags & HIST_FIELD_FL_LOG2)
		flags_str = "log2";
	else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP_USECS)
		flags_str = "usecs";

	return flags_str;
}

static void expr_field_str(struct hist_field *field, char *expr)
{
	if (field->flags & HIST_FIELD_FL_VAR_REF)
		strcat(expr, "$");

	strcat(expr, hist_field_name(field, 0));

	if (field->flags && !(field->flags & HIST_FIELD_FL_VAR_REF)) {
		const char *flags_str = get_hist_field_flags(field);

		if (flags_str) {
			strcat(expr, ".");
			strcat(expr, flags_str);
		}
	}
}
static char *expr_str(struct hist_field *field, unsigned int level)
{
	char *expr;

	if (level > 1)
		return NULL;

	expr = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
	if (!expr)
		return NULL;

	if (!field->operands[0]) {
		expr_field_str(field, expr);
		return expr;
	}

	if (field->operator == FIELD_OP_UNARY_MINUS) {
		char *subexpr;

		strcat(expr, "-(");
		subexpr = expr_str(field->operands[0], ++level);
		if (!subexpr) {
			kfree(expr);
			return NULL;
		}
		strcat(expr, subexpr);
		strcat(expr, ")");

		kfree(subexpr);

		return expr;
	}

	expr_field_str(field->operands[0], expr);

	switch (field->operator) {
	case FIELD_OP_MINUS:
		strcat(expr, "-");
		break;
	case FIELD_OP_PLUS:
		strcat(expr, "+");
		break;
	default:
		kfree(expr);
		return NULL;
	}

	expr_field_str(field->operands[1], expr);

	return expr;
}

static int contains_operator(char *str)
{
	enum field_op_id field_op = FIELD_OP_NONE;
	char *op;

	op = strpbrk(str, "+-");
	if (!op)
		return FIELD_OP_NONE;

	switch (*op) {
	case '-':
		if (*str == '-')
			field_op = FIELD_OP_UNARY_MINUS;
		else
			field_op = FIELD_OP_MINUS;
		break;
	case '+':
		field_op = FIELD_OP_PLUS;
		break;
	default:
		break;
	}

	return field_op;
}
static void __destroy_hist_field(struct hist_field *hist_field)
{
	kfree(hist_field->var.name);
	kfree(hist_field->name);
	kfree(hist_field->type);

	kfree(hist_field);
}

static void destroy_hist_field(struct hist_field *hist_field,
			       unsigned int level)
{
	unsigned int i;

	if (level > 3)
		return;

	if (!hist_field)
		return;

	if (hist_field->flags & HIST_FIELD_FL_VAR_REF)
		return; /* var refs will be destroyed separately */

	for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++)
		destroy_hist_field(hist_field->operands[i], level + 1);

	__destroy_hist_field(hist_field);
}
2410 static struct hist_field
*create_hist_field(struct hist_trigger_data
*hist_data
,
2411 struct ftrace_event_field
*field
,
2412 unsigned long flags
,
2415 struct hist_field
*hist_field
;
2417 if (field
&& is_function_field(field
))
2420 hist_field
= kzalloc(sizeof(struct hist_field
), GFP_KERNEL
);
2424 hist_field
->hist_data
= hist_data
;
2426 if (flags
& HIST_FIELD_FL_EXPR
|| flags
& HIST_FIELD_FL_ALIAS
)
2427 goto out
; /* caller will populate */
2429 if (flags
& HIST_FIELD_FL_VAR_REF
) {
2430 hist_field
->fn
= hist_field_var_ref
;
2434 if (flags
& HIST_FIELD_FL_HITCOUNT
) {
2435 hist_field
->fn
= hist_field_counter
;
2436 hist_field
->size
= sizeof(u64
);
2437 hist_field
->type
= kstrdup("u64", GFP_KERNEL
);
2438 if (!hist_field
->type
)
2443 if (flags
& HIST_FIELD_FL_STACKTRACE
) {
2444 hist_field
->fn
= hist_field_none
;
2448 if (flags
& HIST_FIELD_FL_LOG2
) {
2449 unsigned long fl
= flags
& ~HIST_FIELD_FL_LOG2
;
2450 hist_field
->fn
= hist_field_log2
;
2451 hist_field
->operands
[0] = create_hist_field(hist_data
, field
, fl
, NULL
);
2452 hist_field
->size
= hist_field
->operands
[0]->size
;
2453 hist_field
->type
= kstrdup(hist_field
->operands
[0]->type
, GFP_KERNEL
);
2454 if (!hist_field
->type
)
2459 if (flags
& HIST_FIELD_FL_TIMESTAMP
) {
2460 hist_field
->fn
= hist_field_timestamp
;
2461 hist_field
->size
= sizeof(u64
);
2462 hist_field
->type
= kstrdup("u64", GFP_KERNEL
);
2463 if (!hist_field
->type
)
2468 if (flags
& HIST_FIELD_FL_CPU
) {
2469 hist_field
->fn
= hist_field_cpu
;
2470 hist_field
->size
= sizeof(int);
2471 hist_field
->type
= kstrdup("unsigned int", GFP_KERNEL
);
2472 if (!hist_field
->type
)
2477 if (WARN_ON_ONCE(!field
))
2480 if (is_string_field(field
)) {
2481 flags
|= HIST_FIELD_FL_STRING
;
2483 hist_field
->size
= MAX_FILTER_STR_VAL
;
2484 hist_field
->type
= kstrdup(field
->type
, GFP_KERNEL
);
2485 if (!hist_field
->type
)
2488 if (field
->filter_type
== FILTER_STATIC_STRING
)
2489 hist_field
->fn
= hist_field_string
;
2490 else if (field
->filter_type
== FILTER_DYN_STRING
)
2491 hist_field
->fn
= hist_field_dynstring
;
2493 hist_field
->fn
= hist_field_pstring
;
2495 hist_field
->size
= field
->size
;
2496 hist_field
->is_signed
= field
->is_signed
;
2497 hist_field
->type
= kstrdup(field
->type
, GFP_KERNEL
);
2498 if (!hist_field
->type
)
2501 hist_field
->fn
= select_value_fn(field
->size
,
2503 if (!hist_field
->fn
) {
2504 destroy_hist_field(hist_field
, 0);
2509 hist_field
->field
= field
;
2510 hist_field
->flags
= flags
;
2513 hist_field
->var
.name
= kstrdup(var_name
, GFP_KERNEL
);
2514 if (!hist_field
->var
.name
)
2520 destroy_hist_field(hist_field
, 0);
static void destroy_hist_fields(struct hist_trigger_data *hist_data)
{
	unsigned int i;

	for (i = 0; i < HIST_FIELDS_MAX; i++) {
		if (hist_data->fields[i]) {
			destroy_hist_field(hist_data->fields[i], 0);
			hist_data->fields[i] = NULL;
		}
	}

	for (i = 0; i < hist_data->n_var_refs; i++) {
		WARN_ON(!(hist_data->var_refs[i]->flags & HIST_FIELD_FL_VAR_REF));
		__destroy_hist_field(hist_data->var_refs[i]);
		hist_data->var_refs[i] = NULL;
	}
}
static int init_var_ref(struct hist_field *ref_field,
			struct hist_field *var_field,
			char *system, char *event_name)
{
	int err = 0;

	ref_field->var.idx = var_field->var.idx;
	ref_field->var.hist_data = var_field->hist_data;
	ref_field->size = var_field->size;
	ref_field->is_signed = var_field->is_signed;
	ref_field->flags |= var_field->flags &
		(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);

	if (system) {
		ref_field->system = kstrdup(system, GFP_KERNEL);
		if (!ref_field->system)
			return -ENOMEM;
	}

	if (event_name) {
		ref_field->event_name = kstrdup(event_name, GFP_KERNEL);
		if (!ref_field->event_name) {
			err = -ENOMEM;
			goto free;
		}
	}

	if (var_field->var.name) {
		ref_field->name = kstrdup(var_field->var.name, GFP_KERNEL);
		if (!ref_field->name) {
			err = -ENOMEM;
			goto free;
		}
	} else if (var_field->name) {
		ref_field->name = kstrdup(var_field->name, GFP_KERNEL);
		if (!ref_field->name) {
			err = -ENOMEM;
			goto free;
		}
	}

	ref_field->type = kstrdup(var_field->type, GFP_KERNEL);
	if (!ref_field->type) {
		err = -ENOMEM;
		goto free;
	}
 out:
	return err;
 free:
	kfree(ref_field->system);
	kfree(ref_field->event_name);
	kfree(ref_field->name);

	goto out;
}
/**
 * create_var_ref - Create a variable reference and attach it to trigger
 * @hist_data: The trigger that will be referencing the variable
 * @var_field: The VAR field to create a reference to
 * @system: The optional system string
 * @event_name: The optional event_name string
 *
 * Given a variable hist_field, create a VAR_REF hist_field that
 * represents a reference to it.
 *
 * This function also adds the reference to the trigger that
 * now references the variable.
 *
 * Return: The VAR_REF field if successful, NULL if not
 */
static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data,
					 struct hist_field *var_field,
					 char *system, char *event_name)
{
	unsigned long flags = HIST_FIELD_FL_VAR_REF;
	struct hist_field *ref_field;

	ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL);
	if (ref_field) {
		if (init_var_ref(ref_field, var_field, system, event_name)) {
			destroy_hist_field(ref_field, 0);
			return NULL;
		}

		hist_data->var_refs[hist_data->n_var_refs] = ref_field;
		ref_field->var_ref_idx = hist_data->n_var_refs++;
	}

	return ref_field;
}

static bool is_var_ref(char *var_name)
{
	if (!var_name || strlen(var_name) < 2 || var_name[0] != '$')
		return false;

	return true;
}
static char *field_name_from_var(struct hist_trigger_data *hist_data,
				 char *var_name)
{
	char *name, *field;
	unsigned int i;

	for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) {
		name = hist_data->attrs->var_defs.name[i];

		if (strcmp(var_name, name) == 0) {
			field = hist_data->attrs->var_defs.expr[i];
			if (contains_operator(field) || is_var_ref(field))
				continue;
			return field;
		}
	}

	return NULL;
}

static char *local_field_var_ref(struct hist_trigger_data *hist_data,
				 char *system, char *event_name,
				 char *var_name)
{
	struct trace_event_call *call;

	if (system && event_name) {
		call = hist_data->event_file->event_call;

		if (strcmp(system, call->class->system) != 0)
			return NULL;

		if (strcmp(event_name, trace_event_name(call)) != 0)
			return NULL;
	}

	if (!!system != !!event_name)
		return NULL;

	if (!is_var_ref(var_name))
		return NULL;

	var_name++;

	return field_name_from_var(hist_data, var_name);
}
2689 static struct hist_field
*parse_var_ref(struct hist_trigger_data
*hist_data
,
2690 char *system
, char *event_name
,
2693 struct hist_field
*var_field
= NULL
, *ref_field
= NULL
;
2694 struct trace_array
*tr
= hist_data
->event_file
->tr
;
2696 if (!is_var_ref(var_name
))
2701 var_field
= find_event_var(hist_data
, system
, event_name
, var_name
);
2703 ref_field
= create_var_ref(hist_data
, var_field
,
2704 system
, event_name
);
2707 hist_err(tr
, HIST_ERR_VAR_NOT_FOUND
, errpos(var_name
));
static struct ftrace_event_field *
parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
	    char *field_str, unsigned long *flags)
{
	struct ftrace_event_field *field = NULL;
	char *field_name, *modifier, *str;
	struct trace_array *tr = file->tr;

	modifier = str = kstrdup(field_str, GFP_KERNEL);
	if (!modifier)
		return ERR_PTR(-ENOMEM);

	field_name = strsep(&modifier, ".");
	if (modifier) {
		if (strcmp(modifier, "hex") == 0)
			*flags |= HIST_FIELD_FL_HEX;
		else if (strcmp(modifier, "sym") == 0)
			*flags |= HIST_FIELD_FL_SYM;
		else if (strcmp(modifier, "sym-offset") == 0)
			*flags |= HIST_FIELD_FL_SYM_OFFSET;
		else if ((strcmp(modifier, "execname") == 0) &&
			 (strcmp(field_name, "common_pid") == 0))
			*flags |= HIST_FIELD_FL_EXECNAME;
		else if (strcmp(modifier, "syscall") == 0)
			*flags |= HIST_FIELD_FL_SYSCALL;
		else if (strcmp(modifier, "log2") == 0)
			*flags |= HIST_FIELD_FL_LOG2;
		else if (strcmp(modifier, "usecs") == 0)
			*flags |= HIST_FIELD_FL_TIMESTAMP_USECS;
		else {
			hist_err(tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(modifier));
			field = ERR_PTR(-EINVAL);
			goto out;
		}
	}

	if (strcmp(field_name, "common_timestamp") == 0) {
		*flags |= HIST_FIELD_FL_TIMESTAMP;
		hist_data->enable_timestamps = true;
		if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS)
			hist_data->attrs->ts_in_usecs = true;
	} else if (strcmp(field_name, "cpu") == 0)
		*flags |= HIST_FIELD_FL_CPU;
	else {
		field = trace_find_event_field(file->event_call, field_name);
		if (!field || !field->size) {
			hist_err(tr, HIST_ERR_FIELD_NOT_FOUND, errpos(field_name));
			field = ERR_PTR(-EINVAL);
			goto out;
		}
	}
 out:
	kfree(str);

	return field;
}
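/*
 * Illustrative usage note (not part of the original source): the field
 * modifiers handled by parse_field() above correspond to trigger
 * specifications such as the following, drawn from
 * Documentation/trace/histogram.rst (the event and field names here are
 * examples only):
 *
 *   # echo 'hist:keys=call_site.sym:vals=bytes_req' > \
 *         events/kmem/kmalloc/trigger
 *   # echo 'hist:keys=id.syscall:vals=hitcount' > \
 *         events/raw_syscalls/sys_enter/trigger
 */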
static struct hist_field *create_alias(struct hist_trigger_data *hist_data,
				       struct hist_field *var_ref,
				       char *var_name)
{
	struct hist_field *alias = NULL;
	unsigned long flags = HIST_FIELD_FL_ALIAS | HIST_FIELD_FL_VAR;

	alias = create_hist_field(hist_data, NULL, flags, var_name);
	if (!alias)
		return NULL;

	alias->fn = var_ref->fn;
	alias->operands[0] = var_ref;

	if (init_var_ref(alias, var_ref, var_ref->system, var_ref->event_name)) {
		destroy_hist_field(alias, 0);
		return NULL;
	}

	return alias;
}

static struct hist_field *parse_atom(struct hist_trigger_data *hist_data,
				     struct trace_event_file *file, char *str,
				     unsigned long *flags, char *var_name)
{
	char *s, *ref_system = NULL, *ref_event = NULL, *ref_var = str;
	struct ftrace_event_field *field = NULL;
	struct hist_field *hist_field = NULL;
	int ret = 0;

	s = strchr(str, '.');
	if (s) {
		s = strchr(++s, '.');
		if (s) {
			ref_system = strsep(&str, ".");
			if (!str) {
				ret = -EINVAL;
				goto out;
			}
			ref_event = strsep(&str, ".");
			if (!str) {
				ret = -EINVAL;
				goto out;
			}
			ref_var = str;
		}
	}

	s = local_field_var_ref(hist_data, ref_system, ref_event, ref_var);
	if (!s) {
		hist_field = parse_var_ref(hist_data, ref_system,
					   ref_event, ref_var);
		if (hist_field) {
			if (var_name) {
				hist_field = create_alias(hist_data, hist_field, var_name);
				if (!hist_field) {
					ret = -ENOMEM;
					goto out;
				}
			}
			return hist_field;
		}
	} else
		str = s;

	field = parse_field(hist_data, file, str, flags);
	if (IS_ERR(field)) {
		ret = PTR_ERR(field);
		goto out;
	}

	hist_field = create_hist_field(hist_data, field, *flags, var_name);
	if (!hist_field) {
		ret = -ENOMEM;
		goto out;
	}

	return hist_field;
 out:
	return ERR_PTR(ret);
}
static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
				     struct trace_event_file *file,
				     char *str, unsigned long flags,
				     char *var_name, unsigned int level);

static struct hist_field *parse_unary(struct hist_trigger_data *hist_data,
				      struct trace_event_file *file,
				      char *str, unsigned long flags,
				      char *var_name, unsigned int level)
{
	struct hist_field *operand1, *expr = NULL;
	unsigned long operand_flags;
	int ret = 0;
	char *s;

	/* we support only -(xxx) i.e. explicit parens required */

	if (level > 3) {
		hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str));
		ret = -EINVAL;
		goto free;
	}

	str++; /* skip leading '-' */

	s = strchr(str, '(');
	if (s)
		str++;
	else {
		ret = -EINVAL;
		goto free;
	}

	s = strrchr(str, ')');
	if (s)
		*s = '\0';
	else {
		ret = -EINVAL; /* no closing ')' */
		goto free;
	}

	flags |= HIST_FIELD_FL_EXPR;
	expr = create_hist_field(hist_data, NULL, flags, var_name);
	if (!expr) {
		ret = -ENOMEM;
		goto free;
	}

	operand_flags = 0;
	operand1 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level);
	if (IS_ERR(operand1)) {
		ret = PTR_ERR(operand1);
		goto free;
	}

	expr->flags |= operand1->flags &
		(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
	expr->fn = hist_field_unary_minus;
	expr->operands[0] = operand1;
	expr->operator = FIELD_OP_UNARY_MINUS;
	expr->name = expr_str(expr, 0);
	expr->type = kstrdup(operand1->type, GFP_KERNEL);
	if (!expr->type) {
		ret = -ENOMEM;
		goto free;
	}

	return expr;
 free:
	destroy_hist_field(expr, 0);
	return ERR_PTR(ret);
}
static int check_expr_operands(struct trace_array *tr,
			       struct hist_field *operand1,
			       struct hist_field *operand2)
{
	unsigned long operand1_flags = operand1->flags;
	unsigned long operand2_flags = operand2->flags;

	if ((operand1_flags & HIST_FIELD_FL_VAR_REF) ||
	    (operand1_flags & HIST_FIELD_FL_ALIAS)) {
		struct hist_field *var;

		var = find_var_field(operand1->var.hist_data, operand1->name);
		if (!var)
			return -EINVAL;
		operand1_flags = var->flags;
	}

	if ((operand2_flags & HIST_FIELD_FL_VAR_REF) ||
	    (operand2_flags & HIST_FIELD_FL_ALIAS)) {
		struct hist_field *var;

		var = find_var_field(operand2->var.hist_data, operand2->name);
		if (!var)
			return -EINVAL;
		operand2_flags = var->flags;
	}

	if ((operand1_flags & HIST_FIELD_FL_TIMESTAMP_USECS) !=
	    (operand2_flags & HIST_FIELD_FL_TIMESTAMP_USECS)) {
		hist_err(tr, HIST_ERR_TIMESTAMP_MISMATCH, 0);
		return -EINVAL;
	}

	return 0;
}
static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
				     struct trace_event_file *file,
				     char *str, unsigned long flags,
				     char *var_name, unsigned int level)
{
	struct hist_field *operand1 = NULL, *operand2 = NULL, *expr = NULL;
	unsigned long operand_flags;
	int field_op, ret = -EINVAL;
	char *sep, *operand1_str;

	if (level > 3) {
		hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str));
		return ERR_PTR(-EINVAL);
	}

	field_op = contains_operator(str);

	if (field_op == FIELD_OP_NONE)
		return parse_atom(hist_data, file, str, &flags, var_name);

	if (field_op == FIELD_OP_UNARY_MINUS)
		return parse_unary(hist_data, file, str, flags, var_name, ++level);

	switch (field_op) {
	case FIELD_OP_MINUS:
		sep = "-";
		break;
	case FIELD_OP_PLUS:
		sep = "+";
		break;
	default:
		goto free;
	}

	operand1_str = strsep(&str, sep);
	if (!operand1_str || !str)
		goto free;

	operand_flags = 0;
	operand1 = parse_atom(hist_data, file, operand1_str,
			      &operand_flags, NULL);
	if (IS_ERR(operand1)) {
		ret = PTR_ERR(operand1);
		operand1 = NULL;
		goto free;
	}

	/* rest of string could be another expression e.g. b+c in a+b+c */
	operand_flags = 0;
	operand2 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level);
	if (IS_ERR(operand2)) {
		ret = PTR_ERR(operand2);
		operand2 = NULL;
		goto free;
	}

	ret = check_expr_operands(file->tr, operand1, operand2);
	if (ret)
		goto free;

	flags |= HIST_FIELD_FL_EXPR;

	flags |= operand1->flags &
		(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);

	expr = create_hist_field(hist_data, NULL, flags, var_name);
	if (!expr) {
		ret = -ENOMEM;
		goto free;
	}

	operand1->read_once = true;
	operand2->read_once = true;

	expr->operands[0] = operand1;
	expr->operands[1] = operand2;
	expr->operator = field_op;
	expr->name = expr_str(expr, 0);
	expr->type = kstrdup(operand1->type, GFP_KERNEL);
	if (!expr->type) {
		ret = -ENOMEM;
		goto free;
	}

	switch (field_op) {
	case FIELD_OP_MINUS:
		expr->fn = hist_field_minus;
		break;
	case FIELD_OP_PLUS:
		expr->fn = hist_field_plus;
		break;
	default:
		ret = -EINVAL;
		goto free;
	}

	return expr;
 free:
	destroy_hist_field(operand1, 0);
	destroy_hist_field(operand2, 0);
	destroy_hist_field(expr, 0);

	return ERR_PTR(ret);
}
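/*
 * Illustrative examples (not part of the original source) of expressions
 * accepted by parse_expr(), assuming a per-key variable $ts0 was defined
 * earlier on the matching event:
 *
 *   wakeup_lat=common_timestamp.usecs-$ts0
 *   vals=bytes_req+bytes_alloc
 *
 * Only '+' and '-' (plus unary minus on a parenthesized subexpression)
 * are handled, with at most three levels of nesting.
 */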
static char *find_trigger_filter(struct hist_trigger_data *hist_data,
				 struct trace_event_file *file)
{
	struct event_trigger_data *test;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (test->private_data == hist_data)
				return test->filter_str;
		}
	}

	return NULL;
}

static struct event_command trigger_hist_cmd;
static int event_hist_trigger_func(struct event_command *cmd_ops,
				   struct trace_event_file *file,
				   char *glob, char *cmd, char *param);
static bool compatible_keys(struct hist_trigger_data *target_hist_data,
			    struct hist_trigger_data *hist_data,
			    unsigned int n_keys)
{
	struct hist_field *target_hist_field, *hist_field;
	unsigned int n, i, j;

	if (hist_data->n_fields - hist_data->n_vals != n_keys)
		return false;

	i = hist_data->n_vals;
	j = target_hist_data->n_vals;

	for (n = 0; n < n_keys; n++) {
		hist_field = hist_data->fields[i + n];
		target_hist_field = target_hist_data->fields[j + n];

		if (strcmp(hist_field->type, target_hist_field->type) != 0)
			return false;
		if (hist_field->size != target_hist_field->size)
			return false;
		if (hist_field->is_signed != target_hist_field->is_signed)
			return false;
	}

	return true;
}
static struct hist_trigger_data *
find_compatible_hist(struct hist_trigger_data *target_hist_data,
		     struct trace_event_file *file)
{
	struct hist_trigger_data *hist_data;
	struct event_trigger_data *test;
	unsigned int n_keys;

	n_keys = target_hist_data->n_fields - target_hist_data->n_vals;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			hist_data = test->private_data;

			if (compatible_keys(target_hist_data, hist_data, n_keys))
				return hist_data;
		}
	}

	return NULL;
}

static struct trace_event_file *event_file(struct trace_array *tr,
					   char *system, char *event_name)
{
	struct trace_event_file *file;

	file = __find_event_file(tr, system, event_name);
	if (!file)
		return ERR_PTR(-EINVAL);

	return file;
}
static struct hist_field *
find_synthetic_field_var(struct hist_trigger_data *target_hist_data,
			 char *system, char *event_name, char *field_name)
{
	struct hist_field *event_var;
	char *synthetic_name;

	synthetic_name = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
	if (!synthetic_name)
		return ERR_PTR(-ENOMEM);

	strcpy(synthetic_name, "synthetic_");
	strcat(synthetic_name, field_name);

	event_var = find_event_var(target_hist_data, system, event_name, synthetic_name);

	kfree(synthetic_name);

	return event_var;
}
/**
 * create_field_var_hist - Automatically create a histogram and var for a field
 * @target_hist_data: The target hist trigger
 * @subsys_name: Optional subsystem name
 * @event_name: Optional event name
 * @field_name: The name of the field (and the resulting variable)
 *
 * Hist trigger actions fetch data from variables, not directly from
 * events. However, for convenience, users are allowed to directly
 * specify an event field in an action, which will be automatically
 * converted into a variable on their behalf.
 *
 * If a user specifies a field on an event that isn't the event the
 * histogram is currently being defined on (the target event histogram),
 * the only way that can be accomplished is if a new hist trigger is
 * created and the field variable defined on that.
 *
 * This function creates a new histogram compatible with the target
 * event (meaning a histogram with the same key as the target
 * histogram), and creates a variable for the specified field, but
 * with 'synthetic_' prepended to the variable name in order to avoid
 * collision with normal field variables.
 *
 * Return: The variable created for the field.
 */
static struct hist_field *
create_field_var_hist(struct hist_trigger_data *target_hist_data,
		      char *subsys_name, char *event_name, char *field_name)
{
	struct trace_array *tr = target_hist_data->event_file->tr;
	struct hist_field *event_var = ERR_PTR(-EINVAL);
	struct hist_trigger_data *hist_data;
	unsigned int i, n, first = true;
	struct field_var_hist *var_hist;
	struct trace_event_file *file;
	struct hist_field *key_field;
	char *saved_filter;
	char *cmd;
	int ret;

	if (target_hist_data->n_field_var_hists >= SYNTH_FIELDS_MAX) {
		hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name));
		return ERR_PTR(-EINVAL);
	}

	file = event_file(tr, subsys_name, event_name);

	if (IS_ERR(file)) {
		hist_err(tr, HIST_ERR_EVENT_FILE_NOT_FOUND, errpos(field_name));
		ret = PTR_ERR(file);
		return ERR_PTR(ret);
	}

	/*
	 * Look for a histogram compatible with target. We'll use the
	 * found histogram specification to create a new matching
	 * histogram with our variable on it. target_hist_data is not
	 * yet a registered histogram so we can't use that.
	 */
	hist_data = find_compatible_hist(target_hist_data, file);
	if (!hist_data) {
		hist_err(tr, HIST_ERR_HIST_NOT_FOUND, errpos(field_name));
		return ERR_PTR(-EINVAL);
	}

	/* See if a synthetic field variable has already been created */
	event_var = find_synthetic_field_var(target_hist_data, subsys_name,
					     event_name, field_name);
	if (!IS_ERR_OR_NULL(event_var))
		return event_var;

	var_hist = kzalloc(sizeof(*var_hist), GFP_KERNEL);
	if (!var_hist)
		return ERR_PTR(-ENOMEM);

	cmd = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
	if (!cmd) {
		kfree(var_hist);
		return ERR_PTR(-ENOMEM);
	}

	/* Use the same keys as the compatible histogram */
	strcat(cmd, "keys=");

	for_each_hist_key_field(i, hist_data) {
		key_field = hist_data->fields[i];
		if (!first)
			strcat(cmd, ",");
		strcat(cmd, key_field->field->name);
		first = false;
	}

	/* Create the synthetic field variable specification */
	strcat(cmd, ":synthetic_");
	strcat(cmd, field_name);
	strcat(cmd, "=");
	strcat(cmd, field_name);

	/* Use the same filter as the compatible histogram */
	saved_filter = find_trigger_filter(hist_data, file);
	if (saved_filter) {
		strcat(cmd, " if ");
		strcat(cmd, saved_filter);
	}

	var_hist->cmd = kstrdup(cmd, GFP_KERNEL);
	if (!var_hist->cmd) {
		kfree(cmd);
		kfree(var_hist);
		return ERR_PTR(-ENOMEM);
	}

	/* Save the compatible histogram information */
	var_hist->hist_data = hist_data;

	/* Create the new histogram with our variable */
	ret = event_hist_trigger_func(&trigger_hist_cmd, file,
				      "", "hist", cmd);
	if (ret) {
		kfree(cmd);
		kfree(var_hist->cmd);
		kfree(var_hist);
		hist_err(tr, HIST_ERR_HIST_CREATE_FAIL, errpos(field_name));
		return ERR_PTR(ret);
	}

	kfree(cmd);

	/* If we can't find the variable, something went wrong */
	event_var = find_synthetic_field_var(target_hist_data, subsys_name,
					     event_name, field_name);
	if (IS_ERR_OR_NULL(event_var)) {
		kfree(var_hist->cmd);
		kfree(var_hist);
		hist_err(tr, HIST_ERR_SYNTH_VAR_NOT_FOUND, errpos(field_name));
		return ERR_PTR(-EINVAL);
	}

	n = target_hist_data->n_field_var_hists;
	target_hist_data->field_var_hists[n] = var_hist;
	target_hist_data->n_field_var_hists++;

	return event_var;
}
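/*
 * Illustrative note (not part of the original source): given an existing
 * histogram on the other event keyed on 'pid', a reference to its 'prio'
 * field would cause create_field_var_hist() to synthesize and register a
 * command roughly like:
 *
 *   keys=pid:synthetic_prio=prio [if <saved filter>]
 *
 * The field and key names here are examples only.
 */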
static struct hist_field *
find_target_event_var(struct hist_trigger_data *hist_data,
		      char *subsys_name, char *event_name, char *var_name)
{
	struct trace_event_file *file = hist_data->event_file;
	struct hist_field *hist_field = NULL;

	if (subsys_name) {
		struct trace_event_call *call;

		if (!event_name)
			return NULL;

		call = file->event_call;

		if (strcmp(subsys_name, call->class->system) != 0)
			return NULL;

		if (strcmp(event_name, trace_event_name(call)) != 0)
			return NULL;
	}

	hist_field = find_var_field(hist_data, var_name);

	return hist_field;
}
static inline void __update_field_vars(struct tracing_map_elt *elt,
				       struct ring_buffer_event *rbe,
				       void *rec,
				       struct field_var **field_vars,
				       unsigned int n_field_vars,
				       unsigned int field_var_str_start)
{
	struct hist_elt_data *elt_data = elt->private_data;
	unsigned int i, j, var_idx;
	u64 var_val;

	for (i = 0, j = field_var_str_start; i < n_field_vars; i++) {
		struct field_var *field_var = field_vars[i];
		struct hist_field *var = field_var->var;
		struct hist_field *val = field_var->val;

		var_val = val->fn(val, elt, rbe, rec);
		var_idx = var->var.idx;

		if (val->flags & HIST_FIELD_FL_STRING) {
			char *str = elt_data->field_var_str[j++];
			char *val_str = (char *)(uintptr_t)var_val;

			strscpy(str, val_str, STR_VAR_LEN_MAX);
			var_val = (u64)(uintptr_t)str;
		}
		tracing_map_set_var(elt, var_idx, var_val);
	}
}
static void update_field_vars(struct hist_trigger_data *hist_data,
			      struct tracing_map_elt *elt,
			      struct ring_buffer_event *rbe,
			      void *rec)
{
	__update_field_vars(elt, rbe, rec, hist_data->field_vars,
			    hist_data->n_field_vars, 0);
}

static void save_track_data_vars(struct hist_trigger_data *hist_data,
				 struct tracing_map_elt *elt, void *rec,
				 struct ring_buffer_event *rbe, void *key,
				 struct action_data *data, u64 *var_ref_vals)
{
	__update_field_vars(elt, rbe, rec, hist_data->save_vars,
			    hist_data->n_save_vars, hist_data->n_field_var_str);
}
static struct hist_field *create_var(struct hist_trigger_data *hist_data,
				     struct trace_event_file *file,
				     char *name, int size, const char *type)
{
	struct hist_field *var;
	int idx;

	if (find_var(hist_data, file, name) && !hist_data->remove) {
		var = ERR_PTR(-EINVAL);
		goto out;
	}

	var = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
	if (!var) {
		var = ERR_PTR(-ENOMEM);
		goto out;
	}

	idx = tracing_map_add_var(hist_data->map);
	if (idx < 0) {
		kfree(var);
		var = ERR_PTR(-EINVAL);
		goto out;
	}

	var->flags = HIST_FIELD_FL_VAR;
	var->var.idx = idx;
	var->var.hist_data = var->hist_data = hist_data;
	var->size = size;
	var->var.name = kstrdup(name, GFP_KERNEL);
	var->type = kstrdup(type, GFP_KERNEL);
	if (!var->var.name || !var->type) {
		kfree(var->var.name);
		kfree(var->type);
		kfree(var);
		var = ERR_PTR(-ENOMEM);
	}
 out:
	return var;
}
static struct field_var *create_field_var(struct hist_trigger_data *hist_data,
					  struct trace_event_file *file,
					  char *field_name)
{
	struct hist_field *val = NULL, *var = NULL;
	unsigned long flags = HIST_FIELD_FL_VAR;
	struct trace_array *tr = file->tr;
	struct field_var *field_var;
	int ret = 0;

	if (hist_data->n_field_vars >= SYNTH_FIELDS_MAX) {
		hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name));
		ret = -EINVAL;
		goto err;
	}

	val = parse_atom(hist_data, file, field_name, &flags, NULL);
	if (IS_ERR(val)) {
		hist_err(tr, HIST_ERR_FIELD_VAR_PARSE_FAIL, errpos(field_name));
		ret = PTR_ERR(val);
		goto err;
	}

	var = create_var(hist_data, file, field_name, val->size, val->type);
	if (IS_ERR(var)) {
		hist_err(tr, HIST_ERR_VAR_CREATE_FIND_FAIL, errpos(field_name));
		kfree(val);
		ret = PTR_ERR(var);
		goto err;
	}

	field_var = kzalloc(sizeof(struct field_var), GFP_KERNEL);
	if (!field_var) {
		kfree(val);
		kfree(var);
		ret = -ENOMEM;
		goto err;
	}

	field_var->var = var;
	field_var->val = val;
 out:
	return field_var;
 err:
	field_var = ERR_PTR(ret);
	goto out;
}
/**
 * create_target_field_var - Automatically create a variable for a field
 * @target_hist_data: The target hist trigger
 * @subsys_name: Optional subsystem name
 * @event_name: Optional event name
 * @var_name: The name of the field (and the resulting variable)
 *
 * Hist trigger actions fetch data from variables, not directly from
 * events. However, for convenience, users are allowed to directly
 * specify an event field in an action, which will be automatically
 * converted into a variable on their behalf.
 *
 * This function creates a field variable with the name var_name on
 * the hist trigger currently being defined on the target event.  If
 * subsys_name and event_name are specified, this function simply
 * verifies that they do in fact match the target event subsystem and
 * event name.
 *
 * Return: The variable created for the field.
 */
static struct field_var *
create_target_field_var(struct hist_trigger_data *target_hist_data,
			char *subsys_name, char *event_name, char *var_name)
{
	struct trace_event_file *file = target_hist_data->event_file;

	if (subsys_name) {
		struct trace_event_call *call;

		if (!event_name)
			return NULL;

		call = file->event_call;

		if (strcmp(subsys_name, call->class->system) != 0)
			return NULL;

		if (strcmp(event_name, trace_event_name(call)) != 0)
			return NULL;
	}

	return create_field_var(target_hist_data, file, var_name);
}
static bool check_track_val_max(u64 track_val, u64 var_val)
{
	if (var_val <= track_val)
		return false;

	return true;
}

static bool check_track_val_changed(u64 track_val, u64 var_val)
{
	if (var_val == track_val)
		return false;

	return true;
}
static u64 get_track_val(struct hist_trigger_data *hist_data,
			 struct tracing_map_elt *elt,
			 struct action_data *data)
{
	unsigned int track_var_idx = data->track_data.track_var->var.idx;
	u64 track_val;

	track_val = tracing_map_read_var(elt, track_var_idx);

	return track_val;
}

static void save_track_val(struct hist_trigger_data *hist_data,
			   struct tracing_map_elt *elt,
			   struct action_data *data, u64 var_val)
{
	unsigned int track_var_idx = data->track_data.track_var->var.idx;

	tracing_map_set_var(elt, track_var_idx, var_val);
}

static void save_track_data(struct hist_trigger_data *hist_data,
			    struct tracing_map_elt *elt, void *rec,
			    struct ring_buffer_event *rbe, void *key,
			    struct action_data *data, u64 *var_ref_vals)
{
	if (data->track_data.save_data)
		data->track_data.save_data(hist_data, elt, rec, rbe, key, data, var_ref_vals);
}

static bool check_track_val(struct tracing_map_elt *elt,
			    struct action_data *data,
			    u64 var_val)
{
	struct hist_trigger_data *hist_data;
	u64 track_val;

	hist_data = data->track_data.track_var->hist_data;
	track_val = get_track_val(hist_data, elt, data);

	return data->track_data.check_val(track_val, var_val);
}
#ifdef CONFIG_TRACER_SNAPSHOT
static bool cond_snapshot_update(struct trace_array *tr, void *cond_data)
{
	/* called with tr->max_lock held */
	struct track_data *track_data = tr->cond_snapshot->cond_data;
	struct hist_elt_data *elt_data, *track_elt_data;
	struct snapshot_context *context = cond_data;
	struct action_data *action;
	u64 track_val;

	if (!track_data)
		return false;

	action = track_data->action_data;

	track_val = get_track_val(track_data->hist_data, context->elt,
				  track_data->action_data);

	if (!action->track_data.check_val(track_data->track_val, track_val))
		return false;

	track_data->track_val = track_val;
	memcpy(track_data->key, context->key, track_data->key_len);

	elt_data = context->elt->private_data;
	track_elt_data = track_data->elt.private_data;
	if (elt_data)
		strncpy(track_elt_data->comm, elt_data->comm, TASK_COMM_LEN);

	track_data->updated = true;

	return true;
}
static void save_track_data_snapshot(struct hist_trigger_data *hist_data,
				     struct tracing_map_elt *elt, void *rec,
				     struct ring_buffer_event *rbe, void *key,
				     struct action_data *data,
				     u64 *var_ref_vals)
{
	struct trace_event_file *file = hist_data->event_file;
	struct snapshot_context context;

	context.elt = elt;
	context.key = key;

	tracing_snapshot_cond(file->tr, &context);
}

static void hist_trigger_print_key(struct seq_file *m,
				   struct hist_trigger_data *hist_data,
				   void *key,
				   struct tracing_map_elt *elt);
static struct action_data *snapshot_action(struct hist_trigger_data *hist_data)
{
	unsigned int i;

	if (!hist_data->n_actions)
		return NULL;

	for (i = 0; i < hist_data->n_actions; i++) {
		struct action_data *data = hist_data->actions[i];

		if (data->action == ACTION_SNAPSHOT)
			return data;
	}

	return NULL;
}
static void track_data_snapshot_print(struct seq_file *m,
				      struct hist_trigger_data *hist_data)
{
	struct trace_event_file *file = hist_data->event_file;
	struct track_data *track_data;
	struct action_data *action;

	track_data = tracing_cond_snapshot_data(file->tr);
	if (!track_data)
		return;

	if (!track_data->updated)
		return;

	action = snapshot_action(hist_data);
	if (!action)
		return;

	seq_puts(m, "\nSnapshot taken (see tracing/snapshot).  Details:\n");
	seq_printf(m, "\ttriggering value { %s(%s) }: %10llu",
		   action->handler == HANDLER_ONMAX ? "onmax" : "onchange",
		   action->track_data.var_str, track_data->track_val);

	seq_puts(m, "\ttriggered by event with key: ");
	hist_trigger_print_key(m, hist_data, track_data->key, &track_data->elt);
	seq_putc(m, '\n');
}
#else
static bool cond_snapshot_update(struct trace_array *tr, void *cond_data)
{
	return false;
}
static void save_track_data_snapshot(struct hist_trigger_data *hist_data,
				     struct tracing_map_elt *elt, void *rec,
				     struct ring_buffer_event *rbe, void *key,
				     struct action_data *data,
				     u64 *var_ref_vals) {}
static void track_data_snapshot_print(struct seq_file *m,
				      struct hist_trigger_data *hist_data) {}
#endif /* CONFIG_TRACER_SNAPSHOT */
static void track_data_print(struct seq_file *m,
			     struct hist_trigger_data *hist_data,
			     struct tracing_map_elt *elt,
			     struct action_data *data)
{
	u64 track_val = get_track_val(hist_data, elt, data);
	unsigned int i, save_var_idx;

	if (data->handler == HANDLER_ONMAX)
		seq_printf(m, "\n\tmax: %10llu", track_val);
	else if (data->handler == HANDLER_ONCHANGE)
		seq_printf(m, "\n\tchanged: %10llu", track_val);

	if (data->action == ACTION_SNAPSHOT)
		return;

	for (i = 0; i < hist_data->n_save_vars; i++) {
		struct hist_field *save_val = hist_data->save_vars[i]->val;
		struct hist_field *save_var = hist_data->save_vars[i]->var;
		u64 val;

		save_var_idx = save_var->var.idx;

		val = tracing_map_read_var(elt, save_var_idx);

		if (save_val->flags & HIST_FIELD_FL_STRING) {
			seq_printf(m, "  %s: %-32s", save_var->var.name,
				   (char *)(uintptr_t)(val));
		} else
			seq_printf(m, "  %s: %10llu", save_var->var.name, val);
	}
}
static void ontrack_action(struct hist_trigger_data *hist_data,
			   struct tracing_map_elt *elt, void *rec,
			   struct ring_buffer_event *rbe, void *key,
			   struct action_data *data, u64 *var_ref_vals)
{
	u64 var_val = var_ref_vals[data->track_data.var_ref->var_ref_idx];

	if (check_track_val(elt, data, var_val)) {
		save_track_val(hist_data, elt, data, var_val);
		save_track_data(hist_data, elt, rec, rbe, key, data, var_ref_vals);
	}
}
static void action_data_destroy(struct action_data *data)
{
	unsigned int i;

	lockdep_assert_held(&event_mutex);

	kfree(data->action_name);

	for (i = 0; i < data->n_params; i++)
		kfree(data->params[i]);

	if (data->synth_event)
		data->synth_event->ref--;

	kfree(data->synth_event_name);

	kfree(data);
}
static void track_data_destroy(struct hist_trigger_data *hist_data,
			       struct action_data *data)
{
	struct trace_event_file *file = hist_data->event_file;

	destroy_hist_field(data->track_data.track_var, 0);

	if (data->action == ACTION_SNAPSHOT) {
		struct track_data *track_data;

		track_data = tracing_cond_snapshot_data(file->tr);
		if (track_data && track_data->hist_data == hist_data) {
			tracing_snapshot_cond_disable(file->tr);
			track_data_free(track_data);
		}
	}

	kfree(data->track_data.var_str);

	action_data_destroy(data);
}
static int action_create(struct hist_trigger_data *hist_data,
			 struct action_data *data);

static int track_data_create(struct hist_trigger_data *hist_data,
			     struct action_data *data)
{
	struct hist_field *var_field, *ref_field, *track_var = NULL;
	struct trace_event_file *file = hist_data->event_file;
	struct trace_array *tr = file->tr;
	char *track_data_var_str;
	int ret = 0;

	track_data_var_str = data->track_data.var_str;
	if (track_data_var_str[0] != '$') {
		hist_err(tr, HIST_ERR_ONX_NOT_VAR, errpos(track_data_var_str));
		return -EINVAL;
	}
	track_data_var_str++;

	var_field = find_target_event_var(hist_data, NULL, NULL, track_data_var_str);
	if (!var_field) {
		hist_err(tr, HIST_ERR_ONX_VAR_NOT_FOUND, errpos(track_data_var_str));
		return -EINVAL;
	}

	ref_field = create_var_ref(hist_data, var_field, NULL, NULL);
	if (!ref_field)
		return -ENOMEM;

	data->track_data.var_ref = ref_field;

	if (data->handler == HANDLER_ONMAX)
		track_var = create_var(hist_data, file, "__max", sizeof(u64), "u64");
	if (IS_ERR(track_var)) {
		hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0);
		ret = PTR_ERR(track_var);
		goto out;
	}

	if (data->handler == HANDLER_ONCHANGE)
		track_var = create_var(hist_data, file, "__change", sizeof(u64), "u64");
	if (IS_ERR(track_var)) {
		hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0);
		ret = PTR_ERR(track_var);
		goto out;
	}
	data->track_data.track_var = track_var;

	ret = action_create(hist_data, data);
 out:
	return ret;
}
static int parse_action_params(struct trace_array *tr, char *params,
			       struct action_data *data)
{
	char *param, *saved_param;
	bool first_param = true;
	int ret = 0;

	while (params) {
		if (data->n_params >= SYNTH_FIELDS_MAX) {
			hist_err(tr, HIST_ERR_TOO_MANY_PARAMS, 0);
			goto out;
		}

		param = strsep(&params, ",");
		if (!param) {
			hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, 0);
			ret = -EINVAL;
			goto out;
		}

		param = strstrip(param);
		if (strlen(param) < 2) {
			hist_err(tr, HIST_ERR_INVALID_PARAM, errpos(param));
			ret = -EINVAL;
			goto out;
		}

		saved_param = kstrdup(param, GFP_KERNEL);
		if (!saved_param) {
			ret = -ENOMEM;
			goto out;
		}

		if (first_param && data->use_trace_keyword) {
			data->synth_event_name = saved_param;
			first_param = false;
			continue;
		}
		first_param = false;

		data->params[data->n_params++] = saved_param;
	}
 out:
	return ret;
}
static int action_parse(struct trace_array *tr, char *str, struct action_data *data,
			enum handler_id handler)
{
	char *action_name;
	int ret = 0;

	strsep(&str, ".");
	if (!str) {
		hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0);
		ret = -EINVAL;
		goto out;
	}

	action_name = strsep(&str, "(");
	if (!action_name || !str) {
		hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0);
		ret = -EINVAL;
		goto out;
	}

	if (str_has_prefix(action_name, "save")) {
		char *params = strsep(&str, ")");

		if (!params) {
			hist_err(tr, HIST_ERR_NO_SAVE_PARAMS, 0);
			ret = -EINVAL;
			goto out;
		}

		ret = parse_action_params(tr, params, data);
		if (ret)
			goto out;

		if (handler == HANDLER_ONMAX)
			data->track_data.check_val = check_track_val_max;
		else if (handler == HANDLER_ONCHANGE)
			data->track_data.check_val = check_track_val_changed;
		else {
			hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name));
			ret = -EINVAL;
			goto out;
		}

		data->track_data.save_data = save_track_data_vars;
		data->fn = ontrack_action;
		data->action = ACTION_SAVE;
	} else if (str_has_prefix(action_name, "snapshot")) {
		char *params = strsep(&str, ")");

		if (!str) {
			hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(params));
			ret = -EINVAL;
			goto out;
		}

		if (handler == HANDLER_ONMAX)
			data->track_data.check_val = check_track_val_max;
		else if (handler == HANDLER_ONCHANGE)
			data->track_data.check_val = check_track_val_changed;
		else {
			hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name));
			ret = -EINVAL;
			goto out;
		}

		data->track_data.save_data = save_track_data_snapshot;
		data->fn = ontrack_action;
		data->action = ACTION_SNAPSHOT;
	} else {
		char *params = strsep(&str, ")");

		if (str_has_prefix(action_name, "trace"))
			data->use_trace_keyword = true;

		if (params) {
			ret = parse_action_params(tr, params, data);
			if (ret)
				goto out;
		}

		if (handler == HANDLER_ONMAX)
			data->track_data.check_val = check_track_val_max;
		else if (handler == HANDLER_ONCHANGE)
			data->track_data.check_val = check_track_val_changed;

		if (handler != HANDLER_ONMATCH) {
			data->track_data.save_data = action_trace;
			data->fn = ontrack_action;
		} else
			data->fn = action_trace;

		data->action = ACTION_TRACE;
	}

	data->action_name = kstrdup(action_name, GFP_KERNEL);
	if (!data->action_name) {
		ret = -ENOMEM;
		goto out;
	}

	data->handler = handler;
 out:
	return ret;
}
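/*
 * Illustrative handler.action combinations parsed above (not part of the
 * original source; the forms follow Documentation/trace/histogram.rst,
 * but the variable, field, and synthetic event names are examples only):
 *
 *   onmax($wakeup_lat).save(next_comm,prev_pid)
 *   onchange($val).snapshot()
 *   onmatch(sched.sched_wakeup).wakeup_latency($wakeup_lat,next_pid)
 *   onmax($wakeup_lat).trace(wakeup_latency,$wakeup_lat,next_pid)
 */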
static struct action_data *track_data_parse(struct hist_trigger_data *hist_data,
					    char *str, enum handler_id handler)
{
	struct action_data *data;
	int ret = -EINVAL;
	char *var_str;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	var_str = strsep(&str, ")");
	if (!var_str || !str) {
		ret = -EINVAL;
		goto free;
	}

	data->track_data.var_str = kstrdup(var_str, GFP_KERNEL);
	if (!data->track_data.var_str) {
		ret = -ENOMEM;
		goto free;
	}

	ret = action_parse(hist_data->event_file->tr, str, data, handler);
	if (ret)
		goto free;
 out:
	return data;
 free:
	track_data_destroy(hist_data, data);
	data = ERR_PTR(ret);
	goto out;
}
static void onmatch_destroy(struct action_data *data)
{
	kfree(data->match_data.event);
	kfree(data->match_data.event_system);

	action_data_destroy(data);
}

static void destroy_field_var(struct field_var *field_var)
{
	if (!field_var)
		return;

	destroy_hist_field(field_var->var, 0);
	destroy_hist_field(field_var->val, 0);

	kfree(field_var);
}

static void destroy_field_vars(struct hist_trigger_data *hist_data)
{
	unsigned int i;

	for (i = 0; i < hist_data->n_field_vars; i++)
		destroy_field_var(hist_data->field_vars[i]);
}

static void save_field_var(struct hist_trigger_data *hist_data,
			   struct field_var *field_var)
{
	hist_data->field_vars[hist_data->n_field_vars++] = field_var;

	if (field_var->val->flags & HIST_FIELD_FL_STRING)
		hist_data->n_field_var_str++;
}
static int check_synth_field(struct synth_event *event,
			     struct hist_field *hist_field,
			     unsigned int field_pos)
{
	struct synth_field *field;

	if (field_pos >= event->n_fields)
		return -EINVAL;

	field = event->fields[field_pos];

	if (strcmp(field->type, hist_field->type) != 0)
		return -EINVAL;

	return 0;
}
static struct hist_field *
trace_action_find_var(struct hist_trigger_data *hist_data,
		      struct action_data *data,
		      char *system, char *event, char *var)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_field *hist_field;

	var++; /* skip '$' */

	hist_field = find_target_event_var(hist_data, system, event, var);
	if (!hist_field) {
		if (!system && data->handler == HANDLER_ONMATCH) {
			system = data->match_data.event_system;
			event = data->match_data.event;
		}

		hist_field = find_event_var(hist_data, system, event, var);
	}

	if (!hist_field)
		hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, errpos(var));

	return hist_field;
}
static struct hist_field *
trace_action_create_field_var(struct hist_trigger_data *hist_data,
			      struct action_data *data, char *system,
			      char *event, char *var)
{
	struct hist_field *hist_field = NULL;
	struct field_var *field_var;

	/*
	 * First try to create a field var on the target event (the
	 * currently being defined).  This will create a variable for
	 * unqualified fields on the target event, or if qualified,
	 * target fields that have qualified names matching the target.
	 */
	field_var = create_target_field_var(hist_data, system, event, var);

	if (field_var && !IS_ERR(field_var)) {
		save_field_var(hist_data, field_var);
		hist_field = field_var->var;
	} else {
		field_var = NULL;
		/*
		 * If no explicit system.event is specified, default to
		 * looking for fields on the onmatch(system.event.xxx)
		 * event.
		 */
		if (!system && data->handler == HANDLER_ONMATCH) {
			system = data->match_data.event_system;
			event = data->match_data.event;
		}

		/*
		 * At this point, we're looking at a field on another
		 * event.  Because we can't modify a hist trigger on
		 * another event to add a variable for a field, we need
		 * to create a new trigger on that event and create the
		 * variable at the same time.
		 */
		hist_field = create_field_var_hist(hist_data, system, event, var);
		if (IS_ERR(hist_field))
			goto free;
	}
 out:
	return hist_field;
 free:
	destroy_field_var(field_var);
	hist_field = NULL;
	goto out;
}
static int trace_action_create(struct hist_trigger_data *hist_data,
			       struct action_data *data)
{
	struct trace_array *tr = hist_data->event_file->tr;
	char *event_name, *param, *system = NULL;
	struct hist_field *hist_field, *var_ref;
	unsigned int i, var_ref_idx;
	unsigned int field_pos = 0;
	struct synth_event *event;
	char *synth_event_name;
	int ret = 0;

	lockdep_assert_held(&event_mutex);

	if (data->use_trace_keyword)
		synth_event_name = data->synth_event_name;
	else
		synth_event_name = data->action_name;

	event = find_synth_event(synth_event_name);
	if (!event) {
		hist_err(tr, HIST_ERR_SYNTH_EVENT_NOT_FOUND, errpos(synth_event_name));
		return -EINVAL;
	}

	event->ref++;

	var_ref_idx = hist_data->n_var_refs;

	for (i = 0; i < data->n_params; i++) {
		char *p;

		p = param = kstrdup(data->params[i], GFP_KERNEL);
		if (!param) {
			ret = -ENOMEM;
			goto err;
		}

		system = strsep(&param, ".");
		if (!param) {
			param = (char *)system;
			system = event_name = NULL;
		} else {
			event_name = strsep(&param, ".");
			if (!param) {
				kfree(p);
				ret = -EINVAL;
				goto err;
			}
		}

		if (param[0] == '$')
			hist_field = trace_action_find_var(hist_data, data,
							   system, event_name,
							   param);
		else
			hist_field = trace_action_create_field_var(hist_data,
								   data,
								   system,
								   event_name,
								   param);

		if (!hist_field) {
			kfree(p);
			ret = -EINVAL;
			goto err;
		}

		if (check_synth_field(event, hist_field, field_pos) == 0) {
			var_ref = create_var_ref(hist_data, hist_field,
						 system, event_name);
			if (!var_ref) {
				kfree(p);
				ret = -ENOMEM;
				goto err;
			}

			field_pos++;
			kfree(p);
			continue;
		}

		hist_err(tr, HIST_ERR_SYNTH_TYPE_MISMATCH, errpos(param));
		kfree(p);
		ret = -EINVAL;
		goto err;
	}

	if (field_pos != event->n_fields) {
		hist_err(tr, HIST_ERR_SYNTH_COUNT_MISMATCH, errpos(event->name));
		ret = -EINVAL;
		goto err;
	}

	data->synth_event = event;
	data->var_ref_idx = var_ref_idx;
 out:
	return ret;
 err:
	event->ref--;

	goto out;
}
static int action_create(struct hist_trigger_data *hist_data,
			 struct action_data *data)
{
	struct trace_event_file *file = hist_data->event_file;
	struct trace_array *tr = file->tr;
	struct track_data *track_data;
	struct field_var *field_var;
	unsigned int i;
	char *param;
	int ret = 0;

	if (data->action == ACTION_TRACE)
		return trace_action_create(hist_data, data);

	if (data->action == ACTION_SNAPSHOT) {
		track_data = track_data_alloc(hist_data->key_size, data, hist_data);
		if (IS_ERR(track_data)) {
			ret = PTR_ERR(track_data);
			goto out;
		}

		ret = tracing_snapshot_cond_enable(file->tr, track_data,
						   cond_snapshot_update);
		if (ret)
			track_data_free(track_data);

		goto out;
	}

	if (data->action == ACTION_SAVE) {
		if (hist_data->n_save_vars) {
			ret = -EEXIST;
			hist_err(tr, HIST_ERR_TOO_MANY_SAVE_ACTIONS, 0);
			goto out;
		}

		for (i = 0; i < data->n_params; i++) {
			param = kstrdup(data->params[i], GFP_KERNEL);
			if (!param) {
				ret = -ENOMEM;
				goto out;
			}

			field_var = create_target_field_var(hist_data, NULL, NULL, param);
			if (IS_ERR(field_var)) {
				hist_err(tr, HIST_ERR_FIELD_VAR_CREATE_FAIL,
					 errpos(param));
				ret = PTR_ERR(field_var);
				kfree(param);
				goto out;
			}

			hist_data->save_vars[hist_data->n_save_vars++] = field_var;
			if (field_var->val->flags & HIST_FIELD_FL_STRING)
				hist_data->n_save_var_str++;
			kfree(param);
		}
	}
 out:
	return ret;
}

static int onmatch_create(struct hist_trigger_data *hist_data,
			  struct action_data *data)
{
	return action_create(hist_data, data);
}
static struct action_data *onmatch_parse(struct trace_array *tr, char *str)
{
	char *match_event, *match_event_system;
	struct action_data *data;
	int ret = -EINVAL;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	match_event = strsep(&str, ")");
	if (!match_event || !str) {
		hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(match_event));
		goto free;
	}

	match_event_system = strsep(&match_event, ".");
	if (!match_event) {
		hist_err(tr, HIST_ERR_SUBSYS_NOT_FOUND, errpos(match_event_system));
		goto free;
	}

	if (IS_ERR(event_file(tr, match_event_system, match_event))) {
		hist_err(tr, HIST_ERR_INVALID_SUBSYS_EVENT, errpos(match_event));
		goto free;
	}

	data->match_data.event = kstrdup(match_event, GFP_KERNEL);
	if (!data->match_data.event) {
		ret = -ENOMEM;
		goto free;
	}

	data->match_data.event_system = kstrdup(match_event_system, GFP_KERNEL);
	if (!data->match_data.event_system) {
		ret = -ENOMEM;
		goto free;
	}

	ret = action_parse(tr, str, data, HANDLER_ONMATCH);
	if (ret)
		goto free;
 out:
	return data;
 free:
	onmatch_destroy(data);
	data = ERR_PTR(ret);
	goto out;
}
static int create_hitcount_val(struct hist_trigger_data *hist_data)
{
	hist_data->fields[HITCOUNT_IDX] =
		create_hist_field(hist_data, NULL, HIST_FIELD_FL_HITCOUNT, NULL);
	if (!hist_data->fields[HITCOUNT_IDX])
		return -ENOMEM;

	hist_data->n_vals++;
	hist_data->n_fields++;

	if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
		return -EINVAL;

	return 0;
}
static int __create_val_field(struct hist_trigger_data *hist_data,
			      unsigned int val_idx,
			      struct trace_event_file *file,
			      char *var_name, char *field_str,
			      unsigned long flags)
{
	struct hist_field *hist_field;
	int ret = 0;

	hist_field = parse_expr(hist_data, file, field_str, flags, var_name, 0);
	if (IS_ERR(hist_field)) {
		ret = PTR_ERR(hist_field);
		goto out;
	}

	hist_data->fields[val_idx] = hist_field;

	++hist_data->n_vals;
	++hist_data->n_fields;

	if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
		ret = -EINVAL;
 out:
	return ret;
}

static int create_val_field(struct hist_trigger_data *hist_data,
			    unsigned int val_idx,
			    struct trace_event_file *file,
			    char *field_str)
{
	if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX))
		return -EINVAL;

	return __create_val_field(hist_data, val_idx, file, NULL, field_str, 0);
}
static int create_var_field(struct hist_trigger_data *hist_data,
			    unsigned int val_idx,
			    struct trace_event_file *file,
			    char *var_name, char *expr_str)
{
	struct trace_array *tr = hist_data->event_file->tr;
	unsigned long flags = 0;

	if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
		return -EINVAL;

	if (find_var(hist_data, file, var_name) && !hist_data->remove) {
		hist_err(tr, HIST_ERR_DUPLICATE_VAR, errpos(var_name));
		return -EINVAL;
	}

	flags |= HIST_FIELD_FL_VAR;
	hist_data->n_vars++;
	if (WARN_ON(hist_data->n_vars > TRACING_MAP_VARS_MAX))
		return -EINVAL;

	return __create_val_field(hist_data, val_idx, file, var_name, expr_str, flags);
}
static int create_val_fields(struct hist_trigger_data *hist_data,
			     struct trace_event_file *file)
{
	char *fields_str, *field_str;
	unsigned int i, j = 1;
	int ret;

	ret = create_hitcount_val(hist_data);
	if (ret)
		goto out;

	fields_str = hist_data->attrs->vals_str;
	if (!fields_str)
		goto out;

	strsep(&fields_str, "=");
	if (!fields_str)
		goto out;

	for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX &&
		     j < TRACING_MAP_VALS_MAX; i++) {
		field_str = strsep(&fields_str, ",");
		if (!field_str)
			break;

		if (strcmp(field_str, "hitcount") == 0)
			continue;

		ret = create_val_field(hist_data, j++, file, field_str);
		if (ret)
			goto out;
	}

	if (fields_str && (strcmp(fields_str, "hitcount") != 0))
		ret = -EINVAL;
 out:
	return ret;
}
static int create_key_field(struct hist_trigger_data *hist_data,
			    unsigned int key_idx,
			    unsigned int key_offset,
			    struct trace_event_file *file,
			    char *field_str)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_field *hist_field = NULL;
	unsigned long flags = 0;
	unsigned int key_size;
	int ret = 0;

	if (WARN_ON(key_idx >= HIST_FIELDS_MAX))
		return -EINVAL;

	flags |= HIST_FIELD_FL_KEY;

	if (strcmp(field_str, "stacktrace") == 0) {
		flags |= HIST_FIELD_FL_STACKTRACE;
		key_size = sizeof(unsigned long) * HIST_STACKTRACE_DEPTH;
		hist_field = create_hist_field(hist_data, NULL, flags, NULL);
	} else {
		hist_field = parse_expr(hist_data, file, field_str, flags,
					NULL, 0);
		if (IS_ERR(hist_field)) {
			ret = PTR_ERR(hist_field);
			goto out;
		}

		if (field_has_hist_vars(hist_field, 0)) {
			hist_err(tr, HIST_ERR_INVALID_REF_KEY, errpos(field_str));
			destroy_hist_field(hist_field, 0);
			ret = -EINVAL;
			goto out;
		}

		key_size = hist_field->size;
	}

	hist_data->fields[key_idx] = hist_field;

	key_size = ALIGN(key_size, sizeof(u64));
	hist_data->fields[key_idx]->size = key_size;
	hist_data->fields[key_idx]->offset = key_offset;

	hist_data->key_size += key_size;

	if (hist_data->key_size > HIST_KEY_SIZE_MAX) {
		ret = -EINVAL;
		goto out;
	}

	hist_data->n_keys++;
	hist_data->n_fields++;

	if (WARN_ON(hist_data->n_keys > TRACING_MAP_KEYS_MAX))
		return -EINVAL;

	ret = key_size;
 out:
	return ret;
}
static int create_key_fields(struct hist_trigger_data *hist_data,
			     struct trace_event_file *file)
{
	unsigned int i, key_offset = 0, n_vals = hist_data->n_vals;
	char *fields_str, *field_str;
	int ret = -EINVAL;

	fields_str = hist_data->attrs->keys_str;
	if (!fields_str)
		goto out;

	strsep(&fields_str, "=");
	if (!fields_str)
		goto out;

	for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) {
		field_str = strsep(&fields_str, ",");
		if (!field_str)
			break;
		ret = create_key_field(hist_data, i, key_offset,
				       file, field_str);
		if (ret < 0)
			goto out;
		key_offset += ret;
	}
	if (fields_str) {
		ret = -EINVAL;
		goto out;
	}
	ret = 0;
 out:
	return ret;
}
static int create_var_fields(struct hist_trigger_data *hist_data,
			     struct trace_event_file *file)
{
	unsigned int i, j = hist_data->n_vals;
	int ret = 0;

	unsigned int n_vars = hist_data->attrs->var_defs.n_vars;

	for (i = 0; i < n_vars; i++) {
		char *var_name = hist_data->attrs->var_defs.name[i];
		char *expr = hist_data->attrs->var_defs.expr[i];

		ret = create_var_field(hist_data, j++, file, var_name, expr);
		if (ret)
			goto out;
	}
 out:
	return ret;
}

static void free_var_defs(struct hist_trigger_data *hist_data)
{
	unsigned int i;

	for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) {
		kfree(hist_data->attrs->var_defs.name[i]);
		kfree(hist_data->attrs->var_defs.expr[i]);
	}

	hist_data->attrs->var_defs.n_vars = 0;
}
static int parse_var_defs(struct hist_trigger_data *hist_data)
{
	struct trace_array *tr = hist_data->event_file->tr;
	char *s, *str, *var_name, *field_str;
	unsigned int i, j, n_vars = 0;
	int ret = 0;

	for (i = 0; i < hist_data->attrs->n_assignments; i++) {
		str = hist_data->attrs->assignment_str[i];
		for (j = 0; j < TRACING_MAP_VARS_MAX; j++) {
			field_str = strsep(&str, ",");
			if (!field_str)
				break;

			var_name = strsep(&field_str, "=");
			if (!var_name || !field_str) {
				hist_err(tr, HIST_ERR_MALFORMED_ASSIGNMENT,
					 errpos(var_name));
				ret = -EINVAL;
				goto free;
			}

			if (n_vars == TRACING_MAP_VARS_MAX) {
				hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(var_name));
				ret = -EINVAL;
				goto free;
			}

			s = kstrdup(var_name, GFP_KERNEL);
			if (!s) {
				ret = -ENOMEM;
				goto free;
			}
			hist_data->attrs->var_defs.name[n_vars] = s;

			s = kstrdup(field_str, GFP_KERNEL);
			if (!s) {
				kfree(hist_data->attrs->var_defs.name[n_vars]);
				ret = -ENOMEM;
				goto free;
			}
			hist_data->attrs->var_defs.expr[n_vars++] = s;

			hist_data->attrs->var_defs.n_vars = n_vars;
		}
	}

	return ret;
 free:
	free_var_defs(hist_data);

	return ret;
}
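/*
 * Illustrative example (not part of the original source) of the variable
 * assignments handled above, following Documentation/trace/histogram.rst
 * (the event name is an example only):
 *
 *   # echo 'hist:keys=pid:ts0=common_timestamp.usecs' > \
 *         events/sched/sched_waking/trigger
 *
 * Here 'ts0=common_timestamp.usecs' defines the per-key variable $ts0,
 * which a later trigger can reference, e.g. in
 * 'wakeup_lat=common_timestamp.usecs-$ts0'.
 */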
static int create_hist_fields(struct hist_trigger_data *hist_data,
			      struct trace_event_file *file)
{
	int ret;

	ret = parse_var_defs(hist_data);
	if (ret)
		goto out;

	ret = create_val_fields(hist_data, file);
	if (ret)
		goto out;

	ret = create_var_fields(hist_data, file);
	if (ret)
		goto out;

	ret = create_key_fields(hist_data, file);
	if (ret)
		goto out;
 out:
	free_var_defs(hist_data);

	return ret;
}

static int is_descending(const char *str)
{
	if (!str)
		return 0;

	if (strcmp(str, "descending") == 0)
		return 1;

	if (strcmp(str, "ascending") == 0)
		return 0;

	return -EINVAL;
}
static int create_sort_keys(struct hist_trigger_data *hist_data)
{
	char *fields_str = hist_data->attrs->sort_key_str;
	struct tracing_map_sort_key *sort_key;
	int descending, ret = 0;
	unsigned int i, j, k;

	hist_data->n_sort_keys = 1; /* we always have at least one, hitcount */

	if (!fields_str)
		goto out;

	strsep(&fields_str, "=");
	if (!fields_str) {
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
		struct hist_field *hist_field;
		char *field_str, *field_name;
		const char *test_name;

		sort_key = &hist_data->sort_keys[i];

		field_str = strsep(&fields_str, ",");
		if (!field_str)
			break;

		if (!*field_str) {
			ret = -EINVAL;
			break;
		}

		if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) {
			ret = -EINVAL;
			break;
		}

		field_name = strsep(&field_str, ".");
		if (!field_name || !*field_name) {
			ret = -EINVAL;
			break;
		}

		if (strcmp(field_name, "hitcount") == 0) {
			descending = is_descending(field_str);
			if (descending < 0) {
				ret = descending;
				break;
			}
			sort_key->descending = descending;
			continue;
		}

		for (j = 1, k = 1; j < hist_data->n_fields; j++) {
			unsigned int idx;

			hist_field = hist_data->fields[j];
			if (hist_field->flags & HIST_FIELD_FL_VAR)
				continue;

			idx = k++;

			test_name = hist_field_name(hist_field, 0);

			if (strcmp(field_name, test_name) == 0) {
				sort_key->field_idx = idx;
				descending = is_descending(field_str);
				if (descending < 0) {
					ret = descending;
					goto out;
				}
				sort_key->descending = descending;
				break;
			}
		}
		if (j == hist_data->n_fields) {
			ret = -EINVAL;
			break;
		}
	}

	hist_data->n_sort_keys = i;
 out:
	return ret;
}
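/*
 * Illustrative example (not part of the original source) of a sort
 * specification consumed above:
 *
 *   # echo 'hist:keys=call_site.sym:vals=bytes_req:sort=bytes_req.descending' > \
 *         events/kmem/kmalloc/trigger
 *
 * Up to TRACING_MAP_SORT_KEYS_MAX sort keys may be given; each may be
 * suffixed with '.ascending' or '.descending'.
 */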
static void destroy_actions(struct hist_trigger_data *hist_data)
{
	unsigned int i;

	for (i = 0; i < hist_data->n_actions; i++) {
		struct action_data *data = hist_data->actions[i];

		if (data->handler == HANDLER_ONMATCH)
			onmatch_destroy(data);
		else if (data->handler == HANDLER_ONMAX ||
			 data->handler == HANDLER_ONCHANGE)
			track_data_destroy(hist_data, data);
		else
			kfree(data);
	}
}
static int parse_actions(struct hist_trigger_data *hist_data)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct action_data *data;
	unsigned int i;
	int ret = 0;
	char *str;
	int len;

	for (i = 0; i < hist_data->attrs->n_actions; i++) {
		str = hist_data->attrs->action_str[i];

		if ((len = str_has_prefix(str, "onmatch("))) {
			char *action_str = str + len;

			data = onmatch_parse(tr, action_str);
			if (IS_ERR(data)) {
				ret = PTR_ERR(data);
				break;
			}
		} else if ((len = str_has_prefix(str, "onmax("))) {
			char *action_str = str + len;

			data = track_data_parse(hist_data, action_str,
						HANDLER_ONMAX);
			if (IS_ERR(data)) {
				ret = PTR_ERR(data);
				break;
			}
		} else if ((len = str_has_prefix(str, "onchange("))) {
			char *action_str = str + len;

			data = track_data_parse(hist_data, action_str,
						HANDLER_ONCHANGE);
			if (IS_ERR(data)) {
				ret = PTR_ERR(data);
				break;
			}
		} else {
			ret = -EINVAL;
			break;
		}

		hist_data->actions[hist_data->n_actions++] = data;
	}

	return ret;
}
static int create_actions(struct hist_trigger_data *hist_data)
{
	struct action_data *data;
	unsigned int i;
	int ret = 0;

	for (i = 0; i < hist_data->attrs->n_actions; i++) {
		data = hist_data->actions[i];

		if (data->handler == HANDLER_ONMATCH) {
			ret = onmatch_create(hist_data, data);
			if (ret)
				break;
		} else if (data->handler == HANDLER_ONMAX ||
			   data->handler == HANDLER_ONCHANGE) {
			ret = track_data_create(hist_data, data);
			if (ret)
				break;
		} else {
			ret = -EINVAL;
			break;
		}
	}

	return ret;
}
static void print_actions(struct seq_file *m,
			  struct hist_trigger_data *hist_data,
			  struct tracing_map_elt *elt)
{
	unsigned int i;

	for (i = 0; i < hist_data->n_actions; i++) {
		struct action_data *data = hist_data->actions[i];

		if (data->action == ACTION_SNAPSHOT)
			continue;

		if (data->handler == HANDLER_ONMAX ||
		    data->handler == HANDLER_ONCHANGE)
			track_data_print(m, hist_data, elt, data);
	}
}
static void print_action_spec(struct seq_file *m,
			      struct hist_trigger_data *hist_data,
			      struct action_data *data)
{
	unsigned int i;

	if (data->action == ACTION_SAVE) {
		for (i = 0; i < hist_data->n_save_vars; i++) {
			seq_printf(m, "%s", hist_data->save_vars[i]->var->var.name);
			if (i < hist_data->n_save_vars - 1)
				seq_puts(m, ",");
		}
	} else if (data->action == ACTION_TRACE) {
		if (data->use_trace_keyword)
			seq_printf(m, "%s", data->synth_event_name);
		for (i = 0; i < data->n_params; i++) {
			if (i || data->use_trace_keyword)
				seq_puts(m, ",");
			seq_printf(m, "%s", data->params[i]);
		}
	}
}
static void print_track_data_spec(struct seq_file *m,
				  struct hist_trigger_data *hist_data,
				  struct action_data *data)
{
	if (data->handler == HANDLER_ONMAX)
		seq_puts(m, ":onmax(");
	else if (data->handler == HANDLER_ONCHANGE)
		seq_puts(m, ":onchange(");
	seq_printf(m, "%s", data->track_data.var_str);
	seq_printf(m, ").%s(", data->action_name);

	print_action_spec(m, hist_data, data);

	seq_puts(m, ")");
}

static void print_onmatch_spec(struct seq_file *m,
			       struct hist_trigger_data *hist_data,
			       struct action_data *data)
{
	seq_printf(m, ":onmatch(%s.%s).", data->match_data.event_system,
		   data->match_data.event);

	seq_printf(m, "%s(", data->action_name);

	print_action_spec(m, hist_data, data);

	seq_puts(m, ")");
}
static bool actions_match(struct hist_trigger_data *hist_data,
			  struct hist_trigger_data *hist_data_test)
{
	unsigned int i, j;

	if (hist_data->n_actions != hist_data_test->n_actions)
		return false;

	for (i = 0; i < hist_data->n_actions; i++) {
		struct action_data *data = hist_data->actions[i];
		struct action_data *data_test = hist_data_test->actions[i];
		char *action_name, *action_name_test;

		if (data->handler != data_test->handler)
			return false;
		if (data->action != data_test->action)
			return false;

		if (data->n_params != data_test->n_params)
			return false;

		for (j = 0; j < data->n_params; j++) {
			if (strcmp(data->params[j], data_test->params[j]) != 0)
				return false;
		}

		if (data->use_trace_keyword)
			action_name = data->synth_event_name;
		else
			action_name = data->action_name;

		if (data_test->use_trace_keyword)
			action_name_test = data_test->synth_event_name;
		else
			action_name_test = data_test->action_name;

		if (strcmp(action_name, action_name_test) != 0)
			return false;

		if (data->handler == HANDLER_ONMATCH) {
			if (strcmp(data->match_data.event_system,
				   data_test->match_data.event_system) != 0)
				return false;
			if (strcmp(data->match_data.event,
				   data_test->match_data.event) != 0)
				return false;
		} else if (data->handler == HANDLER_ONMAX ||
			   data->handler == HANDLER_ONCHANGE) {
			if (strcmp(data->track_data.var_str,
				   data_test->track_data.var_str) != 0)
				return false;
		}
	}

	return true;
}
static void print_actions_spec(struct seq_file *m,
			       struct hist_trigger_data *hist_data)
{
	unsigned int i;

	for (i = 0; i < hist_data->n_actions; i++) {
		struct action_data *data = hist_data->actions[i];

		if (data->handler == HANDLER_ONMATCH)
			print_onmatch_spec(m, hist_data, data);
		else if (data->handler == HANDLER_ONMAX ||
			 data->handler == HANDLER_ONCHANGE)
			print_track_data_spec(m, hist_data, data);
	}
}
static void destroy_field_var_hists(struct hist_trigger_data *hist_data)
{
	unsigned int i;

	for (i = 0; i < hist_data->n_field_var_hists; i++) {
		kfree(hist_data->field_var_hists[i]->cmd);
		kfree(hist_data->field_var_hists[i]);
	}
}

static void destroy_hist_data(struct hist_trigger_data *hist_data)
{
	if (!hist_data)
		return;

	destroy_hist_trigger_attrs(hist_data->attrs);
	destroy_hist_fields(hist_data);
	tracing_map_destroy(hist_data->map);

	destroy_actions(hist_data);
	destroy_field_vars(hist_data);
	destroy_field_var_hists(hist_data);

	kfree(hist_data);
}
static int create_tracing_map_fields(struct hist_trigger_data *hist_data)
{
	struct tracing_map *map = hist_data->map;
	struct ftrace_event_field *field;
	struct hist_field *hist_field;
	int i, idx = 0;

	for_each_hist_field(i, hist_data) {
		hist_field = hist_data->fields[i];
		if (hist_field->flags & HIST_FIELD_FL_KEY) {
			tracing_map_cmp_fn_t cmp_fn;

			field = hist_field->field;

			if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
				cmp_fn = tracing_map_cmp_none;
			else if (!field)
				cmp_fn = tracing_map_cmp_num(hist_field->size,
							     hist_field->is_signed);
			else if (is_string_field(field))
				cmp_fn = tracing_map_cmp_string;
			else
				cmp_fn = tracing_map_cmp_num(field->size,
							     field->is_signed);
			idx = tracing_map_add_key_field(map,
							hist_field->offset,
							cmp_fn);
		} else if (!(hist_field->flags & HIST_FIELD_FL_VAR))
			idx = tracing_map_add_sum_field(map);

		if (idx < 0)
			return idx;

		if (hist_field->flags & HIST_FIELD_FL_VAR) {
			idx = tracing_map_add_var(map);
			if (idx < 0)
				return idx;
			hist_field->var.idx = idx;
			hist_field->var.hist_data = hist_data;
		}
	}

	return 0;
}
static struct hist_trigger_data *
create_hist_data(unsigned int map_bits,
		 struct hist_trigger_attrs *attrs,
		 struct trace_event_file *file,
		 bool remove)
{
	const struct tracing_map_ops *map_ops = NULL;
	struct hist_trigger_data *hist_data;
	int ret = 0;

	hist_data = kzalloc(sizeof(*hist_data), GFP_KERNEL);
	if (!hist_data)
		return ERR_PTR(-ENOMEM);

	hist_data->attrs = attrs;
	hist_data->remove = remove;
	hist_data->event_file = file;

	ret = parse_actions(hist_data);
	if (ret)
		goto free;

	ret = create_hist_fields(hist_data, file);
	if (ret)
		goto free;

	ret = create_sort_keys(hist_data);
	if (ret)
		goto free;

	map_ops = &hist_trigger_elt_data_ops;

	hist_data->map = tracing_map_create(map_bits, hist_data->key_size,
					    map_ops, hist_data);
	if (IS_ERR(hist_data->map)) {
		ret = PTR_ERR(hist_data->map);
		hist_data->map = NULL;
		goto free;
	}

	ret = create_tracing_map_fields(hist_data);
	if (ret)
		goto free;
 out:
	return hist_data;
 free:
	hist_data->attrs = NULL;

	destroy_hist_data(hist_data);

	hist_data = ERR_PTR(ret);

	goto out;
}
static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
				    struct tracing_map_elt *elt, void *rec,
				    struct ring_buffer_event *rbe,
				    u64 *var_ref_vals)
{
	struct hist_elt_data *elt_data;
	struct hist_field *hist_field;
	unsigned int i, var_idx;
	u64 hist_val;

	elt_data = elt->private_data;
	elt_data->var_ref_vals = var_ref_vals;

	for_each_hist_val_field(i, hist_data) {
		hist_field = hist_data->fields[i];
		hist_val = hist_field->fn(hist_field, elt, rbe, rec);
		if (hist_field->flags & HIST_FIELD_FL_VAR) {
			var_idx = hist_field->var.idx;
			tracing_map_set_var(elt, var_idx, hist_val);
			continue;
		}
		tracing_map_update_sum(elt, i, hist_val);
	}

	for_each_hist_key_field(i, hist_data) {
		hist_field = hist_data->fields[i];
		if (hist_field->flags & HIST_FIELD_FL_VAR) {
			hist_val = hist_field->fn(hist_field, elt, rbe, rec);
			var_idx = hist_field->var.idx;
			tracing_map_set_var(elt, var_idx, hist_val);
		}
	}

	update_field_vars(hist_data, elt, rbe, rec);
}
static inline void add_to_key(char *compound_key, void *key,
			      struct hist_field *key_field, void *rec)
{
	size_t size = key_field->size;

	if (key_field->flags & HIST_FIELD_FL_STRING) {
		struct ftrace_event_field *field;

		field = key_field->field;
		if (field->filter_type == FILTER_DYN_STRING)
			size = *(u32 *)(rec + field->offset) >> 16;
		else if (field->filter_type == FILTER_PTR_STRING)
			size = strlen(key);
		else if (field->filter_type == FILTER_STATIC_STRING)
			size = field->size;

		/* ensure NULL-termination */
		if (size > key_field->size - 1)
			size = key_field->size - 1;

		strncpy(compound_key + key_field->offset, (char *)key, size);
	} else
		memcpy(compound_key + key_field->offset, key, size);
}
static void
hist_trigger_actions(struct hist_trigger_data *hist_data,
		     struct tracing_map_elt *elt, void *rec,
		     struct ring_buffer_event *rbe, void *key,
		     u64 *var_ref_vals)
{
	struct action_data *data;
	unsigned int i;

	for (i = 0; i < hist_data->n_actions; i++) {
		data = hist_data->actions[i];
		data->fn(hist_data, elt, rec, rbe, key, data, var_ref_vals);
	}
}
static void event_hist_trigger(struct event_trigger_data *data, void *rec,
			       struct ring_buffer_event *rbe)
{
	struct hist_trigger_data *hist_data = data->private_data;
	bool use_compound_key = (hist_data->n_keys > 1);
	unsigned long entries[HIST_STACKTRACE_DEPTH];
	u64 var_ref_vals[TRACING_MAP_VARS_MAX];
	char compound_key[HIST_KEY_SIZE_MAX];
	struct tracing_map_elt *elt = NULL;
	struct hist_field *key_field;
	u64 field_contents;
	void *key = NULL;
	unsigned int i;

	memset(compound_key, 0, hist_data->key_size);

	for_each_hist_key_field(i, hist_data) {
		key_field = hist_data->fields[i];

		if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
			memset(entries, 0, HIST_STACKTRACE_SIZE);
			stack_trace_save(entries, HIST_STACKTRACE_DEPTH,
					 HIST_STACKTRACE_SKIP);
			key = entries;
		} else {
			field_contents = key_field->fn(key_field, elt, rbe, rec);
			if (key_field->flags & HIST_FIELD_FL_STRING) {
				key = (void *)(unsigned long)field_contents;
				use_compound_key = true;
			} else
				key = (void *)&field_contents;
		}

		if (use_compound_key)
			add_to_key(compound_key, key, key_field, rec);
	}

	if (use_compound_key)
		key = compound_key;

	if (hist_data->n_var_refs &&
	    !resolve_var_refs(hist_data, key, var_ref_vals, false))
		return;

	elt = tracing_map_insert(hist_data->map, key);
	if (!elt)
		return;

	hist_trigger_elt_update(hist_data, elt, rec, rbe, var_ref_vals);

	if (resolve_var_refs(hist_data, key, var_ref_vals, true))
		hist_trigger_actions(hist_data, elt, rec, rbe, key, var_ref_vals);
}
static void hist_trigger_stacktrace_print(struct seq_file *m,
					  unsigned long *stacktrace_entries,
					  unsigned int max_entries)
{
	char str[KSYM_SYMBOL_LEN];
	unsigned int spaces = 8;
	unsigned int i;

	for (i = 0; i < max_entries; i++) {
		if (!stacktrace_entries[i])
			return;

		seq_printf(m, "%*c", 1 + spaces, ' ');
		sprint_symbol(str, stacktrace_entries[i]);
		seq_printf(m, "%s\n", str);
	}
}
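
/*
 * Print a single histogram key, honoring the key field's modifier
 * flags (.hex, .sym, .sym-offset, .execname, .syscall, .log2, string
 * and stacktrace keys each get their own formatting).
 */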
static void hist_trigger_print_key(struct seq_file *m,
				   struct hist_trigger_data *hist_data,
				   void *key,
				   struct tracing_map_elt *elt)
{
	struct hist_field *key_field;
	char str[KSYM_SYMBOL_LEN];
	bool multiline = false;
	const char *field_name;
	unsigned int i;
	u64 uval;

	seq_puts(m, "{ ");

	for_each_hist_key_field(i, hist_data) {
		key_field = hist_data->fields[i];

		if (i > hist_data->n_vals)
			seq_puts(m, ", ");

		field_name = hist_field_name(key_field, 0);

		if (key_field->flags & HIST_FIELD_FL_HEX) {
			uval = *(u64 *)(key + key_field->offset);
			seq_printf(m, "%s: %llx", field_name, uval);
		} else if (key_field->flags & HIST_FIELD_FL_SYM) {
			uval = *(u64 *)(key + key_field->offset);
			sprint_symbol_no_offset(str, uval);
			seq_printf(m, "%s: [%llx] %-45s", field_name,
				   uval, str);
		} else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) {
			uval = *(u64 *)(key + key_field->offset);
			sprint_symbol(str, uval);
			seq_printf(m, "%s: [%llx] %-55s", field_name,
				   uval, str);
		} else if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
			struct hist_elt_data *elt_data = elt->private_data;
			char *comm;

			if (WARN_ON_ONCE(!elt_data))
				return;

			comm = elt_data->comm;

			uval = *(u64 *)(key + key_field->offset);
			seq_printf(m, "%s: %-16s[%10llu]", field_name,
				   comm, uval);
		} else if (key_field->flags & HIST_FIELD_FL_SYSCALL) {
			const char *syscall_name;

			uval = *(u64 *)(key + key_field->offset);
			syscall_name = get_syscall_name(uval);
			if (!syscall_name)
				syscall_name = "unknown_syscall";

			seq_printf(m, "%s: %-30s[%3llu]", field_name,
				   syscall_name, uval);
		} else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
			seq_puts(m, "stacktrace:\n");
			hist_trigger_stacktrace_print(m,
						      key + key_field->offset,
						      HIST_STACKTRACE_DEPTH);
			multiline = true;
		} else if (key_field->flags & HIST_FIELD_FL_LOG2) {
			seq_printf(m, "%s: ~ 2^%-2llu", field_name,
				   *(u64 *)(key + key_field->offset));
		} else if (key_field->flags & HIST_FIELD_FL_STRING) {
			seq_printf(m, "%s: %-50s", field_name,
				   (char *)(key + key_field->offset));
		} else {
			uval = *(u64 *)(key + key_field->offset);
			seq_printf(m, "%s: %10llu", field_name, uval);
		}
	}

	if (!multiline)
		seq_puts(m, " ");

	seq_puts(m, "}");
}
static void hist_trigger_entry_print(struct seq_file *m,
				     struct hist_trigger_data *hist_data,
				     void *key,
				     struct tracing_map_elt *elt)
{
	const char *field_name;
	unsigned int i;

	hist_trigger_print_key(m, hist_data, key, elt);

	seq_printf(m, " hitcount: %10llu",
		   tracing_map_read_sum(elt, HITCOUNT_IDX));

	for (i = 1; i < hist_data->n_vals; i++) {
		field_name = hist_field_name(hist_data->fields[i], 0);

		if (hist_data->fields[i]->flags & HIST_FIELD_FL_VAR ||
		    hist_data->fields[i]->flags & HIST_FIELD_FL_EXPR)
			continue;

		if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) {
			seq_printf(m, " %s: %10llx", field_name,
				   tracing_map_read_sum(elt, i));
		} else {
			seq_printf(m, " %s: %10llu", field_name,
				   tracing_map_read_sum(elt, i));
		}
	}

	print_actions(m, hist_data, elt);

	seq_puts(m, "\n");
}
static int print_entries(struct seq_file *m,
			 struct hist_trigger_data *hist_data)
{
	struct tracing_map_sort_entry **sort_entries = NULL;
	struct tracing_map *map = hist_data->map;
	int i, n_entries;

	n_entries = tracing_map_sort_entries(map, hist_data->sort_keys,
					     hist_data->n_sort_keys,
					     &sort_entries);
	if (n_entries < 0)
		return n_entries;

	for (i = 0; i < n_entries; i++)
		hist_trigger_entry_print(m, hist_data,
					 sort_entries[i]->key,
					 sort_entries[i]->elt);

	tracing_map_destroy_sort_entries(sort_entries, n_entries);

	return n_entries;
}
static void hist_trigger_show(struct seq_file *m,
			      struct event_trigger_data *data, int n)
{
	struct hist_trigger_data *hist_data;
	int n_entries;

	if (n > 0)
		seq_puts(m, "\n\n");

	seq_puts(m, "# event histogram\n#\n# trigger info: ");
	data->ops->print(m, data->ops, data);
	seq_puts(m, "#\n\n");

	hist_data = data->private_data;
	n_entries = print_entries(m, hist_data);
	if (n_entries < 0)
		n_entries = 0;

	track_data_snapshot_print(m, hist_data);

	seq_printf(m, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n",
		   (u64)atomic64_read(&hist_data->map->hits),
		   n_entries, (u64)atomic64_read(&hist_data->map->drops));
}
static int hist_show(struct seq_file *m, void *v)
{
	struct event_trigger_data *data;
	struct trace_event_file *event_file;
	int n = 0, ret = 0;

	mutex_lock(&event_mutex);

	event_file = event_file_data(m->private);
	if (unlikely(!event_file)) {
		ret = -ENODEV;
		goto out_unlock;
	}

	list_for_each_entry_rcu(data, &event_file->triggers, list) {
		if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
			hist_trigger_show(m, data, n++);
	}

 out_unlock:
	mutex_unlock(&event_mutex);

	return ret;
}
static int event_hist_open(struct inode *inode, struct file *file)
{
	return single_open(file, hist_show, file);
}
const struct file_operations event_hist_fops = {
	.open = event_hist_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
{
	const char *field_name = hist_field_name(hist_field, 0);

	if (hist_field->var.name)
		seq_printf(m, "%s=", hist_field->var.name);

	if (hist_field->flags & HIST_FIELD_FL_CPU)
		seq_puts(m, "cpu");
	else if (field_name) {
		if (hist_field->flags & HIST_FIELD_FL_VAR_REF ||
		    hist_field->flags & HIST_FIELD_FL_ALIAS)
			seq_putc(m, '$');
		seq_printf(m, "%s", field_name);
	} else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP)
		seq_puts(m, "common_timestamp");

	if (hist_field->flags) {
		if (!(hist_field->flags & HIST_FIELD_FL_VAR_REF) &&
		    !(hist_field->flags & HIST_FIELD_FL_EXPR)) {
			const char *flags = get_hist_field_flags(hist_field);

			if (flags)
				seq_printf(m, ".%s", flags);
		}
	}
}
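
/*
 * Reconstruct the trigger command string for a hist trigger, in the
 * same form it was specified in ('keys=...:vals=...:sort=...'), as
 * shown in the event's 'trigger' and 'hist' files.
 */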
static int event_hist_trigger_print(struct seq_file *m,
				    struct event_trigger_ops *ops,
				    struct event_trigger_data *data)
{
	struct hist_trigger_data *hist_data = data->private_data;
	struct hist_field *field;
	bool have_var = false;
	unsigned int i;

	seq_puts(m, "hist:");

	if (data->name)
		seq_printf(m, "%s:", data->name);

	seq_puts(m, "keys=");

	for_each_hist_key_field(i, hist_data) {
		field = hist_data->fields[i];

		if (i > hist_data->n_vals)
			seq_puts(m, ",");

		if (field->flags & HIST_FIELD_FL_STACKTRACE)
			seq_puts(m, "stacktrace");
		else
			hist_field_print(m, field);
	}

	seq_puts(m, ":vals=");

	for_each_hist_val_field(i, hist_data) {
		field = hist_data->fields[i];
		if (field->flags & HIST_FIELD_FL_VAR) {
			have_var = true;
			continue;
		}

		if (i == HITCOUNT_IDX)
			seq_puts(m, "hitcount");
		else {
			seq_puts(m, ",");
			hist_field_print(m, field);
		}
	}

	if (have_var) {
		unsigned int n = 0;

		seq_puts(m, ":");

		for_each_hist_val_field(i, hist_data) {
			field = hist_data->fields[i];

			if (field->flags & HIST_FIELD_FL_VAR) {
				if (n++)
					seq_puts(m, ",");
				hist_field_print(m, field);
			}
		}
	}

	seq_puts(m, ":sort=");

	for (i = 0; i < hist_data->n_sort_keys; i++) {
		struct tracing_map_sort_key *sort_key;
		unsigned int idx, first_key_idx;

		/* skip VAR vals */
		first_key_idx = hist_data->n_vals - hist_data->n_vars;

		sort_key = &hist_data->sort_keys[i];
		idx = sort_key->field_idx;

		if (WARN_ON(idx >= HIST_FIELDS_MAX))
			return -EINVAL;

		if (i > 0)
			seq_puts(m, ",");

		if (idx == HITCOUNT_IDX)
			seq_puts(m, "hitcount");
		else {
			if (idx >= first_key_idx)
				idx += hist_data->n_vars;
			hist_field_print(m, hist_data->fields[idx]);
		}

		if (sort_key->descending)
			seq_puts(m, ".descending");
	}
	seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits));
	if (hist_data->enable_timestamps)
		seq_printf(m, ":clock=%s", hist_data->attrs->clock);

	print_actions_spec(m, hist_data);

	if (data->filter_str)
		seq_printf(m, " if %s", data->filter_str);

	if (data->paused)
		seq_puts(m, " [paused]");
	else
		seq_puts(m, " [active]");

	seq_putc(m, '\n');

	return 0;
}
static int event_hist_trigger_init(struct event_trigger_ops *ops,
				   struct event_trigger_data *data)
{
	struct hist_trigger_data *hist_data = data->private_data;

	if (!data->ref && hist_data->attrs->name)
		save_named_trigger(hist_data->attrs->name, data);

	data->ref++;

	return 0;
}
static void unregister_field_var_hists(struct hist_trigger_data *hist_data)
{
	struct trace_event_file *file;
	unsigned int i;
	char *cmd;
	int ret;

	for (i = 0; i < hist_data->n_field_var_hists; i++) {
		file = hist_data->field_var_hists[i]->hist_data->event_file;
		cmd = hist_data->field_var_hists[i]->cmd;
		ret = event_hist_trigger_func(&trigger_hist_cmd, file,
					      "!hist", "hist", cmd);
	}
}
static void event_hist_trigger_free(struct event_trigger_ops *ops,
				    struct event_trigger_data *data)
{
	struct hist_trigger_data *hist_data = data->private_data;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		if (data->name)
			del_named_trigger(data);

		trigger_data_free(data);

		remove_hist_vars(hist_data);

		unregister_field_var_hists(hist_data);

		destroy_hist_data(hist_data);
	}
}
static struct event_trigger_ops event_hist_trigger_ops = {
	.func			= event_hist_trigger,
	.print			= event_hist_trigger_print,
	.init			= event_hist_trigger_init,
	.free			= event_hist_trigger_free,
};
static int event_hist_trigger_named_init(struct event_trigger_ops *ops,
					 struct event_trigger_data *data)
{
	data->ref++;

	save_named_trigger(data->named_data->name, data);

	event_hist_trigger_init(ops, data->named_data);

	return 0;
}
static void event_hist_trigger_named_free(struct event_trigger_ops *ops,
					  struct event_trigger_data *data)
{
	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	event_hist_trigger_free(ops, data->named_data);

	data->ref--;
	if (!data->ref) {
		del_named_trigger(data);
		trigger_data_free(data);
	}
}
static struct event_trigger_ops event_hist_trigger_named_ops = {
	.func			= event_hist_trigger,
	.print			= event_hist_trigger_print,
	.init			= event_hist_trigger_named_init,
	.free			= event_hist_trigger_named_free,
};
static struct event_trigger_ops *event_hist_get_trigger_ops(char *cmd,
							     char *param)
{
	return &event_hist_trigger_ops;
}
static void hist_clear(struct event_trigger_data *data)
{
	struct hist_trigger_data *hist_data = data->private_data;

	if (data->name)
		pause_named_trigger(data);

	tracepoint_synchronize_unregister();

	tracing_map_clear(hist_data->map);

	if (data->name)
		unpause_named_trigger(data);
}
static bool compatible_field(struct ftrace_event_field *field,
			     struct ftrace_event_field *test_field)
{
	if (field == test_field)
		return true;
	if (field == NULL || test_field == NULL)
		return false;
	if (strcmp(field->name, test_field->name) != 0)
		return false;
	if (strcmp(field->type, test_field->type) != 0)
		return false;
	if (field->size != test_field->size)
		return false;
	if (field->is_signed != test_field->is_signed)
		return false;

	return true;
}
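
/*
 * Two hist triggers 'match' if they're identical in every respect
 * (keys, vals, sort keys, variables, actions and, unless
 * ignore_filter is set, the filter).  Used both to find the existing
 * trigger a named trigger should share data with and to reject
 * duplicate trigger definitions.
 */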
static bool hist_trigger_match(struct event_trigger_data *data,
			       struct event_trigger_data *data_test,
			       struct event_trigger_data *named_data,
			       bool ignore_filter)
{
	struct tracing_map_sort_key *sort_key, *sort_key_test;
	struct hist_trigger_data *hist_data, *hist_data_test;
	struct hist_field *key_field, *key_field_test;
	unsigned int i;

	if (named_data && (named_data != data_test) &&
	    (named_data != data_test->named_data))
		return false;

	if (!named_data && is_named_trigger(data_test))
		return false;

	hist_data = data->private_data;
	hist_data_test = data_test->private_data;

	if (hist_data->n_vals != hist_data_test->n_vals ||
	    hist_data->n_fields != hist_data_test->n_fields ||
	    hist_data->n_sort_keys != hist_data_test->n_sort_keys)
		return false;

	if (!ignore_filter) {
		if ((data->filter_str && !data_test->filter_str) ||
		    (!data->filter_str && data_test->filter_str))
			return false;
	}

	for_each_hist_field(i, hist_data) {
		key_field = hist_data->fields[i];
		key_field_test = hist_data_test->fields[i];

		if (key_field->flags != key_field_test->flags)
			return false;
		if (!compatible_field(key_field->field, key_field_test->field))
			return false;
		if (key_field->offset != key_field_test->offset)
			return false;
		if (key_field->size != key_field_test->size)
			return false;
		if (key_field->is_signed != key_field_test->is_signed)
			return false;
		if (!!key_field->var.name != !!key_field_test->var.name)
			return false;
		if (key_field->var.name &&
		    strcmp(key_field->var.name, key_field_test->var.name) != 0)
			return false;
	}

	for (i = 0; i < hist_data->n_sort_keys; i++) {
		sort_key = &hist_data->sort_keys[i];
		sort_key_test = &hist_data_test->sort_keys[i];

		if (sort_key->field_idx != sort_key_test->field_idx ||
		    sort_key->descending != sort_key_test->descending)
			return false;
	}

	if (!ignore_filter && data->filter_str &&
	    (strcmp(data->filter_str, data_test->filter_str) != 0))
		return false;

	if (!actions_match(hist_data, hist_data_test))
		return false;

	return true;
}
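
/*
 * Register a hist trigger on an event.  An existing matching trigger
 * can be paused, continued or cleared via the pause/cont/clear
 * attributes; a named trigger shares the private_data of the trigger
 * it was named after and switches to the 'named' trigger ops.
 */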
static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
				 struct event_trigger_data *data,
				 struct trace_event_file *file)
{
	struct hist_trigger_data *hist_data = data->private_data;
	struct event_trigger_data *test, *named_data = NULL;
	struct trace_array *tr = file->tr;
	int ret = 0;

	if (hist_data->attrs->name) {
		named_data = find_named_trigger(hist_data->attrs->name);
		if (named_data) {
			if (!hist_trigger_match(data, named_data, named_data,
						true)) {
				hist_err(tr, HIST_ERR_NAMED_MISMATCH, errpos(hist_data->attrs->name));
				ret = -EINVAL;
				goto out;
			}
		}
	}

	if (hist_data->attrs->name && !named_data)
		goto new;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (!hist_trigger_match(data, test, named_data, false))
				continue;
			if (hist_data->attrs->pause)
				test->paused = true;
			else if (hist_data->attrs->cont)
				test->paused = false;
			else if (hist_data->attrs->clear)
				hist_clear(test);
			else {
				hist_err(tr, HIST_ERR_TRIGGER_EEXIST, 0);
				ret = -EEXIST;
			}
			goto out;
		}
	}
 new:
	if (hist_data->attrs->cont || hist_data->attrs->clear) {
		hist_err(tr, HIST_ERR_TRIGGER_ENOENT_CLEAR, 0);
		ret = -ENOENT;
		goto out;
	}

	if (hist_data->attrs->pause)
		data->paused = true;

	if (named_data) {
		data->private_data = named_data->private_data;
		set_named_trigger_data(data, named_data);
		data->ops = &event_hist_trigger_named_ops;
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	if (hist_data->enable_timestamps) {
		char *clock = hist_data->attrs->clock;

		ret = tracing_set_clock(file->tr, hist_data->attrs->clock);
		if (ret) {
			hist_err(tr, HIST_ERR_SET_CLOCK_FAIL, errpos(clock));
			goto out;
		}

		tracing_set_time_stamp_abs(file->tr, true);
	}

	if (named_data)
		destroy_hist_data(hist_data);

	ret++;
 out:
	return ret;
}
static int hist_trigger_enable(struct event_trigger_data *data,
			       struct trace_event_file *file)
{
	int ret = 0;

	list_add_tail_rcu(&data->list, &file->triggers);

	update_cond_flag(file);

	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}

	return ret;
}
static bool have_hist_trigger_match(struct event_trigger_data *data,
				    struct trace_event_file *file)
{
	struct hist_trigger_data *hist_data = data->private_data;
	struct event_trigger_data *test, *named_data = NULL;
	bool match = false;

	if (hist_data->attrs->name)
		named_data = find_named_trigger(hist_data->attrs->name);

	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (hist_trigger_match(data, test, named_data, false)) {
				match = true;
				break;
			}
		}
	}

	return match;
}
static bool hist_trigger_check_refs(struct event_trigger_data *data,
				    struct trace_event_file *file)
{
	struct hist_trigger_data *hist_data = data->private_data;
	struct event_trigger_data *test, *named_data = NULL;

	if (hist_data->attrs->name)
		named_data = find_named_trigger(hist_data->attrs->name);

	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (!hist_trigger_match(data, test, named_data, false))
				continue;
			hist_data = test->private_data;
			if (check_var_refs(hist_data))
				return true;
			break;
		}
	}

	return false;
}
static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
				    struct event_trigger_data *data,
				    struct trace_event_file *file)
{
	struct hist_trigger_data *hist_data = data->private_data;
	struct event_trigger_data *test, *named_data = NULL;
	bool unregistered = false;

	if (hist_data->attrs->name)
		named_data = find_named_trigger(hist_data->attrs->name);

	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (!hist_trigger_match(data, test, named_data, false))
				continue;
			unregistered = true;
			list_del_rcu(&test->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	if (unregistered && test->ops->free)
		test->ops->free(test->ops, test);

	if (hist_data->enable_timestamps) {
		if (!hist_data->remove || unregistered)
			tracing_set_time_stamp_abs(file->tr, false);
	}
}
static bool hist_file_check_refs(struct trace_event_file *file)
{
	struct hist_trigger_data *hist_data;
	struct event_trigger_data *test;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			hist_data = test->private_data;
			if (check_var_refs(hist_data))
				return true;
		}
	}

	return false;
}
static void hist_unreg_all(struct trace_event_file *file)
{
	struct event_trigger_data *test, *n;
	struct hist_trigger_data *hist_data;
	struct synth_event *se;
	const char *se_name;

	lockdep_assert_held(&event_mutex);

	if (hist_file_check_refs(file))
		return;

	list_for_each_entry_safe(test, n, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			hist_data = test->private_data;
			list_del_rcu(&test->list);
			trace_event_trigger_enable_disable(file, 0);

			se_name = trace_event_name(file->event_call);
			se = find_synth_event(se_name);
			if (se)
				se->ref--;

			update_cond_flag(file);
			if (hist_data->enable_timestamps)
				tracing_set_time_stamp_abs(file->tr, false);
			if (test->ops->free)
				test->ops->free(test->ops, test);
		}
	}
}
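
/*
 * Parse and apply a 'hist' trigger command written to an event's
 * trigger file: split the trigger spec from an optional 'if' filter,
 * build the hist_trigger_data, then register (or, for '!hist',
 * unregister) the trigger and create any actions it specifies.
 */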
static int event_hist_trigger_func(struct event_command *cmd_ops,
				   struct trace_event_file *file,
				   char *glob, char *cmd, char *param)
{
	unsigned int hist_trigger_bits = TRACING_MAP_BITS_DEFAULT;
	struct event_trigger_data *trigger_data;
	struct hist_trigger_attrs *attrs;
	struct event_trigger_ops *trigger_ops;
	struct hist_trigger_data *hist_data;
	struct synth_event *se;
	const char *se_name;
	bool remove = false;
	char *trigger, *p;
	int ret = 0;

	lockdep_assert_held(&event_mutex);

	if (glob && strlen(glob)) {
		hist_err_clear();
		last_cmd_set(file, param);
	}

	if (!param)
		return -EINVAL;

	if (glob[0] == '!')
		remove = true;

	/*
	 * separate the trigger from the filter (k:v [if filter])
	 * allowing for whitespace in the trigger
	 */
	p = trigger = param;
	do {
		p = strstr(p, "if");
		if (!p)
			break;
		if (p == param)
			return -EINVAL;
		if (*(p - 1) != ' ' && *(p - 1) != '\t') {
			p++;
			continue;
		}
		if (p >= param + strlen(param) - (sizeof("if") - 1) - 1)
			return -EINVAL;
		if (*(p + sizeof("if") - 1) != ' ' && *(p + sizeof("if") - 1) != '\t') {
			p++;
			continue;
		}
		break;
	} while (p);

	if (!p)
		param = NULL;
	else {
		*(p - 1) = '\0';
		param = strstrip(p);
		trigger = strstrip(trigger);
	}

	attrs = parse_hist_trigger_attrs(file->tr, trigger);
	if (IS_ERR(attrs))
		return PTR_ERR(attrs);

	if (attrs->map_bits)
		hist_trigger_bits = attrs->map_bits;

	hist_data = create_hist_data(hist_trigger_bits, attrs, file, remove);
	if (IS_ERR(hist_data)) {
		destroy_hist_trigger_attrs(attrs);
		return PTR_ERR(hist_data);
	}

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data) {
		ret = -ENOMEM;
		goto out_free;
	}

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;

	INIT_LIST_HEAD(&trigger_data->list);
	RCU_INIT_POINTER(trigger_data->filter, NULL);

	trigger_data->private_data = hist_data;

	/* if param is non-empty, it's supposed to be a filter */
	if (param && cmd_ops->set_filter) {
		ret = cmd_ops->set_filter(param, trigger_data, file);
		if (ret < 0)
			goto out_free;
	}

	if (remove) {
		if (!have_hist_trigger_match(trigger_data, file))
			goto out_free;

		if (hist_trigger_check_refs(trigger_data, file)) {
			ret = -EBUSY;
			goto out_free;
		}

		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		se_name = trace_event_name(file->event_call);
		se = find_synth_event(se_name);
		if (se)
			se->ref--;
		ret = 0;
		goto out_free;
	}

	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of triggers registered,
	 * but if it didn't register any it returns zero.  Consider no
	 * triggers registered a failure too.
	 */
	if (!ret) {
		if (!(attrs->pause || attrs->cont || attrs->clear))
			ret = -ENOENT;
		goto out_free;
	} else if (ret < 0)
		goto out_free;

	if (get_named_trigger_data(trigger_data))
		goto enable;

	if (has_hist_vars(hist_data))
		save_hist_vars(hist_data);

	ret = create_actions(hist_data);
	if (ret)
		goto out_unreg;

	ret = tracing_map_init(hist_data->map);
	if (ret)
		goto out_unreg;
 enable:
	ret = hist_trigger_enable(trigger_data, file);
	if (ret)
		goto out_unreg;

	se_name = trace_event_name(file->event_call);
	se = find_synth_event(se_name);
	if (se)
		se->ref++;
	/* Just return zero, not the number of registered triggers */
	ret = 0;
 out:
	if (ret == 0)
		hist_err_clear();

	return ret;
 out_unreg:
	cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);

	remove_hist_vars(hist_data);

	kfree(trigger_data);

	destroy_hist_data(hist_data);
	goto out;
}
static struct event_command trigger_hist_cmd = {
	.name			= "hist",
	.trigger_type		= ETT_EVENT_HIST,
	.flags			= EVENT_CMD_FL_NEEDS_REC,
	.func			= event_hist_trigger_func,
	.reg			= hist_register_trigger,
	.unreg			= hist_unregister_trigger,
	.unreg_all		= hist_unreg_all,
	.get_trigger_ops	= event_hist_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
__init int register_trigger_hist_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_hist_cmd);
	WARN_ON(ret < 0);

	return ret;
}
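
/*
 * enable_hist/disable_hist triggers: rather than creating histograms
 * of their own, these pause or unpause the hist triggers attached to
 * another event whenever the triggering event fires.
 */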
static void
hist_enable_trigger(struct event_trigger_data *data, void *rec,
		    struct ring_buffer_event *event)
{
	struct enable_trigger_data *enable_data = data->private_data;
	struct event_trigger_data *test;

	list_for_each_entry_rcu(test, &enable_data->file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (enable_data->enable)
				test->paused = false;
			else
				test->paused = true;
		}
	}
}
static void
hist_enable_count_trigger(struct event_trigger_data *data, void *rec,
			  struct ring_buffer_event *event)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	hist_enable_trigger(data, rec, event);
}
static struct event_trigger_ops hist_enable_trigger_ops = {
	.func			= hist_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static struct event_trigger_ops hist_enable_count_trigger_ops = {
	.func			= hist_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static struct event_trigger_ops hist_disable_trigger_ops = {
	.func			= hist_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static struct event_trigger_ops hist_disable_count_trigger_ops = {
	.func			= hist_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};
static struct event_trigger_ops *
hist_enable_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;
	bool enable;

	enable = (strcmp(cmd, ENABLE_HIST_STR) == 0);

	if (enable)
		ops = param ? &hist_enable_count_trigger_ops :
			&hist_enable_trigger_ops;
	else
		ops = param ? &hist_disable_count_trigger_ops :
			&hist_disable_trigger_ops;

	return ops;
}
static void hist_enable_unreg_all(struct trace_event_file *file)
{
	struct event_trigger_data *test, *n;

	list_for_each_entry_safe(test, n, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_HIST_ENABLE) {
			list_del_rcu(&test->list);
			update_cond_flag(file);
			trace_event_trigger_enable_disable(file, 0);
			if (test->ops->free)
				test->ops->free(test->ops, test);
		}
	}
}
static struct event_command trigger_hist_enable_cmd = {
	.name			= ENABLE_HIST_STR,
	.trigger_type		= ETT_HIST_ENABLE,
	.func			= event_enable_trigger_func,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.unreg_all		= hist_enable_unreg_all,
	.get_trigger_ops	= hist_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

static struct event_command trigger_hist_disable_cmd = {
	.name			= DISABLE_HIST_STR,
	.trigger_type		= ETT_HIST_ENABLE,
	.func			= event_enable_trigger_func,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.unreg_all		= hist_enable_unreg_all,
	.get_trigger_ops	= hist_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
static __init void unregister_trigger_hist_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_hist_enable_cmd);
	unregister_event_command(&trigger_hist_disable_cmd);
}
__init int register_trigger_hist_enable_disable_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_hist_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_hist_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_hist_enable_disable_cmds();

	return ret;
}
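
/*
 * Module init: register the synthetic event dynamic-event ops and
 * create the top-level 'synthetic_events' tracefs control file.
 */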
static __init int trace_events_hist_init(void)
{
	struct dentry *entry = NULL;
	struct dentry *d_tracer;
	int err = 0;

	err = dyn_event_register(&synth_event_ops);
	if (err) {
		pr_warn("Could not register synth_event_ops\n");
		return err;
	}

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer)) {
		err = PTR_ERR(d_tracer);
		goto err;
	}

	entry = tracefs_create_file("synthetic_events", 0644, d_tracer,
				    NULL, &synth_events_fops);
	if (!entry) {
		err = -ENODEV;
		goto err;
	}

	return err;
 err:
	pr_warn("Could not create tracefs 'synthetic_events' entry\n");

	return err;
}

fs_initcall(trace_events_hist_init);