// SPDX-License-Identifier: GPL-2.0
/*
 * trace_events_hist - trace event hist triggers
 *
 * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
 */

#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/rculist.h>
#include <linux/tracefs.h>

/* for gfp flag names */
#include <linux/trace_events.h>
#include <trace/events/mmflags.h>

#include "tracing_map.h"
#include "trace_synth.h"
#define ERRORS								\
	C(NONE,			"No error"),				\
	C(DUPLICATE_VAR,	"Variable already defined"),		\
	C(VAR_NOT_UNIQUE,	"Variable name not unique, need to use fully qualified name (subsys.event.var) for variable"), \
	C(TOO_MANY_VARS,	"Too many variables defined"),		\
	C(MALFORMED_ASSIGNMENT,	"Malformed assignment"),		\
	C(NAMED_MISMATCH,	"Named hist trigger doesn't match existing named trigger (includes variables)"), \
	C(TRIGGER_EEXIST,	"Hist trigger already exists"),		\
	C(TRIGGER_ENOENT_CLEAR,	"Can't clear or continue a nonexistent hist trigger"), \
	C(SET_CLOCK_FAIL,	"Couldn't set trace_clock"),		\
	C(BAD_FIELD_MODIFIER,	"Invalid field modifier"),		\
	C(TOO_MANY_SUBEXPR,	"Too many subexpressions (3 max)"),	\
	C(TIMESTAMP_MISMATCH,	"Timestamp units in expression don't match"), \
	C(TOO_MANY_FIELD_VARS,	"Too many field variables defined"),	\
	C(EVENT_FILE_NOT_FOUND,	"Event file not found"),		\
	C(HIST_NOT_FOUND,	"Matching event histogram not found"),	\
	C(HIST_CREATE_FAIL,	"Couldn't create histogram for field"),	\
	C(SYNTH_VAR_NOT_FOUND,	"Couldn't find synthetic variable"),	\
	C(SYNTH_EVENT_NOT_FOUND,"Couldn't find synthetic event"),	\
	C(SYNTH_TYPE_MISMATCH,	"Param type doesn't match synthetic event field type"), \
	C(SYNTH_COUNT_MISMATCH,	"Param count doesn't match synthetic event field count"), \
	C(FIELD_VAR_PARSE_FAIL,	"Couldn't parse field variable"),	\
	C(VAR_CREATE_FIND_FAIL,	"Couldn't create or find variable"),	\
	C(ONX_NOT_VAR,		"For onmax(x) or onchange(x), x must be a variable"), \
	C(ONX_VAR_NOT_FOUND,	"Couldn't find onmax or onchange variable"), \
	C(ONX_VAR_CREATE_FAIL,	"Couldn't create onmax or onchange variable"), \
	C(FIELD_VAR_CREATE_FAIL,"Couldn't create field variable"),	\
	C(TOO_MANY_PARAMS,	"Too many action params"),		\
	C(PARAM_NOT_FOUND,	"Couldn't find param"),			\
	C(INVALID_PARAM,	"Invalid action param"),		\
	C(ACTION_NOT_FOUND,	"No action found"),			\
	C(NO_SAVE_PARAMS,	"No params found for save()"),		\
	C(TOO_MANY_SAVE_ACTIONS,"Can't have more than one save() action per hist"), \
	C(ACTION_MISMATCH,	"Handler doesn't support action"),	\
	C(NO_CLOSING_PAREN,	"No closing paren found"),		\
	C(SUBSYS_NOT_FOUND,	"Missing subsystem"),			\
	C(INVALID_SUBSYS_EVENT,	"Invalid subsystem or event name"),	\
	C(INVALID_REF_KEY,	"Using variable references in keys not supported"), \
	C(VAR_NOT_FOUND,	"Couldn't find variable"),		\
	C(FIELD_NOT_FOUND,	"Couldn't find field"),			\
	C(EMPTY_ASSIGNMENT,	"Empty assignment"),			\
	C(INVALID_SORT_MODIFIER,"Invalid sort modifier"),		\
	C(EMPTY_SORT_FIELD,	"Empty sort field"),			\
	C(TOO_MANY_SORT_FIELDS,	"Too many sort fields (Max = 2)"),	\
	C(INVALID_SORT_FIELD,	"Sort field must be a key or a val"),

#undef C
#define C(a, b)		HIST_ERR_##a

enum { ERRORS };

#undef C
#define C(a, b)		b

static const char *err_text[] = { ERRORS };
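/*
 * Illustrative note (assumed usage, not part of the original source):
 * these strings are emitted through hist_err()/tracing_log_err() below
 * and surface in the tracefs error_log file, roughly as:
 *
 *   hist:sched:sched_switch: error: Couldn't find field
 *     Command: hist:keys=nonexistent_field
 *                        ^
 */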
typedef u64 (*hist_field_fn_t) (struct hist_field *field,
				struct tracing_map_elt *elt,
				struct ring_buffer_event *rbe,
				void *event);

#define HIST_FIELD_OPERANDS_MAX	2
#define HIST_FIELDS_MAX		(TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX)
#define HIST_ACTIONS_MAX	8
/*
 * A hist_var (histogram variable) contains variable information for
 * hist_fields having the HIST_FIELD_FL_VAR or HIST_FIELD_FL_VAR_REF
 * flag set.  A hist_var has a variable name e.g. ts0, and is
 * associated with a given histogram trigger, as specified by
 * hist_data.  The hist_var idx is the unique index assigned to the
 * variable by the hist trigger's tracing_map.  The idx is what is
 * used to set a variable's value and, by a variable reference, to
 * retrieve it.
 */
struct hist_var {
	char				*name;
	struct hist_trigger_data	*hist_data;
	unsigned int			idx;
};

struct hist_field {
	struct ftrace_event_field	*field;
	unsigned long			flags;
	hist_field_fn_t			fn;
	unsigned int			ref;
	unsigned int			size;
	unsigned int			offset;
	unsigned int			is_signed;
	const char			*type;
	struct hist_field		*operands[HIST_FIELD_OPERANDS_MAX];
	struct hist_trigger_data	*hist_data;

	/*
	 * Variable fields contain variable-specific info in var.
	 */
	struct hist_var			var;
	enum field_op_id		operator;
	char				*system;
	char				*event_name;

	/*
	 * The name field is used for EXPR and VAR_REF fields.  VAR
	 * fields contain the variable name in var.name.
	 */
	char				*name;

	/*
	 * When a histogram trigger is hit, if it has any references
	 * to variables, the values of those variables are collected
	 * into a var_ref_vals array by resolve_var_refs().  The
	 * current value of each variable is read from the tracing_map
	 * using the hist field's hist_var.idx and entered into the
	 * var_ref_idx entry i.e. var_ref_vals[var_ref_idx].
	 */
	unsigned int			var_ref_idx;
	bool				read_once;

	unsigned int			var_str_idx;
};
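/*
 * Illustrative usage (assumed, not part of the original source): a
 * variable is defined and then referenced with hist trigger syntax
 * such as
 *
 *   # echo 'hist:keys=pid:ts0=common_timestamp.usecs' >> \
 *         events/sched/sched_waking/trigger
 *   # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0' >> \
 *         events/sched/sched_switch/trigger
 *
 * Here ts0 is a hist_field with HIST_FIELD_FL_VAR set on sched_waking,
 * and $ts0 on sched_switch is a hist_field with HIST_FIELD_FL_VAR_REF
 * set that reads ts0 back through its hist_var.idx.
 */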
static u64 hist_field_none(struct hist_field *field,
			   struct tracing_map_elt *elt,
			   struct ring_buffer_event *rbe,
			   void *event)
{
	return 0;
}

static u64 hist_field_counter(struct hist_field *field,
			      struct tracing_map_elt *elt,
			      struct ring_buffer_event *rbe,
			      void *event)
{
	return 1;
}

static u64 hist_field_string(struct hist_field *hist_field,
			     struct tracing_map_elt *elt,
			     struct ring_buffer_event *rbe,
			     void *event)
{
	char *addr = (char *)(event + hist_field->field->offset);

	return (u64)(unsigned long)addr;
}

static u64 hist_field_dynstring(struct hist_field *hist_field,
				struct tracing_map_elt *elt,
				struct ring_buffer_event *rbe,
				void *event)
{
	u32 str_item = *(u32 *)(event + hist_field->field->offset);
	int str_loc = str_item & 0xffff;
	char *addr = (char *)(event + str_loc);

	return (u64)(unsigned long)addr;
}

static u64 hist_field_pstring(struct hist_field *hist_field,
			      struct tracing_map_elt *elt,
			      struct ring_buffer_event *rbe,
			      void *event)
{
	char **addr = (char **)(event + hist_field->field->offset);

	return (u64)(unsigned long)*addr;
}

static u64 hist_field_log2(struct hist_field *hist_field,
			   struct tracing_map_elt *elt,
			   struct ring_buffer_event *rbe,
			   void *event)
{
	struct hist_field *operand = hist_field->operands[0];

	u64 val = operand->fn(operand, elt, rbe, event);

	return (u64) ilog2(roundup_pow_of_two(val));
}
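/*
 * Illustrative usage (assumed): the .log2 modifier maps a value onto
 * power-of-two buckets, e.g.
 *
 *   # echo 'hist:keys=bytes_req.log2' >> events/kmem/kmalloc/trigger
 *
 * so a bytes_req of 400 lands in bucket 9, since
 * ilog2(roundup_pow_of_two(400)) == ilog2(512) == 9.
 */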
static u64 hist_field_plus(struct hist_field *hist_field,
			   struct tracing_map_elt *elt,
			   struct ring_buffer_event *rbe,
			   void *event)
{
	struct hist_field *operand1 = hist_field->operands[0];
	struct hist_field *operand2 = hist_field->operands[1];

	u64 val1 = operand1->fn(operand1, elt, rbe, event);
	u64 val2 = operand2->fn(operand2, elt, rbe, event);

	return val1 + val2;
}

static u64 hist_field_minus(struct hist_field *hist_field,
			    struct tracing_map_elt *elt,
			    struct ring_buffer_event *rbe,
			    void *event)
{
	struct hist_field *operand1 = hist_field->operands[0];
	struct hist_field *operand2 = hist_field->operands[1];

	u64 val1 = operand1->fn(operand1, elt, rbe, event);
	u64 val2 = operand2->fn(operand2, elt, rbe, event);

	return val1 - val2;
}

static u64 hist_field_unary_minus(struct hist_field *hist_field,
				  struct tracing_map_elt *elt,
				  struct ring_buffer_event *rbe,
				  void *event)
{
	struct hist_field *operand = hist_field->operands[0];

	s64 sval = (s64)operand->fn(operand, elt, rbe, event);
	u64 val = (u64)-sval;

	return val;
}

#define DEFINE_HIST_FIELD_FN(type)					\
	static u64 hist_field_##type(struct hist_field *hist_field,	\
				     struct tracing_map_elt *elt,	\
				     struct ring_buffer_event *rbe,	\
				     void *event)			\
{									\
	type *addr = (type *)(event + hist_field->field->offset);	\
									\
	return (u64)(unsigned long)*addr;				\
}

DEFINE_HIST_FIELD_FN(s64);
DEFINE_HIST_FIELD_FN(u64);
DEFINE_HIST_FIELD_FN(s32);
DEFINE_HIST_FIELD_FN(u32);
DEFINE_HIST_FIELD_FN(s16);
DEFINE_HIST_FIELD_FN(u16);
DEFINE_HIST_FIELD_FN(s8);
DEFINE_HIST_FIELD_FN(u8);
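/*
 * Expansion sketch (illustrative, not additional source): each
 * DEFINE_HIST_FIELD_FN(u32) above expands to a fetch function of the
 * form
 *
 *   static u64 hist_field_u32(struct hist_field *hist_field,
 *			       struct tracing_map_elt *elt,
 *			       struct ring_buffer_event *rbe,
 *			       void *event)
 *   {
 *	   u32 *addr = (u32 *)(event + hist_field->field->offset);
 *
 *	   return (u64)(unsigned long)*addr;
 *   }
 *
 * which select_value_fn() later chooses based on field size and
 * signedness.
 */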
#define for_each_hist_field(i, hist_data)	\
	for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)

#define for_each_hist_val_field(i, hist_data)	\
	for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)

#define for_each_hist_key_field(i, hist_data)	\
	for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)

#define HIST_STACKTRACE_DEPTH	16
#define HIST_STACKTRACE_SIZE	(HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
#define HIST_STACKTRACE_SKIP	5

#define HITCOUNT_IDX		0
#define HIST_KEY_SIZE_MAX	(MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
enum hist_field_flags {
	HIST_FIELD_FL_HITCOUNT		= 1 << 0,
	HIST_FIELD_FL_KEY		= 1 << 1,
	HIST_FIELD_FL_STRING		= 1 << 2,
	HIST_FIELD_FL_HEX		= 1 << 3,
	HIST_FIELD_FL_SYM		= 1 << 4,
	HIST_FIELD_FL_SYM_OFFSET	= 1 << 5,
	HIST_FIELD_FL_EXECNAME		= 1 << 6,
	HIST_FIELD_FL_SYSCALL		= 1 << 7,
	HIST_FIELD_FL_STACKTRACE	= 1 << 8,
	HIST_FIELD_FL_LOG2		= 1 << 9,
	HIST_FIELD_FL_TIMESTAMP		= 1 << 10,
	HIST_FIELD_FL_TIMESTAMP_USECS	= 1 << 11,
	HIST_FIELD_FL_VAR		= 1 << 12,
	HIST_FIELD_FL_EXPR		= 1 << 13,
	HIST_FIELD_FL_VAR_REF		= 1 << 14,
	HIST_FIELD_FL_CPU		= 1 << 15,
	HIST_FIELD_FL_ALIAS		= 1 << 16,
};
struct var_defs {
	unsigned int	n_vars;
	char		*name[TRACING_MAP_VARS_MAX];
	char		*expr[TRACING_MAP_VARS_MAX];
};

struct hist_trigger_attrs {
	char		*keys_str;
	char		*vals_str;
	char		*sort_key_str;
	char		*name;
	char		*clock;
	bool		pause;
	bool		cont;
	bool		clear;
	bool		ts_in_usecs;
	unsigned int	map_bits;

	char		*assignment_str[TRACING_MAP_VARS_MAX];
	unsigned int	n_assignments;

	char		*action_str[HIST_ACTIONS_MAX];
	unsigned int	n_actions;

	struct var_defs	var_defs;
};

struct field_var {
	struct hist_field	*var;
	struct hist_field	*val;
};

struct field_var_hist {
	struct hist_trigger_data	*hist_data;
	char				*cmd;
};
struct hist_trigger_data {
	struct hist_field		*fields[HIST_FIELDS_MAX];
	unsigned int			n_vals;
	unsigned int			n_keys;
	unsigned int			n_fields;
	unsigned int			n_vars;
	unsigned int			n_var_str;
	unsigned int			key_size;
	struct tracing_map_sort_key	sort_keys[TRACING_MAP_SORT_KEYS_MAX];
	unsigned int			n_sort_keys;
	struct trace_event_file		*event_file;
	struct hist_trigger_attrs	*attrs;
	struct tracing_map		*map;
	bool				enable_timestamps;
	bool				remove;
	struct hist_field		*var_refs[TRACING_MAP_VARS_MAX];
	unsigned int			n_var_refs;

	struct action_data		*actions[HIST_ACTIONS_MAX];
	unsigned int			n_actions;

	struct field_var		*field_vars[SYNTH_FIELDS_MAX];
	unsigned int			n_field_vars;
	unsigned int			n_field_var_str;
	struct field_var_hist		*field_var_hists[SYNTH_FIELDS_MAX];
	unsigned int			n_field_var_hists;

	struct field_var		*save_vars[SYNTH_FIELDS_MAX];
	unsigned int			n_save_vars;
	unsigned int			n_save_var_str;
};
typedef void (*action_fn_t) (struct hist_trigger_data *hist_data,
			     struct tracing_map_elt *elt, void *rec,
			     struct ring_buffer_event *rbe, void *key,
			     struct action_data *data, u64 *var_ref_vals);

typedef bool (*check_track_val_fn_t) (u64 track_val, u64 var_val);
struct action_data {
	enum handler_id		handler;
	enum action_id		action;

	unsigned int		n_params;
	char			*params[SYNTH_FIELDS_MAX];

	/*
	 * When a histogram trigger is hit, the values of any
	 * references to variables, including variables being passed
	 * as parameters to synthetic events, are collected into a
	 * var_ref_vals array.  This var_ref_idx array is an array of
	 * indices into the var_ref_vals array, one for each synthetic
	 * event param, and is passed to the synthetic event
	 * invocation.
	 */
	unsigned int		var_ref_idx[TRACING_MAP_VARS_MAX];
	struct synth_event	*synth_event;
	bool			use_trace_keyword;
	char			*synth_event_name;

	union {
		struct {
			char			*event;
			char			*event_system;
		} match_data;

		struct {
			/*
			 * var_str contains the $-unstripped variable
			 * name referenced by var_ref, and used when
			 * printing the action.  Because var_ref
			 * creation is deferred to create_actions(),
			 * we need a per-action way to save it until
			 * then, thus var_str.
			 */
			char			*var_str;

			/*
			 * var_ref refers to the variable being
			 * tracked e.g onmax($var).
			 */
			struct hist_field	*var_ref;

			/*
			 * track_var contains the 'invisible' tracking
			 * variable created to keep the current
			 * tracked value.
			 */
			struct hist_field	*track_var;

			check_track_val_fn_t	check_val;
			action_fn_t		save_data;
		} track_data;
	};
};
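/*
 * Illustrative usage (assumed): the handler/action pairs parsed into
 * this structure come from trigger strings such as
 *
 *   onmax($wakeup_lat).save(next_comm,prev_pid)
 *   onchange($cpu_freq).snapshot()
 *   onmatch(sched.sched_waking).wakeup_latency($wakeup_lat,next_pid)
 *
 * where the onmatch() form fills match_data and the onmax()/onchange()
 * forms fill track_data.
 */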
struct track_data {
	u64				track_val;
	bool				updated;

	unsigned int			key_len;
	void				*key;
	struct tracing_map_elt		elt;

	struct action_data		*action_data;
	struct hist_trigger_data	*hist_data;
};

struct hist_elt_data {
	char *comm;
	u64 *var_ref_vals;
	char *field_var_str[SYNTH_FIELDS_MAX];
};

struct snapshot_context {
	struct tracing_map_elt	*elt;
	void			*key;
};

static void track_data_free(struct track_data *track_data)
{
	struct hist_elt_data *elt_data;

	if (!track_data)
		return;

	kfree(track_data->key);

	elt_data = track_data->elt.private_data;
	if (elt_data) {
		kfree(elt_data->comm);
		kfree(elt_data);
	}

	kfree(track_data);
}
static struct track_data *track_data_alloc(unsigned int key_len,
					   struct action_data *action_data,
					   struct hist_trigger_data *hist_data)
{
	struct track_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
	struct hist_elt_data *elt_data;

	if (!data)
		return ERR_PTR(-ENOMEM);

	data->key = kzalloc(key_len, GFP_KERNEL);
	if (!data->key) {
		track_data_free(data);
		return ERR_PTR(-ENOMEM);
	}

	data->key_len = key_len;
	data->action_data = action_data;
	data->hist_data = hist_data;

	elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
	if (!elt_data) {
		track_data_free(data);
		return ERR_PTR(-ENOMEM);
	}

	data->elt.private_data = elt_data;

	elt_data->comm = kzalloc(TASK_COMM_LEN, GFP_KERNEL);
	if (!elt_data->comm) {
		track_data_free(data);
		return ERR_PTR(-ENOMEM);
	}

	return data;
}
static char last_cmd[MAX_FILTER_STR_VAL];
static char last_cmd_loc[MAX_FILTER_STR_VAL];

static int errpos(char *str)
{
	return err_pos(last_cmd, str);
}

static void last_cmd_set(struct trace_event_file *file, char *str)
{
	const char *system = NULL, *name = NULL;
	struct trace_event_call *call;

	if (!str)
		return;

	strcpy(last_cmd, "hist:");
	strncat(last_cmd, str, MAX_FILTER_STR_VAL - 1 - sizeof("hist:"));

	if (file) {
		call = file->event_call;
		system = call->class->system;
		if (system) {
			name = trace_event_name(call);
			if (!name)
				system = NULL;
		}
	}

	if (system)
		snprintf(last_cmd_loc, MAX_FILTER_STR_VAL, "hist:%s:%s", system, name);
}

static void hist_err(struct trace_array *tr, u8 err_type, u8 err_pos)
{
	tracing_log_err(tr, last_cmd_loc, last_cmd, err_text,
			err_type, err_pos);
}

static void hist_err_clear(void)
{
	last_cmd[0] = '\0';
	last_cmd_loc[0] = '\0';
}
typedef void (*synth_probe_func_t) (void *__data, u64 *var_ref_vals,
				    unsigned int *var_ref_idx);

static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals,
			       unsigned int *var_ref_idx)
{
	struct tracepoint *tp = event->tp;

	if (unlikely(atomic_read(&tp->key.enabled) > 0)) {
		struct tracepoint_func *probe_func_ptr;
		synth_probe_func_t probe_func;
		void *__data;

		if (!(cpu_online(raw_smp_processor_id())))
			return;

		probe_func_ptr = rcu_dereference_sched((tp)->funcs);
		if (probe_func_ptr) {
			do {
				probe_func = probe_func_ptr->func;
				__data = probe_func_ptr->data;
				probe_func(__data, var_ref_vals, var_ref_idx);
			} while ((++probe_func_ptr)->func);
		}
	}
}

static void action_trace(struct hist_trigger_data *hist_data,
			 struct tracing_map_elt *elt, void *rec,
			 struct ring_buffer_event *rbe, void *key,
			 struct action_data *data, u64 *var_ref_vals)
{
	struct synth_event *event = data->synth_event;

	trace_synth(event, var_ref_vals, data->var_ref_idx);
}
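/*
 * Illustrative flow (assumed): for an action like
 *
 *   onmatch(sched.sched_waking).wakeup_latency($wakeup_lat,next_pid)
 *
 * action_trace() is the action_fn_t installed for the trace action;
 * the referenced variable values arrive in var_ref_vals, and
 * data->var_ref_idx maps each synthetic event parameter to its slot
 * in that array.
 */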
struct hist_var_data {
	struct list_head list;
	struct hist_trigger_data *hist_data;
};

static u64 hist_field_timestamp(struct hist_field *hist_field,
				struct tracing_map_elt *elt,
				struct ring_buffer_event *rbe,
				void *event)
{
	struct hist_trigger_data *hist_data = hist_field->hist_data;
	struct trace_array *tr = hist_data->event_file->tr;

	u64 ts = ring_buffer_event_time_stamp(rbe);

	if (hist_data->attrs->ts_in_usecs && trace_clock_in_ns(tr))
		ts = ns2usecs(ts);

	return ts;
}

static u64 hist_field_cpu(struct hist_field *hist_field,
			  struct tracing_map_elt *elt,
			  struct ring_buffer_event *rbe,
			  void *event)
{
	int cpu = smp_processor_id();

	return cpu;
}
/**
 * check_field_for_var_ref - Check if a VAR_REF field references a variable
 * @hist_field: The VAR_REF field to check
 * @var_data: The hist trigger that owns the variable
 * @var_idx: The trigger variable identifier
 *
 * Check the given VAR_REF field to see whether or not it references
 * the given variable associated with the given trigger.
 *
 * Return: The VAR_REF field if it does reference the variable, NULL if not
 */
static struct hist_field *
check_field_for_var_ref(struct hist_field *hist_field,
			struct hist_trigger_data *var_data,
			unsigned int var_idx)
{
	WARN_ON(!(hist_field && hist_field->flags & HIST_FIELD_FL_VAR_REF));

	if (hist_field && hist_field->var.idx == var_idx &&
	    hist_field->var.hist_data == var_data)
		return hist_field;

	return NULL;
}

/**
 * find_var_ref - Check if a trigger has a reference to a trigger variable
 * @hist_data: The hist trigger that might have a reference to the variable
 * @var_data: The hist trigger that owns the variable
 * @var_idx: The trigger variable identifier
 *
 * Check the list of var_refs[] on the first hist trigger to see
 * whether any of them are references to the variable on the second
 * trigger.
 *
 * Return: The VAR_REF field referencing the variable if so, NULL if not
 */
static struct hist_field *find_var_ref(struct hist_trigger_data *hist_data,
				       struct hist_trigger_data *var_data,
				       unsigned int var_idx)
{
	struct hist_field *hist_field;
	unsigned int i;

	for (i = 0; i < hist_data->n_var_refs; i++) {
		hist_field = hist_data->var_refs[i];
		if (check_field_for_var_ref(hist_field, var_data, var_idx))
			return hist_field;
	}

	return NULL;
}

/**
 * find_any_var_ref - Check if there is a reference to a given trigger variable
 * @hist_data: The hist trigger
 * @var_idx: The trigger variable identifier
 *
 * Check to see whether the given variable is currently referenced by
 * any other trigger.
 *
 * The trigger the variable is defined on is explicitly excluded - the
 * assumption being that a self-reference doesn't prevent a trigger
 * from being removed.
 *
 * Return: The VAR_REF field referencing the variable if so, NULL if not
 */
static struct hist_field *find_any_var_ref(struct hist_trigger_data *hist_data,
					   unsigned int var_idx)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_field *found = NULL;
	struct hist_var_data *var_data;

	list_for_each_entry(var_data, &tr->hist_vars, list) {
		if (var_data->hist_data == hist_data)
			continue;
		found = find_var_ref(var_data->hist_data, hist_data, var_idx);
		if (found)
			break;
	}

	return found;
}

/**
 * check_var_refs - Check if there is a reference to any of trigger's variables
 * @hist_data: The hist trigger
 *
 * A trigger can define one or more variables.  If any one of them is
 * currently referenced by any other trigger, this function will
 * determine that.
 *
 * Typically used to determine whether or not a trigger can be removed
 * - if there are any references to a trigger's variables, it cannot.
 *
 * Return: True if there is a reference to any of trigger's variables
 */
static bool check_var_refs(struct hist_trigger_data *hist_data)
{
	struct hist_field *field;
	bool found = false;
	int i;

	for_each_hist_field(i, hist_data) {
		field = hist_data->fields[i];
		if (field && field->flags & HIST_FIELD_FL_VAR) {
			if (find_any_var_ref(hist_data, field->var.idx)) {
				found = true;
				break;
			}
		}
	}

	return found;
}

static struct hist_var_data *find_hist_vars(struct hist_trigger_data *hist_data)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_var_data *var_data, *found = NULL;

	list_for_each_entry(var_data, &tr->hist_vars, list) {
		if (var_data->hist_data == hist_data) {
			found = var_data;
			break;
		}
	}

	return found;
}
static bool field_has_hist_vars(struct hist_field *hist_field,
				unsigned int level)
{
	int i;

	if (level > 3)
		return false;

	if (!hist_field)
		return false;

	if (hist_field->flags & HIST_FIELD_FL_VAR ||
	    hist_field->flags & HIST_FIELD_FL_VAR_REF)
		return true;

	for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++) {
		struct hist_field *operand;

		operand = hist_field->operands[i];
		if (field_has_hist_vars(operand, level + 1))
			return true;
	}

	return false;
}

static bool has_hist_vars(struct hist_trigger_data *hist_data)
{
	struct hist_field *hist_field;
	int i;

	for_each_hist_field(i, hist_data) {
		hist_field = hist_data->fields[i];
		if (field_has_hist_vars(hist_field, 0))
			return true;
	}

	return false;
}

static int save_hist_vars(struct hist_trigger_data *hist_data)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_var_data *var_data;

	var_data = find_hist_vars(hist_data);
	if (var_data)
		return 0;

	if (tracing_check_open_get_tr(tr))
		return -ENODEV;

	var_data = kzalloc(sizeof(*var_data), GFP_KERNEL);
	if (!var_data) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	var_data->hist_data = hist_data;
	list_add(&var_data->list, &tr->hist_vars);

	return 0;
}

static void remove_hist_vars(struct hist_trigger_data *hist_data)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_var_data *var_data;

	var_data = find_hist_vars(hist_data);
	if (!var_data)
		return;

	if (WARN_ON(check_var_refs(hist_data)))
		return;

	list_del(&var_data->list);

	kfree(var_data);

	trace_array_put(tr);
}
*find_var_field(struct hist_trigger_data
*hist_data
,
867 const char *var_name
)
869 struct hist_field
*hist_field
, *found
= NULL
;
872 for_each_hist_field(i
, hist_data
) {
873 hist_field
= hist_data
->fields
[i
];
874 if (hist_field
&& hist_field
->flags
& HIST_FIELD_FL_VAR
&&
875 strcmp(hist_field
->var
.name
, var_name
) == 0) {
884 static struct hist_field
*find_var(struct hist_trigger_data
*hist_data
,
885 struct trace_event_file
*file
,
886 const char *var_name
)
888 struct hist_trigger_data
*test_data
;
889 struct event_trigger_data
*test
;
890 struct hist_field
*hist_field
;
892 lockdep_assert_held(&event_mutex
);
894 hist_field
= find_var_field(hist_data
, var_name
);
898 list_for_each_entry(test
, &file
->triggers
, list
) {
899 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
900 test_data
= test
->private_data
;
901 hist_field
= find_var_field(test_data
, var_name
);
910 static struct trace_event_file
*find_var_file(struct trace_array
*tr
,
915 struct hist_trigger_data
*var_hist_data
;
916 struct hist_var_data
*var_data
;
917 struct trace_event_file
*file
, *found
= NULL
;
920 return find_event_file(tr
, system
, event_name
);
922 list_for_each_entry(var_data
, &tr
->hist_vars
, list
) {
923 var_hist_data
= var_data
->hist_data
;
924 file
= var_hist_data
->event_file
;
928 if (find_var_field(var_hist_data
, var_name
)) {
930 hist_err(tr
, HIST_ERR_VAR_NOT_UNIQUE
, errpos(var_name
));
941 static struct hist_field
*find_file_var(struct trace_event_file
*file
,
942 const char *var_name
)
944 struct hist_trigger_data
*test_data
;
945 struct event_trigger_data
*test
;
946 struct hist_field
*hist_field
;
948 lockdep_assert_held(&event_mutex
);
950 list_for_each_entry(test
, &file
->triggers
, list
) {
951 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
952 test_data
= test
->private_data
;
953 hist_field
= find_var_field(test_data
, var_name
);
962 static struct hist_field
*
963 find_match_var(struct hist_trigger_data
*hist_data
, char *var_name
)
965 struct trace_array
*tr
= hist_data
->event_file
->tr
;
966 struct hist_field
*hist_field
, *found
= NULL
;
967 struct trace_event_file
*file
;
970 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
971 struct action_data
*data
= hist_data
->actions
[i
];
973 if (data
->handler
== HANDLER_ONMATCH
) {
974 char *system
= data
->match_data
.event_system
;
975 char *event_name
= data
->match_data
.event
;
977 file
= find_var_file(tr
, system
, event_name
, var_name
);
980 hist_field
= find_file_var(file
, var_name
);
983 hist_err(tr
, HIST_ERR_VAR_NOT_UNIQUE
,
985 return ERR_PTR(-EINVAL
);
995 static struct hist_field
*find_event_var(struct hist_trigger_data
*hist_data
,
1000 struct trace_array
*tr
= hist_data
->event_file
->tr
;
1001 struct hist_field
*hist_field
= NULL
;
1002 struct trace_event_file
*file
;
1004 if (!system
|| !event_name
) {
1005 hist_field
= find_match_var(hist_data
, var_name
);
1006 if (IS_ERR(hist_field
))
1012 file
= find_var_file(tr
, system
, event_name
, var_name
);
1016 hist_field
= find_file_var(file
, var_name
);
1021 static u64
hist_field_var_ref(struct hist_field
*hist_field
,
1022 struct tracing_map_elt
*elt
,
1023 struct ring_buffer_event
*rbe
,
1026 struct hist_elt_data
*elt_data
;
1029 if (WARN_ON_ONCE(!elt
))
1032 elt_data
= elt
->private_data
;
1033 var_val
= elt_data
->var_ref_vals
[hist_field
->var_ref_idx
];
1038 static bool resolve_var_refs(struct hist_trigger_data
*hist_data
, void *key
,
1039 u64
*var_ref_vals
, bool self
)
1041 struct hist_trigger_data
*var_data
;
1042 struct tracing_map_elt
*var_elt
;
1043 struct hist_field
*hist_field
;
1044 unsigned int i
, var_idx
;
1045 bool resolved
= true;
1048 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
1049 hist_field
= hist_data
->var_refs
[i
];
1050 var_idx
= hist_field
->var
.idx
;
1051 var_data
= hist_field
->var
.hist_data
;
1053 if (var_data
== NULL
) {
1058 if ((self
&& var_data
!= hist_data
) ||
1059 (!self
&& var_data
== hist_data
))
1062 var_elt
= tracing_map_lookup(var_data
->map
, key
);
1068 if (!tracing_map_var_set(var_elt
, var_idx
)) {
1073 if (self
|| !hist_field
->read_once
)
1074 var_val
= tracing_map_read_var(var_elt
, var_idx
);
1076 var_val
= tracing_map_read_var_once(var_elt
, var_idx
);
1078 var_ref_vals
[i
] = var_val
;
static const char *hist_field_name(struct hist_field *field,
				   unsigned int level)
{
	const char *field_name = "";

	if (level > 1)
		return field_name;

	if (field->field)
		field_name = field->field->name;
	else if (field->flags & HIST_FIELD_FL_LOG2 ||
		 field->flags & HIST_FIELD_FL_ALIAS)
		field_name = hist_field_name(field->operands[0], ++level);
	else if (field->flags & HIST_FIELD_FL_CPU)
		field_name = "cpu";
	else if (field->flags & HIST_FIELD_FL_EXPR ||
		 field->flags & HIST_FIELD_FL_VAR_REF) {
		if (field->system) {
			static char full_name[MAX_FILTER_STR_VAL];

			strcat(full_name, field->system);
			strcat(full_name, ".");
			strcat(full_name, field->event_name);
			strcat(full_name, ".");
			strcat(full_name, field->name);
			field_name = full_name;
		} else
			field_name = field->name;
	} else if (field->flags & HIST_FIELD_FL_TIMESTAMP)
		field_name = "common_timestamp";

	if (field_name == NULL)
		field_name = "";

	return field_name;
}

static hist_field_fn_t select_value_fn(int field_size, int field_is_signed)
{
	hist_field_fn_t fn = NULL;

	switch (field_size) {
	case 8:
		if (field_is_signed)
			fn = hist_field_s64;
		else
			fn = hist_field_u64;
		break;
	case 4:
		if (field_is_signed)
			fn = hist_field_s32;
		else
			fn = hist_field_u32;
		break;
	case 2:
		if (field_is_signed)
			fn = hist_field_s16;
		else
			fn = hist_field_u16;
		break;
	case 1:
		if (field_is_signed)
			fn = hist_field_s8;
		else
			fn = hist_field_u8;
		break;
	}

	return fn;
}
static int parse_map_size(char *str)
{
	unsigned long size, map_bits;
	int ret;

	ret = kstrtoul(str, 0, &size);
	if (ret)
		goto out;

	map_bits = ilog2(roundup_pow_of_two(size));
	if (map_bits < TRACING_MAP_BITS_MIN ||
	    map_bits > TRACING_MAP_BITS_MAX)
		ret = -EINVAL;
	else
		ret = map_bits;
 out:
	return ret;
}
*attrs
)
1181 for (i
= 0; i
< attrs
->n_assignments
; i
++)
1182 kfree(attrs
->assignment_str
[i
]);
1184 for (i
= 0; i
< attrs
->n_actions
; i
++)
1185 kfree(attrs
->action_str
[i
]);
1188 kfree(attrs
->sort_key_str
);
1189 kfree(attrs
->keys_str
);
1190 kfree(attrs
->vals_str
);
1191 kfree(attrs
->clock
);
1195 static int parse_action(char *str
, struct hist_trigger_attrs
*attrs
)
1199 if (attrs
->n_actions
>= HIST_ACTIONS_MAX
)
1202 if ((str_has_prefix(str
, "onmatch(")) ||
1203 (str_has_prefix(str
, "onmax(")) ||
1204 (str_has_prefix(str
, "onchange("))) {
1205 attrs
->action_str
[attrs
->n_actions
] = kstrdup(str
, GFP_KERNEL
);
1206 if (!attrs
->action_str
[attrs
->n_actions
]) {
1216 static int parse_assignment(struct trace_array
*tr
,
1217 char *str
, struct hist_trigger_attrs
*attrs
)
1221 if ((len
= str_has_prefix(str
, "key=")) ||
1222 (len
= str_has_prefix(str
, "keys="))) {
1223 attrs
->keys_str
= kstrdup(str
+ len
, GFP_KERNEL
);
1224 if (!attrs
->keys_str
) {
1228 } else if ((len
= str_has_prefix(str
, "val=")) ||
1229 (len
= str_has_prefix(str
, "vals=")) ||
1230 (len
= str_has_prefix(str
, "values="))) {
1231 attrs
->vals_str
= kstrdup(str
+ len
, GFP_KERNEL
);
1232 if (!attrs
->vals_str
) {
1236 } else if ((len
= str_has_prefix(str
, "sort="))) {
1237 attrs
->sort_key_str
= kstrdup(str
+ len
, GFP_KERNEL
);
1238 if (!attrs
->sort_key_str
) {
1242 } else if (str_has_prefix(str
, "name=")) {
1243 attrs
->name
= kstrdup(str
, GFP_KERNEL
);
1248 } else if ((len
= str_has_prefix(str
, "clock="))) {
1251 str
= strstrip(str
);
1252 attrs
->clock
= kstrdup(str
, GFP_KERNEL
);
1253 if (!attrs
->clock
) {
1257 } else if ((len
= str_has_prefix(str
, "size="))) {
1258 int map_bits
= parse_map_size(str
+ len
);
1264 attrs
->map_bits
= map_bits
;
1268 if (attrs
->n_assignments
== TRACING_MAP_VARS_MAX
) {
1269 hist_err(tr
, HIST_ERR_TOO_MANY_VARS
, errpos(str
));
1274 assignment
= kstrdup(str
, GFP_KERNEL
);
1280 attrs
->assignment_str
[attrs
->n_assignments
++] = assignment
;
static struct hist_trigger_attrs *
parse_hist_trigger_attrs(struct trace_array *tr, char *trigger_str)
{
	struct hist_trigger_attrs *attrs;
	int ret = 0;

	attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return ERR_PTR(-ENOMEM);

	while (trigger_str) {
		char *str = strsep(&trigger_str, ":");
		char *rhs;

		rhs = strchr(str, '=');
		if (rhs) {
			if (!strlen(++rhs)) {
				ret = -EINVAL;
				hist_err(tr, HIST_ERR_EMPTY_ASSIGNMENT, errpos(str));
				goto free;
			}
			ret = parse_assignment(tr, str, attrs);
			if (ret)
				goto free;
		} else if (strcmp(str, "pause") == 0)
			attrs->pause = true;
		else if ((strcmp(str, "cont") == 0) ||
			 (strcmp(str, "continue") == 0))
			attrs->cont = true;
		else if (strcmp(str, "clear") == 0)
			attrs->clear = true;
		else {
			ret = parse_action(str, attrs);
			if (ret)
				goto free;
		}
	}

	if (!attrs->keys_str) {
		ret = -EINVAL;
		goto free;
	}

	if (!attrs->clock) {
		attrs->clock = kstrdup("global", GFP_KERNEL);
		if (!attrs->clock) {
			ret = -ENOMEM;
			goto free;
		}
	}

	return attrs;
 free:
	destroy_hist_trigger_attrs(attrs);

	return ERR_PTR(ret);
}

static inline void save_comm(char *comm, struct task_struct *task)
{
	if (!task->pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(task->pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	strncpy(comm, task->comm, TASK_COMM_LEN);
}
static void hist_elt_data_free(struct hist_elt_data *elt_data)
{
	unsigned int i;

	for (i = 0; i < SYNTH_FIELDS_MAX; i++)
		kfree(elt_data->field_var_str[i]);

	kfree(elt_data->comm);
	kfree(elt_data);
}

static void hist_trigger_elt_data_free(struct tracing_map_elt *elt)
{
	struct hist_elt_data *elt_data = elt->private_data;

	hist_elt_data_free(elt_data);
}

static int hist_trigger_elt_data_alloc(struct tracing_map_elt *elt)
{
	struct hist_trigger_data *hist_data = elt->map->private_data;
	unsigned int size = TASK_COMM_LEN;
	struct hist_elt_data *elt_data;
	struct hist_field *key_field;
	unsigned int i, n_str;

	elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
	if (!elt_data)
		return -ENOMEM;

	for_each_hist_key_field(i, hist_data) {
		key_field = hist_data->fields[i];

		if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
			elt_data->comm = kzalloc(size, GFP_KERNEL);
			if (!elt_data->comm) {
				kfree(elt_data);
				return -ENOMEM;
			}
			break;
		}
	}

	n_str = hist_data->n_field_var_str + hist_data->n_save_var_str +
		hist_data->n_var_str;
	if (n_str > SYNTH_FIELDS_MAX) {
		hist_elt_data_free(elt_data);
		return -EINVAL;
	}

	BUILD_BUG_ON(STR_VAR_LEN_MAX & (sizeof(u64) - 1));

	size = STR_VAR_LEN_MAX;

	for (i = 0; i < n_str; i++) {
		elt_data->field_var_str[i] = kzalloc(size, GFP_KERNEL);
		if (!elt_data->field_var_str[i]) {
			hist_elt_data_free(elt_data);
			return -ENOMEM;
		}
	}

	elt->private_data = elt_data;

	return 0;
}

static void hist_trigger_elt_data_init(struct tracing_map_elt *elt)
{
	struct hist_elt_data *elt_data = elt->private_data;

	if (elt_data->comm)
		save_comm(elt_data->comm, current);
}

static const struct tracing_map_ops hist_trigger_elt_data_ops = {
	.elt_alloc	= hist_trigger_elt_data_alloc,
	.elt_free	= hist_trigger_elt_data_free,
	.elt_init	= hist_trigger_elt_data_init,
};
static const char *get_hist_field_flags(struct hist_field *hist_field)
{
	const char *flags_str = NULL;

	if (hist_field->flags & HIST_FIELD_FL_HEX)
		flags_str = "hex";
	else if (hist_field->flags & HIST_FIELD_FL_SYM)
		flags_str = "sym";
	else if (hist_field->flags & HIST_FIELD_FL_SYM_OFFSET)
		flags_str = "sym-offset";
	else if (hist_field->flags & HIST_FIELD_FL_EXECNAME)
		flags_str = "execname";
	else if (hist_field->flags & HIST_FIELD_FL_SYSCALL)
		flags_str = "syscall";
	else if (hist_field->flags & HIST_FIELD_FL_LOG2)
		flags_str = "log2";
	else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP_USECS)
		flags_str = "usecs";

	return flags_str;
}

static void expr_field_str(struct hist_field *field, char *expr)
{
	if (field->flags & HIST_FIELD_FL_VAR_REF)
		strcat(expr, "$");

	strcat(expr, hist_field_name(field, 0));

	if (field->flags && !(field->flags & HIST_FIELD_FL_VAR_REF)) {
		const char *flags_str = get_hist_field_flags(field);

		if (flags_str) {
			strcat(expr, ".");
			strcat(expr, flags_str);
		}
	}
}

static char *expr_str(struct hist_field *field, unsigned int level)
{
	char *expr;

	if (level > 1)
		return NULL;

	expr = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
	if (!expr)
		return NULL;

	if (!field->operands[0]) {
		expr_field_str(field, expr);
		return expr;
	}

	if (field->operator == FIELD_OP_UNARY_MINUS) {
		char *subexpr;

		strcat(expr, "-(");
		subexpr = expr_str(field->operands[0], ++level);
		if (!subexpr) {
			kfree(expr);
			return NULL;
		}
		strcat(expr, subexpr);
		strcat(expr, ")");

		kfree(subexpr);

		return expr;
	}

	expr_field_str(field->operands[0], expr);

	switch (field->operator) {
	case FIELD_OP_MINUS:
		strcat(expr, "-");
		break;
	case FIELD_OP_PLUS:
		strcat(expr, "+");
		break;
	default:
		kfree(expr);
		return NULL;
	}

	expr_field_str(field->operands[1], expr);

	return expr;
}
static int contains_operator(char *str)
{
	enum field_op_id field_op = FIELD_OP_NONE;
	char *op;

	op = strpbrk(str, "+-");
	if (!op)
		return FIELD_OP_NONE;

	switch (*op) {
	case '-':
		if (*str == '-')
			field_op = FIELD_OP_UNARY_MINUS;
		else
			field_op = FIELD_OP_MINUS;
		break;
	case '+':
		field_op = FIELD_OP_PLUS;
		break;
	default:
		break;
	}

	return field_op;
}
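/*
 * Illustrative examples (assumed): "common_timestamp-$ts0" returns
 * FIELD_OP_MINUS, "-$lat" (leading '-') returns FIELD_OP_UNARY_MINUS,
 * "a+b" returns FIELD_OP_PLUS, and a plain field name returns
 * FIELD_OP_NONE.
 */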
static void get_hist_field(struct hist_field *hist_field)
{
	hist_field->ref++;
}

static void __destroy_hist_field(struct hist_field *hist_field)
{
	if (--hist_field->ref > 1)
		return;

	kfree(hist_field->var.name);
	kfree(hist_field->name);
	kfree(hist_field->type);

	kfree(hist_field->system);
	kfree(hist_field->event_name);

	kfree(hist_field);
}

static void destroy_hist_field(struct hist_field *hist_field,
			       unsigned int level)
{
	unsigned int i;

	if (level > 3)
		return;

	if (!hist_field)
		return;

	if (hist_field->flags & HIST_FIELD_FL_VAR_REF)
		return; /* var refs will be destroyed separately */

	for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++)
		destroy_hist_field(hist_field->operands[i], level + 1);

	__destroy_hist_field(hist_field);
}
1597 static struct hist_field
*create_hist_field(struct hist_trigger_data
*hist_data
,
1598 struct ftrace_event_field
*field
,
1599 unsigned long flags
,
1602 struct hist_field
*hist_field
;
1604 if (field
&& is_function_field(field
))
1607 hist_field
= kzalloc(sizeof(struct hist_field
), GFP_KERNEL
);
1611 hist_field
->ref
= 1;
1613 hist_field
->hist_data
= hist_data
;
1615 if (flags
& HIST_FIELD_FL_EXPR
|| flags
& HIST_FIELD_FL_ALIAS
)
1616 goto out
; /* caller will populate */
1618 if (flags
& HIST_FIELD_FL_VAR_REF
) {
1619 hist_field
->fn
= hist_field_var_ref
;
1623 if (flags
& HIST_FIELD_FL_HITCOUNT
) {
1624 hist_field
->fn
= hist_field_counter
;
1625 hist_field
->size
= sizeof(u64
);
1626 hist_field
->type
= kstrdup("u64", GFP_KERNEL
);
1627 if (!hist_field
->type
)
1632 if (flags
& HIST_FIELD_FL_STACKTRACE
) {
1633 hist_field
->fn
= hist_field_none
;
1637 if (flags
& HIST_FIELD_FL_LOG2
) {
1638 unsigned long fl
= flags
& ~HIST_FIELD_FL_LOG2
;
1639 hist_field
->fn
= hist_field_log2
;
1640 hist_field
->operands
[0] = create_hist_field(hist_data
, field
, fl
, NULL
);
1641 hist_field
->size
= hist_field
->operands
[0]->size
;
1642 hist_field
->type
= kstrdup(hist_field
->operands
[0]->type
, GFP_KERNEL
);
1643 if (!hist_field
->type
)
1648 if (flags
& HIST_FIELD_FL_TIMESTAMP
) {
1649 hist_field
->fn
= hist_field_timestamp
;
1650 hist_field
->size
= sizeof(u64
);
1651 hist_field
->type
= kstrdup("u64", GFP_KERNEL
);
1652 if (!hist_field
->type
)
1657 if (flags
& HIST_FIELD_FL_CPU
) {
1658 hist_field
->fn
= hist_field_cpu
;
1659 hist_field
->size
= sizeof(int);
1660 hist_field
->type
= kstrdup("unsigned int", GFP_KERNEL
);
1661 if (!hist_field
->type
)
1666 if (WARN_ON_ONCE(!field
))
1669 if (is_string_field(field
)) {
1670 flags
|= HIST_FIELD_FL_STRING
;
1672 hist_field
->size
= MAX_FILTER_STR_VAL
;
1673 hist_field
->type
= kstrdup(field
->type
, GFP_KERNEL
);
1674 if (!hist_field
->type
)
1677 if (field
->filter_type
== FILTER_STATIC_STRING
)
1678 hist_field
->fn
= hist_field_string
;
1679 else if (field
->filter_type
== FILTER_DYN_STRING
)
1680 hist_field
->fn
= hist_field_dynstring
;
1682 hist_field
->fn
= hist_field_pstring
;
1684 hist_field
->size
= field
->size
;
1685 hist_field
->is_signed
= field
->is_signed
;
1686 hist_field
->type
= kstrdup(field
->type
, GFP_KERNEL
);
1687 if (!hist_field
->type
)
1690 hist_field
->fn
= select_value_fn(field
->size
,
1692 if (!hist_field
->fn
) {
1693 destroy_hist_field(hist_field
, 0);
1698 hist_field
->field
= field
;
1699 hist_field
->flags
= flags
;
1702 hist_field
->var
.name
= kstrdup(var_name
, GFP_KERNEL
);
1703 if (!hist_field
->var
.name
)
1709 destroy_hist_field(hist_field
, 0);
1713 static void destroy_hist_fields(struct hist_trigger_data
*hist_data
)
1717 for (i
= 0; i
< HIST_FIELDS_MAX
; i
++) {
1718 if (hist_data
->fields
[i
]) {
1719 destroy_hist_field(hist_data
->fields
[i
], 0);
1720 hist_data
->fields
[i
] = NULL
;
1724 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
1725 WARN_ON(!(hist_data
->var_refs
[i
]->flags
& HIST_FIELD_FL_VAR_REF
));
1726 __destroy_hist_field(hist_data
->var_refs
[i
]);
1727 hist_data
->var_refs
[i
] = NULL
;
1731 static int init_var_ref(struct hist_field
*ref_field
,
1732 struct hist_field
*var_field
,
1733 char *system
, char *event_name
)
1737 ref_field
->var
.idx
= var_field
->var
.idx
;
1738 ref_field
->var
.hist_data
= var_field
->hist_data
;
1739 ref_field
->size
= var_field
->size
;
1740 ref_field
->is_signed
= var_field
->is_signed
;
1741 ref_field
->flags
|= var_field
->flags
&
1742 (HIST_FIELD_FL_TIMESTAMP
| HIST_FIELD_FL_TIMESTAMP_USECS
);
1745 ref_field
->system
= kstrdup(system
, GFP_KERNEL
);
1746 if (!ref_field
->system
)
1751 ref_field
->event_name
= kstrdup(event_name
, GFP_KERNEL
);
1752 if (!ref_field
->event_name
) {
1758 if (var_field
->var
.name
) {
1759 ref_field
->name
= kstrdup(var_field
->var
.name
, GFP_KERNEL
);
1760 if (!ref_field
->name
) {
1764 } else if (var_field
->name
) {
1765 ref_field
->name
= kstrdup(var_field
->name
, GFP_KERNEL
);
1766 if (!ref_field
->name
) {
1772 ref_field
->type
= kstrdup(var_field
->type
, GFP_KERNEL
);
1773 if (!ref_field
->type
) {
1780 kfree(ref_field
->system
);
1781 kfree(ref_field
->event_name
);
1782 kfree(ref_field
->name
);
1787 static int find_var_ref_idx(struct hist_trigger_data
*hist_data
,
1788 struct hist_field
*var_field
)
1790 struct hist_field
*ref_field
;
1793 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
1794 ref_field
= hist_data
->var_refs
[i
];
1795 if (ref_field
->var
.idx
== var_field
->var
.idx
&&
1796 ref_field
->var
.hist_data
== var_field
->hist_data
)
/**
 * create_var_ref - Create a variable reference and attach it to trigger
 * @hist_data: The trigger that will be referencing the variable
 * @var_field: The VAR field to create a reference to
 * @system: The optional system string
 * @event_name: The optional event_name string
 *
 * Given a variable hist_field, create a VAR_REF hist_field that
 * represents a reference to it.
 *
 * This function also adds the reference to the trigger that
 * now references the variable.
 *
 * Return: The VAR_REF field if successful, NULL if not
 */
static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data,
					 struct hist_field *var_field,
					 char *system, char *event_name)
{
	unsigned long flags = HIST_FIELD_FL_VAR_REF;
	struct hist_field *ref_field;
	int i;

	/* Check if the variable already exists */
	for (i = 0; i < hist_data->n_var_refs; i++) {
		ref_field = hist_data->var_refs[i];
		if (ref_field->var.idx == var_field->var.idx &&
		    ref_field->var.hist_data == var_field->hist_data) {
			get_hist_field(ref_field);
			return ref_field;
		}
	}

	ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL);
	if (ref_field) {
		if (init_var_ref(ref_field, var_field, system, event_name)) {
			destroy_hist_field(ref_field, 0);
			return NULL;
		}

		hist_data->var_refs[hist_data->n_var_refs] = ref_field;
		ref_field->var_ref_idx = hist_data->n_var_refs++;
	}

	return ref_field;
}

static bool is_var_ref(char *var_name)
{
	if (!var_name || strlen(var_name) < 2 || var_name[0] != '$')
		return false;

	return true;
}
static char *field_name_from_var(struct hist_trigger_data *hist_data,
				 char *var_name)
{
	char *name, *field;
	unsigned int i;

	for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) {
		name = hist_data->attrs->var_defs.name[i];

		if (strcmp(var_name, name) == 0) {
			field = hist_data->attrs->var_defs.expr[i];
			if (contains_operator(field) || is_var_ref(field))
				continue;
			return field;
		}
	}

	return NULL;
}

static char *local_field_var_ref(struct hist_trigger_data *hist_data,
				 char *system, char *event_name,
				 char *var_name)
{
	struct trace_event_call *call;

	if (system && event_name) {
		call = hist_data->event_file->event_call;

		if (strcmp(system, call->class->system) != 0)
			return NULL;

		if (strcmp(event_name, trace_event_name(call)) != 0)
			return NULL;
	}

	if (!!system != !!event_name)
		return NULL;

	if (!is_var_ref(var_name))
		return NULL;

	var_name++;

	return field_name_from_var(hist_data, var_name);
}

static struct hist_field *parse_var_ref(struct hist_trigger_data *hist_data,
					char *system, char *event_name,
					char *var_name)
{
	struct hist_field *var_field = NULL, *ref_field = NULL;
	struct trace_array *tr = hist_data->event_file->tr;

	if (!is_var_ref(var_name))
		return NULL;

	var_name++;

	var_field = find_event_var(hist_data, system, event_name, var_name);
	if (var_field)
		ref_field = create_var_ref(hist_data, var_field,
					   system, event_name);

	if (!ref_field)
		hist_err(tr, HIST_ERR_VAR_NOT_FOUND, errpos(var_name));

	return ref_field;
}
static struct ftrace_event_field *
parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
	    char *field_str, unsigned long *flags)
{
	struct ftrace_event_field *field = NULL;
	char *field_name, *modifier, *str;
	struct trace_array *tr = file->tr;

	modifier = str = kstrdup(field_str, GFP_KERNEL);
	if (!modifier)
		return ERR_PTR(-ENOMEM);

	field_name = strsep(&modifier, ".");
	if (modifier) {
		if (strcmp(modifier, "hex") == 0)
			*flags |= HIST_FIELD_FL_HEX;
		else if (strcmp(modifier, "sym") == 0)
			*flags |= HIST_FIELD_FL_SYM;
		else if (strcmp(modifier, "sym-offset") == 0)
			*flags |= HIST_FIELD_FL_SYM_OFFSET;
		else if ((strcmp(modifier, "execname") == 0) &&
			 (strcmp(field_name, "common_pid") == 0))
			*flags |= HIST_FIELD_FL_EXECNAME;
		else if (strcmp(modifier, "syscall") == 0)
			*flags |= HIST_FIELD_FL_SYSCALL;
		else if (strcmp(modifier, "log2") == 0)
			*flags |= HIST_FIELD_FL_LOG2;
		else if (strcmp(modifier, "usecs") == 0)
			*flags |= HIST_FIELD_FL_TIMESTAMP_USECS;
		else {
			hist_err(tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(modifier));
			field = ERR_PTR(-EINVAL);
			goto out;
		}
	}

	if (strcmp(field_name, "common_timestamp") == 0) {
		*flags |= HIST_FIELD_FL_TIMESTAMP;
		hist_data->enable_timestamps = true;
		if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS)
			hist_data->attrs->ts_in_usecs = true;
	} else if (strcmp(field_name, "cpu") == 0)
		*flags |= HIST_FIELD_FL_CPU;
	else {
		field = trace_find_event_field(file->event_call, field_name);
		if (!field || !field->size) {
			hist_err(tr, HIST_ERR_FIELD_NOT_FOUND, errpos(field_name));
			field = ERR_PTR(-EINVAL);
			goto out;
		}
	}
 out:
	kfree(str);

	return field;
}
*create_alias(struct hist_trigger_data
*hist_data
,
1986 struct hist_field
*var_ref
,
1989 struct hist_field
*alias
= NULL
;
1990 unsigned long flags
= HIST_FIELD_FL_ALIAS
| HIST_FIELD_FL_VAR
;
1992 alias
= create_hist_field(hist_data
, NULL
, flags
, var_name
);
1996 alias
->fn
= var_ref
->fn
;
1997 alias
->operands
[0] = var_ref
;
1999 if (init_var_ref(alias
, var_ref
, var_ref
->system
, var_ref
->event_name
)) {
2000 destroy_hist_field(alias
, 0);
2004 alias
->var_ref_idx
= var_ref
->var_ref_idx
;
2009 static struct hist_field
*parse_atom(struct hist_trigger_data
*hist_data
,
2010 struct trace_event_file
*file
, char *str
,
2011 unsigned long *flags
, char *var_name
)
2013 char *s
, *ref_system
= NULL
, *ref_event
= NULL
, *ref_var
= str
;
2014 struct ftrace_event_field
*field
= NULL
;
2015 struct hist_field
*hist_field
= NULL
;
2018 s
= strchr(str
, '.');
2020 s
= strchr(++s
, '.');
2022 ref_system
= strsep(&str
, ".");
2027 ref_event
= strsep(&str
, ".");
2036 s
= local_field_var_ref(hist_data
, ref_system
, ref_event
, ref_var
);
2038 hist_field
= parse_var_ref(hist_data
, ref_system
,
2039 ref_event
, ref_var
);
2042 hist_field
= create_alias(hist_data
, hist_field
, var_name
);
2053 field
= parse_field(hist_data
, file
, str
, flags
);
2054 if (IS_ERR(field
)) {
2055 ret
= PTR_ERR(field
);
2059 hist_field
= create_hist_field(hist_data
, field
, *flags
, var_name
);
2067 return ERR_PTR(ret
);
2070 static struct hist_field
*parse_expr(struct hist_trigger_data
*hist_data
,
2071 struct trace_event_file
*file
,
2072 char *str
, unsigned long flags
,
2073 char *var_name
, unsigned int level
);
2075 static struct hist_field
*parse_unary(struct hist_trigger_data
*hist_data
,
2076 struct trace_event_file
*file
,
2077 char *str
, unsigned long flags
,
2078 char *var_name
, unsigned int level
)
2080 struct hist_field
*operand1
, *expr
= NULL
;
2081 unsigned long operand_flags
;
2085 /* we support only -(xxx) i.e. explicit parens required */
2088 hist_err(file
->tr
, HIST_ERR_TOO_MANY_SUBEXPR
, errpos(str
));
2093 str
++; /* skip leading '-' */
2095 s
= strchr(str
, '(');
2103 s
= strrchr(str
, ')');
2107 ret
= -EINVAL
; /* no closing ')' */
2111 flags
|= HIST_FIELD_FL_EXPR
;
2112 expr
= create_hist_field(hist_data
, NULL
, flags
, var_name
);
2119 operand1
= parse_expr(hist_data
, file
, str
, operand_flags
, NULL
, ++level
);
2120 if (IS_ERR(operand1
)) {
2121 ret
= PTR_ERR(operand1
);
2125 expr
->flags
|= operand1
->flags
&
2126 (HIST_FIELD_FL_TIMESTAMP
| HIST_FIELD_FL_TIMESTAMP_USECS
);
2127 expr
->fn
= hist_field_unary_minus
;
2128 expr
->operands
[0] = operand1
;
2129 expr
->operator = FIELD_OP_UNARY_MINUS
;
2130 expr
->name
= expr_str(expr
, 0);
2131 expr
->type
= kstrdup(operand1
->type
, GFP_KERNEL
);
2139 destroy_hist_field(expr
, 0);
2140 return ERR_PTR(ret
);
static int check_expr_operands(struct trace_array *tr,
			       struct hist_field *operand1,
			       struct hist_field *operand2)
{
	unsigned long operand1_flags = operand1->flags;
	unsigned long operand2_flags = operand2->flags;

	if ((operand1_flags & HIST_FIELD_FL_VAR_REF) ||
	    (operand1_flags & HIST_FIELD_FL_ALIAS)) {
		struct hist_field *var;

		var = find_var_field(operand1->var.hist_data, operand1->name);
		if (!var)
			return -EINVAL;
		operand1_flags = var->flags;
	}

	if ((operand2_flags & HIST_FIELD_FL_VAR_REF) ||
	    (operand2_flags & HIST_FIELD_FL_ALIAS)) {
		struct hist_field *var;

		var = find_var_field(operand2->var.hist_data, operand2->name);
		if (!var)
			return -EINVAL;
		operand2_flags = var->flags;
	}

	if ((operand1_flags & HIST_FIELD_FL_TIMESTAMP_USECS) !=
	    (operand2_flags & HIST_FIELD_FL_TIMESTAMP_USECS)) {
		hist_err(tr, HIST_ERR_TIMESTAMP_MISMATCH, 0);
		return -EINVAL;
	}

	return 0;
}
*parse_expr(struct hist_trigger_data
*hist_data
,
2180 struct trace_event_file
*file
,
2181 char *str
, unsigned long flags
,
2182 char *var_name
, unsigned int level
)
2184 struct hist_field
*operand1
= NULL
, *operand2
= NULL
, *expr
= NULL
;
2185 unsigned long operand_flags
;
2186 int field_op
, ret
= -EINVAL
;
2187 char *sep
, *operand1_str
;
2190 hist_err(file
->tr
, HIST_ERR_TOO_MANY_SUBEXPR
, errpos(str
));
2191 return ERR_PTR(-EINVAL
);
2194 field_op
= contains_operator(str
);
2196 if (field_op
== FIELD_OP_NONE
)
2197 return parse_atom(hist_data
, file
, str
, &flags
, var_name
);
2199 if (field_op
== FIELD_OP_UNARY_MINUS
)
2200 return parse_unary(hist_data
, file
, str
, flags
, var_name
, ++level
);
2203 case FIELD_OP_MINUS
:
2213 operand1_str
= strsep(&str
, sep
);
2214 if (!operand1_str
|| !str
)
2218 operand1
= parse_atom(hist_data
, file
, operand1_str
,
2219 &operand_flags
, NULL
);
2220 if (IS_ERR(operand1
)) {
2221 ret
= PTR_ERR(operand1
);
2226 /* rest of string could be another expression e.g. b+c in a+b+c */
2228 operand2
= parse_expr(hist_data
, file
, str
, operand_flags
, NULL
, ++level
);
2229 if (IS_ERR(operand2
)) {
2230 ret
= PTR_ERR(operand2
);
2235 ret
= check_expr_operands(file
->tr
, operand1
, operand2
);
2239 flags
|= HIST_FIELD_FL_EXPR
;
2241 flags
|= operand1
->flags
&
2242 (HIST_FIELD_FL_TIMESTAMP
| HIST_FIELD_FL_TIMESTAMP_USECS
);
2244 expr
= create_hist_field(hist_data
, NULL
, flags
, var_name
);
2250 operand1
->read_once
= true;
2251 operand2
->read_once
= true;
2253 expr
->operands
[0] = operand1
;
2254 expr
->operands
[1] = operand2
;
2255 expr
->operator = field_op
;
2256 expr
->name
= expr_str(expr
, 0);
2257 expr
->type
= kstrdup(operand1
->type
, GFP_KERNEL
);
2264 case FIELD_OP_MINUS
:
2265 expr
->fn
= hist_field_minus
;
2268 expr
->fn
= hist_field_plus
;
2277 destroy_hist_field(operand1
, 0);
2278 destroy_hist_field(operand2
, 0);
2279 destroy_hist_field(expr
, 0);
2281 return ERR_PTR(ret
);
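/*
 * Illustrative walk-through (assumed): parsing
 * "common_timestamp.usecs-$ts0" finds FIELD_OP_MINUS, parses the
 * left-hand side with parse_atom(), recursively parses the remainder
 * as another expression, checks that both operands use the same
 * timestamp units via check_expr_operands(), and installs
 * hist_field_minus as the resulting hist_field's fn.
 */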
static char *find_trigger_filter(struct hist_trigger_data *hist_data,
				 struct trace_event_file *file)
{
	struct event_trigger_data *test;

	lockdep_assert_held(&event_mutex);

	list_for_each_entry(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (test->private_data == hist_data)
				return test->filter_str;
		}
	}

	return NULL;
}

static struct event_command trigger_hist_cmd;
static int event_hist_trigger_func(struct event_command *cmd_ops,
				   struct trace_event_file *file,
				   char *glob, char *cmd, char *param);

static bool compatible_keys(struct hist_trigger_data *target_hist_data,
			    struct hist_trigger_data *hist_data,
			    unsigned int n_keys)
{
	struct hist_field *target_hist_field, *hist_field;
	unsigned int n, i, j;

	if (hist_data->n_fields - hist_data->n_vals != n_keys)
		return false;

	i = hist_data->n_vals;
	j = target_hist_data->n_vals;

	for (n = 0; n < n_keys; n++) {
		hist_field = hist_data->fields[i + n];
		target_hist_field = target_hist_data->fields[j + n];

		if (strcmp(hist_field->type, target_hist_field->type) != 0)
			return false;
		if (hist_field->size != target_hist_field->size)
			return false;
		if (hist_field->is_signed != target_hist_field->is_signed)
			return false;
	}

	return true;
}

static struct hist_trigger_data *
find_compatible_hist(struct hist_trigger_data *target_hist_data,
		     struct trace_event_file *file)
{
	struct hist_trigger_data *hist_data;
	struct event_trigger_data *test;
	unsigned int n_keys;

	lockdep_assert_held(&event_mutex);

	n_keys = target_hist_data->n_fields - target_hist_data->n_vals;

	list_for_each_entry(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			hist_data = test->private_data;

			if (compatible_keys(target_hist_data, hist_data, n_keys))
				return hist_data;
		}
	}

	return NULL;
}

static struct trace_event_file *event_file(struct trace_array *tr,
					   char *system, char *event_name)
{
	struct trace_event_file *file;

	file = __find_event_file(tr, system, event_name);
	if (!file)
		return ERR_PTR(-EINVAL);

	return file;
}

static struct hist_field *
find_synthetic_field_var(struct hist_trigger_data *target_hist_data,
			 char *system, char *event_name, char *field_name)
{
	struct hist_field *event_var;
	char *synthetic_name;

	synthetic_name = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
	if (!synthetic_name)
		return ERR_PTR(-ENOMEM);

	strcpy(synthetic_name, "synthetic_");
	strcat(synthetic_name, field_name);

	event_var = find_event_var(target_hist_data, system, event_name, synthetic_name);

	kfree(synthetic_name);

	return event_var;
}
/**
 * create_field_var_hist - Automatically create a histogram and var for a field
 * @target_hist_data: The target hist trigger
 * @subsys_name: Optional subsystem name
 * @event_name: Optional event name
 * @field_name: The name of the field (and the resulting variable)
 *
 * Hist trigger actions fetch data from variables, not directly from
 * events.  However, for convenience, users are allowed to directly
 * specify an event field in an action, which will be automatically
 * converted into a variable on their behalf.
 *
 * If a user specifies a field on an event that isn't the event the
 * histogram currently being defined (the target event histogram), the
 * only way that can be accomplished is if a new hist trigger is
 * created and the field variable defined on that.
 *
 * This function creates a new histogram compatible with the target
 * event (meaning a histogram with the same key as the target
 * histogram), and creates a variable for the specified field, but
 * with 'synthetic_' prepended to the variable name in order to avoid
 * collision with normal field variables.
 *
 * Return: The variable created for the field.
 */
static struct hist_field *
create_field_var_hist(struct hist_trigger_data *target_hist_data,
		      char *subsys_name, char *event_name, char *field_name)
{
	struct trace_array *tr = target_hist_data->event_file->tr;
	struct hist_field *event_var = ERR_PTR(-EINVAL);
	struct hist_trigger_data *hist_data;
	unsigned int i, n, first = true;
	struct field_var_hist *var_hist;
	struct trace_event_file *file;
	struct hist_field *key_field;
	char *saved_filter;
	char *cmd;
	int ret;

	if (target_hist_data->n_field_var_hists >= SYNTH_FIELDS_MAX) {
		hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name));
		return ERR_PTR(-EINVAL);
	}

	file = event_file(tr, subsys_name, event_name);

	if (IS_ERR(file)) {
		hist_err(tr, HIST_ERR_EVENT_FILE_NOT_FOUND, errpos(field_name));
		ret = PTR_ERR(file);
		return ERR_PTR(ret);
	}

	/*
	 * Look for a histogram compatible with target. We'll use the
	 * found histogram specification to create a new matching
	 * histogram with our variable on it.  target_hist_data is not
	 * yet a registered histogram so we can't use that.
	 */
	hist_data = find_compatible_hist(target_hist_data, file);
	if (!hist_data) {
		hist_err(tr, HIST_ERR_HIST_NOT_FOUND, errpos(field_name));
		return ERR_PTR(-EINVAL);
	}

	/* See if a synthetic field variable has already been created */
	event_var = find_synthetic_field_var(target_hist_data, subsys_name,
					     event_name, field_name);
	if (!IS_ERR_OR_NULL(event_var))
		return event_var;

	var_hist = kzalloc(sizeof(*var_hist), GFP_KERNEL);
	if (!var_hist)
		return ERR_PTR(-ENOMEM);

	cmd = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
	if (!cmd) {
		kfree(var_hist);
		return ERR_PTR(-ENOMEM);
	}

	/* Use the same keys as the compatible histogram */
	strcat(cmd, "keys=");

	for_each_hist_key_field(i, hist_data) {
		key_field = hist_data->fields[i];
		if (!first)
			strcat(cmd, ",");
		strcat(cmd, key_field->field->name);
		first = false;
	}

	/* Create the synthetic field variable specification */
	strcat(cmd, ":synthetic_");
	strcat(cmd, field_name);
	strcat(cmd, "=");
	strcat(cmd, field_name);

	/* Use the same filter as the compatible histogram */
	saved_filter = find_trigger_filter(hist_data, file);
	if (saved_filter) {
		strcat(cmd, " if ");
		strcat(cmd, saved_filter);
	}

	var_hist->cmd = kstrdup(cmd, GFP_KERNEL);
	if (!var_hist->cmd) {
		kfree(cmd);
		kfree(var_hist);
		return ERR_PTR(-ENOMEM);
	}

	/* Save the compatible histogram information */
	var_hist->hist_data = hist_data;

	/* Create the new histogram with our variable */
	ret = event_hist_trigger_func(&trigger_hist_cmd, file,
				      "", "hist", cmd);
	if (ret) {
		kfree(cmd);
		kfree(var_hist->cmd);
		kfree(var_hist);
		hist_err(tr, HIST_ERR_HIST_CREATE_FAIL, errpos(field_name));
		return ERR_PTR(ret);
	}

	kfree(cmd);

	/* If we can't find the variable, something went wrong */
	event_var = find_synthetic_field_var(target_hist_data, subsys_name,
					     event_name, field_name);
	if (IS_ERR_OR_NULL(event_var)) {
		kfree(var_hist->cmd);
		kfree(var_hist);
		hist_err(tr, HIST_ERR_SYNTH_VAR_NOT_FOUND, errpos(field_name));
		return ERR_PTR(-EINVAL);
	}

	n = target_hist_data->n_field_var_hists;
	target_hist_data->field_var_hists[n] = var_hist;
	target_hist_data->n_field_var_hists++;

	return event_var;
}
*
2537 find_target_event_var(struct hist_trigger_data
*hist_data
,
2538 char *subsys_name
, char *event_name
, char *var_name
)
2540 struct trace_event_file
*file
= hist_data
->event_file
;
2541 struct hist_field
*hist_field
= NULL
;
2544 struct trace_event_call
*call
;
2549 call
= file
->event_call
;
2551 if (strcmp(subsys_name
, call
->class->system
) != 0)
2554 if (strcmp(event_name
, trace_event_name(call
)) != 0)
2558 hist_field
= find_var_field(hist_data
, var_name
);
static inline void __update_field_vars(struct tracing_map_elt *elt,
				       struct ring_buffer_event *rbe,
				       void *rec,
				       struct field_var **field_vars,
				       unsigned int n_field_vars,
				       unsigned int field_var_str_start)
{
	struct hist_elt_data *elt_data = elt->private_data;
	unsigned int i, j, var_idx;
	u64 var_val;

	for (i = 0, j = field_var_str_start; i < n_field_vars; i++) {
		struct field_var *field_var = field_vars[i];
		struct hist_field *var = field_var->var;
		struct hist_field *val = field_var->val;

		var_val = val->fn(val, elt, rbe, rec);
		var_idx = var->var.idx;

		if (val->flags & HIST_FIELD_FL_STRING) {
			char *str = elt_data->field_var_str[j++];
			char *val_str = (char *)(uintptr_t)var_val;

			strscpy(str, val_str, STR_VAR_LEN_MAX);
			var_val = (u64)(uintptr_t)str;
		}
		tracing_map_set_var(elt, var_idx, var_val);
	}
}
2593 static void update_field_vars(struct hist_trigger_data
*hist_data
,
2594 struct tracing_map_elt
*elt
,
2595 struct ring_buffer_event
*rbe
,
2598 __update_field_vars(elt
, rbe
, rec
, hist_data
->field_vars
,
2599 hist_data
->n_field_vars
, 0);
2602 static void save_track_data_vars(struct hist_trigger_data
*hist_data
,
2603 struct tracing_map_elt
*elt
, void *rec
,
2604 struct ring_buffer_event
*rbe
, void *key
,
2605 struct action_data
*data
, u64
*var_ref_vals
)
2607 __update_field_vars(elt
, rbe
, rec
, hist_data
->save_vars
,
2608 hist_data
->n_save_vars
, hist_data
->n_field_var_str
);
2611 static struct hist_field
*create_var(struct hist_trigger_data
*hist_data
,
2612 struct trace_event_file
*file
,
2613 char *name
, int size
, const char *type
)
2615 struct hist_field
*var
;
2618 if (find_var(hist_data
, file
, name
) && !hist_data
->remove
) {
2619 var
= ERR_PTR(-EINVAL
);
2623 var
= kzalloc(sizeof(struct hist_field
), GFP_KERNEL
);
2625 var
= ERR_PTR(-ENOMEM
);
2629 idx
= tracing_map_add_var(hist_data
->map
);
2632 var
= ERR_PTR(-EINVAL
);
2637 var
->flags
= HIST_FIELD_FL_VAR
;
2639 var
->var
.hist_data
= var
->hist_data
= hist_data
;
2641 var
->var
.name
= kstrdup(name
, GFP_KERNEL
);
2642 var
->type
= kstrdup(type
, GFP_KERNEL
);
2643 if (!var
->var
.name
|| !var
->type
) {
2644 kfree(var
->var
.name
);
2647 var
= ERR_PTR(-ENOMEM
);
2653 static struct field_var
*create_field_var(struct hist_trigger_data
*hist_data
,
2654 struct trace_event_file
*file
,
2657 struct hist_field
*val
= NULL
, *var
= NULL
;
2658 unsigned long flags
= HIST_FIELD_FL_VAR
;
2659 struct trace_array
*tr
= file
->tr
;
2660 struct field_var
*field_var
;
2663 if (hist_data
->n_field_vars
>= SYNTH_FIELDS_MAX
) {
2664 hist_err(tr
, HIST_ERR_TOO_MANY_FIELD_VARS
, errpos(field_name
));
2669 val
= parse_atom(hist_data
, file
, field_name
, &flags
, NULL
);
2671 hist_err(tr
, HIST_ERR_FIELD_VAR_PARSE_FAIL
, errpos(field_name
));
2676 var
= create_var(hist_data
, file
, field_name
, val
->size
, val
->type
);
2678 hist_err(tr
, HIST_ERR_VAR_CREATE_FIND_FAIL
, errpos(field_name
));
2684 field_var
= kzalloc(sizeof(struct field_var
), GFP_KERNEL
);
2692 field_var
->var
= var
;
2693 field_var
->val
= val
;
2697 field_var
= ERR_PTR(ret
);
2702 * create_target_field_var - Automatically create a variable for a field
2703 * @target_hist_data: The target hist trigger
2704 * @subsys_name: Optional subsystem name
2705 * @event_name: Optional event name
2706 * @var_name: The name of the field (and the resulting variable)
2708 * Hist trigger actions fetch data from variables, not directly from
2709 * events. However, for convenience, users are allowed to directly
2710 * specify an event field in an action, which will be automatically
2711 * converted into a variable on their behalf.
2713 * This function creates a field variable with the name var_name on
2714 * the hist trigger currently being defined on the target event. If
2715 * subsys_name and event_name are specified, this function simply
2716 * verifies that they do in fact match the target event subsystem and
2719 * Return: The variable created for the field.
2721 static struct field_var
*
2722 create_target_field_var(struct hist_trigger_data
*target_hist_data
,
2723 char *subsys_name
, char *event_name
, char *var_name
)
2725 struct trace_event_file
*file
= target_hist_data
->event_file
;
2728 struct trace_event_call
*call
;
2733 call
= file
->event_call
;
2735 if (strcmp(subsys_name
, call
->class->system
) != 0)
2738 if (strcmp(event_name
, trace_event_name(call
)) != 0)
2742 return create_field_var(target_hist_data
, file
, var_name
);
2745 static bool check_track_val_max(u64 track_val
, u64 var_val
)
2747 if (var_val
<= track_val
)
2753 static bool check_track_val_changed(u64 track_val
, u64 var_val
)
2755 if (var_val
== track_val
)
2761 static u64
get_track_val(struct hist_trigger_data
*hist_data
,
2762 struct tracing_map_elt
*elt
,
2763 struct action_data
*data
)
2765 unsigned int track_var_idx
= data
->track_data
.track_var
->var
.idx
;
2768 track_val
= tracing_map_read_var(elt
, track_var_idx
);
2773 static void save_track_val(struct hist_trigger_data
*hist_data
,
2774 struct tracing_map_elt
*elt
,
2775 struct action_data
*data
, u64 var_val
)
2777 unsigned int track_var_idx
= data
->track_data
.track_var
->var
.idx
;
2779 tracing_map_set_var(elt
, track_var_idx
, var_val
);
2782 static void save_track_data(struct hist_trigger_data
*hist_data
,
2783 struct tracing_map_elt
*elt
, void *rec
,
2784 struct ring_buffer_event
*rbe
, void *key
,
2785 struct action_data
*data
, u64
*var_ref_vals
)
2787 if (data
->track_data
.save_data
)
2788 data
->track_data
.save_data(hist_data
, elt
, rec
, rbe
, key
, data
, var_ref_vals
);
2791 static bool check_track_val(struct tracing_map_elt
*elt
,
2792 struct action_data
*data
,
2795 struct hist_trigger_data
*hist_data
;
2798 hist_data
= data
->track_data
.track_var
->hist_data
;
2799 track_val
= get_track_val(hist_data
, elt
, data
);
2801 return data
->track_data
.check_val(track_val
, var_val
);
2804 #ifdef CONFIG_TRACER_SNAPSHOT
2805 static bool cond_snapshot_update(struct trace_array
*tr
, void *cond_data
)
2807 /* called with tr->max_lock held */
2808 struct track_data
*track_data
= tr
->cond_snapshot
->cond_data
;
2809 struct hist_elt_data
*elt_data
, *track_elt_data
;
2810 struct snapshot_context
*context
= cond_data
;
2811 struct action_data
*action
;
2817 action
= track_data
->action_data
;
2819 track_val
= get_track_val(track_data
->hist_data
, context
->elt
,
2820 track_data
->action_data
);
2822 if (!action
->track_data
.check_val(track_data
->track_val
, track_val
))
2825 track_data
->track_val
= track_val
;
2826 memcpy(track_data
->key
, context
->key
, track_data
->key_len
);
2828 elt_data
= context
->elt
->private_data
;
2829 track_elt_data
= track_data
->elt
.private_data
;
2831 strncpy(track_elt_data
->comm
, elt_data
->comm
, TASK_COMM_LEN
);
2833 track_data
->updated
= true;
2838 static void save_track_data_snapshot(struct hist_trigger_data
*hist_data
,
2839 struct tracing_map_elt
*elt
, void *rec
,
2840 struct ring_buffer_event
*rbe
, void *key
,
2841 struct action_data
*data
,
2844 struct trace_event_file
*file
= hist_data
->event_file
;
2845 struct snapshot_context context
;
2850 tracing_snapshot_cond(file
->tr
, &context
);
2853 static void hist_trigger_print_key(struct seq_file
*m
,
2854 struct hist_trigger_data
*hist_data
,
2856 struct tracing_map_elt
*elt
);
2858 static struct action_data
*snapshot_action(struct hist_trigger_data
*hist_data
)
2862 if (!hist_data
->n_actions
)
2865 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
2866 struct action_data
*data
= hist_data
->actions
[i
];
2868 if (data
->action
== ACTION_SNAPSHOT
)
2875 static void track_data_snapshot_print(struct seq_file
*m
,
2876 struct hist_trigger_data
*hist_data
)
2878 struct trace_event_file
*file
= hist_data
->event_file
;
2879 struct track_data
*track_data
;
2880 struct action_data
*action
;
2882 track_data
= tracing_cond_snapshot_data(file
->tr
);
2886 if (!track_data
->updated
)
2889 action
= snapshot_action(hist_data
);
2893 seq_puts(m
, "\nSnapshot taken (see tracing/snapshot). Details:\n");
2894 seq_printf(m
, "\ttriggering value { %s(%s) }: %10llu",
2895 action
->handler
== HANDLER_ONMAX
? "onmax" : "onchange",
2896 action
->track_data
.var_str
, track_data
->track_val
);
2898 seq_puts(m
, "\ttriggered by event with key: ");
2899 hist_trigger_print_key(m
, hist_data
, track_data
->key
, &track_data
->elt
);
2903 static bool cond_snapshot_update(struct trace_array
*tr
, void *cond_data
)
2907 static void save_track_data_snapshot(struct hist_trigger_data
*hist_data
,
2908 struct tracing_map_elt
*elt
, void *rec
,
2909 struct ring_buffer_event
*rbe
, void *key
,
2910 struct action_data
*data
,
2911 u64
*var_ref_vals
) {}
2912 static void track_data_snapshot_print(struct seq_file
*m
,
2913 struct hist_trigger_data
*hist_data
) {}
2914 #endif /* CONFIG_TRACER_SNAPSHOT */
2916 static void track_data_print(struct seq_file
*m
,
2917 struct hist_trigger_data
*hist_data
,
2918 struct tracing_map_elt
*elt
,
2919 struct action_data
*data
)
2921 u64 track_val
= get_track_val(hist_data
, elt
, data
);
2922 unsigned int i
, save_var_idx
;
2924 if (data
->handler
== HANDLER_ONMAX
)
2925 seq_printf(m
, "\n\tmax: %10llu", track_val
);
2926 else if (data
->handler
== HANDLER_ONCHANGE
)
2927 seq_printf(m
, "\n\tchanged: %10llu", track_val
);
2929 if (data
->action
== ACTION_SNAPSHOT
)
2932 for (i
= 0; i
< hist_data
->n_save_vars
; i
++) {
2933 struct hist_field
*save_val
= hist_data
->save_vars
[i
]->val
;
2934 struct hist_field
*save_var
= hist_data
->save_vars
[i
]->var
;
2937 save_var_idx
= save_var
->var
.idx
;
2939 val
= tracing_map_read_var(elt
, save_var_idx
);
2941 if (save_val
->flags
& HIST_FIELD_FL_STRING
) {
2942 seq_printf(m
, " %s: %-32s", save_var
->var
.name
,
2943 (char *)(uintptr_t)(val
));
2945 seq_printf(m
, " %s: %10llu", save_var
->var
.name
, val
);
2949 static void ontrack_action(struct hist_trigger_data
*hist_data
,
2950 struct tracing_map_elt
*elt
, void *rec
,
2951 struct ring_buffer_event
*rbe
, void *key
,
2952 struct action_data
*data
, u64
*var_ref_vals
)
2954 u64 var_val
= var_ref_vals
[data
->track_data
.var_ref
->var_ref_idx
];
2956 if (check_track_val(elt
, data
, var_val
)) {
2957 save_track_val(hist_data
, elt
, data
, var_val
);
2958 save_track_data(hist_data
, elt
, rec
, rbe
, key
, data
, var_ref_vals
);
2962 static void action_data_destroy(struct action_data
*data
)
2966 lockdep_assert_held(&event_mutex
);
2968 kfree(data
->action_name
);
2970 for (i
= 0; i
< data
->n_params
; i
++)
2971 kfree(data
->params
[i
]);
2973 if (data
->synth_event
)
2974 data
->synth_event
->ref
--;
2976 kfree(data
->synth_event_name
);
2981 static void track_data_destroy(struct hist_trigger_data
*hist_data
,
2982 struct action_data
*data
)
2984 struct trace_event_file
*file
= hist_data
->event_file
;
2986 destroy_hist_field(data
->track_data
.track_var
, 0);
2988 if (data
->action
== ACTION_SNAPSHOT
) {
2989 struct track_data
*track_data
;
2991 track_data
= tracing_cond_snapshot_data(file
->tr
);
2992 if (track_data
&& track_data
->hist_data
== hist_data
) {
2993 tracing_snapshot_cond_disable(file
->tr
);
2994 track_data_free(track_data
);
2998 kfree(data
->track_data
.var_str
);
3000 action_data_destroy(data
);
3003 static int action_create(struct hist_trigger_data
*hist_data
,
3004 struct action_data
*data
);
3006 static int track_data_create(struct hist_trigger_data
*hist_data
,
3007 struct action_data
*data
)
3009 struct hist_field
*var_field
, *ref_field
, *track_var
= NULL
;
3010 struct trace_event_file
*file
= hist_data
->event_file
;
3011 struct trace_array
*tr
= file
->tr
;
3012 char *track_data_var_str
;
3015 track_data_var_str
= data
->track_data
.var_str
;
3016 if (track_data_var_str
[0] != '$') {
3017 hist_err(tr
, HIST_ERR_ONX_NOT_VAR
, errpos(track_data_var_str
));
3020 track_data_var_str
++;
3022 var_field
= find_target_event_var(hist_data
, NULL
, NULL
, track_data_var_str
);
3024 hist_err(tr
, HIST_ERR_ONX_VAR_NOT_FOUND
, errpos(track_data_var_str
));
3028 ref_field
= create_var_ref(hist_data
, var_field
, NULL
, NULL
);
3032 data
->track_data
.var_ref
= ref_field
;
3034 if (data
->handler
== HANDLER_ONMAX
)
3035 track_var
= create_var(hist_data
, file
, "__max", sizeof(u64
), "u64");
3036 if (IS_ERR(track_var
)) {
3037 hist_err(tr
, HIST_ERR_ONX_VAR_CREATE_FAIL
, 0);
3038 ret
= PTR_ERR(track_var
);
3042 if (data
->handler
== HANDLER_ONCHANGE
)
3043 track_var
= create_var(hist_data
, file
, "__change", sizeof(u64
), "u64");
3044 if (IS_ERR(track_var
)) {
3045 hist_err(tr
, HIST_ERR_ONX_VAR_CREATE_FAIL
, 0);
3046 ret
= PTR_ERR(track_var
);
3049 data
->track_data
.track_var
= track_var
;
3051 ret
= action_create(hist_data
, data
);
3056 static int parse_action_params(struct trace_array
*tr
, char *params
,
3057 struct action_data
*data
)
3059 char *param
, *saved_param
;
3060 bool first_param
= true;
3064 if (data
->n_params
>= SYNTH_FIELDS_MAX
) {
3065 hist_err(tr
, HIST_ERR_TOO_MANY_PARAMS
, 0);
3069 param
= strsep(¶ms
, ",");
3071 hist_err(tr
, HIST_ERR_PARAM_NOT_FOUND
, 0);
3076 param
= strstrip(param
);
3077 if (strlen(param
) < 2) {
3078 hist_err(tr
, HIST_ERR_INVALID_PARAM
, errpos(param
));
3083 saved_param
= kstrdup(param
, GFP_KERNEL
);
3089 if (first_param
&& data
->use_trace_keyword
) {
3090 data
->synth_event_name
= saved_param
;
3091 first_param
= false;
3094 first_param
= false;
3096 data
->params
[data
->n_params
++] = saved_param
;
3102 static int action_parse(struct trace_array
*tr
, char *str
, struct action_data
*data
,
3103 enum handler_id handler
)
3110 hist_err(tr
, HIST_ERR_ACTION_NOT_FOUND
, 0);
3115 action_name
= strsep(&str
, "(");
3116 if (!action_name
|| !str
) {
3117 hist_err(tr
, HIST_ERR_ACTION_NOT_FOUND
, 0);
3122 if (str_has_prefix(action_name
, "save")) {
3123 char *params
= strsep(&str
, ")");
3126 hist_err(tr
, HIST_ERR_NO_SAVE_PARAMS
, 0);
3131 ret
= parse_action_params(tr
, params
, data
);
3135 if (handler
== HANDLER_ONMAX
)
3136 data
->track_data
.check_val
= check_track_val_max
;
3137 else if (handler
== HANDLER_ONCHANGE
)
3138 data
->track_data
.check_val
= check_track_val_changed
;
3140 hist_err(tr
, HIST_ERR_ACTION_MISMATCH
, errpos(action_name
));
3145 data
->track_data
.save_data
= save_track_data_vars
;
3146 data
->fn
= ontrack_action
;
3147 data
->action
= ACTION_SAVE
;
3148 } else if (str_has_prefix(action_name
, "snapshot")) {
3149 char *params
= strsep(&str
, ")");
3152 hist_err(tr
, HIST_ERR_NO_CLOSING_PAREN
, errpos(params
));
3157 if (handler
== HANDLER_ONMAX
)
3158 data
->track_data
.check_val
= check_track_val_max
;
3159 else if (handler
== HANDLER_ONCHANGE
)
3160 data
->track_data
.check_val
= check_track_val_changed
;
3162 hist_err(tr
, HIST_ERR_ACTION_MISMATCH
, errpos(action_name
));
3167 data
->track_data
.save_data
= save_track_data_snapshot
;
3168 data
->fn
= ontrack_action
;
3169 data
->action
= ACTION_SNAPSHOT
;
3171 char *params
= strsep(&str
, ")");
3173 if (str_has_prefix(action_name
, "trace"))
3174 data
->use_trace_keyword
= true;
3177 ret
= parse_action_params(tr
, params
, data
);
3182 if (handler
== HANDLER_ONMAX
)
3183 data
->track_data
.check_val
= check_track_val_max
;
3184 else if (handler
== HANDLER_ONCHANGE
)
3185 data
->track_data
.check_val
= check_track_val_changed
;
3187 if (handler
!= HANDLER_ONMATCH
) {
3188 data
->track_data
.save_data
= action_trace
;
3189 data
->fn
= ontrack_action
;
3191 data
->fn
= action_trace
;
3193 data
->action
= ACTION_TRACE
;
3196 data
->action_name
= kstrdup(action_name
, GFP_KERNEL
);
3197 if (!data
->action_name
) {
3202 data
->handler
= handler
;
3207 static struct action_data
*track_data_parse(struct hist_trigger_data
*hist_data
,
3208 char *str
, enum handler_id handler
)
3210 struct action_data
*data
;
3214 data
= kzalloc(sizeof(*data
), GFP_KERNEL
);
3216 return ERR_PTR(-ENOMEM
);
3218 var_str
= strsep(&str
, ")");
3219 if (!var_str
|| !str
) {
3224 data
->track_data
.var_str
= kstrdup(var_str
, GFP_KERNEL
);
3225 if (!data
->track_data
.var_str
) {
3230 ret
= action_parse(hist_data
->event_file
->tr
, str
, data
, handler
);
3236 track_data_destroy(hist_data
, data
);
3237 data
= ERR_PTR(ret
);
3241 static void onmatch_destroy(struct action_data
*data
)
3243 kfree(data
->match_data
.event
);
3244 kfree(data
->match_data
.event_system
);
3246 action_data_destroy(data
);
3249 static void destroy_field_var(struct field_var
*field_var
)
3254 destroy_hist_field(field_var
->var
, 0);
3255 destroy_hist_field(field_var
->val
, 0);
3260 static void destroy_field_vars(struct hist_trigger_data
*hist_data
)
3264 for (i
= 0; i
< hist_data
->n_field_vars
; i
++)
3265 destroy_field_var(hist_data
->field_vars
[i
]);
3267 for (i
= 0; i
< hist_data
->n_save_vars
; i
++)
3268 destroy_field_var(hist_data
->save_vars
[i
]);
3271 static void save_field_var(struct hist_trigger_data
*hist_data
,
3272 struct field_var
*field_var
)
3274 hist_data
->field_vars
[hist_data
->n_field_vars
++] = field_var
;
3276 if (field_var
->val
->flags
& HIST_FIELD_FL_STRING
)
3277 hist_data
->n_field_var_str
++;
3281 static int check_synth_field(struct synth_event
*event
,
3282 struct hist_field
*hist_field
,
3283 unsigned int field_pos
)
3285 struct synth_field
*field
;
3287 if (field_pos
>= event
->n_fields
)
3290 field
= event
->fields
[field_pos
];
3293 * A dynamic string synth field can accept static or
3294 * dynamic. A static string synth field can only accept a
3295 * same-sized static string, which is checked for later.
3297 if (strstr(hist_field
->type
, "char[") && field
->is_string
3298 && field
->is_dynamic
)
3301 if (strcmp(field
->type
, hist_field
->type
) != 0) {
3302 if (field
->size
!= hist_field
->size
||
3303 field
->is_signed
!= hist_field
->is_signed
)
3310 static struct hist_field
*
3311 trace_action_find_var(struct hist_trigger_data
*hist_data
,
3312 struct action_data
*data
,
3313 char *system
, char *event
, char *var
)
3315 struct trace_array
*tr
= hist_data
->event_file
->tr
;
3316 struct hist_field
*hist_field
;
3318 var
++; /* skip '$' */
3320 hist_field
= find_target_event_var(hist_data
, system
, event
, var
);
3322 if (!system
&& data
->handler
== HANDLER_ONMATCH
) {
3323 system
= data
->match_data
.event_system
;
3324 event
= data
->match_data
.event
;
3327 hist_field
= find_event_var(hist_data
, system
, event
, var
);
3331 hist_err(tr
, HIST_ERR_PARAM_NOT_FOUND
, errpos(var
));
3336 static struct hist_field
*
3337 trace_action_create_field_var(struct hist_trigger_data
*hist_data
,
3338 struct action_data
*data
, char *system
,
3339 char *event
, char *var
)
3341 struct hist_field
*hist_field
= NULL
;
3342 struct field_var
*field_var
;
3345 * First try to create a field var on the target event (the
3346 * currently being defined). This will create a variable for
3347 * unqualified fields on the target event, or if qualified,
3348 * target fields that have qualified names matching the target.
3350 field_var
= create_target_field_var(hist_data
, system
, event
, var
);
3352 if (field_var
&& !IS_ERR(field_var
)) {
3353 save_field_var(hist_data
, field_var
);
3354 hist_field
= field_var
->var
;
3358 * If no explicit system.event is specified, default to
3359 * looking for fields on the onmatch(system.event.xxx)
3362 if (!system
&& data
->handler
== HANDLER_ONMATCH
) {
3363 system
= data
->match_data
.event_system
;
3364 event
= data
->match_data
.event
;
3368 * At this point, we're looking at a field on another
3369 * event. Because we can't modify a hist trigger on
3370 * another event to add a variable for a field, we need
3371 * to create a new trigger on that event and create the
3372 * variable at the same time.
3374 hist_field
= create_field_var_hist(hist_data
, system
, event
, var
);
3375 if (IS_ERR(hist_field
))
3381 destroy_field_var(field_var
);
3386 static int trace_action_create(struct hist_trigger_data
*hist_data
,
3387 struct action_data
*data
)
3389 struct trace_array
*tr
= hist_data
->event_file
->tr
;
3390 char *event_name
, *param
, *system
= NULL
;
3391 struct hist_field
*hist_field
, *var_ref
;
3393 unsigned int field_pos
= 0;
3394 struct synth_event
*event
;
3395 char *synth_event_name
;
3396 int var_ref_idx
, ret
= 0;
3398 lockdep_assert_held(&event_mutex
);
3400 if (data
->use_trace_keyword
)
3401 synth_event_name
= data
->synth_event_name
;
3403 synth_event_name
= data
->action_name
;
3405 event
= find_synth_event(synth_event_name
);
3407 hist_err(tr
, HIST_ERR_SYNTH_EVENT_NOT_FOUND
, errpos(synth_event_name
));
3413 for (i
= 0; i
< data
->n_params
; i
++) {
3416 p
= param
= kstrdup(data
->params
[i
], GFP_KERNEL
);
3422 system
= strsep(¶m
, ".");
3424 param
= (char *)system
;
3425 system
= event_name
= NULL
;
3427 event_name
= strsep(¶m
, ".");
3435 if (param
[0] == '$')
3436 hist_field
= trace_action_find_var(hist_data
, data
,
3440 hist_field
= trace_action_create_field_var(hist_data
,
3452 if (check_synth_field(event
, hist_field
, field_pos
) == 0) {
3453 var_ref
= create_var_ref(hist_data
, hist_field
,
3454 system
, event_name
);
3461 var_ref_idx
= find_var_ref_idx(hist_data
, var_ref
);
3462 if (WARN_ON(var_ref_idx
< 0)) {
3467 data
->var_ref_idx
[i
] = var_ref_idx
;
3474 hist_err(tr
, HIST_ERR_SYNTH_TYPE_MISMATCH
, errpos(param
));
3480 if (field_pos
!= event
->n_fields
) {
3481 hist_err(tr
, HIST_ERR_SYNTH_COUNT_MISMATCH
, errpos(event
->name
));
3486 data
->synth_event
= event
;
3495 static int action_create(struct hist_trigger_data
*hist_data
,
3496 struct action_data
*data
)
3498 struct trace_event_file
*file
= hist_data
->event_file
;
3499 struct trace_array
*tr
= file
->tr
;
3500 struct track_data
*track_data
;
3501 struct field_var
*field_var
;
3506 if (data
->action
== ACTION_TRACE
)
3507 return trace_action_create(hist_data
, data
);
3509 if (data
->action
== ACTION_SNAPSHOT
) {
3510 track_data
= track_data_alloc(hist_data
->key_size
, data
, hist_data
);
3511 if (IS_ERR(track_data
)) {
3512 ret
= PTR_ERR(track_data
);
3516 ret
= tracing_snapshot_cond_enable(file
->tr
, track_data
,
3517 cond_snapshot_update
);
3519 track_data_free(track_data
);
3524 if (data
->action
== ACTION_SAVE
) {
3525 if (hist_data
->n_save_vars
) {
3527 hist_err(tr
, HIST_ERR_TOO_MANY_SAVE_ACTIONS
, 0);
3531 for (i
= 0; i
< data
->n_params
; i
++) {
3532 param
= kstrdup(data
->params
[i
], GFP_KERNEL
);
3538 field_var
= create_target_field_var(hist_data
, NULL
, NULL
, param
);
3539 if (IS_ERR(field_var
)) {
3540 hist_err(tr
, HIST_ERR_FIELD_VAR_CREATE_FAIL
,
3542 ret
= PTR_ERR(field_var
);
3547 hist_data
->save_vars
[hist_data
->n_save_vars
++] = field_var
;
3548 if (field_var
->val
->flags
& HIST_FIELD_FL_STRING
)
3549 hist_data
->n_save_var_str
++;
3557 static int onmatch_create(struct hist_trigger_data
*hist_data
,
3558 struct action_data
*data
)
3560 return action_create(hist_data
, data
);
3563 static struct action_data
*onmatch_parse(struct trace_array
*tr
, char *str
)
3565 char *match_event
, *match_event_system
;
3566 struct action_data
*data
;
3569 data
= kzalloc(sizeof(*data
), GFP_KERNEL
);
3571 return ERR_PTR(-ENOMEM
);
3573 match_event
= strsep(&str
, ")");
3574 if (!match_event
|| !str
) {
3575 hist_err(tr
, HIST_ERR_NO_CLOSING_PAREN
, errpos(match_event
));
3579 match_event_system
= strsep(&match_event
, ".");
3581 hist_err(tr
, HIST_ERR_SUBSYS_NOT_FOUND
, errpos(match_event_system
));
3585 if (IS_ERR(event_file(tr
, match_event_system
, match_event
))) {
3586 hist_err(tr
, HIST_ERR_INVALID_SUBSYS_EVENT
, errpos(match_event
));
3590 data
->match_data
.event
= kstrdup(match_event
, GFP_KERNEL
);
3591 if (!data
->match_data
.event
) {
3596 data
->match_data
.event_system
= kstrdup(match_event_system
, GFP_KERNEL
);
3597 if (!data
->match_data
.event_system
) {
3602 ret
= action_parse(tr
, str
, data
, HANDLER_ONMATCH
);
3608 onmatch_destroy(data
);
3609 data
= ERR_PTR(ret
);
3613 static int create_hitcount_val(struct hist_trigger_data
*hist_data
)
3615 hist_data
->fields
[HITCOUNT_IDX
] =
3616 create_hist_field(hist_data
, NULL
, HIST_FIELD_FL_HITCOUNT
, NULL
);
3617 if (!hist_data
->fields
[HITCOUNT_IDX
])
3620 hist_data
->n_vals
++;
3621 hist_data
->n_fields
++;
3623 if (WARN_ON(hist_data
->n_vals
> TRACING_MAP_VALS_MAX
))
3629 static int __create_val_field(struct hist_trigger_data
*hist_data
,
3630 unsigned int val_idx
,
3631 struct trace_event_file
*file
,
3632 char *var_name
, char *field_str
,
3633 unsigned long flags
)
3635 struct hist_field
*hist_field
;
3638 hist_field
= parse_expr(hist_data
, file
, field_str
, flags
, var_name
, 0);
3639 if (IS_ERR(hist_field
)) {
3640 ret
= PTR_ERR(hist_field
);
3644 hist_data
->fields
[val_idx
] = hist_field
;
3646 ++hist_data
->n_vals
;
3647 ++hist_data
->n_fields
;
3649 if (WARN_ON(hist_data
->n_vals
> TRACING_MAP_VALS_MAX
+ TRACING_MAP_VARS_MAX
))
3655 static int create_val_field(struct hist_trigger_data
*hist_data
,
3656 unsigned int val_idx
,
3657 struct trace_event_file
*file
,
3660 if (WARN_ON(val_idx
>= TRACING_MAP_VALS_MAX
))
3663 return __create_val_field(hist_data
, val_idx
, file
, NULL
, field_str
, 0);
3666 static int create_var_field(struct hist_trigger_data
*hist_data
,
3667 unsigned int val_idx
,
3668 struct trace_event_file
*file
,
3669 char *var_name
, char *expr_str
)
3671 struct trace_array
*tr
= hist_data
->event_file
->tr
;
3672 unsigned long flags
= 0;
3675 if (WARN_ON(val_idx
>= TRACING_MAP_VALS_MAX
+ TRACING_MAP_VARS_MAX
))
3678 if (find_var(hist_data
, file
, var_name
) && !hist_data
->remove
) {
3679 hist_err(tr
, HIST_ERR_DUPLICATE_VAR
, errpos(var_name
));
3683 flags
|= HIST_FIELD_FL_VAR
;
3684 hist_data
->n_vars
++;
3685 if (WARN_ON(hist_data
->n_vars
> TRACING_MAP_VARS_MAX
))
3688 ret
= __create_val_field(hist_data
, val_idx
, file
, var_name
, expr_str
, flags
);
3690 if (!ret
&& hist_data
->fields
[val_idx
]->flags
& HIST_FIELD_FL_STRING
)
3691 hist_data
->fields
[val_idx
]->var_str_idx
= hist_data
->n_var_str
++;
3696 static int create_val_fields(struct hist_trigger_data
*hist_data
,
3697 struct trace_event_file
*file
)
3699 char *fields_str
, *field_str
;
3700 unsigned int i
, j
= 1;
3703 ret
= create_hitcount_val(hist_data
);
3707 fields_str
= hist_data
->attrs
->vals_str
;
3711 for (i
= 0, j
= 1; i
< TRACING_MAP_VALS_MAX
&&
3712 j
< TRACING_MAP_VALS_MAX
; i
++) {
3713 field_str
= strsep(&fields_str
, ",");
3717 if (strcmp(field_str
, "hitcount") == 0)
3720 ret
= create_val_field(hist_data
, j
++, file
, field_str
);
3725 if (fields_str
&& (strcmp(fields_str
, "hitcount") != 0))
3731 static int create_key_field(struct hist_trigger_data
*hist_data
,
3732 unsigned int key_idx
,
3733 unsigned int key_offset
,
3734 struct trace_event_file
*file
,
3737 struct trace_array
*tr
= hist_data
->event_file
->tr
;
3738 struct hist_field
*hist_field
= NULL
;
3739 unsigned long flags
= 0;
3740 unsigned int key_size
;
3743 if (WARN_ON(key_idx
>= HIST_FIELDS_MAX
))
3746 flags
|= HIST_FIELD_FL_KEY
;
3748 if (strcmp(field_str
, "stacktrace") == 0) {
3749 flags
|= HIST_FIELD_FL_STACKTRACE
;
3750 key_size
= sizeof(unsigned long) * HIST_STACKTRACE_DEPTH
;
3751 hist_field
= create_hist_field(hist_data
, NULL
, flags
, NULL
);
3753 hist_field
= parse_expr(hist_data
, file
, field_str
, flags
,
3755 if (IS_ERR(hist_field
)) {
3756 ret
= PTR_ERR(hist_field
);
3760 if (field_has_hist_vars(hist_field
, 0)) {
3761 hist_err(tr
, HIST_ERR_INVALID_REF_KEY
, errpos(field_str
));
3762 destroy_hist_field(hist_field
, 0);
3767 key_size
= hist_field
->size
;
3770 hist_data
->fields
[key_idx
] = hist_field
;
3772 key_size
= ALIGN(key_size
, sizeof(u64
));
3773 hist_data
->fields
[key_idx
]->size
= key_size
;
3774 hist_data
->fields
[key_idx
]->offset
= key_offset
;
3776 hist_data
->key_size
+= key_size
;
3778 if (hist_data
->key_size
> HIST_KEY_SIZE_MAX
) {
3783 hist_data
->n_keys
++;
3784 hist_data
->n_fields
++;
3786 if (WARN_ON(hist_data
->n_keys
> TRACING_MAP_KEYS_MAX
))
3794 static int create_key_fields(struct hist_trigger_data
*hist_data
,
3795 struct trace_event_file
*file
)
3797 unsigned int i
, key_offset
= 0, n_vals
= hist_data
->n_vals
;
3798 char *fields_str
, *field_str
;
3801 fields_str
= hist_data
->attrs
->keys_str
;
3805 for (i
= n_vals
; i
< n_vals
+ TRACING_MAP_KEYS_MAX
; i
++) {
3806 field_str
= strsep(&fields_str
, ",");
3809 ret
= create_key_field(hist_data
, i
, key_offset
,
3824 static int create_var_fields(struct hist_trigger_data
*hist_data
,
3825 struct trace_event_file
*file
)
3827 unsigned int i
, j
= hist_data
->n_vals
;
3830 unsigned int n_vars
= hist_data
->attrs
->var_defs
.n_vars
;
3832 for (i
= 0; i
< n_vars
; i
++) {
3833 char *var_name
= hist_data
->attrs
->var_defs
.name
[i
];
3834 char *expr
= hist_data
->attrs
->var_defs
.expr
[i
];
3836 ret
= create_var_field(hist_data
, j
++, file
, var_name
, expr
);
3844 static void free_var_defs(struct hist_trigger_data
*hist_data
)
3848 for (i
= 0; i
< hist_data
->attrs
->var_defs
.n_vars
; i
++) {
3849 kfree(hist_data
->attrs
->var_defs
.name
[i
]);
3850 kfree(hist_data
->attrs
->var_defs
.expr
[i
]);
3853 hist_data
->attrs
->var_defs
.n_vars
= 0;
3856 static int parse_var_defs(struct hist_trigger_data
*hist_data
)
3858 struct trace_array
*tr
= hist_data
->event_file
->tr
;
3859 char *s
, *str
, *var_name
, *field_str
;
3860 unsigned int i
, j
, n_vars
= 0;
3863 for (i
= 0; i
< hist_data
->attrs
->n_assignments
; i
++) {
3864 str
= hist_data
->attrs
->assignment_str
[i
];
3865 for (j
= 0; j
< TRACING_MAP_VARS_MAX
; j
++) {
3866 field_str
= strsep(&str
, ",");
3870 var_name
= strsep(&field_str
, "=");
3871 if (!var_name
|| !field_str
) {
3872 hist_err(tr
, HIST_ERR_MALFORMED_ASSIGNMENT
,
3878 if (n_vars
== TRACING_MAP_VARS_MAX
) {
3879 hist_err(tr
, HIST_ERR_TOO_MANY_VARS
, errpos(var_name
));
3884 s
= kstrdup(var_name
, GFP_KERNEL
);
3889 hist_data
->attrs
->var_defs
.name
[n_vars
] = s
;
3891 s
= kstrdup(field_str
, GFP_KERNEL
);
3896 hist_data
->attrs
->var_defs
.expr
[n_vars
++] = s
;
3898 hist_data
->attrs
->var_defs
.n_vars
= n_vars
;
3904 free_var_defs(hist_data
);
3909 static int create_hist_fields(struct hist_trigger_data
*hist_data
,
3910 struct trace_event_file
*file
)
3914 ret
= parse_var_defs(hist_data
);
3918 ret
= create_val_fields(hist_data
, file
);
3922 ret
= create_var_fields(hist_data
, file
);
3926 ret
= create_key_fields(hist_data
, file
);
3930 free_var_defs(hist_data
);
3935 static int is_descending(struct trace_array
*tr
, const char *str
)
3940 if (strcmp(str
, "descending") == 0)
3943 if (strcmp(str
, "ascending") == 0)
3946 hist_err(tr
, HIST_ERR_INVALID_SORT_MODIFIER
, errpos((char *)str
));
3951 static int create_sort_keys(struct hist_trigger_data
*hist_data
)
3953 struct trace_array
*tr
= hist_data
->event_file
->tr
;
3954 char *fields_str
= hist_data
->attrs
->sort_key_str
;
3955 struct tracing_map_sort_key
*sort_key
;
3956 int descending
, ret
= 0;
3957 unsigned int i
, j
, k
;
3959 hist_data
->n_sort_keys
= 1; /* we always have at least one, hitcount */
3964 for (i
= 0; i
< TRACING_MAP_SORT_KEYS_MAX
; i
++) {
3965 struct hist_field
*hist_field
;
3966 char *field_str
, *field_name
;
3967 const char *test_name
;
3969 sort_key
= &hist_data
->sort_keys
[i
];
3971 field_str
= strsep(&fields_str
, ",");
3977 hist_err(tr
, HIST_ERR_EMPTY_SORT_FIELD
, errpos("sort="));
3981 if ((i
== TRACING_MAP_SORT_KEYS_MAX
- 1) && fields_str
) {
3982 hist_err(tr
, HIST_ERR_TOO_MANY_SORT_FIELDS
, errpos("sort="));
3987 field_name
= strsep(&field_str
, ".");
3988 if (!field_name
|| !*field_name
) {
3990 hist_err(tr
, HIST_ERR_EMPTY_SORT_FIELD
, errpos("sort="));
3994 if (strcmp(field_name
, "hitcount") == 0) {
3995 descending
= is_descending(tr
, field_str
);
3996 if (descending
< 0) {
4000 sort_key
->descending
= descending
;
4004 for (j
= 1, k
= 1; j
< hist_data
->n_fields
; j
++) {
4007 hist_field
= hist_data
->fields
[j
];
4008 if (hist_field
->flags
& HIST_FIELD_FL_VAR
)
4013 test_name
= hist_field_name(hist_field
, 0);
4015 if (strcmp(field_name
, test_name
) == 0) {
4016 sort_key
->field_idx
= idx
;
4017 descending
= is_descending(tr
, field_str
);
4018 if (descending
< 0) {
4022 sort_key
->descending
= descending
;
4026 if (j
== hist_data
->n_fields
) {
4028 hist_err(tr
, HIST_ERR_INVALID_SORT_FIELD
, errpos(field_name
));
4033 hist_data
->n_sort_keys
= i
;
4038 static void destroy_actions(struct hist_trigger_data
*hist_data
)
4042 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
4043 struct action_data
*data
= hist_data
->actions
[i
];
4045 if (data
->handler
== HANDLER_ONMATCH
)
4046 onmatch_destroy(data
);
4047 else if (data
->handler
== HANDLER_ONMAX
||
4048 data
->handler
== HANDLER_ONCHANGE
)
4049 track_data_destroy(hist_data
, data
);
4055 static int parse_actions(struct hist_trigger_data
*hist_data
)
4057 struct trace_array
*tr
= hist_data
->event_file
->tr
;
4058 struct action_data
*data
;
4064 for (i
= 0; i
< hist_data
->attrs
->n_actions
; i
++) {
4065 str
= hist_data
->attrs
->action_str
[i
];
4067 if ((len
= str_has_prefix(str
, "onmatch("))) {
4068 char *action_str
= str
+ len
;
4070 data
= onmatch_parse(tr
, action_str
);
4072 ret
= PTR_ERR(data
);
4075 } else if ((len
= str_has_prefix(str
, "onmax("))) {
4076 char *action_str
= str
+ len
;
4078 data
= track_data_parse(hist_data
, action_str
,
4081 ret
= PTR_ERR(data
);
4084 } else if ((len
= str_has_prefix(str
, "onchange("))) {
4085 char *action_str
= str
+ len
;
4087 data
= track_data_parse(hist_data
, action_str
,
4090 ret
= PTR_ERR(data
);
4098 hist_data
->actions
[hist_data
->n_actions
++] = data
;
4104 static int create_actions(struct hist_trigger_data
*hist_data
)
4106 struct action_data
*data
;
4110 for (i
= 0; i
< hist_data
->attrs
->n_actions
; i
++) {
4111 data
= hist_data
->actions
[i
];
4113 if (data
->handler
== HANDLER_ONMATCH
) {
4114 ret
= onmatch_create(hist_data
, data
);
4117 } else if (data
->handler
== HANDLER_ONMAX
||
4118 data
->handler
== HANDLER_ONCHANGE
) {
4119 ret
= track_data_create(hist_data
, data
);
4131 static void print_actions(struct seq_file
*m
,
4132 struct hist_trigger_data
*hist_data
,
4133 struct tracing_map_elt
*elt
)
4137 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
4138 struct action_data
*data
= hist_data
->actions
[i
];
4140 if (data
->action
== ACTION_SNAPSHOT
)
4143 if (data
->handler
== HANDLER_ONMAX
||
4144 data
->handler
== HANDLER_ONCHANGE
)
4145 track_data_print(m
, hist_data
, elt
, data
);
4149 static void print_action_spec(struct seq_file
*m
,
4150 struct hist_trigger_data
*hist_data
,
4151 struct action_data
*data
)
4155 if (data
->action
== ACTION_SAVE
) {
4156 for (i
= 0; i
< hist_data
->n_save_vars
; i
++) {
4157 seq_printf(m
, "%s", hist_data
->save_vars
[i
]->var
->var
.name
);
4158 if (i
< hist_data
->n_save_vars
- 1)
4161 } else if (data
->action
== ACTION_TRACE
) {
4162 if (data
->use_trace_keyword
)
4163 seq_printf(m
, "%s", data
->synth_event_name
);
4164 for (i
= 0; i
< data
->n_params
; i
++) {
4165 if (i
|| data
->use_trace_keyword
)
4167 seq_printf(m
, "%s", data
->params
[i
]);
4172 static void print_track_data_spec(struct seq_file
*m
,
4173 struct hist_trigger_data
*hist_data
,
4174 struct action_data
*data
)
4176 if (data
->handler
== HANDLER_ONMAX
)
4177 seq_puts(m
, ":onmax(");
4178 else if (data
->handler
== HANDLER_ONCHANGE
)
4179 seq_puts(m
, ":onchange(");
4180 seq_printf(m
, "%s", data
->track_data
.var_str
);
4181 seq_printf(m
, ").%s(", data
->action_name
);
4183 print_action_spec(m
, hist_data
, data
);
4188 static void print_onmatch_spec(struct seq_file
*m
,
4189 struct hist_trigger_data
*hist_data
,
4190 struct action_data
*data
)
4192 seq_printf(m
, ":onmatch(%s.%s).", data
->match_data
.event_system
,
4193 data
->match_data
.event
);
4195 seq_printf(m
, "%s(", data
->action_name
);
4197 print_action_spec(m
, hist_data
, data
);
4202 static bool actions_match(struct hist_trigger_data
*hist_data
,
4203 struct hist_trigger_data
*hist_data_test
)
4207 if (hist_data
->n_actions
!= hist_data_test
->n_actions
)
4210 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
4211 struct action_data
*data
= hist_data
->actions
[i
];
4212 struct action_data
*data_test
= hist_data_test
->actions
[i
];
4213 char *action_name
, *action_name_test
;
4215 if (data
->handler
!= data_test
->handler
)
4217 if (data
->action
!= data_test
->action
)
4220 if (data
->n_params
!= data_test
->n_params
)
4223 for (j
= 0; j
< data
->n_params
; j
++) {
4224 if (strcmp(data
->params
[j
], data_test
->params
[j
]) != 0)
4228 if (data
->use_trace_keyword
)
4229 action_name
= data
->synth_event_name
;
4231 action_name
= data
->action_name
;
4233 if (data_test
->use_trace_keyword
)
4234 action_name_test
= data_test
->synth_event_name
;
4236 action_name_test
= data_test
->action_name
;
4238 if (strcmp(action_name
, action_name_test
) != 0)
4241 if (data
->handler
== HANDLER_ONMATCH
) {
4242 if (strcmp(data
->match_data
.event_system
,
4243 data_test
->match_data
.event_system
) != 0)
4245 if (strcmp(data
->match_data
.event
,
4246 data_test
->match_data
.event
) != 0)
4248 } else if (data
->handler
== HANDLER_ONMAX
||
4249 data
->handler
== HANDLER_ONCHANGE
) {
4250 if (strcmp(data
->track_data
.var_str
,
4251 data_test
->track_data
.var_str
) != 0)
4260 static void print_actions_spec(struct seq_file
*m
,
4261 struct hist_trigger_data
*hist_data
)
4265 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
4266 struct action_data
*data
= hist_data
->actions
[i
];
4268 if (data
->handler
== HANDLER_ONMATCH
)
4269 print_onmatch_spec(m
, hist_data
, data
);
4270 else if (data
->handler
== HANDLER_ONMAX
||
4271 data
->handler
== HANDLER_ONCHANGE
)
4272 print_track_data_spec(m
, hist_data
, data
);
4276 static void destroy_field_var_hists(struct hist_trigger_data
*hist_data
)
4280 for (i
= 0; i
< hist_data
->n_field_var_hists
; i
++) {
4281 kfree(hist_data
->field_var_hists
[i
]->cmd
);
4282 kfree(hist_data
->field_var_hists
[i
]);
4286 static void destroy_hist_data(struct hist_trigger_data
*hist_data
)
4291 destroy_hist_trigger_attrs(hist_data
->attrs
);
4292 destroy_hist_fields(hist_data
);
4293 tracing_map_destroy(hist_data
->map
);
4295 destroy_actions(hist_data
);
4296 destroy_field_vars(hist_data
);
4297 destroy_field_var_hists(hist_data
);
4302 static int create_tracing_map_fields(struct hist_trigger_data
*hist_data
)
4304 struct tracing_map
*map
= hist_data
->map
;
4305 struct ftrace_event_field
*field
;
4306 struct hist_field
*hist_field
;
4309 for_each_hist_field(i
, hist_data
) {
4310 hist_field
= hist_data
->fields
[i
];
4311 if (hist_field
->flags
& HIST_FIELD_FL_KEY
) {
4312 tracing_map_cmp_fn_t cmp_fn
;
4314 field
= hist_field
->field
;
4316 if (hist_field
->flags
& HIST_FIELD_FL_STACKTRACE
)
4317 cmp_fn
= tracing_map_cmp_none
;
4319 cmp_fn
= tracing_map_cmp_num(hist_field
->size
,
4320 hist_field
->is_signed
);
4321 else if (is_string_field(field
))
4322 cmp_fn
= tracing_map_cmp_string
;
4324 cmp_fn
= tracing_map_cmp_num(field
->size
,
4326 idx
= tracing_map_add_key_field(map
,
4329 } else if (!(hist_field
->flags
& HIST_FIELD_FL_VAR
))
4330 idx
= tracing_map_add_sum_field(map
);
4335 if (hist_field
->flags
& HIST_FIELD_FL_VAR
) {
4336 idx
= tracing_map_add_var(map
);
4339 hist_field
->var
.idx
= idx
;
4340 hist_field
->var
.hist_data
= hist_data
;
4347 static struct hist_trigger_data
*
4348 create_hist_data(unsigned int map_bits
,
4349 struct hist_trigger_attrs
*attrs
,
4350 struct trace_event_file
*file
,
4353 const struct tracing_map_ops
*map_ops
= NULL
;
4354 struct hist_trigger_data
*hist_data
;
4357 hist_data
= kzalloc(sizeof(*hist_data
), GFP_KERNEL
);
4359 return ERR_PTR(-ENOMEM
);
4361 hist_data
->attrs
= attrs
;
4362 hist_data
->remove
= remove
;
4363 hist_data
->event_file
= file
;
4365 ret
= parse_actions(hist_data
);
4369 ret
= create_hist_fields(hist_data
, file
);
4373 ret
= create_sort_keys(hist_data
);
4377 map_ops
= &hist_trigger_elt_data_ops
;
4379 hist_data
->map
= tracing_map_create(map_bits
, hist_data
->key_size
,
4380 map_ops
, hist_data
);
4381 if (IS_ERR(hist_data
->map
)) {
4382 ret
= PTR_ERR(hist_data
->map
);
4383 hist_data
->map
= NULL
;
4387 ret
= create_tracing_map_fields(hist_data
);
4393 hist_data
->attrs
= NULL
;
4395 destroy_hist_data(hist_data
);
4397 hist_data
= ERR_PTR(ret
);
4402 static void hist_trigger_elt_update(struct hist_trigger_data
*hist_data
,
4403 struct tracing_map_elt
*elt
, void *rec
,
4404 struct ring_buffer_event
*rbe
,
4407 struct hist_elt_data
*elt_data
;
4408 struct hist_field
*hist_field
;
4409 unsigned int i
, var_idx
;
4412 elt_data
= elt
->private_data
;
4413 elt_data
->var_ref_vals
= var_ref_vals
;
4415 for_each_hist_val_field(i
, hist_data
) {
4416 hist_field
= hist_data
->fields
[i
];
4417 hist_val
= hist_field
->fn(hist_field
, elt
, rbe
, rec
);
4418 if (hist_field
->flags
& HIST_FIELD_FL_VAR
) {
4419 var_idx
= hist_field
->var
.idx
;
4421 if (hist_field
->flags
& HIST_FIELD_FL_STRING
) {
4422 unsigned int str_start
, var_str_idx
, idx
;
4423 char *str
, *val_str
;
4425 str_start
= hist_data
->n_field_var_str
+
4426 hist_data
->n_save_var_str
;
4427 var_str_idx
= hist_field
->var_str_idx
;
4428 idx
= str_start
+ var_str_idx
;
4430 str
= elt_data
->field_var_str
[idx
];
4431 val_str
= (char *)(uintptr_t)hist_val
;
4432 strscpy(str
, val_str
, STR_VAR_LEN_MAX
);
4434 hist_val
= (u64
)(uintptr_t)str
;
4436 tracing_map_set_var(elt
, var_idx
, hist_val
);
4439 tracing_map_update_sum(elt
, i
, hist_val
);
4442 for_each_hist_key_field(i
, hist_data
) {
4443 hist_field
= hist_data
->fields
[i
];
4444 if (hist_field
->flags
& HIST_FIELD_FL_VAR
) {
4445 hist_val
= hist_field
->fn(hist_field
, elt
, rbe
, rec
);
4446 var_idx
= hist_field
->var
.idx
;
4447 tracing_map_set_var(elt
, var_idx
, hist_val
);
4451 update_field_vars(hist_data
, elt
, rbe
, rec
);
4454 static inline void add_to_key(char *compound_key
, void *key
,
4455 struct hist_field
*key_field
, void *rec
)
4457 size_t size
= key_field
->size
;
4459 if (key_field
->flags
& HIST_FIELD_FL_STRING
) {
4460 struct ftrace_event_field
*field
;
4462 field
= key_field
->field
;
4463 if (field
->filter_type
== FILTER_DYN_STRING
)
4464 size
= *(u32
*)(rec
+ field
->offset
) >> 16;
4465 else if (field
->filter_type
== FILTER_PTR_STRING
)
4467 else if (field
->filter_type
== FILTER_STATIC_STRING
)
4470 /* ensure NULL-termination */
4471 if (size
> key_field
->size
- 1)
4472 size
= key_field
->size
- 1;
4474 strncpy(compound_key
+ key_field
->offset
, (char *)key
, size
);
4476 memcpy(compound_key
+ key_field
->offset
, key
, size
);
4480 hist_trigger_actions(struct hist_trigger_data
*hist_data
,
4481 struct tracing_map_elt
*elt
, void *rec
,
4482 struct ring_buffer_event
*rbe
, void *key
,
4485 struct action_data
*data
;
4488 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
4489 data
= hist_data
->actions
[i
];
4490 data
->fn(hist_data
, elt
, rec
, rbe
, key
, data
, var_ref_vals
);
4494 static void event_hist_trigger(struct event_trigger_data
*data
, void *rec
,
4495 struct ring_buffer_event
*rbe
)
4497 struct hist_trigger_data
*hist_data
= data
->private_data
;
4498 bool use_compound_key
= (hist_data
->n_keys
> 1);
4499 unsigned long entries
[HIST_STACKTRACE_DEPTH
];
4500 u64 var_ref_vals
[TRACING_MAP_VARS_MAX
];
4501 char compound_key
[HIST_KEY_SIZE_MAX
];
4502 struct tracing_map_elt
*elt
= NULL
;
4503 struct hist_field
*key_field
;
4508 memset(compound_key
, 0, hist_data
->key_size
);
4510 for_each_hist_key_field(i
, hist_data
) {
4511 key_field
= hist_data
->fields
[i
];
4513 if (key_field
->flags
& HIST_FIELD_FL_STACKTRACE
) {
4514 memset(entries
, 0, HIST_STACKTRACE_SIZE
);
4515 stack_trace_save(entries
, HIST_STACKTRACE_DEPTH
,
4516 HIST_STACKTRACE_SKIP
);
4519 field_contents
= key_field
->fn(key_field
, elt
, rbe
, rec
);
4520 if (key_field
->flags
& HIST_FIELD_FL_STRING
) {
4521 key
= (void *)(unsigned long)field_contents
;
4522 use_compound_key
= true;
4524 key
= (void *)&field_contents
;
4527 if (use_compound_key
)
4528 add_to_key(compound_key
, key
, key_field
, rec
);
4531 if (use_compound_key
)
4534 if (hist_data
->n_var_refs
&&
4535 !resolve_var_refs(hist_data
, key
, var_ref_vals
, false))
4538 elt
= tracing_map_insert(hist_data
->map
, key
);
4542 hist_trigger_elt_update(hist_data
, elt
, rec
, rbe
, var_ref_vals
);
4544 if (resolve_var_refs(hist_data
, key
, var_ref_vals
, true))
4545 hist_trigger_actions(hist_data
, elt
, rec
, rbe
, key
, var_ref_vals
);
4548 static void hist_trigger_stacktrace_print(struct seq_file
*m
,
4549 unsigned long *stacktrace_entries
,
4550 unsigned int max_entries
)
4552 char str
[KSYM_SYMBOL_LEN
];
4553 unsigned int spaces
= 8;
4556 for (i
= 0; i
< max_entries
; i
++) {
4557 if (!stacktrace_entries
[i
])
4560 seq_printf(m
, "%*c", 1 + spaces
, ' ');
4561 sprint_symbol(str
, stacktrace_entries
[i
]);
4562 seq_printf(m
, "%s\n", str
);
4566 static void hist_trigger_print_key(struct seq_file
*m
,
4567 struct hist_trigger_data
*hist_data
,
4569 struct tracing_map_elt
*elt
)
4571 struct hist_field
*key_field
;
4572 char str
[KSYM_SYMBOL_LEN
];
4573 bool multiline
= false;
4574 const char *field_name
;
4580 for_each_hist_key_field(i
, hist_data
) {
4581 key_field
= hist_data
->fields
[i
];
4583 if (i
> hist_data
->n_vals
)
4586 field_name
= hist_field_name(key_field
, 0);
4588 if (key_field
->flags
& HIST_FIELD_FL_HEX
) {
4589 uval
= *(u64
*)(key
+ key_field
->offset
);
4590 seq_printf(m
, "%s: %llx", field_name
, uval
);
4591 } else if (key_field
->flags
& HIST_FIELD_FL_SYM
) {
4592 uval
= *(u64
*)(key
+ key_field
->offset
);
4593 sprint_symbol_no_offset(str
, uval
);
4594 seq_printf(m
, "%s: [%llx] %-45s", field_name
,
4596 } else if (key_field
->flags
& HIST_FIELD_FL_SYM_OFFSET
) {
4597 uval
= *(u64
*)(key
+ key_field
->offset
);
4598 sprint_symbol(str
, uval
);
4599 seq_printf(m
, "%s: [%llx] %-55s", field_name
,
4601 } else if (key_field
->flags
& HIST_FIELD_FL_EXECNAME
) {
4602 struct hist_elt_data
*elt_data
= elt
->private_data
;
4605 if (WARN_ON_ONCE(!elt_data
))
4608 comm
= elt_data
->comm
;
4610 uval
= *(u64
*)(key
+ key_field
->offset
);
4611 seq_printf(m
, "%s: %-16s[%10llu]", field_name
,
4613 } else if (key_field
->flags
& HIST_FIELD_FL_SYSCALL
) {
4614 const char *syscall_name
;
4616 uval
= *(u64
*)(key
+ key_field
->offset
);
4617 syscall_name
= get_syscall_name(uval
);
4619 syscall_name
= "unknown_syscall";
4621 seq_printf(m
, "%s: %-30s[%3llu]", field_name
,
4622 syscall_name
, uval
);
4623 } else if (key_field
->flags
& HIST_FIELD_FL_STACKTRACE
) {
4624 seq_puts(m
, "stacktrace:\n");
4625 hist_trigger_stacktrace_print(m
,
4626 key
+ key_field
->offset
,
4627 HIST_STACKTRACE_DEPTH
);
4629 } else if (key_field
->flags
& HIST_FIELD_FL_LOG2
) {
4630 seq_printf(m
, "%s: ~ 2^%-2llu", field_name
,
4631 *(u64
*)(key
+ key_field
->offset
));
4632 } else if (key_field
->flags
& HIST_FIELD_FL_STRING
) {
4633 seq_printf(m
, "%s: %-50s", field_name
,
4634 (char *)(key
+ key_field
->offset
));
4636 uval
= *(u64
*)(key
+ key_field
->offset
);
4637 seq_printf(m
, "%s: %10llu", field_name
, uval
);
4647 static void hist_trigger_entry_print(struct seq_file
*m
,
4648 struct hist_trigger_data
*hist_data
,
4650 struct tracing_map_elt
*elt
)
4652 const char *field_name
;
4655 hist_trigger_print_key(m
, hist_data
, key
, elt
);
4657 seq_printf(m
, " hitcount: %10llu",
4658 tracing_map_read_sum(elt
, HITCOUNT_IDX
));
4660 for (i
= 1; i
< hist_data
->n_vals
; i
++) {
4661 field_name
= hist_field_name(hist_data
->fields
[i
], 0);
4663 if (hist_data
->fields
[i
]->flags
& HIST_FIELD_FL_VAR
||
4664 hist_data
->fields
[i
]->flags
& HIST_FIELD_FL_EXPR
)
4667 if (hist_data
->fields
[i
]->flags
& HIST_FIELD_FL_HEX
) {
4668 seq_printf(m
, " %s: %10llx", field_name
,
4669 tracing_map_read_sum(elt
, i
));
4671 seq_printf(m
, " %s: %10llu", field_name
,
4672 tracing_map_read_sum(elt
, i
));
4676 print_actions(m
, hist_data
, elt
);
4681 static int print_entries(struct seq_file
*m
,
4682 struct hist_trigger_data
*hist_data
)
4684 struct tracing_map_sort_entry
**sort_entries
= NULL
;
4685 struct tracing_map
*map
= hist_data
->map
;
4688 n_entries
= tracing_map_sort_entries(map
, hist_data
->sort_keys
,
4689 hist_data
->n_sort_keys
,
4694 for (i
= 0; i
< n_entries
; i
++)
4695 hist_trigger_entry_print(m
, hist_data
,
4696 sort_entries
[i
]->key
,
4697 sort_entries
[i
]->elt
);
4699 tracing_map_destroy_sort_entries(sort_entries
, n_entries
);
4704 static void hist_trigger_show(struct seq_file
*m
,
4705 struct event_trigger_data
*data
, int n
)
4707 struct hist_trigger_data
*hist_data
;
4711 seq_puts(m
, "\n\n");
4713 seq_puts(m
, "# event histogram\n#\n# trigger info: ");
4714 data
->ops
->print(m
, data
->ops
, data
);
4715 seq_puts(m
, "#\n\n");
4717 hist_data
= data
->private_data
;
4718 n_entries
= print_entries(m
, hist_data
);
4722 track_data_snapshot_print(m
, hist_data
);
4724 seq_printf(m
, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n",
4725 (u64
)atomic64_read(&hist_data
->map
->hits
),
4726 n_entries
, (u64
)atomic64_read(&hist_data
->map
->drops
));
4729 static int hist_show(struct seq_file
*m
, void *v
)
4731 struct event_trigger_data
*data
;
4732 struct trace_event_file
*event_file
;
4735 mutex_lock(&event_mutex
);
4737 event_file
= event_file_data(m
->private);
4738 if (unlikely(!event_file
)) {
4743 list_for_each_entry(data
, &event_file
->triggers
, list
) {
4744 if (data
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
)
4745 hist_trigger_show(m
, data
, n
++);
4749 mutex_unlock(&event_mutex
);
4754 static int event_hist_open(struct inode
*inode
, struct file
*file
)
4758 ret
= security_locked_down(LOCKDOWN_TRACEFS
);
4762 return single_open(file
, hist_show
, file
);
4765 const struct file_operations event_hist_fops
= {
4766 .open
= event_hist_open
,
4768 .llseek
= seq_lseek
,
4769 .release
= single_release
,
4772 #ifdef CONFIG_HIST_TRIGGERS_DEBUG
4773 static void hist_field_debug_show_flags(struct seq_file
*m
,
4774 unsigned long flags
)
4776 seq_puts(m
, " flags:\n");
4778 if (flags
& HIST_FIELD_FL_KEY
)
4779 seq_puts(m
, " HIST_FIELD_FL_KEY\n");
4780 else if (flags
& HIST_FIELD_FL_HITCOUNT
)
4781 seq_puts(m
, " VAL: HIST_FIELD_FL_HITCOUNT\n");
4782 else if (flags
& HIST_FIELD_FL_VAR
)
4783 seq_puts(m
, " HIST_FIELD_FL_VAR\n");
4784 else if (flags
& HIST_FIELD_FL_VAR_REF
)
4785 seq_puts(m
, " HIST_FIELD_FL_VAR_REF\n");
4787 seq_puts(m
, " VAL: normal u64 value\n");
4789 if (flags
& HIST_FIELD_FL_ALIAS
)
4790 seq_puts(m
, " HIST_FIELD_FL_ALIAS\n");
4793 static int hist_field_debug_show(struct seq_file
*m
,
4794 struct hist_field
*field
, unsigned long flags
)
4796 if ((field
->flags
& flags
) != flags
) {
4797 seq_printf(m
, "ERROR: bad flags - %lx\n", flags
);
4801 hist_field_debug_show_flags(m
, field
->flags
);
4803 seq_printf(m
, " ftrace_event_field name: %s\n",
4804 field
->field
->name
);
4806 if (field
->flags
& HIST_FIELD_FL_VAR
) {
4807 seq_printf(m
, " var.name: %s\n", field
->var
.name
);
4808 seq_printf(m
, " var.idx (into tracing_map_elt.vars[]): %u\n",
4812 if (field
->flags
& HIST_FIELD_FL_ALIAS
)
4813 seq_printf(m
, " var_ref_idx (into hist_data->var_refs[]): %u\n",
4814 field
->var_ref_idx
);
4816 if (field
->flags
& HIST_FIELD_FL_VAR_REF
) {
4817 seq_printf(m
, " name: %s\n", field
->name
);
4818 seq_printf(m
, " var.idx (into tracing_map_elt.vars[]): %u\n",
4820 seq_printf(m
, " var.hist_data: %p\n", field
->var
.hist_data
);
4821 seq_printf(m
, " var_ref_idx (into hist_data->var_refs[]): %u\n",
4822 field
->var_ref_idx
);
4824 seq_printf(m
, " system: %s\n", field
->system
);
4825 if (field
->event_name
)
4826 seq_printf(m
, " event_name: %s\n", field
->event_name
);
4829 seq_printf(m
, " type: %s\n", field
->type
);
4830 seq_printf(m
, " size: %u\n", field
->size
);
4831 seq_printf(m
, " is_signed: %u\n", field
->is_signed
);
4836 static int field_var_debug_show(struct seq_file
*m
,
4837 struct field_var
*field_var
, unsigned int i
,
4840 const char *vars_name
= save_vars
? "save_vars" : "field_vars";
4841 struct hist_field
*field
;
4844 seq_printf(m
, "\n hist_data->%s[%d]:\n", vars_name
, i
);
4846 field
= field_var
->var
;
4848 seq_printf(m
, "\n %s[%d].var:\n", vars_name
, i
);
4850 hist_field_debug_show_flags(m
, field
->flags
);
4851 seq_printf(m
, " var.name: %s\n", field
->var
.name
);
4852 seq_printf(m
, " var.idx (into tracing_map_elt.vars[]): %u\n",
4855 field
= field_var
->val
;
4857 seq_printf(m
, "\n %s[%d].val:\n", vars_name
, i
);
4859 seq_printf(m
, " ftrace_event_field name: %s\n",
4860 field
->field
->name
);
4866 seq_printf(m
, " type: %s\n", field
->type
);
4867 seq_printf(m
, " size: %u\n", field
->size
);
4868 seq_printf(m
, " is_signed: %u\n", field
->is_signed
);
4873 static int hist_action_debug_show(struct seq_file
*m
,
4874 struct action_data
*data
, int i
)
4878 if (data
->handler
== HANDLER_ONMAX
||
4879 data
->handler
== HANDLER_ONCHANGE
) {
4880 seq_printf(m
, "\n hist_data->actions[%d].track_data.var_ref:\n", i
);
4881 ret
= hist_field_debug_show(m
, data
->track_data
.var_ref
,
4882 HIST_FIELD_FL_VAR_REF
);
4886 seq_printf(m
, "\n hist_data->actions[%d].track_data.track_var:\n", i
);
4887 ret
= hist_field_debug_show(m
, data
->track_data
.track_var
,
4893 if (data
->handler
== HANDLER_ONMATCH
) {
4894 seq_printf(m
, "\n hist_data->actions[%d].match_data.event_system: %s\n",
4895 i
, data
->match_data
.event_system
);
4896 seq_printf(m
, " hist_data->actions[%d].match_data.event: %s\n",
4897 i
, data
->match_data
.event
);
4903 static int hist_actions_debug_show(struct seq_file
*m
,
4904 struct hist_trigger_data
*hist_data
)
4908 if (hist_data
->n_actions
)
4909 seq_puts(m
, "\n action tracking variables (for onmax()/onchange()/onmatch()):\n");
4911 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
4912 struct action_data
*action
= hist_data
->actions
[i
];
4914 ret
= hist_action_debug_show(m
, action
, i
);
4919 if (hist_data
->n_save_vars
)
4920 seq_puts(m
, "\n save action variables (save() params):\n");
4922 for (i
= 0; i
< hist_data
->n_save_vars
; i
++) {
4923 ret
= field_var_debug_show(m
, hist_data
->save_vars
[i
], i
, true);
4931 static void hist_trigger_debug_show(struct seq_file
*m
,
4932 struct event_trigger_data
*data
, int n
)
4934 struct hist_trigger_data
*hist_data
;
4938 seq_puts(m
, "\n\n");
4940 seq_puts(m
, "# event histogram\n#\n# trigger info: ");
4941 data
->ops
->print(m
, data
->ops
, data
);
4942 seq_puts(m
, "#\n\n");
4944 hist_data
= data
->private_data
;
4946 seq_printf(m
, "hist_data: %p\n\n", hist_data
);
4947 seq_printf(m
, " n_vals: %u\n", hist_data
->n_vals
);
4948 seq_printf(m
, " n_keys: %u\n", hist_data
->n_keys
);
4949 seq_printf(m
, " n_fields: %u\n", hist_data
->n_fields
);
4951 seq_puts(m
, "\n val fields:\n\n");
4953 seq_puts(m
, " hist_data->fields[0]:\n");
4954 ret
= hist_field_debug_show(m
, hist_data
->fields
[0],
4955 HIST_FIELD_FL_HITCOUNT
);
4959 for (i
= 1; i
< hist_data
->n_vals
; i
++) {
4960 seq_printf(m
, "\n hist_data->fields[%d]:\n", i
);
4961 ret
= hist_field_debug_show(m
, hist_data
->fields
[i
], 0);
4966 seq_puts(m
, "\n key fields:\n");
4968 for (i
= hist_data
->n_vals
; i
< hist_data
->n_fields
; i
++) {
4969 seq_printf(m
, "\n hist_data->fields[%d]:\n", i
);
4970 ret
= hist_field_debug_show(m
, hist_data
->fields
[i
],
4976 if (hist_data
->n_var_refs
)
4977 seq_puts(m
, "\n variable reference fields:\n");
4979 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
4980 seq_printf(m
, "\n hist_data->var_refs[%d]:\n", i
);
4981 ret
= hist_field_debug_show(m
, hist_data
->var_refs
[i
],
4982 HIST_FIELD_FL_VAR_REF
);
4987 if (hist_data
->n_field_vars
)
4988 seq_puts(m
, "\n field variables:\n");
4990 for (i
= 0; i
< hist_data
->n_field_vars
; i
++) {
4991 ret
= field_var_debug_show(m
, hist_data
->field_vars
[i
], i
, false);
4996 ret
= hist_actions_debug_show(m
, hist_data
);
5001 static int hist_debug_show(struct seq_file
*m
, void *v
)
5003 struct event_trigger_data
*data
;
5004 struct trace_event_file
*event_file
;
5007 mutex_lock(&event_mutex
);
5009 event_file
= event_file_data(m
->private);
5010 if (unlikely(!event_file
)) {
5015 list_for_each_entry(data
, &event_file
->triggers
, list
) {
5016 if (data
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
)
5017 hist_trigger_debug_show(m
, data
, n
++);
5021 mutex_unlock(&event_mutex
);
5026 static int event_hist_debug_open(struct inode
*inode
, struct file
*file
)
5030 ret
= security_locked_down(LOCKDOWN_TRACEFS
);
5034 return single_open(file
, hist_debug_show
, file
);
5037 const struct file_operations event_hist_debug_fops
= {
5038 .open
= event_hist_debug_open
,
5040 .llseek
= seq_lseek
,
5041 .release
= single_release
,
5045 static void hist_field_print(struct seq_file
*m
, struct hist_field
*hist_field
)
5047 const char *field_name
= hist_field_name(hist_field
, 0);
5049 if (hist_field
->var
.name
)
5050 seq_printf(m
, "%s=", hist_field
->var
.name
);
5052 if (hist_field
->flags
& HIST_FIELD_FL_CPU
)
5054 else if (field_name
) {
5055 if (hist_field
->flags
& HIST_FIELD_FL_VAR_REF
||
5056 hist_field
->flags
& HIST_FIELD_FL_ALIAS
)
5058 seq_printf(m
, "%s", field_name
);
5059 } else if (hist_field
->flags
& HIST_FIELD_FL_TIMESTAMP
)
5060 seq_puts(m
, "common_timestamp");
5062 if (hist_field
->flags
) {
5063 if (!(hist_field
->flags
& HIST_FIELD_FL_VAR_REF
) &&
5064 !(hist_field
->flags
& HIST_FIELD_FL_EXPR
)) {
5065 const char *flags
= get_hist_field_flags(hist_field
);
5068 seq_printf(m
, ".%s", flags
);
static int event_hist_trigger_print(struct seq_file *m,
				    struct event_trigger_ops *ops,
				    struct event_trigger_data *data)
{
	struct hist_trigger_data *hist_data = data->private_data;
	struct hist_field *field;
	bool have_var = false;
	unsigned int i;

	seq_puts(m, "hist:");

	if (data->name)
		seq_printf(m, "%s:", data->name);

	seq_puts(m, "keys=");

	for_each_hist_key_field(i, hist_data) {
		field = hist_data->fields[i];

		if (i > hist_data->n_vals)
			seq_puts(m, ",");

		if (field->flags & HIST_FIELD_FL_STACKTRACE)
			seq_puts(m, "stacktrace");
		else
			hist_field_print(m, field);
	}

	seq_puts(m, ":vals=");

	for_each_hist_val_field(i, hist_data) {
		field = hist_data->fields[i];
		if (field->flags & HIST_FIELD_FL_VAR) {
			have_var = true;
			continue;
		}

		if (i == HITCOUNT_IDX)
			seq_puts(m, "hitcount");
		else {
			seq_puts(m, ",");
			hist_field_print(m, field);
		}
	}

	if (have_var) {
		unsigned int n = 0;

		seq_puts(m, ":");

		for_each_hist_val_field(i, hist_data) {
			field = hist_data->fields[i];

			if (field->flags & HIST_FIELD_FL_VAR) {
				if (n++)
					seq_puts(m, ",");
				hist_field_print(m, field);
			}
		}
	}

	seq_puts(m, ":sort=");

	for (i = 0; i < hist_data->n_sort_keys; i++) {
		struct tracing_map_sort_key *sort_key;
		unsigned int idx, first_key_idx;

		/* skip VAR vals */
		first_key_idx = hist_data->n_vals - hist_data->n_vars;

		sort_key = &hist_data->sort_keys[i];
		idx = sort_key->field_idx;

		if (WARN_ON(idx >= HIST_FIELDS_MAX))
			return -EINVAL;

		if (i > 0)
			seq_puts(m, ",");

		if (idx == HITCOUNT_IDX)
			seq_puts(m, "hitcount");
		else {
			if (idx >= first_key_idx)
				idx += hist_data->n_vars;
			hist_field_print(m, hist_data->fields[idx]);
		}

		if (sort_key->descending)
			seq_puts(m, ".descending");
	}

	seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits));
	if (hist_data->enable_timestamps)
		seq_printf(m, ":clock=%s", hist_data->attrs->clock);

	print_actions_spec(m, hist_data);

	if (data->filter_str)
		seq_printf(m, " if %s", data->filter_str);

	if (data->paused)
		seq_puts(m, " [paused]");
	else
		seq_puts(m, " [active]");

	seq_putc(m, '\n');

	return 0;
}

static int event_hist_trigger_init(struct event_trigger_ops *ops,
				   struct event_trigger_data *data)
{
	struct hist_trigger_data *hist_data = data->private_data;

	if (!data->ref && hist_data->attrs->name)
		save_named_trigger(hist_data->attrs->name, data);

	data->ref++;

	return 0;
}

static void unregister_field_var_hists(struct hist_trigger_data *hist_data)
{
	struct trace_event_file *file;
	unsigned int i;
	char *cmd;
	int ret;

	for (i = 0; i < hist_data->n_field_var_hists; i++) {
		file = hist_data->field_var_hists[i]->hist_data->event_file;
		cmd = hist_data->field_var_hists[i]->cmd;
		ret = event_hist_trigger_func(&trigger_hist_cmd, file,
					      "!hist", "hist", cmd);
	}
}

static void event_hist_trigger_free(struct event_trigger_ops *ops,
				    struct event_trigger_data *data)
{
	struct hist_trigger_data *hist_data = data->private_data;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		if (data->name)
			del_named_trigger(data);

		trigger_data_free(data);

		remove_hist_vars(hist_data);

		unregister_field_var_hists(hist_data);

		destroy_hist_data(hist_data);
	}
}

static struct event_trigger_ops event_hist_trigger_ops = {
	.func			= event_hist_trigger,
	.print			= event_hist_trigger_print,
	.init			= event_hist_trigger_init,
	.free			= event_hist_trigger_free,
};

static int event_hist_trigger_named_init(struct event_trigger_ops *ops,
					 struct event_trigger_data *data)
{
	data->ref++;

	save_named_trigger(data->named_data->name, data);

	event_hist_trigger_init(ops, data->named_data);

	return 0;
}

static void event_hist_trigger_named_free(struct event_trigger_ops *ops,
					  struct event_trigger_data *data)
{
	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	event_hist_trigger_free(ops, data->named_data);

	data->ref--;
	if (!data->ref) {
		del_named_trigger(data);
		trigger_data_free(data);
	}
}

static struct event_trigger_ops event_hist_trigger_named_ops = {
	.func			= event_hist_trigger,
	.print			= event_hist_trigger_print,
	.init			= event_hist_trigger_named_init,
	.free			= event_hist_trigger_named_free,
};

static struct event_trigger_ops *event_hist_get_trigger_ops(char *cmd,
							     char *param)
{
	return &event_hist_trigger_ops;
}

static void hist_clear(struct event_trigger_data *data)
{
	struct hist_trigger_data *hist_data = data->private_data;

	if (data->name)
		pause_named_trigger(data);

	tracepoint_synchronize_unregister();

	tracing_map_clear(hist_data->map);

	if (data->name)
		unpause_named_trigger(data);
}

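/*
 * Usage note (illustrative, based on the attrs->clear handling in
 * hist_register_trigger() below): an existing histogram can be reset by
 * re-issuing a matching trigger string with the "clear" attribute
 * appended, e.g.
 *
 *   echo 'hist:keys=pid:clear' >> events/sched/sched_switch/trigger
 *
 * The event and key specification here are only an assumed example.
 */
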
static bool compatible_field(struct ftrace_event_field *field,
			     struct ftrace_event_field *test_field)
{
	if (field == test_field)
		return true;
	if (field == NULL || test_field == NULL)
		return false;
	if (strcmp(field->name, test_field->name) != 0)
		return false;
	if (strcmp(field->type, test_field->type) != 0)
		return false;
	if (field->size != test_field->size)
		return false;
	if (field->is_signed != test_field->is_signed)
		return false;

	return true;
}

static bool hist_trigger_match(struct event_trigger_data *data,
			       struct event_trigger_data *data_test,
			       struct event_trigger_data *named_data,
			       bool ignore_filter)
{
	struct tracing_map_sort_key *sort_key, *sort_key_test;
	struct hist_trigger_data *hist_data, *hist_data_test;
	struct hist_field *key_field, *key_field_test;
	unsigned int i;

	if (named_data && (named_data != data_test) &&
	    (named_data != data_test->named_data))
		return false;

	if (!named_data && is_named_trigger(data_test))
		return false;

	hist_data = data->private_data;
	hist_data_test = data_test->private_data;

	if (hist_data->n_vals != hist_data_test->n_vals ||
	    hist_data->n_fields != hist_data_test->n_fields ||
	    hist_data->n_sort_keys != hist_data_test->n_sort_keys)
		return false;

	if (!ignore_filter) {
		if ((data->filter_str && !data_test->filter_str) ||
		    (!data->filter_str && data_test->filter_str))
			return false;
	}

	for_each_hist_field(i, hist_data) {
		key_field = hist_data->fields[i];
		key_field_test = hist_data_test->fields[i];

		if (key_field->flags != key_field_test->flags)
			return false;
		if (!compatible_field(key_field->field, key_field_test->field))
			return false;
		if (key_field->offset != key_field_test->offset)
			return false;
		if (key_field->size != key_field_test->size)
			return false;
		if (key_field->is_signed != key_field_test->is_signed)
			return false;
		if (!!key_field->var.name != !!key_field_test->var.name)
			return false;
		if (key_field->var.name &&
		    strcmp(key_field->var.name, key_field_test->var.name) != 0)
			return false;
	}

	for (i = 0; i < hist_data->n_sort_keys; i++) {
		sort_key = &hist_data->sort_keys[i];
		sort_key_test = &hist_data_test->sort_keys[i];

		if (sort_key->field_idx != sort_key_test->field_idx ||
		    sort_key->descending != sort_key_test->descending)
			return false;
	}

	if (!ignore_filter && data->filter_str &&
	    (strcmp(data->filter_str, data_test->filter_str) != 0))
		return false;

	if (!actions_match(hist_data, hist_data_test))
		return false;

	return true;
}

static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
				 struct event_trigger_data *data,
				 struct trace_event_file *file)
{
	struct hist_trigger_data *hist_data = data->private_data;
	struct event_trigger_data *test, *named_data = NULL;
	struct trace_array *tr = file->tr;
	int ret = 0;

	if (hist_data->attrs->name) {
		named_data = find_named_trigger(hist_data->attrs->name);
		if (named_data) {
			if (!hist_trigger_match(data, named_data, named_data,
						true)) {
				hist_err(tr, HIST_ERR_NAMED_MISMATCH, errpos(hist_data->attrs->name));
				ret = -EINVAL;
				goto out;
			}
		}
	}

	if (hist_data->attrs->name && !named_data)
		goto new;

	lockdep_assert_held(&event_mutex);

	list_for_each_entry(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (!hist_trigger_match(data, test, named_data, false))
				continue;
			if (hist_data->attrs->pause)
				test->paused = true;
			else if (hist_data->attrs->cont)
				test->paused = false;
			else if (hist_data->attrs->clear)
				hist_clear(test);
			else {
				hist_err(tr, HIST_ERR_TRIGGER_EEXIST, 0);
				ret = -EEXIST;
			}
			goto out;
		}
	}
 new:
	if (hist_data->attrs->cont || hist_data->attrs->clear) {
		hist_err(tr, HIST_ERR_TRIGGER_ENOENT_CLEAR, 0);
		ret = -ENOENT;
		goto out;
	}

	if (hist_data->attrs->pause)
		data->paused = true;

	if (named_data) {
		data->private_data = named_data->private_data;
		set_named_trigger_data(data, named_data);
		data->ops = &event_hist_trigger_named_ops;
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	if (hist_data->enable_timestamps) {
		char *clock = hist_data->attrs->clock;

		ret = tracing_set_clock(file->tr, hist_data->attrs->clock);
		if (ret) {
			hist_err(tr, HIST_ERR_SET_CLOCK_FAIL, errpos(clock));
			goto out;
		}

		tracing_set_time_stamp_abs(file->tr, true);
	}

	if (named_data)
		destroy_hist_data(hist_data);

	ret++;
 out:
	return ret;
}

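/*
 * Illustrative note (example names assumed, not taken from this file):
 * hist triggers created with a "name=" attribute share their
 * hist_trigger_data.  For instance, attaching
 *
 *   'hist:name=wakeups:keys=pid'
 *
 * to two different events makes both update the same named histogram;
 * hist_register_trigger() resolves the name via find_named_trigger() and
 * rejects a mismatched definition with HIST_ERR_NAMED_MISMATCH.
 */
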
static int hist_trigger_enable(struct event_trigger_data *data,
			       struct trace_event_file *file)
{
	int ret = 0;

	list_add_tail_rcu(&data->list, &file->triggers);

	update_cond_flag(file);

	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}

	return ret;
}

static bool have_hist_trigger_match(struct event_trigger_data *data,
				    struct trace_event_file *file)
{
	struct hist_trigger_data *hist_data = data->private_data;
	struct event_trigger_data *test, *named_data = NULL;
	bool match = false;

	lockdep_assert_held(&event_mutex);

	if (hist_data->attrs->name)
		named_data = find_named_trigger(hist_data->attrs->name);

	list_for_each_entry(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (hist_trigger_match(data, test, named_data, false)) {
				match = true;
				break;
			}
		}
	}

	return match;
}

static bool hist_trigger_check_refs(struct event_trigger_data *data,
				    struct trace_event_file *file)
{
	struct hist_trigger_data *hist_data = data->private_data;
	struct event_trigger_data *test, *named_data = NULL;

	lockdep_assert_held(&event_mutex);

	if (hist_data->attrs->name)
		named_data = find_named_trigger(hist_data->attrs->name);

	list_for_each_entry(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (!hist_trigger_match(data, test, named_data, false))
				continue;
			hist_data = test->private_data;
			if (check_var_refs(hist_data))
				return true;
			break;
		}
	}

	return false;
}

static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
				    struct event_trigger_data *data,
				    struct trace_event_file *file)
{
	struct hist_trigger_data *hist_data = data->private_data;
	struct event_trigger_data *test, *named_data = NULL;
	bool unregistered = false;

	lockdep_assert_held(&event_mutex);

	if (hist_data->attrs->name)
		named_data = find_named_trigger(hist_data->attrs->name);

	list_for_each_entry(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (!hist_trigger_match(data, test, named_data, false))
				continue;
			unregistered = true;
			list_del_rcu(&test->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	if (unregistered && test->ops->free)
		test->ops->free(test->ops, test);

	if (hist_data->enable_timestamps) {
		if (!hist_data->remove || unregistered)
			tracing_set_time_stamp_abs(file->tr, false);
	}
}

static bool hist_file_check_refs(struct trace_event_file *file)
{
	struct hist_trigger_data *hist_data;
	struct event_trigger_data *test;

	lockdep_assert_held(&event_mutex);

	list_for_each_entry(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			hist_data = test->private_data;
			if (check_var_refs(hist_data))
				return true;
		}
	}

	return false;
}

static void hist_unreg_all(struct trace_event_file *file)
{
	struct event_trigger_data *test, *n;
	struct hist_trigger_data *hist_data;
	struct synth_event *se;
	const char *se_name;

	lockdep_assert_held(&event_mutex);

	if (hist_file_check_refs(file))
		return;

	list_for_each_entry_safe(test, n, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			hist_data = test->private_data;
			list_del_rcu(&test->list);
			trace_event_trigger_enable_disable(file, 0);

			se_name = trace_event_name(file->event_call);
			se = find_synth_event(se_name);
			if (se)
				se->ref--;

			update_cond_flag(file);
			if (hist_data->enable_timestamps)
				tracing_set_time_stamp_abs(file->tr, false);
			if (test->ops->free)
				test->ops->free(test->ops, test);
		}
	}
}

static int event_hist_trigger_func(struct event_command *cmd_ops,
				   struct trace_event_file *file,
				   char *glob, char *cmd, char *param)
{
	unsigned int hist_trigger_bits = TRACING_MAP_BITS_DEFAULT;
	struct event_trigger_data *trigger_data;
	struct hist_trigger_attrs *attrs;
	struct event_trigger_ops *trigger_ops;
	struct hist_trigger_data *hist_data;
	struct synth_event *se;
	const char *se_name;
	bool remove = false;
	char *trigger, *p;
	int ret = 0;

	lockdep_assert_held(&event_mutex);

	if (glob && strlen(glob)) {
		hist_err_clear();
		last_cmd_set(file, param);
	}

	if (!param)
		return -EINVAL;

	if (glob[0] == '!')
		remove = true;

	/*
	 * separate the trigger from the filter (k:v [if filter])
	 * allowing for whitespace in the trigger
	 */
	p = trigger = param;
	do {
		p = strstr(p, "if");
		if (!p)
			break;
		if (p == param)
			return -EINVAL;
		if (*(p - 1) != ' ' && *(p - 1) != '\t') {
			p++;
			continue;
		}
		if (p >= param + strlen(param) - (sizeof("if") - 1) - 1)
			return -EINVAL;
		if (*(p + sizeof("if") - 1) != ' ' && *(p + sizeof("if") - 1) != '\t') {
			p++;
			continue;
		}
		break;
	} while (p);

	if (!p)
		param = NULL;
	else {
		*(p - 1) = '\0'; /* terminate the trigger string */
		param = strstrip(p);
		trigger = strstrip(trigger);
	}

	attrs = parse_hist_trigger_attrs(file->tr, trigger);
	if (IS_ERR(attrs))
		return PTR_ERR(attrs);

	if (attrs->map_bits)
		hist_trigger_bits = attrs->map_bits;

	hist_data = create_hist_data(hist_trigger_bits, attrs, file, remove);
	if (IS_ERR(hist_data)) {
		destroy_hist_trigger_attrs(attrs);
		return PTR_ERR(hist_data);
	}

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data) {
		ret = -ENOMEM;
		goto out_free;
	}

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;

	INIT_LIST_HEAD(&trigger_data->list);
	RCU_INIT_POINTER(trigger_data->filter, NULL);

	trigger_data->private_data = hist_data;

	/* if param is non-empty, it's supposed to be a filter */
	if (param && cmd_ops->set_filter) {
		ret = cmd_ops->set_filter(param, trigger_data, file);
		if (ret < 0)
			goto out_free;
	}

	if (remove) {
		if (!have_hist_trigger_match(trigger_data, file))
			goto out_free;

		if (hist_trigger_check_refs(trigger_data, file)) {
			ret = -EBUSY;
			goto out_free;
		}

		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		se_name = trace_event_name(file->event_call);
		se = find_synth_event(se_name);
		if (se)
			se->ref--;
		ret = 0;
		goto out_free;
	}

	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of triggers registered,
	 * but if it didn't register any it returns zero.  Consider no
	 * triggers registered a failure too.
	 */
	if (!ret) {
		if (!(attrs->pause || attrs->cont || attrs->clear))
			ret = -ENOENT;
		goto out_free;
	} else if (ret < 0)
		goto out_free;

	if (get_named_trigger_data(trigger_data))
		goto enable;

	if (has_hist_vars(hist_data))
		save_hist_vars(hist_data);

	ret = create_actions(hist_data);
	if (ret)
		goto out_unreg;

	ret = tracing_map_init(hist_data->map);
	if (ret)
		goto out_unreg;
 enable:
	ret = hist_trigger_enable(trigger_data, file);
	if (ret)
		goto out_unreg;

	se_name = trace_event_name(file->event_call);
	se = find_synth_event(se_name);
	if (se)
		se->ref++;
	/* Just return zero, not the number of registered triggers */
	ret = 0;
 out:
	if (ret == 0)
		hist_err_clear();

	return ret;
 out_unreg:
	cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);

	remove_hist_vars(hist_data);

	kfree(trigger_data);

	destroy_hist_data(hist_data);
	goto out;
}

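/*
 * Illustrative example of the command strings parsed above (event and
 * field names are assumed, taken from typical histogram usage rather
 * than from this file):
 *
 *   echo 'hist:keys=call_site:vals=bytes_req:sort=bytes_req.descending \
 *         if bytes_req > 256' >> events/kmem/kmalloc/trigger
 *
 * Everything before " if " is handed to parse_hist_trigger_attrs() as
 * the trigger, the remainder becomes the filter string, and prefixing
 * the whole command with '!' removes a previously registered trigger.
 */
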
static struct event_command trigger_hist_cmd = {
	.name			= "hist",
	.trigger_type		= ETT_EVENT_HIST,
	.flags			= EVENT_CMD_FL_NEEDS_REC,
	.func			= event_hist_trigger_func,
	.reg			= hist_register_trigger,
	.unreg			= hist_unregister_trigger,
	.unreg_all		= hist_unreg_all,
	.get_trigger_ops	= event_hist_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

__init int register_trigger_hist_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_hist_cmd);
	WARN_ON(ret < 0);

	return ret;
}

static void
hist_enable_trigger(struct event_trigger_data *data, void *rec,
		    struct ring_buffer_event *event)
{
	struct enable_trigger_data *enable_data = data->private_data;
	struct event_trigger_data *test;

	list_for_each_entry_rcu(test, &enable_data->file->triggers, list,
				lockdep_is_held(&event_mutex)) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (enable_data->enable)
				test->paused = false;
			else
				test->paused = true;
		}
	}
}

static void
hist_enable_count_trigger(struct event_trigger_data *data, void *rec,
			  struct ring_buffer_event *event)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	hist_enable_trigger(data, rec, event);
}

static struct event_trigger_ops hist_enable_trigger_ops = {
	.func			= hist_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static struct event_trigger_ops hist_enable_count_trigger_ops = {
	.func			= hist_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static struct event_trigger_ops hist_disable_trigger_ops = {
	.func			= hist_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static struct event_trigger_ops hist_disable_count_trigger_ops = {
	.func			= hist_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static struct event_trigger_ops *
hist_enable_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;
	bool enable;

	enable = (strcmp(cmd, ENABLE_HIST_STR) == 0);

	if (enable)
		ops = param ? &hist_enable_count_trigger_ops :
			&hist_enable_trigger_ops;
	else
		ops = param ? &hist_disable_count_trigger_ops :
			&hist_disable_trigger_ops;

	return ops;
}

static void hist_enable_unreg_all(struct trace_event_file *file)
{
	struct event_trigger_data *test, *n;

	list_for_each_entry_safe(test, n, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_HIST_ENABLE) {
			list_del_rcu(&test->list);
			update_cond_flag(file);
			trace_event_trigger_enable_disable(file, 0);
			if (test->ops->free)
				test->ops->free(test->ops, test);
		}
	}
}

static struct event_command trigger_hist_enable_cmd = {
	.name			= ENABLE_HIST_STR,
	.trigger_type		= ETT_HIST_ENABLE,
	.func			= event_enable_trigger_func,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.unreg_all		= hist_enable_unreg_all,
	.get_trigger_ops	= hist_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

static struct event_command trigger_hist_disable_cmd = {
	.name			= DISABLE_HIST_STR,
	.trigger_type		= ETT_HIST_ENABLE,
	.func			= event_enable_trigger_func,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.unreg_all		= hist_enable_unreg_all,
	.get_trigger_ops	= hist_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

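/*
 * Usage note (example event names assumed): the two commands above
 * implement the "enable_hist" and "disable_hist" triggers, which
 * unpause/pause hist triggers on another event whenever this event
 * fires, e.g.
 *
 *   echo 'enable_hist:sched:sched_switch' >> \
 *         events/sched/sched_process_exec/trigger
 */
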
static __init void unregister_trigger_hist_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_hist_enable_cmd);
	unregister_event_command(&trigger_hist_disable_cmd);
}

__init int register_trigger_hist_enable_disable_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_hist_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_hist_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_hist_enable_disable_cmds();

	return ret;
}