1 // SPDX-License-Identifier: GPL-2.0
3 * trace_events_hist - trace event hist triggers
5 * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
8 #include <linux/module.h>
9 #include <linux/kallsyms.h>
10 #include <linux/security.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/stacktrace.h>
14 #include <linux/rculist.h>
15 #include <linux/tracefs.h>
17 /* for gfp flag names */
18 #include <linux/trace_events.h>
19 #include <trace/events/mmflags.h>
21 #include "tracing_map.h"
23 #include "trace_dynevent.h"
25 #define SYNTH_SYSTEM "synthetic"
26 #define SYNTH_FIELDS_MAX 32
28 #define STR_VAR_LEN_MAX 32 /* must be multiple of sizeof(u64) */
31 C(NONE, "No error"), \
32 C(DUPLICATE_VAR, "Variable already defined"), \
33 C(VAR_NOT_UNIQUE, "Variable name not unique, need to use fully qualified name (subsys.event.var) for variable"), \
34 C(TOO_MANY_VARS, "Too many variables defined"), \
35 C(MALFORMED_ASSIGNMENT, "Malformed assignment"), \
36 C(NAMED_MISMATCH, "Named hist trigger doesn't match existing named trigger (includes variables)"), \
37 C(TRIGGER_EEXIST, "Hist trigger already exists"), \
38 C(TRIGGER_ENOENT_CLEAR, "Can't clear or continue a nonexistent hist trigger"), \
39 C(SET_CLOCK_FAIL, "Couldn't set trace_clock"), \
40 C(BAD_FIELD_MODIFIER, "Invalid field modifier"), \
41 C(TOO_MANY_SUBEXPR, "Too many subexpressions (3 max)"), \
42 C(TIMESTAMP_MISMATCH, "Timestamp units in expression don't match"), \
43 C(TOO_MANY_FIELD_VARS, "Too many field variables defined"), \
44 C(EVENT_FILE_NOT_FOUND, "Event file not found"), \
45 C(HIST_NOT_FOUND, "Matching event histogram not found"), \
46 C(HIST_CREATE_FAIL, "Couldn't create histogram for field"), \
47 C(SYNTH_VAR_NOT_FOUND, "Couldn't find synthetic variable"), \
48 C(SYNTH_EVENT_NOT_FOUND,"Couldn't find synthetic event"), \
49 C(SYNTH_TYPE_MISMATCH, "Param type doesn't match synthetic event field type"), \
50 C(SYNTH_COUNT_MISMATCH, "Param count doesn't match synthetic event field count"), \
51 C(FIELD_VAR_PARSE_FAIL, "Couldn't parse field variable"), \
52 C(VAR_CREATE_FIND_FAIL, "Couldn't create or find variable"), \
53 C(ONX_NOT_VAR, "For onmax(x) or onchange(x), x must be a variable"), \
54 C(ONX_VAR_NOT_FOUND, "Couldn't find onmax or onchange variable"), \
55 C(ONX_VAR_CREATE_FAIL, "Couldn't create onmax or onchange variable"), \
56 C(FIELD_VAR_CREATE_FAIL,"Couldn't create field variable"), \
57 C(TOO_MANY_PARAMS, "Too many action params"), \
58 C(PARAM_NOT_FOUND, "Couldn't find param"), \
59 C(INVALID_PARAM, "Invalid action param"), \
60 C(ACTION_NOT_FOUND, "No action found"), \
61 C(NO_SAVE_PARAMS, "No params found for save()"), \
62 C(TOO_MANY_SAVE_ACTIONS,"Can't have more than one save() action per hist"), \
63 C(ACTION_MISMATCH, "Handler doesn't support action"), \
64 C(NO_CLOSING_PAREN, "No closing paren found"), \
65 C(SUBSYS_NOT_FOUND, "Missing subsystem"), \
66 C(INVALID_SUBSYS_EVENT, "Invalid subsystem or event name"), \
67 C(INVALID_REF_KEY, "Using variable references in keys not supported"), \
68 C(VAR_NOT_FOUND, "Couldn't find variable"), \
69 C(FIELD_NOT_FOUND, "Couldn't find field"), \
70 C(EMPTY_ASSIGNMENT, "Empty assignment"), \
71 C(INVALID_SORT_MODIFIER,"Invalid sort modifier"), \
72 C(EMPTY_SORT_FIELD, "Empty sort field"), \
73 C(TOO_MANY_SORT_FIELDS, "Too many sort fields (Max = 2)"), \
74 C(INVALID_SORT_FIELD, "Sort field must be a key or a val"),
77 #define C(a, b) HIST_ERR_##a
84 static const char *err_text
[] = { ERRORS
};
88 typedef u64 (*hist_field_fn_t
) (struct hist_field
*field
,
89 struct tracing_map_elt
*elt
,
90 struct ring_buffer_event
*rbe
,
93 #define HIST_FIELD_OPERANDS_MAX 2
94 #define HIST_FIELDS_MAX (TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX)
95 #define HIST_ACTIONS_MAX 8
101 FIELD_OP_UNARY_MINUS
,
105 * A hist_var (histogram variable) contains variable information for
106 * hist_fields having the HIST_FIELD_FL_VAR or HIST_FIELD_FL_VAR_REF
107 * flag set. A hist_var has a variable name e.g. ts0, and is
108 * associated with a given histogram trigger, as specified by
109 * hist_data. The hist_var idx is the unique index assigned to the
110 * variable by the hist trigger's tracing_map. The idx is what is
111 * used to set a variable's value and, by a variable reference, to
116 struct hist_trigger_data
*hist_data
;
121 struct ftrace_event_field
*field
;
127 unsigned int is_signed
;
129 struct hist_field
*operands
[HIST_FIELD_OPERANDS_MAX
];
130 struct hist_trigger_data
*hist_data
;
133 * Variable fields contain variable-specific info in var.
136 enum field_op_id
operator;
141 * The name field is used for EXPR and VAR_REF fields. VAR
142 * fields contain the variable name in var.name.
147 * When a histogram trigger is hit, if it has any references
148 * to variables, the values of those variables are collected
149 * into a var_ref_vals array by resolve_var_refs(). The
150 * current value of each variable is read from the tracing_map
151 * using the hist field's hist_var.idx and entered into the
152 * var_ref_idx entry i.e. var_ref_vals[var_ref_idx].
154 unsigned int var_ref_idx
;
158 static u64
hist_field_none(struct hist_field
*field
,
159 struct tracing_map_elt
*elt
,
160 struct ring_buffer_event
*rbe
,
166 static u64
hist_field_counter(struct hist_field
*field
,
167 struct tracing_map_elt
*elt
,
168 struct ring_buffer_event
*rbe
,
174 static u64
hist_field_string(struct hist_field
*hist_field
,
175 struct tracing_map_elt
*elt
,
176 struct ring_buffer_event
*rbe
,
179 char *addr
= (char *)(event
+ hist_field
->field
->offset
);
181 return (u64
)(unsigned long)addr
;
184 static u64
hist_field_dynstring(struct hist_field
*hist_field
,
185 struct tracing_map_elt
*elt
,
186 struct ring_buffer_event
*rbe
,
189 u32 str_item
= *(u32
*)(event
+ hist_field
->field
->offset
);
190 int str_loc
= str_item
& 0xffff;
191 char *addr
= (char *)(event
+ str_loc
);
193 return (u64
)(unsigned long)addr
;
196 static u64
hist_field_pstring(struct hist_field
*hist_field
,
197 struct tracing_map_elt
*elt
,
198 struct ring_buffer_event
*rbe
,
201 char **addr
= (char **)(event
+ hist_field
->field
->offset
);
203 return (u64
)(unsigned long)*addr
;
206 static u64
hist_field_log2(struct hist_field
*hist_field
,
207 struct tracing_map_elt
*elt
,
208 struct ring_buffer_event
*rbe
,
211 struct hist_field
*operand
= hist_field
->operands
[0];
213 u64 val
= operand
->fn(operand
, elt
, rbe
, event
);
215 return (u64
) ilog2(roundup_pow_of_two(val
));
218 static u64
hist_field_plus(struct hist_field
*hist_field
,
219 struct tracing_map_elt
*elt
,
220 struct ring_buffer_event
*rbe
,
223 struct hist_field
*operand1
= hist_field
->operands
[0];
224 struct hist_field
*operand2
= hist_field
->operands
[1];
226 u64 val1
= operand1
->fn(operand1
, elt
, rbe
, event
);
227 u64 val2
= operand2
->fn(operand2
, elt
, rbe
, event
);
232 static u64
hist_field_minus(struct hist_field
*hist_field
,
233 struct tracing_map_elt
*elt
,
234 struct ring_buffer_event
*rbe
,
237 struct hist_field
*operand1
= hist_field
->operands
[0];
238 struct hist_field
*operand2
= hist_field
->operands
[1];
240 u64 val1
= operand1
->fn(operand1
, elt
, rbe
, event
);
241 u64 val2
= operand2
->fn(operand2
, elt
, rbe
, event
);
246 static u64
hist_field_unary_minus(struct hist_field
*hist_field
,
247 struct tracing_map_elt
*elt
,
248 struct ring_buffer_event
*rbe
,
251 struct hist_field
*operand
= hist_field
->operands
[0];
253 s64 sval
= (s64
)operand
->fn(operand
, elt
, rbe
, event
);
254 u64 val
= (u64
)-sval
;
259 #define DEFINE_HIST_FIELD_FN(type) \
260 static u64 hist_field_##type(struct hist_field *hist_field, \
261 struct tracing_map_elt *elt, \
262 struct ring_buffer_event *rbe, \
265 type *addr = (type *)(event + hist_field->field->offset); \
267 return (u64)(unsigned long)*addr; \
270 DEFINE_HIST_FIELD_FN(s64
);
271 DEFINE_HIST_FIELD_FN(u64
);
272 DEFINE_HIST_FIELD_FN(s32
);
273 DEFINE_HIST_FIELD_FN(u32
);
274 DEFINE_HIST_FIELD_FN(s16
);
275 DEFINE_HIST_FIELD_FN(u16
);
276 DEFINE_HIST_FIELD_FN(s8
);
277 DEFINE_HIST_FIELD_FN(u8
);
279 #define for_each_hist_field(i, hist_data) \
280 for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)
282 #define for_each_hist_val_field(i, hist_data) \
283 for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)
285 #define for_each_hist_key_field(i, hist_data) \
286 for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)
288 #define HIST_STACKTRACE_DEPTH 16
289 #define HIST_STACKTRACE_SIZE (HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
290 #define HIST_STACKTRACE_SKIP 5
292 #define HITCOUNT_IDX 0
293 #define HIST_KEY_SIZE_MAX (MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
295 enum hist_field_flags
{
296 HIST_FIELD_FL_HITCOUNT
= 1 << 0,
297 HIST_FIELD_FL_KEY
= 1 << 1,
298 HIST_FIELD_FL_STRING
= 1 << 2,
299 HIST_FIELD_FL_HEX
= 1 << 3,
300 HIST_FIELD_FL_SYM
= 1 << 4,
301 HIST_FIELD_FL_SYM_OFFSET
= 1 << 5,
302 HIST_FIELD_FL_EXECNAME
= 1 << 6,
303 HIST_FIELD_FL_SYSCALL
= 1 << 7,
304 HIST_FIELD_FL_STACKTRACE
= 1 << 8,
305 HIST_FIELD_FL_LOG2
= 1 << 9,
306 HIST_FIELD_FL_TIMESTAMP
= 1 << 10,
307 HIST_FIELD_FL_TIMESTAMP_USECS
= 1 << 11,
308 HIST_FIELD_FL_VAR
= 1 << 12,
309 HIST_FIELD_FL_EXPR
= 1 << 13,
310 HIST_FIELD_FL_VAR_REF
= 1 << 14,
311 HIST_FIELD_FL_CPU
= 1 << 15,
312 HIST_FIELD_FL_ALIAS
= 1 << 16,
317 char *name
[TRACING_MAP_VARS_MAX
];
318 char *expr
[TRACING_MAP_VARS_MAX
];
321 struct hist_trigger_attrs
{
331 unsigned int map_bits
;
333 char *assignment_str
[TRACING_MAP_VARS_MAX
];
334 unsigned int n_assignments
;
336 char *action_str
[HIST_ACTIONS_MAX
];
337 unsigned int n_actions
;
339 struct var_defs var_defs
;
343 struct hist_field
*var
;
344 struct hist_field
*val
;
347 struct field_var_hist
{
348 struct hist_trigger_data
*hist_data
;
352 struct hist_trigger_data
{
353 struct hist_field
*fields
[HIST_FIELDS_MAX
];
356 unsigned int n_fields
;
358 unsigned int key_size
;
359 struct tracing_map_sort_key sort_keys
[TRACING_MAP_SORT_KEYS_MAX
];
360 unsigned int n_sort_keys
;
361 struct trace_event_file
*event_file
;
362 struct hist_trigger_attrs
*attrs
;
363 struct tracing_map
*map
;
364 bool enable_timestamps
;
366 struct hist_field
*var_refs
[TRACING_MAP_VARS_MAX
];
367 unsigned int n_var_refs
;
369 struct action_data
*actions
[HIST_ACTIONS_MAX
];
370 unsigned int n_actions
;
372 struct field_var
*field_vars
[SYNTH_FIELDS_MAX
];
373 unsigned int n_field_vars
;
374 unsigned int n_field_var_str
;
375 struct field_var_hist
*field_var_hists
[SYNTH_FIELDS_MAX
];
376 unsigned int n_field_var_hists
;
378 struct field_var
*save_vars
[SYNTH_FIELDS_MAX
];
379 unsigned int n_save_vars
;
380 unsigned int n_save_var_str
;
383 static int create_synth_event(int argc
, const char **argv
);
384 static int synth_event_show(struct seq_file
*m
, struct dyn_event
*ev
);
385 static int synth_event_release(struct dyn_event
*ev
);
386 static bool synth_event_is_busy(struct dyn_event
*ev
);
387 static bool synth_event_match(const char *system
, const char *event
,
388 int argc
, const char **argv
, struct dyn_event
*ev
);
390 static struct dyn_event_operations synth_event_ops
= {
391 .create
= create_synth_event
,
392 .show
= synth_event_show
,
393 .is_busy
= synth_event_is_busy
,
394 .free
= synth_event_release
,
395 .match
= synth_event_match
,
408 struct dyn_event devent
;
411 struct synth_field
**fields
;
412 unsigned int n_fields
;
414 struct trace_event_class
class;
415 struct trace_event_call call
;
416 struct tracepoint
*tp
;
420 static bool is_synth_event(struct dyn_event
*ev
)
422 return ev
->ops
== &synth_event_ops
;
425 static struct synth_event
*to_synth_event(struct dyn_event
*ev
)
427 return container_of(ev
, struct synth_event
, devent
);
430 static bool synth_event_is_busy(struct dyn_event
*ev
)
432 struct synth_event
*event
= to_synth_event(ev
);
434 return event
->ref
!= 0;
437 static bool synth_event_match(const char *system
, const char *event
,
438 int argc
, const char **argv
, struct dyn_event
*ev
)
440 struct synth_event
*sev
= to_synth_event(ev
);
442 return strcmp(sev
->name
, event
) == 0 &&
443 (!system
|| strcmp(system
, SYNTH_SYSTEM
) == 0);
448 typedef void (*action_fn_t
) (struct hist_trigger_data
*hist_data
,
449 struct tracing_map_elt
*elt
, void *rec
,
450 struct ring_buffer_event
*rbe
, void *key
,
451 struct action_data
*data
, u64
*var_ref_vals
);
453 typedef bool (*check_track_val_fn_t
) (u64 track_val
, u64 var_val
);
468 enum handler_id handler
;
469 enum action_id action
;
473 unsigned int n_params
;
474 char *params
[SYNTH_FIELDS_MAX
];
477 * When a histogram trigger is hit, the values of any
478 * references to variables, including variables being passed
479 * as parameters to synthetic events, are collected into a
480 * var_ref_vals array. This var_ref_idx array is an array of
481 * indices into the var_ref_vals array, one for each synthetic
482 * event param, and is passed to the synthetic event
485 unsigned int var_ref_idx
[TRACING_MAP_VARS_MAX
];
486 struct synth_event
*synth_event
;
487 bool use_trace_keyword
;
488 char *synth_event_name
;
498 * var_str contains the $-unstripped variable
499 * name referenced by var_ref, and used when
500 * printing the action. Because var_ref
501 * creation is deferred to create_actions(),
502 * we need a per-action way to save it until
503 * then, thus var_str.
508 * var_ref refers to the variable being
509 * tracked e.g onmax($var).
511 struct hist_field
*var_ref
;
514 * track_var contains the 'invisible' tracking
515 * variable created to keep the current
518 struct hist_field
*track_var
;
520 check_track_val_fn_t check_val
;
521 action_fn_t save_data
;
530 unsigned int key_len
;
532 struct tracing_map_elt elt
;
534 struct action_data
*action_data
;
535 struct hist_trigger_data
*hist_data
;
538 struct hist_elt_data
{
541 char *field_var_str
[SYNTH_FIELDS_MAX
];
544 struct snapshot_context
{
545 struct tracing_map_elt
*elt
;
549 static void track_data_free(struct track_data
*track_data
)
551 struct hist_elt_data
*elt_data
;
556 kfree(track_data
->key
);
558 elt_data
= track_data
->elt
.private_data
;
560 kfree(elt_data
->comm
);
567 static struct track_data
*track_data_alloc(unsigned int key_len
,
568 struct action_data
*action_data
,
569 struct hist_trigger_data
*hist_data
)
571 struct track_data
*data
= kzalloc(sizeof(*data
), GFP_KERNEL
);
572 struct hist_elt_data
*elt_data
;
575 return ERR_PTR(-ENOMEM
);
577 data
->key
= kzalloc(key_len
, GFP_KERNEL
);
579 track_data_free(data
);
580 return ERR_PTR(-ENOMEM
);
583 data
->key_len
= key_len
;
584 data
->action_data
= action_data
;
585 data
->hist_data
= hist_data
;
587 elt_data
= kzalloc(sizeof(*elt_data
), GFP_KERNEL
);
589 track_data_free(data
);
590 return ERR_PTR(-ENOMEM
);
592 data
->elt
.private_data
= elt_data
;
594 elt_data
->comm
= kzalloc(TASK_COMM_LEN
, GFP_KERNEL
);
595 if (!elt_data
->comm
) {
596 track_data_free(data
);
597 return ERR_PTR(-ENOMEM
);
603 static char last_cmd
[MAX_FILTER_STR_VAL
];
604 static char last_cmd_loc
[MAX_FILTER_STR_VAL
];
606 static int errpos(char *str
)
608 return err_pos(last_cmd
, str
);
611 static void last_cmd_set(struct trace_event_file
*file
, char *str
)
613 const char *system
= NULL
, *name
= NULL
;
614 struct trace_event_call
*call
;
619 strcpy(last_cmd
, "hist:");
620 strncat(last_cmd
, str
, MAX_FILTER_STR_VAL
- 1 - sizeof("hist:"));
623 call
= file
->event_call
;
625 system
= call
->class->system
;
627 name
= trace_event_name(call
);
634 snprintf(last_cmd_loc
, MAX_FILTER_STR_VAL
, "hist:%s:%s", system
, name
);
637 static void hist_err(struct trace_array
*tr
, u8 err_type
, u8 err_pos
)
639 tracing_log_err(tr
, last_cmd_loc
, last_cmd
, err_text
,
643 static void hist_err_clear(void)
646 last_cmd_loc
[0] = '\0';
649 struct synth_trace_event
{
650 struct trace_entry ent
;
654 static int synth_event_define_fields(struct trace_event_call
*call
)
656 struct synth_trace_event trace
;
657 int offset
= offsetof(typeof(trace
), fields
);
658 struct synth_event
*event
= call
->data
;
659 unsigned int i
, size
, n_u64
;
664 for (i
= 0, n_u64
= 0; i
< event
->n_fields
; i
++) {
665 size
= event
->fields
[i
]->size
;
666 is_signed
= event
->fields
[i
]->is_signed
;
667 type
= event
->fields
[i
]->type
;
668 name
= event
->fields
[i
]->name
;
669 ret
= trace_define_field(call
, type
, name
, offset
, size
,
670 is_signed
, FILTER_OTHER
);
674 event
->fields
[i
]->offset
= n_u64
;
676 if (event
->fields
[i
]->is_string
) {
677 offset
+= STR_VAR_LEN_MAX
;
678 n_u64
+= STR_VAR_LEN_MAX
/ sizeof(u64
);
680 offset
+= sizeof(u64
);
685 event
->n_u64
= n_u64
;
690 static bool synth_field_signed(char *type
)
692 if (str_has_prefix(type
, "u"))
694 if (strcmp(type
, "gfp_t") == 0)
/* A synthetic field is a string iff its type contains a "char[" array. */
static int synth_field_is_string(char *type)
{
	if (strstr(type, "char[") != NULL)
		return true;

	return false;
}
708 static int synth_field_string_size(char *type
)
710 char buf
[4], *end
, *start
;
714 start
= strstr(type
, "char[");
717 start
+= sizeof("char[") - 1;
719 end
= strchr(type
, ']');
720 if (!end
|| end
< start
)
727 strncpy(buf
, start
, len
);
730 err
= kstrtouint(buf
, 0, &size
);
734 if (size
> STR_VAR_LEN_MAX
)
740 static int synth_field_size(char *type
)
744 if (strcmp(type
, "s64") == 0)
746 else if (strcmp(type
, "u64") == 0)
748 else if (strcmp(type
, "s32") == 0)
750 else if (strcmp(type
, "u32") == 0)
752 else if (strcmp(type
, "s16") == 0)
754 else if (strcmp(type
, "u16") == 0)
756 else if (strcmp(type
, "s8") == 0)
758 else if (strcmp(type
, "u8") == 0)
760 else if (strcmp(type
, "char") == 0)
762 else if (strcmp(type
, "unsigned char") == 0)
763 size
= sizeof(unsigned char);
764 else if (strcmp(type
, "int") == 0)
766 else if (strcmp(type
, "unsigned int") == 0)
767 size
= sizeof(unsigned int);
768 else if (strcmp(type
, "long") == 0)
770 else if (strcmp(type
, "unsigned long") == 0)
771 size
= sizeof(unsigned long);
772 else if (strcmp(type
, "pid_t") == 0)
773 size
= sizeof(pid_t
);
774 else if (strcmp(type
, "gfp_t") == 0)
775 size
= sizeof(gfp_t
);
776 else if (synth_field_is_string(type
))
777 size
= synth_field_string_size(type
);
782 static const char *synth_field_fmt(char *type
)
784 const char *fmt
= "%llu";
786 if (strcmp(type
, "s64") == 0)
788 else if (strcmp(type
, "u64") == 0)
790 else if (strcmp(type
, "s32") == 0)
792 else if (strcmp(type
, "u32") == 0)
794 else if (strcmp(type
, "s16") == 0)
796 else if (strcmp(type
, "u16") == 0)
798 else if (strcmp(type
, "s8") == 0)
800 else if (strcmp(type
, "u8") == 0)
802 else if (strcmp(type
, "char") == 0)
804 else if (strcmp(type
, "unsigned char") == 0)
806 else if (strcmp(type
, "int") == 0)
808 else if (strcmp(type
, "unsigned int") == 0)
810 else if (strcmp(type
, "long") == 0)
812 else if (strcmp(type
, "unsigned long") == 0)
814 else if (strcmp(type
, "pid_t") == 0)
816 else if (strcmp(type
, "gfp_t") == 0)
818 else if (synth_field_is_string(type
))
824 static enum print_line_t
print_synth_event(struct trace_iterator
*iter
,
826 struct trace_event
*event
)
828 struct trace_array
*tr
= iter
->tr
;
829 struct trace_seq
*s
= &iter
->seq
;
830 struct synth_trace_event
*entry
;
831 struct synth_event
*se
;
832 unsigned int i
, n_u64
;
836 entry
= (struct synth_trace_event
*)iter
->ent
;
837 se
= container_of(event
, struct synth_event
, call
.event
);
839 trace_seq_printf(s
, "%s: ", se
->name
);
841 for (i
= 0, n_u64
= 0; i
< se
->n_fields
; i
++) {
842 if (trace_seq_has_overflowed(s
))
845 fmt
= synth_field_fmt(se
->fields
[i
]->type
);
847 /* parameter types */
848 if (tr
&& tr
->trace_flags
& TRACE_ITER_VERBOSE
)
849 trace_seq_printf(s
, "%s ", fmt
);
851 snprintf(print_fmt
, sizeof(print_fmt
), "%%s=%s%%s", fmt
);
853 /* parameter values */
854 if (se
->fields
[i
]->is_string
) {
855 trace_seq_printf(s
, print_fmt
, se
->fields
[i
]->name
,
856 (char *)&entry
->fields
[n_u64
],
857 i
== se
->n_fields
- 1 ? "" : " ");
858 n_u64
+= STR_VAR_LEN_MAX
/ sizeof(u64
);
860 struct trace_print_flags __flags
[] = {
861 __def_gfpflag_names
, {-1, NULL
} };
863 trace_seq_printf(s
, print_fmt
, se
->fields
[i
]->name
,
864 entry
->fields
[n_u64
],
865 i
== se
->n_fields
- 1 ? "" : " ");
867 if (strcmp(se
->fields
[i
]->type
, "gfp_t") == 0) {
868 trace_seq_puts(s
, " (");
869 trace_print_flags_seq(s
, "|",
870 entry
->fields
[n_u64
],
872 trace_seq_putc(s
, ')');
878 trace_seq_putc(s
, '\n');
880 return trace_handle_return(s
);
883 static struct trace_event_functions synth_event_funcs
= {
884 .trace
= print_synth_event
887 static notrace
void trace_event_raw_event_synth(void *__data
,
889 unsigned int *var_ref_idx
)
891 struct trace_event_file
*trace_file
= __data
;
892 struct synth_trace_event
*entry
;
893 struct trace_event_buffer fbuffer
;
894 struct trace_buffer
*buffer
;
895 struct synth_event
*event
;
896 unsigned int i
, n_u64
, val_idx
;
899 event
= trace_file
->event_call
->data
;
901 if (trace_trigger_soft_disabled(trace_file
))
904 fields_size
= event
->n_u64
* sizeof(u64
);
907 * Avoid ring buffer recursion detection, as this event
908 * is being performed within another event.
910 buffer
= trace_file
->tr
->array_buffer
.buffer
;
911 ring_buffer_nest_start(buffer
);
913 entry
= trace_event_buffer_reserve(&fbuffer
, trace_file
,
914 sizeof(*entry
) + fields_size
);
918 for (i
= 0, n_u64
= 0; i
< event
->n_fields
; i
++) {
919 val_idx
= var_ref_idx
[i
];
920 if (event
->fields
[i
]->is_string
) {
921 char *str_val
= (char *)(long)var_ref_vals
[val_idx
];
922 char *str_field
= (char *)&entry
->fields
[n_u64
];
924 strscpy(str_field
, str_val
, STR_VAR_LEN_MAX
);
925 n_u64
+= STR_VAR_LEN_MAX
/ sizeof(u64
);
927 struct synth_field
*field
= event
->fields
[i
];
928 u64 val
= var_ref_vals
[val_idx
];
930 switch (field
->size
) {
932 *(u8
*)&entry
->fields
[n_u64
] = (u8
)val
;
936 *(u16
*)&entry
->fields
[n_u64
] = (u16
)val
;
940 *(u32
*)&entry
->fields
[n_u64
] = (u32
)val
;
944 entry
->fields
[n_u64
] = val
;
951 trace_event_buffer_commit(&fbuffer
);
953 ring_buffer_nest_end(buffer
);
956 static void free_synth_event_print_fmt(struct trace_event_call
*call
)
959 kfree(call
->print_fmt
);
960 call
->print_fmt
= NULL
;
964 static int __set_synth_event_print_fmt(struct synth_event
*event
,
971 /* When len=0, we just calculate the needed length */
972 #define LEN_OR_ZERO (len ? len - pos : 0)
974 pos
+= snprintf(buf
+ pos
, LEN_OR_ZERO
, "\"");
975 for (i
= 0; i
< event
->n_fields
; i
++) {
976 fmt
= synth_field_fmt(event
->fields
[i
]->type
);
977 pos
+= snprintf(buf
+ pos
, LEN_OR_ZERO
, "%s=%s%s",
978 event
->fields
[i
]->name
, fmt
,
979 i
== event
->n_fields
- 1 ? "" : ", ");
981 pos
+= snprintf(buf
+ pos
, LEN_OR_ZERO
, "\"");
983 for (i
= 0; i
< event
->n_fields
; i
++) {
984 pos
+= snprintf(buf
+ pos
, LEN_OR_ZERO
,
985 ", REC->%s", event
->fields
[i
]->name
);
990 /* return the length of print_fmt */
994 static int set_synth_event_print_fmt(struct trace_event_call
*call
)
996 struct synth_event
*event
= call
->data
;
1000 /* First: called with 0 length to calculate the needed length */
1001 len
= __set_synth_event_print_fmt(event
, NULL
, 0);
1003 print_fmt
= kmalloc(len
+ 1, GFP_KERNEL
);
1007 /* Second: actually write the @print_fmt */
1008 __set_synth_event_print_fmt(event
, print_fmt
, len
+ 1);
1009 call
->print_fmt
= print_fmt
;
1014 static void free_synth_field(struct synth_field
*field
)
1021 static struct synth_field
*parse_synth_field(int argc
, const char **argv
,
1024 struct synth_field
*field
;
1025 const char *prefix
= NULL
, *field_type
= argv
[0], *field_name
, *array
;
1028 if (field_type
[0] == ';')
1031 if (!strcmp(field_type
, "unsigned")) {
1033 return ERR_PTR(-EINVAL
);
1034 prefix
= "unsigned ";
1035 field_type
= argv
[1];
1036 field_name
= argv
[2];
1039 field_name
= argv
[1];
1043 field
= kzalloc(sizeof(*field
), GFP_KERNEL
);
1045 return ERR_PTR(-ENOMEM
);
1047 len
= strlen(field_name
);
1048 array
= strchr(field_name
, '[');
1050 len
-= strlen(array
);
1051 else if (field_name
[len
- 1] == ';')
1054 field
->name
= kmemdup_nul(field_name
, len
, GFP_KERNEL
);
1060 if (field_type
[0] == ';')
1062 len
= strlen(field_type
) + 1;
1064 len
+= strlen(array
);
1066 len
+= strlen(prefix
);
1068 field
->type
= kzalloc(len
, GFP_KERNEL
);
1074 strcat(field
->type
, prefix
);
1075 strcat(field
->type
, field_type
);
1077 strcat(field
->type
, array
);
1078 if (field
->type
[len
- 1] == ';')
1079 field
->type
[len
- 1] = '\0';
1082 field
->size
= synth_field_size(field
->type
);
1088 if (synth_field_is_string(field
->type
))
1089 field
->is_string
= true;
1091 field
->is_signed
= synth_field_signed(field
->type
);
1096 free_synth_field(field
);
1097 field
= ERR_PTR(ret
);
1101 static void free_synth_tracepoint(struct tracepoint
*tp
)
1110 static struct tracepoint
*alloc_synth_tracepoint(char *name
)
1112 struct tracepoint
*tp
;
1114 tp
= kzalloc(sizeof(*tp
), GFP_KERNEL
);
1116 return ERR_PTR(-ENOMEM
);
1118 tp
->name
= kstrdup(name
, GFP_KERNEL
);
1121 return ERR_PTR(-ENOMEM
);
1127 typedef void (*synth_probe_func_t
) (void *__data
, u64
*var_ref_vals
,
1128 unsigned int *var_ref_idx
);
1130 static inline void trace_synth(struct synth_event
*event
, u64
*var_ref_vals
,
1131 unsigned int *var_ref_idx
)
1133 struct tracepoint
*tp
= event
->tp
;
1135 if (unlikely(atomic_read(&tp
->key
.enabled
) > 0)) {
1136 struct tracepoint_func
*probe_func_ptr
;
1137 synth_probe_func_t probe_func
;
1140 if (!(cpu_online(raw_smp_processor_id())))
1143 probe_func_ptr
= rcu_dereference_sched((tp
)->funcs
);
1144 if (probe_func_ptr
) {
1146 probe_func
= probe_func_ptr
->func
;
1147 __data
= probe_func_ptr
->data
;
1148 probe_func(__data
, var_ref_vals
, var_ref_idx
);
1149 } while ((++probe_func_ptr
)->func
);
1154 static struct synth_event
*find_synth_event(const char *name
)
1156 struct dyn_event
*pos
;
1157 struct synth_event
*event
;
1159 for_each_dyn_event(pos
) {
1160 if (!is_synth_event(pos
))
1162 event
= to_synth_event(pos
);
1163 if (strcmp(event
->name
, name
) == 0)
1170 static struct trace_event_fields synth_event_fields_array
[] = {
1171 { .type
= TRACE_FUNCTION_TYPE
,
1172 .define_fields
= synth_event_define_fields
},
1176 static int register_synth_event(struct synth_event
*event
)
1178 struct trace_event_call
*call
= &event
->call
;
1181 event
->call
.class = &event
->class;
1182 event
->class.system
= kstrdup(SYNTH_SYSTEM
, GFP_KERNEL
);
1183 if (!event
->class.system
) {
1188 event
->tp
= alloc_synth_tracepoint(event
->name
);
1189 if (IS_ERR(event
->tp
)) {
1190 ret
= PTR_ERR(event
->tp
);
1195 INIT_LIST_HEAD(&call
->class->fields
);
1196 call
->event
.funcs
= &synth_event_funcs
;
1197 call
->class->fields_array
= synth_event_fields_array
;
1199 ret
= register_trace_event(&call
->event
);
1204 call
->flags
= TRACE_EVENT_FL_TRACEPOINT
;
1205 call
->class->reg
= trace_event_reg
;
1206 call
->class->probe
= trace_event_raw_event_synth
;
1208 call
->tp
= event
->tp
;
1210 ret
= trace_add_event_call(call
);
1212 pr_warn("Failed to register synthetic event: %s\n",
1213 trace_event_name(call
));
1217 ret
= set_synth_event_print_fmt(call
);
1219 trace_remove_event_call(call
);
1225 unregister_trace_event(&call
->event
);
1229 static int unregister_synth_event(struct synth_event
*event
)
1231 struct trace_event_call
*call
= &event
->call
;
1234 ret
= trace_remove_event_call(call
);
1239 static void free_synth_event(struct synth_event
*event
)
1246 for (i
= 0; i
< event
->n_fields
; i
++)
1247 free_synth_field(event
->fields
[i
]);
1249 kfree(event
->fields
);
1251 kfree(event
->class.system
);
1252 free_synth_tracepoint(event
->tp
);
1253 free_synth_event_print_fmt(&event
->call
);
1257 static struct synth_event
*alloc_synth_event(const char *name
, int n_fields
,
1258 struct synth_field
**fields
)
1260 struct synth_event
*event
;
1263 event
= kzalloc(sizeof(*event
), GFP_KERNEL
);
1265 event
= ERR_PTR(-ENOMEM
);
1269 event
->name
= kstrdup(name
, GFP_KERNEL
);
1272 event
= ERR_PTR(-ENOMEM
);
1276 event
->fields
= kcalloc(n_fields
, sizeof(*event
->fields
), GFP_KERNEL
);
1277 if (!event
->fields
) {
1278 free_synth_event(event
);
1279 event
= ERR_PTR(-ENOMEM
);
1283 dyn_event_init(&event
->devent
, &synth_event_ops
);
1285 for (i
= 0; i
< n_fields
; i
++)
1286 event
->fields
[i
] = fields
[i
];
1288 event
->n_fields
= n_fields
;
1293 static void action_trace(struct hist_trigger_data
*hist_data
,
1294 struct tracing_map_elt
*elt
, void *rec
,
1295 struct ring_buffer_event
*rbe
, void *key
,
1296 struct action_data
*data
, u64
*var_ref_vals
)
1298 struct synth_event
*event
= data
->synth_event
;
1300 trace_synth(event
, var_ref_vals
, data
->var_ref_idx
);
1303 struct hist_var_data
{
1304 struct list_head list
;
1305 struct hist_trigger_data
*hist_data
;
1308 static int synth_event_check_arg_fn(void *data
)
1310 struct dynevent_arg_pair
*arg_pair
= data
;
1313 size
= synth_field_size((char *)arg_pair
->lhs
);
1315 return size
? 0 : -EINVAL
;
1319 * synth_event_add_field - Add a new field to a synthetic event cmd
1320 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1321 * @type: The type of the new field to add
1322 * @name: The name of the new field to add
1324 * Add a new field to a synthetic event cmd object. Field ordering is in
1325 * the same order the fields are added.
1327 * See synth_field_size() for available types. If field_name contains
1328 * [n] the field is considered to be an array.
1330 * Return: 0 if successful, error otherwise.
1332 int synth_event_add_field(struct dynevent_cmd
*cmd
, const char *type
,
1335 struct dynevent_arg_pair arg_pair
;
1338 if (cmd
->type
!= DYNEVENT_TYPE_SYNTH
)
1344 dynevent_arg_pair_init(&arg_pair
, 0, ';');
1346 arg_pair
.lhs
= type
;
1347 arg_pair
.rhs
= name
;
1349 ret
= dynevent_arg_pair_add(cmd
, &arg_pair
, synth_event_check_arg_fn
);
1353 if (++cmd
->n_fields
> SYNTH_FIELDS_MAX
)
1358 EXPORT_SYMBOL_GPL(synth_event_add_field
);
1361 * synth_event_add_field_str - Add a new field to a synthetic event cmd
1362 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1363 * @type_name: The type and name of the new field to add, as a single string
1365 * Add a new field to a synthetic event cmd object, as a single
1366 * string. The @type_name string is expected to be of the form 'type
1367 * name', which will be appended by ';'. No sanity checking is done -
1368 * what's passed in is assumed to already be well-formed. Field
1369 * ordering is in the same order the fields are added.
1371 * See synth_field_size() for available types. If field_name contains
1372 * [n] the field is considered to be an array.
1374 * Return: 0 if successful, error otherwise.
1376 int synth_event_add_field_str(struct dynevent_cmd
*cmd
, const char *type_name
)
1378 struct dynevent_arg arg
;
1381 if (cmd
->type
!= DYNEVENT_TYPE_SYNTH
)
1387 dynevent_arg_init(&arg
, ';');
1389 arg
.str
= type_name
;
1391 ret
= dynevent_arg_add(cmd
, &arg
, NULL
);
1395 if (++cmd
->n_fields
> SYNTH_FIELDS_MAX
)
1400 EXPORT_SYMBOL_GPL(synth_event_add_field_str
);
1403 * synth_event_add_fields - Add multiple fields to a synthetic event cmd
1404 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1405 * @fields: An array of type/name field descriptions
1406 * @n_fields: The number of field descriptions contained in the fields array
1408 * Add a new set of fields to a synthetic event cmd object. The event
1409 * fields that will be defined for the event should be passed in as an
1410 * array of struct synth_field_desc, and the number of elements in the
1411 * array passed in as n_fields. Field ordering will retain the
1412 * ordering given in the fields array.
1414 * See synth_field_size() for available types. If field_name contains
1415 * [n] the field is considered to be an array.
1417 * Return: 0 if successful, error otherwise.
1419 int synth_event_add_fields(struct dynevent_cmd
*cmd
,
1420 struct synth_field_desc
*fields
,
1421 unsigned int n_fields
)
1426 for (i
= 0; i
< n_fields
; i
++) {
1427 if (fields
[i
].type
== NULL
|| fields
[i
].name
== NULL
) {
1432 ret
= synth_event_add_field(cmd
, fields
[i
].type
, fields
[i
].name
);
1439 EXPORT_SYMBOL_GPL(synth_event_add_fields
);
1442 * __synth_event_gen_cmd_start - Start a synthetic event command from arg list
1443 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1444 * @name: The name of the synthetic event
1445 * @mod: The module creating the event, NULL if not created from a module
1446 * @args: Variable number of arg (pairs), one pair for each field
1448 * NOTE: Users normally won't want to call this function directly, but
1449 * rather use the synth_event_gen_cmd_start() wrapper, which
1450 * automatically adds a NULL to the end of the arg list. If this
1451 * function is used directly, make sure the last arg in the variable
1454 * Generate a synthetic event command to be executed by
1455 * synth_event_gen_cmd_end(). This function can be used to generate
1456 * the complete command or only the first part of it; in the latter
1457 * case, synth_event_add_field(), synth_event_add_field_str(), or
1458 * synth_event_add_fields() can be used to add more fields following
1461 * There should be an even number variable args, each pair consisting
1462 * of a type followed by a field name.
1464 * See synth_field_size() for available types. If field_name contains
1465 * [n] the field is considered to be an array.
1467 * Return: 0 if successful, error otherwise.
1469 int __synth_event_gen_cmd_start(struct dynevent_cmd
*cmd
, const char *name
,
1470 struct module
*mod
, ...)
1472 struct dynevent_arg arg
;
1476 cmd
->event_name
= name
;
1477 cmd
->private_data
= mod
;
1479 if (cmd
->type
!= DYNEVENT_TYPE_SYNTH
)
1482 dynevent_arg_init(&arg
, 0);
1484 ret
= dynevent_arg_add(cmd
, &arg
, NULL
);
1488 va_start(args
, mod
);
1490 const char *type
, *name
;
1492 type
= va_arg(args
, const char *);
1495 name
= va_arg(args
, const char *);
1499 if (++cmd
->n_fields
> SYNTH_FIELDS_MAX
) {
1504 ret
= synth_event_add_field(cmd
, type
, name
);
1512 EXPORT_SYMBOL_GPL(__synth_event_gen_cmd_start
);
1515 * synth_event_gen_cmd_array_start - Start synthetic event command from an array
1516 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1517 * @name: The name of the synthetic event
1518 * @fields: An array of type/name field descriptions
1519 * @n_fields: The number of field descriptions contained in the fields array
1521 * Generate a synthetic event command to be executed by
1522 * synth_event_gen_cmd_end(). This function can be used to generate
1523 * the complete command or only the first part of it; in the latter
1524 * case, synth_event_add_field(), synth_event_add_field_str(), or
1525 * synth_event_add_fields() can be used to add more fields following
1528 * The event fields that will be defined for the event should be
1529 * passed in as an array of struct synth_field_desc, and the number of
1530 * elements in the array passed in as n_fields. Field ordering will
1531 * retain the ordering given in the fields array.
1533 * See synth_field_size() for available types. If field_name contains
1534 * [n] the field is considered to be an array.
1536 * Return: 0 if successful, error otherwise.
1538 int synth_event_gen_cmd_array_start(struct dynevent_cmd
*cmd
, const char *name
,
1540 struct synth_field_desc
*fields
,
1541 unsigned int n_fields
)
1543 struct dynevent_arg arg
;
1547 cmd
->event_name
= name
;
1548 cmd
->private_data
= mod
;
1550 if (cmd
->type
!= DYNEVENT_TYPE_SYNTH
)
1553 if (n_fields
> SYNTH_FIELDS_MAX
)
1556 dynevent_arg_init(&arg
, 0);
1558 ret
= dynevent_arg_add(cmd
, &arg
, NULL
);
1562 for (i
= 0; i
< n_fields
; i
++) {
1563 if (fields
[i
].type
== NULL
|| fields
[i
].name
== NULL
)
1566 ret
= synth_event_add_field(cmd
, fields
[i
].type
, fields
[i
].name
);
1573 EXPORT_SYMBOL_GPL(synth_event_gen_cmd_array_start
);
1575 static int __create_synth_event(int argc
, const char *name
, const char **argv
)
1577 struct synth_field
*field
, *fields
[SYNTH_FIELDS_MAX
];
1578 struct synth_event
*event
= NULL
;
1579 int i
, consumed
= 0, n_fields
= 0, ret
= 0;
1583 * - Add synthetic event: <event_name> field[;field] ...
1584 * - Remove synthetic event: !<event_name> field[;field] ...
1585 * where 'field' = type field_name
1588 if (name
[0] == '\0' || argc
< 1)
1591 mutex_lock(&event_mutex
);
1593 event
= find_synth_event(name
);
1599 for (i
= 0; i
< argc
- 1; i
++) {
1600 if (strcmp(argv
[i
], ";") == 0)
1602 if (n_fields
== SYNTH_FIELDS_MAX
) {
1607 field
= parse_synth_field(argc
- i
, &argv
[i
], &consumed
);
1608 if (IS_ERR(field
)) {
1609 ret
= PTR_ERR(field
);
1612 fields
[n_fields
++] = field
;
1616 if (i
< argc
&& strcmp(argv
[i
], ";") != 0) {
1621 event
= alloc_synth_event(name
, n_fields
, fields
);
1622 if (IS_ERR(event
)) {
1623 ret
= PTR_ERR(event
);
1627 ret
= register_synth_event(event
);
1629 dyn_event_add(&event
->devent
);
1631 free_synth_event(event
);
1633 mutex_unlock(&event_mutex
);
1637 for (i
= 0; i
< n_fields
; i
++)
1638 free_synth_field(fields
[i
]);
1644 * synth_event_create - Create a new synthetic event
1645 * @name: The name of the new sythetic event
1646 * @fields: An array of type/name field descriptions
1647 * @n_fields: The number of field descriptions contained in the fields array
1648 * @mod: The module creating the event, NULL if not created from a module
1650 * Create a new synthetic event with the given name under the
1651 * trace/events/synthetic/ directory. The event fields that will be
1652 * defined for the event should be passed in as an array of struct
1653 * synth_field_desc, and the number elements in the array passed in as
1654 * n_fields. Field ordering will retain the ordering given in the
1657 * If the new synthetic event is being created from a module, the mod
1658 * param must be non-NULL. This will ensure that the trace buffer
1659 * won't contain unreadable events.
1661 * The new synth event should be deleted using synth_event_delete()
1662 * function. The new synthetic event can be generated from modules or
1663 * other kernel code using trace_synth_event() and related functions.
1665 * Return: 0 if successful, error otherwise.
1667 int synth_event_create(const char *name
, struct synth_field_desc
*fields
,
1668 unsigned int n_fields
, struct module
*mod
)
1670 struct dynevent_cmd cmd
;
1674 buf
= kzalloc(MAX_DYNEVENT_CMD_LEN
, GFP_KERNEL
);
1678 synth_event_cmd_init(&cmd
, buf
, MAX_DYNEVENT_CMD_LEN
);
1680 ret
= synth_event_gen_cmd_array_start(&cmd
, name
, mod
,
1685 ret
= synth_event_gen_cmd_end(&cmd
);
1691 EXPORT_SYMBOL_GPL(synth_event_create
);
1693 static int destroy_synth_event(struct synth_event
*se
)
1700 ret
= unregister_synth_event(se
);
1702 dyn_event_remove(&se
->devent
);
1703 free_synth_event(se
);
1711 * synth_event_delete - Delete a synthetic event
1712 * @event_name: The name of the new sythetic event
1714 * Delete a synthetic event that was created with synth_event_create().
1716 * Return: 0 if successful, error otherwise.
1718 int synth_event_delete(const char *event_name
)
1720 struct synth_event
*se
= NULL
;
1721 struct module
*mod
= NULL
;
1724 mutex_lock(&event_mutex
);
1725 se
= find_synth_event(event_name
);
1728 ret
= destroy_synth_event(se
);
1730 mutex_unlock(&event_mutex
);
1733 mutex_lock(&trace_types_lock
);
1735 * It is safest to reset the ring buffer if the module
1736 * being unloaded registered any events that were
1737 * used. The only worry is if a new module gets
1738 * loaded, and takes on the same id as the events of
1739 * this module. When printing out the buffer, traced
1740 * events left over from this module may be passed to
1741 * the new module events and unexpected results may
1744 tracing_reset_all_online_cpus();
1745 mutex_unlock(&trace_types_lock
);
1750 EXPORT_SYMBOL_GPL(synth_event_delete
);
1752 static int create_or_delete_synth_event(int argc
, char **argv
)
1754 const char *name
= argv
[0];
1757 /* trace_run_command() ensures argc != 0 */
1758 if (name
[0] == '!') {
1759 ret
= synth_event_delete(name
+ 1);
1763 ret
= __create_synth_event(argc
- 1, name
, (const char **)argv
+ 1);
1764 return ret
== -ECANCELED
? -EINVAL
: ret
;
1767 static int synth_event_run_command(struct dynevent_cmd
*cmd
)
1769 struct synth_event
*se
;
1772 ret
= trace_run_command(cmd
->seq
.buffer
, create_or_delete_synth_event
);
1776 se
= find_synth_event(cmd
->event_name
);
1780 se
->mod
= cmd
->private_data
;
1786 * synth_event_cmd_init - Initialize a synthetic event command object
1787 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1788 * @buf: A pointer to the buffer used to build the command
1789 * @maxlen: The length of the buffer passed in @buf
1791 * Initialize a synthetic event command object. Use this before
1792 * calling any of the other dyenvent_cmd functions.
1794 void synth_event_cmd_init(struct dynevent_cmd
*cmd
, char *buf
, int maxlen
)
1796 dynevent_cmd_init(cmd
, buf
, maxlen
, DYNEVENT_TYPE_SYNTH
,
1797 synth_event_run_command
);
1799 EXPORT_SYMBOL_GPL(synth_event_cmd_init
);
1802 * synth_event_trace - Trace a synthetic event
1803 * @file: The trace_event_file representing the synthetic event
1804 * @n_vals: The number of values in vals
1805 * @args: Variable number of args containing the event values
1807 * Trace a synthetic event using the values passed in the variable
1810 * The argument list should be a list 'n_vals' u64 values. The number
1811 * of vals must match the number of field in the synthetic event, and
1812 * must be in the same order as the synthetic event fields.
1814 * All vals should be cast to u64, and string vals are just pointers
1815 * to strings, cast to u64. Strings will be copied into space
1816 * reserved in the event for the string, using these pointers.
1818 * Return: 0 on success, err otherwise.
1820 int synth_event_trace(struct trace_event_file
*file
, unsigned int n_vals
, ...)
1822 struct trace_event_buffer fbuffer
;
1823 struct synth_trace_event
*entry
;
1824 struct trace_buffer
*buffer
;
1825 struct synth_event
*event
;
1826 unsigned int i
, n_u64
;
1827 int fields_size
= 0;
1832 * Normal event generation doesn't get called at all unless
1833 * the ENABLED bit is set (which attaches the probe thus
1834 * allowing this code to be called, etc). Because this is
1835 * called directly by the user, we don't have that but we
1836 * still need to honor not logging when disabled.
1838 if (!(file
->flags
& EVENT_FILE_FL_ENABLED
))
1841 event
= file
->event_call
->data
;
1843 if (n_vals
!= event
->n_fields
)
1846 if (trace_trigger_soft_disabled(file
))
1849 fields_size
= event
->n_u64
* sizeof(u64
);
1852 * Avoid ring buffer recursion detection, as this event
1853 * is being performed within another event.
1855 buffer
= file
->tr
->array_buffer
.buffer
;
1856 ring_buffer_nest_start(buffer
);
1858 entry
= trace_event_buffer_reserve(&fbuffer
, file
,
1859 sizeof(*entry
) + fields_size
);
1865 va_start(args
, n_vals
);
1866 for (i
= 0, n_u64
= 0; i
< event
->n_fields
; i
++) {
1869 val
= va_arg(args
, u64
);
1871 if (event
->fields
[i
]->is_string
) {
1872 char *str_val
= (char *)(long)val
;
1873 char *str_field
= (char *)&entry
->fields
[n_u64
];
1875 strscpy(str_field
, str_val
, STR_VAR_LEN_MAX
);
1876 n_u64
+= STR_VAR_LEN_MAX
/ sizeof(u64
);
1878 entry
->fields
[n_u64
] = val
;
1884 trace_event_buffer_commit(&fbuffer
);
1886 ring_buffer_nest_end(buffer
);
1890 EXPORT_SYMBOL_GPL(synth_event_trace
);
1893 * synth_event_trace_array - Trace a synthetic event from an array
1894 * @file: The trace_event_file representing the synthetic event
1895 * @vals: Array of values
1896 * @n_vals: The number of values in vals
1898 * Trace a synthetic event using the values passed in as 'vals'.
1900 * The 'vals' array is just an array of 'n_vals' u64. The number of
1901 * vals must match the number of field in the synthetic event, and
1902 * must be in the same order as the synthetic event fields.
1904 * All vals should be cast to u64, and string vals are just pointers
1905 * to strings, cast to u64. Strings will be copied into space
1906 * reserved in the event for the string, using these pointers.
1908 * Return: 0 on success, err otherwise.
1910 int synth_event_trace_array(struct trace_event_file
*file
, u64
*vals
,
1911 unsigned int n_vals
)
1913 struct trace_event_buffer fbuffer
;
1914 struct synth_trace_event
*entry
;
1915 struct trace_buffer
*buffer
;
1916 struct synth_event
*event
;
1917 unsigned int i
, n_u64
;
1918 int fields_size
= 0;
1922 * Normal event generation doesn't get called at all unless
1923 * the ENABLED bit is set (which attaches the probe thus
1924 * allowing this code to be called, etc). Because this is
1925 * called directly by the user, we don't have that but we
1926 * still need to honor not logging when disabled.
1928 if (!(file
->flags
& EVENT_FILE_FL_ENABLED
))
1931 event
= file
->event_call
->data
;
1933 if (n_vals
!= event
->n_fields
)
1936 if (trace_trigger_soft_disabled(file
))
1939 fields_size
= event
->n_u64
* sizeof(u64
);
1942 * Avoid ring buffer recursion detection, as this event
1943 * is being performed within another event.
1945 buffer
= file
->tr
->array_buffer
.buffer
;
1946 ring_buffer_nest_start(buffer
);
1948 entry
= trace_event_buffer_reserve(&fbuffer
, file
,
1949 sizeof(*entry
) + fields_size
);
1955 for (i
= 0, n_u64
= 0; i
< event
->n_fields
; i
++) {
1956 if (event
->fields
[i
]->is_string
) {
1957 char *str_val
= (char *)(long)vals
[i
];
1958 char *str_field
= (char *)&entry
->fields
[n_u64
];
1960 strscpy(str_field
, str_val
, STR_VAR_LEN_MAX
);
1961 n_u64
+= STR_VAR_LEN_MAX
/ sizeof(u64
);
1963 entry
->fields
[n_u64
] = vals
[i
];
1968 trace_event_buffer_commit(&fbuffer
);
1970 ring_buffer_nest_end(buffer
);
1974 EXPORT_SYMBOL_GPL(synth_event_trace_array
);
1977 * synth_event_trace_start - Start piecewise synthetic event trace
1978 * @file: The trace_event_file representing the synthetic event
1979 * @trace_state: A pointer to object tracking the piecewise trace state
1981 * Start the trace of a synthetic event field-by-field rather than all
1984 * This function 'opens' an event trace, which means space is reserved
1985 * for the event in the trace buffer, after which the event's
1986 * individual field values can be set through either
1987 * synth_event_add_next_val() or synth_event_add_val().
1989 * A pointer to a trace_state object is passed in, which will keep
1990 * track of the current event trace state until the event trace is
1991 * closed (and the event finally traced) using
1992 * synth_event_trace_end().
1994 * Note that synth_event_trace_end() must be called after all values
1995 * have been added for each event trace, regardless of whether adding
1996 * all field values succeeded or not.
1998 * Note also that for a given event trace, all fields must be added
1999 * using either synth_event_add_next_val() or synth_event_add_val()
2000 * but not both together or interleaved.
2002 * Return: 0 on success, err otherwise.
2004 int synth_event_trace_start(struct trace_event_file
*file
,
2005 struct synth_event_trace_state
*trace_state
)
2007 struct synth_trace_event
*entry
;
2008 int fields_size
= 0;
2016 memset(trace_state
, '\0', sizeof(*trace_state
));
2019 * Normal event tracing doesn't get called at all unless the
2020 * ENABLED bit is set (which attaches the probe thus allowing
2021 * this code to be called, etc). Because this is called
2022 * directly by the user, we don't have that but we still need
2023 * to honor not logging when disabled. For the the iterated
2024 * trace case, we save the enabed state upon start and just
2025 * ignore the following data calls.
2027 if (!(file
->flags
& EVENT_FILE_FL_ENABLED
)) {
2028 trace_state
->enabled
= false;
2032 trace_state
->enabled
= true;
2034 trace_state
->event
= file
->event_call
->data
;
2036 if (trace_trigger_soft_disabled(file
)) {
2041 fields_size
= trace_state
->event
->n_u64
* sizeof(u64
);
2044 * Avoid ring buffer recursion detection, as this event
2045 * is being performed within another event.
2047 trace_state
->buffer
= file
->tr
->array_buffer
.buffer
;
2048 ring_buffer_nest_start(trace_state
->buffer
);
2050 entry
= trace_event_buffer_reserve(&trace_state
->fbuffer
, file
,
2051 sizeof(*entry
) + fields_size
);
2057 trace_state
->entry
= entry
;
2061 EXPORT_SYMBOL_GPL(synth_event_trace_start
);
2063 static int __synth_event_add_val(const char *field_name
, u64 val
,
2064 struct synth_event_trace_state
*trace_state
)
2066 struct synth_field
*field
= NULL
;
2067 struct synth_trace_event
*entry
;
2068 struct synth_event
*event
;
2076 /* can't mix add_next_synth_val() with add_synth_val() */
2078 if (trace_state
->add_next
) {
2082 trace_state
->add_name
= true;
2084 if (trace_state
->add_name
) {
2088 trace_state
->add_next
= true;
2091 if (!trace_state
->enabled
)
2094 event
= trace_state
->event
;
2095 if (trace_state
->add_name
) {
2096 for (i
= 0; i
< event
->n_fields
; i
++) {
2097 field
= event
->fields
[i
];
2098 if (strcmp(field
->name
, field_name
) == 0)
2106 if (trace_state
->cur_field
>= event
->n_fields
) {
2110 field
= event
->fields
[trace_state
->cur_field
++];
2113 entry
= trace_state
->entry
;
2114 if (field
->is_string
) {
2115 char *str_val
= (char *)(long)val
;
2123 str_field
= (char *)&entry
->fields
[field
->offset
];
2124 strscpy(str_field
, str_val
, STR_VAR_LEN_MAX
);
2126 entry
->fields
[field
->offset
] = val
;
2132 * synth_event_add_next_val - Add the next field's value to an open synth trace
2133 * @val: The value to set the next field to
2134 * @trace_state: A pointer to object tracking the piecewise trace state
2136 * Set the value of the next field in an event that's been opened by
2137 * synth_event_trace_start().
2139 * The val param should be the value cast to u64. If the value points
2140 * to a string, the val param should be a char * cast to u64.
2142 * This function assumes all the fields in an event are to be set one
2143 * after another - successive calls to this function are made, one for
2144 * each field, in the order of the fields in the event, until all
2145 * fields have been set. If you'd rather set each field individually
2146 * without regard to ordering, synth_event_add_val() can be used
2149 * Note however that synth_event_add_next_val() and
2150 * synth_event_add_val() can't be intermixed for a given event trace -
2151 * one or the other but not both can be used at the same time.
2153 * Note also that synth_event_trace_end() must be called after all
2154 * values have been added for each event trace, regardless of whether
2155 * adding all field values succeeded or not.
2157 * Return: 0 on success, err otherwise.
2159 int synth_event_add_next_val(u64 val
,
2160 struct synth_event_trace_state
*trace_state
)
2162 return __synth_event_add_val(NULL
, val
, trace_state
);
2164 EXPORT_SYMBOL_GPL(synth_event_add_next_val
);
2167 * synth_event_add_val - Add a named field's value to an open synth trace
2168 * @field_name: The name of the synthetic event field value to set
2169 * @val: The value to set the next field to
2170 * @trace_state: A pointer to object tracking the piecewise trace state
2172 * Set the value of the named field in an event that's been opened by
2173 * synth_event_trace_start().
2175 * The val param should be the value cast to u64. If the value points
2176 * to a string, the val param should be a char * cast to u64.
2178 * This function looks up the field name, and if found, sets the field
2179 * to the specified value. This lookup makes this function more
2180 * expensive than synth_event_add_next_val(), so use that or the
2181 * none-piecewise synth_event_trace() instead if efficiency is more
2184 * Note however that synth_event_add_next_val() and
2185 * synth_event_add_val() can't be intermixed for a given event trace -
2186 * one or the other but not both can be used at the same time.
2188 * Note also that synth_event_trace_end() must be called after all
2189 * values have been added for each event trace, regardless of whether
2190 * adding all field values succeeded or not.
2192 * Return: 0 on success, err otherwise.
2194 int synth_event_add_val(const char *field_name
, u64 val
,
2195 struct synth_event_trace_state
*trace_state
)
2197 return __synth_event_add_val(field_name
, val
, trace_state
);
2199 EXPORT_SYMBOL_GPL(synth_event_add_val
);
2202 * synth_event_trace_end - End piecewise synthetic event trace
2203 * @trace_state: A pointer to object tracking the piecewise trace state
2205 * End the trace of a synthetic event opened by
2206 * synth_event_trace__start().
2208 * This function 'closes' an event trace, which basically means that
2209 * it commits the reserved event and cleans up other loose ends.
2211 * A pointer to a trace_state object is passed in, which will keep
2212 * track of the current event trace state opened with
2213 * synth_event_trace_start().
2215 * Note that this function must be called after all values have been
2216 * added for each event trace, regardless of whether adding all field
2217 * values succeeded or not.
2219 * Return: 0 on success, err otherwise.
2221 int synth_event_trace_end(struct synth_event_trace_state
*trace_state
)
2226 trace_event_buffer_commit(&trace_state
->fbuffer
);
2228 ring_buffer_nest_end(trace_state
->buffer
);
2232 EXPORT_SYMBOL_GPL(synth_event_trace_end
);
2234 static int create_synth_event(int argc
, const char **argv
)
2236 const char *name
= argv
[0];
2239 if (name
[0] != 's' || name
[1] != ':')
2243 /* This interface accepts group name prefix */
2244 if (strchr(name
, '/')) {
2245 len
= str_has_prefix(name
, SYNTH_SYSTEM
"/");
2250 return __create_synth_event(argc
- 1, name
, argv
+ 1);
2253 static int synth_event_release(struct dyn_event
*ev
)
2255 struct synth_event
*event
= to_synth_event(ev
);
2261 ret
= unregister_synth_event(event
);
2265 dyn_event_remove(ev
);
2266 free_synth_event(event
);
2270 static int __synth_event_show(struct seq_file
*m
, struct synth_event
*event
)
2272 struct synth_field
*field
;
2275 seq_printf(m
, "%s\t", event
->name
);
2277 for (i
= 0; i
< event
->n_fields
; i
++) {
2278 field
= event
->fields
[i
];
2280 /* parameter values */
2281 seq_printf(m
, "%s %s%s", field
->type
, field
->name
,
2282 i
== event
->n_fields
- 1 ? "" : "; ");
2290 static int synth_event_show(struct seq_file
*m
, struct dyn_event
*ev
)
2292 struct synth_event
*event
= to_synth_event(ev
);
2294 seq_printf(m
, "s:%s/", event
->class.system
);
2296 return __synth_event_show(m
, event
);
2299 static int synth_events_seq_show(struct seq_file
*m
, void *v
)
2301 struct dyn_event
*ev
= v
;
2303 if (!is_synth_event(ev
))
2306 return __synth_event_show(m
, to_synth_event(ev
));
2309 static const struct seq_operations synth_events_seq_op
= {
2310 .start
= dyn_event_seq_start
,
2311 .next
= dyn_event_seq_next
,
2312 .stop
= dyn_event_seq_stop
,
2313 .show
= synth_events_seq_show
,
2316 static int synth_events_open(struct inode
*inode
, struct file
*file
)
2320 ret
= security_locked_down(LOCKDOWN_TRACEFS
);
2324 if ((file
->f_mode
& FMODE_WRITE
) && (file
->f_flags
& O_TRUNC
)) {
2325 ret
= dyn_events_release_all(&synth_event_ops
);
2330 return seq_open(file
, &synth_events_seq_op
);
2333 static ssize_t
synth_events_write(struct file
*file
,
2334 const char __user
*buffer
,
2335 size_t count
, loff_t
*ppos
)
2337 return trace_parse_run_command(file
, buffer
, count
, ppos
,
2338 create_or_delete_synth_event
);
2341 static const struct file_operations synth_events_fops
= {
2342 .open
= synth_events_open
,
2343 .write
= synth_events_write
,
2345 .llseek
= seq_lseek
,
2346 .release
= seq_release
,
2349 static u64
hist_field_timestamp(struct hist_field
*hist_field
,
2350 struct tracing_map_elt
*elt
,
2351 struct ring_buffer_event
*rbe
,
2354 struct hist_trigger_data
*hist_data
= hist_field
->hist_data
;
2355 struct trace_array
*tr
= hist_data
->event_file
->tr
;
2357 u64 ts
= ring_buffer_event_time_stamp(rbe
);
2359 if (hist_data
->attrs
->ts_in_usecs
&& trace_clock_in_ns(tr
))
2365 static u64
hist_field_cpu(struct hist_field
*hist_field
,
2366 struct tracing_map_elt
*elt
,
2367 struct ring_buffer_event
*rbe
,
2370 int cpu
= smp_processor_id();
2376 * check_field_for_var_ref - Check if a VAR_REF field references a variable
2377 * @hist_field: The VAR_REF field to check
2378 * @var_data: The hist trigger that owns the variable
2379 * @var_idx: The trigger variable identifier
2381 * Check the given VAR_REF field to see whether or not it references
2382 * the given variable associated with the given trigger.
2384 * Return: The VAR_REF field if it does reference the variable, NULL if not
2386 static struct hist_field
*
2387 check_field_for_var_ref(struct hist_field
*hist_field
,
2388 struct hist_trigger_data
*var_data
,
2389 unsigned int var_idx
)
2391 WARN_ON(!(hist_field
&& hist_field
->flags
& HIST_FIELD_FL_VAR_REF
));
2393 if (hist_field
&& hist_field
->var
.idx
== var_idx
&&
2394 hist_field
->var
.hist_data
== var_data
)
2401 * find_var_ref - Check if a trigger has a reference to a trigger variable
2402 * @hist_data: The hist trigger that might have a reference to the variable
2403 * @var_data: The hist trigger that owns the variable
2404 * @var_idx: The trigger variable identifier
2406 * Check the list of var_refs[] on the first hist trigger to see
2407 * whether any of them are references to the variable on the second
2410 * Return: The VAR_REF field referencing the variable if so, NULL if not
2412 static struct hist_field
*find_var_ref(struct hist_trigger_data
*hist_data
,
2413 struct hist_trigger_data
*var_data
,
2414 unsigned int var_idx
)
2416 struct hist_field
*hist_field
;
2419 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
2420 hist_field
= hist_data
->var_refs
[i
];
2421 if (check_field_for_var_ref(hist_field
, var_data
, var_idx
))
2429 * find_any_var_ref - Check if there is a reference to a given trigger variable
2430 * @hist_data: The hist trigger
2431 * @var_idx: The trigger variable identifier
2433 * Check to see whether the given variable is currently referenced by
2434 * any other trigger.
2436 * The trigger the variable is defined on is explicitly excluded - the
2437 * assumption being that a self-reference doesn't prevent a trigger
2438 * from being removed.
2440 * Return: The VAR_REF field referencing the variable if so, NULL if not
2442 static struct hist_field
*find_any_var_ref(struct hist_trigger_data
*hist_data
,
2443 unsigned int var_idx
)
2445 struct trace_array
*tr
= hist_data
->event_file
->tr
;
2446 struct hist_field
*found
= NULL
;
2447 struct hist_var_data
*var_data
;
2449 list_for_each_entry(var_data
, &tr
->hist_vars
, list
) {
2450 if (var_data
->hist_data
== hist_data
)
2452 found
= find_var_ref(var_data
->hist_data
, hist_data
, var_idx
);
2461 * check_var_refs - Check if there is a reference to any of trigger's variables
2462 * @hist_data: The hist trigger
2464 * A trigger can define one or more variables. If any one of them is
2465 * currently referenced by any other trigger, this function will
2468 * Typically used to determine whether or not a trigger can be removed
2469 * - if there are any references to a trigger's variables, it cannot.
2471 * Return: True if there is a reference to any of trigger's variables
2473 static bool check_var_refs(struct hist_trigger_data
*hist_data
)
2475 struct hist_field
*field
;
2479 for_each_hist_field(i
, hist_data
) {
2480 field
= hist_data
->fields
[i
];
2481 if (field
&& field
->flags
& HIST_FIELD_FL_VAR
) {
2482 if (find_any_var_ref(hist_data
, field
->var
.idx
)) {
2492 static struct hist_var_data
*find_hist_vars(struct hist_trigger_data
*hist_data
)
2494 struct trace_array
*tr
= hist_data
->event_file
->tr
;
2495 struct hist_var_data
*var_data
, *found
= NULL
;
2497 list_for_each_entry(var_data
, &tr
->hist_vars
, list
) {
2498 if (var_data
->hist_data
== hist_data
) {
2507 static bool field_has_hist_vars(struct hist_field
*hist_field
,
2518 if (hist_field
->flags
& HIST_FIELD_FL_VAR
||
2519 hist_field
->flags
& HIST_FIELD_FL_VAR_REF
)
2522 for (i
= 0; i
< HIST_FIELD_OPERANDS_MAX
; i
++) {
2523 struct hist_field
*operand
;
2525 operand
= hist_field
->operands
[i
];
2526 if (field_has_hist_vars(operand
, level
+ 1))
2533 static bool has_hist_vars(struct hist_trigger_data
*hist_data
)
2535 struct hist_field
*hist_field
;
2538 for_each_hist_field(i
, hist_data
) {
2539 hist_field
= hist_data
->fields
[i
];
2540 if (field_has_hist_vars(hist_field
, 0))
2547 static int save_hist_vars(struct hist_trigger_data
*hist_data
)
2549 struct trace_array
*tr
= hist_data
->event_file
->tr
;
2550 struct hist_var_data
*var_data
;
2552 var_data
= find_hist_vars(hist_data
);
2556 if (tracing_check_open_get_tr(tr
))
2559 var_data
= kzalloc(sizeof(*var_data
), GFP_KERNEL
);
2561 trace_array_put(tr
);
2565 var_data
->hist_data
= hist_data
;
2566 list_add(&var_data
->list
, &tr
->hist_vars
);
2571 static void remove_hist_vars(struct hist_trigger_data
*hist_data
)
2573 struct trace_array
*tr
= hist_data
->event_file
->tr
;
2574 struct hist_var_data
*var_data
;
2576 var_data
= find_hist_vars(hist_data
);
2580 if (WARN_ON(check_var_refs(hist_data
)))
2583 list_del(&var_data
->list
);
2587 trace_array_put(tr
);
2590 static struct hist_field
*find_var_field(struct hist_trigger_data
*hist_data
,
2591 const char *var_name
)
2593 struct hist_field
*hist_field
, *found
= NULL
;
2596 for_each_hist_field(i
, hist_data
) {
2597 hist_field
= hist_data
->fields
[i
];
2598 if (hist_field
&& hist_field
->flags
& HIST_FIELD_FL_VAR
&&
2599 strcmp(hist_field
->var
.name
, var_name
) == 0) {
2608 static struct hist_field
*find_var(struct hist_trigger_data
*hist_data
,
2609 struct trace_event_file
*file
,
2610 const char *var_name
)
2612 struct hist_trigger_data
*test_data
;
2613 struct event_trigger_data
*test
;
2614 struct hist_field
*hist_field
;
2616 lockdep_assert_held(&event_mutex
);
2618 hist_field
= find_var_field(hist_data
, var_name
);
2622 list_for_each_entry(test
, &file
->triggers
, list
) {
2623 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
2624 test_data
= test
->private_data
;
2625 hist_field
= find_var_field(test_data
, var_name
);
2634 static struct trace_event_file
*find_var_file(struct trace_array
*tr
,
2639 struct hist_trigger_data
*var_hist_data
;
2640 struct hist_var_data
*var_data
;
2641 struct trace_event_file
*file
, *found
= NULL
;
2644 return find_event_file(tr
, system
, event_name
);
2646 list_for_each_entry(var_data
, &tr
->hist_vars
, list
) {
2647 var_hist_data
= var_data
->hist_data
;
2648 file
= var_hist_data
->event_file
;
2652 if (find_var_field(var_hist_data
, var_name
)) {
2654 hist_err(tr
, HIST_ERR_VAR_NOT_UNIQUE
, errpos(var_name
));
2665 static struct hist_field
*find_file_var(struct trace_event_file
*file
,
2666 const char *var_name
)
2668 struct hist_trigger_data
*test_data
;
2669 struct event_trigger_data
*test
;
2670 struct hist_field
*hist_field
;
2672 lockdep_assert_held(&event_mutex
);
2674 list_for_each_entry(test
, &file
->triggers
, list
) {
2675 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
2676 test_data
= test
->private_data
;
2677 hist_field
= find_var_field(test_data
, var_name
);
2686 static struct hist_field
*
2687 find_match_var(struct hist_trigger_data
*hist_data
, char *var_name
)
2689 struct trace_array
*tr
= hist_data
->event_file
->tr
;
2690 struct hist_field
*hist_field
, *found
= NULL
;
2691 struct trace_event_file
*file
;
2694 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
2695 struct action_data
*data
= hist_data
->actions
[i
];
2697 if (data
->handler
== HANDLER_ONMATCH
) {
2698 char *system
= data
->match_data
.event_system
;
2699 char *event_name
= data
->match_data
.event
;
2701 file
= find_var_file(tr
, system
, event_name
, var_name
);
2704 hist_field
= find_file_var(file
, var_name
);
2707 hist_err(tr
, HIST_ERR_VAR_NOT_UNIQUE
,
2709 return ERR_PTR(-EINVAL
);
2719 static struct hist_field
*find_event_var(struct hist_trigger_data
*hist_data
,
2724 struct trace_array
*tr
= hist_data
->event_file
->tr
;
2725 struct hist_field
*hist_field
= NULL
;
2726 struct trace_event_file
*file
;
2728 if (!system
|| !event_name
) {
2729 hist_field
= find_match_var(hist_data
, var_name
);
2730 if (IS_ERR(hist_field
))
2736 file
= find_var_file(tr
, system
, event_name
, var_name
);
2740 hist_field
= find_file_var(file
, var_name
);
2745 static u64
hist_field_var_ref(struct hist_field
*hist_field
,
2746 struct tracing_map_elt
*elt
,
2747 struct ring_buffer_event
*rbe
,
2750 struct hist_elt_data
*elt_data
;
2753 if (WARN_ON_ONCE(!elt
))
2756 elt_data
= elt
->private_data
;
2757 var_val
= elt_data
->var_ref_vals
[hist_field
->var_ref_idx
];
2762 static bool resolve_var_refs(struct hist_trigger_data
*hist_data
, void *key
,
2763 u64
*var_ref_vals
, bool self
)
2765 struct hist_trigger_data
*var_data
;
2766 struct tracing_map_elt
*var_elt
;
2767 struct hist_field
*hist_field
;
2768 unsigned int i
, var_idx
;
2769 bool resolved
= true;
2772 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
2773 hist_field
= hist_data
->var_refs
[i
];
2774 var_idx
= hist_field
->var
.idx
;
2775 var_data
= hist_field
->var
.hist_data
;
2777 if (var_data
== NULL
) {
2782 if ((self
&& var_data
!= hist_data
) ||
2783 (!self
&& var_data
== hist_data
))
2786 var_elt
= tracing_map_lookup(var_data
->map
, key
);
2792 if (!tracing_map_var_set(var_elt
, var_idx
)) {
2797 if (self
|| !hist_field
->read_once
)
2798 var_val
= tracing_map_read_var(var_elt
, var_idx
);
2800 var_val
= tracing_map_read_var_once(var_elt
, var_idx
);
2802 var_ref_vals
[i
] = var_val
;
2808 static const char *hist_field_name(struct hist_field
*field
,
2811 const char *field_name
= "";
2817 field_name
= field
->field
->name
;
2818 else if (field
->flags
& HIST_FIELD_FL_LOG2
||
2819 field
->flags
& HIST_FIELD_FL_ALIAS
)
2820 field_name
= hist_field_name(field
->operands
[0], ++level
);
2821 else if (field
->flags
& HIST_FIELD_FL_CPU
)
2823 else if (field
->flags
& HIST_FIELD_FL_EXPR
||
2824 field
->flags
& HIST_FIELD_FL_VAR_REF
) {
2825 if (field
->system
) {
2826 static char full_name
[MAX_FILTER_STR_VAL
];
2828 strcat(full_name
, field
->system
);
2829 strcat(full_name
, ".");
2830 strcat(full_name
, field
->event_name
);
2831 strcat(full_name
, ".");
2832 strcat(full_name
, field
->name
);
2833 field_name
= full_name
;
2835 field_name
= field
->name
;
2836 } else if (field
->flags
& HIST_FIELD_FL_TIMESTAMP
)
2837 field_name
= "common_timestamp";
2839 if (field_name
== NULL
)
2845 static hist_field_fn_t
select_value_fn(int field_size
, int field_is_signed
)
2847 hist_field_fn_t fn
= NULL
;
2849 switch (field_size
) {
2851 if (field_is_signed
)
2852 fn
= hist_field_s64
;
2854 fn
= hist_field_u64
;
2857 if (field_is_signed
)
2858 fn
= hist_field_s32
;
2860 fn
= hist_field_u32
;
2863 if (field_is_signed
)
2864 fn
= hist_field_s16
;
2866 fn
= hist_field_u16
;
2869 if (field_is_signed
)
2879 static int parse_map_size(char *str
)
2881 unsigned long size
, map_bits
;
2884 ret
= kstrtoul(str
, 0, &size
);
2888 map_bits
= ilog2(roundup_pow_of_two(size
));
2889 if (map_bits
< TRACING_MAP_BITS_MIN
||
2890 map_bits
> TRACING_MAP_BITS_MAX
)
2898 static void destroy_hist_trigger_attrs(struct hist_trigger_attrs
*attrs
)
2905 for (i
= 0; i
< attrs
->n_assignments
; i
++)
2906 kfree(attrs
->assignment_str
[i
]);
2908 for (i
= 0; i
< attrs
->n_actions
; i
++)
2909 kfree(attrs
->action_str
[i
]);
2912 kfree(attrs
->sort_key_str
);
2913 kfree(attrs
->keys_str
);
2914 kfree(attrs
->vals_str
);
2915 kfree(attrs
->clock
);
2919 static int parse_action(char *str
, struct hist_trigger_attrs
*attrs
)
2923 if (attrs
->n_actions
>= HIST_ACTIONS_MAX
)
2926 if ((str_has_prefix(str
, "onmatch(")) ||
2927 (str_has_prefix(str
, "onmax(")) ||
2928 (str_has_prefix(str
, "onchange("))) {
2929 attrs
->action_str
[attrs
->n_actions
] = kstrdup(str
, GFP_KERNEL
);
2930 if (!attrs
->action_str
[attrs
->n_actions
]) {
2940 static int parse_assignment(struct trace_array
*tr
,
2941 char *str
, struct hist_trigger_attrs
*attrs
)
2945 if ((len
= str_has_prefix(str
, "key=")) ||
2946 (len
= str_has_prefix(str
, "keys="))) {
2947 attrs
->keys_str
= kstrdup(str
+ len
, GFP_KERNEL
);
2948 if (!attrs
->keys_str
) {
2952 } else if ((len
= str_has_prefix(str
, "val=")) ||
2953 (len
= str_has_prefix(str
, "vals=")) ||
2954 (len
= str_has_prefix(str
, "values="))) {
2955 attrs
->vals_str
= kstrdup(str
+ len
, GFP_KERNEL
);
2956 if (!attrs
->vals_str
) {
2960 } else if ((len
= str_has_prefix(str
, "sort="))) {
2961 attrs
->sort_key_str
= kstrdup(str
+ len
, GFP_KERNEL
);
2962 if (!attrs
->sort_key_str
) {
2966 } else if (str_has_prefix(str
, "name=")) {
2967 attrs
->name
= kstrdup(str
, GFP_KERNEL
);
2972 } else if ((len
= str_has_prefix(str
, "clock="))) {
2975 str
= strstrip(str
);
2976 attrs
->clock
= kstrdup(str
, GFP_KERNEL
);
2977 if (!attrs
->clock
) {
2981 } else if ((len
= str_has_prefix(str
, "size="))) {
2982 int map_bits
= parse_map_size(str
+ len
);
2988 attrs
->map_bits
= map_bits
;
2992 if (attrs
->n_assignments
== TRACING_MAP_VARS_MAX
) {
2993 hist_err(tr
, HIST_ERR_TOO_MANY_VARS
, errpos(str
));
2998 assignment
= kstrdup(str
, GFP_KERNEL
);
3004 attrs
->assignment_str
[attrs
->n_assignments
++] = assignment
;
3010 static struct hist_trigger_attrs
*
3011 parse_hist_trigger_attrs(struct trace_array
*tr
, char *trigger_str
)
3013 struct hist_trigger_attrs
*attrs
;
3016 attrs
= kzalloc(sizeof(*attrs
), GFP_KERNEL
);
3018 return ERR_PTR(-ENOMEM
);
3020 while (trigger_str
) {
3021 char *str
= strsep(&trigger_str
, ":");
3024 rhs
= strchr(str
, '=');
3026 if (!strlen(++rhs
)) {
3028 hist_err(tr
, HIST_ERR_EMPTY_ASSIGNMENT
, errpos(str
));
3031 ret
= parse_assignment(tr
, str
, attrs
);
3034 } else if (strcmp(str
, "pause") == 0)
3035 attrs
->pause
= true;
3036 else if ((strcmp(str
, "cont") == 0) ||
3037 (strcmp(str
, "continue") == 0))
3039 else if (strcmp(str
, "clear") == 0)
3040 attrs
->clear
= true;
3042 ret
= parse_action(str
, attrs
);
3048 if (!attrs
->keys_str
) {
3053 if (!attrs
->clock
) {
3054 attrs
->clock
= kstrdup("global", GFP_KERNEL
);
3055 if (!attrs
->clock
) {
3063 destroy_hist_trigger_attrs(attrs
);
3065 return ERR_PTR(ret
);
3068 static inline void save_comm(char *comm
, struct task_struct
*task
)
3071 strcpy(comm
, "<idle>");
3075 if (WARN_ON_ONCE(task
->pid
< 0)) {
3076 strcpy(comm
, "<XXX>");
3080 strncpy(comm
, task
->comm
, TASK_COMM_LEN
);
3083 static void hist_elt_data_free(struct hist_elt_data
*elt_data
)
3087 for (i
= 0; i
< SYNTH_FIELDS_MAX
; i
++)
3088 kfree(elt_data
->field_var_str
[i
]);
3090 kfree(elt_data
->comm
);
3094 static void hist_trigger_elt_data_free(struct tracing_map_elt
*elt
)
3096 struct hist_elt_data
*elt_data
= elt
->private_data
;
3098 hist_elt_data_free(elt_data
);
3101 static int hist_trigger_elt_data_alloc(struct tracing_map_elt
*elt
)
3103 struct hist_trigger_data
*hist_data
= elt
->map
->private_data
;
3104 unsigned int size
= TASK_COMM_LEN
;
3105 struct hist_elt_data
*elt_data
;
3106 struct hist_field
*key_field
;
3107 unsigned int i
, n_str
;
3109 elt_data
= kzalloc(sizeof(*elt_data
), GFP_KERNEL
);
3113 for_each_hist_key_field(i
, hist_data
) {
3114 key_field
= hist_data
->fields
[i
];
3116 if (key_field
->flags
& HIST_FIELD_FL_EXECNAME
) {
3117 elt_data
->comm
= kzalloc(size
, GFP_KERNEL
);
3118 if (!elt_data
->comm
) {
3126 n_str
= hist_data
->n_field_var_str
+ hist_data
->n_save_var_str
;
3128 size
= STR_VAR_LEN_MAX
;
3130 for (i
= 0; i
< n_str
; i
++) {
3131 elt_data
->field_var_str
[i
] = kzalloc(size
, GFP_KERNEL
);
3132 if (!elt_data
->field_var_str
[i
]) {
3133 hist_elt_data_free(elt_data
);
3138 elt
->private_data
= elt_data
;
3143 static void hist_trigger_elt_data_init(struct tracing_map_elt
*elt
)
3145 struct hist_elt_data
*elt_data
= elt
->private_data
;
3148 save_comm(elt_data
->comm
, current
);
3151 static const struct tracing_map_ops hist_trigger_elt_data_ops
= {
3152 .elt_alloc
= hist_trigger_elt_data_alloc
,
3153 .elt_free
= hist_trigger_elt_data_free
,
3154 .elt_init
= hist_trigger_elt_data_init
,
3157 static const char *get_hist_field_flags(struct hist_field
*hist_field
)
3159 const char *flags_str
= NULL
;
3161 if (hist_field
->flags
& HIST_FIELD_FL_HEX
)
3163 else if (hist_field
->flags
& HIST_FIELD_FL_SYM
)
3165 else if (hist_field
->flags
& HIST_FIELD_FL_SYM_OFFSET
)
3166 flags_str
= "sym-offset";
3167 else if (hist_field
->flags
& HIST_FIELD_FL_EXECNAME
)
3168 flags_str
= "execname";
3169 else if (hist_field
->flags
& HIST_FIELD_FL_SYSCALL
)
3170 flags_str
= "syscall";
3171 else if (hist_field
->flags
& HIST_FIELD_FL_LOG2
)
3173 else if (hist_field
->flags
& HIST_FIELD_FL_TIMESTAMP_USECS
)
3174 flags_str
= "usecs";
3179 static void expr_field_str(struct hist_field
*field
, char *expr
)
3181 if (field
->flags
& HIST_FIELD_FL_VAR_REF
)
3184 strcat(expr
, hist_field_name(field
, 0));
3186 if (field
->flags
&& !(field
->flags
& HIST_FIELD_FL_VAR_REF
)) {
3187 const char *flags_str
= get_hist_field_flags(field
);
3191 strcat(expr
, flags_str
);
3196 static char *expr_str(struct hist_field
*field
, unsigned int level
)
3203 expr
= kzalloc(MAX_FILTER_STR_VAL
, GFP_KERNEL
);
3207 if (!field
->operands
[0]) {
3208 expr_field_str(field
, expr
);
3212 if (field
->operator == FIELD_OP_UNARY_MINUS
) {
3216 subexpr
= expr_str(field
->operands
[0], ++level
);
3221 strcat(expr
, subexpr
);
3229 expr_field_str(field
->operands
[0], expr
);
3231 switch (field
->operator) {
3232 case FIELD_OP_MINUS
:
3243 expr_field_str(field
->operands
[1], expr
);
3248 static int contains_operator(char *str
)
3250 enum field_op_id field_op
= FIELD_OP_NONE
;
3253 op
= strpbrk(str
, "+-");
3255 return FIELD_OP_NONE
;
3260 field_op
= FIELD_OP_UNARY_MINUS
;
3262 field_op
= FIELD_OP_MINUS
;
3265 field_op
= FIELD_OP_PLUS
;
3274 static void get_hist_field(struct hist_field
*hist_field
)
3279 static void __destroy_hist_field(struct hist_field
*hist_field
)
3281 if (--hist_field
->ref
> 1)
3284 kfree(hist_field
->var
.name
);
3285 kfree(hist_field
->name
);
3286 kfree(hist_field
->type
);
3291 static void destroy_hist_field(struct hist_field
*hist_field
,
3302 if (hist_field
->flags
& HIST_FIELD_FL_VAR_REF
)
3303 return; /* var refs will be destroyed separately */
3305 for (i
= 0; i
< HIST_FIELD_OPERANDS_MAX
; i
++)
3306 destroy_hist_field(hist_field
->operands
[i
], level
+ 1);
3308 __destroy_hist_field(hist_field
);
3311 static struct hist_field
*create_hist_field(struct hist_trigger_data
*hist_data
,
3312 struct ftrace_event_field
*field
,
3313 unsigned long flags
,
3316 struct hist_field
*hist_field
;
3318 if (field
&& is_function_field(field
))
3321 hist_field
= kzalloc(sizeof(struct hist_field
), GFP_KERNEL
);
3325 hist_field
->ref
= 1;
3327 hist_field
->hist_data
= hist_data
;
3329 if (flags
& HIST_FIELD_FL_EXPR
|| flags
& HIST_FIELD_FL_ALIAS
)
3330 goto out
; /* caller will populate */
3332 if (flags
& HIST_FIELD_FL_VAR_REF
) {
3333 hist_field
->fn
= hist_field_var_ref
;
3337 if (flags
& HIST_FIELD_FL_HITCOUNT
) {
3338 hist_field
->fn
= hist_field_counter
;
3339 hist_field
->size
= sizeof(u64
);
3340 hist_field
->type
= kstrdup("u64", GFP_KERNEL
);
3341 if (!hist_field
->type
)
3346 if (flags
& HIST_FIELD_FL_STACKTRACE
) {
3347 hist_field
->fn
= hist_field_none
;
3351 if (flags
& HIST_FIELD_FL_LOG2
) {
3352 unsigned long fl
= flags
& ~HIST_FIELD_FL_LOG2
;
3353 hist_field
->fn
= hist_field_log2
;
3354 hist_field
->operands
[0] = create_hist_field(hist_data
, field
, fl
, NULL
);
3355 hist_field
->size
= hist_field
->operands
[0]->size
;
3356 hist_field
->type
= kstrdup(hist_field
->operands
[0]->type
, GFP_KERNEL
);
3357 if (!hist_field
->type
)
3362 if (flags
& HIST_FIELD_FL_TIMESTAMP
) {
3363 hist_field
->fn
= hist_field_timestamp
;
3364 hist_field
->size
= sizeof(u64
);
3365 hist_field
->type
= kstrdup("u64", GFP_KERNEL
);
3366 if (!hist_field
->type
)
3371 if (flags
& HIST_FIELD_FL_CPU
) {
3372 hist_field
->fn
= hist_field_cpu
;
3373 hist_field
->size
= sizeof(int);
3374 hist_field
->type
= kstrdup("unsigned int", GFP_KERNEL
);
3375 if (!hist_field
->type
)
3380 if (WARN_ON_ONCE(!field
))
3383 if (is_string_field(field
)) {
3384 flags
|= HIST_FIELD_FL_STRING
;
3386 hist_field
->size
= MAX_FILTER_STR_VAL
;
3387 hist_field
->type
= kstrdup(field
->type
, GFP_KERNEL
);
3388 if (!hist_field
->type
)
3391 if (field
->filter_type
== FILTER_STATIC_STRING
)
3392 hist_field
->fn
= hist_field_string
;
3393 else if (field
->filter_type
== FILTER_DYN_STRING
)
3394 hist_field
->fn
= hist_field_dynstring
;
3396 hist_field
->fn
= hist_field_pstring
;
3398 hist_field
->size
= field
->size
;
3399 hist_field
->is_signed
= field
->is_signed
;
3400 hist_field
->type
= kstrdup(field
->type
, GFP_KERNEL
);
3401 if (!hist_field
->type
)
3404 hist_field
->fn
= select_value_fn(field
->size
,
3406 if (!hist_field
->fn
) {
3407 destroy_hist_field(hist_field
, 0);
3412 hist_field
->field
= field
;
3413 hist_field
->flags
= flags
;
3416 hist_field
->var
.name
= kstrdup(var_name
, GFP_KERNEL
);
3417 if (!hist_field
->var
.name
)
3423 destroy_hist_field(hist_field
, 0);
3427 static void destroy_hist_fields(struct hist_trigger_data
*hist_data
)
3431 for (i
= 0; i
< HIST_FIELDS_MAX
; i
++) {
3432 if (hist_data
->fields
[i
]) {
3433 destroy_hist_field(hist_data
->fields
[i
], 0);
3434 hist_data
->fields
[i
] = NULL
;
3438 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
3439 WARN_ON(!(hist_data
->var_refs
[i
]->flags
& HIST_FIELD_FL_VAR_REF
));
3440 __destroy_hist_field(hist_data
->var_refs
[i
]);
3441 hist_data
->var_refs
[i
] = NULL
;
3445 static int init_var_ref(struct hist_field
*ref_field
,
3446 struct hist_field
*var_field
,
3447 char *system
, char *event_name
)
3451 ref_field
->var
.idx
= var_field
->var
.idx
;
3452 ref_field
->var
.hist_data
= var_field
->hist_data
;
3453 ref_field
->size
= var_field
->size
;
3454 ref_field
->is_signed
= var_field
->is_signed
;
3455 ref_field
->flags
|= var_field
->flags
&
3456 (HIST_FIELD_FL_TIMESTAMP
| HIST_FIELD_FL_TIMESTAMP_USECS
);
3459 ref_field
->system
= kstrdup(system
, GFP_KERNEL
);
3460 if (!ref_field
->system
)
3465 ref_field
->event_name
= kstrdup(event_name
, GFP_KERNEL
);
3466 if (!ref_field
->event_name
) {
3472 if (var_field
->var
.name
) {
3473 ref_field
->name
= kstrdup(var_field
->var
.name
, GFP_KERNEL
);
3474 if (!ref_field
->name
) {
3478 } else if (var_field
->name
) {
3479 ref_field
->name
= kstrdup(var_field
->name
, GFP_KERNEL
);
3480 if (!ref_field
->name
) {
3486 ref_field
->type
= kstrdup(var_field
->type
, GFP_KERNEL
);
3487 if (!ref_field
->type
) {
3494 kfree(ref_field
->system
);
3495 kfree(ref_field
->event_name
);
3496 kfree(ref_field
->name
);
3501 static int find_var_ref_idx(struct hist_trigger_data
*hist_data
,
3502 struct hist_field
*var_field
)
3504 struct hist_field
*ref_field
;
3507 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
3508 ref_field
= hist_data
->var_refs
[i
];
3509 if (ref_field
->var
.idx
== var_field
->var
.idx
&&
3510 ref_field
->var
.hist_data
== var_field
->hist_data
)
3518 * create_var_ref - Create a variable reference and attach it to trigger
3519 * @hist_data: The trigger that will be referencing the variable
3520 * @var_field: The VAR field to create a reference to
3521 * @system: The optional system string
3522 * @event_name: The optional event_name string
3524 * Given a variable hist_field, create a VAR_REF hist_field that
3525 * represents a reference to it.
3527 * This function also adds the reference to the trigger that
3528 * now references the variable.
3530 * Return: The VAR_REF field if successful, NULL if not
3532 static struct hist_field
*create_var_ref(struct hist_trigger_data
*hist_data
,
3533 struct hist_field
*var_field
,
3534 char *system
, char *event_name
)
3536 unsigned long flags
= HIST_FIELD_FL_VAR_REF
;
3537 struct hist_field
*ref_field
;
3540 /* Check if the variable already exists */
3541 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
3542 ref_field
= hist_data
->var_refs
[i
];
3543 if (ref_field
->var
.idx
== var_field
->var
.idx
&&
3544 ref_field
->var
.hist_data
== var_field
->hist_data
) {
3545 get_hist_field(ref_field
);
3550 ref_field
= create_hist_field(var_field
->hist_data
, NULL
, flags
, NULL
);
3552 if (init_var_ref(ref_field
, var_field
, system
, event_name
)) {
3553 destroy_hist_field(ref_field
, 0);
3557 hist_data
->var_refs
[hist_data
->n_var_refs
] = ref_field
;
3558 ref_field
->var_ref_idx
= hist_data
->n_var_refs
++;
3564 static bool is_var_ref(char *var_name
)
3566 if (!var_name
|| strlen(var_name
) < 2 || var_name
[0] != '$')
3572 static char *field_name_from_var(struct hist_trigger_data
*hist_data
,
3578 for (i
= 0; i
< hist_data
->attrs
->var_defs
.n_vars
; i
++) {
3579 name
= hist_data
->attrs
->var_defs
.name
[i
];
3581 if (strcmp(var_name
, name
) == 0) {
3582 field
= hist_data
->attrs
->var_defs
.expr
[i
];
3583 if (contains_operator(field
) || is_var_ref(field
))
3592 static char *local_field_var_ref(struct hist_trigger_data
*hist_data
,
3593 char *system
, char *event_name
,
3596 struct trace_event_call
*call
;
3598 if (system
&& event_name
) {
3599 call
= hist_data
->event_file
->event_call
;
3601 if (strcmp(system
, call
->class->system
) != 0)
3604 if (strcmp(event_name
, trace_event_name(call
)) != 0)
3608 if (!!system
!= !!event_name
)
3611 if (!is_var_ref(var_name
))
3616 return field_name_from_var(hist_data
, var_name
);
3619 static struct hist_field
*parse_var_ref(struct hist_trigger_data
*hist_data
,
3620 char *system
, char *event_name
,
3623 struct hist_field
*var_field
= NULL
, *ref_field
= NULL
;
3624 struct trace_array
*tr
= hist_data
->event_file
->tr
;
3626 if (!is_var_ref(var_name
))
3631 var_field
= find_event_var(hist_data
, system
, event_name
, var_name
);
3633 ref_field
= create_var_ref(hist_data
, var_field
,
3634 system
, event_name
);
3637 hist_err(tr
, HIST_ERR_VAR_NOT_FOUND
, errpos(var_name
));
3642 static struct ftrace_event_field
*
3643 parse_field(struct hist_trigger_data
*hist_data
, struct trace_event_file
*file
,
3644 char *field_str
, unsigned long *flags
)
3646 struct ftrace_event_field
*field
= NULL
;
3647 char *field_name
, *modifier
, *str
;
3648 struct trace_array
*tr
= file
->tr
;
3650 modifier
= str
= kstrdup(field_str
, GFP_KERNEL
);
3652 return ERR_PTR(-ENOMEM
);
3654 field_name
= strsep(&modifier
, ".");
3656 if (strcmp(modifier
, "hex") == 0)
3657 *flags
|= HIST_FIELD_FL_HEX
;
3658 else if (strcmp(modifier
, "sym") == 0)
3659 *flags
|= HIST_FIELD_FL_SYM
;
3660 else if (strcmp(modifier
, "sym-offset") == 0)
3661 *flags
|= HIST_FIELD_FL_SYM_OFFSET
;
3662 else if ((strcmp(modifier
, "execname") == 0) &&
3663 (strcmp(field_name
, "common_pid") == 0))
3664 *flags
|= HIST_FIELD_FL_EXECNAME
;
3665 else if (strcmp(modifier
, "syscall") == 0)
3666 *flags
|= HIST_FIELD_FL_SYSCALL
;
3667 else if (strcmp(modifier
, "log2") == 0)
3668 *flags
|= HIST_FIELD_FL_LOG2
;
3669 else if (strcmp(modifier
, "usecs") == 0)
3670 *flags
|= HIST_FIELD_FL_TIMESTAMP_USECS
;
3672 hist_err(tr
, HIST_ERR_BAD_FIELD_MODIFIER
, errpos(modifier
));
3673 field
= ERR_PTR(-EINVAL
);
3678 if (strcmp(field_name
, "common_timestamp") == 0) {
3679 *flags
|= HIST_FIELD_FL_TIMESTAMP
;
3680 hist_data
->enable_timestamps
= true;
3681 if (*flags
& HIST_FIELD_FL_TIMESTAMP_USECS
)
3682 hist_data
->attrs
->ts_in_usecs
= true;
3683 } else if (strcmp(field_name
, "cpu") == 0)
3684 *flags
|= HIST_FIELD_FL_CPU
;
3686 field
= trace_find_event_field(file
->event_call
, field_name
);
3687 if (!field
|| !field
->size
) {
3688 hist_err(tr
, HIST_ERR_FIELD_NOT_FOUND
, errpos(field_name
));
3689 field
= ERR_PTR(-EINVAL
);
3699 static struct hist_field
*create_alias(struct hist_trigger_data
*hist_data
,
3700 struct hist_field
*var_ref
,
3703 struct hist_field
*alias
= NULL
;
3704 unsigned long flags
= HIST_FIELD_FL_ALIAS
| HIST_FIELD_FL_VAR
;
3706 alias
= create_hist_field(hist_data
, NULL
, flags
, var_name
);
3710 alias
->fn
= var_ref
->fn
;
3711 alias
->operands
[0] = var_ref
;
3713 if (init_var_ref(alias
, var_ref
, var_ref
->system
, var_ref
->event_name
)) {
3714 destroy_hist_field(alias
, 0);
3718 alias
->var_ref_idx
= var_ref
->var_ref_idx
;
3723 static struct hist_field
*parse_atom(struct hist_trigger_data
*hist_data
,
3724 struct trace_event_file
*file
, char *str
,
3725 unsigned long *flags
, char *var_name
)
3727 char *s
, *ref_system
= NULL
, *ref_event
= NULL
, *ref_var
= str
;
3728 struct ftrace_event_field
*field
= NULL
;
3729 struct hist_field
*hist_field
= NULL
;
3732 s
= strchr(str
, '.');
3734 s
= strchr(++s
, '.');
3736 ref_system
= strsep(&str
, ".");
3741 ref_event
= strsep(&str
, ".");
3750 s
= local_field_var_ref(hist_data
, ref_system
, ref_event
, ref_var
);
3752 hist_field
= parse_var_ref(hist_data
, ref_system
,
3753 ref_event
, ref_var
);
3756 hist_field
= create_alias(hist_data
, hist_field
, var_name
);
3767 field
= parse_field(hist_data
, file
, str
, flags
);
3768 if (IS_ERR(field
)) {
3769 ret
= PTR_ERR(field
);
3773 hist_field
= create_hist_field(hist_data
, field
, *flags
, var_name
);
3781 return ERR_PTR(ret
);
3784 static struct hist_field
*parse_expr(struct hist_trigger_data
*hist_data
,
3785 struct trace_event_file
*file
,
3786 char *str
, unsigned long flags
,
3787 char *var_name
, unsigned int level
);
3789 static struct hist_field
*parse_unary(struct hist_trigger_data
*hist_data
,
3790 struct trace_event_file
*file
,
3791 char *str
, unsigned long flags
,
3792 char *var_name
, unsigned int level
)
3794 struct hist_field
*operand1
, *expr
= NULL
;
3795 unsigned long operand_flags
;
3799 /* we support only -(xxx) i.e. explicit parens required */
3802 hist_err(file
->tr
, HIST_ERR_TOO_MANY_SUBEXPR
, errpos(str
));
3807 str
++; /* skip leading '-' */
3809 s
= strchr(str
, '(');
3817 s
= strrchr(str
, ')');
3821 ret
= -EINVAL
; /* no closing ')' */
3825 flags
|= HIST_FIELD_FL_EXPR
;
3826 expr
= create_hist_field(hist_data
, NULL
, flags
, var_name
);
3833 operand1
= parse_expr(hist_data
, file
, str
, operand_flags
, NULL
, ++level
);
3834 if (IS_ERR(operand1
)) {
3835 ret
= PTR_ERR(operand1
);
3839 expr
->flags
|= operand1
->flags
&
3840 (HIST_FIELD_FL_TIMESTAMP
| HIST_FIELD_FL_TIMESTAMP_USECS
);
3841 expr
->fn
= hist_field_unary_minus
;
3842 expr
->operands
[0] = operand1
;
3843 expr
->operator = FIELD_OP_UNARY_MINUS
;
3844 expr
->name
= expr_str(expr
, 0);
3845 expr
->type
= kstrdup(operand1
->type
, GFP_KERNEL
);
3853 destroy_hist_field(expr
, 0);
3854 return ERR_PTR(ret
);
3857 static int check_expr_operands(struct trace_array
*tr
,
3858 struct hist_field
*operand1
,
3859 struct hist_field
*operand2
)
3861 unsigned long operand1_flags
= operand1
->flags
;
3862 unsigned long operand2_flags
= operand2
->flags
;
3864 if ((operand1_flags
& HIST_FIELD_FL_VAR_REF
) ||
3865 (operand1_flags
& HIST_FIELD_FL_ALIAS
)) {
3866 struct hist_field
*var
;
3868 var
= find_var_field(operand1
->var
.hist_data
, operand1
->name
);
3871 operand1_flags
= var
->flags
;
3874 if ((operand2_flags
& HIST_FIELD_FL_VAR_REF
) ||
3875 (operand2_flags
& HIST_FIELD_FL_ALIAS
)) {
3876 struct hist_field
*var
;
3878 var
= find_var_field(operand2
->var
.hist_data
, operand2
->name
);
3881 operand2_flags
= var
->flags
;
3884 if ((operand1_flags
& HIST_FIELD_FL_TIMESTAMP_USECS
) !=
3885 (operand2_flags
& HIST_FIELD_FL_TIMESTAMP_USECS
)) {
3886 hist_err(tr
, HIST_ERR_TIMESTAMP_MISMATCH
, 0);
3893 static struct hist_field
*parse_expr(struct hist_trigger_data
*hist_data
,
3894 struct trace_event_file
*file
,
3895 char *str
, unsigned long flags
,
3896 char *var_name
, unsigned int level
)
3898 struct hist_field
*operand1
= NULL
, *operand2
= NULL
, *expr
= NULL
;
3899 unsigned long operand_flags
;
3900 int field_op
, ret
= -EINVAL
;
3901 char *sep
, *operand1_str
;
3904 hist_err(file
->tr
, HIST_ERR_TOO_MANY_SUBEXPR
, errpos(str
));
3905 return ERR_PTR(-EINVAL
);
3908 field_op
= contains_operator(str
);
3910 if (field_op
== FIELD_OP_NONE
)
3911 return parse_atom(hist_data
, file
, str
, &flags
, var_name
);
3913 if (field_op
== FIELD_OP_UNARY_MINUS
)
3914 return parse_unary(hist_data
, file
, str
, flags
, var_name
, ++level
);
3917 case FIELD_OP_MINUS
:
3927 operand1_str
= strsep(&str
, sep
);
3928 if (!operand1_str
|| !str
)
3932 operand1
= parse_atom(hist_data
, file
, operand1_str
,
3933 &operand_flags
, NULL
);
3934 if (IS_ERR(operand1
)) {
3935 ret
= PTR_ERR(operand1
);
3940 /* rest of string could be another expression e.g. b+c in a+b+c */
3942 operand2
= parse_expr(hist_data
, file
, str
, operand_flags
, NULL
, ++level
);
3943 if (IS_ERR(operand2
)) {
3944 ret
= PTR_ERR(operand2
);
3949 ret
= check_expr_operands(file
->tr
, operand1
, operand2
);
3953 flags
|= HIST_FIELD_FL_EXPR
;
3955 flags
|= operand1
->flags
&
3956 (HIST_FIELD_FL_TIMESTAMP
| HIST_FIELD_FL_TIMESTAMP_USECS
);
3958 expr
= create_hist_field(hist_data
, NULL
, flags
, var_name
);
3964 operand1
->read_once
= true;
3965 operand2
->read_once
= true;
3967 expr
->operands
[0] = operand1
;
3968 expr
->operands
[1] = operand2
;
3969 expr
->operator = field_op
;
3970 expr
->name
= expr_str(expr
, 0);
3971 expr
->type
= kstrdup(operand1
->type
, GFP_KERNEL
);
3978 case FIELD_OP_MINUS
:
3979 expr
->fn
= hist_field_minus
;
3982 expr
->fn
= hist_field_plus
;
3991 destroy_hist_field(operand1
, 0);
3992 destroy_hist_field(operand2
, 0);
3993 destroy_hist_field(expr
, 0);
3995 return ERR_PTR(ret
);
3998 static char *find_trigger_filter(struct hist_trigger_data
*hist_data
,
3999 struct trace_event_file
*file
)
4001 struct event_trigger_data
*test
;
4003 lockdep_assert_held(&event_mutex
);
4005 list_for_each_entry(test
, &file
->triggers
, list
) {
4006 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
4007 if (test
->private_data
== hist_data
)
4008 return test
->filter_str
;
4015 static struct event_command trigger_hist_cmd
;
4016 static int event_hist_trigger_func(struct event_command
*cmd_ops
,
4017 struct trace_event_file
*file
,
4018 char *glob
, char *cmd
, char *param
);
4020 static bool compatible_keys(struct hist_trigger_data
*target_hist_data
,
4021 struct hist_trigger_data
*hist_data
,
4022 unsigned int n_keys
)
4024 struct hist_field
*target_hist_field
, *hist_field
;
4025 unsigned int n
, i
, j
;
4027 if (hist_data
->n_fields
- hist_data
->n_vals
!= n_keys
)
4030 i
= hist_data
->n_vals
;
4031 j
= target_hist_data
->n_vals
;
4033 for (n
= 0; n
< n_keys
; n
++) {
4034 hist_field
= hist_data
->fields
[i
+ n
];
4035 target_hist_field
= target_hist_data
->fields
[j
+ n
];
4037 if (strcmp(hist_field
->type
, target_hist_field
->type
) != 0)
4039 if (hist_field
->size
!= target_hist_field
->size
)
4041 if (hist_field
->is_signed
!= target_hist_field
->is_signed
)
4048 static struct hist_trigger_data
*
4049 find_compatible_hist(struct hist_trigger_data
*target_hist_data
,
4050 struct trace_event_file
*file
)
4052 struct hist_trigger_data
*hist_data
;
4053 struct event_trigger_data
*test
;
4054 unsigned int n_keys
;
4056 lockdep_assert_held(&event_mutex
);
4058 n_keys
= target_hist_data
->n_fields
- target_hist_data
->n_vals
;
4060 list_for_each_entry(test
, &file
->triggers
, list
) {
4061 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
4062 hist_data
= test
->private_data
;
4064 if (compatible_keys(target_hist_data
, hist_data
, n_keys
))
4072 static struct trace_event_file
*event_file(struct trace_array
*tr
,
4073 char *system
, char *event_name
)
4075 struct trace_event_file
*file
;
4077 file
= __find_event_file(tr
, system
, event_name
);
4079 return ERR_PTR(-EINVAL
);
4084 static struct hist_field
*
4085 find_synthetic_field_var(struct hist_trigger_data
*target_hist_data
,
4086 char *system
, char *event_name
, char *field_name
)
4088 struct hist_field
*event_var
;
4089 char *synthetic_name
;
4091 synthetic_name
= kzalloc(MAX_FILTER_STR_VAL
, GFP_KERNEL
);
4092 if (!synthetic_name
)
4093 return ERR_PTR(-ENOMEM
);
4095 strcpy(synthetic_name
, "synthetic_");
4096 strcat(synthetic_name
, field_name
);
4098 event_var
= find_event_var(target_hist_data
, system
, event_name
, synthetic_name
);
4100 kfree(synthetic_name
);
4106 * create_field_var_hist - Automatically create a histogram and var for a field
4107 * @target_hist_data: The target hist trigger
4108 * @subsys_name: Optional subsystem name
4109 * @event_name: Optional event name
4110 * @field_name: The name of the field (and the resulting variable)
4112 * Hist trigger actions fetch data from variables, not directly from
4113 * events. However, for convenience, users are allowed to directly
4114 * specify an event field in an action, which will be automatically
4115 * converted into a variable on their behalf.
4117 * If a user specifies a field on an event that isn't the event the
4118 * histogram currently being defined (the target event histogram), the
4119 * only way that can be accomplished is if a new hist trigger is
4120 * created and the field variable defined on that.
4122 * This function creates a new histogram compatible with the target
4123 * event (meaning a histogram with the same key as the target
4124 * histogram), and creates a variable for the specified field, but
4125 * with 'synthetic_' prepended to the variable name in order to avoid
4126 * collision with normal field variables.
4128 * Return: The variable created for the field.
4130 static struct hist_field
*
4131 create_field_var_hist(struct hist_trigger_data
*target_hist_data
,
4132 char *subsys_name
, char *event_name
, char *field_name
)
4134 struct trace_array
*tr
= target_hist_data
->event_file
->tr
;
4135 struct hist_field
*event_var
= ERR_PTR(-EINVAL
);
4136 struct hist_trigger_data
*hist_data
;
4137 unsigned int i
, n
, first
= true;
4138 struct field_var_hist
*var_hist
;
4139 struct trace_event_file
*file
;
4140 struct hist_field
*key_field
;
4145 if (target_hist_data
->n_field_var_hists
>= SYNTH_FIELDS_MAX
) {
4146 hist_err(tr
, HIST_ERR_TOO_MANY_FIELD_VARS
, errpos(field_name
));
4147 return ERR_PTR(-EINVAL
);
4150 file
= event_file(tr
, subsys_name
, event_name
);
4153 hist_err(tr
, HIST_ERR_EVENT_FILE_NOT_FOUND
, errpos(field_name
));
4154 ret
= PTR_ERR(file
);
4155 return ERR_PTR(ret
);
4159 * Look for a histogram compatible with target. We'll use the
4160 * found histogram specification to create a new matching
4161 * histogram with our variable on it. target_hist_data is not
4162 * yet a registered histogram so we can't use that.
4164 hist_data
= find_compatible_hist(target_hist_data
, file
);
4166 hist_err(tr
, HIST_ERR_HIST_NOT_FOUND
, errpos(field_name
));
4167 return ERR_PTR(-EINVAL
);
4170 /* See if a synthetic field variable has already been created */
4171 event_var
= find_synthetic_field_var(target_hist_data
, subsys_name
,
4172 event_name
, field_name
);
4173 if (!IS_ERR_OR_NULL(event_var
))
4176 var_hist
= kzalloc(sizeof(*var_hist
), GFP_KERNEL
);
4178 return ERR_PTR(-ENOMEM
);
4180 cmd
= kzalloc(MAX_FILTER_STR_VAL
, GFP_KERNEL
);
4183 return ERR_PTR(-ENOMEM
);
4186 /* Use the same keys as the compatible histogram */
4187 strcat(cmd
, "keys=");
4189 for_each_hist_key_field(i
, hist_data
) {
4190 key_field
= hist_data
->fields
[i
];
4193 strcat(cmd
, key_field
->field
->name
);
4197 /* Create the synthetic field variable specification */
4198 strcat(cmd
, ":synthetic_");
4199 strcat(cmd
, field_name
);
4201 strcat(cmd
, field_name
);
4203 /* Use the same filter as the compatible histogram */
4204 saved_filter
= find_trigger_filter(hist_data
, file
);
4206 strcat(cmd
, " if ");
4207 strcat(cmd
, saved_filter
);
4210 var_hist
->cmd
= kstrdup(cmd
, GFP_KERNEL
);
4211 if (!var_hist
->cmd
) {
4214 return ERR_PTR(-ENOMEM
);
4217 /* Save the compatible histogram information */
4218 var_hist
->hist_data
= hist_data
;
4220 /* Create the new histogram with our variable */
4221 ret
= event_hist_trigger_func(&trigger_hist_cmd
, file
,
4225 kfree(var_hist
->cmd
);
4227 hist_err(tr
, HIST_ERR_HIST_CREATE_FAIL
, errpos(field_name
));
4228 return ERR_PTR(ret
);
4233 /* If we can't find the variable, something went wrong */
4234 event_var
= find_synthetic_field_var(target_hist_data
, subsys_name
,
4235 event_name
, field_name
);
4236 if (IS_ERR_OR_NULL(event_var
)) {
4237 kfree(var_hist
->cmd
);
4239 hist_err(tr
, HIST_ERR_SYNTH_VAR_NOT_FOUND
, errpos(field_name
));
4240 return ERR_PTR(-EINVAL
);
4243 n
= target_hist_data
->n_field_var_hists
;
4244 target_hist_data
->field_var_hists
[n
] = var_hist
;
4245 target_hist_data
->n_field_var_hists
++;
4250 static struct hist_field
*
4251 find_target_event_var(struct hist_trigger_data
*hist_data
,
4252 char *subsys_name
, char *event_name
, char *var_name
)
4254 struct trace_event_file
*file
= hist_data
->event_file
;
4255 struct hist_field
*hist_field
= NULL
;
4258 struct trace_event_call
*call
;
4263 call
= file
->event_call
;
4265 if (strcmp(subsys_name
, call
->class->system
) != 0)
4268 if (strcmp(event_name
, trace_event_name(call
)) != 0)
4272 hist_field
= find_var_field(hist_data
, var_name
);
4277 static inline void __update_field_vars(struct tracing_map_elt
*elt
,
4278 struct ring_buffer_event
*rbe
,
4280 struct field_var
**field_vars
,
4281 unsigned int n_field_vars
,
4282 unsigned int field_var_str_start
)
4284 struct hist_elt_data
*elt_data
= elt
->private_data
;
4285 unsigned int i
, j
, var_idx
;
4288 for (i
= 0, j
= field_var_str_start
; i
< n_field_vars
; i
++) {
4289 struct field_var
*field_var
= field_vars
[i
];
4290 struct hist_field
*var
= field_var
->var
;
4291 struct hist_field
*val
= field_var
->val
;
4293 var_val
= val
->fn(val
, elt
, rbe
, rec
);
4294 var_idx
= var
->var
.idx
;
4296 if (val
->flags
& HIST_FIELD_FL_STRING
) {
4297 char *str
= elt_data
->field_var_str
[j
++];
4298 char *val_str
= (char *)(uintptr_t)var_val
;
4300 strscpy(str
, val_str
, STR_VAR_LEN_MAX
);
4301 var_val
= (u64
)(uintptr_t)str
;
4303 tracing_map_set_var(elt
, var_idx
, var_val
);
4307 static void update_field_vars(struct hist_trigger_data
*hist_data
,
4308 struct tracing_map_elt
*elt
,
4309 struct ring_buffer_event
*rbe
,
4312 __update_field_vars(elt
, rbe
, rec
, hist_data
->field_vars
,
4313 hist_data
->n_field_vars
, 0);
4316 static void save_track_data_vars(struct hist_trigger_data
*hist_data
,
4317 struct tracing_map_elt
*elt
, void *rec
,
4318 struct ring_buffer_event
*rbe
, void *key
,
4319 struct action_data
*data
, u64
*var_ref_vals
)
4321 __update_field_vars(elt
, rbe
, rec
, hist_data
->save_vars
,
4322 hist_data
->n_save_vars
, hist_data
->n_field_var_str
);
4325 static struct hist_field
*create_var(struct hist_trigger_data
*hist_data
,
4326 struct trace_event_file
*file
,
4327 char *name
, int size
, const char *type
)
4329 struct hist_field
*var
;
4332 if (find_var(hist_data
, file
, name
) && !hist_data
->remove
) {
4333 var
= ERR_PTR(-EINVAL
);
4337 var
= kzalloc(sizeof(struct hist_field
), GFP_KERNEL
);
4339 var
= ERR_PTR(-ENOMEM
);
4343 idx
= tracing_map_add_var(hist_data
->map
);
4346 var
= ERR_PTR(-EINVAL
);
4350 var
->flags
= HIST_FIELD_FL_VAR
;
4352 var
->var
.hist_data
= var
->hist_data
= hist_data
;
4354 var
->var
.name
= kstrdup(name
, GFP_KERNEL
);
4355 var
->type
= kstrdup(type
, GFP_KERNEL
);
4356 if (!var
->var
.name
|| !var
->type
) {
4357 kfree(var
->var
.name
);
4360 var
= ERR_PTR(-ENOMEM
);
4366 static struct field_var
*create_field_var(struct hist_trigger_data
*hist_data
,
4367 struct trace_event_file
*file
,
4370 struct hist_field
*val
= NULL
, *var
= NULL
;
4371 unsigned long flags
= HIST_FIELD_FL_VAR
;
4372 struct trace_array
*tr
= file
->tr
;
4373 struct field_var
*field_var
;
4376 if (hist_data
->n_field_vars
>= SYNTH_FIELDS_MAX
) {
4377 hist_err(tr
, HIST_ERR_TOO_MANY_FIELD_VARS
, errpos(field_name
));
4382 val
= parse_atom(hist_data
, file
, field_name
, &flags
, NULL
);
4384 hist_err(tr
, HIST_ERR_FIELD_VAR_PARSE_FAIL
, errpos(field_name
));
4389 var
= create_var(hist_data
, file
, field_name
, val
->size
, val
->type
);
4391 hist_err(tr
, HIST_ERR_VAR_CREATE_FIND_FAIL
, errpos(field_name
));
4397 field_var
= kzalloc(sizeof(struct field_var
), GFP_KERNEL
);
4405 field_var
->var
= var
;
4406 field_var
->val
= val
;
4410 field_var
= ERR_PTR(ret
);
4415 * create_target_field_var - Automatically create a variable for a field
4416 * @target_hist_data: The target hist trigger
4417 * @subsys_name: Optional subsystem name
4418 * @event_name: Optional event name
4419 * @var_name: The name of the field (and the resulting variable)
4421 * Hist trigger actions fetch data from variables, not directly from
4422 * events. However, for convenience, users are allowed to directly
4423 * specify an event field in an action, which will be automatically
4424 * converted into a variable on their behalf.
4426 * This function creates a field variable with the name var_name on
4427 * the hist trigger currently being defined on the target event. If
4428 * subsys_name and event_name are specified, this function simply
4429 * verifies that they do in fact match the target event subsystem and
4432 * Return: The variable created for the field.
4434 static struct field_var
*
4435 create_target_field_var(struct hist_trigger_data
*target_hist_data
,
4436 char *subsys_name
, char *event_name
, char *var_name
)
4438 struct trace_event_file
*file
= target_hist_data
->event_file
;
4441 struct trace_event_call
*call
;
4446 call
= file
->event_call
;
4448 if (strcmp(subsys_name
, call
->class->system
) != 0)
4451 if (strcmp(event_name
, trace_event_name(call
)) != 0)
4455 return create_field_var(target_hist_data
, file
, var_name
);
4458 static bool check_track_val_max(u64 track_val
, u64 var_val
)
4460 if (var_val
<= track_val
)
4466 static bool check_track_val_changed(u64 track_val
, u64 var_val
)
4468 if (var_val
== track_val
)
4474 static u64
get_track_val(struct hist_trigger_data
*hist_data
,
4475 struct tracing_map_elt
*elt
,
4476 struct action_data
*data
)
4478 unsigned int track_var_idx
= data
->track_data
.track_var
->var
.idx
;
4481 track_val
= tracing_map_read_var(elt
, track_var_idx
);
4486 static void save_track_val(struct hist_trigger_data
*hist_data
,
4487 struct tracing_map_elt
*elt
,
4488 struct action_data
*data
, u64 var_val
)
4490 unsigned int track_var_idx
= data
->track_data
.track_var
->var
.idx
;
4492 tracing_map_set_var(elt
, track_var_idx
, var_val
);
4495 static void save_track_data(struct hist_trigger_data
*hist_data
,
4496 struct tracing_map_elt
*elt
, void *rec
,
4497 struct ring_buffer_event
*rbe
, void *key
,
4498 struct action_data
*data
, u64
*var_ref_vals
)
4500 if (data
->track_data
.save_data
)
4501 data
->track_data
.save_data(hist_data
, elt
, rec
, rbe
, key
, data
, var_ref_vals
);
4504 static bool check_track_val(struct tracing_map_elt
*elt
,
4505 struct action_data
*data
,
4508 struct hist_trigger_data
*hist_data
;
4511 hist_data
= data
->track_data
.track_var
->hist_data
;
4512 track_val
= get_track_val(hist_data
, elt
, data
);
4514 return data
->track_data
.check_val(track_val
, var_val
);
4517 #ifdef CONFIG_TRACER_SNAPSHOT
4518 static bool cond_snapshot_update(struct trace_array
*tr
, void *cond_data
)
4520 /* called with tr->max_lock held */
4521 struct track_data
*track_data
= tr
->cond_snapshot
->cond_data
;
4522 struct hist_elt_data
*elt_data
, *track_elt_data
;
4523 struct snapshot_context
*context
= cond_data
;
4524 struct action_data
*action
;
4530 action
= track_data
->action_data
;
4532 track_val
= get_track_val(track_data
->hist_data
, context
->elt
,
4533 track_data
->action_data
);
4535 if (!action
->track_data
.check_val(track_data
->track_val
, track_val
))
4538 track_data
->track_val
= track_val
;
4539 memcpy(track_data
->key
, context
->key
, track_data
->key_len
);
4541 elt_data
= context
->elt
->private_data
;
4542 track_elt_data
= track_data
->elt
.private_data
;
4544 strncpy(track_elt_data
->comm
, elt_data
->comm
, TASK_COMM_LEN
);
4546 track_data
->updated
= true;
4551 static void save_track_data_snapshot(struct hist_trigger_data
*hist_data
,
4552 struct tracing_map_elt
*elt
, void *rec
,
4553 struct ring_buffer_event
*rbe
, void *key
,
4554 struct action_data
*data
,
4557 struct trace_event_file
*file
= hist_data
->event_file
;
4558 struct snapshot_context context
;
4563 tracing_snapshot_cond(file
->tr
, &context
);
4566 static void hist_trigger_print_key(struct seq_file
*m
,
4567 struct hist_trigger_data
*hist_data
,
4569 struct tracing_map_elt
*elt
);
4571 static struct action_data
*snapshot_action(struct hist_trigger_data
*hist_data
)
4575 if (!hist_data
->n_actions
)
4578 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
4579 struct action_data
*data
= hist_data
->actions
[i
];
4581 if (data
->action
== ACTION_SNAPSHOT
)
4588 static void track_data_snapshot_print(struct seq_file
*m
,
4589 struct hist_trigger_data
*hist_data
)
4591 struct trace_event_file
*file
= hist_data
->event_file
;
4592 struct track_data
*track_data
;
4593 struct action_data
*action
;
4595 track_data
= tracing_cond_snapshot_data(file
->tr
);
4599 if (!track_data
->updated
)
4602 action
= snapshot_action(hist_data
);
4606 seq_puts(m
, "\nSnapshot taken (see tracing/snapshot). Details:\n");
4607 seq_printf(m
, "\ttriggering value { %s(%s) }: %10llu",
4608 action
->handler
== HANDLER_ONMAX
? "onmax" : "onchange",
4609 action
->track_data
.var_str
, track_data
->track_val
);
4611 seq_puts(m
, "\ttriggered by event with key: ");
4612 hist_trigger_print_key(m
, hist_data
, track_data
->key
, &track_data
->elt
);
4616 static bool cond_snapshot_update(struct trace_array
*tr
, void *cond_data
)
4620 static void save_track_data_snapshot(struct hist_trigger_data
*hist_data
,
4621 struct tracing_map_elt
*elt
, void *rec
,
4622 struct ring_buffer_event
*rbe
, void *key
,
4623 struct action_data
*data
,
4624 u64
*var_ref_vals
) {}
4625 static void track_data_snapshot_print(struct seq_file
*m
,
4626 struct hist_trigger_data
*hist_data
) {}
4627 #endif /* CONFIG_TRACER_SNAPSHOT */
4629 static void track_data_print(struct seq_file
*m
,
4630 struct hist_trigger_data
*hist_data
,
4631 struct tracing_map_elt
*elt
,
4632 struct action_data
*data
)
4634 u64 track_val
= get_track_val(hist_data
, elt
, data
);
4635 unsigned int i
, save_var_idx
;
4637 if (data
->handler
== HANDLER_ONMAX
)
4638 seq_printf(m
, "\n\tmax: %10llu", track_val
);
4639 else if (data
->handler
== HANDLER_ONCHANGE
)
4640 seq_printf(m
, "\n\tchanged: %10llu", track_val
);
4642 if (data
->action
== ACTION_SNAPSHOT
)
4645 for (i
= 0; i
< hist_data
->n_save_vars
; i
++) {
4646 struct hist_field
*save_val
= hist_data
->save_vars
[i
]->val
;
4647 struct hist_field
*save_var
= hist_data
->save_vars
[i
]->var
;
4650 save_var_idx
= save_var
->var
.idx
;
4652 val
= tracing_map_read_var(elt
, save_var_idx
);
4654 if (save_val
->flags
& HIST_FIELD_FL_STRING
) {
4655 seq_printf(m
, " %s: %-32s", save_var
->var
.name
,
4656 (char *)(uintptr_t)(val
));
4658 seq_printf(m
, " %s: %10llu", save_var
->var
.name
, val
);
4662 static void ontrack_action(struct hist_trigger_data
*hist_data
,
4663 struct tracing_map_elt
*elt
, void *rec
,
4664 struct ring_buffer_event
*rbe
, void *key
,
4665 struct action_data
*data
, u64
*var_ref_vals
)
4667 u64 var_val
= var_ref_vals
[data
->track_data
.var_ref
->var_ref_idx
];
4669 if (check_track_val(elt
, data
, var_val
)) {
4670 save_track_val(hist_data
, elt
, data
, var_val
);
4671 save_track_data(hist_data
, elt
, rec
, rbe
, key
, data
, var_ref_vals
);
4675 static void action_data_destroy(struct action_data
*data
)
4679 lockdep_assert_held(&event_mutex
);
4681 kfree(data
->action_name
);
4683 for (i
= 0; i
< data
->n_params
; i
++)
4684 kfree(data
->params
[i
]);
4686 if (data
->synth_event
)
4687 data
->synth_event
->ref
--;
4689 kfree(data
->synth_event_name
);
4694 static void track_data_destroy(struct hist_trigger_data
*hist_data
,
4695 struct action_data
*data
)
4697 struct trace_event_file
*file
= hist_data
->event_file
;
4699 destroy_hist_field(data
->track_data
.track_var
, 0);
4701 if (data
->action
== ACTION_SNAPSHOT
) {
4702 struct track_data
*track_data
;
4704 track_data
= tracing_cond_snapshot_data(file
->tr
);
4705 if (track_data
&& track_data
->hist_data
== hist_data
) {
4706 tracing_snapshot_cond_disable(file
->tr
);
4707 track_data_free(track_data
);
4711 kfree(data
->track_data
.var_str
);
4713 action_data_destroy(data
);
4716 static int action_create(struct hist_trigger_data
*hist_data
,
4717 struct action_data
*data
);
4719 static int track_data_create(struct hist_trigger_data
*hist_data
,
4720 struct action_data
*data
)
4722 struct hist_field
*var_field
, *ref_field
, *track_var
= NULL
;
4723 struct trace_event_file
*file
= hist_data
->event_file
;
4724 struct trace_array
*tr
= file
->tr
;
4725 char *track_data_var_str
;
4728 track_data_var_str
= data
->track_data
.var_str
;
4729 if (track_data_var_str
[0] != '$') {
4730 hist_err(tr
, HIST_ERR_ONX_NOT_VAR
, errpos(track_data_var_str
));
4733 track_data_var_str
++;
4735 var_field
= find_target_event_var(hist_data
, NULL
, NULL
, track_data_var_str
);
4737 hist_err(tr
, HIST_ERR_ONX_VAR_NOT_FOUND
, errpos(track_data_var_str
));
4741 ref_field
= create_var_ref(hist_data
, var_field
, NULL
, NULL
);
4745 data
->track_data
.var_ref
= ref_field
;
4747 if (data
->handler
== HANDLER_ONMAX
)
4748 track_var
= create_var(hist_data
, file
, "__max", sizeof(u64
), "u64");
4749 if (IS_ERR(track_var
)) {
4750 hist_err(tr
, HIST_ERR_ONX_VAR_CREATE_FAIL
, 0);
4751 ret
= PTR_ERR(track_var
);
4755 if (data
->handler
== HANDLER_ONCHANGE
)
4756 track_var
= create_var(hist_data
, file
, "__change", sizeof(u64
), "u64");
4757 if (IS_ERR(track_var
)) {
4758 hist_err(tr
, HIST_ERR_ONX_VAR_CREATE_FAIL
, 0);
4759 ret
= PTR_ERR(track_var
);
4762 data
->track_data
.track_var
= track_var
;
4764 ret
= action_create(hist_data
, data
);
4769 static int parse_action_params(struct trace_array
*tr
, char *params
,
4770 struct action_data
*data
)
4772 char *param
, *saved_param
;
4773 bool first_param
= true;
4777 if (data
->n_params
>= SYNTH_FIELDS_MAX
) {
4778 hist_err(tr
, HIST_ERR_TOO_MANY_PARAMS
, 0);
4782 param
= strsep(¶ms
, ",");
4784 hist_err(tr
, HIST_ERR_PARAM_NOT_FOUND
, 0);
4789 param
= strstrip(param
);
4790 if (strlen(param
) < 2) {
4791 hist_err(tr
, HIST_ERR_INVALID_PARAM
, errpos(param
));
4796 saved_param
= kstrdup(param
, GFP_KERNEL
);
4802 if (first_param
&& data
->use_trace_keyword
) {
4803 data
->synth_event_name
= saved_param
;
4804 first_param
= false;
4807 first_param
= false;
4809 data
->params
[data
->n_params
++] = saved_param
;
4815 static int action_parse(struct trace_array
*tr
, char *str
, struct action_data
*data
,
4816 enum handler_id handler
)
4823 hist_err(tr
, HIST_ERR_ACTION_NOT_FOUND
, 0);
4828 action_name
= strsep(&str
, "(");
4829 if (!action_name
|| !str
) {
4830 hist_err(tr
, HIST_ERR_ACTION_NOT_FOUND
, 0);
4835 if (str_has_prefix(action_name
, "save")) {
4836 char *params
= strsep(&str
, ")");
4839 hist_err(tr
, HIST_ERR_NO_SAVE_PARAMS
, 0);
4844 ret
= parse_action_params(tr
, params
, data
);
4848 if (handler
== HANDLER_ONMAX
)
4849 data
->track_data
.check_val
= check_track_val_max
;
4850 else if (handler
== HANDLER_ONCHANGE
)
4851 data
->track_data
.check_val
= check_track_val_changed
;
4853 hist_err(tr
, HIST_ERR_ACTION_MISMATCH
, errpos(action_name
));
4858 data
->track_data
.save_data
= save_track_data_vars
;
4859 data
->fn
= ontrack_action
;
4860 data
->action
= ACTION_SAVE
;
4861 } else if (str_has_prefix(action_name
, "snapshot")) {
4862 char *params
= strsep(&str
, ")");
4865 hist_err(tr
, HIST_ERR_NO_CLOSING_PAREN
, errpos(params
));
4870 if (handler
== HANDLER_ONMAX
)
4871 data
->track_data
.check_val
= check_track_val_max
;
4872 else if (handler
== HANDLER_ONCHANGE
)
4873 data
->track_data
.check_val
= check_track_val_changed
;
4875 hist_err(tr
, HIST_ERR_ACTION_MISMATCH
, errpos(action_name
));
4880 data
->track_data
.save_data
= save_track_data_snapshot
;
4881 data
->fn
= ontrack_action
;
4882 data
->action
= ACTION_SNAPSHOT
;
4884 char *params
= strsep(&str
, ")");
4886 if (str_has_prefix(action_name
, "trace"))
4887 data
->use_trace_keyword
= true;
4890 ret
= parse_action_params(tr
, params
, data
);
4895 if (handler
== HANDLER_ONMAX
)
4896 data
->track_data
.check_val
= check_track_val_max
;
4897 else if (handler
== HANDLER_ONCHANGE
)
4898 data
->track_data
.check_val
= check_track_val_changed
;
4900 if (handler
!= HANDLER_ONMATCH
) {
4901 data
->track_data
.save_data
= action_trace
;
4902 data
->fn
= ontrack_action
;
4904 data
->fn
= action_trace
;
4906 data
->action
= ACTION_TRACE
;
4909 data
->action_name
= kstrdup(action_name
, GFP_KERNEL
);
4910 if (!data
->action_name
) {
4915 data
->handler
= handler
;
4920 static struct action_data
*track_data_parse(struct hist_trigger_data
*hist_data
,
4921 char *str
, enum handler_id handler
)
4923 struct action_data
*data
;
4927 data
= kzalloc(sizeof(*data
), GFP_KERNEL
);
4929 return ERR_PTR(-ENOMEM
);
4931 var_str
= strsep(&str
, ")");
4932 if (!var_str
|| !str
) {
4937 data
->track_data
.var_str
= kstrdup(var_str
, GFP_KERNEL
);
4938 if (!data
->track_data
.var_str
) {
4943 ret
= action_parse(hist_data
->event_file
->tr
, str
, data
, handler
);
4949 track_data_destroy(hist_data
, data
);
4950 data
= ERR_PTR(ret
);
4954 static void onmatch_destroy(struct action_data
*data
)
4956 kfree(data
->match_data
.event
);
4957 kfree(data
->match_data
.event_system
);
4959 action_data_destroy(data
);
4962 static void destroy_field_var(struct field_var
*field_var
)
4967 destroy_hist_field(field_var
->var
, 0);
4968 destroy_hist_field(field_var
->val
, 0);
4973 static void destroy_field_vars(struct hist_trigger_data
*hist_data
)
4977 for (i
= 0; i
< hist_data
->n_field_vars
; i
++)
4978 destroy_field_var(hist_data
->field_vars
[i
]);
4981 static void save_field_var(struct hist_trigger_data
*hist_data
,
4982 struct field_var
*field_var
)
4984 hist_data
->field_vars
[hist_data
->n_field_vars
++] = field_var
;
4986 if (field_var
->val
->flags
& HIST_FIELD_FL_STRING
)
4987 hist_data
->n_field_var_str
++;
4991 static int check_synth_field(struct synth_event
*event
,
4992 struct hist_field
*hist_field
,
4993 unsigned int field_pos
)
4995 struct synth_field
*field
;
4997 if (field_pos
>= event
->n_fields
)
5000 field
= event
->fields
[field_pos
];
5002 if (strcmp(field
->type
, hist_field
->type
) != 0) {
5003 if (field
->size
!= hist_field
->size
||
5004 field
->is_signed
!= hist_field
->is_signed
)
5011 static struct hist_field
*
5012 trace_action_find_var(struct hist_trigger_data
*hist_data
,
5013 struct action_data
*data
,
5014 char *system
, char *event
, char *var
)
5016 struct trace_array
*tr
= hist_data
->event_file
->tr
;
5017 struct hist_field
*hist_field
;
5019 var
++; /* skip '$' */
5021 hist_field
= find_target_event_var(hist_data
, system
, event
, var
);
5023 if (!system
&& data
->handler
== HANDLER_ONMATCH
) {
5024 system
= data
->match_data
.event_system
;
5025 event
= data
->match_data
.event
;
5028 hist_field
= find_event_var(hist_data
, system
, event
, var
);
5032 hist_err(tr
, HIST_ERR_PARAM_NOT_FOUND
, errpos(var
));
5037 static struct hist_field
*
5038 trace_action_create_field_var(struct hist_trigger_data
*hist_data
,
5039 struct action_data
*data
, char *system
,
5040 char *event
, char *var
)
5042 struct hist_field
*hist_field
= NULL
;
5043 struct field_var
*field_var
;
5046 * First try to create a field var on the target event (the
5047 * currently being defined). This will create a variable for
5048 * unqualified fields on the target event, or if qualified,
5049 * target fields that have qualified names matching the target.
5051 field_var
= create_target_field_var(hist_data
, system
, event
, var
);
5053 if (field_var
&& !IS_ERR(field_var
)) {
5054 save_field_var(hist_data
, field_var
);
5055 hist_field
= field_var
->var
;
5059 * If no explicit system.event is specfied, default to
5060 * looking for fields on the onmatch(system.event.xxx)
5063 if (!system
&& data
->handler
== HANDLER_ONMATCH
) {
5064 system
= data
->match_data
.event_system
;
5065 event
= data
->match_data
.event
;
5069 * At this point, we're looking at a field on another
5070 * event. Because we can't modify a hist trigger on
5071 * another event to add a variable for a field, we need
5072 * to create a new trigger on that event and create the
5073 * variable at the same time.
5075 hist_field
= create_field_var_hist(hist_data
, system
, event
, var
);
5076 if (IS_ERR(hist_field
))
5082 destroy_field_var(field_var
);
5087 static int trace_action_create(struct hist_trigger_data
*hist_data
,
5088 struct action_data
*data
)
5090 struct trace_array
*tr
= hist_data
->event_file
->tr
;
5091 char *event_name
, *param
, *system
= NULL
;
5092 struct hist_field
*hist_field
, *var_ref
;
5094 unsigned int field_pos
= 0;
5095 struct synth_event
*event
;
5096 char *synth_event_name
;
5097 int var_ref_idx
, ret
= 0;
5099 lockdep_assert_held(&event_mutex
);
5101 if (data
->use_trace_keyword
)
5102 synth_event_name
= data
->synth_event_name
;
5104 synth_event_name
= data
->action_name
;
5106 event
= find_synth_event(synth_event_name
);
5108 hist_err(tr
, HIST_ERR_SYNTH_EVENT_NOT_FOUND
, errpos(synth_event_name
));
5114 for (i
= 0; i
< data
->n_params
; i
++) {
5117 p
= param
= kstrdup(data
->params
[i
], GFP_KERNEL
);
5123 system
= strsep(¶m
, ".");
5125 param
= (char *)system
;
5126 system
= event_name
= NULL
;
5128 event_name
= strsep(¶m
, ".");
5136 if (param
[0] == '$')
5137 hist_field
= trace_action_find_var(hist_data
, data
,
5141 hist_field
= trace_action_create_field_var(hist_data
,
5153 if (check_synth_field(event
, hist_field
, field_pos
) == 0) {
5154 var_ref
= create_var_ref(hist_data
, hist_field
,
5155 system
, event_name
);
5162 var_ref_idx
= find_var_ref_idx(hist_data
, var_ref
);
5163 if (WARN_ON(var_ref_idx
< 0)) {
5168 data
->var_ref_idx
[i
] = var_ref_idx
;
5175 hist_err(tr
, HIST_ERR_SYNTH_TYPE_MISMATCH
, errpos(param
));
5181 if (field_pos
!= event
->n_fields
) {
5182 hist_err(tr
, HIST_ERR_SYNTH_COUNT_MISMATCH
, errpos(event
->name
));
5187 data
->synth_event
= event
;
5196 static int action_create(struct hist_trigger_data
*hist_data
,
5197 struct action_data
*data
)
5199 struct trace_event_file
*file
= hist_data
->event_file
;
5200 struct trace_array
*tr
= file
->tr
;
5201 struct track_data
*track_data
;
5202 struct field_var
*field_var
;
5207 if (data
->action
== ACTION_TRACE
)
5208 return trace_action_create(hist_data
, data
);
5210 if (data
->action
== ACTION_SNAPSHOT
) {
5211 track_data
= track_data_alloc(hist_data
->key_size
, data
, hist_data
);
5212 if (IS_ERR(track_data
)) {
5213 ret
= PTR_ERR(track_data
);
5217 ret
= tracing_snapshot_cond_enable(file
->tr
, track_data
,
5218 cond_snapshot_update
);
5220 track_data_free(track_data
);
5225 if (data
->action
== ACTION_SAVE
) {
5226 if (hist_data
->n_save_vars
) {
5228 hist_err(tr
, HIST_ERR_TOO_MANY_SAVE_ACTIONS
, 0);
5232 for (i
= 0; i
< data
->n_params
; i
++) {
5233 param
= kstrdup(data
->params
[i
], GFP_KERNEL
);
5239 field_var
= create_target_field_var(hist_data
, NULL
, NULL
, param
);
5240 if (IS_ERR(field_var
)) {
5241 hist_err(tr
, HIST_ERR_FIELD_VAR_CREATE_FAIL
,
5243 ret
= PTR_ERR(field_var
);
5248 hist_data
->save_vars
[hist_data
->n_save_vars
++] = field_var
;
5249 if (field_var
->val
->flags
& HIST_FIELD_FL_STRING
)
5250 hist_data
->n_save_var_str
++;
/* onmatch() needs no extra setup beyond the generic action creation. */
static int onmatch_create(struct hist_trigger_data *hist_data,
			  struct action_data *data)
{
	return action_create(hist_data, data);
}
5264 static struct action_data
*onmatch_parse(struct trace_array
*tr
, char *str
)
5266 char *match_event
, *match_event_system
;
5267 struct action_data
*data
;
5270 data
= kzalloc(sizeof(*data
), GFP_KERNEL
);
5272 return ERR_PTR(-ENOMEM
);
5274 match_event
= strsep(&str
, ")");
5275 if (!match_event
|| !str
) {
5276 hist_err(tr
, HIST_ERR_NO_CLOSING_PAREN
, errpos(match_event
));
5280 match_event_system
= strsep(&match_event
, ".");
5282 hist_err(tr
, HIST_ERR_SUBSYS_NOT_FOUND
, errpos(match_event_system
));
5286 if (IS_ERR(event_file(tr
, match_event_system
, match_event
))) {
5287 hist_err(tr
, HIST_ERR_INVALID_SUBSYS_EVENT
, errpos(match_event
));
5291 data
->match_data
.event
= kstrdup(match_event
, GFP_KERNEL
);
5292 if (!data
->match_data
.event
) {
5297 data
->match_data
.event_system
= kstrdup(match_event_system
, GFP_KERNEL
);
5298 if (!data
->match_data
.event_system
) {
5303 ret
= action_parse(tr
, str
, data
, HANDLER_ONMATCH
);
5309 onmatch_destroy(data
);
5310 data
= ERR_PTR(ret
);
5314 static int create_hitcount_val(struct hist_trigger_data
*hist_data
)
5316 hist_data
->fields
[HITCOUNT_IDX
] =
5317 create_hist_field(hist_data
, NULL
, HIST_FIELD_FL_HITCOUNT
, NULL
);
5318 if (!hist_data
->fields
[HITCOUNT_IDX
])
5321 hist_data
->n_vals
++;
5322 hist_data
->n_fields
++;
5324 if (WARN_ON(hist_data
->n_vals
> TRACING_MAP_VALS_MAX
))
5330 static int __create_val_field(struct hist_trigger_data
*hist_data
,
5331 unsigned int val_idx
,
5332 struct trace_event_file
*file
,
5333 char *var_name
, char *field_str
,
5334 unsigned long flags
)
5336 struct hist_field
*hist_field
;
5339 hist_field
= parse_expr(hist_data
, file
, field_str
, flags
, var_name
, 0);
5340 if (IS_ERR(hist_field
)) {
5341 ret
= PTR_ERR(hist_field
);
5345 hist_data
->fields
[val_idx
] = hist_field
;
5347 ++hist_data
->n_vals
;
5348 ++hist_data
->n_fields
;
5350 if (WARN_ON(hist_data
->n_vals
> TRACING_MAP_VALS_MAX
+ TRACING_MAP_VARS_MAX
))
5356 static int create_val_field(struct hist_trigger_data
*hist_data
,
5357 unsigned int val_idx
,
5358 struct trace_event_file
*file
,
5361 if (WARN_ON(val_idx
>= TRACING_MAP_VALS_MAX
))
5364 return __create_val_field(hist_data
, val_idx
, file
, NULL
, field_str
, 0);
5367 static int create_var_field(struct hist_trigger_data
*hist_data
,
5368 unsigned int val_idx
,
5369 struct trace_event_file
*file
,
5370 char *var_name
, char *expr_str
)
5372 struct trace_array
*tr
= hist_data
->event_file
->tr
;
5373 unsigned long flags
= 0;
5375 if (WARN_ON(val_idx
>= TRACING_MAP_VALS_MAX
+ TRACING_MAP_VARS_MAX
))
5378 if (find_var(hist_data
, file
, var_name
) && !hist_data
->remove
) {
5379 hist_err(tr
, HIST_ERR_DUPLICATE_VAR
, errpos(var_name
));
5383 flags
|= HIST_FIELD_FL_VAR
;
5384 hist_data
->n_vars
++;
5385 if (WARN_ON(hist_data
->n_vars
> TRACING_MAP_VARS_MAX
))
5388 return __create_val_field(hist_data
, val_idx
, file
, var_name
, expr_str
, flags
);
5391 static int create_val_fields(struct hist_trigger_data
*hist_data
,
5392 struct trace_event_file
*file
)
5394 char *fields_str
, *field_str
;
5395 unsigned int i
, j
= 1;
5398 ret
= create_hitcount_val(hist_data
);
5402 fields_str
= hist_data
->attrs
->vals_str
;
5406 for (i
= 0, j
= 1; i
< TRACING_MAP_VALS_MAX
&&
5407 j
< TRACING_MAP_VALS_MAX
; i
++) {
5408 field_str
= strsep(&fields_str
, ",");
5412 if (strcmp(field_str
, "hitcount") == 0)
5415 ret
= create_val_field(hist_data
, j
++, file
, field_str
);
5420 if (fields_str
&& (strcmp(fields_str
, "hitcount") != 0))
5426 static int create_key_field(struct hist_trigger_data
*hist_data
,
5427 unsigned int key_idx
,
5428 unsigned int key_offset
,
5429 struct trace_event_file
*file
,
5432 struct trace_array
*tr
= hist_data
->event_file
->tr
;
5433 struct hist_field
*hist_field
= NULL
;
5434 unsigned long flags
= 0;
5435 unsigned int key_size
;
5438 if (WARN_ON(key_idx
>= HIST_FIELDS_MAX
))
5441 flags
|= HIST_FIELD_FL_KEY
;
5443 if (strcmp(field_str
, "stacktrace") == 0) {
5444 flags
|= HIST_FIELD_FL_STACKTRACE
;
5445 key_size
= sizeof(unsigned long) * HIST_STACKTRACE_DEPTH
;
5446 hist_field
= create_hist_field(hist_data
, NULL
, flags
, NULL
);
5448 hist_field
= parse_expr(hist_data
, file
, field_str
, flags
,
5450 if (IS_ERR(hist_field
)) {
5451 ret
= PTR_ERR(hist_field
);
5455 if (field_has_hist_vars(hist_field
, 0)) {
5456 hist_err(tr
, HIST_ERR_INVALID_REF_KEY
, errpos(field_str
));
5457 destroy_hist_field(hist_field
, 0);
5462 key_size
= hist_field
->size
;
5465 hist_data
->fields
[key_idx
] = hist_field
;
5467 key_size
= ALIGN(key_size
, sizeof(u64
));
5468 hist_data
->fields
[key_idx
]->size
= key_size
;
5469 hist_data
->fields
[key_idx
]->offset
= key_offset
;
5471 hist_data
->key_size
+= key_size
;
5473 if (hist_data
->key_size
> HIST_KEY_SIZE_MAX
) {
5478 hist_data
->n_keys
++;
5479 hist_data
->n_fields
++;
5481 if (WARN_ON(hist_data
->n_keys
> TRACING_MAP_KEYS_MAX
))
5489 static int create_key_fields(struct hist_trigger_data
*hist_data
,
5490 struct trace_event_file
*file
)
5492 unsigned int i
, key_offset
= 0, n_vals
= hist_data
->n_vals
;
5493 char *fields_str
, *field_str
;
5496 fields_str
= hist_data
->attrs
->keys_str
;
5500 for (i
= n_vals
; i
< n_vals
+ TRACING_MAP_KEYS_MAX
; i
++) {
5501 field_str
= strsep(&fields_str
, ",");
5504 ret
= create_key_field(hist_data
, i
, key_offset
,
5519 static int create_var_fields(struct hist_trigger_data
*hist_data
,
5520 struct trace_event_file
*file
)
5522 unsigned int i
, j
= hist_data
->n_vals
;
5525 unsigned int n_vars
= hist_data
->attrs
->var_defs
.n_vars
;
5527 for (i
= 0; i
< n_vars
; i
++) {
5528 char *var_name
= hist_data
->attrs
->var_defs
.name
[i
];
5529 char *expr
= hist_data
->attrs
->var_defs
.expr
[i
];
5531 ret
= create_var_field(hist_data
, j
++, file
, var_name
, expr
);
5539 static void free_var_defs(struct hist_trigger_data
*hist_data
)
5543 for (i
= 0; i
< hist_data
->attrs
->var_defs
.n_vars
; i
++) {
5544 kfree(hist_data
->attrs
->var_defs
.name
[i
]);
5545 kfree(hist_data
->attrs
->var_defs
.expr
[i
]);
5548 hist_data
->attrs
->var_defs
.n_vars
= 0;
5551 static int parse_var_defs(struct hist_trigger_data
*hist_data
)
5553 struct trace_array
*tr
= hist_data
->event_file
->tr
;
5554 char *s
, *str
, *var_name
, *field_str
;
5555 unsigned int i
, j
, n_vars
= 0;
5558 for (i
= 0; i
< hist_data
->attrs
->n_assignments
; i
++) {
5559 str
= hist_data
->attrs
->assignment_str
[i
];
5560 for (j
= 0; j
< TRACING_MAP_VARS_MAX
; j
++) {
5561 field_str
= strsep(&str
, ",");
5565 var_name
= strsep(&field_str
, "=");
5566 if (!var_name
|| !field_str
) {
5567 hist_err(tr
, HIST_ERR_MALFORMED_ASSIGNMENT
,
5573 if (n_vars
== TRACING_MAP_VARS_MAX
) {
5574 hist_err(tr
, HIST_ERR_TOO_MANY_VARS
, errpos(var_name
));
5579 s
= kstrdup(var_name
, GFP_KERNEL
);
5584 hist_data
->attrs
->var_defs
.name
[n_vars
] = s
;
5586 s
= kstrdup(field_str
, GFP_KERNEL
);
5588 kfree(hist_data
->attrs
->var_defs
.name
[n_vars
]);
5592 hist_data
->attrs
->var_defs
.expr
[n_vars
++] = s
;
5594 hist_data
->attrs
->var_defs
.n_vars
= n_vars
;
5600 free_var_defs(hist_data
);
/*
 * Build all histogram fields in order: parse variable definitions,
 * then create values, variables and keys.  The parsed var-def strings
 * are always freed before returning.
 */
static int create_hist_fields(struct hist_trigger_data *hist_data,
			      struct trace_event_file *file)
{
	int ret;

	ret = parse_var_defs(hist_data);
	if (ret)
		goto out;

	ret = create_val_fields(hist_data, file);
	if (ret)
		goto out;

	ret = create_var_fields(hist_data, file);
	if (ret)
		goto out;

	ret = create_key_fields(hist_data, file);
	if (ret)
		goto out;
 out:
	free_var_defs(hist_data);

	return ret;
}
5631 static int is_descending(struct trace_array
*tr
, const char *str
)
5636 if (strcmp(str
, "descending") == 0)
5639 if (strcmp(str
, "ascending") == 0)
5642 hist_err(tr
, HIST_ERR_INVALID_SORT_MODIFIER
, errpos((char *)str
));
5647 static int create_sort_keys(struct hist_trigger_data
*hist_data
)
5649 struct trace_array
*tr
= hist_data
->event_file
->tr
;
5650 char *fields_str
= hist_data
->attrs
->sort_key_str
;
5651 struct tracing_map_sort_key
*sort_key
;
5652 int descending
, ret
= 0;
5653 unsigned int i
, j
, k
;
5655 hist_data
->n_sort_keys
= 1; /* we always have at least one, hitcount */
5660 for (i
= 0; i
< TRACING_MAP_SORT_KEYS_MAX
; i
++) {
5661 struct hist_field
*hist_field
;
5662 char *field_str
, *field_name
;
5663 const char *test_name
;
5665 sort_key
= &hist_data
->sort_keys
[i
];
5667 field_str
= strsep(&fields_str
, ",");
5673 hist_err(tr
, HIST_ERR_EMPTY_SORT_FIELD
, errpos("sort="));
5677 if ((i
== TRACING_MAP_SORT_KEYS_MAX
- 1) && fields_str
) {
5678 hist_err(tr
, HIST_ERR_TOO_MANY_SORT_FIELDS
, errpos("sort="));
5683 field_name
= strsep(&field_str
, ".");
5684 if (!field_name
|| !*field_name
) {
5686 hist_err(tr
, HIST_ERR_EMPTY_SORT_FIELD
, errpos("sort="));
5690 if (strcmp(field_name
, "hitcount") == 0) {
5691 descending
= is_descending(tr
, field_str
);
5692 if (descending
< 0) {
5696 sort_key
->descending
= descending
;
5700 for (j
= 1, k
= 1; j
< hist_data
->n_fields
; j
++) {
5703 hist_field
= hist_data
->fields
[j
];
5704 if (hist_field
->flags
& HIST_FIELD_FL_VAR
)
5709 test_name
= hist_field_name(hist_field
, 0);
5711 if (strcmp(field_name
, test_name
) == 0) {
5712 sort_key
->field_idx
= idx
;
5713 descending
= is_descending(tr
, field_str
);
5714 if (descending
< 0) {
5718 sort_key
->descending
= descending
;
5722 if (j
== hist_data
->n_fields
) {
5724 hist_err(tr
, HIST_ERR_INVALID_SORT_FIELD
, errpos(field_name
));
5729 hist_data
->n_sort_keys
= i
;
5734 static void destroy_actions(struct hist_trigger_data
*hist_data
)
5738 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
5739 struct action_data
*data
= hist_data
->actions
[i
];
5741 if (data
->handler
== HANDLER_ONMATCH
)
5742 onmatch_destroy(data
);
5743 else if (data
->handler
== HANDLER_ONMAX
||
5744 data
->handler
== HANDLER_ONCHANGE
)
5745 track_data_destroy(hist_data
, data
);
5751 static int parse_actions(struct hist_trigger_data
*hist_data
)
5753 struct trace_array
*tr
= hist_data
->event_file
->tr
;
5754 struct action_data
*data
;
5760 for (i
= 0; i
< hist_data
->attrs
->n_actions
; i
++) {
5761 str
= hist_data
->attrs
->action_str
[i
];
5763 if ((len
= str_has_prefix(str
, "onmatch("))) {
5764 char *action_str
= str
+ len
;
5766 data
= onmatch_parse(tr
, action_str
);
5768 ret
= PTR_ERR(data
);
5771 } else if ((len
= str_has_prefix(str
, "onmax("))) {
5772 char *action_str
= str
+ len
;
5774 data
= track_data_parse(hist_data
, action_str
,
5777 ret
= PTR_ERR(data
);
5780 } else if ((len
= str_has_prefix(str
, "onchange("))) {
5781 char *action_str
= str
+ len
;
5783 data
= track_data_parse(hist_data
, action_str
,
5786 ret
= PTR_ERR(data
);
5794 hist_data
->actions
[hist_data
->n_actions
++] = data
;
5800 static int create_actions(struct hist_trigger_data
*hist_data
)
5802 struct action_data
*data
;
5806 for (i
= 0; i
< hist_data
->attrs
->n_actions
; i
++) {
5807 data
= hist_data
->actions
[i
];
5809 if (data
->handler
== HANDLER_ONMATCH
) {
5810 ret
= onmatch_create(hist_data
, data
);
5813 } else if (data
->handler
== HANDLER_ONMAX
||
5814 data
->handler
== HANDLER_ONCHANGE
) {
5815 ret
= track_data_create(hist_data
, data
);
5827 static void print_actions(struct seq_file
*m
,
5828 struct hist_trigger_data
*hist_data
,
5829 struct tracing_map_elt
*elt
)
5833 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
5834 struct action_data
*data
= hist_data
->actions
[i
];
5836 if (data
->action
== ACTION_SNAPSHOT
)
5839 if (data
->handler
== HANDLER_ONMAX
||
5840 data
->handler
== HANDLER_ONCHANGE
)
5841 track_data_print(m
, hist_data
, elt
, data
);
5845 static void print_action_spec(struct seq_file
*m
,
5846 struct hist_trigger_data
*hist_data
,
5847 struct action_data
*data
)
5851 if (data
->action
== ACTION_SAVE
) {
5852 for (i
= 0; i
< hist_data
->n_save_vars
; i
++) {
5853 seq_printf(m
, "%s", hist_data
->save_vars
[i
]->var
->var
.name
);
5854 if (i
< hist_data
->n_save_vars
- 1)
5857 } else if (data
->action
== ACTION_TRACE
) {
5858 if (data
->use_trace_keyword
)
5859 seq_printf(m
, "%s", data
->synth_event_name
);
5860 for (i
= 0; i
< data
->n_params
; i
++) {
5861 if (i
|| data
->use_trace_keyword
)
5863 seq_printf(m
, "%s", data
->params
[i
]);
5868 static void print_track_data_spec(struct seq_file
*m
,
5869 struct hist_trigger_data
*hist_data
,
5870 struct action_data
*data
)
5872 if (data
->handler
== HANDLER_ONMAX
)
5873 seq_puts(m
, ":onmax(");
5874 else if (data
->handler
== HANDLER_ONCHANGE
)
5875 seq_puts(m
, ":onchange(");
5876 seq_printf(m
, "%s", data
->track_data
.var_str
);
5877 seq_printf(m
, ").%s(", data
->action_name
);
5879 print_action_spec(m
, hist_data
, data
);
5884 static void print_onmatch_spec(struct seq_file
*m
,
5885 struct hist_trigger_data
*hist_data
,
5886 struct action_data
*data
)
5888 seq_printf(m
, ":onmatch(%s.%s).", data
->match_data
.event_system
,
5889 data
->match_data
.event
);
5891 seq_printf(m
, "%s(", data
->action_name
);
5893 print_action_spec(m
, hist_data
, data
);
5898 static bool actions_match(struct hist_trigger_data
*hist_data
,
5899 struct hist_trigger_data
*hist_data_test
)
5903 if (hist_data
->n_actions
!= hist_data_test
->n_actions
)
5906 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
5907 struct action_data
*data
= hist_data
->actions
[i
];
5908 struct action_data
*data_test
= hist_data_test
->actions
[i
];
5909 char *action_name
, *action_name_test
;
5911 if (data
->handler
!= data_test
->handler
)
5913 if (data
->action
!= data_test
->action
)
5916 if (data
->n_params
!= data_test
->n_params
)
5919 for (j
= 0; j
< data
->n_params
; j
++) {
5920 if (strcmp(data
->params
[j
], data_test
->params
[j
]) != 0)
5924 if (data
->use_trace_keyword
)
5925 action_name
= data
->synth_event_name
;
5927 action_name
= data
->action_name
;
5929 if (data_test
->use_trace_keyword
)
5930 action_name_test
= data_test
->synth_event_name
;
5932 action_name_test
= data_test
->action_name
;
5934 if (strcmp(action_name
, action_name_test
) != 0)
5937 if (data
->handler
== HANDLER_ONMATCH
) {
5938 if (strcmp(data
->match_data
.event_system
,
5939 data_test
->match_data
.event_system
) != 0)
5941 if (strcmp(data
->match_data
.event
,
5942 data_test
->match_data
.event
) != 0)
5944 } else if (data
->handler
== HANDLER_ONMAX
||
5945 data
->handler
== HANDLER_ONCHANGE
) {
5946 if (strcmp(data
->track_data
.var_str
,
5947 data_test
->track_data
.var_str
) != 0)
5956 static void print_actions_spec(struct seq_file
*m
,
5957 struct hist_trigger_data
*hist_data
)
5961 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
5962 struct action_data
*data
= hist_data
->actions
[i
];
5964 if (data
->handler
== HANDLER_ONMATCH
)
5965 print_onmatch_spec(m
, hist_data
, data
);
5966 else if (data
->handler
== HANDLER_ONMAX
||
5967 data
->handler
== HANDLER_ONCHANGE
)
5968 print_track_data_spec(m
, hist_data
, data
);
5972 static void destroy_field_var_hists(struct hist_trigger_data
*hist_data
)
5976 for (i
= 0; i
< hist_data
->n_field_var_hists
; i
++) {
5977 kfree(hist_data
->field_var_hists
[i
]->cmd
);
5978 kfree(hist_data
->field_var_hists
[i
]);
5982 static void destroy_hist_data(struct hist_trigger_data
*hist_data
)
5987 destroy_hist_trigger_attrs(hist_data
->attrs
);
5988 destroy_hist_fields(hist_data
);
5989 tracing_map_destroy(hist_data
->map
);
5991 destroy_actions(hist_data
);
5992 destroy_field_vars(hist_data
);
5993 destroy_field_var_hists(hist_data
);
5998 static int create_tracing_map_fields(struct hist_trigger_data
*hist_data
)
6000 struct tracing_map
*map
= hist_data
->map
;
6001 struct ftrace_event_field
*field
;
6002 struct hist_field
*hist_field
;
6005 for_each_hist_field(i
, hist_data
) {
6006 hist_field
= hist_data
->fields
[i
];
6007 if (hist_field
->flags
& HIST_FIELD_FL_KEY
) {
6008 tracing_map_cmp_fn_t cmp_fn
;
6010 field
= hist_field
->field
;
6012 if (hist_field
->flags
& HIST_FIELD_FL_STACKTRACE
)
6013 cmp_fn
= tracing_map_cmp_none
;
6015 cmp_fn
= tracing_map_cmp_num(hist_field
->size
,
6016 hist_field
->is_signed
);
6017 else if (is_string_field(field
))
6018 cmp_fn
= tracing_map_cmp_string
;
6020 cmp_fn
= tracing_map_cmp_num(field
->size
,
6022 idx
= tracing_map_add_key_field(map
,
6025 } else if (!(hist_field
->flags
& HIST_FIELD_FL_VAR
))
6026 idx
= tracing_map_add_sum_field(map
);
6031 if (hist_field
->flags
& HIST_FIELD_FL_VAR
) {
6032 idx
= tracing_map_add_var(map
);
6035 hist_field
->var
.idx
= idx
;
6036 hist_field
->var
.hist_data
= hist_data
;
6043 static struct hist_trigger_data
*
6044 create_hist_data(unsigned int map_bits
,
6045 struct hist_trigger_attrs
*attrs
,
6046 struct trace_event_file
*file
,
6049 const struct tracing_map_ops
*map_ops
= NULL
;
6050 struct hist_trigger_data
*hist_data
;
6053 hist_data
= kzalloc(sizeof(*hist_data
), GFP_KERNEL
);
6055 return ERR_PTR(-ENOMEM
);
6057 hist_data
->attrs
= attrs
;
6058 hist_data
->remove
= remove
;
6059 hist_data
->event_file
= file
;
6061 ret
= parse_actions(hist_data
);
6065 ret
= create_hist_fields(hist_data
, file
);
6069 ret
= create_sort_keys(hist_data
);
6073 map_ops
= &hist_trigger_elt_data_ops
;
6075 hist_data
->map
= tracing_map_create(map_bits
, hist_data
->key_size
,
6076 map_ops
, hist_data
);
6077 if (IS_ERR(hist_data
->map
)) {
6078 ret
= PTR_ERR(hist_data
->map
);
6079 hist_data
->map
= NULL
;
6083 ret
= create_tracing_map_fields(hist_data
);
6089 hist_data
->attrs
= NULL
;
6091 destroy_hist_data(hist_data
);
6093 hist_data
= ERR_PTR(ret
);
6098 static void hist_trigger_elt_update(struct hist_trigger_data
*hist_data
,
6099 struct tracing_map_elt
*elt
, void *rec
,
6100 struct ring_buffer_event
*rbe
,
6103 struct hist_elt_data
*elt_data
;
6104 struct hist_field
*hist_field
;
6105 unsigned int i
, var_idx
;
6108 elt_data
= elt
->private_data
;
6109 elt_data
->var_ref_vals
= var_ref_vals
;
6111 for_each_hist_val_field(i
, hist_data
) {
6112 hist_field
= hist_data
->fields
[i
];
6113 hist_val
= hist_field
->fn(hist_field
, elt
, rbe
, rec
);
6114 if (hist_field
->flags
& HIST_FIELD_FL_VAR
) {
6115 var_idx
= hist_field
->var
.idx
;
6116 tracing_map_set_var(elt
, var_idx
, hist_val
);
6119 tracing_map_update_sum(elt
, i
, hist_val
);
6122 for_each_hist_key_field(i
, hist_data
) {
6123 hist_field
= hist_data
->fields
[i
];
6124 if (hist_field
->flags
& HIST_FIELD_FL_VAR
) {
6125 hist_val
= hist_field
->fn(hist_field
, elt
, rbe
, rec
);
6126 var_idx
= hist_field
->var
.idx
;
6127 tracing_map_set_var(elt
, var_idx
, hist_val
);
6131 update_field_vars(hist_data
, elt
, rbe
, rec
);
6134 static inline void add_to_key(char *compound_key
, void *key
,
6135 struct hist_field
*key_field
, void *rec
)
6137 size_t size
= key_field
->size
;
6139 if (key_field
->flags
& HIST_FIELD_FL_STRING
) {
6140 struct ftrace_event_field
*field
;
6142 field
= key_field
->field
;
6143 if (field
->filter_type
== FILTER_DYN_STRING
)
6144 size
= *(u32
*)(rec
+ field
->offset
) >> 16;
6145 else if (field
->filter_type
== FILTER_PTR_STRING
)
6147 else if (field
->filter_type
== FILTER_STATIC_STRING
)
6150 /* ensure NULL-termination */
6151 if (size
> key_field
->size
- 1)
6152 size
= key_field
->size
- 1;
6154 strncpy(compound_key
+ key_field
->offset
, (char *)key
, size
);
6156 memcpy(compound_key
+ key_field
->offset
, key
, size
);
6160 hist_trigger_actions(struct hist_trigger_data
*hist_data
,
6161 struct tracing_map_elt
*elt
, void *rec
,
6162 struct ring_buffer_event
*rbe
, void *key
,
6165 struct action_data
*data
;
6168 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
6169 data
= hist_data
->actions
[i
];
6170 data
->fn(hist_data
, elt
, rec
, rbe
, key
, data
, var_ref_vals
);
6174 static void event_hist_trigger(struct event_trigger_data
*data
, void *rec
,
6175 struct ring_buffer_event
*rbe
)
6177 struct hist_trigger_data
*hist_data
= data
->private_data
;
6178 bool use_compound_key
= (hist_data
->n_keys
> 1);
6179 unsigned long entries
[HIST_STACKTRACE_DEPTH
];
6180 u64 var_ref_vals
[TRACING_MAP_VARS_MAX
];
6181 char compound_key
[HIST_KEY_SIZE_MAX
];
6182 struct tracing_map_elt
*elt
= NULL
;
6183 struct hist_field
*key_field
;
6188 memset(compound_key
, 0, hist_data
->key_size
);
6190 for_each_hist_key_field(i
, hist_data
) {
6191 key_field
= hist_data
->fields
[i
];
6193 if (key_field
->flags
& HIST_FIELD_FL_STACKTRACE
) {
6194 memset(entries
, 0, HIST_STACKTRACE_SIZE
);
6195 stack_trace_save(entries
, HIST_STACKTRACE_DEPTH
,
6196 HIST_STACKTRACE_SKIP
);
6199 field_contents
= key_field
->fn(key_field
, elt
, rbe
, rec
);
6200 if (key_field
->flags
& HIST_FIELD_FL_STRING
) {
6201 key
= (void *)(unsigned long)field_contents
;
6202 use_compound_key
= true;
6204 key
= (void *)&field_contents
;
6207 if (use_compound_key
)
6208 add_to_key(compound_key
, key
, key_field
, rec
);
6211 if (use_compound_key
)
6214 if (hist_data
->n_var_refs
&&
6215 !resolve_var_refs(hist_data
, key
, var_ref_vals
, false))
6218 elt
= tracing_map_insert(hist_data
->map
, key
);
6222 hist_trigger_elt_update(hist_data
, elt
, rec
, rbe
, var_ref_vals
);
6224 if (resolve_var_refs(hist_data
, key
, var_ref_vals
, true))
6225 hist_trigger_actions(hist_data
, elt
, rec
, rbe
, key
, var_ref_vals
);
6228 static void hist_trigger_stacktrace_print(struct seq_file
*m
,
6229 unsigned long *stacktrace_entries
,
6230 unsigned int max_entries
)
6232 char str
[KSYM_SYMBOL_LEN
];
6233 unsigned int spaces
= 8;
6236 for (i
= 0; i
< max_entries
; i
++) {
6237 if (!stacktrace_entries
[i
])
6240 seq_printf(m
, "%*c", 1 + spaces
, ' ');
6241 sprint_symbol(str
, stacktrace_entries
[i
]);
6242 seq_printf(m
, "%s\n", str
);
6246 static void hist_trigger_print_key(struct seq_file
*m
,
6247 struct hist_trigger_data
*hist_data
,
6249 struct tracing_map_elt
*elt
)
6251 struct hist_field
*key_field
;
6252 char str
[KSYM_SYMBOL_LEN
];
6253 bool multiline
= false;
6254 const char *field_name
;
6260 for_each_hist_key_field(i
, hist_data
) {
6261 key_field
= hist_data
->fields
[i
];
6263 if (i
> hist_data
->n_vals
)
6266 field_name
= hist_field_name(key_field
, 0);
6268 if (key_field
->flags
& HIST_FIELD_FL_HEX
) {
6269 uval
= *(u64
*)(key
+ key_field
->offset
);
6270 seq_printf(m
, "%s: %llx", field_name
, uval
);
6271 } else if (key_field
->flags
& HIST_FIELD_FL_SYM
) {
6272 uval
= *(u64
*)(key
+ key_field
->offset
);
6273 sprint_symbol_no_offset(str
, uval
);
6274 seq_printf(m
, "%s: [%llx] %-45s", field_name
,
6276 } else if (key_field
->flags
& HIST_FIELD_FL_SYM_OFFSET
) {
6277 uval
= *(u64
*)(key
+ key_field
->offset
);
6278 sprint_symbol(str
, uval
);
6279 seq_printf(m
, "%s: [%llx] %-55s", field_name
,
6281 } else if (key_field
->flags
& HIST_FIELD_FL_EXECNAME
) {
6282 struct hist_elt_data
*elt_data
= elt
->private_data
;
6285 if (WARN_ON_ONCE(!elt_data
))
6288 comm
= elt_data
->comm
;
6290 uval
= *(u64
*)(key
+ key_field
->offset
);
6291 seq_printf(m
, "%s: %-16s[%10llu]", field_name
,
6293 } else if (key_field
->flags
& HIST_FIELD_FL_SYSCALL
) {
6294 const char *syscall_name
;
6296 uval
= *(u64
*)(key
+ key_field
->offset
);
6297 syscall_name
= get_syscall_name(uval
);
6299 syscall_name
= "unknown_syscall";
6301 seq_printf(m
, "%s: %-30s[%3llu]", field_name
,
6302 syscall_name
, uval
);
6303 } else if (key_field
->flags
& HIST_FIELD_FL_STACKTRACE
) {
6304 seq_puts(m
, "stacktrace:\n");
6305 hist_trigger_stacktrace_print(m
,
6306 key
+ key_field
->offset
,
6307 HIST_STACKTRACE_DEPTH
);
6309 } else if (key_field
->flags
& HIST_FIELD_FL_LOG2
) {
6310 seq_printf(m
, "%s: ~ 2^%-2llu", field_name
,
6311 *(u64
*)(key
+ key_field
->offset
));
6312 } else if (key_field
->flags
& HIST_FIELD_FL_STRING
) {
6313 seq_printf(m
, "%s: %-50s", field_name
,
6314 (char *)(key
+ key_field
->offset
));
6316 uval
= *(u64
*)(key
+ key_field
->offset
);
6317 seq_printf(m
, "%s: %10llu", field_name
, uval
);
6327 static void hist_trigger_entry_print(struct seq_file
*m
,
6328 struct hist_trigger_data
*hist_data
,
6330 struct tracing_map_elt
*elt
)
6332 const char *field_name
;
6335 hist_trigger_print_key(m
, hist_data
, key
, elt
);
6337 seq_printf(m
, " hitcount: %10llu",
6338 tracing_map_read_sum(elt
, HITCOUNT_IDX
));
6340 for (i
= 1; i
< hist_data
->n_vals
; i
++) {
6341 field_name
= hist_field_name(hist_data
->fields
[i
], 0);
6343 if (hist_data
->fields
[i
]->flags
& HIST_FIELD_FL_VAR
||
6344 hist_data
->fields
[i
]->flags
& HIST_FIELD_FL_EXPR
)
6347 if (hist_data
->fields
[i
]->flags
& HIST_FIELD_FL_HEX
) {
6348 seq_printf(m
, " %s: %10llx", field_name
,
6349 tracing_map_read_sum(elt
, i
));
6351 seq_printf(m
, " %s: %10llu", field_name
,
6352 tracing_map_read_sum(elt
, i
));
6356 print_actions(m
, hist_data
, elt
);
6361 static int print_entries(struct seq_file
*m
,
6362 struct hist_trigger_data
*hist_data
)
6364 struct tracing_map_sort_entry
**sort_entries
= NULL
;
6365 struct tracing_map
*map
= hist_data
->map
;
6368 n_entries
= tracing_map_sort_entries(map
, hist_data
->sort_keys
,
6369 hist_data
->n_sort_keys
,
6374 for (i
= 0; i
< n_entries
; i
++)
6375 hist_trigger_entry_print(m
, hist_data
,
6376 sort_entries
[i
]->key
,
6377 sort_entries
[i
]->elt
);
6379 tracing_map_destroy_sort_entries(sort_entries
, n_entries
);
6384 static void hist_trigger_show(struct seq_file
*m
,
6385 struct event_trigger_data
*data
, int n
)
6387 struct hist_trigger_data
*hist_data
;
6391 seq_puts(m
, "\n\n");
6393 seq_puts(m
, "# event histogram\n#\n# trigger info: ");
6394 data
->ops
->print(m
, data
->ops
, data
);
6395 seq_puts(m
, "#\n\n");
6397 hist_data
= data
->private_data
;
6398 n_entries
= print_entries(m
, hist_data
);
6402 track_data_snapshot_print(m
, hist_data
);
6404 seq_printf(m
, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n",
6405 (u64
)atomic64_read(&hist_data
->map
->hits
),
6406 n_entries
, (u64
)atomic64_read(&hist_data
->map
->drops
));
6409 static int hist_show(struct seq_file
*m
, void *v
)
6411 struct event_trigger_data
*data
;
6412 struct trace_event_file
*event_file
;
6415 mutex_lock(&event_mutex
);
6417 event_file
= event_file_data(m
->private);
6418 if (unlikely(!event_file
)) {
6423 list_for_each_entry(data
, &event_file
->triggers
, list
) {
6424 if (data
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
)
6425 hist_trigger_show(m
, data
, n
++);
6429 mutex_unlock(&event_mutex
);
6434 static int event_hist_open(struct inode
*inode
, struct file
*file
)
6438 ret
= security_locked_down(LOCKDOWN_TRACEFS
);
6442 return single_open(file
, hist_show
, file
);
6445 const struct file_operations event_hist_fops
= {
6446 .open
= event_hist_open
,
6448 .llseek
= seq_lseek
,
6449 .release
= single_release
,
6452 static void hist_field_print(struct seq_file
*m
, struct hist_field
*hist_field
)
6454 const char *field_name
= hist_field_name(hist_field
, 0);
6456 if (hist_field
->var
.name
)
6457 seq_printf(m
, "%s=", hist_field
->var
.name
);
6459 if (hist_field
->flags
& HIST_FIELD_FL_CPU
)
6461 else if (field_name
) {
6462 if (hist_field
->flags
& HIST_FIELD_FL_VAR_REF
||
6463 hist_field
->flags
& HIST_FIELD_FL_ALIAS
)
6465 seq_printf(m
, "%s", field_name
);
6466 } else if (hist_field
->flags
& HIST_FIELD_FL_TIMESTAMP
)
6467 seq_puts(m
, "common_timestamp");
6469 if (hist_field
->flags
) {
6470 if (!(hist_field
->flags
& HIST_FIELD_FL_VAR_REF
) &&
6471 !(hist_field
->flags
& HIST_FIELD_FL_EXPR
)) {
6472 const char *flags
= get_hist_field_flags(hist_field
);
6475 seq_printf(m
, ".%s", flags
);
6480 static int event_hist_trigger_print(struct seq_file
*m
,
6481 struct event_trigger_ops
*ops
,
6482 struct event_trigger_data
*data
)
6484 struct hist_trigger_data
*hist_data
= data
->private_data
;
6485 struct hist_field
*field
;
6486 bool have_var
= false;
6489 seq_puts(m
, "hist:");
6492 seq_printf(m
, "%s:", data
->name
);
6494 seq_puts(m
, "keys=");
6496 for_each_hist_key_field(i
, hist_data
) {
6497 field
= hist_data
->fields
[i
];
6499 if (i
> hist_data
->n_vals
)
6502 if (field
->flags
& HIST_FIELD_FL_STACKTRACE
)
6503 seq_puts(m
, "stacktrace");
6505 hist_field_print(m
, field
);
6508 seq_puts(m
, ":vals=");
6510 for_each_hist_val_field(i
, hist_data
) {
6511 field
= hist_data
->fields
[i
];
6512 if (field
->flags
& HIST_FIELD_FL_VAR
) {
6517 if (i
== HITCOUNT_IDX
)
6518 seq_puts(m
, "hitcount");
6521 hist_field_print(m
, field
);
6530 for_each_hist_val_field(i
, hist_data
) {
6531 field
= hist_data
->fields
[i
];
6533 if (field
->flags
& HIST_FIELD_FL_VAR
) {
6536 hist_field_print(m
, field
);
6541 seq_puts(m
, ":sort=");
6543 for (i
= 0; i
< hist_data
->n_sort_keys
; i
++) {
6544 struct tracing_map_sort_key
*sort_key
;
6545 unsigned int idx
, first_key_idx
;
6548 first_key_idx
= hist_data
->n_vals
- hist_data
->n_vars
;
6550 sort_key
= &hist_data
->sort_keys
[i
];
6551 idx
= sort_key
->field_idx
;
6553 if (WARN_ON(idx
>= HIST_FIELDS_MAX
))
6559 if (idx
== HITCOUNT_IDX
)
6560 seq_puts(m
, "hitcount");
6562 if (idx
>= first_key_idx
)
6563 idx
+= hist_data
->n_vars
;
6564 hist_field_print(m
, hist_data
->fields
[idx
]);
6567 if (sort_key
->descending
)
6568 seq_puts(m
, ".descending");
6570 seq_printf(m
, ":size=%u", (1 << hist_data
->map
->map_bits
));
6571 if (hist_data
->enable_timestamps
)
6572 seq_printf(m
, ":clock=%s", hist_data
->attrs
->clock
);
6574 print_actions_spec(m
, hist_data
);
6576 if (data
->filter_str
)
6577 seq_printf(m
, " if %s", data
->filter_str
);
6580 seq_puts(m
, " [paused]");
6582 seq_puts(m
, " [active]");
6589 static int event_hist_trigger_init(struct event_trigger_ops
*ops
,
6590 struct event_trigger_data
*data
)
6592 struct hist_trigger_data
*hist_data
= data
->private_data
;
6594 if (!data
->ref
&& hist_data
->attrs
->name
)
6595 save_named_trigger(hist_data
->attrs
->name
, data
);
6602 static void unregister_field_var_hists(struct hist_trigger_data
*hist_data
)
6604 struct trace_event_file
*file
;
6609 for (i
= 0; i
< hist_data
->n_field_var_hists
; i
++) {
6610 file
= hist_data
->field_var_hists
[i
]->hist_data
->event_file
;
6611 cmd
= hist_data
->field_var_hists
[i
]->cmd
;
6612 ret
= event_hist_trigger_func(&trigger_hist_cmd
, file
,
6613 "!hist", "hist", cmd
);
6617 static void event_hist_trigger_free(struct event_trigger_ops
*ops
,
6618 struct event_trigger_data
*data
)
6620 struct hist_trigger_data
*hist_data
= data
->private_data
;
6622 if (WARN_ON_ONCE(data
->ref
<= 0))
6628 del_named_trigger(data
);
6630 trigger_data_free(data
);
6632 remove_hist_vars(hist_data
);
6634 unregister_field_var_hists(hist_data
);
6636 destroy_hist_data(hist_data
);
6640 static struct event_trigger_ops event_hist_trigger_ops
= {
6641 .func
= event_hist_trigger
,
6642 .print
= event_hist_trigger_print
,
6643 .init
= event_hist_trigger_init
,
6644 .free
= event_hist_trigger_free
,
6647 static int event_hist_trigger_named_init(struct event_trigger_ops
*ops
,
6648 struct event_trigger_data
*data
)
6652 save_named_trigger(data
->named_data
->name
, data
);
6654 event_hist_trigger_init(ops
, data
->named_data
);
6659 static void event_hist_trigger_named_free(struct event_trigger_ops
*ops
,
6660 struct event_trigger_data
*data
)
6662 if (WARN_ON_ONCE(data
->ref
<= 0))
6665 event_hist_trigger_free(ops
, data
->named_data
);
6669 del_named_trigger(data
);
6670 trigger_data_free(data
);
6674 static struct event_trigger_ops event_hist_trigger_named_ops
= {
6675 .func
= event_hist_trigger
,
6676 .print
= event_hist_trigger_print
,
6677 .init
= event_hist_trigger_named_init
,
6678 .free
= event_hist_trigger_named_free
,
6681 static struct event_trigger_ops
*event_hist_get_trigger_ops(char *cmd
,
6684 return &event_hist_trigger_ops
;
6687 static void hist_clear(struct event_trigger_data
*data
)
6689 struct hist_trigger_data
*hist_data
= data
->private_data
;
6692 pause_named_trigger(data
);
6694 tracepoint_synchronize_unregister();
6696 tracing_map_clear(hist_data
->map
);
6699 unpause_named_trigger(data
);
6702 static bool compatible_field(struct ftrace_event_field
*field
,
6703 struct ftrace_event_field
*test_field
)
6705 if (field
== test_field
)
6707 if (field
== NULL
|| test_field
== NULL
)
6709 if (strcmp(field
->name
, test_field
->name
) != 0)
6711 if (strcmp(field
->type
, test_field
->type
) != 0)
6713 if (field
->size
!= test_field
->size
)
6715 if (field
->is_signed
!= test_field
->is_signed
)
6721 static bool hist_trigger_match(struct event_trigger_data
*data
,
6722 struct event_trigger_data
*data_test
,
6723 struct event_trigger_data
*named_data
,
6726 struct tracing_map_sort_key
*sort_key
, *sort_key_test
;
6727 struct hist_trigger_data
*hist_data
, *hist_data_test
;
6728 struct hist_field
*key_field
, *key_field_test
;
6731 if (named_data
&& (named_data
!= data_test
) &&
6732 (named_data
!= data_test
->named_data
))
6735 if (!named_data
&& is_named_trigger(data_test
))
6738 hist_data
= data
->private_data
;
6739 hist_data_test
= data_test
->private_data
;
6741 if (hist_data
->n_vals
!= hist_data_test
->n_vals
||
6742 hist_data
->n_fields
!= hist_data_test
->n_fields
||
6743 hist_data
->n_sort_keys
!= hist_data_test
->n_sort_keys
)
6746 if (!ignore_filter
) {
6747 if ((data
->filter_str
&& !data_test
->filter_str
) ||
6748 (!data
->filter_str
&& data_test
->filter_str
))
6752 for_each_hist_field(i
, hist_data
) {
6753 key_field
= hist_data
->fields
[i
];
6754 key_field_test
= hist_data_test
->fields
[i
];
6756 if (key_field
->flags
!= key_field_test
->flags
)
6758 if (!compatible_field(key_field
->field
, key_field_test
->field
))
6760 if (key_field
->offset
!= key_field_test
->offset
)
6762 if (key_field
->size
!= key_field_test
->size
)
6764 if (key_field
->is_signed
!= key_field_test
->is_signed
)
6766 if (!!key_field
->var
.name
!= !!key_field_test
->var
.name
)
6768 if (key_field
->var
.name
&&
6769 strcmp(key_field
->var
.name
, key_field_test
->var
.name
) != 0)
6773 for (i
= 0; i
< hist_data
->n_sort_keys
; i
++) {
6774 sort_key
= &hist_data
->sort_keys
[i
];
6775 sort_key_test
= &hist_data_test
->sort_keys
[i
];
6777 if (sort_key
->field_idx
!= sort_key_test
->field_idx
||
6778 sort_key
->descending
!= sort_key_test
->descending
)
6782 if (!ignore_filter
&& data
->filter_str
&&
6783 (strcmp(data
->filter_str
, data_test
->filter_str
) != 0))
6786 if (!actions_match(hist_data
, hist_data_test
))
6792 static int hist_register_trigger(char *glob
, struct event_trigger_ops
*ops
,
6793 struct event_trigger_data
*data
,
6794 struct trace_event_file
*file
)
6796 struct hist_trigger_data
*hist_data
= data
->private_data
;
6797 struct event_trigger_data
*test
, *named_data
= NULL
;
6798 struct trace_array
*tr
= file
->tr
;
6801 if (hist_data
->attrs
->name
) {
6802 named_data
= find_named_trigger(hist_data
->attrs
->name
);
6804 if (!hist_trigger_match(data
, named_data
, named_data
,
6806 hist_err(tr
, HIST_ERR_NAMED_MISMATCH
, errpos(hist_data
->attrs
->name
));
6813 if (hist_data
->attrs
->name
&& !named_data
)
6816 lockdep_assert_held(&event_mutex
);
6818 list_for_each_entry(test
, &file
->triggers
, list
) {
6819 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6820 if (!hist_trigger_match(data
, test
, named_data
, false))
6822 if (hist_data
->attrs
->pause
)
6823 test
->paused
= true;
6824 else if (hist_data
->attrs
->cont
)
6825 test
->paused
= false;
6826 else if (hist_data
->attrs
->clear
)
6829 hist_err(tr
, HIST_ERR_TRIGGER_EEXIST
, 0);
6836 if (hist_data
->attrs
->cont
|| hist_data
->attrs
->clear
) {
6837 hist_err(tr
, HIST_ERR_TRIGGER_ENOENT_CLEAR
, 0);
6842 if (hist_data
->attrs
->pause
)
6843 data
->paused
= true;
6846 data
->private_data
= named_data
->private_data
;
6847 set_named_trigger_data(data
, named_data
);
6848 data
->ops
= &event_hist_trigger_named_ops
;
6851 if (data
->ops
->init
) {
6852 ret
= data
->ops
->init(data
->ops
, data
);
6857 if (hist_data
->enable_timestamps
) {
6858 char *clock
= hist_data
->attrs
->clock
;
6860 ret
= tracing_set_clock(file
->tr
, hist_data
->attrs
->clock
);
6862 hist_err(tr
, HIST_ERR_SET_CLOCK_FAIL
, errpos(clock
));
6866 tracing_set_time_stamp_abs(file
->tr
, true);
6870 destroy_hist_data(hist_data
);
6877 static int hist_trigger_enable(struct event_trigger_data
*data
,
6878 struct trace_event_file
*file
)
6882 list_add_tail_rcu(&data
->list
, &file
->triggers
);
6884 update_cond_flag(file
);
6886 if (trace_event_trigger_enable_disable(file
, 1) < 0) {
6887 list_del_rcu(&data
->list
);
6888 update_cond_flag(file
);
6895 static bool have_hist_trigger_match(struct event_trigger_data
*data
,
6896 struct trace_event_file
*file
)
6898 struct hist_trigger_data
*hist_data
= data
->private_data
;
6899 struct event_trigger_data
*test
, *named_data
= NULL
;
6902 lockdep_assert_held(&event_mutex
);
6904 if (hist_data
->attrs
->name
)
6905 named_data
= find_named_trigger(hist_data
->attrs
->name
);
6907 list_for_each_entry(test
, &file
->triggers
, list
) {
6908 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6909 if (hist_trigger_match(data
, test
, named_data
, false)) {
6919 static bool hist_trigger_check_refs(struct event_trigger_data
*data
,
6920 struct trace_event_file
*file
)
6922 struct hist_trigger_data
*hist_data
= data
->private_data
;
6923 struct event_trigger_data
*test
, *named_data
= NULL
;
6925 lockdep_assert_held(&event_mutex
);
6927 if (hist_data
->attrs
->name
)
6928 named_data
= find_named_trigger(hist_data
->attrs
->name
);
6930 list_for_each_entry(test
, &file
->triggers
, list
) {
6931 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6932 if (!hist_trigger_match(data
, test
, named_data
, false))
6934 hist_data
= test
->private_data
;
6935 if (check_var_refs(hist_data
))
6944 static void hist_unregister_trigger(char *glob
, struct event_trigger_ops
*ops
,
6945 struct event_trigger_data
*data
,
6946 struct trace_event_file
*file
)
6948 struct hist_trigger_data
*hist_data
= data
->private_data
;
6949 struct event_trigger_data
*test
, *named_data
= NULL
;
6950 bool unregistered
= false;
6952 lockdep_assert_held(&event_mutex
);
6954 if (hist_data
->attrs
->name
)
6955 named_data
= find_named_trigger(hist_data
->attrs
->name
);
6957 list_for_each_entry(test
, &file
->triggers
, list
) {
6958 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6959 if (!hist_trigger_match(data
, test
, named_data
, false))
6961 unregistered
= true;
6962 list_del_rcu(&test
->list
);
6963 trace_event_trigger_enable_disable(file
, 0);
6964 update_cond_flag(file
);
6969 if (unregistered
&& test
->ops
->free
)
6970 test
->ops
->free(test
->ops
, test
);
6972 if (hist_data
->enable_timestamps
) {
6973 if (!hist_data
->remove
|| unregistered
)
6974 tracing_set_time_stamp_abs(file
->tr
, false);
6978 static bool hist_file_check_refs(struct trace_event_file
*file
)
6980 struct hist_trigger_data
*hist_data
;
6981 struct event_trigger_data
*test
;
6983 lockdep_assert_held(&event_mutex
);
6985 list_for_each_entry(test
, &file
->triggers
, list
) {
6986 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6987 hist_data
= test
->private_data
;
6988 if (check_var_refs(hist_data
))
6996 static void hist_unreg_all(struct trace_event_file
*file
)
6998 struct event_trigger_data
*test
, *n
;
6999 struct hist_trigger_data
*hist_data
;
7000 struct synth_event
*se
;
7001 const char *se_name
;
7003 lockdep_assert_held(&event_mutex
);
7005 if (hist_file_check_refs(file
))
7008 list_for_each_entry_safe(test
, n
, &file
->triggers
, list
) {
7009 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
7010 hist_data
= test
->private_data
;
7011 list_del_rcu(&test
->list
);
7012 trace_event_trigger_enable_disable(file
, 0);
7014 se_name
= trace_event_name(file
->event_call
);
7015 se
= find_synth_event(se_name
);
7019 update_cond_flag(file
);
7020 if (hist_data
->enable_timestamps
)
7021 tracing_set_time_stamp_abs(file
->tr
, false);
7022 if (test
->ops
->free
)
7023 test
->ops
->free(test
->ops
, test
);
7028 static int event_hist_trigger_func(struct event_command
*cmd_ops
,
7029 struct trace_event_file
*file
,
7030 char *glob
, char *cmd
, char *param
)
7032 unsigned int hist_trigger_bits
= TRACING_MAP_BITS_DEFAULT
;
7033 struct event_trigger_data
*trigger_data
;
7034 struct hist_trigger_attrs
*attrs
;
7035 struct event_trigger_ops
*trigger_ops
;
7036 struct hist_trigger_data
*hist_data
;
7037 struct synth_event
*se
;
7038 const char *se_name
;
7039 bool remove
= false;
7043 lockdep_assert_held(&event_mutex
);
7045 if (glob
&& strlen(glob
)) {
7047 last_cmd_set(file
, param
);
7057 * separate the trigger from the filter (k:v [if filter])
7058 * allowing for whitespace in the trigger
7060 p
= trigger
= param
;
7062 p
= strstr(p
, "if");
7067 if (*(p
- 1) != ' ' && *(p
- 1) != '\t') {
7071 if (p
>= param
+ strlen(param
) - (sizeof("if") - 1) - 1)
7073 if (*(p
+ sizeof("if") - 1) != ' ' && *(p
+ sizeof("if") - 1) != '\t') {
7084 param
= strstrip(p
);
7085 trigger
= strstrip(trigger
);
7088 attrs
= parse_hist_trigger_attrs(file
->tr
, trigger
);
7090 return PTR_ERR(attrs
);
7092 if (attrs
->map_bits
)
7093 hist_trigger_bits
= attrs
->map_bits
;
7095 hist_data
= create_hist_data(hist_trigger_bits
, attrs
, file
, remove
);
7096 if (IS_ERR(hist_data
)) {
7097 destroy_hist_trigger_attrs(attrs
);
7098 return PTR_ERR(hist_data
);
7101 trigger_ops
= cmd_ops
->get_trigger_ops(cmd
, trigger
);
7103 trigger_data
= kzalloc(sizeof(*trigger_data
), GFP_KERNEL
);
7104 if (!trigger_data
) {
7109 trigger_data
->count
= -1;
7110 trigger_data
->ops
= trigger_ops
;
7111 trigger_data
->cmd_ops
= cmd_ops
;
7113 INIT_LIST_HEAD(&trigger_data
->list
);
7114 RCU_INIT_POINTER(trigger_data
->filter
, NULL
);
7116 trigger_data
->private_data
= hist_data
;
7118 /* if param is non-empty, it's supposed to be a filter */
7119 if (param
&& cmd_ops
->set_filter
) {
7120 ret
= cmd_ops
->set_filter(param
, trigger_data
, file
);
7126 if (!have_hist_trigger_match(trigger_data
, file
))
7129 if (hist_trigger_check_refs(trigger_data
, file
)) {
7134 cmd_ops
->unreg(glob
+1, trigger_ops
, trigger_data
, file
);
7135 se_name
= trace_event_name(file
->event_call
);
7136 se
= find_synth_event(se_name
);
7143 ret
= cmd_ops
->reg(glob
, trigger_ops
, trigger_data
, file
);
7145 * The above returns on success the # of triggers registered,
7146 * but if it didn't register any it returns zero. Consider no
7147 * triggers registered a failure too.
7150 if (!(attrs
->pause
|| attrs
->cont
|| attrs
->clear
))
7156 if (get_named_trigger_data(trigger_data
))
7159 if (has_hist_vars(hist_data
))
7160 save_hist_vars(hist_data
);
7162 ret
= create_actions(hist_data
);
7166 ret
= tracing_map_init(hist_data
->map
);
7170 ret
= hist_trigger_enable(trigger_data
, file
);
7174 se_name
= trace_event_name(file
->event_call
);
7175 se
= find_synth_event(se_name
);
7178 /* Just return zero, not the number of registered triggers */
7186 cmd_ops
->unreg(glob
+1, trigger_ops
, trigger_data
, file
);
7188 if (cmd_ops
->set_filter
)
7189 cmd_ops
->set_filter(NULL
, trigger_data
, NULL
);
7191 remove_hist_vars(hist_data
);
7193 kfree(trigger_data
);
7195 destroy_hist_data(hist_data
);
7199 static struct event_command trigger_hist_cmd
= {
7201 .trigger_type
= ETT_EVENT_HIST
,
7202 .flags
= EVENT_CMD_FL_NEEDS_REC
,
7203 .func
= event_hist_trigger_func
,
7204 .reg
= hist_register_trigger
,
7205 .unreg
= hist_unregister_trigger
,
7206 .unreg_all
= hist_unreg_all
,
7207 .get_trigger_ops
= event_hist_get_trigger_ops
,
7208 .set_filter
= set_trigger_filter
,
7211 __init
int register_trigger_hist_cmd(void)
7215 ret
= register_event_command(&trigger_hist_cmd
);
7222 hist_enable_trigger(struct event_trigger_data
*data
, void *rec
,
7223 struct ring_buffer_event
*event
)
7225 struct enable_trigger_data
*enable_data
= data
->private_data
;
7226 struct event_trigger_data
*test
;
7228 list_for_each_entry_rcu(test
, &enable_data
->file
->triggers
, list
,
7229 lockdep_is_held(&event_mutex
)) {
7230 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
7231 if (enable_data
->enable
)
7232 test
->paused
= false;
7234 test
->paused
= true;
7240 hist_enable_count_trigger(struct event_trigger_data
*data
, void *rec
,
7241 struct ring_buffer_event
*event
)
7246 if (data
->count
!= -1)
7249 hist_enable_trigger(data
, rec
, event
);
7252 static struct event_trigger_ops hist_enable_trigger_ops
= {
7253 .func
= hist_enable_trigger
,
7254 .print
= event_enable_trigger_print
,
7255 .init
= event_trigger_init
,
7256 .free
= event_enable_trigger_free
,
7259 static struct event_trigger_ops hist_enable_count_trigger_ops
= {
7260 .func
= hist_enable_count_trigger
,
7261 .print
= event_enable_trigger_print
,
7262 .init
= event_trigger_init
,
7263 .free
= event_enable_trigger_free
,
7266 static struct event_trigger_ops hist_disable_trigger_ops
= {
7267 .func
= hist_enable_trigger
,
7268 .print
= event_enable_trigger_print
,
7269 .init
= event_trigger_init
,
7270 .free
= event_enable_trigger_free
,
7273 static struct event_trigger_ops hist_disable_count_trigger_ops
= {
7274 .func
= hist_enable_count_trigger
,
7275 .print
= event_enable_trigger_print
,
7276 .init
= event_trigger_init
,
7277 .free
= event_enable_trigger_free
,
7280 static struct event_trigger_ops
*
7281 hist_enable_get_trigger_ops(char *cmd
, char *param
)
7283 struct event_trigger_ops
*ops
;
7286 enable
= (strcmp(cmd
, ENABLE_HIST_STR
) == 0);
7289 ops
= param
? &hist_enable_count_trigger_ops
:
7290 &hist_enable_trigger_ops
;
7292 ops
= param
? &hist_disable_count_trigger_ops
:
7293 &hist_disable_trigger_ops
;
7298 static void hist_enable_unreg_all(struct trace_event_file
*file
)
7300 struct event_trigger_data
*test
, *n
;
7302 list_for_each_entry_safe(test
, n
, &file
->triggers
, list
) {
7303 if (test
->cmd_ops
->trigger_type
== ETT_HIST_ENABLE
) {
7304 list_del_rcu(&test
->list
);
7305 update_cond_flag(file
);
7306 trace_event_trigger_enable_disable(file
, 0);
7307 if (test
->ops
->free
)
7308 test
->ops
->free(test
->ops
, test
);
7313 static struct event_command trigger_hist_enable_cmd
= {
7314 .name
= ENABLE_HIST_STR
,
7315 .trigger_type
= ETT_HIST_ENABLE
,
7316 .func
= event_enable_trigger_func
,
7317 .reg
= event_enable_register_trigger
,
7318 .unreg
= event_enable_unregister_trigger
,
7319 .unreg_all
= hist_enable_unreg_all
,
7320 .get_trigger_ops
= hist_enable_get_trigger_ops
,
7321 .set_filter
= set_trigger_filter
,
7324 static struct event_command trigger_hist_disable_cmd
= {
7325 .name
= DISABLE_HIST_STR
,
7326 .trigger_type
= ETT_HIST_ENABLE
,
7327 .func
= event_enable_trigger_func
,
7328 .reg
= event_enable_register_trigger
,
7329 .unreg
= event_enable_unregister_trigger
,
7330 .unreg_all
= hist_enable_unreg_all
,
7331 .get_trigger_ops
= hist_enable_get_trigger_ops
,
7332 .set_filter
= set_trigger_filter
,
7335 static __init
void unregister_trigger_hist_enable_disable_cmds(void)
7337 unregister_event_command(&trigger_hist_enable_cmd
);
7338 unregister_event_command(&trigger_hist_disable_cmd
);
7341 __init
int register_trigger_hist_enable_disable_cmds(void)
7345 ret
= register_event_command(&trigger_hist_enable_cmd
);
7346 if (WARN_ON(ret
< 0))
7348 ret
= register_event_command(&trigger_hist_disable_cmd
);
7349 if (WARN_ON(ret
< 0))
7350 unregister_trigger_hist_enable_disable_cmds();
7355 static __init
int trace_events_hist_init(void)
7357 struct dentry
*entry
= NULL
;
7358 struct dentry
*d_tracer
;
7361 err
= dyn_event_register(&synth_event_ops
);
7363 pr_warn("Could not register synth_event_ops\n");
7367 d_tracer
= tracing_init_dentry();
7368 if (IS_ERR(d_tracer
)) {
7369 err
= PTR_ERR(d_tracer
);
7373 entry
= tracefs_create_file("synthetic_events", 0644, d_tracer
,
7374 NULL
, &synth_events_fops
);
7382 pr_warn("Could not create tracefs 'synthetic_events' entry\n");
7387 fs_initcall(trace_events_hist_init
);