kernel/trace/trace_events_hist.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * trace_events_hist - trace event hist triggers
5 * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
6 */
8 #include <linux/module.h>
9 #include <linux/kallsyms.h>
10 #include <linux/mutex.h>
11 #include <linux/slab.h>
12 #include <linux/stacktrace.h>
13 #include <linux/rculist.h>
14 #include <linux/tracefs.h>
16 #include "tracing_map.h"
17 #include "trace.h"
18 #include "trace_dynevent.h"
20 #define SYNTH_SYSTEM "synthetic"
21 #define SYNTH_FIELDS_MAX 16
23 #define STR_VAR_LEN_MAX 32 /* must be multiple of sizeof(u64) */
25 #define ERRORS \
26 C(NONE, "No error"), \
27 C(DUPLICATE_VAR, "Variable already defined"), \
28 C(VAR_NOT_UNIQUE, "Variable name not unique, need to use fully qualified name (subsys.event.var) for variable"), \
29 C(TOO_MANY_VARS, "Too many variables defined"), \
30 C(MALFORMED_ASSIGNMENT, "Malformed assignment"), \
31 C(NAMED_MISMATCH, "Named hist trigger doesn't match existing named trigger (includes variables)"), \
32 C(TRIGGER_EEXIST, "Hist trigger already exists"), \
33 C(TRIGGER_ENOENT_CLEAR, "Can't clear or continue a nonexistent hist trigger"), \
34 C(SET_CLOCK_FAIL, "Couldn't set trace_clock"), \
35 C(BAD_FIELD_MODIFIER, "Invalid field modifier"), \
36 C(TOO_MANY_SUBEXPR, "Too many subexpressions (3 max)"), \
37 C(TIMESTAMP_MISMATCH, "Timestamp units in expression don't match"), \
38 C(TOO_MANY_FIELD_VARS, "Too many field variables defined"), \
39 C(EVENT_FILE_NOT_FOUND, "Event file not found"), \
40 C(HIST_NOT_FOUND, "Matching event histogram not found"), \
41 C(HIST_CREATE_FAIL, "Couldn't create histogram for field"), \
42 C(SYNTH_VAR_NOT_FOUND, "Couldn't find synthetic variable"), \
43 C(SYNTH_EVENT_NOT_FOUND,"Couldn't find synthetic event"), \
44 C(SYNTH_TYPE_MISMATCH, "Param type doesn't match synthetic event field type"), \
45 C(SYNTH_COUNT_MISMATCH, "Param count doesn't match synthetic event field count"), \
46 C(FIELD_VAR_PARSE_FAIL, "Couldn't parse field variable"), \
47 C(VAR_CREATE_FIND_FAIL, "Couldn't create or find variable"), \
48 C(ONX_NOT_VAR, "For onmax(x) or onchange(x), x must be a variable"), \
49 C(ONX_VAR_NOT_FOUND, "Couldn't find onmax or onchange variable"), \
50 C(ONX_VAR_CREATE_FAIL, "Couldn't create onmax or onchange variable"), \
51 C(FIELD_VAR_CREATE_FAIL,"Couldn't create field variable"), \
52 C(TOO_MANY_PARAMS, "Too many action params"), \
53 C(PARAM_NOT_FOUND, "Couldn't find param"), \
54 C(INVALID_PARAM, "Invalid action param"), \
55 C(ACTION_NOT_FOUND, "No action found"), \
56 C(NO_SAVE_PARAMS, "No params found for save()"), \
57 C(TOO_MANY_SAVE_ACTIONS,"Can't have more than one save() action per hist"), \
58 C(ACTION_MISMATCH, "Handler doesn't support action"), \
59 C(NO_CLOSING_PAREN, "No closing paren found"), \
60 C(SUBSYS_NOT_FOUND, "Missing subsystem"), \
61 C(INVALID_SUBSYS_EVENT, "Invalid subsystem or event name"), \
62 C(INVALID_REF_KEY, "Using variable references in keys not supported"), \
63 C(VAR_NOT_FOUND, "Couldn't find variable"), \
64 C(FIELD_NOT_FOUND, "Couldn't find field"),
66 #undef C
67 #define C(a, b) HIST_ERR_##a
69 enum { ERRORS };
71 #undef C
72 #define C(a, b) b
74 static const char *err_text[] = { ERRORS };
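/*
 * ERRORS is an X-macro table: C() is first redefined to HIST_ERR_##a to
 * expand the list into the HIST_ERR_* enum above, then redefined to b to
 * expand the same list into the parallel err_text[] array, keeping the
 * error codes and their messages in sync from a single definition.
 */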
76 struct hist_field;
78 typedef u64 (*hist_field_fn_t) (struct hist_field *field,
79 struct tracing_map_elt *elt,
80 struct ring_buffer_event *rbe,
81 void *event);
83 #define HIST_FIELD_OPERANDS_MAX 2
84 #define HIST_FIELDS_MAX (TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX)
85 #define HIST_ACTIONS_MAX 8
87 enum field_op_id {
88 FIELD_OP_NONE,
89 FIELD_OP_PLUS,
90 FIELD_OP_MINUS,
91 FIELD_OP_UNARY_MINUS,
92 };
94 /*
95 * A hist_var (histogram variable) contains variable information for
96 * hist_fields having the HIST_FIELD_FL_VAR or HIST_FIELD_FL_VAR_REF
97 * flag set. A hist_var has a variable name e.g. ts0, and is
98 * associated with a given histogram trigger, as specified by
99 * hist_data. The hist_var idx is the unique index assigned to the
100 * variable by the hist trigger's tracing_map. The idx is what is
101 * used to set a variable's value and, by a variable reference, to
102 * retrieve it.
103 */
104 struct hist_var {
105 char *name;
106 struct hist_trigger_data *hist_data;
107 unsigned int idx;
108 };
110 struct hist_field {
111 struct ftrace_event_field *field;
112 unsigned long flags;
113 hist_field_fn_t fn;
114 unsigned int size;
115 unsigned int offset;
116 unsigned int is_signed;
117 const char *type;
118 struct hist_field *operands[HIST_FIELD_OPERANDS_MAX];
119 struct hist_trigger_data *hist_data;
121 /*
122 * Variable fields contain variable-specific info in var.
123 */
124 struct hist_var var;
125 enum field_op_id operator;
126 char *system;
127 char *event_name;
129 /*
130 * The name field is used for EXPR and VAR_REF fields. VAR
131 * fields contain the variable name in var.name.
132 */
133 char *name;
135 /*
136 * When a histogram trigger is hit, if it has any references
137 * to variables, the values of those variables are collected
138 * into a var_ref_vals array by resolve_var_refs(). The
139 * current value of each variable is read from the tracing_map
140 * using the hist field's hist_var.idx and entered into the
141 * var_ref_idx entry i.e. var_ref_vals[var_ref_idx].
142 */
143 unsigned int var_ref_idx;
144 bool read_once;
145 };
147 static u64 hist_field_none(struct hist_field *field,
148 struct tracing_map_elt *elt,
149 struct ring_buffer_event *rbe,
150 void *event)
152 return 0;
155 static u64 hist_field_counter(struct hist_field *field,
156 struct tracing_map_elt *elt,
157 struct ring_buffer_event *rbe,
158 void *event)
160 return 1;
163 static u64 hist_field_string(struct hist_field *hist_field,
164 struct tracing_map_elt *elt,
165 struct ring_buffer_event *rbe,
166 void *event)
168 char *addr = (char *)(event + hist_field->field->offset);
170 return (u64)(unsigned long)addr;
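/*
 * Dynamic (__data_loc) string fields store a 32-bit descriptor in the
 * record: the low 16 bits are the offset of the string data within the
 * event, the high 16 bits its length. Only the offset is needed to
 * locate the string here.
 */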
173 static u64 hist_field_dynstring(struct hist_field *hist_field,
174 struct tracing_map_elt *elt,
175 struct ring_buffer_event *rbe,
176 void *event)
178 u32 str_item = *(u32 *)(event + hist_field->field->offset);
179 int str_loc = str_item & 0xffff;
180 char *addr = (char *)(event + str_loc);
182 return (u64)(unsigned long)addr;
185 static u64 hist_field_pstring(struct hist_field *hist_field,
186 struct tracing_map_elt *elt,
187 struct ring_buffer_event *rbe,
188 void *event)
190 char **addr = (char **)(event + hist_field->field->offset);
192 return (u64)(unsigned long)*addr;
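/*
 * The .log2 modifier buckets a value by the ceiling of its base-2 log:
 * ilog2(roundup_pow_of_two(val)), so e.g. values 5-8 all fall into
 * bucket 3.
 */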
195 static u64 hist_field_log2(struct hist_field *hist_field,
196 struct tracing_map_elt *elt,
197 struct ring_buffer_event *rbe,
198 void *event)
200 struct hist_field *operand = hist_field->operands[0];
202 u64 val = operand->fn(operand, elt, rbe, event);
204 return (u64) ilog2(roundup_pow_of_two(val));
207 static u64 hist_field_plus(struct hist_field *hist_field,
208 struct tracing_map_elt *elt,
209 struct ring_buffer_event *rbe,
210 void *event)
212 struct hist_field *operand1 = hist_field->operands[0];
213 struct hist_field *operand2 = hist_field->operands[1];
215 u64 val1 = operand1->fn(operand1, elt, rbe, event);
216 u64 val2 = operand2->fn(operand2, elt, rbe, event);
218 return val1 + val2;
221 static u64 hist_field_minus(struct hist_field *hist_field,
222 struct tracing_map_elt *elt,
223 struct ring_buffer_event *rbe,
224 void *event)
226 struct hist_field *operand1 = hist_field->operands[0];
227 struct hist_field *operand2 = hist_field->operands[1];
229 u64 val1 = operand1->fn(operand1, elt, rbe, event);
230 u64 val2 = operand2->fn(operand2, elt, rbe, event);
232 return val1 - val2;
235 static u64 hist_field_unary_minus(struct hist_field *hist_field,
236 struct tracing_map_elt *elt,
237 struct ring_buffer_event *rbe,
238 void *event)
240 struct hist_field *operand = hist_field->operands[0];
242 s64 sval = (s64)operand->fn(operand, elt, rbe, event);
243 u64 val = (u64)-sval;
245 return val;
248 #define DEFINE_HIST_FIELD_FN(type) \
249 static u64 hist_field_##type(struct hist_field *hist_field, \
250 struct tracing_map_elt *elt, \
251 struct ring_buffer_event *rbe, \
252 void *event) \
254 type *addr = (type *)(event + hist_field->field->offset); \
256 return (u64)(unsigned long)*addr; \
259 DEFINE_HIST_FIELD_FN(s64);
260 DEFINE_HIST_FIELD_FN(u64);
261 DEFINE_HIST_FIELD_FN(s32);
262 DEFINE_HIST_FIELD_FN(u32);
263 DEFINE_HIST_FIELD_FN(s16);
264 DEFINE_HIST_FIELD_FN(u16);
265 DEFINE_HIST_FIELD_FN(s8);
266 DEFINE_HIST_FIELD_FN(u8);
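/*
 * The DEFINE_HIST_FIELD_FN() instances above generate one fetch helper
 * per basic type, e.g. hist_field_u32() reads a u32 at the field's
 * offset within the record and returns it widened to u64;
 * select_value_fn() below picks the right helper from a field's size
 * and signedness.
 */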
268 #define for_each_hist_field(i, hist_data) \
269 for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)
271 #define for_each_hist_val_field(i, hist_data) \
272 for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)
274 #define for_each_hist_key_field(i, hist_data) \
275 for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)
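/*
 * hist_data->fields[] stores the value fields first and the key fields
 * after them, which is why key iteration starts at n_vals.
 */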
277 #define HIST_STACKTRACE_DEPTH 16
278 #define HIST_STACKTRACE_SIZE (HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
279 #define HIST_STACKTRACE_SKIP 5
281 #define HITCOUNT_IDX 0
282 #define HIST_KEY_SIZE_MAX (MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
284 enum hist_field_flags {
285 HIST_FIELD_FL_HITCOUNT = 1 << 0,
286 HIST_FIELD_FL_KEY = 1 << 1,
287 HIST_FIELD_FL_STRING = 1 << 2,
288 HIST_FIELD_FL_HEX = 1 << 3,
289 HIST_FIELD_FL_SYM = 1 << 4,
290 HIST_FIELD_FL_SYM_OFFSET = 1 << 5,
291 HIST_FIELD_FL_EXECNAME = 1 << 6,
292 HIST_FIELD_FL_SYSCALL = 1 << 7,
293 HIST_FIELD_FL_STACKTRACE = 1 << 8,
294 HIST_FIELD_FL_LOG2 = 1 << 9,
295 HIST_FIELD_FL_TIMESTAMP = 1 << 10,
296 HIST_FIELD_FL_TIMESTAMP_USECS = 1 << 11,
297 HIST_FIELD_FL_VAR = 1 << 12,
298 HIST_FIELD_FL_EXPR = 1 << 13,
299 HIST_FIELD_FL_VAR_REF = 1 << 14,
300 HIST_FIELD_FL_CPU = 1 << 15,
301 HIST_FIELD_FL_ALIAS = 1 << 16,
302 };
304 struct var_defs {
305 unsigned int n_vars;
306 char *name[TRACING_MAP_VARS_MAX];
307 char *expr[TRACING_MAP_VARS_MAX];
308 };
310 struct hist_trigger_attrs {
311 char *keys_str;
312 char *vals_str;
313 char *sort_key_str;
314 char *name;
315 char *clock;
316 bool pause;
317 bool cont;
318 bool clear;
319 bool ts_in_usecs;
320 unsigned int map_bits;
322 char *assignment_str[TRACING_MAP_VARS_MAX];
323 unsigned int n_assignments;
325 char *action_str[HIST_ACTIONS_MAX];
326 unsigned int n_actions;
328 struct var_defs var_defs;
329 };
331 struct field_var {
332 struct hist_field *var;
333 struct hist_field *val;
334 };
336 struct field_var_hist {
337 struct hist_trigger_data *hist_data;
338 char *cmd;
339 };
341 struct hist_trigger_data {
342 struct hist_field *fields[HIST_FIELDS_MAX];
343 unsigned int n_vals;
344 unsigned int n_keys;
345 unsigned int n_fields;
346 unsigned int n_vars;
347 unsigned int key_size;
348 struct tracing_map_sort_key sort_keys[TRACING_MAP_SORT_KEYS_MAX];
349 unsigned int n_sort_keys;
350 struct trace_event_file *event_file;
351 struct hist_trigger_attrs *attrs;
352 struct tracing_map *map;
353 bool enable_timestamps;
354 bool remove;
355 struct hist_field *var_refs[TRACING_MAP_VARS_MAX];
356 unsigned int n_var_refs;
358 struct action_data *actions[HIST_ACTIONS_MAX];
359 unsigned int n_actions;
361 struct field_var *field_vars[SYNTH_FIELDS_MAX];
362 unsigned int n_field_vars;
363 unsigned int n_field_var_str;
364 struct field_var_hist *field_var_hists[SYNTH_FIELDS_MAX];
365 unsigned int n_field_var_hists;
367 struct field_var *save_vars[SYNTH_FIELDS_MAX];
368 unsigned int n_save_vars;
369 unsigned int n_save_var_str;
370 };
372 static int synth_event_create(int argc, const char **argv);
373 static int synth_event_show(struct seq_file *m, struct dyn_event *ev);
374 static int synth_event_release(struct dyn_event *ev);
375 static bool synth_event_is_busy(struct dyn_event *ev);
376 static bool synth_event_match(const char *system, const char *event,
377 struct dyn_event *ev);
379 static struct dyn_event_operations synth_event_ops = {
380 .create = synth_event_create,
381 .show = synth_event_show,
382 .is_busy = synth_event_is_busy,
383 .free = synth_event_release,
384 .match = synth_event_match,
385 };
387 struct synth_field {
388 char *type;
389 char *name;
390 size_t size;
391 bool is_signed;
392 bool is_string;
393 };
395 struct synth_event {
396 struct dyn_event devent;
397 int ref;
398 char *name;
399 struct synth_field **fields;
400 unsigned int n_fields;
401 unsigned int n_u64;
402 struct trace_event_class class;
403 struct trace_event_call call;
404 struct tracepoint *tp;
405 };
407 static bool is_synth_event(struct dyn_event *ev)
409 return ev->ops == &synth_event_ops;
412 static struct synth_event *to_synth_event(struct dyn_event *ev)
414 return container_of(ev, struct synth_event, devent);
417 static bool synth_event_is_busy(struct dyn_event *ev)
419 struct synth_event *event = to_synth_event(ev);
421 return event->ref != 0;
424 static bool synth_event_match(const char *system, const char *event,
425 struct dyn_event *ev)
427 struct synth_event *sev = to_synth_event(ev);
429 return strcmp(sev->name, event) == 0 &&
430 (!system || strcmp(system, SYNTH_SYSTEM) == 0);
433 struct action_data;
435 typedef void (*action_fn_t) (struct hist_trigger_data *hist_data,
436 struct tracing_map_elt *elt, void *rec,
437 struct ring_buffer_event *rbe, void *key,
438 struct action_data *data, u64 *var_ref_vals);
440 typedef bool (*check_track_val_fn_t) (u64 track_val, u64 var_val);
442 enum handler_id {
443 HANDLER_ONMATCH = 1,
444 HANDLER_ONMAX,
445 HANDLER_ONCHANGE,
448 enum action_id {
449 ACTION_SAVE = 1,
450 ACTION_TRACE,
451 ACTION_SNAPSHOT,
454 struct action_data {
455 enum handler_id handler;
456 enum action_id action;
457 char *action_name;
458 action_fn_t fn;
460 unsigned int n_params;
461 char *params[SYNTH_FIELDS_MAX];
464 * When a histogram trigger is hit, the values of any
465 * references to variables, including variables being passed
466 * as parameters to synthetic events, are collected into a
467 * var_ref_vals array. This var_ref_idx is the index of the
468 * first param in the array to be passed to the synthetic
469 * event invocation.
471 unsigned int var_ref_idx;
472 struct synth_event *synth_event;
473 bool use_trace_keyword;
474 char *synth_event_name;
476 union {
477 struct {
478 char *event;
479 char *event_system;
480 } match_data;
482 struct {
484 * var_str contains the $-unstripped variable
485 * name referenced by var_ref, and used when
486 * printing the action. Because var_ref
487 * creation is deferred to create_actions(),
488 * we need a per-action way to save it until
489 * then, thus var_str.
491 char *var_str;
494 * var_ref refers to the variable being
494 * tracked, e.g. onmax($var).
497 struct hist_field *var_ref;
500 * track_var contains the 'invisible' tracking
501 * variable created to keep the current
502 * e.g. max value.
504 struct hist_field *track_var;
506 check_track_val_fn_t check_val;
507 action_fn_t save_data;
508 } track_data;
509 };
510 };
512 struct track_data {
513 u64 track_val;
514 bool updated;
516 unsigned int key_len;
517 void *key;
518 struct tracing_map_elt elt;
520 struct action_data *action_data;
521 struct hist_trigger_data *hist_data;
522 };
524 struct hist_elt_data {
525 char *comm;
526 u64 *var_ref_vals;
527 char *field_var_str[SYNTH_FIELDS_MAX];
528 };
530 struct snapshot_context {
531 struct tracing_map_elt *elt;
532 void *key;
533 };
535 static void track_data_free(struct track_data *track_data)
537 struct hist_elt_data *elt_data;
539 if (!track_data)
540 return;
542 kfree(track_data->key);
544 elt_data = track_data->elt.private_data;
545 if (elt_data) {
546 kfree(elt_data->comm);
547 kfree(elt_data);
550 kfree(track_data);
553 static struct track_data *track_data_alloc(unsigned int key_len,
554 struct action_data *action_data,
555 struct hist_trigger_data *hist_data)
557 struct track_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
558 struct hist_elt_data *elt_data;
560 if (!data)
561 return ERR_PTR(-ENOMEM);
563 data->key = kzalloc(key_len, GFP_KERNEL);
564 if (!data->key) {
565 track_data_free(data);
566 return ERR_PTR(-ENOMEM);
569 data->key_len = key_len;
570 data->action_data = action_data;
571 data->hist_data = hist_data;
573 elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
574 if (!elt_data) {
575 track_data_free(data);
576 return ERR_PTR(-ENOMEM);
578 data->elt.private_data = elt_data;
580 elt_data->comm = kzalloc(TASK_COMM_LEN, GFP_KERNEL);
581 if (!elt_data->comm) {
582 track_data_free(data);
583 return ERR_PTR(-ENOMEM);
586 return data;
589 static char last_cmd[MAX_FILTER_STR_VAL];
590 static char last_cmd_loc[MAX_FILTER_STR_VAL];
592 static int errpos(char *str)
594 return err_pos(last_cmd, str);
597 static void last_cmd_set(struct trace_event_file *file, char *str)
599 const char *system = NULL, *name = NULL;
600 struct trace_event_call *call;
602 if (!str)
603 return;
605 strncpy(last_cmd, str, MAX_FILTER_STR_VAL - 1);
607 if (file) {
608 call = file->event_call;
610 system = call->class->system;
611 if (system) {
612 name = trace_event_name(call);
613 if (!name)
614 system = NULL;
618 if (system)
619 snprintf(last_cmd_loc, MAX_FILTER_STR_VAL, "hist:%s:%s", system, name);
622 static void hist_err(struct trace_array *tr, u8 err_type, u8 err_pos)
624 tracing_log_err(tr, last_cmd_loc, last_cmd, err_text,
625 err_type, err_pos);
628 static void hist_err_clear(void)
630 last_cmd[0] = '\0';
631 last_cmd_loc[0] = '\0';
634 struct synth_trace_event {
635 struct trace_entry ent;
636 u64 fields[];
637 };
639 static int synth_event_define_fields(struct trace_event_call *call)
641 struct synth_trace_event trace;
642 int offset = offsetof(typeof(trace), fields);
643 struct synth_event *event = call->data;
644 unsigned int i, size, n_u64;
645 char *name, *type;
646 bool is_signed;
647 int ret = 0;
649 for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
650 size = event->fields[i]->size;
651 is_signed = event->fields[i]->is_signed;
652 type = event->fields[i]->type;
653 name = event->fields[i]->name;
654 ret = trace_define_field(call, type, name, offset, size,
655 is_signed, FILTER_OTHER);
656 if (ret)
657 break;
659 if (event->fields[i]->is_string) {
660 offset += STR_VAR_LEN_MAX;
661 n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
662 } else {
663 offset += sizeof(u64);
664 n_u64++;
668 event->n_u64 = n_u64;
670 return ret;
673 static bool synth_field_signed(char *type)
675 if (str_has_prefix(type, "u"))
676 return false;
678 return true;
681 static int synth_field_is_string(char *type)
683 if (strstr(type, "char[") != NULL)
684 return true;
686 return false;
689 static int synth_field_string_size(char *type)
691 char buf[4], *end, *start;
692 unsigned int len;
693 int size, err;
695 start = strstr(type, "char[");
696 if (start == NULL)
697 return -EINVAL;
698 start += sizeof("char[") - 1;
700 end = strchr(type, ']');
701 if (!end || end < start)
702 return -EINVAL;
704 len = end - start;
705 if (len > 3)
706 return -EINVAL;
708 strncpy(buf, start, len);
709 buf[len] = '\0';
711 err = kstrtouint(buf, 0, &size);
712 if (err)
713 return err;
715 if (size > STR_VAR_LEN_MAX)
716 return -EINVAL;
718 return size;
721 static int synth_field_size(char *type)
723 int size = 0;
725 if (strcmp(type, "s64") == 0)
726 size = sizeof(s64);
727 else if (strcmp(type, "u64") == 0)
728 size = sizeof(u64);
729 else if (strcmp(type, "s32") == 0)
730 size = sizeof(s32);
731 else if (strcmp(type, "u32") == 0)
732 size = sizeof(u32);
733 else if (strcmp(type, "s16") == 0)
734 size = sizeof(s16);
735 else if (strcmp(type, "u16") == 0)
736 size = sizeof(u16);
737 else if (strcmp(type, "s8") == 0)
738 size = sizeof(s8);
739 else if (strcmp(type, "u8") == 0)
740 size = sizeof(u8);
741 else if (strcmp(type, "char") == 0)
742 size = sizeof(char);
743 else if (strcmp(type, "unsigned char") == 0)
744 size = sizeof(unsigned char);
745 else if (strcmp(type, "int") == 0)
746 size = sizeof(int);
747 else if (strcmp(type, "unsigned int") == 0)
748 size = sizeof(unsigned int);
749 else if (strcmp(type, "long") == 0)
750 size = sizeof(long);
751 else if (strcmp(type, "unsigned long") == 0)
752 size = sizeof(unsigned long);
753 else if (strcmp(type, "pid_t") == 0)
754 size = sizeof(pid_t);
755 else if (synth_field_is_string(type))
756 size = synth_field_string_size(type);
758 return size;
761 static const char *synth_field_fmt(char *type)
763 const char *fmt = "%llu";
765 if (strcmp(type, "s64") == 0)
766 fmt = "%lld";
767 else if (strcmp(type, "u64") == 0)
768 fmt = "%llu";
769 else if (strcmp(type, "s32") == 0)
770 fmt = "%d";
771 else if (strcmp(type, "u32") == 0)
772 fmt = "%u";
773 else if (strcmp(type, "s16") == 0)
774 fmt = "%d";
775 else if (strcmp(type, "u16") == 0)
776 fmt = "%u";
777 else if (strcmp(type, "s8") == 0)
778 fmt = "%d";
779 else if (strcmp(type, "u8") == 0)
780 fmt = "%u";
781 else if (strcmp(type, "char") == 0)
782 fmt = "%d";
783 else if (strcmp(type, "unsigned char") == 0)
784 fmt = "%u";
785 else if (strcmp(type, "int") == 0)
786 fmt = "%d";
787 else if (strcmp(type, "unsigned int") == 0)
788 fmt = "%u";
789 else if (strcmp(type, "long") == 0)
790 fmt = "%ld";
791 else if (strcmp(type, "unsigned long") == 0)
792 fmt = "%lu";
793 else if (strcmp(type, "pid_t") == 0)
794 fmt = "%d";
795 else if (synth_field_is_string(type))
796 fmt = "%s";
798 return fmt;
801 static enum print_line_t print_synth_event(struct trace_iterator *iter,
802 int flags,
803 struct trace_event *event)
805 struct trace_array *tr = iter->tr;
806 struct trace_seq *s = &iter->seq;
807 struct synth_trace_event *entry;
808 struct synth_event *se;
809 unsigned int i, n_u64;
810 char print_fmt[32];
811 const char *fmt;
813 entry = (struct synth_trace_event *)iter->ent;
814 se = container_of(event, struct synth_event, call.event);
816 trace_seq_printf(s, "%s: ", se->name);
818 for (i = 0, n_u64 = 0; i < se->n_fields; i++) {
819 if (trace_seq_has_overflowed(s))
820 goto end;
822 fmt = synth_field_fmt(se->fields[i]->type);
824 /* parameter types */
825 if (tr->trace_flags & TRACE_ITER_VERBOSE)
826 trace_seq_printf(s, "%s ", fmt);
828 snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt);
830 /* parameter values */
831 if (se->fields[i]->is_string) {
832 trace_seq_printf(s, print_fmt, se->fields[i]->name,
833 (char *)&entry->fields[n_u64],
834 i == se->n_fields - 1 ? "" : " ");
835 n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
836 } else {
837 trace_seq_printf(s, print_fmt, se->fields[i]->name,
838 entry->fields[n_u64],
839 i == se->n_fields - 1 ? "" : " ");
840 n_u64++;
843 end:
844 trace_seq_putc(s, '\n');
846 return trace_handle_return(s);
849 static struct trace_event_functions synth_event_funcs = {
850 .trace = print_synth_event
851 };
853 static notrace void trace_event_raw_event_synth(void *__data,
854 u64 *var_ref_vals,
855 unsigned int var_ref_idx)
857 struct trace_event_file *trace_file = __data;
858 struct synth_trace_event *entry;
859 struct trace_event_buffer fbuffer;
860 struct ring_buffer *buffer;
861 struct synth_event *event;
862 unsigned int i, n_u64;
863 int fields_size = 0;
865 event = trace_file->event_call->data;
867 if (trace_trigger_soft_disabled(trace_file))
868 return;
870 fields_size = event->n_u64 * sizeof(u64);
873 * Avoid ring buffer recursion detection, as this event
874 * is being performed within another event.
876 buffer = trace_file->tr->trace_buffer.buffer;
877 ring_buffer_nest_start(buffer);
879 entry = trace_event_buffer_reserve(&fbuffer, trace_file,
880 sizeof(*entry) + fields_size);
881 if (!entry)
882 goto out;
884 for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
885 if (event->fields[i]->is_string) {
886 char *str_val = (char *)(long)var_ref_vals[var_ref_idx + i];
887 char *str_field = (char *)&entry->fields[n_u64];
889 strscpy(str_field, str_val, STR_VAR_LEN_MAX);
890 n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
891 } else {
892 entry->fields[n_u64] = var_ref_vals[var_ref_idx + i];
893 n_u64++;
897 trace_event_buffer_commit(&fbuffer);
898 out:
899 ring_buffer_nest_end(buffer);
902 static void free_synth_event_print_fmt(struct trace_event_call *call)
904 if (call) {
905 kfree(call->print_fmt);
906 call->print_fmt = NULL;
910 static int __set_synth_event_print_fmt(struct synth_event *event,
911 char *buf, int len)
913 const char *fmt;
914 int pos = 0;
915 int i;
917 /* When len=0, we just calculate the needed length */
918 #define LEN_OR_ZERO (len ? len - pos : 0)
920 pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
921 for (i = 0; i < event->n_fields; i++) {
922 fmt = synth_field_fmt(event->fields[i]->type);
923 pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s",
924 event->fields[i]->name, fmt,
925 i == event->n_fields - 1 ? "" : ", ");
927 pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
929 for (i = 0; i < event->n_fields; i++) {
930 pos += snprintf(buf + pos, LEN_OR_ZERO,
931 ", REC->%s", event->fields[i]->name);
934 #undef LEN_OR_ZERO
936 /* return the length of print_fmt */
937 return pos;
940 static int set_synth_event_print_fmt(struct trace_event_call *call)
942 struct synth_event *event = call->data;
943 char *print_fmt;
944 int len;
946 /* First: called with 0 length to calculate the needed length */
947 len = __set_synth_event_print_fmt(event, NULL, 0);
949 print_fmt = kmalloc(len + 1, GFP_KERNEL);
950 if (!print_fmt)
951 return -ENOMEM;
953 /* Second: actually write the @print_fmt */
954 __set_synth_event_print_fmt(event, print_fmt, len + 1);
955 call->print_fmt = print_fmt;
957 return 0;
960 static void free_synth_field(struct synth_field *field)
962 kfree(field->type);
963 kfree(field->name);
964 kfree(field);
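/*
 * Parse one "type name" pair from a synthetic event definition,
 * e.g. "pid_t pid;" yields type "pid_t" and name "pid"; an "unsigned"
 * prefix consumes an additional argument.
 */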
967 static struct synth_field *parse_synth_field(int argc, const char **argv,
968 int *consumed)
970 struct synth_field *field;
971 const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
972 int len, ret = 0;
974 if (field_type[0] == ';')
975 field_type++;
977 if (!strcmp(field_type, "unsigned")) {
978 if (argc < 3)
979 return ERR_PTR(-EINVAL);
980 prefix = "unsigned ";
981 field_type = argv[1];
982 field_name = argv[2];
983 *consumed = 3;
984 } else {
985 field_name = argv[1];
986 *consumed = 2;
989 field = kzalloc(sizeof(*field), GFP_KERNEL);
990 if (!field)
991 return ERR_PTR(-ENOMEM);
993 len = strlen(field_name);
994 array = strchr(field_name, '[');
995 if (array)
996 len -= strlen(array);
997 else if (field_name[len - 1] == ';')
998 len--;
1000 field->name = kmemdup_nul(field_name, len, GFP_KERNEL);
1001 if (!field->name) {
1002 ret = -ENOMEM;
1003 goto free;
1006 if (field_type[0] == ';')
1007 field_type++;
1008 len = strlen(field_type) + 1;
1009 if (array)
1010 len += strlen(array);
1011 if (prefix)
1012 len += strlen(prefix);
1014 field->type = kzalloc(len, GFP_KERNEL);
1015 if (!field->type) {
1016 ret = -ENOMEM;
1017 goto free;
1019 if (prefix)
1020 strcat(field->type, prefix);
1021 strcat(field->type, field_type);
1022 if (array) {
1023 strcat(field->type, array);
1024 if (field->type[len - 1] == ';')
1025 field->type[len - 1] = '\0';
1028 field->size = synth_field_size(field->type);
1029 if (!field->size) {
1030 ret = -EINVAL;
1031 goto free;
1034 if (synth_field_is_string(field->type))
1035 field->is_string = true;
1037 field->is_signed = synth_field_signed(field->type);
1039 out:
1040 return field;
1041 free:
1042 free_synth_field(field);
1043 field = ERR_PTR(ret);
1044 goto out;
1047 static void free_synth_tracepoint(struct tracepoint *tp)
1049 if (!tp)
1050 return;
1052 kfree(tp->name);
1053 kfree(tp);
1056 static struct tracepoint *alloc_synth_tracepoint(char *name)
1058 struct tracepoint *tp;
1060 tp = kzalloc(sizeof(*tp), GFP_KERNEL);
1061 if (!tp)
1062 return ERR_PTR(-ENOMEM);
1064 tp->name = kstrdup(name, GFP_KERNEL);
1065 if (!tp->name) {
1066 kfree(tp);
1067 return ERR_PTR(-ENOMEM);
1070 return tp;
1073 typedef void (*synth_probe_func_t) (void *__data, u64 *var_ref_vals,
1074 unsigned int var_ref_idx);
1076 static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals,
1077 unsigned int var_ref_idx)
1079 struct tracepoint *tp = event->tp;
1081 if (unlikely(atomic_read(&tp->key.enabled) > 0)) {
1082 struct tracepoint_func *probe_func_ptr;
1083 synth_probe_func_t probe_func;
1084 void *__data;
1086 if (!(cpu_online(raw_smp_processor_id())))
1087 return;
1089 probe_func_ptr = rcu_dereference_sched((tp)->funcs);
1090 if (probe_func_ptr) {
1091 do {
1092 probe_func = probe_func_ptr->func;
1093 __data = probe_func_ptr->data;
1094 probe_func(__data, var_ref_vals, var_ref_idx);
1095 } while ((++probe_func_ptr)->func);
1100 static struct synth_event *find_synth_event(const char *name)
1102 struct dyn_event *pos;
1103 struct synth_event *event;
1105 for_each_dyn_event(pos) {
1106 if (!is_synth_event(pos))
1107 continue;
1108 event = to_synth_event(pos);
1109 if (strcmp(event->name, name) == 0)
1110 return event;
1113 return NULL;
1116 static int register_synth_event(struct synth_event *event)
1118 struct trace_event_call *call = &event->call;
1119 int ret = 0;
1121 event->call.class = &event->class;
1122 event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL);
1123 if (!event->class.system) {
1124 ret = -ENOMEM;
1125 goto out;
1128 event->tp = alloc_synth_tracepoint(event->name);
1129 if (IS_ERR(event->tp)) {
1130 ret = PTR_ERR(event->tp);
1131 event->tp = NULL;
1132 goto out;
1135 INIT_LIST_HEAD(&call->class->fields);
1136 call->event.funcs = &synth_event_funcs;
1137 call->class->define_fields = synth_event_define_fields;
1139 ret = register_trace_event(&call->event);
1140 if (!ret) {
1141 ret = -ENODEV;
1142 goto out;
1144 call->flags = TRACE_EVENT_FL_TRACEPOINT;
1145 call->class->reg = trace_event_reg;
1146 call->class->probe = trace_event_raw_event_synth;
1147 call->data = event;
1148 call->tp = event->tp;
1150 ret = trace_add_event_call(call);
1151 if (ret) {
1152 pr_warn("Failed to register synthetic event: %s\n",
1153 trace_event_name(call));
1154 goto err;
1157 ret = set_synth_event_print_fmt(call);
1158 if (ret < 0) {
1159 trace_remove_event_call(call);
1160 goto err;
1162 out:
1163 return ret;
1164 err:
1165 unregister_trace_event(&call->event);
1166 goto out;
1169 static int unregister_synth_event(struct synth_event *event)
1171 struct trace_event_call *call = &event->call;
1172 int ret;
1174 ret = trace_remove_event_call(call);
1176 return ret;
1179 static void free_synth_event(struct synth_event *event)
1181 unsigned int i;
1183 if (!event)
1184 return;
1186 for (i = 0; i < event->n_fields; i++)
1187 free_synth_field(event->fields[i]);
1189 kfree(event->fields);
1190 kfree(event->name);
1191 kfree(event->class.system);
1192 free_synth_tracepoint(event->tp);
1193 free_synth_event_print_fmt(&event->call);
1194 kfree(event);
1197 static struct synth_event *alloc_synth_event(const char *name, int n_fields,
1198 struct synth_field **fields)
1200 struct synth_event *event;
1201 unsigned int i;
1203 event = kzalloc(sizeof(*event), GFP_KERNEL);
1204 if (!event) {
1205 event = ERR_PTR(-ENOMEM);
1206 goto out;
1209 event->name = kstrdup(name, GFP_KERNEL);
1210 if (!event->name) {
1211 kfree(event);
1212 event = ERR_PTR(-ENOMEM);
1213 goto out;
1216 event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL);
1217 if (!event->fields) {
1218 free_synth_event(event);
1219 event = ERR_PTR(-ENOMEM);
1220 goto out;
1223 dyn_event_init(&event->devent, &synth_event_ops);
1225 for (i = 0; i < n_fields; i++)
1226 event->fields[i] = fields[i];
1228 event->n_fields = n_fields;
1229 out:
1230 return event;
1233 static void action_trace(struct hist_trigger_data *hist_data,
1234 struct tracing_map_elt *elt, void *rec,
1235 struct ring_buffer_event *rbe, void *key,
1236 struct action_data *data, u64 *var_ref_vals)
1238 struct synth_event *event = data->synth_event;
1240 trace_synth(event, var_ref_vals, data->var_ref_idx);
1243 struct hist_var_data {
1244 struct list_head list;
1245 struct hist_trigger_data *hist_data;
1246 };
1248 static int __create_synth_event(int argc, const char *name, const char **argv)
1250 struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
1251 struct synth_event *event = NULL;
1252 int i, consumed = 0, n_fields = 0, ret = 0;
1254 /*
1255 * Argument syntax:
1256 * - Add synthetic event: <event_name> field[;field] ...
1257 * - Remove synthetic event: !<event_name> field[;field] ...
1258 * where 'field' = type field_name
1259 */
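/*
 * For example (see Documentation/trace/histogram.rst), writing
 *
 *   wakeup_latency u64 lat; pid_t pid; int prio
 *
 * to the tracing synthetic_events file defines a three-field
 * synthetic event named wakeup_latency.
 */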
1261 if (name[0] == '\0' || argc < 1)
1262 return -EINVAL;
1264 mutex_lock(&event_mutex);
1266 event = find_synth_event(name);
1267 if (event) {
1268 ret = -EEXIST;
1269 goto out;
1272 for (i = 0; i < argc - 1; i++) {
1273 if (strcmp(argv[i], ";") == 0)
1274 continue;
1275 if (n_fields == SYNTH_FIELDS_MAX) {
1276 ret = -EINVAL;
1277 goto err;
1280 field = parse_synth_field(argc - i, &argv[i], &consumed);
1281 if (IS_ERR(field)) {
1282 ret = PTR_ERR(field);
1283 goto err;
1285 fields[n_fields++] = field;
1286 i += consumed - 1;
1289 if (i < argc && strcmp(argv[i], ";") != 0) {
1290 ret = -EINVAL;
1291 goto err;
1294 event = alloc_synth_event(name, n_fields, fields);
1295 if (IS_ERR(event)) {
1296 ret = PTR_ERR(event);
1297 event = NULL;
1298 goto err;
1300 ret = register_synth_event(event);
1301 if (!ret)
1302 dyn_event_add(&event->devent);
1303 else
1304 free_synth_event(event);
1305 out:
1306 mutex_unlock(&event_mutex);
1308 return ret;
1309 err:
1310 for (i = 0; i < n_fields; i++)
1311 free_synth_field(fields[i]);
1313 goto out;
1316 static int create_or_delete_synth_event(int argc, char **argv)
1318 const char *name = argv[0];
1319 struct synth_event *event = NULL;
1320 int ret;
1322 /* trace_run_command() ensures argc != 0 */
1323 if (name[0] == '!') {
1324 mutex_lock(&event_mutex);
1325 event = find_synth_event(name + 1);
1326 if (event) {
1327 if (event->ref)
1328 ret = -EBUSY;
1329 else {
1330 ret = unregister_synth_event(event);
1331 if (!ret) {
1332 dyn_event_remove(&event->devent);
1333 free_synth_event(event);
1336 } else
1337 ret = -ENOENT;
1338 mutex_unlock(&event_mutex);
1339 return ret;
1342 ret = __create_synth_event(argc - 1, name, (const char **)argv + 1);
1343 return ret == -ECANCELED ? -EINVAL : ret;
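/*
 * Entry point for the dynamic_events interface: definitions are
 * prefixed with "s:" and may carry an optional "synthetic/" group,
 * e.g. "s:wakeup_latency u64 lat" or "s:synthetic/wakeup_latency u64 lat".
 */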
1346 static int synth_event_create(int argc, const char **argv)
1348 const char *name = argv[0];
1349 int len;
1351 if (name[0] != 's' || name[1] != ':')
1352 return -ECANCELED;
1353 name += 2;
1355 /* This interface accepts a group name prefix */
1356 if (strchr(name, '/')) {
1357 len = str_has_prefix(name, SYNTH_SYSTEM "/");
1358 if (len == 0)
1359 return -EINVAL;
1360 name += len;
1362 return __create_synth_event(argc - 1, name, argv + 1);
1365 static int synth_event_release(struct dyn_event *ev)
1367 struct synth_event *event = to_synth_event(ev);
1368 int ret;
1370 if (event->ref)
1371 return -EBUSY;
1373 ret = unregister_synth_event(event);
1374 if (ret)
1375 return ret;
1377 dyn_event_remove(ev);
1378 free_synth_event(event);
1379 return 0;
1382 static int __synth_event_show(struct seq_file *m, struct synth_event *event)
1384 struct synth_field *field;
1385 unsigned int i;
1387 seq_printf(m, "%s\t", event->name);
1389 for (i = 0; i < event->n_fields; i++) {
1390 field = event->fields[i];
1392 /* parameter values */
1393 seq_printf(m, "%s %s%s", field->type, field->name,
1394 i == event->n_fields - 1 ? "" : "; ");
1397 seq_putc(m, '\n');
1399 return 0;
1402 static int synth_event_show(struct seq_file *m, struct dyn_event *ev)
1404 struct synth_event *event = to_synth_event(ev);
1406 seq_printf(m, "s:%s/", event->class.system);
1408 return __synth_event_show(m, event);
1411 static int synth_events_seq_show(struct seq_file *m, void *v)
1413 struct dyn_event *ev = v;
1415 if (!is_synth_event(ev))
1416 return 0;
1418 return __synth_event_show(m, to_synth_event(ev));
1421 static const struct seq_operations synth_events_seq_op = {
1422 .start = dyn_event_seq_start,
1423 .next = dyn_event_seq_next,
1424 .stop = dyn_event_seq_stop,
1425 .show = synth_events_seq_show,
1426 };
1428 static int synth_events_open(struct inode *inode, struct file *file)
1430 int ret;
1432 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
1433 ret = dyn_events_release_all(&synth_event_ops);
1434 if (ret < 0)
1435 return ret;
1438 return seq_open(file, &synth_events_seq_op);
1441 static ssize_t synth_events_write(struct file *file,
1442 const char __user *buffer,
1443 size_t count, loff_t *ppos)
1445 return trace_parse_run_command(file, buffer, count, ppos,
1446 create_or_delete_synth_event);
1449 static const struct file_operations synth_events_fops = {
1450 .open = synth_events_open,
1451 .write = synth_events_write,
1452 .read = seq_read,
1453 .llseek = seq_lseek,
1454 .release = seq_release,
1455 };
1457 static u64 hist_field_timestamp(struct hist_field *hist_field,
1458 struct tracing_map_elt *elt,
1459 struct ring_buffer_event *rbe,
1460 void *event)
1462 struct hist_trigger_data *hist_data = hist_field->hist_data;
1463 struct trace_array *tr = hist_data->event_file->tr;
1465 u64 ts = ring_buffer_event_time_stamp(rbe);
1467 if (hist_data->attrs->ts_in_usecs && trace_clock_in_ns(tr))
1468 ts = ns2usecs(ts);
1470 return ts;
1473 static u64 hist_field_cpu(struct hist_field *hist_field,
1474 struct tracing_map_elt *elt,
1475 struct ring_buffer_event *rbe,
1476 void *event)
1478 int cpu = smp_processor_id();
1480 return cpu;
1484 * check_field_for_var_ref - Check if a VAR_REF field references a variable
1485 * @hist_field: The VAR_REF field to check
1486 * @var_data: The hist trigger that owns the variable
1487 * @var_idx: The trigger variable identifier
1489 * Check the given VAR_REF field to see whether or not it references
1490 * the given variable associated with the given trigger.
1492 * Return: The VAR_REF field if it does reference the variable, NULL if not
1494 static struct hist_field *
1495 check_field_for_var_ref(struct hist_field *hist_field,
1496 struct hist_trigger_data *var_data,
1497 unsigned int var_idx)
1499 WARN_ON(!(hist_field && hist_field->flags & HIST_FIELD_FL_VAR_REF));
1501 if (hist_field && hist_field->var.idx == var_idx &&
1502 hist_field->var.hist_data == var_data)
1503 return hist_field;
1505 return NULL;
1509 * find_var_ref - Check if a trigger has a reference to a trigger variable
1510 * @hist_data: The hist trigger that might have a reference to the variable
1511 * @var_data: The hist trigger that owns the variable
1512 * @var_idx: The trigger variable identifier
1514 * Check the list of var_refs[] on the first hist trigger to see
1515 * whether any of them are references to the variable on the second
1516 * trigger.
1518 * Return: The VAR_REF field referencing the variable if so, NULL if not
1520 static struct hist_field *find_var_ref(struct hist_trigger_data *hist_data,
1521 struct hist_trigger_data *var_data,
1522 unsigned int var_idx)
1524 struct hist_field *hist_field;
1525 unsigned int i;
1527 for (i = 0; i < hist_data->n_var_refs; i++) {
1528 hist_field = hist_data->var_refs[i];
1529 if (check_field_for_var_ref(hist_field, var_data, var_idx))
1530 return hist_field;
1533 return NULL;
1537 * find_any_var_ref - Check if there is a reference to a given trigger variable
1538 * @hist_data: The hist trigger
1539 * @var_idx: The trigger variable identifier
1541 * Check to see whether the given variable is currently referenced by
1542 * any other trigger.
1544 * The trigger the variable is defined on is explicitly excluded - the
1545 * assumption being that a self-reference doesn't prevent a trigger
1546 * from being removed.
1548 * Return: The VAR_REF field referencing the variable if so, NULL if not
1550 static struct hist_field *find_any_var_ref(struct hist_trigger_data *hist_data,
1551 unsigned int var_idx)
1553 struct trace_array *tr = hist_data->event_file->tr;
1554 struct hist_field *found = NULL;
1555 struct hist_var_data *var_data;
1557 list_for_each_entry(var_data, &tr->hist_vars, list) {
1558 if (var_data->hist_data == hist_data)
1559 continue;
1560 found = find_var_ref(var_data->hist_data, hist_data, var_idx);
1561 if (found)
1562 break;
1565 return found;
1569 * check_var_refs - Check if there is a reference to any of trigger's variables
1570 * @hist_data: The hist trigger
1572 * A trigger can define one or more variables. If any one of them is
1573 * currently referenced by any other trigger, this function will
1574 * determine that.
1576 * Typically used to determine whether or not a trigger can be removed
1577 * - if there are any references to a trigger's variables, it cannot.
1579 * Return: True if there is a reference to any of trigger's variables
1581 static bool check_var_refs(struct hist_trigger_data *hist_data)
1583 struct hist_field *field;
1584 bool found = false;
1585 int i;
1587 for_each_hist_field(i, hist_data) {
1588 field = hist_data->fields[i];
1589 if (field && field->flags & HIST_FIELD_FL_VAR) {
1590 if (find_any_var_ref(hist_data, field->var.idx)) {
1591 found = true;
1592 break;
1597 return found;
1600 static struct hist_var_data *find_hist_vars(struct hist_trigger_data *hist_data)
1602 struct trace_array *tr = hist_data->event_file->tr;
1603 struct hist_var_data *var_data, *found = NULL;
1605 list_for_each_entry(var_data, &tr->hist_vars, list) {
1606 if (var_data->hist_data == hist_data) {
1607 found = var_data;
1608 break;
1612 return found;
1615 static bool field_has_hist_vars(struct hist_field *hist_field,
1616 unsigned int level)
1618 int i;
1620 if (level > 3)
1621 return false;
1623 if (!hist_field)
1624 return false;
1626 if (hist_field->flags & HIST_FIELD_FL_VAR ||
1627 hist_field->flags & HIST_FIELD_FL_VAR_REF)
1628 return true;
1630 for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++) {
1631 struct hist_field *operand;
1633 operand = hist_field->operands[i];
1634 if (field_has_hist_vars(operand, level + 1))
1635 return true;
1638 return false;
1641 static bool has_hist_vars(struct hist_trigger_data *hist_data)
1643 struct hist_field *hist_field;
1644 int i;
1646 for_each_hist_field(i, hist_data) {
1647 hist_field = hist_data->fields[i];
1648 if (field_has_hist_vars(hist_field, 0))
1649 return true;
1652 return false;
1655 static int save_hist_vars(struct hist_trigger_data *hist_data)
1657 struct trace_array *tr = hist_data->event_file->tr;
1658 struct hist_var_data *var_data;
1660 var_data = find_hist_vars(hist_data);
1661 if (var_data)
1662 return 0;
1664 if (trace_array_get(tr) < 0)
1665 return -ENODEV;
1667 var_data = kzalloc(sizeof(*var_data), GFP_KERNEL);
1668 if (!var_data) {
1669 trace_array_put(tr);
1670 return -ENOMEM;
1673 var_data->hist_data = hist_data;
1674 list_add(&var_data->list, &tr->hist_vars);
1676 return 0;
1679 static void remove_hist_vars(struct hist_trigger_data *hist_data)
1681 struct trace_array *tr = hist_data->event_file->tr;
1682 struct hist_var_data *var_data;
1684 var_data = find_hist_vars(hist_data);
1685 if (!var_data)
1686 return;
1688 if (WARN_ON(check_var_refs(hist_data)))
1689 return;
1691 list_del(&var_data->list);
1693 kfree(var_data);
1695 trace_array_put(tr);
1698 static struct hist_field *find_var_field(struct hist_trigger_data *hist_data,
1699 const char *var_name)
1701 struct hist_field *hist_field, *found = NULL;
1702 int i;
1704 for_each_hist_field(i, hist_data) {
1705 hist_field = hist_data->fields[i];
1706 if (hist_field && hist_field->flags & HIST_FIELD_FL_VAR &&
1707 strcmp(hist_field->var.name, var_name) == 0) {
1708 found = hist_field;
1709 break;
1713 return found;
1716 static struct hist_field *find_var(struct hist_trigger_data *hist_data,
1717 struct trace_event_file *file,
1718 const char *var_name)
1720 struct hist_trigger_data *test_data;
1721 struct event_trigger_data *test;
1722 struct hist_field *hist_field;
1724 hist_field = find_var_field(hist_data, var_name);
1725 if (hist_field)
1726 return hist_field;
1728 list_for_each_entry_rcu(test, &file->triggers, list) {
1729 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
1730 test_data = test->private_data;
1731 hist_field = find_var_field(test_data, var_name);
1732 if (hist_field)
1733 return hist_field;
1737 return NULL;
1740 static struct trace_event_file *find_var_file(struct trace_array *tr,
1741 char *system,
1742 char *event_name,
1743 char *var_name)
1745 struct hist_trigger_data *var_hist_data;
1746 struct hist_var_data *var_data;
1747 struct trace_event_file *file, *found = NULL;
1749 if (system)
1750 return find_event_file(tr, system, event_name);
1752 list_for_each_entry(var_data, &tr->hist_vars, list) {
1753 var_hist_data = var_data->hist_data;
1754 file = var_hist_data->event_file;
1755 if (file == found)
1756 continue;
1758 if (find_var_field(var_hist_data, var_name)) {
1759 if (found) {
1760 hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE, errpos(var_name));
1761 return NULL;
1764 found = file;
1768 return found;
1771 static struct hist_field *find_file_var(struct trace_event_file *file,
1772 const char *var_name)
1774 struct hist_trigger_data *test_data;
1775 struct event_trigger_data *test;
1776 struct hist_field *hist_field;
1778 list_for_each_entry_rcu(test, &file->triggers, list) {
1779 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
1780 test_data = test->private_data;
1781 hist_field = find_var_field(test_data, var_name);
1782 if (hist_field)
1783 return hist_field;
1787 return NULL;
1790 static struct hist_field *
1791 find_match_var(struct hist_trigger_data *hist_data, char *var_name)
1793 struct trace_array *tr = hist_data->event_file->tr;
1794 struct hist_field *hist_field, *found = NULL;
1795 struct trace_event_file *file;
1796 unsigned int i;
1798 for (i = 0; i < hist_data->n_actions; i++) {
1799 struct action_data *data = hist_data->actions[i];
1801 if (data->handler == HANDLER_ONMATCH) {
1802 char *system = data->match_data.event_system;
1803 char *event_name = data->match_data.event;
1805 file = find_var_file(tr, system, event_name, var_name);
1806 if (!file)
1807 continue;
1808 hist_field = find_file_var(file, var_name);
1809 if (hist_field) {
1810 if (found) {
1811 hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE,
1812 errpos(var_name));
1813 return ERR_PTR(-EINVAL);
1816 found = hist_field;
1820 return found;
1823 static struct hist_field *find_event_var(struct hist_trigger_data *hist_data,
1824 char *system,
1825 char *event_name,
1826 char *var_name)
1828 struct trace_array *tr = hist_data->event_file->tr;
1829 struct hist_field *hist_field = NULL;
1830 struct trace_event_file *file;
1832 if (!system || !event_name) {
1833 hist_field = find_match_var(hist_data, var_name);
1834 if (IS_ERR(hist_field))
1835 return NULL;
1836 if (hist_field)
1837 return hist_field;
1840 file = find_var_file(tr, system, event_name, var_name);
1841 if (!file)
1842 return NULL;
1844 hist_field = find_file_var(file, var_name);
1846 return hist_field;
1849 static u64 hist_field_var_ref(struct hist_field *hist_field,
1850 struct tracing_map_elt *elt,
1851 struct ring_buffer_event *rbe,
1852 void *event)
1854 struct hist_elt_data *elt_data;
1855 u64 var_val = 0;
1857 if (WARN_ON_ONCE(!elt))
1858 return var_val;
1860 elt_data = elt->private_data;
1861 var_val = elt_data->var_ref_vals[hist_field->var_ref_idx];
1863 return var_val;
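/*
 * resolve_var_refs - fill var_ref_vals[] with the current value of each
 * variable referenced by this trigger. With self == true only
 * references to this trigger's own variables are resolved; with
 * self == false only references to other triggers' variables are.
 * Returns false if any reference cannot be resolved yet.
 */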
1866 static bool resolve_var_refs(struct hist_trigger_data *hist_data, void *key,
1867 u64 *var_ref_vals, bool self)
1869 struct hist_trigger_data *var_data;
1870 struct tracing_map_elt *var_elt;
1871 struct hist_field *hist_field;
1872 unsigned int i, var_idx;
1873 bool resolved = true;
1874 u64 var_val = 0;
1876 for (i = 0; i < hist_data->n_var_refs; i++) {
1877 hist_field = hist_data->var_refs[i];
1878 var_idx = hist_field->var.idx;
1879 var_data = hist_field->var.hist_data;
1881 if (var_data == NULL) {
1882 resolved = false;
1883 break;
1886 if ((self && var_data != hist_data) ||
1887 (!self && var_data == hist_data))
1888 continue;
1890 var_elt = tracing_map_lookup(var_data->map, key);
1891 if (!var_elt) {
1892 resolved = false;
1893 break;
1896 if (!tracing_map_var_set(var_elt, var_idx)) {
1897 resolved = false;
1898 break;
1901 if (self || !hist_field->read_once)
1902 var_val = tracing_map_read_var(var_elt, var_idx);
1903 else
1904 var_val = tracing_map_read_var_once(var_elt, var_idx);
1906 var_ref_vals[i] = var_val;
1909 return resolved;
1912 static const char *hist_field_name(struct hist_field *field,
1913 unsigned int level)
1915 const char *field_name = "";
1917 if (level > 1)
1918 return field_name;
1920 if (field->field)
1921 field_name = field->field->name;
1922 else if (field->flags & HIST_FIELD_FL_LOG2 ||
1923 field->flags & HIST_FIELD_FL_ALIAS)
1924 field_name = hist_field_name(field->operands[0], ++level);
1925 else if (field->flags & HIST_FIELD_FL_CPU)
1926 field_name = "cpu";
1927 else if (field->flags & HIST_FIELD_FL_EXPR ||
1928 field->flags & HIST_FIELD_FL_VAR_REF) {
1929 if (field->system) {
1930 static char full_name[MAX_FILTER_STR_VAL];
1932 strcat(full_name, field->system);
1933 strcat(full_name, ".");
1934 strcat(full_name, field->event_name);
1935 strcat(full_name, ".");
1936 strcat(full_name, field->name);
1937 field_name = full_name;
1938 } else
1939 field_name = field->name;
1940 } else if (field->flags & HIST_FIELD_FL_TIMESTAMP)
1941 field_name = "common_timestamp";
1943 if (field_name == NULL)
1944 field_name = "";
1946 return field_name;
1949 static hist_field_fn_t select_value_fn(int field_size, int field_is_signed)
1951 hist_field_fn_t fn = NULL;
1953 switch (field_size) {
1954 case 8:
1955 if (field_is_signed)
1956 fn = hist_field_s64;
1957 else
1958 fn = hist_field_u64;
1959 break;
1960 case 4:
1961 if (field_is_signed)
1962 fn = hist_field_s32;
1963 else
1964 fn = hist_field_u32;
1965 break;
1966 case 2:
1967 if (field_is_signed)
1968 fn = hist_field_s16;
1969 else
1970 fn = hist_field_u16;
1971 break;
1972 case 1:
1973 if (field_is_signed)
1974 fn = hist_field_s8;
1975 else
1976 fn = hist_field_u8;
1977 break;
1980 return fn;
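/*
 * 'size=N' sets the tracing_map size: N is rounded up to a power of
 * two and converted to a bit count, e.g. size=4096 gives map_bits = 12,
 * which must fall within [TRACING_MAP_BITS_MIN, TRACING_MAP_BITS_MAX].
 */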
1983 static int parse_map_size(char *str)
1985 unsigned long size, map_bits;
1986 int ret;
1988 strsep(&str, "=");
1989 if (!str) {
1990 ret = -EINVAL;
1991 goto out;
1994 ret = kstrtoul(str, 0, &size);
1995 if (ret)
1996 goto out;
1998 map_bits = ilog2(roundup_pow_of_two(size));
1999 if (map_bits < TRACING_MAP_BITS_MIN ||
2000 map_bits > TRACING_MAP_BITS_MAX)
2001 ret = -EINVAL;
2002 else
2003 ret = map_bits;
2004 out:
2005 return ret;
2008 static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs)
2010 unsigned int i;
2012 if (!attrs)
2013 return;
2015 for (i = 0; i < attrs->n_assignments; i++)
2016 kfree(attrs->assignment_str[i]);
2018 for (i = 0; i < attrs->n_actions; i++)
2019 kfree(attrs->action_str[i]);
2021 kfree(attrs->name);
2022 kfree(attrs->sort_key_str);
2023 kfree(attrs->keys_str);
2024 kfree(attrs->vals_str);
2025 kfree(attrs->clock);
2026 kfree(attrs);
2029 static int parse_action(char *str, struct hist_trigger_attrs *attrs)
2031 int ret = -EINVAL;
2033 if (attrs->n_actions >= HIST_ACTIONS_MAX)
2034 return ret;
2036 if ((str_has_prefix(str, "onmatch(")) ||
2037 (str_has_prefix(str, "onmax(")) ||
2038 (str_has_prefix(str, "onchange("))) {
2039 attrs->action_str[attrs->n_actions] = kstrdup(str, GFP_KERNEL);
2040 if (!attrs->action_str[attrs->n_actions]) {
2041 ret = -ENOMEM;
2042 return ret;
2044 attrs->n_actions++;
2045 ret = 0;
2047 return ret;
2050 static int parse_assignment(struct trace_array *tr,
2051 char *str, struct hist_trigger_attrs *attrs)
2053 int ret = 0;
2055 if ((str_has_prefix(str, "key=")) ||
2056 (str_has_prefix(str, "keys="))) {
2057 attrs->keys_str = kstrdup(str, GFP_KERNEL);
2058 if (!attrs->keys_str) {
2059 ret = -ENOMEM;
2060 goto out;
2062 } else if ((str_has_prefix(str, "val=")) ||
2063 (str_has_prefix(str, "vals=")) ||
2064 (str_has_prefix(str, "values="))) {
2065 attrs->vals_str = kstrdup(str, GFP_KERNEL);
2066 if (!attrs->vals_str) {
2067 ret = -ENOMEM;
2068 goto out;
2070 } else if (str_has_prefix(str, "sort=")) {
2071 attrs->sort_key_str = kstrdup(str, GFP_KERNEL);
2072 if (!attrs->sort_key_str) {
2073 ret = -ENOMEM;
2074 goto out;
2076 } else if (str_has_prefix(str, "name=")) {
2077 attrs->name = kstrdup(str, GFP_KERNEL);
2078 if (!attrs->name) {
2079 ret = -ENOMEM;
2080 goto out;
2082 } else if (str_has_prefix(str, "clock=")) {
2083 strsep(&str, "=");
2084 if (!str) {
2085 ret = -EINVAL;
2086 goto out;
2089 str = strstrip(str);
2090 attrs->clock = kstrdup(str, GFP_KERNEL);
2091 if (!attrs->clock) {
2092 ret = -ENOMEM;
2093 goto out;
2095 } else if (str_has_prefix(str, "size=")) {
2096 int map_bits = parse_map_size(str);
2098 if (map_bits < 0) {
2099 ret = map_bits;
2100 goto out;
2102 attrs->map_bits = map_bits;
2103 } else {
2104 char *assignment;
2106 if (attrs->n_assignments == TRACING_MAP_VARS_MAX) {
2107 hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(str));
2108 ret = -EINVAL;
2109 goto out;
2112 assignment = kstrdup(str, GFP_KERNEL);
2113 if (!assignment) {
2114 ret = -ENOMEM;
2115 goto out;
2118 attrs->assignment_str[attrs->n_assignments++] = assignment;
2120 out:
2121 return ret;
2124 static struct hist_trigger_attrs *
2125 parse_hist_trigger_attrs(struct trace_array *tr, char *trigger_str)
2127 struct hist_trigger_attrs *attrs;
2128 int ret = 0;
2130 attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
2131 if (!attrs)
2132 return ERR_PTR(-ENOMEM);
2134 while (trigger_str) {
2135 char *str = strsep(&trigger_str, ":");
2137 if (strchr(str, '=')) {
2138 ret = parse_assignment(tr, str, attrs);
2139 if (ret)
2140 goto free;
2141 } else if (strcmp(str, "pause") == 0)
2142 attrs->pause = true;
2143 else if ((strcmp(str, "cont") == 0) ||
2144 (strcmp(str, "continue") == 0))
2145 attrs->cont = true;
2146 else if (strcmp(str, "clear") == 0)
2147 attrs->clear = true;
2148 else {
2149 ret = parse_action(str, attrs);
2150 if (ret)
2151 goto free;
2155 if (!attrs->keys_str) {
2156 ret = -EINVAL;
2157 goto free;
2160 if (!attrs->clock) {
2161 attrs->clock = kstrdup("global", GFP_KERNEL);
2162 if (!attrs->clock) {
2163 ret = -ENOMEM;
2164 goto free;
2168 return attrs;
2169 free:
2170 destroy_hist_trigger_attrs(attrs);
2172 return ERR_PTR(ret);
2175 static inline void save_comm(char *comm, struct task_struct *task)
2177 if (!task->pid) {
2178 strcpy(comm, "<idle>");
2179 return;
2182 if (WARN_ON_ONCE(task->pid < 0)) {
2183 strcpy(comm, "<XXX>");
2184 return;
2187 strncpy(comm, task->comm, TASK_COMM_LEN);
2190 static void hist_elt_data_free(struct hist_elt_data *elt_data)
2192 unsigned int i;
2194 for (i = 0; i < SYNTH_FIELDS_MAX; i++)
2195 kfree(elt_data->field_var_str[i]);
2197 kfree(elt_data->comm);
2198 kfree(elt_data);
2201 static void hist_trigger_elt_data_free(struct tracing_map_elt *elt)
2203 struct hist_elt_data *elt_data = elt->private_data;
2205 hist_elt_data_free(elt_data);
2208 static int hist_trigger_elt_data_alloc(struct tracing_map_elt *elt)
2210 struct hist_trigger_data *hist_data = elt->map->private_data;
2211 unsigned int size = TASK_COMM_LEN;
2212 struct hist_elt_data *elt_data;
2213 struct hist_field *key_field;
2214 unsigned int i, n_str;
2216 elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
2217 if (!elt_data)
2218 return -ENOMEM;
2220 for_each_hist_key_field(i, hist_data) {
2221 key_field = hist_data->fields[i];
2223 if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
2224 elt_data->comm = kzalloc(size, GFP_KERNEL);
2225 if (!elt_data->comm) {
2226 kfree(elt_data);
2227 return -ENOMEM;
2229 break;
2233 n_str = hist_data->n_field_var_str + hist_data->n_save_var_str;
2235 size = STR_VAR_LEN_MAX;
2237 for (i = 0; i < n_str; i++) {
2238 elt_data->field_var_str[i] = kzalloc(size, GFP_KERNEL);
2239 if (!elt_data->field_var_str[i]) {
2240 hist_elt_data_free(elt_data);
2241 return -ENOMEM;
2245 elt->private_data = elt_data;
2247 return 0;
2250 static void hist_trigger_elt_data_init(struct tracing_map_elt *elt)
2252 struct hist_elt_data *elt_data = elt->private_data;
2254 if (elt_data->comm)
2255 save_comm(elt_data->comm, current);
2258 static const struct tracing_map_ops hist_trigger_elt_data_ops = {
2259 .elt_alloc = hist_trigger_elt_data_alloc,
2260 .elt_free = hist_trigger_elt_data_free,
2261 .elt_init = hist_trigger_elt_data_init,
2262 };
2264 static const char *get_hist_field_flags(struct hist_field *hist_field)
2266 const char *flags_str = NULL;
2268 if (hist_field->flags & HIST_FIELD_FL_HEX)
2269 flags_str = "hex";
2270 else if (hist_field->flags & HIST_FIELD_FL_SYM)
2271 flags_str = "sym";
2272 else if (hist_field->flags & HIST_FIELD_FL_SYM_OFFSET)
2273 flags_str = "sym-offset";
2274 else if (hist_field->flags & HIST_FIELD_FL_EXECNAME)
2275 flags_str = "execname";
2276 else if (hist_field->flags & HIST_FIELD_FL_SYSCALL)
2277 flags_str = "syscall";
2278 else if (hist_field->flags & HIST_FIELD_FL_LOG2)
2279 flags_str = "log2";
2280 else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP_USECS)
2281 flags_str = "usecs";
2283 return flags_str;
2286 static void expr_field_str(struct hist_field *field, char *expr)
2288 if (field->flags & HIST_FIELD_FL_VAR_REF)
2289 strcat(expr, "$");
2291 strcat(expr, hist_field_name(field, 0));
2293 if (field->flags && !(field->flags & HIST_FIELD_FL_VAR_REF)) {
2294 const char *flags_str = get_hist_field_flags(field);
2296 if (flags_str) {
2297 strcat(expr, ".");
2298 strcat(expr, flags_str);
2303 static char *expr_str(struct hist_field *field, unsigned int level)
2305 char *expr;
2307 if (level > 1)
2308 return NULL;
2310 expr = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
2311 if (!expr)
2312 return NULL;
2314 if (!field->operands[0]) {
2315 expr_field_str(field, expr);
2316 return expr;
2319 if (field->operator == FIELD_OP_UNARY_MINUS) {
2320 char *subexpr;
2322 strcat(expr, "-(");
2323 subexpr = expr_str(field->operands[0], ++level);
2324 if (!subexpr) {
2325 kfree(expr);
2326 return NULL;
2328 strcat(expr, subexpr);
2329 strcat(expr, ")");
2331 kfree(subexpr);
2333 return expr;
2336 expr_field_str(field->operands[0], expr);
2338 switch (field->operator) {
2339 case FIELD_OP_MINUS:
2340 strcat(expr, "-");
2341 break;
2342 case FIELD_OP_PLUS:
2343 strcat(expr, "+");
2344 break;
2345 default:
2346 kfree(expr);
2347 return NULL;
2350 expr_field_str(field->operands[1], expr);
2352 return expr;
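/*
 * The first '+' or '-' found in the string decides the operator; a '-'
 * is treated as unary minus only when it is the very first character of
 * the string, otherwise it is subtraction.
 */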
2355 static int contains_operator(char *str)
2357 enum field_op_id field_op = FIELD_OP_NONE;
2358 char *op;
2360 op = strpbrk(str, "+-");
2361 if (!op)
2362 return FIELD_OP_NONE;
2364 switch (*op) {
2365 case '-':
2366 if (*str == '-')
2367 field_op = FIELD_OP_UNARY_MINUS;
2368 else
2369 field_op = FIELD_OP_MINUS;
2370 break;
2371 case '+':
2372 field_op = FIELD_OP_PLUS;
2373 break;
2374 default:
2375 break;
2378 return field_op;
2381 static void __destroy_hist_field(struct hist_field *hist_field)
2383 kfree(hist_field->var.name);
2384 kfree(hist_field->name);
2385 kfree(hist_field->type);
2387 kfree(hist_field);
2390 static void destroy_hist_field(struct hist_field *hist_field,
2391 unsigned int level)
2393 unsigned int i;
2395 if (level > 3)
2396 return;
2398 if (!hist_field)
2399 return;
2401 if (hist_field->flags & HIST_FIELD_FL_VAR_REF)
2402 return; /* var refs will be destroyed separately */
2404 for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++)
2405 destroy_hist_field(hist_field->operands[i], level + 1);
2407 __destroy_hist_field(hist_field);
2410 static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
2411 struct ftrace_event_field *field,
2412 unsigned long flags,
2413 char *var_name)
2415 struct hist_field *hist_field;
2417 if (field && is_function_field(field))
2418 return NULL;
2420 hist_field = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
2421 if (!hist_field)
2422 return NULL;
2424 hist_field->hist_data = hist_data;
2426 if (flags & HIST_FIELD_FL_EXPR || flags & HIST_FIELD_FL_ALIAS)
2427 goto out; /* caller will populate */
2429 if (flags & HIST_FIELD_FL_VAR_REF) {
2430 hist_field->fn = hist_field_var_ref;
2431 goto out;
2434 if (flags & HIST_FIELD_FL_HITCOUNT) {
2435 hist_field->fn = hist_field_counter;
2436 hist_field->size = sizeof(u64);
2437 hist_field->type = kstrdup("u64", GFP_KERNEL);
2438 if (!hist_field->type)
2439 goto free;
2440 goto out;
2443 if (flags & HIST_FIELD_FL_STACKTRACE) {
2444 hist_field->fn = hist_field_none;
2445 goto out;
2448 if (flags & HIST_FIELD_FL_LOG2) {
2449 unsigned long fl = flags & ~HIST_FIELD_FL_LOG2;
2450 hist_field->fn = hist_field_log2;
2451 hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL);
2452 hist_field->size = hist_field->operands[0]->size;
2453 hist_field->type = kstrdup(hist_field->operands[0]->type, GFP_KERNEL);
2454 if (!hist_field->type)
2455 goto free;
2456 goto out;
2459 if (flags & HIST_FIELD_FL_TIMESTAMP) {
2460 hist_field->fn = hist_field_timestamp;
2461 hist_field->size = sizeof(u64);
2462 hist_field->type = kstrdup("u64", GFP_KERNEL);
2463 if (!hist_field->type)
2464 goto free;
2465 goto out;
2468 if (flags & HIST_FIELD_FL_CPU) {
2469 hist_field->fn = hist_field_cpu;
2470 hist_field->size = sizeof(int);
2471 hist_field->type = kstrdup("unsigned int", GFP_KERNEL);
2472 if (!hist_field->type)
2473 goto free;
2474 goto out;
2477 if (WARN_ON_ONCE(!field))
2478 goto out;
2480 if (is_string_field(field)) {
2481 flags |= HIST_FIELD_FL_STRING;
2483 hist_field->size = MAX_FILTER_STR_VAL;
2484 hist_field->type = kstrdup(field->type, GFP_KERNEL);
2485 if (!hist_field->type)
2486 goto free;
2488 if (field->filter_type == FILTER_STATIC_STRING)
2489 hist_field->fn = hist_field_string;
2490 else if (field->filter_type == FILTER_DYN_STRING)
2491 hist_field->fn = hist_field_dynstring;
2492 else
2493 hist_field->fn = hist_field_pstring;
2494 } else {
2495 hist_field->size = field->size;
2496 hist_field->is_signed = field->is_signed;
2497 hist_field->type = kstrdup(field->type, GFP_KERNEL);
2498 if (!hist_field->type)
2499 goto free;
2501 hist_field->fn = select_value_fn(field->size,
2502 field->is_signed);
2503 if (!hist_field->fn) {
2504 destroy_hist_field(hist_field, 0);
2505 return NULL;
2508 out:
2509 hist_field->field = field;
2510 hist_field->flags = flags;
2512 if (var_name) {
2513 hist_field->var.name = kstrdup(var_name, GFP_KERNEL);
2514 if (!hist_field->var.name)
2515 goto free;
2518 return hist_field;
2519 free:
2520 destroy_hist_field(hist_field, 0);
2521 return NULL;
2524 static void destroy_hist_fields(struct hist_trigger_data *hist_data)
2526 unsigned int i;
2528 for (i = 0; i < HIST_FIELDS_MAX; i++) {
2529 if (hist_data->fields[i]) {
2530 destroy_hist_field(hist_data->fields[i], 0);
2531 hist_data->fields[i] = NULL;
2535 for (i = 0; i < hist_data->n_var_refs; i++) {
2536 WARN_ON(!(hist_data->var_refs[i]->flags & HIST_FIELD_FL_VAR_REF));
2537 __destroy_hist_field(hist_data->var_refs[i]);
2538 hist_data->var_refs[i] = NULL;
2542 static int init_var_ref(struct hist_field *ref_field,
2543 struct hist_field *var_field,
2544 char *system, char *event_name)
2546 int err = 0;
2548 ref_field->var.idx = var_field->var.idx;
2549 ref_field->var.hist_data = var_field->hist_data;
2550 ref_field->size = var_field->size;
2551 ref_field->is_signed = var_field->is_signed;
2552 ref_field->flags |= var_field->flags &
2553 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
2555 if (system) {
2556 ref_field->system = kstrdup(system, GFP_KERNEL);
2557 if (!ref_field->system)
2558 return -ENOMEM;
2561 if (event_name) {
2562 ref_field->event_name = kstrdup(event_name, GFP_KERNEL);
2563 if (!ref_field->event_name) {
2564 err = -ENOMEM;
2565 goto free;
2569 if (var_field->var.name) {
2570 ref_field->name = kstrdup(var_field->var.name, GFP_KERNEL);
2571 if (!ref_field->name) {
2572 err = -ENOMEM;
2573 goto free;
2575 } else if (var_field->name) {
2576 ref_field->name = kstrdup(var_field->name, GFP_KERNEL);
2577 if (!ref_field->name) {
2578 err = -ENOMEM;
2579 goto free;
2583 ref_field->type = kstrdup(var_field->type, GFP_KERNEL);
2584 if (!ref_field->type) {
2585 err = -ENOMEM;
2586 goto free;
2588 out:
2589 return err;
2590 free:
2591 kfree(ref_field->system);
2592 kfree(ref_field->event_name);
2593 kfree(ref_field->name);
2595 goto out;
2599 * create_var_ref - Create a variable reference and attach it to trigger
2600 * @hist_data: The trigger that will be referencing the variable
2601 * @var_field: The VAR field to create a reference to
2602 * @system: The optional system string
2603 * @event_name: The optional event_name string
2605 * Given a variable hist_field, create a VAR_REF hist_field that
2606 * represents a reference to it.
2608 * This function also adds the reference to the trigger that
2609 * now references the variable.
2611 * Return: The VAR_REF field if successful, NULL if not
2613 static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data,
2614 struct hist_field *var_field,
2615 char *system, char *event_name)
2617 unsigned long flags = HIST_FIELD_FL_VAR_REF;
2618 struct hist_field *ref_field;
2620 ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL);
2621 if (ref_field) {
2622 if (init_var_ref(ref_field, var_field, system, event_name)) {
2623 destroy_hist_field(ref_field, 0);
2624 return NULL;
2627 hist_data->var_refs[hist_data->n_var_refs] = ref_field;
2628 ref_field->var_ref_idx = hist_data->n_var_refs++;
2631 return ref_field;
2634 static bool is_var_ref(char *var_name)
2636 if (!var_name || strlen(var_name) < 2 || var_name[0] != '$')
2637 return false;
2639 return true;
2642 static char *field_name_from_var(struct hist_trigger_data *hist_data,
2643 char *var_name)
2645 char *name, *field;
2646 unsigned int i;
2648 for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) {
2649 name = hist_data->attrs->var_defs.name[i];
2651 if (strcmp(var_name, name) == 0) {
2652 field = hist_data->attrs->var_defs.expr[i];
2653 if (contains_operator(field) || is_var_ref(field))
2654 continue;
2655 return field;
2659 return NULL;
2662 static char *local_field_var_ref(struct hist_trigger_data *hist_data,
2663 char *system, char *event_name,
2664 char *var_name)
2666 struct trace_event_call *call;
2668 if (system && event_name) {
2669 call = hist_data->event_file->event_call;
2671 if (strcmp(system, call->class->system) != 0)
2672 return NULL;
2674 if (strcmp(event_name, trace_event_name(call)) != 0)
2675 return NULL;
2678 if (!!system != !!event_name)
2679 return NULL;
2681 if (!is_var_ref(var_name))
2682 return NULL;
2684 var_name++;
2686 return field_name_from_var(hist_data, var_name);
2689 static struct hist_field *parse_var_ref(struct hist_trigger_data *hist_data,
2690 char *system, char *event_name,
2691 char *var_name)
2693 struct hist_field *var_field = NULL, *ref_field = NULL;
2694 struct trace_array *tr = hist_data->event_file->tr;
2696 if (!is_var_ref(var_name))
2697 return NULL;
2699 var_name++;
2701 var_field = find_event_var(hist_data, system, event_name, var_name);
2702 if (var_field)
2703 ref_field = create_var_ref(hist_data, var_field,
2704 system, event_name);
2706 if (!ref_field)
2707 hist_err(tr, HIST_ERR_VAR_NOT_FOUND, errpos(var_name));
2709 return ref_field;
2712 static struct ftrace_event_field *
2713 parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
2714 char *field_str, unsigned long *flags)
2716 struct ftrace_event_field *field = NULL;
2717 char *field_name, *modifier, *str;
2718 struct trace_array *tr = file->tr;
2720 modifier = str = kstrdup(field_str, GFP_KERNEL);
2721 if (!modifier)
2722 return ERR_PTR(-ENOMEM);
2724 field_name = strsep(&modifier, ".");
2725 if (modifier) {
2726 if (strcmp(modifier, "hex") == 0)
2727 *flags |= HIST_FIELD_FL_HEX;
2728 else if (strcmp(modifier, "sym") == 0)
2729 *flags |= HIST_FIELD_FL_SYM;
2730 else if (strcmp(modifier, "sym-offset") == 0)
2731 *flags |= HIST_FIELD_FL_SYM_OFFSET;
2732 else if ((strcmp(modifier, "execname") == 0) &&
2733 (strcmp(field_name, "common_pid") == 0))
2734 *flags |= HIST_FIELD_FL_EXECNAME;
2735 else if (strcmp(modifier, "syscall") == 0)
2736 *flags |= HIST_FIELD_FL_SYSCALL;
2737 else if (strcmp(modifier, "log2") == 0)
2738 *flags |= HIST_FIELD_FL_LOG2;
2739 else if (strcmp(modifier, "usecs") == 0)
2740 *flags |= HIST_FIELD_FL_TIMESTAMP_USECS;
2741 else {
2742 hist_err(tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(modifier));
2743 field = ERR_PTR(-EINVAL);
2744 goto out;
2748 if (strcmp(field_name, "common_timestamp") == 0) {
2749 *flags |= HIST_FIELD_FL_TIMESTAMP;
2750 hist_data->enable_timestamps = true;
2751 if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS)
2752 hist_data->attrs->ts_in_usecs = true;
2753 } else if (strcmp(field_name, "cpu") == 0)
2754 *flags |= HIST_FIELD_FL_CPU;
2755 else {
2756 field = trace_find_event_field(file->event_call, field_name);
2757 if (!field || !field->size) {
2758 hist_err(tr, HIST_ERR_FIELD_NOT_FOUND, errpos(field_name));
2759 field = ERR_PTR(-EINVAL);
2760 goto out;
2763 out:
2764 kfree(str);
2766 return field;
2769 static struct hist_field *create_alias(struct hist_trigger_data *hist_data,
2770 struct hist_field *var_ref,
2771 char *var_name)
2773 struct hist_field *alias = NULL;
2774 unsigned long flags = HIST_FIELD_FL_ALIAS | HIST_FIELD_FL_VAR;
2776 alias = create_hist_field(hist_data, NULL, flags, var_name);
2777 if (!alias)
2778 return NULL;
2780 alias->fn = var_ref->fn;
2781 alias->operands[0] = var_ref;
2783 if (init_var_ref(alias, var_ref, var_ref->system, var_ref->event_name)) {
2784 destroy_hist_field(alias, 0);
2785 return NULL;
2788 return alias;
2791 static struct hist_field *parse_atom(struct hist_trigger_data *hist_data,
2792 struct trace_event_file *file, char *str,
2793 unsigned long *flags, char *var_name)
2795 char *s, *ref_system = NULL, *ref_event = NULL, *ref_var = str;
2796 struct ftrace_event_field *field = NULL;
2797 struct hist_field *hist_field = NULL;
2798 int ret = 0;
2800 s = strchr(str, '.');
2801 if (s) {
2802 s = strchr(++s, '.');
2803 if (s) {
2804 ref_system = strsep(&str, ".");
2805 if (!str) {
2806 ret = -EINVAL;
2807 goto out;
2809 ref_event = strsep(&str, ".");
2810 if (!str) {
2811 ret = -EINVAL;
2812 goto out;
2814 ref_var = str;
2818 s = local_field_var_ref(hist_data, ref_system, ref_event, ref_var);
2819 if (!s) {
2820 hist_field = parse_var_ref(hist_data, ref_system,
2821 ref_event, ref_var);
2822 if (hist_field) {
2823 if (var_name) {
2824 hist_field = create_alias(hist_data, hist_field, var_name);
2825 if (!hist_field) {
2826 ret = -ENOMEM;
2827 goto out;
2830 return hist_field;
2832 } else
2833 str = s;
2835 field = parse_field(hist_data, file, str, flags);
2836 if (IS_ERR(field)) {
2837 ret = PTR_ERR(field);
2838 goto out;
2841 hist_field = create_hist_field(hist_data, field, *flags, var_name);
2842 if (!hist_field) {
2843 ret = -ENOMEM;
2844 goto out;
2847 return hist_field;
2848 out:
2849 return ERR_PTR(ret);
2852 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
2853 struct trace_event_file *file,
2854 char *str, unsigned long flags,
2855 char *var_name, unsigned int level);
2857 static struct hist_field *parse_unary(struct hist_trigger_data *hist_data,
2858 struct trace_event_file *file,
2859 char *str, unsigned long flags,
2860 char *var_name, unsigned int level)
2862 struct hist_field *operand1, *expr = NULL;
2863 unsigned long operand_flags;
2864 int ret = 0;
2865 char *s;
2867 /* we support only -(xxx) i.e. explicit parens required */
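/*
 * Illustration only (the variable name below is made up, not from this
 * file): '-($delta)' is accepted here, while a bare '-$delta' without
 * parentheses fails with -EINVAL below.
 */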
2869 if (level > 3) {
2870 hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str));
2871 ret = -EINVAL;
2872 goto free;
2875 str++; /* skip leading '-' */
2877 s = strchr(str, '(');
2878 if (s)
2879 str++;
2880 else {
2881 ret = -EINVAL;
2882 goto free;
2885 s = strrchr(str, ')');
2886 if (s)
2887 *s = '\0';
2888 else {
2889 ret = -EINVAL; /* no closing ')' */
2890 goto free;
2893 flags |= HIST_FIELD_FL_EXPR;
2894 expr = create_hist_field(hist_data, NULL, flags, var_name);
2895 if (!expr) {
2896 ret = -ENOMEM;
2897 goto free;
2900 operand_flags = 0;
2901 operand1 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level);
2902 if (IS_ERR(operand1)) {
2903 ret = PTR_ERR(operand1);
2904 goto free;
2907 expr->flags |= operand1->flags &
2908 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
2909 expr->fn = hist_field_unary_minus;
2910 expr->operands[0] = operand1;
2911 expr->operator = FIELD_OP_UNARY_MINUS;
2912 expr->name = expr_str(expr, 0);
2913 expr->type = kstrdup(operand1->type, GFP_KERNEL);
2914 if (!expr->type) {
2915 ret = -ENOMEM;
2916 goto free;
2919 return expr;
2920 free:
2921 destroy_hist_field(expr, 0);
2922 return ERR_PTR(ret);
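/*
 * Both operands of a binary expression must agree on timestamp units.
 * Operands that are variable references or aliases are first resolved
 * to the variable they point at before their flags are compared.
 */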
2925 static int check_expr_operands(struct trace_array *tr,
2926 struct hist_field *operand1,
2927 struct hist_field *operand2)
2929 unsigned long operand1_flags = operand1->flags;
2930 unsigned long operand2_flags = operand2->flags;
2932 if ((operand1_flags & HIST_FIELD_FL_VAR_REF) ||
2933 (operand1_flags & HIST_FIELD_FL_ALIAS)) {
2934 struct hist_field *var;
2936 var = find_var_field(operand1->var.hist_data, operand1->name);
2937 if (!var)
2938 return -EINVAL;
2939 operand1_flags = var->flags;
2942 if ((operand2_flags & HIST_FIELD_FL_VAR_REF) ||
2943 (operand2_flags & HIST_FIELD_FL_ALIAS)) {
2944 struct hist_field *var;
2946 var = find_var_field(operand2->var.hist_data, operand2->name);
2947 if (!var)
2948 return -EINVAL;
2949 operand2_flags = var->flags;
2952 if ((operand1_flags & HIST_FIELD_FL_TIMESTAMP_USECS) !=
2953 (operand2_flags & HIST_FIELD_FL_TIMESTAMP_USECS)) {
2954 hist_err(tr, HIST_ERR_TIMESTAMP_MISMATCH, 0);
2955 return -EINVAL;
2958 return 0;
2961 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
2962 struct trace_event_file *file,
2963 char *str, unsigned long flags,
2964 char *var_name, unsigned int level)
2966 struct hist_field *operand1 = NULL, *operand2 = NULL, *expr = NULL;
2967 unsigned long operand_flags;
2968 int field_op, ret = -EINVAL;
2969 char *sep, *operand1_str;
2971 if (level > 3) {
2972 hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str));
2973 return ERR_PTR(-EINVAL);
2976 field_op = contains_operator(str);
2978 if (field_op == FIELD_OP_NONE)
2979 return parse_atom(hist_data, file, str, &flags, var_name);
2981 if (field_op == FIELD_OP_UNARY_MINUS)
2982 return parse_unary(hist_data, file, str, flags, var_name, ++level);
2984 switch (field_op) {
2985 case FIELD_OP_MINUS:
2986 sep = "-";
2987 break;
2988 case FIELD_OP_PLUS:
2989 sep = "+";
2990 break;
2991 default:
2992 goto free;
2995 operand1_str = strsep(&str, sep);
2996 if (!operand1_str || !str)
2997 goto free;
2999 operand_flags = 0;
3000 operand1 = parse_atom(hist_data, file, operand1_str,
3001 &operand_flags, NULL);
3002 if (IS_ERR(operand1)) {
3003 ret = PTR_ERR(operand1);
3004 operand1 = NULL;
3005 goto free;
3008 /* rest of string could be another expression e.g. b+c in a+b+c */
3009 operand_flags = 0;
3010 operand2 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level);
3011 if (IS_ERR(operand2)) {
3012 ret = PTR_ERR(operand2);
3013 operand2 = NULL;
3014 goto free;
3017 ret = check_expr_operands(file->tr, operand1, operand2);
3018 if (ret)
3019 goto free;
3021 flags |= HIST_FIELD_FL_EXPR;
3023 flags |= operand1->flags &
3024 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
3026 expr = create_hist_field(hist_data, NULL, flags, var_name);
3027 if (!expr) {
3028 ret = -ENOMEM;
3029 goto free;
3032 operand1->read_once = true;
3033 operand2->read_once = true;
3035 expr->operands[0] = operand1;
3036 expr->operands[1] = operand2;
3037 expr->operator = field_op;
3038 expr->name = expr_str(expr, 0);
3039 expr->type = kstrdup(operand1->type, GFP_KERNEL);
3040 if (!expr->type) {
3041 ret = -ENOMEM;
3042 goto free;
3045 switch (field_op) {
3046 case FIELD_OP_MINUS:
3047 expr->fn = hist_field_minus;
3048 break;
3049 case FIELD_OP_PLUS:
3050 expr->fn = hist_field_plus;
3051 break;
3052 default:
3053 ret = -EINVAL;
3054 goto free;
3057 return expr;
3058 free:
3059 destroy_hist_field(operand1, 0);
3060 destroy_hist_field(operand2, 0);
3061 destroy_hist_field(expr, 0);
3063 return ERR_PTR(ret);
3066 static char *find_trigger_filter(struct hist_trigger_data *hist_data,
3067 struct trace_event_file *file)
3069 struct event_trigger_data *test;
3071 list_for_each_entry_rcu(test, &file->triggers, list) {
3072 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
3073 if (test->private_data == hist_data)
3074 return test->filter_str;
3078 return NULL;
3081 static struct event_command trigger_hist_cmd;
3082 static int event_hist_trigger_func(struct event_command *cmd_ops,
3083 struct trace_event_file *file,
3084 char *glob, char *cmd, char *param);
3086 static bool compatible_keys(struct hist_trigger_data *target_hist_data,
3087 struct hist_trigger_data *hist_data,
3088 unsigned int n_keys)
3090 struct hist_field *target_hist_field, *hist_field;
3091 unsigned int n, i, j;
3093 if (hist_data->n_fields - hist_data->n_vals != n_keys)
3094 return false;
3096 i = hist_data->n_vals;
3097 j = target_hist_data->n_vals;
3099 for (n = 0; n < n_keys; n++) {
3100 hist_field = hist_data->fields[i + n];
3101 target_hist_field = target_hist_data->fields[j + n];
3103 if (strcmp(hist_field->type, target_hist_field->type) != 0)
3104 return false;
3105 if (hist_field->size != target_hist_field->size)
3106 return false;
3107 if (hist_field->is_signed != target_hist_field->is_signed)
3108 return false;
3111 return true;
3114 static struct hist_trigger_data *
3115 find_compatible_hist(struct hist_trigger_data *target_hist_data,
3116 struct trace_event_file *file)
3118 struct hist_trigger_data *hist_data;
3119 struct event_trigger_data *test;
3120 unsigned int n_keys;
3122 n_keys = target_hist_data->n_fields - target_hist_data->n_vals;
3124 list_for_each_entry_rcu(test, &file->triggers, list) {
3125 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
3126 hist_data = test->private_data;
3128 if (compatible_keys(target_hist_data, hist_data, n_keys))
3129 return hist_data;
3133 return NULL;
3136 static struct trace_event_file *event_file(struct trace_array *tr,
3137 char *system, char *event_name)
3139 struct trace_event_file *file;
3141 file = __find_event_file(tr, system, event_name);
3142 if (!file)
3143 return ERR_PTR(-EINVAL);
3145 return file;
3148 static struct hist_field *
3149 find_synthetic_field_var(struct hist_trigger_data *target_hist_data,
3150 char *system, char *event_name, char *field_name)
3152 struct hist_field *event_var;
3153 char *synthetic_name;
3155 synthetic_name = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
3156 if (!synthetic_name)
3157 return ERR_PTR(-ENOMEM);
3159 strcpy(synthetic_name, "synthetic_");
3160 strcat(synthetic_name, field_name);
3162 event_var = find_event_var(target_hist_data, system, event_name, synthetic_name);
3164 kfree(synthetic_name);
3166 return event_var;
3170 * create_field_var_hist - Automatically create a histogram and var for a field
3171 * @target_hist_data: The target hist trigger
3172 * @subsys_name: Optional subsystem name
3173 * @event_name: Optional event name
3174 * @field_name: The name of the field (and the resulting variable)
3176 * Hist trigger actions fetch data from variables, not directly from
3177 * events. However, for convenience, users are allowed to directly
3178 * specify an event field in an action, which will be automatically
3179 * converted into a variable on their behalf.
3181 * If a user specifies a field on an event other than the one the
3182 * histogram is currently being defined on (the target event histogram),
3183 * the only way that can be accomplished is to create a new hist trigger
3184 * on that other event and define the field variable on it.
3186 * This function creates a new histogram compatible with the target
3187 * event (meaning a histogram with the same key as the target
3188 * histogram), and creates a variable for the specified field, but
3189 * with 'synthetic_' prepended to the variable name in order to avoid
3190 * collision with normal field variables.
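 *
 * As a rough, hypothetical illustration (event and field names are
 * invented, not taken from this file): referencing 'next_prio' on
 * sched_switch from another event's histogram results in a trigger
 * equivalent to
 *
 *   keys=<same keys as the compatible histogram>:synthetic_next_prio=next_prio [if <same filter>]
 *
 * being created on sched_switch, and the 'synthetic_next_prio' variable
 * is what gets returned.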
3192 * Return: The variable created for the field.
3194 static struct hist_field *
3195 create_field_var_hist(struct hist_trigger_data *target_hist_data,
3196 char *subsys_name, char *event_name, char *field_name)
3198 struct trace_array *tr = target_hist_data->event_file->tr;
3199 struct hist_field *event_var = ERR_PTR(-EINVAL);
3200 struct hist_trigger_data *hist_data;
3201 unsigned int i, n, first = true;
3202 struct field_var_hist *var_hist;
3203 struct trace_event_file *file;
3204 struct hist_field *key_field;
3205 char *saved_filter;
3206 char *cmd;
3207 int ret;
3209 if (target_hist_data->n_field_var_hists >= SYNTH_FIELDS_MAX) {
3210 hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name));
3211 return ERR_PTR(-EINVAL);
3214 file = event_file(tr, subsys_name, event_name);
3216 if (IS_ERR(file)) {
3217 hist_err(tr, HIST_ERR_EVENT_FILE_NOT_FOUND, errpos(field_name));
3218 ret = PTR_ERR(file);
3219 return ERR_PTR(ret);
3223 * Look for a histogram compatible with target. We'll use the
3224 * found histogram specification to create a new matching
3225 * histogram with our variable on it. target_hist_data is not
3226 * yet a registered histogram so we can't use that.
3228 hist_data = find_compatible_hist(target_hist_data, file);
3229 if (!hist_data) {
3230 hist_err(tr, HIST_ERR_HIST_NOT_FOUND, errpos(field_name));
3231 return ERR_PTR(-EINVAL);
3234 /* See if a synthetic field variable has already been created */
3235 event_var = find_synthetic_field_var(target_hist_data, subsys_name,
3236 event_name, field_name);
3237 if (!IS_ERR_OR_NULL(event_var))
3238 return event_var;
3240 var_hist = kzalloc(sizeof(*var_hist), GFP_KERNEL);
3241 if (!var_hist)
3242 return ERR_PTR(-ENOMEM);
3244 cmd = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
3245 if (!cmd) {
3246 kfree(var_hist);
3247 return ERR_PTR(-ENOMEM);
3250 /* Use the same keys as the compatible histogram */
3251 strcat(cmd, "keys=");
3253 for_each_hist_key_field(i, hist_data) {
3254 key_field = hist_data->fields[i];
3255 if (!first)
3256 strcat(cmd, ",");
3257 strcat(cmd, key_field->field->name);
3258 first = false;
3261 /* Create the synthetic field variable specification */
3262 strcat(cmd, ":synthetic_");
3263 strcat(cmd, field_name);
3264 strcat(cmd, "=");
3265 strcat(cmd, field_name);
3267 /* Use the same filter as the compatible histogram */
3268 saved_filter = find_trigger_filter(hist_data, file);
3269 if (saved_filter) {
3270 strcat(cmd, " if ");
3271 strcat(cmd, saved_filter);
3274 var_hist->cmd = kstrdup(cmd, GFP_KERNEL);
3275 if (!var_hist->cmd) {
3276 kfree(cmd);
3277 kfree(var_hist);
3278 return ERR_PTR(-ENOMEM);
3281 /* Save the compatible histogram information */
3282 var_hist->hist_data = hist_data;
3284 /* Create the new histogram with our variable */
3285 ret = event_hist_trigger_func(&trigger_hist_cmd, file,
3286 "", "hist", cmd);
3287 if (ret) {
3288 kfree(cmd);
3289 kfree(var_hist->cmd);
3290 kfree(var_hist);
3291 hist_err(tr, HIST_ERR_HIST_CREATE_FAIL, errpos(field_name));
3292 return ERR_PTR(ret);
3295 kfree(cmd);
3297 /* If we can't find the variable, something went wrong */
3298 event_var = find_synthetic_field_var(target_hist_data, subsys_name,
3299 event_name, field_name);
3300 if (IS_ERR_OR_NULL(event_var)) {
3301 kfree(var_hist->cmd);
3302 kfree(var_hist);
3303 hist_err(tr, HIST_ERR_SYNTH_VAR_NOT_FOUND, errpos(field_name));
3304 return ERR_PTR(-EINVAL);
3307 n = target_hist_data->n_field_var_hists;
3308 target_hist_data->field_var_hists[n] = var_hist;
3309 target_hist_data->n_field_var_hists++;
3311 return event_var;
3314 static struct hist_field *
3315 find_target_event_var(struct hist_trigger_data *hist_data,
3316 char *subsys_name, char *event_name, char *var_name)
3318 struct trace_event_file *file = hist_data->event_file;
3319 struct hist_field *hist_field = NULL;
3321 if (subsys_name) {
3322 struct trace_event_call *call;
3324 if (!event_name)
3325 return NULL;
3327 call = file->event_call;
3329 if (strcmp(subsys_name, call->class->system) != 0)
3330 return NULL;
3332 if (strcmp(event_name, trace_event_name(call)) != 0)
3333 return NULL;
3336 hist_field = find_var_field(hist_data, var_name);
3338 return hist_field;
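/*
 * Evaluate each field variable against the current event record and
 * store the result in the element's variable slot; string values are
 * first copied into the element's preallocated field_var_str buffers so
 * the stored value points at stable per-element storage.
 */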
3341 static inline void __update_field_vars(struct tracing_map_elt *elt,
3342 struct ring_buffer_event *rbe,
3343 void *rec,
3344 struct field_var **field_vars,
3345 unsigned int n_field_vars,
3346 unsigned int field_var_str_start)
3348 struct hist_elt_data *elt_data = elt->private_data;
3349 unsigned int i, j, var_idx;
3350 u64 var_val;
3352 for (i = 0, j = field_var_str_start; i < n_field_vars; i++) {
3353 struct field_var *field_var = field_vars[i];
3354 struct hist_field *var = field_var->var;
3355 struct hist_field *val = field_var->val;
3357 var_val = val->fn(val, elt, rbe, rec);
3358 var_idx = var->var.idx;
3360 if (val->flags & HIST_FIELD_FL_STRING) {
3361 char *str = elt_data->field_var_str[j++];
3362 char *val_str = (char *)(uintptr_t)var_val;
3364 strscpy(str, val_str, STR_VAR_LEN_MAX);
3365 var_val = (u64)(uintptr_t)str;
3367 tracing_map_set_var(elt, var_idx, var_val);
3371 static void update_field_vars(struct hist_trigger_data *hist_data,
3372 struct tracing_map_elt *elt,
3373 struct ring_buffer_event *rbe,
3374 void *rec)
3376 __update_field_vars(elt, rbe, rec, hist_data->field_vars,
3377 hist_data->n_field_vars, 0);
3380 static void save_track_data_vars(struct hist_trigger_data *hist_data,
3381 struct tracing_map_elt *elt, void *rec,
3382 struct ring_buffer_event *rbe, void *key,
3383 struct action_data *data, u64 *var_ref_vals)
3385 __update_field_vars(elt, rbe, rec, hist_data->save_vars,
3386 hist_data->n_save_vars, hist_data->n_field_var_str);
3389 static struct hist_field *create_var(struct hist_trigger_data *hist_data,
3390 struct trace_event_file *file,
3391 char *name, int size, const char *type)
3393 struct hist_field *var;
3394 int idx;
3396 if (find_var(hist_data, file, name) && !hist_data->remove) {
3397 var = ERR_PTR(-EINVAL);
3398 goto out;
3401 var = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
3402 if (!var) {
3403 var = ERR_PTR(-ENOMEM);
3404 goto out;
3407 idx = tracing_map_add_var(hist_data->map);
3408 if (idx < 0) {
3409 kfree(var);
3410 var = ERR_PTR(-EINVAL);
3411 goto out;
3414 var->flags = HIST_FIELD_FL_VAR;
3415 var->var.idx = idx;
3416 var->var.hist_data = var->hist_data = hist_data;
3417 var->size = size;
3418 var->var.name = kstrdup(name, GFP_KERNEL);
3419 var->type = kstrdup(type, GFP_KERNEL);
3420 if (!var->var.name || !var->type) {
3421 kfree(var->var.name);
3422 kfree(var->type);
3423 kfree(var);
3424 var = ERR_PTR(-ENOMEM);
3426 out:
3427 return var;
3430 static struct field_var *create_field_var(struct hist_trigger_data *hist_data,
3431 struct trace_event_file *file,
3432 char *field_name)
3434 struct hist_field *val = NULL, *var = NULL;
3435 unsigned long flags = HIST_FIELD_FL_VAR;
3436 struct trace_array *tr = file->tr;
3437 struct field_var *field_var;
3438 int ret = 0;
3440 if (hist_data->n_field_vars >= SYNTH_FIELDS_MAX) {
3441 hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name));
3442 ret = -EINVAL;
3443 goto err;
3446 val = parse_atom(hist_data, file, field_name, &flags, NULL);
3447 if (IS_ERR(val)) {
3448 hist_err(tr, HIST_ERR_FIELD_VAR_PARSE_FAIL, errpos(field_name));
3449 ret = PTR_ERR(val);
3450 goto err;
3453 var = create_var(hist_data, file, field_name, val->size, val->type);
3454 if (IS_ERR(var)) {
3455 hist_err(tr, HIST_ERR_VAR_CREATE_FIND_FAIL, errpos(field_name));
3456 kfree(val);
3457 ret = PTR_ERR(var);
3458 goto err;
3461 field_var = kzalloc(sizeof(struct field_var), GFP_KERNEL);
3462 if (!field_var) {
3463 kfree(val);
3464 kfree(var);
3465 ret = -ENOMEM;
3466 goto err;
3469 field_var->var = var;
3470 field_var->val = val;
3471 out:
3472 return field_var;
3473 err:
3474 field_var = ERR_PTR(ret);
3475 goto out;
3479 * create_target_field_var - Automatically create a variable for a field
3480 * @target_hist_data: The target hist trigger
3481 * @subsys_name: Optional subsystem name
3482 * @event_name: Optional event name
3483 * @var_name: The name of the field (and the resulting variable)
3485 * Hist trigger actions fetch data from variables, not directly from
3486 * events. However, for convenience, users are allowed to directly
3487 * specify an event field in an action, which will be automatically
3488 * converted into a variable on their behalf.
3490 * This function creates a field variable with the name var_name on
3491 * the hist trigger currently being defined on the target event. If
3492 * subsys_name and event_name are specified, this function simply
3493 * verifies that they do in fact match the target event subsystem and
3494 * event name.
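 *
 * For example (hypothetical names, for illustration only): with
 * subsys_name "sched", event_name "sched_switch" and var_name
 * "next_pid", the variable is only created if the target hist trigger
 * really is on sched:sched_switch; a mismatch simply returns NULL.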
3496 * Return: The variable created for the field.
3498 static struct field_var *
3499 create_target_field_var(struct hist_trigger_data *target_hist_data,
3500 char *subsys_name, char *event_name, char *var_name)
3502 struct trace_event_file *file = target_hist_data->event_file;
3504 if (subsys_name) {
3505 struct trace_event_call *call;
3507 if (!event_name)
3508 return NULL;
3510 call = file->event_call;
3512 if (strcmp(subsys_name, call->class->system) != 0)
3513 return NULL;
3515 if (strcmp(event_name, trace_event_name(call)) != 0)
3516 return NULL;
3519 return create_field_var(target_hist_data, file, var_name);
3522 static bool check_track_val_max(u64 track_val, u64 var_val)
3524 if (var_val <= track_val)
3525 return false;
3527 return true;
3530 static bool check_track_val_changed(u64 track_val, u64 var_val)
3532 if (var_val == track_val)
3533 return false;
3535 return true;
3538 static u64 get_track_val(struct hist_trigger_data *hist_data,
3539 struct tracing_map_elt *elt,
3540 struct action_data *data)
3542 unsigned int track_var_idx = data->track_data.track_var->var.idx;
3543 u64 track_val;
3545 track_val = tracing_map_read_var(elt, track_var_idx);
3547 return track_val;
3550 static void save_track_val(struct hist_trigger_data *hist_data,
3551 struct tracing_map_elt *elt,
3552 struct action_data *data, u64 var_val)
3554 unsigned int track_var_idx = data->track_data.track_var->var.idx;
3556 tracing_map_set_var(elt, track_var_idx, var_val);
3559 static void save_track_data(struct hist_trigger_data *hist_data,
3560 struct tracing_map_elt *elt, void *rec,
3561 struct ring_buffer_event *rbe, void *key,
3562 struct action_data *data, u64 *var_ref_vals)
3564 if (data->track_data.save_data)
3565 data->track_data.save_data(hist_data, elt, rec, rbe, key, data, var_ref_vals);
3568 static bool check_track_val(struct tracing_map_elt *elt,
3569 struct action_data *data,
3570 u64 var_val)
3572 struct hist_trigger_data *hist_data;
3573 u64 track_val;
3575 hist_data = data->track_data.track_var->hist_data;
3576 track_val = get_track_val(hist_data, elt, data);
3578 return data->track_data.check_val(track_val, var_val);
3581 #ifdef CONFIG_TRACER_SNAPSHOT
3582 static bool cond_snapshot_update(struct trace_array *tr, void *cond_data)
3584 /* called with tr->max_lock held */
3585 struct track_data *track_data = tr->cond_snapshot->cond_data;
3586 struct hist_elt_data *elt_data, *track_elt_data;
3587 struct snapshot_context *context = cond_data;
3588 struct action_data *action;
3589 u64 track_val;
3591 if (!track_data)
3592 return false;
3594 action = track_data->action_data;
3596 track_val = get_track_val(track_data->hist_data, context->elt,
3597 track_data->action_data);
3599 if (!action->track_data.check_val(track_data->track_val, track_val))
3600 return false;
3602 track_data->track_val = track_val;
3603 memcpy(track_data->key, context->key, track_data->key_len);
3605 elt_data = context->elt->private_data;
3606 track_elt_data = track_data->elt.private_data;
3607 if (elt_data->comm)
3608 strncpy(track_elt_data->comm, elt_data->comm, TASK_COMM_LEN);
3610 track_data->updated = true;
3612 return true;
3615 static void save_track_data_snapshot(struct hist_trigger_data *hist_data,
3616 struct tracing_map_elt *elt, void *rec,
3617 struct ring_buffer_event *rbe, void *key,
3618 struct action_data *data,
3619 u64 *var_ref_vals)
3621 struct trace_event_file *file = hist_data->event_file;
3622 struct snapshot_context context;
3624 context.elt = elt;
3625 context.key = key;
3627 tracing_snapshot_cond(file->tr, &context);
3630 static void hist_trigger_print_key(struct seq_file *m,
3631 struct hist_trigger_data *hist_data,
3632 void *key,
3633 struct tracing_map_elt *elt);
3635 static struct action_data *snapshot_action(struct hist_trigger_data *hist_data)
3637 unsigned int i;
3639 if (!hist_data->n_actions)
3640 return NULL;
3642 for (i = 0; i < hist_data->n_actions; i++) {
3643 struct action_data *data = hist_data->actions[i];
3645 if (data->action == ACTION_SNAPSHOT)
3646 return data;
3649 return NULL;
3652 static void track_data_snapshot_print(struct seq_file *m,
3653 struct hist_trigger_data *hist_data)
3655 struct trace_event_file *file = hist_data->event_file;
3656 struct track_data *track_data;
3657 struct action_data *action;
3659 track_data = tracing_cond_snapshot_data(file->tr);
3660 if (!track_data)
3661 return;
3663 if (!track_data->updated)
3664 return;
3666 action = snapshot_action(hist_data);
3667 if (!action)
3668 return;
3670 seq_puts(m, "\nSnapshot taken (see tracing/snapshot). Details:\n");
3671 seq_printf(m, "\ttriggering value { %s(%s) }: %10llu",
3672 action->handler == HANDLER_ONMAX ? "onmax" : "onchange",
3673 action->track_data.var_str, track_data->track_val);
3675 seq_puts(m, "\ttriggered by event with key: ");
3676 hist_trigger_print_key(m, hist_data, track_data->key, &track_data->elt);
3677 seq_putc(m, '\n');
3679 #else
3680 static bool cond_snapshot_update(struct trace_array *tr, void *cond_data)
3682 return false;
3684 static void save_track_data_snapshot(struct hist_trigger_data *hist_data,
3685 struct tracing_map_elt *elt, void *rec,
3686 struct ring_buffer_event *rbe, void *key,
3687 struct action_data *data,
3688 u64 *var_ref_vals) {}
3689 static void track_data_snapshot_print(struct seq_file *m,
3690 struct hist_trigger_data *hist_data) {}
3691 #endif /* CONFIG_TRACER_SNAPSHOT */
3693 static void track_data_print(struct seq_file *m,
3694 struct hist_trigger_data *hist_data,
3695 struct tracing_map_elt *elt,
3696 struct action_data *data)
3698 u64 track_val = get_track_val(hist_data, elt, data);
3699 unsigned int i, save_var_idx;
3701 if (data->handler == HANDLER_ONMAX)
3702 seq_printf(m, "\n\tmax: %10llu", track_val);
3703 else if (data->handler == HANDLER_ONCHANGE)
3704 seq_printf(m, "\n\tchanged: %10llu", track_val);
3706 if (data->action == ACTION_SNAPSHOT)
3707 return;
3709 for (i = 0; i < hist_data->n_save_vars; i++) {
3710 struct hist_field *save_val = hist_data->save_vars[i]->val;
3711 struct hist_field *save_var = hist_data->save_vars[i]->var;
3712 u64 val;
3714 save_var_idx = save_var->var.idx;
3716 val = tracing_map_read_var(elt, save_var_idx);
3718 if (save_val->flags & HIST_FIELD_FL_STRING) {
3719 seq_printf(m, " %s: %-32s", save_var->var.name,
3720 (char *)(uintptr_t)(val));
3721 } else
3722 seq_printf(m, " %s: %10llu", save_var->var.name, val);
3726 static void ontrack_action(struct hist_trigger_data *hist_data,
3727 struct tracing_map_elt *elt, void *rec,
3728 struct ring_buffer_event *rbe, void *key,
3729 struct action_data *data, u64 *var_ref_vals)
3731 u64 var_val = var_ref_vals[data->track_data.var_ref->var_ref_idx];
3733 if (check_track_val(elt, data, var_val)) {
3734 save_track_val(hist_data, elt, data, var_val);
3735 save_track_data(hist_data, elt, rec, rbe, key, data, var_ref_vals);
3739 static void action_data_destroy(struct action_data *data)
3741 unsigned int i;
3743 lockdep_assert_held(&event_mutex);
3745 kfree(data->action_name);
3747 for (i = 0; i < data->n_params; i++)
3748 kfree(data->params[i]);
3750 if (data->synth_event)
3751 data->synth_event->ref--;
3753 kfree(data->synth_event_name);
3755 kfree(data);
3758 static void track_data_destroy(struct hist_trigger_data *hist_data,
3759 struct action_data *data)
3761 struct trace_event_file *file = hist_data->event_file;
3763 destroy_hist_field(data->track_data.track_var, 0);
3765 if (data->action == ACTION_SNAPSHOT) {
3766 struct track_data *track_data;
3768 track_data = tracing_cond_snapshot_data(file->tr);
3769 if (track_data && track_data->hist_data == hist_data) {
3770 tracing_snapshot_cond_disable(file->tr);
3771 track_data_free(track_data);
3775 kfree(data->track_data.var_str);
3777 action_data_destroy(data);
3780 static int action_create(struct hist_trigger_data *hist_data,
3781 struct action_data *data);
3783 static int track_data_create(struct hist_trigger_data *hist_data,
3784 struct action_data *data)
3786 struct hist_field *var_field, *ref_field, *track_var = NULL;
3787 struct trace_event_file *file = hist_data->event_file;
3788 struct trace_array *tr = file->tr;
3789 char *track_data_var_str;
3790 int ret = 0;
3792 track_data_var_str = data->track_data.var_str;
3793 if (track_data_var_str[0] != '$') {
3794 hist_err(tr, HIST_ERR_ONX_NOT_VAR, errpos(track_data_var_str));
3795 return -EINVAL;
3797 track_data_var_str++;
3799 var_field = find_target_event_var(hist_data, NULL, NULL, track_data_var_str);
3800 if (!var_field) {
3801 hist_err(tr, HIST_ERR_ONX_VAR_NOT_FOUND, errpos(track_data_var_str));
3802 return -EINVAL;
3805 ref_field = create_var_ref(hist_data, var_field, NULL, NULL);
3806 if (!ref_field)
3807 return -ENOMEM;
3809 data->track_data.var_ref = ref_field;
3811 if (data->handler == HANDLER_ONMAX)
3812 track_var = create_var(hist_data, file, "__max", sizeof(u64), "u64");
3813 if (IS_ERR(track_var)) {
3814 hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0);
3815 ret = PTR_ERR(track_var);
3816 goto out;
3819 if (data->handler == HANDLER_ONCHANGE)
3820 track_var = create_var(hist_data, file, "__change", sizeof(u64), "u64");
3821 if (IS_ERR(track_var)) {
3822 hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0);
3823 ret = PTR_ERR(track_var);
3824 goto out;
3826 data->track_data.track_var = track_var;
3828 ret = action_create(hist_data, data);
3829 out:
3830 return ret;
3833 static int parse_action_params(struct trace_array *tr, char *params,
3834 struct action_data *data)
3836 char *param, *saved_param;
3837 bool first_param = true;
3838 int ret = 0;
3840 while (params) {
3841 if (data->n_params >= SYNTH_FIELDS_MAX) {
3842 hist_err(tr, HIST_ERR_TOO_MANY_PARAMS, 0);
ret = -EINVAL;
3843 goto out;
3846 param = strsep(&params, ",");
3847 if (!param) {
3848 hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, 0);
3849 ret = -EINVAL;
3850 goto out;
3853 param = strstrip(param);
3854 if (strlen(param) < 2) {
3855 hist_err(tr, HIST_ERR_INVALID_PARAM, errpos(param));
3856 ret = -EINVAL;
3857 goto out;
3860 saved_param = kstrdup(param, GFP_KERNEL);
3861 if (!saved_param) {
3862 ret = -ENOMEM;
3863 goto out;
3866 if (first_param && data->use_trace_keyword) {
3867 data->synth_event_name = saved_param;
3868 first_param = false;
3869 continue;
3871 first_param = false;
3873 data->params[data->n_params++] = saved_param;
3875 out:
3876 return ret;
3879 static int action_parse(struct trace_array *tr, char *str, struct action_data *data,
3880 enum handler_id handler)
3882 char *action_name;
3883 int ret = 0;
3885 strsep(&str, ".");
3886 if (!str) {
3887 hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0);
3888 ret = -EINVAL;
3889 goto out;
3892 action_name = strsep(&str, "(");
3893 if (!action_name || !str) {
3894 hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0);
3895 ret = -EINVAL;
3896 goto out;
3899 if (str_has_prefix(action_name, "save")) {
3900 char *params = strsep(&str, ")");
3902 if (!params) {
3903 hist_err(tr, HIST_ERR_NO_SAVE_PARAMS, 0);
3904 ret = -EINVAL;
3905 goto out;
3908 ret = parse_action_params(tr, params, data);
3909 if (ret)
3910 goto out;
3912 if (handler == HANDLER_ONMAX)
3913 data->track_data.check_val = check_track_val_max;
3914 else if (handler == HANDLER_ONCHANGE)
3915 data->track_data.check_val = check_track_val_changed;
3916 else {
3917 hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name));
3918 ret = -EINVAL;
3919 goto out;
3922 data->track_data.save_data = save_track_data_vars;
3923 data->fn = ontrack_action;
3924 data->action = ACTION_SAVE;
3925 } else if (str_has_prefix(action_name, "snapshot")) {
3926 char *params = strsep(&str, ")");
3928 if (!str) {
3929 hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(params));
3930 ret = -EINVAL;
3931 goto out;
3934 if (handler == HANDLER_ONMAX)
3935 data->track_data.check_val = check_track_val_max;
3936 else if (handler == HANDLER_ONCHANGE)
3937 data->track_data.check_val = check_track_val_changed;
3938 else {
3939 hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name));
3940 ret = -EINVAL;
3941 goto out;
3944 data->track_data.save_data = save_track_data_snapshot;
3945 data->fn = ontrack_action;
3946 data->action = ACTION_SNAPSHOT;
3947 } else {
3948 char *params = strsep(&str, ")");
3950 if (str_has_prefix(action_name, "trace"))
3951 data->use_trace_keyword = true;
3953 if (params) {
3954 ret = parse_action_params(tr, params, data);
3955 if (ret)
3956 goto out;
3959 if (handler == HANDLER_ONMAX)
3960 data->track_data.check_val = check_track_val_max;
3961 else if (handler == HANDLER_ONCHANGE)
3962 data->track_data.check_val = check_track_val_changed;
3964 if (handler != HANDLER_ONMATCH) {
3965 data->track_data.save_data = action_trace;
3966 data->fn = ontrack_action;
3967 } else
3968 data->fn = action_trace;
3970 data->action = ACTION_TRACE;
3973 data->action_name = kstrdup(action_name, GFP_KERNEL);
3974 if (!data->action_name) {
3975 ret = -ENOMEM;
3976 goto out;
3979 data->handler = handler;
3980 out:
3981 return ret;
3984 static struct action_data *track_data_parse(struct hist_trigger_data *hist_data,
3985 char *str, enum handler_id handler)
3987 struct action_data *data;
3988 int ret = -EINVAL;
3989 char *var_str;
3991 data = kzalloc(sizeof(*data), GFP_KERNEL);
3992 if (!data)
3993 return ERR_PTR(-ENOMEM);
3995 var_str = strsep(&str, ")");
3996 if (!var_str || !str) {
3997 ret = -EINVAL;
3998 goto free;
4001 data->track_data.var_str = kstrdup(var_str, GFP_KERNEL);
4002 if (!data->track_data.var_str) {
4003 ret = -ENOMEM;
4004 goto free;
4007 ret = action_parse(hist_data->event_file->tr, str, data, handler);
4008 if (ret)
4009 goto free;
4010 out:
4011 return data;
4012 free:
4013 track_data_destroy(hist_data, data);
4014 data = ERR_PTR(ret);
4015 goto out;
4018 static void onmatch_destroy(struct action_data *data)
4020 kfree(data->match_data.event);
4021 kfree(data->match_data.event_system);
4023 action_data_destroy(data);
4026 static void destroy_field_var(struct field_var *field_var)
4028 if (!field_var)
4029 return;
4031 destroy_hist_field(field_var->var, 0);
4032 destroy_hist_field(field_var->val, 0);
4034 kfree(field_var);
4037 static void destroy_field_vars(struct hist_trigger_data *hist_data)
4039 unsigned int i;
4041 for (i = 0; i < hist_data->n_field_vars; i++)
4042 destroy_field_var(hist_data->field_vars[i]);
4045 static void save_field_var(struct hist_trigger_data *hist_data,
4046 struct field_var *field_var)
4048 hist_data->field_vars[hist_data->n_field_vars++] = field_var;
4050 if (field_var->val->flags & HIST_FIELD_FL_STRING)
4051 hist_data->n_field_var_str++;
4055 static int check_synth_field(struct synth_event *event,
4056 struct hist_field *hist_field,
4057 unsigned int field_pos)
4059 struct synth_field *field;
4061 if (field_pos >= event->n_fields)
4062 return -EINVAL;
4064 field = event->fields[field_pos];
4066 if (strcmp(field->type, hist_field->type) != 0)
4067 return -EINVAL;
4069 return 0;
4072 static struct hist_field *
4073 trace_action_find_var(struct hist_trigger_data *hist_data,
4074 struct action_data *data,
4075 char *system, char *event, char *var)
4077 struct trace_array *tr = hist_data->event_file->tr;
4078 struct hist_field *hist_field;
4080 var++; /* skip '$' */
4082 hist_field = find_target_event_var(hist_data, system, event, var);
4083 if (!hist_field) {
4084 if (!system && data->handler == HANDLER_ONMATCH) {
4085 system = data->match_data.event_system;
4086 event = data->match_data.event;
4089 hist_field = find_event_var(hist_data, system, event, var);
4092 if (!hist_field)
4093 hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, errpos(var));
4095 return hist_field;
4098 static struct hist_field *
4099 trace_action_create_field_var(struct hist_trigger_data *hist_data,
4100 struct action_data *data, char *system,
4101 char *event, char *var)
4103 struct hist_field *hist_field = NULL;
4104 struct field_var *field_var;
4107 * First try to create a field var on the target event (the event
4108 * currently being defined). This will create a variable for
4109 * unqualified fields on the target event, or if qualified,
4110 * target fields that have qualified names matching the target.
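 *
 * Hypothetical example (names invented for illustration): a param like
 * 'prio' that exists on the target event is handled here; a param like
 * 'sched.sched_switch.next_prio' naming another event falls through to
 * create_field_var_hist() below.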
4112 field_var = create_target_field_var(hist_data, system, event, var);
4114 if (field_var && !IS_ERR(field_var)) {
4115 save_field_var(hist_data, field_var);
4116 hist_field = field_var->var;
4117 } else {
4118 field_var = NULL;
4120 * If no explicit system.event is specified, default to
4121 * looking for fields on the onmatch(system.event.xxx)
4122 * event.
4124 if (!system && data->handler == HANDLER_ONMATCH) {
4125 system = data->match_data.event_system;
4126 event = data->match_data.event;
4130 * At this point, we're looking at a field on another
4131 * event. Because we can't modify a hist trigger on
4132 * another event to add a variable for a field, we need
4133 * to create a new trigger on that event and create the
4134 * variable at the same time.
4136 hist_field = create_field_var_hist(hist_data, system, event, var);
4137 if (IS_ERR(hist_field))
4138 goto free;
4140 out:
4141 return hist_field;
4142 free:
4143 destroy_field_var(field_var);
4144 hist_field = NULL;
4145 goto out;
4148 static int trace_action_create(struct hist_trigger_data *hist_data,
4149 struct action_data *data)
4151 struct trace_array *tr = hist_data->event_file->tr;
4152 char *event_name, *param, *system = NULL;
4153 struct hist_field *hist_field, *var_ref;
4154 unsigned int i, var_ref_idx;
4155 unsigned int field_pos = 0;
4156 struct synth_event *event;
4157 char *synth_event_name;
4158 int ret = 0;
4160 lockdep_assert_held(&event_mutex);
4162 if (data->use_trace_keyword)
4163 synth_event_name = data->synth_event_name;
4164 else
4165 synth_event_name = data->action_name;
4167 event = find_synth_event(synth_event_name);
4168 if (!event) {
4169 hist_err(tr, HIST_ERR_SYNTH_EVENT_NOT_FOUND, errpos(synth_event_name));
4170 return -EINVAL;
4173 event->ref++;
4175 var_ref_idx = hist_data->n_var_refs;
4177 for (i = 0; i < data->n_params; i++) {
4178 char *p;
4180 p = param = kstrdup(data->params[i], GFP_KERNEL);
4181 if (!param) {
4182 ret = -ENOMEM;
4183 goto err;
4186 system = strsep(&param, ".");
4187 if (!param) {
4188 param = (char *)system;
4189 system = event_name = NULL;
4190 } else {
4191 event_name = strsep(&param, ".");
4192 if (!param) {
4193 kfree(p);
4194 ret = -EINVAL;
4195 goto err;
4199 if (param[0] == '$')
4200 hist_field = trace_action_find_var(hist_data, data,
4201 system, event_name,
4202 param);
4203 else
4204 hist_field = trace_action_create_field_var(hist_data,
4205 data,
4206 system,
4207 event_name,
4208 param);
4210 if (!hist_field) {
4211 kfree(p);
4212 ret = -EINVAL;
4213 goto err;
4216 if (check_synth_field(event, hist_field, field_pos) == 0) {
4217 var_ref = create_var_ref(hist_data, hist_field,
4218 system, event_name);
4219 if (!var_ref) {
4220 kfree(p);
4221 ret = -ENOMEM;
4222 goto err;
4225 field_pos++;
4226 kfree(p);
4227 continue;
4230 hist_err(tr, HIST_ERR_SYNTH_TYPE_MISMATCH, errpos(param));
4231 kfree(p);
4232 ret = -EINVAL;
4233 goto err;
4236 if (field_pos != event->n_fields) {
4237 hist_err(tr, HIST_ERR_SYNTH_COUNT_MISMATCH, errpos(event->name));
4238 ret = -EINVAL;
4239 goto err;
4242 data->synth_event = event;
4243 data->var_ref_idx = var_ref_idx;
4244 out:
4245 return ret;
4246 err:
4247 event->ref--;
4249 goto out;
4252 static int action_create(struct hist_trigger_data *hist_data,
4253 struct action_data *data)
4255 struct trace_event_file *file = hist_data->event_file;
4256 struct trace_array *tr = file->tr;
4257 struct track_data *track_data;
4258 struct field_var *field_var;
4259 unsigned int i;
4260 char *param;
4261 int ret = 0;
4263 if (data->action == ACTION_TRACE)
4264 return trace_action_create(hist_data, data);
4266 if (data->action == ACTION_SNAPSHOT) {
4267 track_data = track_data_alloc(hist_data->key_size, data, hist_data);
4268 if (IS_ERR(track_data)) {
4269 ret = PTR_ERR(track_data);
4270 goto out;
4273 ret = tracing_snapshot_cond_enable(file->tr, track_data,
4274 cond_snapshot_update);
4275 if (ret)
4276 track_data_free(track_data);
4278 goto out;
4281 if (data->action == ACTION_SAVE) {
4282 if (hist_data->n_save_vars) {
4283 ret = -EEXIST;
4284 hist_err(tr, HIST_ERR_TOO_MANY_SAVE_ACTIONS, 0);
4285 goto out;
4288 for (i = 0; i < data->n_params; i++) {
4289 param = kstrdup(data->params[i], GFP_KERNEL);
4290 if (!param) {
4291 ret = -ENOMEM;
4292 goto out;
4295 field_var = create_target_field_var(hist_data, NULL, NULL, param);
4296 if (IS_ERR(field_var)) {
4297 hist_err(tr, HIST_ERR_FIELD_VAR_CREATE_FAIL,
4298 errpos(param));
4299 ret = PTR_ERR(field_var);
4300 kfree(param);
4301 goto out;
4304 hist_data->save_vars[hist_data->n_save_vars++] = field_var;
4305 if (field_var->val->flags & HIST_FIELD_FL_STRING)
4306 hist_data->n_save_var_str++;
4307 kfree(param);
4310 out:
4311 return ret;
4314 static int onmatch_create(struct hist_trigger_data *hist_data,
4315 struct action_data *data)
4317 return action_create(hist_data, data);
4320 static struct action_data *onmatch_parse(struct trace_array *tr, char *str)
4322 char *match_event, *match_event_system;
4323 struct action_data *data;
4324 int ret = -EINVAL;
4326 data = kzalloc(sizeof(*data), GFP_KERNEL);
4327 if (!data)
4328 return ERR_PTR(-ENOMEM);
4330 match_event = strsep(&str, ")");
4331 if (!match_event || !str) {
4332 hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(match_event));
4333 goto free;
4336 match_event_system = strsep(&match_event, ".");
4337 if (!match_event) {
4338 hist_err(tr, HIST_ERR_SUBSYS_NOT_FOUND, errpos(match_event_system));
4339 goto free;
4342 if (IS_ERR(event_file(tr, match_event_system, match_event))) {
4343 hist_err(tr, HIST_ERR_INVALID_SUBSYS_EVENT, errpos(match_event));
4344 goto free;
4347 data->match_data.event = kstrdup(match_event, GFP_KERNEL);
4348 if (!data->match_data.event) {
4349 ret = -ENOMEM;
4350 goto free;
4353 data->match_data.event_system = kstrdup(match_event_system, GFP_KERNEL);
4354 if (!data->match_data.event_system) {
4355 ret = -ENOMEM;
4356 goto free;
4359 ret = action_parse(tr, str, data, HANDLER_ONMATCH);
4360 if (ret)
4361 goto free;
4362 out:
4363 return data;
4364 free:
4365 onmatch_destroy(data);
4366 data = ERR_PTR(ret);
4367 goto out;
4370 static int create_hitcount_val(struct hist_trigger_data *hist_data)
4372 hist_data->fields[HITCOUNT_IDX] =
4373 create_hist_field(hist_data, NULL, HIST_FIELD_FL_HITCOUNT, NULL);
4374 if (!hist_data->fields[HITCOUNT_IDX])
4375 return -ENOMEM;
4377 hist_data->n_vals++;
4378 hist_data->n_fields++;
4380 if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
4381 return -EINVAL;
4383 return 0;
4386 static int __create_val_field(struct hist_trigger_data *hist_data,
4387 unsigned int val_idx,
4388 struct trace_event_file *file,
4389 char *var_name, char *field_str,
4390 unsigned long flags)
4392 struct hist_field *hist_field;
4393 int ret = 0;
4395 hist_field = parse_expr(hist_data, file, field_str, flags, var_name, 0);
4396 if (IS_ERR(hist_field)) {
4397 ret = PTR_ERR(hist_field);
4398 goto out;
4401 hist_data->fields[val_idx] = hist_field;
4403 ++hist_data->n_vals;
4404 ++hist_data->n_fields;
4406 if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
4407 ret = -EINVAL;
4408 out:
4409 return ret;
4412 static int create_val_field(struct hist_trigger_data *hist_data,
4413 unsigned int val_idx,
4414 struct trace_event_file *file,
4415 char *field_str)
4417 if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX))
4418 return -EINVAL;
4420 return __create_val_field(hist_data, val_idx, file, NULL, field_str, 0);
4423 static int create_var_field(struct hist_trigger_data *hist_data,
4424 unsigned int val_idx,
4425 struct trace_event_file *file,
4426 char *var_name, char *expr_str)
4428 struct trace_array *tr = hist_data->event_file->tr;
4429 unsigned long flags = 0;
4431 if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
4432 return -EINVAL;
4434 if (find_var(hist_data, file, var_name) && !hist_data->remove) {
4435 hist_err(tr, HIST_ERR_DUPLICATE_VAR, errpos(var_name));
4436 return -EINVAL;
4439 flags |= HIST_FIELD_FL_VAR;
4440 hist_data->n_vars++;
4441 if (WARN_ON(hist_data->n_vars > TRACING_MAP_VARS_MAX))
4442 return -EINVAL;
4444 return __create_val_field(hist_data, val_idx, file, var_name, expr_str, flags);
4447 static int create_val_fields(struct hist_trigger_data *hist_data,
4448 struct trace_event_file *file)
4450 char *fields_str, *field_str;
4451 unsigned int i, j = 1;
4452 int ret;
4454 ret = create_hitcount_val(hist_data);
4455 if (ret)
4456 goto out;
4458 fields_str = hist_data->attrs->vals_str;
4459 if (!fields_str)
4460 goto out;
4462 strsep(&fields_str, "=");
4463 if (!fields_str)
4464 goto out;
4466 for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX &&
4467 j < TRACING_MAP_VALS_MAX; i++) {
4468 field_str = strsep(&fields_str, ",");
4469 if (!field_str)
4470 break;
4472 if (strcmp(field_str, "hitcount") == 0)
4473 continue;
4475 ret = create_val_field(hist_data, j++, file, field_str);
4476 if (ret)
4477 goto out;
4480 if (fields_str && (strcmp(fields_str, "hitcount") != 0))
4481 ret = -EINVAL;
4482 out:
4483 return ret;
4486 static int create_key_field(struct hist_trigger_data *hist_data,
4487 unsigned int key_idx,
4488 unsigned int key_offset,
4489 struct trace_event_file *file,
4490 char *field_str)
4492 struct trace_array *tr = hist_data->event_file->tr;
4493 struct hist_field *hist_field = NULL;
4494 unsigned long flags = 0;
4495 unsigned int key_size;
4496 int ret = 0;
4498 if (WARN_ON(key_idx >= HIST_FIELDS_MAX))
4499 return -EINVAL;
4501 flags |= HIST_FIELD_FL_KEY;
4503 if (strcmp(field_str, "stacktrace") == 0) {
4504 flags |= HIST_FIELD_FL_STACKTRACE;
4505 key_size = sizeof(unsigned long) * HIST_STACKTRACE_DEPTH;
4506 hist_field = create_hist_field(hist_data, NULL, flags, NULL);
4507 } else {
4508 hist_field = parse_expr(hist_data, file, field_str, flags,
4509 NULL, 0);
4510 if (IS_ERR(hist_field)) {
4511 ret = PTR_ERR(hist_field);
4512 goto out;
4515 if (field_has_hist_vars(hist_field, 0)) {
4516 hist_err(tr, HIST_ERR_INVALID_REF_KEY, errpos(field_str));
4517 destroy_hist_field(hist_field, 0);
4518 ret = -EINVAL;
4519 goto out;
4522 key_size = hist_field->size;
4525 hist_data->fields[key_idx] = hist_field;
4527 key_size = ALIGN(key_size, sizeof(u64));
4528 hist_data->fields[key_idx]->size = key_size;
4529 hist_data->fields[key_idx]->offset = key_offset;
4531 hist_data->key_size += key_size;
4533 if (hist_data->key_size > HIST_KEY_SIZE_MAX) {
4534 ret = -EINVAL;
4535 goto out;
4538 hist_data->n_keys++;
4539 hist_data->n_fields++;
4541 if (WARN_ON(hist_data->n_keys > TRACING_MAP_KEYS_MAX))
4542 return -EINVAL;
4544 ret = key_size;
4545 out:
4546 return ret;
4549 static int create_key_fields(struct hist_trigger_data *hist_data,
4550 struct trace_event_file *file)
4552 unsigned int i, key_offset = 0, n_vals = hist_data->n_vals;
4553 char *fields_str, *field_str;
4554 int ret = -EINVAL;
4556 fields_str = hist_data->attrs->keys_str;
4557 if (!fields_str)
4558 goto out;
4560 strsep(&fields_str, "=");
4561 if (!fields_str)
4562 goto out;
4564 for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) {
4565 field_str = strsep(&fields_str, ",");
4566 if (!field_str)
4567 break;
4568 ret = create_key_field(hist_data, i, key_offset,
4569 file, field_str);
4570 if (ret < 0)
4571 goto out;
4572 key_offset += ret;
4574 if (fields_str) {
4575 ret = -EINVAL;
4576 goto out;
4578 ret = 0;
4579 out:
4580 return ret;
4583 static int create_var_fields(struct hist_trigger_data *hist_data,
4584 struct trace_event_file *file)
4586 unsigned int i, j = hist_data->n_vals;
4587 int ret = 0;
4589 unsigned int n_vars = hist_data->attrs->var_defs.n_vars;
4591 for (i = 0; i < n_vars; i++) {
4592 char *var_name = hist_data->attrs->var_defs.name[i];
4593 char *expr = hist_data->attrs->var_defs.expr[i];
4595 ret = create_var_field(hist_data, j++, file, var_name, expr);
4596 if (ret)
4597 goto out;
4599 out:
4600 return ret;
4603 static void free_var_defs(struct hist_trigger_data *hist_data)
4605 unsigned int i;
4607 for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) {
4608 kfree(hist_data->attrs->var_defs.name[i]);
4609 kfree(hist_data->attrs->var_defs.expr[i]);
4612 hist_data->attrs->var_defs.n_vars = 0;
4615 static int parse_var_defs(struct hist_trigger_data *hist_data)
4617 struct trace_array *tr = hist_data->event_file->tr;
4618 char *s, *str, *var_name, *field_str;
4619 unsigned int i, j, n_vars = 0;
4620 int ret = 0;
4622 for (i = 0; i < hist_data->attrs->n_assignments; i++) {
4623 str = hist_data->attrs->assignment_str[i];
4624 for (j = 0; j < TRACING_MAP_VARS_MAX; j++) {
4625 field_str = strsep(&str, ",");
4626 if (!field_str)
4627 break;
4629 var_name = strsep(&field_str, "=");
4630 if (!var_name || !field_str) {
4631 hist_err(tr, HIST_ERR_MALFORMED_ASSIGNMENT,
4632 errpos(var_name));
4633 ret = -EINVAL;
4634 goto free;
4637 if (n_vars == TRACING_MAP_VARS_MAX) {
4638 hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(var_name));
4639 ret = -EINVAL;
4640 goto free;
4643 s = kstrdup(var_name, GFP_KERNEL);
4644 if (!s) {
4645 ret = -ENOMEM;
4646 goto free;
4648 hist_data->attrs->var_defs.name[n_vars] = s;
4650 s = kstrdup(field_str, GFP_KERNEL);
4651 if (!s) {
4652 kfree(hist_data->attrs->var_defs.name[n_vars]);
4653 ret = -ENOMEM;
4654 goto free;
4656 hist_data->attrs->var_defs.expr[n_vars++] = s;
4658 hist_data->attrs->var_defs.n_vars = n_vars;
4662 return ret;
4663 free:
4664 free_var_defs(hist_data);
4666 return ret;
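/*
 * Create all the hist fields for this trigger: variable definitions are
 * parsed first, then the val fields, var fields and key fields, in that
 * order.  The parsed var definition strings are freed once the fields
 * have been created.
 */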
4669 static int create_hist_fields(struct hist_trigger_data *hist_data,
4670 struct trace_event_file *file)
4672 int ret;
4674 ret = parse_var_defs(hist_data);
4675 if (ret)
4676 goto out;
4678 ret = create_val_fields(hist_data, file);
4679 if (ret)
4680 goto out;
4682 ret = create_var_fields(hist_data, file);
4683 if (ret)
4684 goto out;
4686 ret = create_key_fields(hist_data, file);
4687 if (ret)
4688 goto out;
4689 out:
4690 free_var_defs(hist_data);
4692 return ret;
4695 static int is_descending(const char *str)
4697 if (!str)
4698 return 0;
4700 if (strcmp(str, "descending") == 0)
4701 return 1;
4703 if (strcmp(str, "ascending") == 0)
4704 return 0;
4706 return -EINVAL;
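/*
 * Parse the 'sort=' attribute.  There's always at least one sort key
 * (hitcount); each entry can be suffixed with .ascending or .descending.
 * Fields flagged as variables are skipped when matching sort key names.
 */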
4709 static int create_sort_keys(struct hist_trigger_data *hist_data)
4711 char *fields_str = hist_data->attrs->sort_key_str;
4712 struct tracing_map_sort_key *sort_key;
4713 int descending, ret = 0;
4714 unsigned int i, j, k;
4716 hist_data->n_sort_keys = 1; /* we always have at least one, hitcount */
4718 if (!fields_str)
4719 goto out;
4721 strsep(&fields_str, "=");
4722 if (!fields_str) {
4723 ret = -EINVAL;
4724 goto out;
4727 for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
4728 struct hist_field *hist_field;
4729 char *field_str, *field_name;
4730 const char *test_name;
4732 sort_key = &hist_data->sort_keys[i];
4734 field_str = strsep(&fields_str, ",");
4735 if (!field_str) {
4736 if (i == 0)
4737 ret = -EINVAL;
4738 break;
4741 if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) {
4742 ret = -EINVAL;
4743 break;
4746 field_name = strsep(&field_str, ".");
4747 if (!field_name) {
4748 ret = -EINVAL;
4749 break;
4752 if (strcmp(field_name, "hitcount") == 0) {
4753 descending = is_descending(field_str);
4754 if (descending < 0) {
4755 ret = descending;
4756 break;
4758 sort_key->descending = descending;
4759 continue;
4762 for (j = 1, k = 1; j < hist_data->n_fields; j++) {
4763 unsigned int idx;
4765 hist_field = hist_data->fields[j];
4766 if (hist_field->flags & HIST_FIELD_FL_VAR)
4767 continue;
4769 idx = k++;
4771 test_name = hist_field_name(hist_field, 0);
4773 if (strcmp(field_name, test_name) == 0) {
4774 sort_key->field_idx = idx;
4775 descending = is_descending(field_str);
4776 if (descending < 0) {
4777 ret = descending;
4778 goto out;
4780 sort_key->descending = descending;
4781 break;
4784 if (j == hist_data->n_fields) {
4785 ret = -EINVAL;
4786 break;
4790 hist_data->n_sort_keys = i;
4791 out:
4792 return ret;
4795 static void destroy_actions(struct hist_trigger_data *hist_data)
4797 unsigned int i;
4799 for (i = 0; i < hist_data->n_actions; i++) {
4800 struct action_data *data = hist_data->actions[i];
4802 if (data->handler == HANDLER_ONMATCH)
4803 onmatch_destroy(data);
4804 else if (data->handler == HANDLER_ONMAX ||
4805 data->handler == HANDLER_ONCHANGE)
4806 track_data_destroy(hist_data, data);
4807 else
4808 kfree(data);
4812 static int parse_actions(struct hist_trigger_data *hist_data)
4814 struct trace_array *tr = hist_data->event_file->tr;
4815 struct action_data *data;
4816 unsigned int i;
4817 int ret = 0;
4818 char *str;
4819 int len;
4821 for (i = 0; i < hist_data->attrs->n_actions; i++) {
4822 str = hist_data->attrs->action_str[i];
4824 if ((len = str_has_prefix(str, "onmatch("))) {
4825 char *action_str = str + len;
4827 data = onmatch_parse(tr, action_str);
4828 if (IS_ERR(data)) {
4829 ret = PTR_ERR(data);
4830 break;
4832 } else if ((len = str_has_prefix(str, "onmax("))) {
4833 char *action_str = str + len;
4835 data = track_data_parse(hist_data, action_str,
4836 HANDLER_ONMAX);
4837 if (IS_ERR(data)) {
4838 ret = PTR_ERR(data);
4839 break;
4841 } else if ((len = str_has_prefix(str, "onchange("))) {
4842 char *action_str = str + len;
4844 data = track_data_parse(hist_data, action_str,
4845 HANDLER_ONCHANGE);
4846 if (IS_ERR(data)) {
4847 ret = PTR_ERR(data);
4848 break;
4850 } else {
4851 ret = -EINVAL;
4852 break;
4855 hist_data->actions[hist_data->n_actions++] = data;
4858 return ret;
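/*
 * Instantiate the actions parsed by parse_actions(): onmatch() actions
 * are wired up via onmatch_create(), onmax()/onchange() via
 * track_data_create().
 */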
4861 static int create_actions(struct hist_trigger_data *hist_data)
4863 struct action_data *data;
4864 unsigned int i;
4865 int ret = 0;
4867 for (i = 0; i < hist_data->attrs->n_actions; i++) {
4868 data = hist_data->actions[i];
4870 if (data->handler == HANDLER_ONMATCH) {
4871 ret = onmatch_create(hist_data, data);
4872 if (ret)
4873 break;
4874 } else if (data->handler == HANDLER_ONMAX ||
4875 data->handler == HANDLER_ONCHANGE) {
4876 ret = track_data_create(hist_data, data);
4877 if (ret)
4878 break;
4879 } else {
4880 ret = -EINVAL;
4881 break;
4885 return ret;
4888 static void print_actions(struct seq_file *m,
4889 struct hist_trigger_data *hist_data,
4890 struct tracing_map_elt *elt)
4892 unsigned int i;
4894 for (i = 0; i < hist_data->n_actions; i++) {
4895 struct action_data *data = hist_data->actions[i];
4897 if (data->action == ACTION_SNAPSHOT)
4898 continue;
4900 if (data->handler == HANDLER_ONMAX ||
4901 data->handler == HANDLER_ONCHANGE)
4902 track_data_print(m, hist_data, elt, data);
4906 static void print_action_spec(struct seq_file *m,
4907 struct hist_trigger_data *hist_data,
4908 struct action_data *data)
4910 unsigned int i;
4912 if (data->action == ACTION_SAVE) {
4913 for (i = 0; i < hist_data->n_save_vars; i++) {
4914 seq_printf(m, "%s", hist_data->save_vars[i]->var->var.name);
4915 if (i < hist_data->n_save_vars - 1)
4916 seq_puts(m, ",");
4918 } else if (data->action == ACTION_TRACE) {
4919 if (data->use_trace_keyword)
4920 seq_printf(m, "%s", data->synth_event_name);
4921 for (i = 0; i < data->n_params; i++) {
4922 if (i || data->use_trace_keyword)
4923 seq_puts(m, ",");
4924 seq_printf(m, "%s", data->params[i]);
4929 static void print_track_data_spec(struct seq_file *m,
4930 struct hist_trigger_data *hist_data,
4931 struct action_data *data)
4933 if (data->handler == HANDLER_ONMAX)
4934 seq_puts(m, ":onmax(");
4935 else if (data->handler == HANDLER_ONCHANGE)
4936 seq_puts(m, ":onchange(");
4937 seq_printf(m, "%s", data->track_data.var_str);
4938 seq_printf(m, ").%s(", data->action_name);
4940 print_action_spec(m, hist_data, data);
4942 seq_puts(m, ")");
4945 static void print_onmatch_spec(struct seq_file *m,
4946 struct hist_trigger_data *hist_data,
4947 struct action_data *data)
4949 seq_printf(m, ":onmatch(%s.%s).", data->match_data.event_system,
4950 data->match_data.event);
4952 seq_printf(m, "%s(", data->action_name);
4954 print_action_spec(m, hist_data, data);
4956 seq_puts(m, ")");
4959 static bool actions_match(struct hist_trigger_data *hist_data,
4960 struct hist_trigger_data *hist_data_test)
4962 unsigned int i, j;
4964 if (hist_data->n_actions != hist_data_test->n_actions)
4965 return false;
4967 for (i = 0; i < hist_data->n_actions; i++) {
4968 struct action_data *data = hist_data->actions[i];
4969 struct action_data *data_test = hist_data_test->actions[i];
4970 char *action_name, *action_name_test;
4972 if (data->handler != data_test->handler)
4973 return false;
4974 if (data->action != data_test->action)
4975 return false;
4977 if (data->n_params != data_test->n_params)
4978 return false;
4980 for (j = 0; j < data->n_params; j++) {
4981 if (strcmp(data->params[j], data_test->params[j]) != 0)
4982 return false;
4985 if (data->use_trace_keyword)
4986 action_name = data->synth_event_name;
4987 else
4988 action_name = data->action_name;
4990 if (data_test->use_trace_keyword)
4991 action_name_test = data_test->synth_event_name;
4992 else
4993 action_name_test = data_test->action_name;
4995 if (strcmp(action_name, action_name_test) != 0)
4996 return false;
4998 if (data->handler == HANDLER_ONMATCH) {
4999 if (strcmp(data->match_data.event_system,
5000 data_test->match_data.event_system) != 0)
5001 return false;
5002 if (strcmp(data->match_data.event,
5003 data_test->match_data.event) != 0)
5004 return false;
5005 } else if (data->handler == HANDLER_ONMAX ||
5006 data->handler == HANDLER_ONCHANGE) {
5007 if (strcmp(data->track_data.var_str,
5008 data_test->track_data.var_str) != 0)
5009 return false;
5013 return true;
5017 static void print_actions_spec(struct seq_file *m,
5018 struct hist_trigger_data *hist_data)
5020 unsigned int i;
5022 for (i = 0; i < hist_data->n_actions; i++) {
5023 struct action_data *data = hist_data->actions[i];
5025 if (data->handler == HANDLER_ONMATCH)
5026 print_onmatch_spec(m, hist_data, data);
5027 else if (data->handler == HANDLER_ONMAX ||
5028 data->handler == HANDLER_ONCHANGE)
5029 print_track_data_spec(m, hist_data, data);
5033 static void destroy_field_var_hists(struct hist_trigger_data *hist_data)
5035 unsigned int i;
5037 for (i = 0; i < hist_data->n_field_var_hists; i++) {
5038 kfree(hist_data->field_var_hists[i]->cmd);
5039 kfree(hist_data->field_var_hists[i]);
5043 static void destroy_hist_data(struct hist_trigger_data *hist_data)
5045 if (!hist_data)
5046 return;
5048 destroy_hist_trigger_attrs(hist_data->attrs);
5049 destroy_hist_fields(hist_data);
5050 tracing_map_destroy(hist_data->map);
5052 destroy_actions(hist_data);
5053 destroy_field_vars(hist_data);
5054 destroy_field_var_hists(hist_data);
5056 kfree(hist_data);
5059 static int create_tracing_map_fields(struct hist_trigger_data *hist_data)
5061 struct tracing_map *map = hist_data->map;
5062 struct ftrace_event_field *field;
5063 struct hist_field *hist_field;
5064 int i, idx = 0;
5066 for_each_hist_field(i, hist_data) {
5067 hist_field = hist_data->fields[i];
5068 if (hist_field->flags & HIST_FIELD_FL_KEY) {
5069 tracing_map_cmp_fn_t cmp_fn;
5071 field = hist_field->field;
5073 if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
5074 cmp_fn = tracing_map_cmp_none;
5075 else if (!field)
5076 cmp_fn = tracing_map_cmp_num(hist_field->size,
5077 hist_field->is_signed);
5078 else if (is_string_field(field))
5079 cmp_fn = tracing_map_cmp_string;
5080 else
5081 cmp_fn = tracing_map_cmp_num(field->size,
5082 field->is_signed);
5083 idx = tracing_map_add_key_field(map,
5084 hist_field->offset,
5085 cmp_fn);
5086 } else if (!(hist_field->flags & HIST_FIELD_FL_VAR))
5087 idx = tracing_map_add_sum_field(map);
5089 if (idx < 0)
5090 return idx;
5092 if (hist_field->flags & HIST_FIELD_FL_VAR) {
5093 idx = tracing_map_add_var(map);
5094 if (idx < 0)
5095 return idx;
5096 hist_field->var.idx = idx;
5097 hist_field->var.hist_data = hist_data;
5101 return 0;
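/*
 * Allocate and set up the hist_trigger_data for a trigger: parse the
 * actions and attributes into fields and sort keys, then create the
 * backing tracing_map and its key/sum/var fields.  On failure the
 * partially constructed data is destroyed and an ERR_PTR is returned.
 */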
5104 static struct hist_trigger_data *
5105 create_hist_data(unsigned int map_bits,
5106 struct hist_trigger_attrs *attrs,
5107 struct trace_event_file *file,
5108 bool remove)
5110 const struct tracing_map_ops *map_ops = NULL;
5111 struct hist_trigger_data *hist_data;
5112 int ret = 0;
5114 hist_data = kzalloc(sizeof(*hist_data), GFP_KERNEL);
5115 if (!hist_data)
5116 return ERR_PTR(-ENOMEM);
5118 hist_data->attrs = attrs;
5119 hist_data->remove = remove;
5120 hist_data->event_file = file;
5122 ret = parse_actions(hist_data);
5123 if (ret)
5124 goto free;
5126 ret = create_hist_fields(hist_data, file);
5127 if (ret)
5128 goto free;
5130 ret = create_sort_keys(hist_data);
5131 if (ret)
5132 goto free;
5134 map_ops = &hist_trigger_elt_data_ops;
5136 hist_data->map = tracing_map_create(map_bits, hist_data->key_size,
5137 map_ops, hist_data);
5138 if (IS_ERR(hist_data->map)) {
5139 ret = PTR_ERR(hist_data->map);
5140 hist_data->map = NULL;
5141 goto free;
5144 ret = create_tracing_map_fields(hist_data);
5145 if (ret)
5146 goto free;
5147 out:
5148 return hist_data;
5149 free:
5150 hist_data->attrs = NULL;
5152 destroy_hist_data(hist_data);
5154 hist_data = ERR_PTR(ret);
5156 goto out;
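/*
 * Update a tracing_map element for one event occurrence: each val field
 * is evaluated and either summed or, for variables, stored in the
 * element's variable slot; key fields that double as variables are
 * stored as well, and any field variables are updated last.
 */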
5159 static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
5160 struct tracing_map_elt *elt, void *rec,
5161 struct ring_buffer_event *rbe,
5162 u64 *var_ref_vals)
5164 struct hist_elt_data *elt_data;
5165 struct hist_field *hist_field;
5166 unsigned int i, var_idx;
5167 u64 hist_val;
5169 elt_data = elt->private_data;
5170 elt_data->var_ref_vals = var_ref_vals;
5172 for_each_hist_val_field(i, hist_data) {
5173 hist_field = hist_data->fields[i];
5174 hist_val = hist_field->fn(hist_field, elt, rbe, rec);
5175 if (hist_field->flags & HIST_FIELD_FL_VAR) {
5176 var_idx = hist_field->var.idx;
5177 tracing_map_set_var(elt, var_idx, hist_val);
5178 continue;
5180 tracing_map_update_sum(elt, i, hist_val);
5183 for_each_hist_key_field(i, hist_data) {
5184 hist_field = hist_data->fields[i];
5185 if (hist_field->flags & HIST_FIELD_FL_VAR) {
5186 hist_val = hist_field->fn(hist_field, elt, rbe, rec);
5187 var_idx = hist_field->var.idx;
5188 tracing_map_set_var(elt, var_idx, hist_val);
5192 update_field_vars(hist_data, elt, rbe, rec);
5195 static inline void add_to_key(char *compound_key, void *key,
5196 struct hist_field *key_field, void *rec)
5198 size_t size = key_field->size;
5200 if (key_field->flags & HIST_FIELD_FL_STRING) {
5201 struct ftrace_event_field *field;
5203 field = key_field->field;
5204 if (field->filter_type == FILTER_DYN_STRING)
5205 size = *(u32 *)(rec + field->offset) >> 16;
5206 else if (field->filter_type == FILTER_PTR_STRING)
5207 size = strlen(key);
5208 else if (field->filter_type == FILTER_STATIC_STRING)
5209 size = field->size;
5211 /* ensure NULL-termination */
5212 if (size > key_field->size - 1)
5213 size = key_field->size - 1;
5215 strncpy(compound_key + key_field->offset, (char *)key, size);
5216 } else
5217 memcpy(compound_key + key_field->offset, key, size);
5220 static void
5221 hist_trigger_actions(struct hist_trigger_data *hist_data,
5222 struct tracing_map_elt *elt, void *rec,
5223 struct ring_buffer_event *rbe, void *key,
5224 u64 *var_ref_vals)
5226 struct action_data *data;
5227 unsigned int i;
5229 for (i = 0; i < hist_data->n_actions; i++) {
5230 data = hist_data->actions[i];
5231 data->fn(hist_data, elt, rec, rbe, key, data, var_ref_vals);
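/*
 * The per-event hist trigger callback: build the key (a stacktrace, a
 * single field value, or a compound key), resolve any variable
 * references, insert/look up the tracing_map element, update it, and
 * finally run any actions attached to the trigger.
 */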
5235 static void event_hist_trigger(struct event_trigger_data *data, void *rec,
5236 struct ring_buffer_event *rbe)
5238 struct hist_trigger_data *hist_data = data->private_data;
5239 bool use_compound_key = (hist_data->n_keys > 1);
5240 unsigned long entries[HIST_STACKTRACE_DEPTH];
5241 u64 var_ref_vals[TRACING_MAP_VARS_MAX];
5242 char compound_key[HIST_KEY_SIZE_MAX];
5243 struct tracing_map_elt *elt = NULL;
5244 struct hist_field *key_field;
5245 u64 field_contents;
5246 void *key = NULL;
5247 unsigned int i;
5249 memset(compound_key, 0, hist_data->key_size);
5251 for_each_hist_key_field(i, hist_data) {
5252 key_field = hist_data->fields[i];
5254 if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
5255 memset(entries, 0, HIST_STACKTRACE_SIZE);
5256 stack_trace_save(entries, HIST_STACKTRACE_DEPTH,
5257 HIST_STACKTRACE_SKIP);
5258 key = entries;
5259 } else {
5260 field_contents = key_field->fn(key_field, elt, rbe, rec);
5261 if (key_field->flags & HIST_FIELD_FL_STRING) {
5262 key = (void *)(unsigned long)field_contents;
5263 use_compound_key = true;
5264 } else
5265 key = (void *)&field_contents;
5268 if (use_compound_key)
5269 add_to_key(compound_key, key, key_field, rec);
5272 if (use_compound_key)
5273 key = compound_key;
5275 if (hist_data->n_var_refs &&
5276 !resolve_var_refs(hist_data, key, var_ref_vals, false))
5277 return;
5279 elt = tracing_map_insert(hist_data->map, key);
5280 if (!elt)
5281 return;
5283 hist_trigger_elt_update(hist_data, elt, rec, rbe, var_ref_vals);
5285 if (resolve_var_refs(hist_data, key, var_ref_vals, true))
5286 hist_trigger_actions(hist_data, elt, rec, rbe, key, var_ref_vals);
5289 static void hist_trigger_stacktrace_print(struct seq_file *m,
5290 unsigned long *stacktrace_entries,
5291 unsigned int max_entries)
5293 char str[KSYM_SYMBOL_LEN];
5294 unsigned int spaces = 8;
5295 unsigned int i;
5297 for (i = 0; i < max_entries; i++) {
5298 if (!stacktrace_entries[i])
5299 return;
5301 seq_printf(m, "%*c", 1 + spaces, ' ');
5302 sprint_symbol(str, stacktrace_entries[i]);
5303 seq_printf(m, "%s\n", str);
5307 static void hist_trigger_print_key(struct seq_file *m,
5308 struct hist_trigger_data *hist_data,
5309 void *key,
5310 struct tracing_map_elt *elt)
5312 struct hist_field *key_field;
5313 char str[KSYM_SYMBOL_LEN];
5314 bool multiline = false;
5315 const char *field_name;
5316 unsigned int i;
5317 u64 uval;
5319 seq_puts(m, "{ ");
5321 for_each_hist_key_field(i, hist_data) {
5322 key_field = hist_data->fields[i];
5324 if (i > hist_data->n_vals)
5325 seq_puts(m, ", ");
5327 field_name = hist_field_name(key_field, 0);
5329 if (key_field->flags & HIST_FIELD_FL_HEX) {
5330 uval = *(u64 *)(key + key_field->offset);
5331 seq_printf(m, "%s: %llx", field_name, uval);
5332 } else if (key_field->flags & HIST_FIELD_FL_SYM) {
5333 uval = *(u64 *)(key + key_field->offset);
5334 sprint_symbol_no_offset(str, uval);
5335 seq_printf(m, "%s: [%llx] %-45s", field_name,
5336 uval, str);
5337 } else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) {
5338 uval = *(u64 *)(key + key_field->offset);
5339 sprint_symbol(str, uval);
5340 seq_printf(m, "%s: [%llx] %-55s", field_name,
5341 uval, str);
5342 } else if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
5343 struct hist_elt_data *elt_data = elt->private_data;
5344 char *comm;
5346 if (WARN_ON_ONCE(!elt_data))
5347 return;
5349 comm = elt_data->comm;
5351 uval = *(u64 *)(key + key_field->offset);
5352 seq_printf(m, "%s: %-16s[%10llu]", field_name,
5353 comm, uval);
5354 } else if (key_field->flags & HIST_FIELD_FL_SYSCALL) {
5355 const char *syscall_name;
5357 uval = *(u64 *)(key + key_field->offset);
5358 syscall_name = get_syscall_name(uval);
5359 if (!syscall_name)
5360 syscall_name = "unknown_syscall";
5362 seq_printf(m, "%s: %-30s[%3llu]", field_name,
5363 syscall_name, uval);
5364 } else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
5365 seq_puts(m, "stacktrace:\n");
5366 hist_trigger_stacktrace_print(m,
5367 key + key_field->offset,
5368 HIST_STACKTRACE_DEPTH);
5369 multiline = true;
5370 } else if (key_field->flags & HIST_FIELD_FL_LOG2) {
5371 seq_printf(m, "%s: ~ 2^%-2llu", field_name,
5372 *(u64 *)(key + key_field->offset));
5373 } else if (key_field->flags & HIST_FIELD_FL_STRING) {
5374 seq_printf(m, "%s: %-50s", field_name,
5375 (char *)(key + key_field->offset));
5376 } else {
5377 uval = *(u64 *)(key + key_field->offset);
5378 seq_printf(m, "%s: %10llu", field_name, uval);
5382 if (!multiline)
5383 seq_puts(m, " ");
5385 seq_puts(m, "}");
5388 static void hist_trigger_entry_print(struct seq_file *m,
5389 struct hist_trigger_data *hist_data,
5390 void *key,
5391 struct tracing_map_elt *elt)
5393 const char *field_name;
5394 unsigned int i;
5396 hist_trigger_print_key(m, hist_data, key, elt);
5398 seq_printf(m, " hitcount: %10llu",
5399 tracing_map_read_sum(elt, HITCOUNT_IDX));
5401 for (i = 1; i < hist_data->n_vals; i++) {
5402 field_name = hist_field_name(hist_data->fields[i], 0);
5404 if (hist_data->fields[i]->flags & HIST_FIELD_FL_VAR ||
5405 hist_data->fields[i]->flags & HIST_FIELD_FL_EXPR)
5406 continue;
5408 if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) {
5409 seq_printf(m, " %s: %10llx", field_name,
5410 tracing_map_read_sum(elt, i));
5411 } else {
5412 seq_printf(m, " %s: %10llu", field_name,
5413 tracing_map_read_sum(elt, i));
5417 print_actions(m, hist_data, elt);
5419 seq_puts(m, "\n");
5422 static int print_entries(struct seq_file *m,
5423 struct hist_trigger_data *hist_data)
5425 struct tracing_map_sort_entry **sort_entries = NULL;
5426 struct tracing_map *map = hist_data->map;
5427 int i, n_entries;
5429 n_entries = tracing_map_sort_entries(map, hist_data->sort_keys,
5430 hist_data->n_sort_keys,
5431 &sort_entries);
5432 if (n_entries < 0)
5433 return n_entries;
5435 for (i = 0; i < n_entries; i++)
5436 hist_trigger_entry_print(m, hist_data,
5437 sort_entries[i]->key,
5438 sort_entries[i]->elt);
5440 tracing_map_destroy_sort_entries(sort_entries, n_entries);
5442 return n_entries;
5445 static void hist_trigger_show(struct seq_file *m,
5446 struct event_trigger_data *data, int n)
5448 struct hist_trigger_data *hist_data;
5449 int n_entries;
5451 if (n > 0)
5452 seq_puts(m, "\n\n");
5454 seq_puts(m, "# event histogram\n#\n# trigger info: ");
5455 data->ops->print(m, data->ops, data);
5456 seq_puts(m, "#\n\n");
5458 hist_data = data->private_data;
5459 n_entries = print_entries(m, hist_data);
5460 if (n_entries < 0)
5461 n_entries = 0;
5463 track_data_snapshot_print(m, hist_data);
5465 seq_printf(m, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n",
5466 (u64)atomic64_read(&hist_data->map->hits),
5467 n_entries, (u64)atomic64_read(&hist_data->map->drops));
5470 static int hist_show(struct seq_file *m, void *v)
5472 struct event_trigger_data *data;
5473 struct trace_event_file *event_file;
5474 int n = 0, ret = 0;
5476 mutex_lock(&event_mutex);
5478 event_file = event_file_data(m->private);
5479 if (unlikely(!event_file)) {
5480 ret = -ENODEV;
5481 goto out_unlock;
5484 list_for_each_entry_rcu(data, &event_file->triggers, list) {
5485 if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
5486 hist_trigger_show(m, data, n++);
5489 out_unlock:
5490 mutex_unlock(&event_mutex);
5492 return ret;
5495 static int event_hist_open(struct inode *inode, struct file *file)
5497 return single_open(file, hist_show, file);
5500 const struct file_operations event_hist_fops = {
5501 .open = event_hist_open,
5502 .read = seq_read,
5503 .llseek = seq_lseek,
5504 .release = single_release,
5507 static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
5509 const char *field_name = hist_field_name(hist_field, 0);
5511 if (hist_field->var.name)
5512 seq_printf(m, "%s=", hist_field->var.name);
5514 if (hist_field->flags & HIST_FIELD_FL_CPU)
5515 seq_puts(m, "cpu");
5516 else if (field_name) {
5517 if (hist_field->flags & HIST_FIELD_FL_VAR_REF ||
5518 hist_field->flags & HIST_FIELD_FL_ALIAS)
5519 seq_putc(m, '$');
5520 seq_printf(m, "%s", field_name);
5521 } else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP)
5522 seq_puts(m, "common_timestamp");
5524 if (hist_field->flags) {
5525 if (!(hist_field->flags & HIST_FIELD_FL_VAR_REF) &&
5526 !(hist_field->flags & HIST_FIELD_FL_EXPR)) {
5527 const char *flags = get_hist_field_flags(hist_field);
5529 if (flags)
5530 seq_printf(m, ".%s", flags);
5535 static int event_hist_trigger_print(struct seq_file *m,
5536 struct event_trigger_ops *ops,
5537 struct event_trigger_data *data)
5539 struct hist_trigger_data *hist_data = data->private_data;
5540 struct hist_field *field;
5541 bool have_var = false;
5542 unsigned int i;
5544 seq_puts(m, "hist:");
5546 if (data->name)
5547 seq_printf(m, "%s:", data->name);
5549 seq_puts(m, "keys=");
5551 for_each_hist_key_field(i, hist_data) {
5552 field = hist_data->fields[i];
5554 if (i > hist_data->n_vals)
5555 seq_puts(m, ",");
5557 if (field->flags & HIST_FIELD_FL_STACKTRACE)
5558 seq_puts(m, "stacktrace");
5559 else
5560 hist_field_print(m, field);
5563 seq_puts(m, ":vals=");
5565 for_each_hist_val_field(i, hist_data) {
5566 field = hist_data->fields[i];
5567 if (field->flags & HIST_FIELD_FL_VAR) {
5568 have_var = true;
5569 continue;
5572 if (i == HITCOUNT_IDX)
5573 seq_puts(m, "hitcount");
5574 else {
5575 seq_puts(m, ",");
5576 hist_field_print(m, field);
5580 if (have_var) {
5581 unsigned int n = 0;
5583 seq_puts(m, ":");
5585 for_each_hist_val_field(i, hist_data) {
5586 field = hist_data->fields[i];
5588 if (field->flags & HIST_FIELD_FL_VAR) {
5589 if (n++)
5590 seq_puts(m, ",");
5591 hist_field_print(m, field);
5596 seq_puts(m, ":sort=");
5598 for (i = 0; i < hist_data->n_sort_keys; i++) {
5599 struct tracing_map_sort_key *sort_key;
5600 unsigned int idx, first_key_idx;
5602 /* skip VAR vals */
5603 first_key_idx = hist_data->n_vals - hist_data->n_vars;
5605 sort_key = &hist_data->sort_keys[i];
5606 idx = sort_key->field_idx;
5608 if (WARN_ON(idx >= HIST_FIELDS_MAX))
5609 return -EINVAL;
5611 if (i > 0)
5612 seq_puts(m, ",");
5614 if (idx == HITCOUNT_IDX)
5615 seq_puts(m, "hitcount");
5616 else {
5617 if (idx >= first_key_idx)
5618 idx += hist_data->n_vars;
5619 hist_field_print(m, hist_data->fields[idx]);
5622 if (sort_key->descending)
5623 seq_puts(m, ".descending");
5625 seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits));
5626 if (hist_data->enable_timestamps)
5627 seq_printf(m, ":clock=%s", hist_data->attrs->clock);
5629 print_actions_spec(m, hist_data);
5631 if (data->filter_str)
5632 seq_printf(m, " if %s", data->filter_str);
5634 if (data->paused)
5635 seq_puts(m, " [paused]");
5636 else
5637 seq_puts(m, " [active]");
5639 seq_putc(m, '\n');
5641 return 0;
5644 static int event_hist_trigger_init(struct event_trigger_ops *ops,
5645 struct event_trigger_data *data)
5647 struct hist_trigger_data *hist_data = data->private_data;
5649 if (!data->ref && hist_data->attrs->name)
5650 save_named_trigger(hist_data->attrs->name, data);
5652 data->ref++;
5654 return 0;
5657 static void unregister_field_var_hists(struct hist_trigger_data *hist_data)
5659 struct trace_event_file *file;
5660 unsigned int i;
5661 char *cmd;
5662 int ret;
5664 for (i = 0; i < hist_data->n_field_var_hists; i++) {
5665 file = hist_data->field_var_hists[i]->hist_data->event_file;
5666 cmd = hist_data->field_var_hists[i]->cmd;
5667 ret = event_hist_trigger_func(&trigger_hist_cmd, file,
5668 "!hist", "hist", cmd);
5672 static void event_hist_trigger_free(struct event_trigger_ops *ops,
5673 struct event_trigger_data *data)
5675 struct hist_trigger_data *hist_data = data->private_data;
5677 if (WARN_ON_ONCE(data->ref <= 0))
5678 return;
5680 data->ref--;
5681 if (!data->ref) {
5682 if (data->name)
5683 del_named_trigger(data);
5685 trigger_data_free(data);
5687 remove_hist_vars(hist_data);
5689 unregister_field_var_hists(hist_data);
5691 destroy_hist_data(hist_data);
5695 static struct event_trigger_ops event_hist_trigger_ops = {
5696 .func = event_hist_trigger,
5697 .print = event_hist_trigger_print,
5698 .init = event_hist_trigger_init,
5699 .free = event_hist_trigger_free,
5702 static int event_hist_trigger_named_init(struct event_trigger_ops *ops,
5703 struct event_trigger_data *data)
5705 data->ref++;
5707 save_named_trigger(data->named_data->name, data);
5709 event_hist_trigger_init(ops, data->named_data);
5711 return 0;
5714 static void event_hist_trigger_named_free(struct event_trigger_ops *ops,
5715 struct event_trigger_data *data)
5717 if (WARN_ON_ONCE(data->ref <= 0))
5718 return;
5720 event_hist_trigger_free(ops, data->named_data);
5722 data->ref--;
5723 if (!data->ref) {
5724 del_named_trigger(data);
5725 trigger_data_free(data);
5729 static struct event_trigger_ops event_hist_trigger_named_ops = {
5730 .func = event_hist_trigger,
5731 .print = event_hist_trigger_print,
5732 .init = event_hist_trigger_named_init,
5733 .free = event_hist_trigger_named_free,
5736 static struct event_trigger_ops *event_hist_get_trigger_ops(char *cmd,
5737 char *param)
5739 return &event_hist_trigger_ops;
5742 static void hist_clear(struct event_trigger_data *data)
5744 struct hist_trigger_data *hist_data = data->private_data;
5746 if (data->name)
5747 pause_named_trigger(data);
5749 tracepoint_synchronize_unregister();
5751 tracing_map_clear(hist_data->map);
5753 if (data->name)
5754 unpause_named_trigger(data);
5757 static bool compatible_field(struct ftrace_event_field *field,
5758 struct ftrace_event_field *test_field)
5760 if (field == test_field)
5761 return true;
5762 if (field == NULL || test_field == NULL)
5763 return false;
5764 if (strcmp(field->name, test_field->name) != 0)
5765 return false;
5766 if (strcmp(field->type, test_field->type) != 0)
5767 return false;
5768 if (field->size != test_field->size)
5769 return false;
5770 if (field->is_signed != test_field->is_signed)
5771 return false;
5773 return true;
5776 static bool hist_trigger_match(struct event_trigger_data *data,
5777 struct event_trigger_data *data_test,
5778 struct event_trigger_data *named_data,
5779 bool ignore_filter)
5781 struct tracing_map_sort_key *sort_key, *sort_key_test;
5782 struct hist_trigger_data *hist_data, *hist_data_test;
5783 struct hist_field *key_field, *key_field_test;
5784 unsigned int i;
5786 if (named_data && (named_data != data_test) &&
5787 (named_data != data_test->named_data))
5788 return false;
5790 if (!named_data && is_named_trigger(data_test))
5791 return false;
5793 hist_data = data->private_data;
5794 hist_data_test = data_test->private_data;
5796 if (hist_data->n_vals != hist_data_test->n_vals ||
5797 hist_data->n_fields != hist_data_test->n_fields ||
5798 hist_data->n_sort_keys != hist_data_test->n_sort_keys)
5799 return false;
5801 if (!ignore_filter) {
5802 if ((data->filter_str && !data_test->filter_str) ||
5803 (!data->filter_str && data_test->filter_str))
5804 return false;
5807 for_each_hist_field(i, hist_data) {
5808 key_field = hist_data->fields[i];
5809 key_field_test = hist_data_test->fields[i];
5811 if (key_field->flags != key_field_test->flags)
5812 return false;
5813 if (!compatible_field(key_field->field, key_field_test->field))
5814 return false;
5815 if (key_field->offset != key_field_test->offset)
5816 return false;
5817 if (key_field->size != key_field_test->size)
5818 return false;
5819 if (key_field->is_signed != key_field_test->is_signed)
5820 return false;
5821 if (!!key_field->var.name != !!key_field_test->var.name)
5822 return false;
5823 if (key_field->var.name &&
5824 strcmp(key_field->var.name, key_field_test->var.name) != 0)
5825 return false;
5828 for (i = 0; i < hist_data->n_sort_keys; i++) {
5829 sort_key = &hist_data->sort_keys[i];
5830 sort_key_test = &hist_data_test->sort_keys[i];
5832 if (sort_key->field_idx != sort_key_test->field_idx ||
5833 sort_key->descending != sort_key_test->descending)
5834 return false;
5837 if (!ignore_filter && data->filter_str &&
5838 (strcmp(data->filter_str, data_test->filter_str) != 0))
5839 return false;
5841 if (!actions_match(hist_data, hist_data_test))
5842 return false;
5844 return true;
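/*
 * Register a hist trigger on an event file.  A named trigger must match
 * the existing trigger of that name; if an equivalent trigger already
 * exists, only pause/continue/clear are allowed, otherwise -EEXIST is
 * returned.  On success the return value is the number of triggers
 * registered (i.e. 1).
 */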
5847 static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
5848 struct event_trigger_data *data,
5849 struct trace_event_file *file)
5851 struct hist_trigger_data *hist_data = data->private_data;
5852 struct event_trigger_data *test, *named_data = NULL;
5853 struct trace_array *tr = file->tr;
5854 int ret = 0;
5856 if (hist_data->attrs->name) {
5857 named_data = find_named_trigger(hist_data->attrs->name);
5858 if (named_data) {
5859 if (!hist_trigger_match(data, named_data, named_data,
5860 true)) {
5861 hist_err(tr, HIST_ERR_NAMED_MISMATCH, errpos(hist_data->attrs->name));
5862 ret = -EINVAL;
5863 goto out;
5868 if (hist_data->attrs->name && !named_data)
5869 goto new;
5871 list_for_each_entry_rcu(test, &file->triggers, list) {
5872 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
5873 if (!hist_trigger_match(data, test, named_data, false))
5874 continue;
5875 if (hist_data->attrs->pause)
5876 test->paused = true;
5877 else if (hist_data->attrs->cont)
5878 test->paused = false;
5879 else if (hist_data->attrs->clear)
5880 hist_clear(test);
5881 else {
5882 hist_err(tr, HIST_ERR_TRIGGER_EEXIST, 0);
5883 ret = -EEXIST;
5885 goto out;
5888 new:
5889 if (hist_data->attrs->cont || hist_data->attrs->clear) {
5890 hist_err(tr, HIST_ERR_TRIGGER_ENOENT_CLEAR, 0);
5891 ret = -ENOENT;
5892 goto out;
5895 if (hist_data->attrs->pause)
5896 data->paused = true;
5898 if (named_data) {
5899 data->private_data = named_data->private_data;
5900 set_named_trigger_data(data, named_data);
5901 data->ops = &event_hist_trigger_named_ops;
5904 if (data->ops->init) {
5905 ret = data->ops->init(data->ops, data);
5906 if (ret < 0)
5907 goto out;
5910 if (hist_data->enable_timestamps) {
5911 char *clock = hist_data->attrs->clock;
5913 ret = tracing_set_clock(file->tr, hist_data->attrs->clock);
5914 if (ret) {
5915 hist_err(tr, HIST_ERR_SET_CLOCK_FAIL, errpos(clock));
5916 goto out;
5919 tracing_set_time_stamp_abs(file->tr, true);
5922 if (named_data)
5923 destroy_hist_data(hist_data);
5925 ret++;
5926 out:
5927 return ret;
5930 static int hist_trigger_enable(struct event_trigger_data *data,
5931 struct trace_event_file *file)
5933 int ret = 0;
5935 list_add_tail_rcu(&data->list, &file->triggers);
5937 update_cond_flag(file);
5939 if (trace_event_trigger_enable_disable(file, 1) < 0) {
5940 list_del_rcu(&data->list);
5941 update_cond_flag(file);
5942 ret--;
5945 return ret;
5948 static bool have_hist_trigger_match(struct event_trigger_data *data,
5949 struct trace_event_file *file)
5951 struct hist_trigger_data *hist_data = data->private_data;
5952 struct event_trigger_data *test, *named_data = NULL;
5953 bool match = false;
5955 if (hist_data->attrs->name)
5956 named_data = find_named_trigger(hist_data->attrs->name);
5958 list_for_each_entry_rcu(test, &file->triggers, list) {
5959 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
5960 if (hist_trigger_match(data, test, named_data, false)) {
5961 match = true;
5962 break;
5967 return match;
5970 static bool hist_trigger_check_refs(struct event_trigger_data *data,
5971 struct trace_event_file *file)
5973 struct hist_trigger_data *hist_data = data->private_data;
5974 struct event_trigger_data *test, *named_data = NULL;
5976 if (hist_data->attrs->name)
5977 named_data = find_named_trigger(hist_data->attrs->name);
5979 list_for_each_entry_rcu(test, &file->triggers, list) {
5980 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
5981 if (!hist_trigger_match(data, test, named_data, false))
5982 continue;
5983 hist_data = test->private_data;
5984 if (check_var_refs(hist_data))
5985 return true;
5986 break;
5990 return false;
5993 static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
5994 struct event_trigger_data *data,
5995 struct trace_event_file *file)
5997 struct hist_trigger_data *hist_data = data->private_data;
5998 struct event_trigger_data *test, *named_data = NULL;
5999 bool unregistered = false;
6001 if (hist_data->attrs->name)
6002 named_data = find_named_trigger(hist_data->attrs->name);
6004 list_for_each_entry_rcu(test, &file->triggers, list) {
6005 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6006 if (!hist_trigger_match(data, test, named_data, false))
6007 continue;
6008 unregistered = true;
6009 list_del_rcu(&test->list);
6010 trace_event_trigger_enable_disable(file, 0);
6011 update_cond_flag(file);
6012 break;
6016 if (unregistered && test->ops->free)
6017 test->ops->free(test->ops, test);
6019 if (hist_data->enable_timestamps) {
6020 if (!hist_data->remove || unregistered)
6021 tracing_set_time_stamp_abs(file->tr, false);
6025 static bool hist_file_check_refs(struct trace_event_file *file)
6027 struct hist_trigger_data *hist_data;
6028 struct event_trigger_data *test;
6030 list_for_each_entry_rcu(test, &file->triggers, list) {
6031 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6032 hist_data = test->private_data;
6033 if (check_var_refs(hist_data))
6034 return true;
6038 return false;
6041 static void hist_unreg_all(struct trace_event_file *file)
6043 struct event_trigger_data *test, *n;
6044 struct hist_trigger_data *hist_data;
6045 struct synth_event *se;
6046 const char *se_name;
6048 lockdep_assert_held(&event_mutex);
6050 if (hist_file_check_refs(file))
6051 return;
6053 list_for_each_entry_safe(test, n, &file->triggers, list) {
6054 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6055 hist_data = test->private_data;
6056 list_del_rcu(&test->list);
6057 trace_event_trigger_enable_disable(file, 0);
6059 se_name = trace_event_name(file->event_call);
6060 se = find_synth_event(se_name);
6061 if (se)
6062 se->ref--;
6064 update_cond_flag(file);
6065 if (hist_data->enable_timestamps)
6066 tracing_set_time_stamp_abs(file->tr, false);
6067 if (test->ops->free)
6068 test->ops->free(test->ops, test);
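/*
 * Top-level handler for the 'hist' trigger command.  The command string
 * is split into the trigger proper and an optional 'if' filter, the
 * attributes are parsed, a hist_trigger_data is created and then
 * registered (or unregistered when the command starts with '!').
 * Illustrative usage (assumed event and field names, following the
 * syntax this file parses):
 *
 *   echo 'hist:keys=call_site:vals=bytes_req:sort=hitcount.descending:size=2048 if bytes_req > 0' > \
 *       events/kmem/kmalloc/trigger
 */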
6073 static int event_hist_trigger_func(struct event_command *cmd_ops,
6074 struct trace_event_file *file,
6075 char *glob, char *cmd, char *param)
6077 unsigned int hist_trigger_bits = TRACING_MAP_BITS_DEFAULT;
6078 struct event_trigger_data *trigger_data;
6079 struct hist_trigger_attrs *attrs;
6080 struct event_trigger_ops *trigger_ops;
6081 struct hist_trigger_data *hist_data;
6082 struct synth_event *se;
6083 const char *se_name;
6084 bool remove = false;
6085 char *trigger, *p;
6086 int ret = 0;
6088 lockdep_assert_held(&event_mutex);
6090 if (glob && strlen(glob)) {
6091 hist_err_clear();
6092 last_cmd_set(file, param);
6095 if (!param)
6096 return -EINVAL;
6098 if (glob[0] == '!')
6099 remove = true;
6101 /*
6102 * separate the trigger from the filter (k:v [if filter])
6103 * allowing for whitespace in the trigger
6104 */
6105 p = trigger = param;
6106 do {
6107 p = strstr(p, "if");
6108 if (!p)
6109 break;
6110 if (p == param)
6111 return -EINVAL;
6112 if (*(p - 1) != ' ' && *(p - 1) != '\t') {
6113 p++;
6114 continue;
6116 if (p >= param + strlen(param) - (sizeof("if") - 1) - 1)
6117 return -EINVAL;
6118 if (*(p + sizeof("if") - 1) != ' ' && *(p + sizeof("if") - 1) != '\t') {
6119 p++;
6120 continue;
6122 break;
6123 } while (p);
6125 if (!p)
6126 param = NULL;
6127 else {
6128 *(p - 1) = '\0';
6129 param = strstrip(p);
6130 trigger = strstrip(trigger);
6133 attrs = parse_hist_trigger_attrs(file->tr, trigger);
6134 if (IS_ERR(attrs))
6135 return PTR_ERR(attrs);
6137 if (attrs->map_bits)
6138 hist_trigger_bits = attrs->map_bits;
6140 hist_data = create_hist_data(hist_trigger_bits, attrs, file, remove);
6141 if (IS_ERR(hist_data)) {
6142 destroy_hist_trigger_attrs(attrs);
6143 return PTR_ERR(hist_data);
6146 trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
6148 trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
6149 if (!trigger_data) {
6150 ret = -ENOMEM;
6151 goto out_free;
6154 trigger_data->count = -1;
6155 trigger_data->ops = trigger_ops;
6156 trigger_data->cmd_ops = cmd_ops;
6158 INIT_LIST_HEAD(&trigger_data->list);
6159 RCU_INIT_POINTER(trigger_data->filter, NULL);
6161 trigger_data->private_data = hist_data;
6163 /* if param is non-empty, it's supposed to be a filter */
6164 if (param && cmd_ops->set_filter) {
6165 ret = cmd_ops->set_filter(param, trigger_data, file);
6166 if (ret < 0)
6167 goto out_free;
6170 if (remove) {
6171 if (!have_hist_trigger_match(trigger_data, file))
6172 goto out_free;
6174 if (hist_trigger_check_refs(trigger_data, file)) {
6175 ret = -EBUSY;
6176 goto out_free;
6179 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
6180 se_name = trace_event_name(file->event_call);
6181 se = find_synth_event(se_name);
6182 if (se)
6183 se->ref--;
6184 ret = 0;
6185 goto out_free;
6188 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
6189 /*
6190 * The above returns on success the # of triggers registered,
6191 * but if it didn't register any it returns zero. Consider no
6192 * triggers registered a failure too.
6193 */
6194 if (!ret) {
6195 if (!(attrs->pause || attrs->cont || attrs->clear))
6196 ret = -ENOENT;
6197 goto out_free;
6198 } else if (ret < 0)
6199 goto out_free;
6201 if (get_named_trigger_data(trigger_data))
6202 goto enable;
6204 if (has_hist_vars(hist_data))
6205 save_hist_vars(hist_data);
6207 ret = create_actions(hist_data);
6208 if (ret)
6209 goto out_unreg;
6211 ret = tracing_map_init(hist_data->map);
6212 if (ret)
6213 goto out_unreg;
6214 enable:
6215 ret = hist_trigger_enable(trigger_data, file);
6216 if (ret)
6217 goto out_unreg;
6219 se_name = trace_event_name(file->event_call);
6220 se = find_synth_event(se_name);
6221 if (se)
6222 se->ref++;
6223 /* Just return zero, not the number of registered triggers */
6224 ret = 0;
6225 out:
6226 if (ret == 0)
6227 hist_err_clear();
6229 return ret;
6230 out_unreg:
6231 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
6232 out_free:
6233 if (cmd_ops->set_filter)
6234 cmd_ops->set_filter(NULL, trigger_data, NULL);
6236 remove_hist_vars(hist_data);
6238 kfree(trigger_data);
6240 destroy_hist_data(hist_data);
6241 goto out;
6244 static struct event_command trigger_hist_cmd = {
6245 .name = "hist",
6246 .trigger_type = ETT_EVENT_HIST,
6247 .flags = EVENT_CMD_FL_NEEDS_REC,
6248 .func = event_hist_trigger_func,
6249 .reg = hist_register_trigger,
6250 .unreg = hist_unregister_trigger,
6251 .unreg_all = hist_unreg_all,
6252 .get_trigger_ops = event_hist_get_trigger_ops,
6253 .set_filter = set_trigger_filter,
6256 __init int register_trigger_hist_cmd(void)
6258 int ret;
6260 ret = register_event_command(&trigger_hist_cmd);
6261 WARN_ON(ret < 0);
6263 return ret;
6266 static void
6267 hist_enable_trigger(struct event_trigger_data *data, void *rec,
6268 struct ring_buffer_event *event)
6270 struct enable_trigger_data *enable_data = data->private_data;
6271 struct event_trigger_data *test;
6273 list_for_each_entry_rcu(test, &enable_data->file->triggers, list) {
6274 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
6275 if (enable_data->enable)
6276 test->paused = false;
6277 else
6278 test->paused = true;
6283 static void
6284 hist_enable_count_trigger(struct event_trigger_data *data, void *rec,
6285 struct ring_buffer_event *event)
6287 if (!data->count)
6288 return;
6290 if (data->count != -1)
6291 (data->count)--;
6293 hist_enable_trigger(data, rec, event);
6296 static struct event_trigger_ops hist_enable_trigger_ops = {
6297 .func = hist_enable_trigger,
6298 .print = event_enable_trigger_print,
6299 .init = event_trigger_init,
6300 .free = event_enable_trigger_free,
6303 static struct event_trigger_ops hist_enable_count_trigger_ops = {
6304 .func = hist_enable_count_trigger,
6305 .print = event_enable_trigger_print,
6306 .init = event_trigger_init,
6307 .free = event_enable_trigger_free,
6310 static struct event_trigger_ops hist_disable_trigger_ops = {
6311 .func = hist_enable_trigger,
6312 .print = event_enable_trigger_print,
6313 .init = event_trigger_init,
6314 .free = event_enable_trigger_free,
6317 static struct event_trigger_ops hist_disable_count_trigger_ops = {
6318 .func = hist_enable_count_trigger,
6319 .print = event_enable_trigger_print,
6320 .init = event_trigger_init,
6321 .free = event_enable_trigger_free,
6324 static struct event_trigger_ops *
6325 hist_enable_get_trigger_ops(char *cmd, char *param)
6327 struct event_trigger_ops *ops;
6328 bool enable;
6330 enable = (strcmp(cmd, ENABLE_HIST_STR) == 0);
6332 if (enable)
6333 ops = param ? &hist_enable_count_trigger_ops :
6334 &hist_enable_trigger_ops;
6335 else
6336 ops = param ? &hist_disable_count_trigger_ops :
6337 &hist_disable_trigger_ops;
6339 return ops;
6342 static void hist_enable_unreg_all(struct trace_event_file *file)
6344 struct event_trigger_data *test, *n;
6346 list_for_each_entry_safe(test, n, &file->triggers, list) {
6347 if (test->cmd_ops->trigger_type == ETT_HIST_ENABLE) {
6348 list_del_rcu(&test->list);
6349 update_cond_flag(file);
6350 trace_event_trigger_enable_disable(file, 0);
6351 if (test->ops->free)
6352 test->ops->free(test->ops, test);
6357 static struct event_command trigger_hist_enable_cmd = {
6358 .name = ENABLE_HIST_STR,
6359 .trigger_type = ETT_HIST_ENABLE,
6360 .func = event_enable_trigger_func,
6361 .reg = event_enable_register_trigger,
6362 .unreg = event_enable_unregister_trigger,
6363 .unreg_all = hist_enable_unreg_all,
6364 .get_trigger_ops = hist_enable_get_trigger_ops,
6365 .set_filter = set_trigger_filter,
6368 static struct event_command trigger_hist_disable_cmd = {
6369 .name = DISABLE_HIST_STR,
6370 .trigger_type = ETT_HIST_ENABLE,
6371 .func = event_enable_trigger_func,
6372 .reg = event_enable_register_trigger,
6373 .unreg = event_enable_unregister_trigger,
6374 .unreg_all = hist_enable_unreg_all,
6375 .get_trigger_ops = hist_enable_get_trigger_ops,
6376 .set_filter = set_trigger_filter,
6379 static __init void unregister_trigger_hist_enable_disable_cmds(void)
6381 unregister_event_command(&trigger_hist_enable_cmd);
6382 unregister_event_command(&trigger_hist_disable_cmd);
6385 __init int register_trigger_hist_enable_disable_cmds(void)
6387 int ret;
6389 ret = register_event_command(&trigger_hist_enable_cmd);
6390 if (WARN_ON(ret < 0))
6391 return ret;
6392 ret = register_event_command(&trigger_hist_disable_cmd);
6393 if (WARN_ON(ret < 0))
6394 unregister_trigger_hist_enable_disable_cmds();
6396 return ret;
6399 static __init int trace_events_hist_init(void)
6401 struct dentry *entry = NULL;
6402 struct dentry *d_tracer;
6403 int err = 0;
6405 err = dyn_event_register(&synth_event_ops);
6406 if (err) {
6407 pr_warn("Could not register synth_event_ops\n");
6408 return err;
6411 d_tracer = tracing_init_dentry();
6412 if (IS_ERR(d_tracer)) {
6413 err = PTR_ERR(d_tracer);
6414 goto err;
6417 entry = tracefs_create_file("synthetic_events", 0644, d_tracer,
6418 NULL, &synth_events_fops);
6419 if (!entry) {
6420 err = -ENODEV;
6421 goto err;
6424 return err;
6425 err:
6426 pr_warn("Could not create tracefs 'synthetic_events' entry\n");
6428 return err;
6431 fs_initcall(trace_events_hist_init);