1 // SPDX-License-Identifier: GPL-2.0
3 * trace_events_hist - trace event hist triggers
5 * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
8 #include <linux/module.h>
9 #include <linux/kallsyms.h>
10 #include <linux/security.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/stacktrace.h>
14 #include <linux/rculist.h>
15 #include <linux/tracefs.h>
17 /* for gfp flag names */
18 #include <linux/trace_events.h>
19 #include <trace/events/mmflags.h>
21 #include "tracing_map.h"
22 #include "trace_synth.h"
25 C(NONE, "No error"), \
26 C(DUPLICATE_VAR, "Variable already defined"), \
27 C(VAR_NOT_UNIQUE, "Variable name not unique, need to use fully qualified name (subsys.event.var) for variable"), \
28 C(TOO_MANY_VARS, "Too many variables defined"), \
29 C(MALFORMED_ASSIGNMENT, "Malformed assignment"), \
30 C(NAMED_MISMATCH, "Named hist trigger doesn't match existing named trigger (includes variables)"), \
31 C(TRIGGER_EEXIST, "Hist trigger already exists"), \
32 C(TRIGGER_ENOENT_CLEAR, "Can't clear or continue a nonexistent hist trigger"), \
33 C(SET_CLOCK_FAIL, "Couldn't set trace_clock"), \
34 C(BAD_FIELD_MODIFIER, "Invalid field modifier"), \
35 C(TOO_MANY_SUBEXPR, "Too many subexpressions (3 max)"), \
36 C(TIMESTAMP_MISMATCH, "Timestamp units in expression don't match"), \
37 C(TOO_MANY_FIELD_VARS, "Too many field variables defined"), \
38 C(EVENT_FILE_NOT_FOUND, "Event file not found"), \
39 C(HIST_NOT_FOUND, "Matching event histogram not found"), \
40 C(HIST_CREATE_FAIL, "Couldn't create histogram for field"), \
41 C(SYNTH_VAR_NOT_FOUND, "Couldn't find synthetic variable"), \
42 C(SYNTH_EVENT_NOT_FOUND,"Couldn't find synthetic event"), \
43 C(SYNTH_TYPE_MISMATCH, "Param type doesn't match synthetic event field type"), \
44 C(SYNTH_COUNT_MISMATCH, "Param count doesn't match synthetic event field count"), \
45 C(FIELD_VAR_PARSE_FAIL, "Couldn't parse field variable"), \
46 C(VAR_CREATE_FIND_FAIL, "Couldn't create or find variable"), \
47 C(ONX_NOT_VAR, "For onmax(x) or onchange(x), x must be a variable"), \
48 C(ONX_VAR_NOT_FOUND, "Couldn't find onmax or onchange variable"), \
49 C(ONX_VAR_CREATE_FAIL, "Couldn't create onmax or onchange variable"), \
50 C(FIELD_VAR_CREATE_FAIL,"Couldn't create field variable"), \
51 C(TOO_MANY_PARAMS, "Too many action params"), \
52 C(PARAM_NOT_FOUND, "Couldn't find param"), \
53 C(INVALID_PARAM, "Invalid action param"), \
54 C(ACTION_NOT_FOUND, "No action found"), \
55 C(NO_SAVE_PARAMS, "No params found for save()"), \
56 C(TOO_MANY_SAVE_ACTIONS,"Can't have more than one save() action per hist"), \
57 C(ACTION_MISMATCH, "Handler doesn't support action"), \
58 C(NO_CLOSING_PAREN, "No closing paren found"), \
59 C(SUBSYS_NOT_FOUND, "Missing subsystem"), \
60 C(INVALID_SUBSYS_EVENT, "Invalid subsystem or event name"), \
61 C(INVALID_REF_KEY, "Using variable references in keys not supported"), \
62 C(VAR_NOT_FOUND, "Couldn't find variable"), \
63 C(FIELD_NOT_FOUND, "Couldn't find field"), \
64 C(EMPTY_ASSIGNMENT, "Empty assignment"), \
65 C(INVALID_SORT_MODIFIER,"Invalid sort modifier"), \
66 C(EMPTY_SORT_FIELD, "Empty sort field"), \
67 C(TOO_MANY_SORT_FIELDS, "Too many sort fields (Max = 2)"), \
68 C(INVALID_SORT_FIELD, "Sort field must be a key or a val"), \
69 C(INVALID_STR_OPERAND, "String type can not be an operand in expression"), \
70 C(EXPECT_NUMBER, "Expecting numeric literal"), \
71 C(UNARY_MINUS_SUBEXPR, "Unary minus not supported in sub-expressions"), \
72 C(DIVISION_BY_ZERO, "Division by zero"), \
73 C(NEED_NOHC_VAL, "Non-hitcount value is required for 'nohitcount'"),
76 #define C(a, b) HIST_ERR_##a
83 static const char *err_text
[] = { ERRORS
};
87 typedef u64 (*hist_field_fn_t
) (struct hist_field
*field
,
88 struct tracing_map_elt
*elt
,
89 struct trace_buffer
*buffer
,
90 struct ring_buffer_event
*rbe
,
93 #define HIST_FIELD_OPERANDS_MAX 2
94 #define HIST_FIELDS_MAX (TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX)
95 #define HIST_ACTIONS_MAX 8
96 #define HIST_CONST_DIGITS_MAX 21
97 #define HIST_DIV_SHIFT 20 /* For optimizing division by constants */
103 FIELD_OP_UNARY_MINUS
,
110 HIST_FIELD_FN_VAR_REF
,
111 HIST_FIELD_FN_COUNTER
,
114 HIST_FIELD_FN_BUCKET
,
115 HIST_FIELD_FN_TIMESTAMP
,
117 HIST_FIELD_FN_STRING
,
118 HIST_FIELD_FN_DYNSTRING
,
119 HIST_FIELD_FN_RELDYNSTRING
,
120 HIST_FIELD_FN_PSTRING
,
129 HIST_FIELD_FN_UMINUS
,
134 HIST_FIELD_FN_DIV_POWER2
,
135 HIST_FIELD_FN_DIV_NOT_POWER2
,
136 HIST_FIELD_FN_DIV_MULT_SHIFT
,
137 HIST_FIELD_FN_EXECNAME
,
142 * A hist_var (histogram variable) contains variable information for
143 * hist_fields having the HIST_FIELD_FL_VAR or HIST_FIELD_FL_VAR_REF
144 * flag set. A hist_var has a variable name e.g. ts0, and is
145 * associated with a given histogram trigger, as specified by
146 * hist_data. The hist_var idx is the unique index assigned to the
147 * variable by the hist trigger's tracing_map. The idx is what is
148 * used to set a variable's value and, by a variable reference, to
153 struct hist_trigger_data
*hist_data
;
158 struct ftrace_event_field
*field
;
160 unsigned long buckets
;
162 struct hist_field
*operands
[HIST_FIELD_OPERANDS_MAX
];
163 struct hist_trigger_data
*hist_data
;
164 enum hist_field_fn fn_num
;
168 unsigned int is_signed
;
171 * Variable fields contain variable-specific info in var.
174 enum field_op_id
operator;
179 * The name field is used for EXPR and VAR_REF fields. VAR
180 * fields contain the variable name in var.name.
185 * When a histogram trigger is hit, if it has any references
186 * to variables, the values of those variables are collected
187 * into a var_ref_vals array by resolve_var_refs(). The
188 * current value of each variable is read from the tracing_map
189 * using the hist field's hist_var.idx and entered into the
190 * var_ref_idx entry i.e. var_ref_vals[var_ref_idx].
192 unsigned int var_ref_idx
;
195 unsigned int var_str_idx
;
197 /* Numeric literals are represented as u64 */
199 /* Used to optimize division by constants */
203 static u64
hist_fn_call(struct hist_field
*hist_field
,
204 struct tracing_map_elt
*elt
,
205 struct trace_buffer
*buffer
,
206 struct ring_buffer_event
*rbe
,
209 static u64
hist_field_const(struct hist_field
*field
,
210 struct tracing_map_elt
*elt
,
211 struct trace_buffer
*buffer
,
212 struct ring_buffer_event
*rbe
,
215 return field
->constant
;
218 static u64
hist_field_counter(struct hist_field
*field
,
219 struct tracing_map_elt
*elt
,
220 struct trace_buffer
*buffer
,
221 struct ring_buffer_event
*rbe
,
227 static u64
hist_field_string(struct hist_field
*hist_field
,
228 struct tracing_map_elt
*elt
,
229 struct trace_buffer
*buffer
,
230 struct ring_buffer_event
*rbe
,
233 char *addr
= (char *)(event
+ hist_field
->field
->offset
);
235 return (u64
)(unsigned long)addr
;
238 static u64
hist_field_dynstring(struct hist_field
*hist_field
,
239 struct tracing_map_elt
*elt
,
240 struct trace_buffer
*buffer
,
241 struct ring_buffer_event
*rbe
,
244 u32 str_item
= *(u32
*)(event
+ hist_field
->field
->offset
);
245 int str_loc
= str_item
& 0xffff;
246 char *addr
= (char *)(event
+ str_loc
);
248 return (u64
)(unsigned long)addr
;
251 static u64
hist_field_reldynstring(struct hist_field
*hist_field
,
252 struct tracing_map_elt
*elt
,
253 struct trace_buffer
*buffer
,
254 struct ring_buffer_event
*rbe
,
257 u32
*item
= event
+ hist_field
->field
->offset
;
258 u32 str_item
= *item
;
259 int str_loc
= str_item
& 0xffff;
260 char *addr
= (char *)&item
[1] + str_loc
;
262 return (u64
)(unsigned long)addr
;
265 static u64
hist_field_pstring(struct hist_field
*hist_field
,
266 struct tracing_map_elt
*elt
,
267 struct trace_buffer
*buffer
,
268 struct ring_buffer_event
*rbe
,
271 char **addr
= (char **)(event
+ hist_field
->field
->offset
);
273 return (u64
)(unsigned long)*addr
;
276 static u64
hist_field_log2(struct hist_field
*hist_field
,
277 struct tracing_map_elt
*elt
,
278 struct trace_buffer
*buffer
,
279 struct ring_buffer_event
*rbe
,
282 struct hist_field
*operand
= hist_field
->operands
[0];
284 u64 val
= hist_fn_call(operand
, elt
, buffer
, rbe
, event
);
286 return (u64
) ilog2(roundup_pow_of_two(val
));
289 static u64
hist_field_bucket(struct hist_field
*hist_field
,
290 struct tracing_map_elt
*elt
,
291 struct trace_buffer
*buffer
,
292 struct ring_buffer_event
*rbe
,
295 struct hist_field
*operand
= hist_field
->operands
[0];
296 unsigned long buckets
= hist_field
->buckets
;
298 u64 val
= hist_fn_call(operand
, elt
, buffer
, rbe
, event
);
300 if (WARN_ON_ONCE(!buckets
))
304 val
= div64_ul(val
, buckets
);
306 val
= (u64
)((unsigned long)val
/ buckets
);
307 return val
* buckets
;
310 static u64
hist_field_plus(struct hist_field
*hist_field
,
311 struct tracing_map_elt
*elt
,
312 struct trace_buffer
*buffer
,
313 struct ring_buffer_event
*rbe
,
316 struct hist_field
*operand1
= hist_field
->operands
[0];
317 struct hist_field
*operand2
= hist_field
->operands
[1];
319 u64 val1
= hist_fn_call(operand1
, elt
, buffer
, rbe
, event
);
320 u64 val2
= hist_fn_call(operand2
, elt
, buffer
, rbe
, event
);
325 static u64
hist_field_minus(struct hist_field
*hist_field
,
326 struct tracing_map_elt
*elt
,
327 struct trace_buffer
*buffer
,
328 struct ring_buffer_event
*rbe
,
331 struct hist_field
*operand1
= hist_field
->operands
[0];
332 struct hist_field
*operand2
= hist_field
->operands
[1];
334 u64 val1
= hist_fn_call(operand1
, elt
, buffer
, rbe
, event
);
335 u64 val2
= hist_fn_call(operand2
, elt
, buffer
, rbe
, event
);
340 static u64
hist_field_div(struct hist_field
*hist_field
,
341 struct tracing_map_elt
*elt
,
342 struct trace_buffer
*buffer
,
343 struct ring_buffer_event
*rbe
,
346 struct hist_field
*operand1
= hist_field
->operands
[0];
347 struct hist_field
*operand2
= hist_field
->operands
[1];
349 u64 val1
= hist_fn_call(operand1
, elt
, buffer
, rbe
, event
);
350 u64 val2
= hist_fn_call(operand2
, elt
, buffer
, rbe
, event
);
352 /* Return -1 for the undefined case */
356 /* Use shift if the divisor is a power of 2 */
357 if (!(val2
& (val2
- 1)))
358 return val1
>> __ffs64(val2
);
360 return div64_u64(val1
, val2
);
363 static u64
div_by_power_of_two(struct hist_field
*hist_field
,
364 struct tracing_map_elt
*elt
,
365 struct trace_buffer
*buffer
,
366 struct ring_buffer_event
*rbe
,
369 struct hist_field
*operand1
= hist_field
->operands
[0];
370 struct hist_field
*operand2
= hist_field
->operands
[1];
372 u64 val1
= hist_fn_call(operand1
, elt
, buffer
, rbe
, event
);
374 return val1
>> __ffs64(operand2
->constant
);
377 static u64
div_by_not_power_of_two(struct hist_field
*hist_field
,
378 struct tracing_map_elt
*elt
,
379 struct trace_buffer
*buffer
,
380 struct ring_buffer_event
*rbe
,
383 struct hist_field
*operand1
= hist_field
->operands
[0];
384 struct hist_field
*operand2
= hist_field
->operands
[1];
386 u64 val1
= hist_fn_call(operand1
, elt
, buffer
, rbe
, event
);
388 return div64_u64(val1
, operand2
->constant
);
391 static u64
div_by_mult_and_shift(struct hist_field
*hist_field
,
392 struct tracing_map_elt
*elt
,
393 struct trace_buffer
*buffer
,
394 struct ring_buffer_event
*rbe
,
397 struct hist_field
*operand1
= hist_field
->operands
[0];
398 struct hist_field
*operand2
= hist_field
->operands
[1];
400 u64 val1
= hist_fn_call(operand1
, elt
, buffer
, rbe
, event
);
403 * If the divisor is a constant, do a multiplication and shift instead.
405 * Choose Z = some power of 2. If Y <= Z, then:
406 * X / Y = (X * (Z / Y)) / Z
408 * (Z / Y) is a constant (mult) which is calculated at parse time, so:
409 * X / Y = (X * mult) / Z
411 * The division by Z can be replaced by a shift since Z is a power of 2:
412 * X / Y = (X * mult) >> HIST_DIV_SHIFT
414 * As long, as X < Z the results will not be off by more than 1.
416 if (val1
< (1 << HIST_DIV_SHIFT
)) {
417 u64 mult
= operand2
->div_multiplier
;
419 return (val1
* mult
+ ((1 << HIST_DIV_SHIFT
) - 1)) >> HIST_DIV_SHIFT
;
422 return div64_u64(val1
, operand2
->constant
);
425 static u64
hist_field_mult(struct hist_field
*hist_field
,
426 struct tracing_map_elt
*elt
,
427 struct trace_buffer
*buffer
,
428 struct ring_buffer_event
*rbe
,
431 struct hist_field
*operand1
= hist_field
->operands
[0];
432 struct hist_field
*operand2
= hist_field
->operands
[1];
434 u64 val1
= hist_fn_call(operand1
, elt
, buffer
, rbe
, event
);
435 u64 val2
= hist_fn_call(operand2
, elt
, buffer
, rbe
, event
);
440 static u64
hist_field_unary_minus(struct hist_field
*hist_field
,
441 struct tracing_map_elt
*elt
,
442 struct trace_buffer
*buffer
,
443 struct ring_buffer_event
*rbe
,
446 struct hist_field
*operand
= hist_field
->operands
[0];
448 s64 sval
= (s64
)hist_fn_call(operand
, elt
, buffer
, rbe
, event
);
449 u64 val
= (u64
)-sval
;
454 #define DEFINE_HIST_FIELD_FN(type) \
455 static u64 hist_field_##type(struct hist_field *hist_field, \
456 struct tracing_map_elt *elt, \
457 struct trace_buffer *buffer, \
458 struct ring_buffer_event *rbe, \
461 type *addr = (type *)(event + hist_field->field->offset); \
463 return (u64)(unsigned long)*addr; \
466 DEFINE_HIST_FIELD_FN(s64
);
467 DEFINE_HIST_FIELD_FN(u64
);
468 DEFINE_HIST_FIELD_FN(s32
);
469 DEFINE_HIST_FIELD_FN(u32
);
470 DEFINE_HIST_FIELD_FN(s16
);
471 DEFINE_HIST_FIELD_FN(u16
);
472 DEFINE_HIST_FIELD_FN(s8
);
473 DEFINE_HIST_FIELD_FN(u8
);
475 #define for_each_hist_field(i, hist_data) \
476 for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)
478 #define for_each_hist_val_field(i, hist_data) \
479 for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)
481 #define for_each_hist_key_field(i, hist_data) \
482 for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)
484 #define HITCOUNT_IDX 0
485 #define HIST_KEY_SIZE_MAX (MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
487 enum hist_field_flags
{
488 HIST_FIELD_FL_HITCOUNT
= 1 << 0,
489 HIST_FIELD_FL_KEY
= 1 << 1,
490 HIST_FIELD_FL_STRING
= 1 << 2,
491 HIST_FIELD_FL_HEX
= 1 << 3,
492 HIST_FIELD_FL_SYM
= 1 << 4,
493 HIST_FIELD_FL_SYM_OFFSET
= 1 << 5,
494 HIST_FIELD_FL_EXECNAME
= 1 << 6,
495 HIST_FIELD_FL_SYSCALL
= 1 << 7,
496 HIST_FIELD_FL_STACKTRACE
= 1 << 8,
497 HIST_FIELD_FL_LOG2
= 1 << 9,
498 HIST_FIELD_FL_TIMESTAMP
= 1 << 10,
499 HIST_FIELD_FL_TIMESTAMP_USECS
= 1 << 11,
500 HIST_FIELD_FL_VAR
= 1 << 12,
501 HIST_FIELD_FL_EXPR
= 1 << 13,
502 HIST_FIELD_FL_VAR_REF
= 1 << 14,
503 HIST_FIELD_FL_CPU
= 1 << 15,
504 HIST_FIELD_FL_ALIAS
= 1 << 16,
505 HIST_FIELD_FL_BUCKET
= 1 << 17,
506 HIST_FIELD_FL_CONST
= 1 << 18,
507 HIST_FIELD_FL_PERCENT
= 1 << 19,
508 HIST_FIELD_FL_GRAPH
= 1 << 20,
513 char *name
[TRACING_MAP_VARS_MAX
];
514 char *expr
[TRACING_MAP_VARS_MAX
];
517 struct hist_trigger_attrs
{
528 unsigned int map_bits
;
530 char *assignment_str
[TRACING_MAP_VARS_MAX
];
531 unsigned int n_assignments
;
533 char *action_str
[HIST_ACTIONS_MAX
];
534 unsigned int n_actions
;
536 struct var_defs var_defs
;
540 struct hist_field
*var
;
541 struct hist_field
*val
;
544 struct field_var_hist
{
545 struct hist_trigger_data
*hist_data
;
549 struct hist_trigger_data
{
550 struct hist_field
*fields
[HIST_FIELDS_MAX
];
553 unsigned int n_fields
;
555 unsigned int n_var_str
;
556 unsigned int key_size
;
557 struct tracing_map_sort_key sort_keys
[TRACING_MAP_SORT_KEYS_MAX
];
558 unsigned int n_sort_keys
;
559 struct trace_event_file
*event_file
;
560 struct hist_trigger_attrs
*attrs
;
561 struct tracing_map
*map
;
562 bool enable_timestamps
;
564 struct hist_field
*var_refs
[TRACING_MAP_VARS_MAX
];
565 unsigned int n_var_refs
;
567 struct action_data
*actions
[HIST_ACTIONS_MAX
];
568 unsigned int n_actions
;
570 struct field_var
*field_vars
[SYNTH_FIELDS_MAX
];
571 unsigned int n_field_vars
;
572 unsigned int n_field_var_str
;
573 struct field_var_hist
*field_var_hists
[SYNTH_FIELDS_MAX
];
574 unsigned int n_field_var_hists
;
576 struct field_var
*save_vars
[SYNTH_FIELDS_MAX
];
577 unsigned int n_save_vars
;
578 unsigned int n_save_var_str
;
583 typedef void (*action_fn_t
) (struct hist_trigger_data
*hist_data
,
584 struct tracing_map_elt
*elt
,
585 struct trace_buffer
*buffer
, void *rec
,
586 struct ring_buffer_event
*rbe
, void *key
,
587 struct action_data
*data
, u64
*var_ref_vals
);
589 typedef bool (*check_track_val_fn_t
) (u64 track_val
, u64 var_val
);
604 enum handler_id handler
;
605 enum action_id action
;
609 unsigned int n_params
;
610 char *params
[SYNTH_FIELDS_MAX
];
613 * When a histogram trigger is hit, the values of any
614 * references to variables, including variables being passed
615 * as parameters to synthetic events, are collected into a
616 * var_ref_vals array. This var_ref_idx array is an array of
617 * indices into the var_ref_vals array, one for each synthetic
618 * event param, and is passed to the synthetic event
621 unsigned int var_ref_idx
[SYNTH_FIELDS_MAX
];
622 struct synth_event
*synth_event
;
623 bool use_trace_keyword
;
624 char *synth_event_name
;
634 * var_str contains the $-unstripped variable
635 * name referenced by var_ref, and used when
636 * printing the action. Because var_ref
637 * creation is deferred to create_actions(),
638 * we need a per-action way to save it until
639 * then, thus var_str.
644 * var_ref refers to the variable being
645 * tracked e.g onmax($var).
647 struct hist_field
*var_ref
;
650 * track_var contains the 'invisible' tracking
651 * variable created to keep the current
654 struct hist_field
*track_var
;
656 check_track_val_fn_t check_val
;
657 action_fn_t save_data
;
666 unsigned int key_len
;
668 struct tracing_map_elt elt
;
670 struct action_data
*action_data
;
671 struct hist_trigger_data
*hist_data
;
674 struct hist_elt_data
{
677 char **field_var_str
;
681 struct snapshot_context
{
682 struct tracing_map_elt
*elt
;
687 * Returns the specific division function to use if the divisor
688 * is constant. This avoids extra branches when the trigger is hit.
690 static enum hist_field_fn
hist_field_get_div_fn(struct hist_field
*divisor
)
692 u64 div
= divisor
->constant
;
694 if (!(div
& (div
- 1)))
695 return HIST_FIELD_FN_DIV_POWER2
;
697 /* If the divisor is too large, do a regular division */
698 if (div
> (1 << HIST_DIV_SHIFT
))
699 return HIST_FIELD_FN_DIV_NOT_POWER2
;
701 divisor
->div_multiplier
= div64_u64((u64
)(1 << HIST_DIV_SHIFT
), div
);
702 return HIST_FIELD_FN_DIV_MULT_SHIFT
;
705 static void track_data_free(struct track_data
*track_data
)
707 struct hist_elt_data
*elt_data
;
712 kfree(track_data
->key
);
714 elt_data
= track_data
->elt
.private_data
;
716 kfree(elt_data
->comm
);
723 static struct track_data
*track_data_alloc(unsigned int key_len
,
724 struct action_data
*action_data
,
725 struct hist_trigger_data
*hist_data
)
727 struct track_data
*data
= kzalloc(sizeof(*data
), GFP_KERNEL
);
728 struct hist_elt_data
*elt_data
;
731 return ERR_PTR(-ENOMEM
);
733 data
->key
= kzalloc(key_len
, GFP_KERNEL
);
735 track_data_free(data
);
736 return ERR_PTR(-ENOMEM
);
739 data
->key_len
= key_len
;
740 data
->action_data
= action_data
;
741 data
->hist_data
= hist_data
;
743 elt_data
= kzalloc(sizeof(*elt_data
), GFP_KERNEL
);
745 track_data_free(data
);
746 return ERR_PTR(-ENOMEM
);
749 data
->elt
.private_data
= elt_data
;
751 elt_data
->comm
= kzalloc(TASK_COMM_LEN
, GFP_KERNEL
);
752 if (!elt_data
->comm
) {
753 track_data_free(data
);
754 return ERR_PTR(-ENOMEM
);
760 #define HIST_PREFIX "hist:"
762 static char *last_cmd
;
763 static char last_cmd_loc
[MAX_FILTER_STR_VAL
];
765 static int errpos(char *str
)
767 if (!str
|| !last_cmd
)
770 return err_pos(last_cmd
, str
);
773 static void last_cmd_set(struct trace_event_file
*file
, char *str
)
775 const char *system
= NULL
, *name
= NULL
;
776 struct trace_event_call
*call
;
783 last_cmd
= kasprintf(GFP_KERNEL
, HIST_PREFIX
"%s", str
);
788 call
= file
->event_call
;
789 system
= call
->class->system
;
791 name
= trace_event_name(call
);
798 snprintf(last_cmd_loc
, MAX_FILTER_STR_VAL
, HIST_PREFIX
"%s:%s", system
, name
);
801 static void hist_err(struct trace_array
*tr
, u8 err_type
, u16 err_pos
)
806 tracing_log_err(tr
, last_cmd_loc
, last_cmd
, err_text
,
810 static void hist_err_clear(void)
814 last_cmd_loc
[0] = '\0';
817 typedef void (*synth_probe_func_t
) (void *__data
, u64
*var_ref_vals
,
818 unsigned int *var_ref_idx
);
820 static inline void trace_synth(struct synth_event
*event
, u64
*var_ref_vals
,
821 unsigned int *var_ref_idx
)
823 struct tracepoint
*tp
= event
->tp
;
825 if (unlikely(static_key_enabled(&tp
->key
))) {
826 struct tracepoint_func
*probe_func_ptr
;
827 synth_probe_func_t probe_func
;
830 if (!(cpu_online(raw_smp_processor_id())))
833 probe_func_ptr
= rcu_dereference_sched((tp
)->funcs
);
834 if (probe_func_ptr
) {
836 probe_func
= probe_func_ptr
->func
;
837 __data
= probe_func_ptr
->data
;
838 probe_func(__data
, var_ref_vals
, var_ref_idx
);
839 } while ((++probe_func_ptr
)->func
);
844 static void action_trace(struct hist_trigger_data
*hist_data
,
845 struct tracing_map_elt
*elt
,
846 struct trace_buffer
*buffer
, void *rec
,
847 struct ring_buffer_event
*rbe
, void *key
,
848 struct action_data
*data
, u64
*var_ref_vals
)
850 struct synth_event
*event
= data
->synth_event
;
852 trace_synth(event
, var_ref_vals
, data
->var_ref_idx
);
855 struct hist_var_data
{
856 struct list_head list
;
857 struct hist_trigger_data
*hist_data
;
860 static u64
hist_field_timestamp(struct hist_field
*hist_field
,
861 struct tracing_map_elt
*elt
,
862 struct trace_buffer
*buffer
,
863 struct ring_buffer_event
*rbe
,
866 struct hist_trigger_data
*hist_data
= hist_field
->hist_data
;
867 struct trace_array
*tr
= hist_data
->event_file
->tr
;
869 u64 ts
= ring_buffer_event_time_stamp(buffer
, rbe
);
871 if (hist_data
->attrs
->ts_in_usecs
&& trace_clock_in_ns(tr
))
877 static u64
hist_field_cpu(struct hist_field
*hist_field
,
878 struct tracing_map_elt
*elt
,
879 struct trace_buffer
*buffer
,
880 struct ring_buffer_event
*rbe
,
883 int cpu
= smp_processor_id();
889 * check_field_for_var_ref - Check if a VAR_REF field references a variable
890 * @hist_field: The VAR_REF field to check
891 * @var_data: The hist trigger that owns the variable
892 * @var_idx: The trigger variable identifier
894 * Check the given VAR_REF field to see whether or not it references
895 * the given variable associated with the given trigger.
897 * Return: The VAR_REF field if it does reference the variable, NULL if not
899 static struct hist_field
*
900 check_field_for_var_ref(struct hist_field
*hist_field
,
901 struct hist_trigger_data
*var_data
,
902 unsigned int var_idx
)
904 WARN_ON(!(hist_field
&& hist_field
->flags
& HIST_FIELD_FL_VAR_REF
));
906 if (hist_field
&& hist_field
->var
.idx
== var_idx
&&
907 hist_field
->var
.hist_data
== var_data
)
914 * find_var_ref - Check if a trigger has a reference to a trigger variable
915 * @hist_data: The hist trigger that might have a reference to the variable
916 * @var_data: The hist trigger that owns the variable
917 * @var_idx: The trigger variable identifier
919 * Check the list of var_refs[] on the first hist trigger to see
920 * whether any of them are references to the variable on the second
923 * Return: The VAR_REF field referencing the variable if so, NULL if not
925 static struct hist_field
*find_var_ref(struct hist_trigger_data
*hist_data
,
926 struct hist_trigger_data
*var_data
,
927 unsigned int var_idx
)
929 struct hist_field
*hist_field
;
932 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
933 hist_field
= hist_data
->var_refs
[i
];
934 if (check_field_for_var_ref(hist_field
, var_data
, var_idx
))
942 * find_any_var_ref - Check if there is a reference to a given trigger variable
943 * @hist_data: The hist trigger
944 * @var_idx: The trigger variable identifier
946 * Check to see whether the given variable is currently referenced by
949 * The trigger the variable is defined on is explicitly excluded - the
950 * assumption being that a self-reference doesn't prevent a trigger
951 * from being removed.
953 * Return: The VAR_REF field referencing the variable if so, NULL if not
955 static struct hist_field
*find_any_var_ref(struct hist_trigger_data
*hist_data
,
956 unsigned int var_idx
)
958 struct trace_array
*tr
= hist_data
->event_file
->tr
;
959 struct hist_field
*found
= NULL
;
960 struct hist_var_data
*var_data
;
962 list_for_each_entry(var_data
, &tr
->hist_vars
, list
) {
963 if (var_data
->hist_data
== hist_data
)
965 found
= find_var_ref(var_data
->hist_data
, hist_data
, var_idx
);
974 * check_var_refs - Check if there is a reference to any of trigger's variables
975 * @hist_data: The hist trigger
977 * A trigger can define one or more variables. If any one of them is
978 * currently referenced by any other trigger, this function will
981 * Typically used to determine whether or not a trigger can be removed
982 * - if there are any references to a trigger's variables, it cannot.
984 * Return: True if there is a reference to any of trigger's variables
986 static bool check_var_refs(struct hist_trigger_data
*hist_data
)
988 struct hist_field
*field
;
992 for_each_hist_field(i
, hist_data
) {
993 field
= hist_data
->fields
[i
];
994 if (field
&& field
->flags
& HIST_FIELD_FL_VAR
) {
995 if (find_any_var_ref(hist_data
, field
->var
.idx
)) {
1005 static struct hist_var_data
*find_hist_vars(struct hist_trigger_data
*hist_data
)
1007 struct trace_array
*tr
= hist_data
->event_file
->tr
;
1008 struct hist_var_data
*var_data
, *found
= NULL
;
1010 list_for_each_entry(var_data
, &tr
->hist_vars
, list
) {
1011 if (var_data
->hist_data
== hist_data
) {
1020 static bool field_has_hist_vars(struct hist_field
*hist_field
,
1031 if (hist_field
->flags
& HIST_FIELD_FL_VAR
||
1032 hist_field
->flags
& HIST_FIELD_FL_VAR_REF
)
1035 for (i
= 0; i
< HIST_FIELD_OPERANDS_MAX
; i
++) {
1036 struct hist_field
*operand
;
1038 operand
= hist_field
->operands
[i
];
1039 if (field_has_hist_vars(operand
, level
+ 1))
1046 static bool has_hist_vars(struct hist_trigger_data
*hist_data
)
1048 struct hist_field
*hist_field
;
1051 for_each_hist_field(i
, hist_data
) {
1052 hist_field
= hist_data
->fields
[i
];
1053 if (field_has_hist_vars(hist_field
, 0))
1060 static int save_hist_vars(struct hist_trigger_data
*hist_data
)
1062 struct trace_array
*tr
= hist_data
->event_file
->tr
;
1063 struct hist_var_data
*var_data
;
1065 var_data
= find_hist_vars(hist_data
);
1069 if (tracing_check_open_get_tr(tr
))
1072 var_data
= kzalloc(sizeof(*var_data
), GFP_KERNEL
);
1074 trace_array_put(tr
);
1078 var_data
->hist_data
= hist_data
;
1079 list_add(&var_data
->list
, &tr
->hist_vars
);
1084 static void remove_hist_vars(struct hist_trigger_data
*hist_data
)
1086 struct trace_array
*tr
= hist_data
->event_file
->tr
;
1087 struct hist_var_data
*var_data
;
1089 var_data
= find_hist_vars(hist_data
);
1093 if (WARN_ON(check_var_refs(hist_data
)))
1096 list_del(&var_data
->list
);
1100 trace_array_put(tr
);
1103 static struct hist_field
*find_var_field(struct hist_trigger_data
*hist_data
,
1104 const char *var_name
)
1106 struct hist_field
*hist_field
, *found
= NULL
;
1109 for_each_hist_field(i
, hist_data
) {
1110 hist_field
= hist_data
->fields
[i
];
1111 if (hist_field
&& hist_field
->flags
& HIST_FIELD_FL_VAR
&&
1112 strcmp(hist_field
->var
.name
, var_name
) == 0) {
1121 static struct hist_field
*find_var(struct hist_trigger_data
*hist_data
,
1122 struct trace_event_file
*file
,
1123 const char *var_name
)
1125 struct hist_trigger_data
*test_data
;
1126 struct event_trigger_data
*test
;
1127 struct hist_field
*hist_field
;
1129 lockdep_assert_held(&event_mutex
);
1131 hist_field
= find_var_field(hist_data
, var_name
);
1135 list_for_each_entry(test
, &file
->triggers
, list
) {
1136 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
1137 test_data
= test
->private_data
;
1138 hist_field
= find_var_field(test_data
, var_name
);
1147 static struct trace_event_file
*find_var_file(struct trace_array
*tr
,
1152 struct hist_trigger_data
*var_hist_data
;
1153 struct hist_var_data
*var_data
;
1154 struct trace_event_file
*file
, *found
= NULL
;
1157 return find_event_file(tr
, system
, event_name
);
1159 list_for_each_entry(var_data
, &tr
->hist_vars
, list
) {
1160 var_hist_data
= var_data
->hist_data
;
1161 file
= var_hist_data
->event_file
;
1165 if (find_var_field(var_hist_data
, var_name
)) {
1167 hist_err(tr
, HIST_ERR_VAR_NOT_UNIQUE
, errpos(var_name
));
1178 static struct hist_field
*find_file_var(struct trace_event_file
*file
,
1179 const char *var_name
)
1181 struct hist_trigger_data
*test_data
;
1182 struct event_trigger_data
*test
;
1183 struct hist_field
*hist_field
;
1185 lockdep_assert_held(&event_mutex
);
1187 list_for_each_entry(test
, &file
->triggers
, list
) {
1188 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
1189 test_data
= test
->private_data
;
1190 hist_field
= find_var_field(test_data
, var_name
);
1199 static struct hist_field
*
1200 find_match_var(struct hist_trigger_data
*hist_data
, char *var_name
)
1202 struct trace_array
*tr
= hist_data
->event_file
->tr
;
1203 struct hist_field
*hist_field
, *found
= NULL
;
1204 struct trace_event_file
*file
;
1207 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
1208 struct action_data
*data
= hist_data
->actions
[i
];
1210 if (data
->handler
== HANDLER_ONMATCH
) {
1211 char *system
= data
->match_data
.event_system
;
1212 char *event_name
= data
->match_data
.event
;
1214 file
= find_var_file(tr
, system
, event_name
, var_name
);
1217 hist_field
= find_file_var(file
, var_name
);
1220 hist_err(tr
, HIST_ERR_VAR_NOT_UNIQUE
,
1222 return ERR_PTR(-EINVAL
);
1232 static struct hist_field
*find_event_var(struct hist_trigger_data
*hist_data
,
1237 struct trace_array
*tr
= hist_data
->event_file
->tr
;
1238 struct hist_field
*hist_field
= NULL
;
1239 struct trace_event_file
*file
;
1241 if (!system
|| !event_name
) {
1242 hist_field
= find_match_var(hist_data
, var_name
);
1243 if (IS_ERR(hist_field
))
1249 file
= find_var_file(tr
, system
, event_name
, var_name
);
1253 hist_field
= find_file_var(file
, var_name
);
1258 static u64
hist_field_var_ref(struct hist_field
*hist_field
,
1259 struct tracing_map_elt
*elt
,
1260 struct trace_buffer
*buffer
,
1261 struct ring_buffer_event
*rbe
,
1264 struct hist_elt_data
*elt_data
;
1267 if (WARN_ON_ONCE(!elt
))
1270 elt_data
= elt
->private_data
;
1271 var_val
= elt_data
->var_ref_vals
[hist_field
->var_ref_idx
];
1276 static bool resolve_var_refs(struct hist_trigger_data
*hist_data
, void *key
,
1277 u64
*var_ref_vals
, bool self
)
1279 struct hist_trigger_data
*var_data
;
1280 struct tracing_map_elt
*var_elt
;
1281 struct hist_field
*hist_field
;
1282 unsigned int i
, var_idx
;
1283 bool resolved
= true;
1286 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
1287 hist_field
= hist_data
->var_refs
[i
];
1288 var_idx
= hist_field
->var
.idx
;
1289 var_data
= hist_field
->var
.hist_data
;
1291 if (var_data
== NULL
) {
1296 if ((self
&& var_data
!= hist_data
) ||
1297 (!self
&& var_data
== hist_data
))
1300 var_elt
= tracing_map_lookup(var_data
->map
, key
);
1306 if (!tracing_map_var_set(var_elt
, var_idx
)) {
1311 if (self
|| !hist_field
->read_once
)
1312 var_val
= tracing_map_read_var(var_elt
, var_idx
);
1314 var_val
= tracing_map_read_var_once(var_elt
, var_idx
);
1316 var_ref_vals
[i
] = var_val
;
1322 static const char *hist_field_name(struct hist_field
*field
,
1325 const char *field_name
= "";
1327 if (WARN_ON_ONCE(!field
))
1334 field_name
= field
->field
->name
;
1335 else if (field
->flags
& HIST_FIELD_FL_LOG2
||
1336 field
->flags
& HIST_FIELD_FL_ALIAS
||
1337 field
->flags
& HIST_FIELD_FL_BUCKET
)
1338 field_name
= hist_field_name(field
->operands
[0], ++level
);
1339 else if (field
->flags
& HIST_FIELD_FL_CPU
)
1340 field_name
= "common_cpu";
1341 else if (field
->flags
& HIST_FIELD_FL_EXPR
||
1342 field
->flags
& HIST_FIELD_FL_VAR_REF
) {
1343 if (field
->system
) {
1344 static char full_name
[MAX_FILTER_STR_VAL
];
1346 strcat(full_name
, field
->system
);
1347 strcat(full_name
, ".");
1348 strcat(full_name
, field
->event_name
);
1349 strcat(full_name
, ".");
1350 strcat(full_name
, field
->name
);
1351 field_name
= full_name
;
1353 field_name
= field
->name
;
1354 } else if (field
->flags
& HIST_FIELD_FL_TIMESTAMP
)
1355 field_name
= "common_timestamp";
1356 else if (field
->flags
& HIST_FIELD_FL_STACKTRACE
) {
1357 field_name
= "common_stacktrace";
1358 } else if (field
->flags
& HIST_FIELD_FL_HITCOUNT
)
1359 field_name
= "hitcount";
1361 if (field_name
== NULL
)
1367 static enum hist_field_fn
select_value_fn(int field_size
, int field_is_signed
)
1369 switch (field_size
) {
1371 if (field_is_signed
)
1372 return HIST_FIELD_FN_S64
;
1374 return HIST_FIELD_FN_U64
;
1376 if (field_is_signed
)
1377 return HIST_FIELD_FN_S32
;
1379 return HIST_FIELD_FN_U32
;
1381 if (field_is_signed
)
1382 return HIST_FIELD_FN_S16
;
1384 return HIST_FIELD_FN_U16
;
1386 if (field_is_signed
)
1387 return HIST_FIELD_FN_S8
;
1389 return HIST_FIELD_FN_U8
;
1392 return HIST_FIELD_FN_NOP
;
1395 static int parse_map_size(char *str
)
1397 unsigned long size
, map_bits
;
1400 ret
= kstrtoul(str
, 0, &size
);
1404 map_bits
= ilog2(roundup_pow_of_two(size
));
1405 if (map_bits
< TRACING_MAP_BITS_MIN
||
1406 map_bits
> TRACING_MAP_BITS_MAX
)
1414 static void destroy_hist_trigger_attrs(struct hist_trigger_attrs
*attrs
)
1421 for (i
= 0; i
< attrs
->n_assignments
; i
++)
1422 kfree(attrs
->assignment_str
[i
]);
1424 for (i
= 0; i
< attrs
->n_actions
; i
++)
1425 kfree(attrs
->action_str
[i
]);
1428 kfree(attrs
->sort_key_str
);
1429 kfree(attrs
->keys_str
);
1430 kfree(attrs
->vals_str
);
1431 kfree(attrs
->clock
);
1435 static int parse_action(char *str
, struct hist_trigger_attrs
*attrs
)
1439 if (attrs
->n_actions
>= HIST_ACTIONS_MAX
)
1442 if ((str_has_prefix(str
, "onmatch(")) ||
1443 (str_has_prefix(str
, "onmax(")) ||
1444 (str_has_prefix(str
, "onchange("))) {
1445 attrs
->action_str
[attrs
->n_actions
] = kstrdup(str
, GFP_KERNEL
);
1446 if (!attrs
->action_str
[attrs
->n_actions
]) {
1456 static int parse_assignment(struct trace_array
*tr
,
1457 char *str
, struct hist_trigger_attrs
*attrs
)
1461 if ((len
= str_has_prefix(str
, "key=")) ||
1462 (len
= str_has_prefix(str
, "keys="))) {
1463 attrs
->keys_str
= kstrdup(str
+ len
, GFP_KERNEL
);
1464 if (!attrs
->keys_str
) {
1468 } else if ((len
= str_has_prefix(str
, "val=")) ||
1469 (len
= str_has_prefix(str
, "vals=")) ||
1470 (len
= str_has_prefix(str
, "values="))) {
1471 attrs
->vals_str
= kstrdup(str
+ len
, GFP_KERNEL
);
1472 if (!attrs
->vals_str
) {
1476 } else if ((len
= str_has_prefix(str
, "sort="))) {
1477 attrs
->sort_key_str
= kstrdup(str
+ len
, GFP_KERNEL
);
1478 if (!attrs
->sort_key_str
) {
1482 } else if (str_has_prefix(str
, "name=")) {
1483 attrs
->name
= kstrdup(str
, GFP_KERNEL
);
1488 } else if ((len
= str_has_prefix(str
, "clock="))) {
1491 str
= strstrip(str
);
1492 attrs
->clock
= kstrdup(str
, GFP_KERNEL
);
1493 if (!attrs
->clock
) {
1497 } else if ((len
= str_has_prefix(str
, "size="))) {
1498 int map_bits
= parse_map_size(str
+ len
);
1504 attrs
->map_bits
= map_bits
;
1508 if (attrs
->n_assignments
== TRACING_MAP_VARS_MAX
) {
1509 hist_err(tr
, HIST_ERR_TOO_MANY_VARS
, errpos(str
));
1514 assignment
= kstrdup(str
, GFP_KERNEL
);
1520 attrs
->assignment_str
[attrs
->n_assignments
++] = assignment
;
1526 static struct hist_trigger_attrs
*
1527 parse_hist_trigger_attrs(struct trace_array
*tr
, char *trigger_str
)
1529 struct hist_trigger_attrs
*attrs
;
1532 attrs
= kzalloc(sizeof(*attrs
), GFP_KERNEL
);
1534 return ERR_PTR(-ENOMEM
);
1536 while (trigger_str
) {
1537 char *str
= strsep(&trigger_str
, ":");
1540 rhs
= strchr(str
, '=');
1542 if (!strlen(++rhs
)) {
1544 hist_err(tr
, HIST_ERR_EMPTY_ASSIGNMENT
, errpos(str
));
1547 ret
= parse_assignment(tr
, str
, attrs
);
1550 } else if (strcmp(str
, "nohitcount") == 0 ||
1551 strcmp(str
, "NOHC") == 0)
1552 attrs
->no_hitcount
= true;
1553 else if (strcmp(str
, "pause") == 0)
1554 attrs
->pause
= true;
1555 else if ((strcmp(str
, "cont") == 0) ||
1556 (strcmp(str
, "continue") == 0))
1558 else if (strcmp(str
, "clear") == 0)
1559 attrs
->clear
= true;
1561 ret
= parse_action(str
, attrs
);
1567 if (!attrs
->keys_str
) {
1572 if (!attrs
->clock
) {
1573 attrs
->clock
= kstrdup("global", GFP_KERNEL
);
1574 if (!attrs
->clock
) {
1582 destroy_hist_trigger_attrs(attrs
);
1584 return ERR_PTR(ret
);
1587 static inline void save_comm(char *comm
, struct task_struct
*task
)
1590 strcpy(comm
, "<idle>");
1594 if (WARN_ON_ONCE(task
->pid
< 0)) {
1595 strcpy(comm
, "<XXX>");
1599 strscpy(comm
, task
->comm
, TASK_COMM_LEN
);
1602 static void hist_elt_data_free(struct hist_elt_data
*elt_data
)
1606 for (i
= 0; i
< elt_data
->n_field_var_str
; i
++)
1607 kfree(elt_data
->field_var_str
[i
]);
1609 kfree(elt_data
->field_var_str
);
1611 kfree(elt_data
->comm
);
1615 static void hist_trigger_elt_data_free(struct tracing_map_elt
*elt
)
1617 struct hist_elt_data
*elt_data
= elt
->private_data
;
1619 hist_elt_data_free(elt_data
);
1622 static int hist_trigger_elt_data_alloc(struct tracing_map_elt
*elt
)
1624 struct hist_trigger_data
*hist_data
= elt
->map
->private_data
;
1625 unsigned int size
= TASK_COMM_LEN
;
1626 struct hist_elt_data
*elt_data
;
1627 struct hist_field
*hist_field
;
1628 unsigned int i
, n_str
;
1630 elt_data
= kzalloc(sizeof(*elt_data
), GFP_KERNEL
);
1634 for_each_hist_field(i
, hist_data
) {
1635 hist_field
= hist_data
->fields
[i
];
1637 if (hist_field
->flags
& HIST_FIELD_FL_EXECNAME
) {
1638 elt_data
->comm
= kzalloc(size
, GFP_KERNEL
);
1639 if (!elt_data
->comm
) {
1647 n_str
= hist_data
->n_field_var_str
+ hist_data
->n_save_var_str
+
1648 hist_data
->n_var_str
;
1649 if (n_str
> SYNTH_FIELDS_MAX
) {
1650 hist_elt_data_free(elt_data
);
1654 BUILD_BUG_ON(STR_VAR_LEN_MAX
& (sizeof(u64
) - 1));
1656 size
= STR_VAR_LEN_MAX
;
1658 elt_data
->field_var_str
= kcalloc(n_str
, sizeof(char *), GFP_KERNEL
);
1659 if (!elt_data
->field_var_str
) {
1660 hist_elt_data_free(elt_data
);
1663 elt_data
->n_field_var_str
= n_str
;
1665 for (i
= 0; i
< n_str
; i
++) {
1666 elt_data
->field_var_str
[i
] = kzalloc(size
, GFP_KERNEL
);
1667 if (!elt_data
->field_var_str
[i
]) {
1668 hist_elt_data_free(elt_data
);
1673 elt
->private_data
= elt_data
;
1678 static void hist_trigger_elt_data_init(struct tracing_map_elt
*elt
)
1680 struct hist_elt_data
*elt_data
= elt
->private_data
;
1683 save_comm(elt_data
->comm
, current
);
1686 static const struct tracing_map_ops hist_trigger_elt_data_ops
= {
1687 .elt_alloc
= hist_trigger_elt_data_alloc
,
1688 .elt_free
= hist_trigger_elt_data_free
,
1689 .elt_init
= hist_trigger_elt_data_init
,
1692 static const char *get_hist_field_flags(struct hist_field
*hist_field
)
1694 const char *flags_str
= NULL
;
1696 if (hist_field
->flags
& HIST_FIELD_FL_HEX
)
1698 else if (hist_field
->flags
& HIST_FIELD_FL_SYM
)
1700 else if (hist_field
->flags
& HIST_FIELD_FL_SYM_OFFSET
)
1701 flags_str
= "sym-offset";
1702 else if (hist_field
->flags
& HIST_FIELD_FL_EXECNAME
)
1703 flags_str
= "execname";
1704 else if (hist_field
->flags
& HIST_FIELD_FL_SYSCALL
)
1705 flags_str
= "syscall";
1706 else if (hist_field
->flags
& HIST_FIELD_FL_LOG2
)
1708 else if (hist_field
->flags
& HIST_FIELD_FL_BUCKET
)
1709 flags_str
= "buckets";
1710 else if (hist_field
->flags
& HIST_FIELD_FL_TIMESTAMP_USECS
)
1711 flags_str
= "usecs";
1712 else if (hist_field
->flags
& HIST_FIELD_FL_PERCENT
)
1713 flags_str
= "percent";
1714 else if (hist_field
->flags
& HIST_FIELD_FL_GRAPH
)
1715 flags_str
= "graph";
1716 else if (hist_field
->flags
& HIST_FIELD_FL_STACKTRACE
)
1717 flags_str
= "stacktrace";
1722 static void expr_field_str(struct hist_field
*field
, char *expr
)
1724 if (field
->flags
& HIST_FIELD_FL_VAR_REF
)
1726 else if (field
->flags
& HIST_FIELD_FL_CONST
) {
1727 char str
[HIST_CONST_DIGITS_MAX
];
1729 snprintf(str
, HIST_CONST_DIGITS_MAX
, "%llu", field
->constant
);
1733 strcat(expr
, hist_field_name(field
, 0));
1735 if (field
->flags
&& !(field
->flags
& HIST_FIELD_FL_VAR_REF
)) {
1736 const char *flags_str
= get_hist_field_flags(field
);
1740 strcat(expr
, flags_str
);
1745 static char *expr_str(struct hist_field
*field
, unsigned int level
)
1752 expr
= kzalloc(MAX_FILTER_STR_VAL
, GFP_KERNEL
);
1756 if (!field
->operands
[0]) {
1757 expr_field_str(field
, expr
);
1761 if (field
->operator == FIELD_OP_UNARY_MINUS
) {
1765 subexpr
= expr_str(field
->operands
[0], ++level
);
1770 strcat(expr
, subexpr
);
1778 expr_field_str(field
->operands
[0], expr
);
1780 switch (field
->operator) {
1781 case FIELD_OP_MINUS
:
1798 expr_field_str(field
->operands
[1], expr
);
1804 * If field_op != FIELD_OP_NONE, *sep points to the root operator
1805 * of the expression tree to be evaluated.
1807 static int contains_operator(char *str
, char **sep
)
1809 enum field_op_id field_op
= FIELD_OP_NONE
;
1810 char *minus_op
, *plus_op
, *div_op
, *mult_op
;
1814 * Report the last occurrence of the operators first, so that the
1815 * expression is evaluated left to right. This is important since
1816 * subtraction and division are not associative.
1819 * 64/8/4/2 is 1, i.e 64/8/4/2 = ((64/8)/4)/2
1820 * 14-7-5-2 is 0, i.e 14-7-5-2 = ((14-7)-5)-2
1824 * First, find lower precedence addition and subtraction
1825 * since the expression will be evaluated recursively.
1827 minus_op
= strrchr(str
, '-');
1830 * Unary minus is not supported in sub-expressions. If
1831 * present, it is always the next root operator.
1833 if (minus_op
== str
) {
1834 field_op
= FIELD_OP_UNARY_MINUS
;
1838 field_op
= FIELD_OP_MINUS
;
1841 plus_op
= strrchr(str
, '+');
1842 if (plus_op
|| minus_op
) {
1844 * For operators of the same precedence use to rightmost as the
1845 * root, so that the expression is evaluated left to right.
1847 if (plus_op
> minus_op
)
1848 field_op
= FIELD_OP_PLUS
;
1853 * Multiplication and division have higher precedence than addition and
1856 div_op
= strrchr(str
, '/');
1858 field_op
= FIELD_OP_DIV
;
1860 mult_op
= strrchr(str
, '*');
1862 * For operators of the same precedence use to rightmost as the
1863 * root, so that the expression is evaluated left to right.
1865 if (mult_op
> div_op
)
1866 field_op
= FIELD_OP_MULT
;
1871 case FIELD_OP_UNARY_MINUS
:
1872 case FIELD_OP_MINUS
:
1894 static void get_hist_field(struct hist_field
*hist_field
)
1899 static void __destroy_hist_field(struct hist_field
*hist_field
)
1901 if (--hist_field
->ref
> 1)
1904 kfree(hist_field
->var
.name
);
1905 kfree(hist_field
->name
);
1907 /* Can likely be a const */
1908 kfree_const(hist_field
->type
);
1910 kfree(hist_field
->system
);
1911 kfree(hist_field
->event_name
);
1916 static void destroy_hist_field(struct hist_field
*hist_field
,
1927 if (hist_field
->flags
& HIST_FIELD_FL_VAR_REF
)
1928 return; /* var refs will be destroyed separately */
1930 for (i
= 0; i
< HIST_FIELD_OPERANDS_MAX
; i
++)
1931 destroy_hist_field(hist_field
->operands
[i
], level
+ 1);
1933 __destroy_hist_field(hist_field
);
1936 static struct hist_field
*create_hist_field(struct hist_trigger_data
*hist_data
,
1937 struct ftrace_event_field
*field
,
1938 unsigned long flags
,
1941 struct hist_field
*hist_field
;
1943 if (field
&& is_function_field(field
))
1946 hist_field
= kzalloc(sizeof(struct hist_field
), GFP_KERNEL
);
1950 hist_field
->ref
= 1;
1952 hist_field
->hist_data
= hist_data
;
1954 if (flags
& HIST_FIELD_FL_EXPR
|| flags
& HIST_FIELD_FL_ALIAS
)
1955 goto out
; /* caller will populate */
1957 if (flags
& HIST_FIELD_FL_VAR_REF
) {
1958 hist_field
->fn_num
= HIST_FIELD_FN_VAR_REF
;
1962 if (flags
& HIST_FIELD_FL_HITCOUNT
) {
1963 hist_field
->fn_num
= HIST_FIELD_FN_COUNTER
;
1964 hist_field
->size
= sizeof(u64
);
1965 hist_field
->type
= "u64";
1969 if (flags
& HIST_FIELD_FL_CONST
) {
1970 hist_field
->fn_num
= HIST_FIELD_FN_CONST
;
1971 hist_field
->size
= sizeof(u64
);
1972 hist_field
->type
= kstrdup("u64", GFP_KERNEL
);
1973 if (!hist_field
->type
)
1978 if (flags
& HIST_FIELD_FL_STACKTRACE
) {
1980 hist_field
->fn_num
= HIST_FIELD_FN_STACK
;
1982 hist_field
->fn_num
= HIST_FIELD_FN_NOP
;
1983 hist_field
->size
= HIST_STACKTRACE_SIZE
;
1984 hist_field
->type
= kstrdup_const("unsigned long[]", GFP_KERNEL
);
1985 if (!hist_field
->type
)
1990 if (flags
& (HIST_FIELD_FL_LOG2
| HIST_FIELD_FL_BUCKET
)) {
1991 unsigned long fl
= flags
& ~(HIST_FIELD_FL_LOG2
| HIST_FIELD_FL_BUCKET
);
1992 hist_field
->fn_num
= flags
& HIST_FIELD_FL_LOG2
? HIST_FIELD_FN_LOG2
:
1993 HIST_FIELD_FN_BUCKET
;
1994 hist_field
->operands
[0] = create_hist_field(hist_data
, field
, fl
, NULL
);
1995 if (!hist_field
->operands
[0])
1997 hist_field
->size
= hist_field
->operands
[0]->size
;
1998 hist_field
->type
= kstrdup_const(hist_field
->operands
[0]->type
, GFP_KERNEL
);
1999 if (!hist_field
->type
)
2004 if (flags
& HIST_FIELD_FL_TIMESTAMP
) {
2005 hist_field
->fn_num
= HIST_FIELD_FN_TIMESTAMP
;
2006 hist_field
->size
= sizeof(u64
);
2007 hist_field
->type
= "u64";
2011 if (flags
& HIST_FIELD_FL_CPU
) {
2012 hist_field
->fn_num
= HIST_FIELD_FN_CPU
;
2013 hist_field
->size
= sizeof(int);
2014 hist_field
->type
= "unsigned int";
2018 if (WARN_ON_ONCE(!field
))
2021 /* Pointers to strings are just pointers and dangerous to dereference */
2022 if (is_string_field(field
) &&
2023 (field
->filter_type
!= FILTER_PTR_STRING
)) {
2024 flags
|= HIST_FIELD_FL_STRING
;
2026 hist_field
->size
= MAX_FILTER_STR_VAL
;
2027 hist_field
->type
= kstrdup_const(field
->type
, GFP_KERNEL
);
2028 if (!hist_field
->type
)
2031 if (field
->filter_type
== FILTER_STATIC_STRING
) {
2032 hist_field
->fn_num
= HIST_FIELD_FN_STRING
;
2033 hist_field
->size
= field
->size
;
2034 } else if (field
->filter_type
== FILTER_DYN_STRING
) {
2035 hist_field
->fn_num
= HIST_FIELD_FN_DYNSTRING
;
2036 } else if (field
->filter_type
== FILTER_RDYN_STRING
)
2037 hist_field
->fn_num
= HIST_FIELD_FN_RELDYNSTRING
;
2039 hist_field
->fn_num
= HIST_FIELD_FN_PSTRING
;
2041 hist_field
->size
= field
->size
;
2042 hist_field
->is_signed
= field
->is_signed
;
2043 hist_field
->type
= kstrdup_const(field
->type
, GFP_KERNEL
);
2044 if (!hist_field
->type
)
2047 hist_field
->fn_num
= select_value_fn(field
->size
,
2049 if (hist_field
->fn_num
== HIST_FIELD_FN_NOP
) {
2050 destroy_hist_field(hist_field
, 0);
2055 hist_field
->field
= field
;
2056 hist_field
->flags
= flags
;
2059 hist_field
->var
.name
= kstrdup(var_name
, GFP_KERNEL
);
2060 if (!hist_field
->var
.name
)
2066 destroy_hist_field(hist_field
, 0);
2070 static void destroy_hist_fields(struct hist_trigger_data
*hist_data
)
2074 for (i
= 0; i
< HIST_FIELDS_MAX
; i
++) {
2075 if (hist_data
->fields
[i
]) {
2076 destroy_hist_field(hist_data
->fields
[i
], 0);
2077 hist_data
->fields
[i
] = NULL
;
2081 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
2082 WARN_ON(!(hist_data
->var_refs
[i
]->flags
& HIST_FIELD_FL_VAR_REF
));
2083 __destroy_hist_field(hist_data
->var_refs
[i
]);
2084 hist_data
->var_refs
[i
] = NULL
;
2088 static int init_var_ref(struct hist_field
*ref_field
,
2089 struct hist_field
*var_field
,
2090 char *system
, char *event_name
)
2094 ref_field
->var
.idx
= var_field
->var
.idx
;
2095 ref_field
->var
.hist_data
= var_field
->hist_data
;
2096 ref_field
->size
= var_field
->size
;
2097 ref_field
->is_signed
= var_field
->is_signed
;
2098 ref_field
->flags
|= var_field
->flags
&
2099 (HIST_FIELD_FL_TIMESTAMP
| HIST_FIELD_FL_TIMESTAMP_USECS
);
2102 ref_field
->system
= kstrdup(system
, GFP_KERNEL
);
2103 if (!ref_field
->system
)
2108 ref_field
->event_name
= kstrdup(event_name
, GFP_KERNEL
);
2109 if (!ref_field
->event_name
) {
2115 if (var_field
->var
.name
) {
2116 ref_field
->name
= kstrdup(var_field
->var
.name
, GFP_KERNEL
);
2117 if (!ref_field
->name
) {
2121 } else if (var_field
->name
) {
2122 ref_field
->name
= kstrdup(var_field
->name
, GFP_KERNEL
);
2123 if (!ref_field
->name
) {
2129 ref_field
->type
= kstrdup_const(var_field
->type
, GFP_KERNEL
);
2130 if (!ref_field
->type
) {
2137 kfree(ref_field
->system
);
2138 ref_field
->system
= NULL
;
2139 kfree(ref_field
->event_name
);
2140 ref_field
->event_name
= NULL
;
2141 kfree(ref_field
->name
);
2142 ref_field
->name
= NULL
;
2147 static int find_var_ref_idx(struct hist_trigger_data
*hist_data
,
2148 struct hist_field
*var_field
)
2150 struct hist_field
*ref_field
;
2153 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
2154 ref_field
= hist_data
->var_refs
[i
];
2155 if (ref_field
->var
.idx
== var_field
->var
.idx
&&
2156 ref_field
->var
.hist_data
== var_field
->hist_data
)
2164 * create_var_ref - Create a variable reference and attach it to trigger
2165 * @hist_data: The trigger that will be referencing the variable
2166 * @var_field: The VAR field to create a reference to
2167 * @system: The optional system string
2168 * @event_name: The optional event_name string
2170 * Given a variable hist_field, create a VAR_REF hist_field that
2171 * represents a reference to it.
2173 * This function also adds the reference to the trigger that
2174 * now references the variable.
2176 * Return: The VAR_REF field if successful, NULL if not
2178 static struct hist_field
*create_var_ref(struct hist_trigger_data
*hist_data
,
2179 struct hist_field
*var_field
,
2180 char *system
, char *event_name
)
2182 unsigned long flags
= HIST_FIELD_FL_VAR_REF
;
2183 struct hist_field
*ref_field
;
2186 /* Check if the variable already exists */
2187 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
2188 ref_field
= hist_data
->var_refs
[i
];
2189 if (ref_field
->var
.idx
== var_field
->var
.idx
&&
2190 ref_field
->var
.hist_data
== var_field
->hist_data
) {
2191 get_hist_field(ref_field
);
2195 /* Sanity check to avoid out-of-bound write on 'hist_data->var_refs' */
2196 if (hist_data
->n_var_refs
>= TRACING_MAP_VARS_MAX
)
2198 ref_field
= create_hist_field(var_field
->hist_data
, NULL
, flags
, NULL
);
2200 if (init_var_ref(ref_field
, var_field
, system
, event_name
)) {
2201 destroy_hist_field(ref_field
, 0);
2205 hist_data
->var_refs
[hist_data
->n_var_refs
] = ref_field
;
2206 ref_field
->var_ref_idx
= hist_data
->n_var_refs
++;
2212 static bool is_var_ref(char *var_name
)
2214 if (!var_name
|| strlen(var_name
) < 2 || var_name
[0] != '$')
2220 static char *field_name_from_var(struct hist_trigger_data
*hist_data
,
2226 for (i
= 0; i
< hist_data
->attrs
->var_defs
.n_vars
; i
++) {
2227 name
= hist_data
->attrs
->var_defs
.name
[i
];
2229 if (strcmp(var_name
, name
) == 0) {
2230 field
= hist_data
->attrs
->var_defs
.expr
[i
];
2231 if (contains_operator(field
, NULL
) || is_var_ref(field
))
2240 static char *local_field_var_ref(struct hist_trigger_data
*hist_data
,
2241 char *system
, char *event_name
,
2244 struct trace_event_call
*call
;
2246 if (system
&& event_name
) {
2247 call
= hist_data
->event_file
->event_call
;
2249 if (strcmp(system
, call
->class->system
) != 0)
2252 if (strcmp(event_name
, trace_event_name(call
)) != 0)
2256 if (!!system
!= !!event_name
)
2259 if (!is_var_ref(var_name
))
2264 return field_name_from_var(hist_data
, var_name
);
2267 static struct hist_field
*parse_var_ref(struct hist_trigger_data
*hist_data
,
2268 char *system
, char *event_name
,
2271 struct hist_field
*var_field
= NULL
, *ref_field
= NULL
;
2272 struct trace_array
*tr
= hist_data
->event_file
->tr
;
2274 if (!is_var_ref(var_name
))
2279 var_field
= find_event_var(hist_data
, system
, event_name
, var_name
);
2281 ref_field
= create_var_ref(hist_data
, var_field
,
2282 system
, event_name
);
2285 hist_err(tr
, HIST_ERR_VAR_NOT_FOUND
, errpos(var_name
));
2290 static struct ftrace_event_field
*
2291 parse_field(struct hist_trigger_data
*hist_data
, struct trace_event_file
*file
,
2292 char *field_str
, unsigned long *flags
, unsigned long *buckets
)
2294 struct ftrace_event_field
*field
= NULL
;
2295 char *field_name
, *modifier
, *str
;
2296 struct trace_array
*tr
= file
->tr
;
2298 modifier
= str
= kstrdup(field_str
, GFP_KERNEL
);
2300 return ERR_PTR(-ENOMEM
);
2302 field_name
= strsep(&modifier
, ".");
2304 if (strcmp(modifier
, "hex") == 0)
2305 *flags
|= HIST_FIELD_FL_HEX
;
2306 else if (strcmp(modifier
, "sym") == 0)
2307 *flags
|= HIST_FIELD_FL_SYM
;
2309 * 'sym-offset' occurrences in the trigger string are modified
2310 * to 'symXoffset' to simplify arithmetic expression parsing.
2312 else if (strcmp(modifier
, "symXoffset") == 0)
2313 *flags
|= HIST_FIELD_FL_SYM_OFFSET
;
2314 else if ((strcmp(modifier
, "execname") == 0) &&
2315 (strcmp(field_name
, "common_pid") == 0))
2316 *flags
|= HIST_FIELD_FL_EXECNAME
;
2317 else if (strcmp(modifier
, "syscall") == 0)
2318 *flags
|= HIST_FIELD_FL_SYSCALL
;
2319 else if (strcmp(modifier
, "stacktrace") == 0)
2320 *flags
|= HIST_FIELD_FL_STACKTRACE
;
2321 else if (strcmp(modifier
, "log2") == 0)
2322 *flags
|= HIST_FIELD_FL_LOG2
;
2323 else if (strcmp(modifier
, "usecs") == 0)
2324 *flags
|= HIST_FIELD_FL_TIMESTAMP_USECS
;
2325 else if (strncmp(modifier
, "bucket", 6) == 0) {
2330 if (*modifier
== 's')
2332 if (*modifier
!= '=')
2335 ret
= kstrtoul(modifier
, 0, buckets
);
2336 if (ret
|| !(*buckets
))
2338 *flags
|= HIST_FIELD_FL_BUCKET
;
2339 } else if (strncmp(modifier
, "percent", 7) == 0) {
2340 if (*flags
& (HIST_FIELD_FL_VAR
| HIST_FIELD_FL_KEY
))
2342 *flags
|= HIST_FIELD_FL_PERCENT
;
2343 } else if (strncmp(modifier
, "graph", 5) == 0) {
2344 if (*flags
& (HIST_FIELD_FL_VAR
| HIST_FIELD_FL_KEY
))
2346 *flags
|= HIST_FIELD_FL_GRAPH
;
2349 hist_err(tr
, HIST_ERR_BAD_FIELD_MODIFIER
, errpos(modifier
));
2350 field
= ERR_PTR(-EINVAL
);
2355 if (strcmp(field_name
, "common_timestamp") == 0) {
2356 *flags
|= HIST_FIELD_FL_TIMESTAMP
;
2357 hist_data
->enable_timestamps
= true;
2358 if (*flags
& HIST_FIELD_FL_TIMESTAMP_USECS
)
2359 hist_data
->attrs
->ts_in_usecs
= true;
2360 } else if (strcmp(field_name
, "common_stacktrace") == 0) {
2361 *flags
|= HIST_FIELD_FL_STACKTRACE
;
2362 } else if (strcmp(field_name
, "common_cpu") == 0)
2363 *flags
|= HIST_FIELD_FL_CPU
;
2364 else if (strcmp(field_name
, "hitcount") == 0)
2365 *flags
|= HIST_FIELD_FL_HITCOUNT
;
2367 field
= trace_find_event_field(file
->event_call
, field_name
);
2368 if (!field
|| !field
->size
) {
2370 * For backward compatibility, if field_name
2371 * was "cpu" or "stacktrace", then we treat this
2372 * the same as common_cpu and common_stacktrace
2373 * respectively. This also works for "CPU", and
2376 if (field
&& field
->filter_type
== FILTER_CPU
) {
2377 *flags
|= HIST_FIELD_FL_CPU
;
2378 } else if (field
&& field
->filter_type
== FILTER_STACKTRACE
) {
2379 *flags
|= HIST_FIELD_FL_STACKTRACE
;
2381 hist_err(tr
, HIST_ERR_FIELD_NOT_FOUND
,
2382 errpos(field_name
));
2383 field
= ERR_PTR(-EINVAL
);
2394 static struct hist_field
*create_alias(struct hist_trigger_data
*hist_data
,
2395 struct hist_field
*var_ref
,
2398 struct hist_field
*alias
= NULL
;
2399 unsigned long flags
= HIST_FIELD_FL_ALIAS
| HIST_FIELD_FL_VAR
;
2401 alias
= create_hist_field(hist_data
, NULL
, flags
, var_name
);
2405 alias
->fn_num
= var_ref
->fn_num
;
2406 alias
->operands
[0] = var_ref
;
2408 if (init_var_ref(alias
, var_ref
, var_ref
->system
, var_ref
->event_name
)) {
2409 destroy_hist_field(alias
, 0);
2413 alias
->var_ref_idx
= var_ref
->var_ref_idx
;
2418 static struct hist_field
*parse_const(struct hist_trigger_data
*hist_data
,
2419 char *str
, char *var_name
,
2420 unsigned long *flags
)
2422 struct trace_array
*tr
= hist_data
->event_file
->tr
;
2423 struct hist_field
*field
= NULL
;
2426 if (kstrtoull(str
, 0, &constant
)) {
2427 hist_err(tr
, HIST_ERR_EXPECT_NUMBER
, errpos(str
));
2431 *flags
|= HIST_FIELD_FL_CONST
;
2432 field
= create_hist_field(hist_data
, NULL
, *flags
, var_name
);
2436 field
->constant
= constant
;
2441 static struct hist_field
*parse_atom(struct hist_trigger_data
*hist_data
,
2442 struct trace_event_file
*file
, char *str
,
2443 unsigned long *flags
, char *var_name
)
2445 char *s
, *ref_system
= NULL
, *ref_event
= NULL
, *ref_var
= str
;
2446 struct ftrace_event_field
*field
= NULL
;
2447 struct hist_field
*hist_field
= NULL
;
2448 unsigned long buckets
= 0;
2451 if (isdigit(str
[0])) {
2452 hist_field
= parse_const(hist_data
, str
, var_name
, flags
);
2460 s
= strchr(str
, '.');
2462 s
= strchr(++s
, '.');
2464 ref_system
= strsep(&str
, ".");
2469 ref_event
= strsep(&str
, ".");
2478 s
= local_field_var_ref(hist_data
, ref_system
, ref_event
, ref_var
);
2480 hist_field
= parse_var_ref(hist_data
, ref_system
,
2481 ref_event
, ref_var
);
2484 hist_field
= create_alias(hist_data
, hist_field
, var_name
);
2495 field
= parse_field(hist_data
, file
, str
, flags
, &buckets
);
2496 if (IS_ERR(field
)) {
2497 ret
= PTR_ERR(field
);
2501 hist_field
= create_hist_field(hist_data
, field
, *flags
, var_name
);
2506 hist_field
->buckets
= buckets
;
2510 return ERR_PTR(ret
);
/* Forward declaration: parse_unary() and parse_expr() are mutually recursive. */
static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
				     struct trace_event_file *file,
				     char *str, unsigned long flags,
				     char *var_name, unsigned int *n_subexprs);
2518 static struct hist_field
*parse_unary(struct hist_trigger_data
*hist_data
,
2519 struct trace_event_file
*file
,
2520 char *str
, unsigned long flags
,
2521 char *var_name
, unsigned int *n_subexprs
)
2523 struct hist_field
*operand1
, *expr
= NULL
;
2524 unsigned long operand_flags
;
2528 /* Unary minus operator, increment n_subexprs */
2531 /* we support only -(xxx) i.e. explicit parens required */
2533 if (*n_subexprs
> 3) {
2534 hist_err(file
->tr
, HIST_ERR_TOO_MANY_SUBEXPR
, errpos(str
));
2539 str
++; /* skip leading '-' */
2541 s
= strchr(str
, '(');
2549 s
= strrchr(str
, ')');
2551 /* unary minus not supported in sub-expressions */
2552 if (*(s
+1) != '\0') {
2553 hist_err(file
->tr
, HIST_ERR_UNARY_MINUS_SUBEXPR
,
2561 ret
= -EINVAL
; /* no closing ')' */
2565 flags
|= HIST_FIELD_FL_EXPR
;
2566 expr
= create_hist_field(hist_data
, NULL
, flags
, var_name
);
2573 operand1
= parse_expr(hist_data
, file
, str
, operand_flags
, NULL
, n_subexprs
);
2574 if (IS_ERR(operand1
)) {
2575 ret
= PTR_ERR(operand1
);
2578 if (operand1
->flags
& HIST_FIELD_FL_STRING
) {
2579 /* String type can not be the operand of unary operator. */
2580 hist_err(file
->tr
, HIST_ERR_INVALID_STR_OPERAND
, errpos(str
));
2581 destroy_hist_field(operand1
, 0);
2586 expr
->flags
|= operand1
->flags
&
2587 (HIST_FIELD_FL_TIMESTAMP
| HIST_FIELD_FL_TIMESTAMP_USECS
);
2588 expr
->fn_num
= HIST_FIELD_FN_UMINUS
;
2589 expr
->operands
[0] = operand1
;
2590 expr
->size
= operand1
->size
;
2591 expr
->is_signed
= operand1
->is_signed
;
2592 expr
->operator = FIELD_OP_UNARY_MINUS
;
2593 expr
->name
= expr_str(expr
, 0);
2594 expr
->type
= kstrdup_const(operand1
->type
, GFP_KERNEL
);
2602 destroy_hist_field(expr
, 0);
2603 return ERR_PTR(ret
);
2607 * If the operands are var refs, return pointers the
2608 * variable(s) referenced in var1 and var2, else NULL.
2610 static int check_expr_operands(struct trace_array
*tr
,
2611 struct hist_field
*operand1
,
2612 struct hist_field
*operand2
,
2613 struct hist_field
**var1
,
2614 struct hist_field
**var2
)
2616 unsigned long operand1_flags
= operand1
->flags
;
2617 unsigned long operand2_flags
= operand2
->flags
;
2619 if ((operand1_flags
& HIST_FIELD_FL_VAR_REF
) ||
2620 (operand1_flags
& HIST_FIELD_FL_ALIAS
)) {
2621 struct hist_field
*var
;
2623 var
= find_var_field(operand1
->var
.hist_data
, operand1
->name
);
2626 operand1_flags
= var
->flags
;
2630 if ((operand2_flags
& HIST_FIELD_FL_VAR_REF
) ||
2631 (operand2_flags
& HIST_FIELD_FL_ALIAS
)) {
2632 struct hist_field
*var
;
2634 var
= find_var_field(operand2
->var
.hist_data
, operand2
->name
);
2637 operand2_flags
= var
->flags
;
2641 if ((operand1_flags
& HIST_FIELD_FL_TIMESTAMP_USECS
) !=
2642 (operand2_flags
& HIST_FIELD_FL_TIMESTAMP_USECS
)) {
2643 hist_err(tr
, HIST_ERR_TIMESTAMP_MISMATCH
, 0);
2650 static struct hist_field
*parse_expr(struct hist_trigger_data
*hist_data
,
2651 struct trace_event_file
*file
,
2652 char *str
, unsigned long flags
,
2653 char *var_name
, unsigned int *n_subexprs
)
2655 struct hist_field
*operand1
= NULL
, *operand2
= NULL
, *expr
= NULL
;
2656 struct hist_field
*var1
= NULL
, *var2
= NULL
;
2657 unsigned long operand_flags
, operand2_flags
;
2658 int field_op
, ret
= -EINVAL
;
2659 char *sep
, *operand1_str
;
2660 enum hist_field_fn op_fn
;
2661 bool combine_consts
;
2663 if (*n_subexprs
> 3) {
2664 hist_err(file
->tr
, HIST_ERR_TOO_MANY_SUBEXPR
, errpos(str
));
2665 return ERR_PTR(-EINVAL
);
2668 field_op
= contains_operator(str
, &sep
);
2670 if (field_op
== FIELD_OP_NONE
)
2671 return parse_atom(hist_data
, file
, str
, &flags
, var_name
);
2673 if (field_op
== FIELD_OP_UNARY_MINUS
)
2674 return parse_unary(hist_data
, file
, str
, flags
, var_name
, n_subexprs
);
2676 /* Binary operator found, increment n_subexprs */
2679 /* Split the expression string at the root operator */
2681 return ERR_PTR(-EINVAL
);
2687 /* Binary operator requires both operands */
2688 if (*operand1_str
== '\0' || *str
== '\0')
2689 return ERR_PTR(-EINVAL
);
2693 /* LHS of string is an expression e.g. a+b in a+b+c */
2694 operand1
= parse_expr(hist_data
, file
, operand1_str
, operand_flags
, NULL
, n_subexprs
);
2695 if (IS_ERR(operand1
))
2696 return ERR_CAST(operand1
);
2698 if (operand1
->flags
& HIST_FIELD_FL_STRING
) {
2699 hist_err(file
->tr
, HIST_ERR_INVALID_STR_OPERAND
, errpos(operand1_str
));
2704 /* RHS of string is another expression e.g. c in a+b+c */
2706 operand2
= parse_expr(hist_data
, file
, str
, operand_flags
, NULL
, n_subexprs
);
2707 if (IS_ERR(operand2
)) {
2708 ret
= PTR_ERR(operand2
);
2711 if (operand2
->flags
& HIST_FIELD_FL_STRING
) {
2712 hist_err(file
->tr
, HIST_ERR_INVALID_STR_OPERAND
, errpos(str
));
2718 case FIELD_OP_MINUS
:
2719 op_fn
= HIST_FIELD_FN_MINUS
;
2722 op_fn
= HIST_FIELD_FN_PLUS
;
2725 op_fn
= HIST_FIELD_FN_DIV
;
2728 op_fn
= HIST_FIELD_FN_MULT
;
2735 ret
= check_expr_operands(file
->tr
, operand1
, operand2
, &var1
, &var2
);
2739 operand_flags
= var1
? var1
->flags
: operand1
->flags
;
2740 operand2_flags
= var2
? var2
->flags
: operand2
->flags
;
2743 * If both operands are constant, the expression can be
2744 * collapsed to a single constant.
2746 combine_consts
= operand_flags
& operand2_flags
& HIST_FIELD_FL_CONST
;
2748 flags
|= combine_consts
? HIST_FIELD_FL_CONST
: HIST_FIELD_FL_EXPR
;
2750 flags
|= operand1
->flags
&
2751 (HIST_FIELD_FL_TIMESTAMP
| HIST_FIELD_FL_TIMESTAMP_USECS
);
2753 expr
= create_hist_field(hist_data
, NULL
, flags
, var_name
);
2759 operand1
->read_once
= true;
2760 operand2
->read_once
= true;
2762 /* The operands are now owned and free'd by 'expr' */
2763 expr
->operands
[0] = operand1
;
2764 expr
->operands
[1] = operand2
;
2766 if (field_op
== FIELD_OP_DIV
&&
2767 operand2_flags
& HIST_FIELD_FL_CONST
) {
2768 u64 divisor
= var2
? var2
->constant
: operand2
->constant
;
2771 hist_err(file
->tr
, HIST_ERR_DIVISION_BY_ZERO
, errpos(str
));
2777 * Copy the divisor here so we don't have to look it up
2778 * later if this is a var ref
2780 operand2
->constant
= divisor
;
2781 op_fn
= hist_field_get_div_fn(operand2
);
2784 expr
->fn_num
= op_fn
;
2786 if (combine_consts
) {
2788 expr
->operands
[0] = var1
;
2790 expr
->operands
[1] = var2
;
2792 expr
->constant
= hist_fn_call(expr
, NULL
, NULL
, NULL
, NULL
);
2793 expr
->fn_num
= HIST_FIELD_FN_CONST
;
2795 expr
->operands
[0] = NULL
;
2796 expr
->operands
[1] = NULL
;
2799 * var refs won't be destroyed immediately
2800 * See: destroy_hist_field()
2802 destroy_hist_field(operand2
, 0);
2803 destroy_hist_field(operand1
, 0);
2805 expr
->name
= expr_str(expr
, 0);
2807 /* The operand sizes should be the same, so just pick one */
2808 expr
->size
= operand1
->size
;
2809 expr
->is_signed
= operand1
->is_signed
;
2811 expr
->operator = field_op
;
2812 expr
->type
= kstrdup_const(operand1
->type
, GFP_KERNEL
);
2818 expr
->name
= expr_str(expr
, 0);
2824 destroy_hist_field(operand2
, 0);
2826 destroy_hist_field(operand1
, 0);
2827 return ERR_PTR(ret
);
2830 destroy_hist_field(expr
, 0);
2831 return ERR_PTR(ret
);
2834 static char *find_trigger_filter(struct hist_trigger_data
*hist_data
,
2835 struct trace_event_file
*file
)
2837 struct event_trigger_data
*test
;
2839 lockdep_assert_held(&event_mutex
);
2841 list_for_each_entry(test
, &file
->triggers
, list
) {
2842 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
2843 if (test
->private_data
== hist_data
)
2844 return test
->filter_str
;
2851 static struct event_command trigger_hist_cmd
;
2852 static int event_hist_trigger_parse(struct event_command
*cmd_ops
,
2853 struct trace_event_file
*file
,
2854 char *glob
, char *cmd
,
2855 char *param_and_filter
);
2857 static bool compatible_keys(struct hist_trigger_data
*target_hist_data
,
2858 struct hist_trigger_data
*hist_data
,
2859 unsigned int n_keys
)
2861 struct hist_field
*target_hist_field
, *hist_field
;
2862 unsigned int n
, i
, j
;
2864 if (hist_data
->n_fields
- hist_data
->n_vals
!= n_keys
)
2867 i
= hist_data
->n_vals
;
2868 j
= target_hist_data
->n_vals
;
2870 for (n
= 0; n
< n_keys
; n
++) {
2871 hist_field
= hist_data
->fields
[i
+ n
];
2872 target_hist_field
= target_hist_data
->fields
[j
+ n
];
2874 if (strcmp(hist_field
->type
, target_hist_field
->type
) != 0)
2876 if (hist_field
->size
!= target_hist_field
->size
)
2878 if (hist_field
->is_signed
!= target_hist_field
->is_signed
)
2885 static struct hist_trigger_data
*
2886 find_compatible_hist(struct hist_trigger_data
*target_hist_data
,
2887 struct trace_event_file
*file
)
2889 struct hist_trigger_data
*hist_data
;
2890 struct event_trigger_data
*test
;
2891 unsigned int n_keys
;
2893 lockdep_assert_held(&event_mutex
);
2895 n_keys
= target_hist_data
->n_fields
- target_hist_data
->n_vals
;
2897 list_for_each_entry(test
, &file
->triggers
, list
) {
2898 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
2899 hist_data
= test
->private_data
;
2901 if (compatible_keys(target_hist_data
, hist_data
, n_keys
))
2909 static struct trace_event_file
*event_file(struct trace_array
*tr
,
2910 char *system
, char *event_name
)
2912 struct trace_event_file
*file
;
2914 file
= __find_event_file(tr
, system
, event_name
);
2916 return ERR_PTR(-EINVAL
);
2921 static struct hist_field
*
2922 find_synthetic_field_var(struct hist_trigger_data
*target_hist_data
,
2923 char *system
, char *event_name
, char *field_name
)
2925 struct hist_field
*event_var
;
2926 char *synthetic_name
;
2928 synthetic_name
= kzalloc(MAX_FILTER_STR_VAL
, GFP_KERNEL
);
2929 if (!synthetic_name
)
2930 return ERR_PTR(-ENOMEM
);
2932 strcpy(synthetic_name
, "synthetic_");
2933 strcat(synthetic_name
, field_name
);
2935 event_var
= find_event_var(target_hist_data
, system
, event_name
, synthetic_name
);
2937 kfree(synthetic_name
);
2943 * create_field_var_hist - Automatically create a histogram and var for a field
2944 * @target_hist_data: The target hist trigger
2945 * @subsys_name: Optional subsystem name
2946 * @event_name: Optional event name
2947 * @field_name: The name of the field (and the resulting variable)
2949 * Hist trigger actions fetch data from variables, not directly from
2950 * events. However, for convenience, users are allowed to directly
2951 * specify an event field in an action, which will be automatically
2952 * converted into a variable on their behalf.
2954 * If a user specifies a field on an event that isn't the event the
2955 * histogram currently being defined (the target event histogram), the
2956 * only way that can be accomplished is if a new hist trigger is
2957 * created and the field variable defined on that.
2959 * This function creates a new histogram compatible with the target
2960 * event (meaning a histogram with the same key as the target
2961 * histogram), and creates a variable for the specified field, but
2962 * with 'synthetic_' prepended to the variable name in order to avoid
2963 * collision with normal field variables.
2965 * Return: The variable created for the field.
2967 static struct hist_field
*
2968 create_field_var_hist(struct hist_trigger_data
*target_hist_data
,
2969 char *subsys_name
, char *event_name
, char *field_name
)
2971 struct trace_array
*tr
= target_hist_data
->event_file
->tr
;
2972 struct hist_trigger_data
*hist_data
;
2973 unsigned int i
, n
, first
= true;
2974 struct field_var_hist
*var_hist
;
2975 struct trace_event_file
*file
;
2976 struct hist_field
*key_field
;
2977 struct hist_field
*event_var
;
2982 if (target_hist_data
->n_field_var_hists
>= SYNTH_FIELDS_MAX
) {
2983 hist_err(tr
, HIST_ERR_TOO_MANY_FIELD_VARS
, errpos(field_name
));
2984 return ERR_PTR(-EINVAL
);
2987 file
= event_file(tr
, subsys_name
, event_name
);
2990 hist_err(tr
, HIST_ERR_EVENT_FILE_NOT_FOUND
, errpos(field_name
));
2991 ret
= PTR_ERR(file
);
2992 return ERR_PTR(ret
);
2996 * Look for a histogram compatible with target. We'll use the
2997 * found histogram specification to create a new matching
2998 * histogram with our variable on it. target_hist_data is not
2999 * yet a registered histogram so we can't use that.
3001 hist_data
= find_compatible_hist(target_hist_data
, file
);
3003 hist_err(tr
, HIST_ERR_HIST_NOT_FOUND
, errpos(field_name
));
3004 return ERR_PTR(-EINVAL
);
3007 /* See if a synthetic field variable has already been created */
3008 event_var
= find_synthetic_field_var(target_hist_data
, subsys_name
,
3009 event_name
, field_name
);
3010 if (!IS_ERR_OR_NULL(event_var
))
3013 var_hist
= kzalloc(sizeof(*var_hist
), GFP_KERNEL
);
3015 return ERR_PTR(-ENOMEM
);
3017 cmd
= kzalloc(MAX_FILTER_STR_VAL
, GFP_KERNEL
);
3020 return ERR_PTR(-ENOMEM
);
3023 /* Use the same keys as the compatible histogram */
3024 strcat(cmd
, "keys=");
3026 for_each_hist_key_field(i
, hist_data
) {
3027 key_field
= hist_data
->fields
[i
];
3030 strcat(cmd
, key_field
->field
->name
);
3034 /* Create the synthetic field variable specification */
3035 strcat(cmd
, ":synthetic_");
3036 strcat(cmd
, field_name
);
3038 strcat(cmd
, field_name
);
3040 /* Use the same filter as the compatible histogram */
3041 saved_filter
= find_trigger_filter(hist_data
, file
);
3043 strcat(cmd
, " if ");
3044 strcat(cmd
, saved_filter
);
3047 var_hist
->cmd
= kstrdup(cmd
, GFP_KERNEL
);
3048 if (!var_hist
->cmd
) {
3051 return ERR_PTR(-ENOMEM
);
3054 /* Save the compatible histogram information */
3055 var_hist
->hist_data
= hist_data
;
3057 /* Create the new histogram with our variable */
3058 ret
= event_hist_trigger_parse(&trigger_hist_cmd
, file
,
3062 kfree(var_hist
->cmd
);
3064 hist_err(tr
, HIST_ERR_HIST_CREATE_FAIL
, errpos(field_name
));
3065 return ERR_PTR(ret
);
3070 /* If we can't find the variable, something went wrong */
3071 event_var
= find_synthetic_field_var(target_hist_data
, subsys_name
,
3072 event_name
, field_name
);
3073 if (IS_ERR_OR_NULL(event_var
)) {
3074 kfree(var_hist
->cmd
);
3076 hist_err(tr
, HIST_ERR_SYNTH_VAR_NOT_FOUND
, errpos(field_name
));
3077 return ERR_PTR(-EINVAL
);
3080 n
= target_hist_data
->n_field_var_hists
;
3081 target_hist_data
->field_var_hists
[n
] = var_hist
;
3082 target_hist_data
->n_field_var_hists
++;
3087 static struct hist_field
*
3088 find_target_event_var(struct hist_trigger_data
*hist_data
,
3089 char *subsys_name
, char *event_name
, char *var_name
)
3091 struct trace_event_file
*file
= hist_data
->event_file
;
3092 struct hist_field
*hist_field
= NULL
;
3095 struct trace_event_call
*call
;
3100 call
= file
->event_call
;
3102 if (strcmp(subsys_name
, call
->class->system
) != 0)
3105 if (strcmp(event_name
, trace_event_name(call
)) != 0)
3109 hist_field
= find_var_field(hist_data
, var_name
);
3114 static inline void __update_field_vars(struct tracing_map_elt
*elt
,
3115 struct trace_buffer
*buffer
,
3116 struct ring_buffer_event
*rbe
,
3118 struct field_var
**field_vars
,
3119 unsigned int n_field_vars
,
3120 unsigned int field_var_str_start
)
3122 struct hist_elt_data
*elt_data
= elt
->private_data
;
3123 unsigned int i
, j
, var_idx
;
3126 /* Make sure stacktrace can fit in the string variable length */
3127 BUILD_BUG_ON((HIST_STACKTRACE_DEPTH
+ 1) * sizeof(long) >= STR_VAR_LEN_MAX
);
3129 for (i
= 0, j
= field_var_str_start
; i
< n_field_vars
; i
++) {
3130 struct field_var
*field_var
= field_vars
[i
];
3131 struct hist_field
*var
= field_var
->var
;
3132 struct hist_field
*val
= field_var
->val
;
3134 var_val
= hist_fn_call(val
, elt
, buffer
, rbe
, rec
);
3135 var_idx
= var
->var
.idx
;
3137 if (val
->flags
& (HIST_FIELD_FL_STRING
|
3138 HIST_FIELD_FL_STACKTRACE
)) {
3139 char *str
= elt_data
->field_var_str
[j
++];
3140 char *val_str
= (char *)(uintptr_t)var_val
;
3143 if (val
->flags
& HIST_FIELD_FL_STRING
) {
3144 size
= min(val
->size
, STR_VAR_LEN_MAX
);
3145 strscpy(str
, val_str
, size
);
3147 char *stack_start
= str
+ sizeof(unsigned long);
3150 e
= stack_trace_save((void *)stack_start
,
3151 HIST_STACKTRACE_DEPTH
,
3152 HIST_STACKTRACE_SKIP
);
3153 if (e
< HIST_STACKTRACE_DEPTH
- 1)
3154 ((unsigned long *)stack_start
)[e
] = 0;
3155 *((unsigned long *)str
) = e
;
3157 var_val
= (u64
)(uintptr_t)str
;
3159 tracing_map_set_var(elt
, var_idx
, var_val
);
3163 static void update_field_vars(struct hist_trigger_data
*hist_data
,
3164 struct tracing_map_elt
*elt
,
3165 struct trace_buffer
*buffer
,
3166 struct ring_buffer_event
*rbe
,
3169 __update_field_vars(elt
, buffer
, rbe
, rec
, hist_data
->field_vars
,
3170 hist_data
->n_field_vars
, 0);
3173 static void save_track_data_vars(struct hist_trigger_data
*hist_data
,
3174 struct tracing_map_elt
*elt
,
3175 struct trace_buffer
*buffer
, void *rec
,
3176 struct ring_buffer_event
*rbe
, void *key
,
3177 struct action_data
*data
, u64
*var_ref_vals
)
3179 __update_field_vars(elt
, buffer
, rbe
, rec
, hist_data
->save_vars
,
3180 hist_data
->n_save_vars
, hist_data
->n_field_var_str
);
3183 static struct hist_field
*create_var(struct hist_trigger_data
*hist_data
,
3184 struct trace_event_file
*file
,
3185 char *name
, int size
, const char *type
)
3187 struct hist_field
*var
;
3190 if (find_var(hist_data
, file
, name
) && !hist_data
->remove
) {
3191 var
= ERR_PTR(-EINVAL
);
3195 var
= kzalloc(sizeof(struct hist_field
), GFP_KERNEL
);
3197 var
= ERR_PTR(-ENOMEM
);
3201 idx
= tracing_map_add_var(hist_data
->map
);
3204 var
= ERR_PTR(-EINVAL
);
3209 var
->flags
= HIST_FIELD_FL_VAR
;
3211 var
->var
.hist_data
= var
->hist_data
= hist_data
;
3213 var
->var
.name
= kstrdup(name
, GFP_KERNEL
);
3214 var
->type
= kstrdup_const(type
, GFP_KERNEL
);
3215 if (!var
->var
.name
|| !var
->type
) {
3216 kfree_const(var
->type
);
3217 kfree(var
->var
.name
);
3219 var
= ERR_PTR(-ENOMEM
);
3225 static struct field_var
*create_field_var(struct hist_trigger_data
*hist_data
,
3226 struct trace_event_file
*file
,
3229 struct hist_field
*val
= NULL
, *var
= NULL
;
3230 unsigned long flags
= HIST_FIELD_FL_VAR
;
3231 struct trace_array
*tr
= file
->tr
;
3232 struct field_var
*field_var
;
3235 if (hist_data
->n_field_vars
>= SYNTH_FIELDS_MAX
) {
3236 hist_err(tr
, HIST_ERR_TOO_MANY_FIELD_VARS
, errpos(field_name
));
3241 val
= parse_atom(hist_data
, file
, field_name
, &flags
, NULL
);
3243 hist_err(tr
, HIST_ERR_FIELD_VAR_PARSE_FAIL
, errpos(field_name
));
3248 var
= create_var(hist_data
, file
, field_name
, val
->size
, val
->type
);
3250 hist_err(tr
, HIST_ERR_VAR_CREATE_FIND_FAIL
, errpos(field_name
));
3256 field_var
= kzalloc(sizeof(struct field_var
), GFP_KERNEL
);
3264 field_var
->var
= var
;
3265 field_var
->val
= val
;
3269 field_var
= ERR_PTR(ret
);
3274 * create_target_field_var - Automatically create a variable for a field
3275 * @target_hist_data: The target hist trigger
3276 * @subsys_name: Optional subsystem name
3277 * @event_name: Optional event name
3278 * @var_name: The name of the field (and the resulting variable)
3280 * Hist trigger actions fetch data from variables, not directly from
3281 * events. However, for convenience, users are allowed to directly
3282 * specify an event field in an action, which will be automatically
3283 * converted into a variable on their behalf.
3285 * This function creates a field variable with the name var_name on
3286 * the hist trigger currently being defined on the target event. If
3287 * subsys_name and event_name are specified, this function simply
3288 * verifies that they do in fact match the target event subsystem and
3291 * Return: The variable created for the field.
3293 static struct field_var
*
3294 create_target_field_var(struct hist_trigger_data
*target_hist_data
,
3295 char *subsys_name
, char *event_name
, char *var_name
)
3297 struct trace_event_file
*file
= target_hist_data
->event_file
;
3300 struct trace_event_call
*call
;
3305 call
= file
->event_call
;
3307 if (strcmp(subsys_name
, call
->class->system
) != 0)
3310 if (strcmp(event_name
, trace_event_name(call
)) != 0)
3314 return create_field_var(target_hist_data
, file
, var_name
);
3317 static bool check_track_val_max(u64 track_val
, u64 var_val
)
3319 if (var_val
<= track_val
)
3325 static bool check_track_val_changed(u64 track_val
, u64 var_val
)
3327 if (var_val
== track_val
)
3333 static u64
get_track_val(struct hist_trigger_data
*hist_data
,
3334 struct tracing_map_elt
*elt
,
3335 struct action_data
*data
)
3337 unsigned int track_var_idx
= data
->track_data
.track_var
->var
.idx
;
3340 track_val
= tracing_map_read_var(elt
, track_var_idx
);
3345 static void save_track_val(struct hist_trigger_data
*hist_data
,
3346 struct tracing_map_elt
*elt
,
3347 struct action_data
*data
, u64 var_val
)
3349 unsigned int track_var_idx
= data
->track_data
.track_var
->var
.idx
;
3351 tracing_map_set_var(elt
, track_var_idx
, var_val
);
3354 static void save_track_data(struct hist_trigger_data
*hist_data
,
3355 struct tracing_map_elt
*elt
,
3356 struct trace_buffer
*buffer
, void *rec
,
3357 struct ring_buffer_event
*rbe
, void *key
,
3358 struct action_data
*data
, u64
*var_ref_vals
)
3360 if (data
->track_data
.save_data
)
3361 data
->track_data
.save_data(hist_data
, elt
, buffer
, rec
, rbe
,
3362 key
, data
, var_ref_vals
);
3365 static bool check_track_val(struct tracing_map_elt
*elt
,
3366 struct action_data
*data
,
3369 struct hist_trigger_data
*hist_data
;
3372 hist_data
= data
->track_data
.track_var
->hist_data
;
3373 track_val
= get_track_val(hist_data
, elt
, data
);
3375 return data
->track_data
.check_val(track_val
, var_val
);
3378 #ifdef CONFIG_TRACER_SNAPSHOT
3379 static bool cond_snapshot_update(struct trace_array
*tr
, void *cond_data
)
3381 /* called with tr->max_lock held */
3382 struct track_data
*track_data
= tr
->cond_snapshot
->cond_data
;
3383 struct hist_elt_data
*elt_data
, *track_elt_data
;
3384 struct snapshot_context
*context
= cond_data
;
3385 struct action_data
*action
;
3391 action
= track_data
->action_data
;
3393 track_val
= get_track_val(track_data
->hist_data
, context
->elt
,
3394 track_data
->action_data
);
3396 if (!action
->track_data
.check_val(track_data
->track_val
, track_val
))
3399 track_data
->track_val
= track_val
;
3400 memcpy(track_data
->key
, context
->key
, track_data
->key_len
);
3402 elt_data
= context
->elt
->private_data
;
3403 track_elt_data
= track_data
->elt
.private_data
;
3405 strscpy(track_elt_data
->comm
, elt_data
->comm
, TASK_COMM_LEN
);
3407 track_data
->updated
= true;
3412 static void save_track_data_snapshot(struct hist_trigger_data
*hist_data
,
3413 struct tracing_map_elt
*elt
,
3414 struct trace_buffer
*buffer
, void *rec
,
3415 struct ring_buffer_event
*rbe
, void *key
,
3416 struct action_data
*data
,
3419 struct trace_event_file
*file
= hist_data
->event_file
;
3420 struct snapshot_context context
;
3425 tracing_snapshot_cond(file
->tr
, &context
);
3428 static void hist_trigger_print_key(struct seq_file
*m
,
3429 struct hist_trigger_data
*hist_data
,
3431 struct tracing_map_elt
*elt
);
3433 static struct action_data
*snapshot_action(struct hist_trigger_data
*hist_data
)
3437 if (!hist_data
->n_actions
)
3440 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
3441 struct action_data
*data
= hist_data
->actions
[i
];
3443 if (data
->action
== ACTION_SNAPSHOT
)
3450 static void track_data_snapshot_print(struct seq_file
*m
,
3451 struct hist_trigger_data
*hist_data
)
3453 struct trace_event_file
*file
= hist_data
->event_file
;
3454 struct track_data
*track_data
;
3455 struct action_data
*action
;
3457 track_data
= tracing_cond_snapshot_data(file
->tr
);
3461 if (!track_data
->updated
)
3464 action
= snapshot_action(hist_data
);
3468 seq_puts(m
, "\nSnapshot taken (see tracing/snapshot). Details:\n");
3469 seq_printf(m
, "\ttriggering value { %s(%s) }: %10llu",
3470 action
->handler
== HANDLER_ONMAX
? "onmax" : "onchange",
3471 action
->track_data
.var_str
, track_data
->track_val
);
3473 seq_puts(m
, "\ttriggered by event with key: ");
3474 hist_trigger_print_key(m
, hist_data
, track_data
->key
, &track_data
->elt
);
3478 static bool cond_snapshot_update(struct trace_array
*tr
, void *cond_data
)
3482 static void save_track_data_snapshot(struct hist_trigger_data
*hist_data
,
3483 struct tracing_map_elt
*elt
,
3484 struct trace_buffer
*buffer
, void *rec
,
3485 struct ring_buffer_event
*rbe
, void *key
,
3486 struct action_data
*data
,
3487 u64
*var_ref_vals
) {}
3488 static void track_data_snapshot_print(struct seq_file
*m
,
3489 struct hist_trigger_data
*hist_data
) {}
3490 #endif /* CONFIG_TRACER_SNAPSHOT */
3492 static void track_data_print(struct seq_file
*m
,
3493 struct hist_trigger_data
*hist_data
,
3494 struct tracing_map_elt
*elt
,
3495 struct action_data
*data
)
3497 u64 track_val
= get_track_val(hist_data
, elt
, data
);
3498 unsigned int i
, save_var_idx
;
3500 if (data
->handler
== HANDLER_ONMAX
)
3501 seq_printf(m
, "\n\tmax: %10llu", track_val
);
3502 else if (data
->handler
== HANDLER_ONCHANGE
)
3503 seq_printf(m
, "\n\tchanged: %10llu", track_val
);
3505 if (data
->action
== ACTION_SNAPSHOT
)
3508 for (i
= 0; i
< hist_data
->n_save_vars
; i
++) {
3509 struct hist_field
*save_val
= hist_data
->save_vars
[i
]->val
;
3510 struct hist_field
*save_var
= hist_data
->save_vars
[i
]->var
;
3513 save_var_idx
= save_var
->var
.idx
;
3515 val
= tracing_map_read_var(elt
, save_var_idx
);
3517 if (save_val
->flags
& HIST_FIELD_FL_STRING
) {
3518 seq_printf(m
, " %s: %-32s", save_var
->var
.name
,
3519 (char *)(uintptr_t)(val
));
3521 seq_printf(m
, " %s: %10llu", save_var
->var
.name
, val
);
3525 static void ontrack_action(struct hist_trigger_data
*hist_data
,
3526 struct tracing_map_elt
*elt
,
3527 struct trace_buffer
*buffer
, void *rec
,
3528 struct ring_buffer_event
*rbe
, void *key
,
3529 struct action_data
*data
, u64
*var_ref_vals
)
3531 u64 var_val
= var_ref_vals
[data
->track_data
.var_ref
->var_ref_idx
];
3533 if (check_track_val(elt
, data
, var_val
)) {
3534 save_track_val(hist_data
, elt
, data
, var_val
);
3535 save_track_data(hist_data
, elt
, buffer
, rec
, rbe
,
3536 key
, data
, var_ref_vals
);
3540 static void action_data_destroy(struct action_data
*data
)
3544 lockdep_assert_held(&event_mutex
);
3546 kfree(data
->action_name
);
3548 for (i
= 0; i
< data
->n_params
; i
++)
3549 kfree(data
->params
[i
]);
3551 if (data
->synth_event
)
3552 data
->synth_event
->ref
--;
3554 kfree(data
->synth_event_name
);
3559 static void track_data_destroy(struct hist_trigger_data
*hist_data
,
3560 struct action_data
*data
)
3562 struct trace_event_file
*file
= hist_data
->event_file
;
3564 destroy_hist_field(data
->track_data
.track_var
, 0);
3566 if (data
->action
== ACTION_SNAPSHOT
) {
3567 struct track_data
*track_data
;
3569 track_data
= tracing_cond_snapshot_data(file
->tr
);
3570 if (track_data
&& track_data
->hist_data
== hist_data
) {
3571 tracing_snapshot_cond_disable(file
->tr
);
3572 track_data_free(track_data
);
3576 kfree(data
->track_data
.var_str
);
3578 action_data_destroy(data
);
3581 static int action_create(struct hist_trigger_data
*hist_data
,
3582 struct action_data
*data
);
3584 static int track_data_create(struct hist_trigger_data
*hist_data
,
3585 struct action_data
*data
)
3587 struct hist_field
*var_field
, *ref_field
, *track_var
= NULL
;
3588 struct trace_event_file
*file
= hist_data
->event_file
;
3589 struct trace_array
*tr
= file
->tr
;
3590 char *track_data_var_str
;
3593 track_data_var_str
= data
->track_data
.var_str
;
3594 if (track_data_var_str
[0] != '$') {
3595 hist_err(tr
, HIST_ERR_ONX_NOT_VAR
, errpos(track_data_var_str
));
3598 track_data_var_str
++;
3600 var_field
= find_target_event_var(hist_data
, NULL
, NULL
, track_data_var_str
);
3602 hist_err(tr
, HIST_ERR_ONX_VAR_NOT_FOUND
, errpos(track_data_var_str
));
3606 ref_field
= create_var_ref(hist_data
, var_field
, NULL
, NULL
);
3610 data
->track_data
.var_ref
= ref_field
;
3612 if (data
->handler
== HANDLER_ONMAX
)
3613 track_var
= create_var(hist_data
, file
, "__max", sizeof(u64
), "u64");
3614 if (IS_ERR(track_var
)) {
3615 hist_err(tr
, HIST_ERR_ONX_VAR_CREATE_FAIL
, 0);
3616 ret
= PTR_ERR(track_var
);
3620 if (data
->handler
== HANDLER_ONCHANGE
)
3621 track_var
= create_var(hist_data
, file
, "__change", sizeof(u64
), "u64");
3622 if (IS_ERR(track_var
)) {
3623 hist_err(tr
, HIST_ERR_ONX_VAR_CREATE_FAIL
, 0);
3624 ret
= PTR_ERR(track_var
);
3627 data
->track_data
.track_var
= track_var
;
3629 ret
= action_create(hist_data
, data
);
3634 static int parse_action_params(struct trace_array
*tr
, char *params
,
3635 struct action_data
*data
)
3637 char *param
, *saved_param
;
3638 bool first_param
= true;
3642 if (data
->n_params
>= SYNTH_FIELDS_MAX
) {
3643 hist_err(tr
, HIST_ERR_TOO_MANY_PARAMS
, 0);
3648 param
= strsep(¶ms
, ",");
3650 hist_err(tr
, HIST_ERR_PARAM_NOT_FOUND
, 0);
3655 param
= strstrip(param
);
3656 if (strlen(param
) < 2) {
3657 hist_err(tr
, HIST_ERR_INVALID_PARAM
, errpos(param
));
3662 saved_param
= kstrdup(param
, GFP_KERNEL
);
3668 if (first_param
&& data
->use_trace_keyword
) {
3669 data
->synth_event_name
= saved_param
;
3670 first_param
= false;
3673 first_param
= false;
3675 data
->params
[data
->n_params
++] = saved_param
;
3681 static int action_parse(struct trace_array
*tr
, char *str
, struct action_data
*data
,
3682 enum handler_id handler
)
3689 hist_err(tr
, HIST_ERR_ACTION_NOT_FOUND
, 0);
3694 action_name
= strsep(&str
, "(");
3695 if (!action_name
|| !str
) {
3696 hist_err(tr
, HIST_ERR_ACTION_NOT_FOUND
, 0);
3701 if (str_has_prefix(action_name
, "save")) {
3702 char *params
= strsep(&str
, ")");
3705 hist_err(tr
, HIST_ERR_NO_SAVE_PARAMS
, 0);
3710 ret
= parse_action_params(tr
, params
, data
);
3714 if (handler
== HANDLER_ONMAX
)
3715 data
->track_data
.check_val
= check_track_val_max
;
3716 else if (handler
== HANDLER_ONCHANGE
)
3717 data
->track_data
.check_val
= check_track_val_changed
;
3719 hist_err(tr
, HIST_ERR_ACTION_MISMATCH
, errpos(action_name
));
3724 data
->track_data
.save_data
= save_track_data_vars
;
3725 data
->fn
= ontrack_action
;
3726 data
->action
= ACTION_SAVE
;
3727 } else if (str_has_prefix(action_name
, "snapshot")) {
3728 char *params
= strsep(&str
, ")");
3731 hist_err(tr
, HIST_ERR_NO_CLOSING_PAREN
, errpos(params
));
3736 if (handler
== HANDLER_ONMAX
)
3737 data
->track_data
.check_val
= check_track_val_max
;
3738 else if (handler
== HANDLER_ONCHANGE
)
3739 data
->track_data
.check_val
= check_track_val_changed
;
3741 hist_err(tr
, HIST_ERR_ACTION_MISMATCH
, errpos(action_name
));
3746 data
->track_data
.save_data
= save_track_data_snapshot
;
3747 data
->fn
= ontrack_action
;
3748 data
->action
= ACTION_SNAPSHOT
;
3750 char *params
= strsep(&str
, ")");
3752 if (str_has_prefix(action_name
, "trace"))
3753 data
->use_trace_keyword
= true;
3756 ret
= parse_action_params(tr
, params
, data
);
3761 if (handler
== HANDLER_ONMAX
)
3762 data
->track_data
.check_val
= check_track_val_max
;
3763 else if (handler
== HANDLER_ONCHANGE
)
3764 data
->track_data
.check_val
= check_track_val_changed
;
3766 if (handler
!= HANDLER_ONMATCH
) {
3767 data
->track_data
.save_data
= action_trace
;
3768 data
->fn
= ontrack_action
;
3770 data
->fn
= action_trace
;
3772 data
->action
= ACTION_TRACE
;
3775 data
->action_name
= kstrdup(action_name
, GFP_KERNEL
);
3776 if (!data
->action_name
) {
3781 data
->handler
= handler
;
3786 static struct action_data
*track_data_parse(struct hist_trigger_data
*hist_data
,
3787 char *str
, enum handler_id handler
)
3789 struct action_data
*data
;
3793 data
= kzalloc(sizeof(*data
), GFP_KERNEL
);
3795 return ERR_PTR(-ENOMEM
);
3797 var_str
= strsep(&str
, ")");
3798 if (!var_str
|| !str
) {
3803 data
->track_data
.var_str
= kstrdup(var_str
, GFP_KERNEL
);
3804 if (!data
->track_data
.var_str
) {
3809 ret
= action_parse(hist_data
->event_file
->tr
, str
, data
, handler
);
3815 track_data_destroy(hist_data
, data
);
3816 data
= ERR_PTR(ret
);
3820 static void onmatch_destroy(struct action_data
*data
)
3822 kfree(data
->match_data
.event
);
3823 kfree(data
->match_data
.event_system
);
3825 action_data_destroy(data
);
3828 static void destroy_field_var(struct field_var
*field_var
)
3833 destroy_hist_field(field_var
->var
, 0);
3834 destroy_hist_field(field_var
->val
, 0);
3839 static void destroy_field_vars(struct hist_trigger_data
*hist_data
)
3843 for (i
= 0; i
< hist_data
->n_field_vars
; i
++)
3844 destroy_field_var(hist_data
->field_vars
[i
]);
3846 for (i
= 0; i
< hist_data
->n_save_vars
; i
++)
3847 destroy_field_var(hist_data
->save_vars
[i
]);
3850 static void save_field_var(struct hist_trigger_data
*hist_data
,
3851 struct field_var
*field_var
)
3853 hist_data
->field_vars
[hist_data
->n_field_vars
++] = field_var
;
3855 /* Stack traces are saved in the string storage too */
3856 if (field_var
->val
->flags
& (HIST_FIELD_FL_STRING
| HIST_FIELD_FL_STACKTRACE
))
3857 hist_data
->n_field_var_str
++;
3861 static int check_synth_field(struct synth_event
*event
,
3862 struct hist_field
*hist_field
,
3863 unsigned int field_pos
)
3865 struct synth_field
*field
;
3867 if (field_pos
>= event
->n_fields
)
3870 field
= event
->fields
[field_pos
];
3873 * A dynamic string synth field can accept static or
3874 * dynamic. A static string synth field can only accept a
3875 * same-sized static string, which is checked for later.
3877 if (strstr(hist_field
->type
, "char[") && field
->is_string
3878 && field
->is_dynamic
)
3881 if (strstr(hist_field
->type
, "long[") && field
->is_stack
)
3884 if (strcmp(field
->type
, hist_field
->type
) != 0) {
3885 if (field
->size
!= hist_field
->size
||
3886 (!field
->is_string
&& field
->is_signed
!= hist_field
->is_signed
))
3893 static struct hist_field
*
3894 trace_action_find_var(struct hist_trigger_data
*hist_data
,
3895 struct action_data
*data
,
3896 char *system
, char *event
, char *var
)
3898 struct trace_array
*tr
= hist_data
->event_file
->tr
;
3899 struct hist_field
*hist_field
;
3901 var
++; /* skip '$' */
3903 hist_field
= find_target_event_var(hist_data
, system
, event
, var
);
3905 if (!system
&& data
->handler
== HANDLER_ONMATCH
) {
3906 system
= data
->match_data
.event_system
;
3907 event
= data
->match_data
.event
;
3910 hist_field
= find_event_var(hist_data
, system
, event
, var
);
3914 hist_err(tr
, HIST_ERR_PARAM_NOT_FOUND
, errpos(var
));
3919 static struct hist_field
*
3920 trace_action_create_field_var(struct hist_trigger_data
*hist_data
,
3921 struct action_data
*data
, char *system
,
3922 char *event
, char *var
)
3924 struct hist_field
*hist_field
= NULL
;
3925 struct field_var
*field_var
;
3928 * First try to create a field var on the target event (the
3929 * currently being defined). This will create a variable for
3930 * unqualified fields on the target event, or if qualified,
3931 * target fields that have qualified names matching the target.
3933 field_var
= create_target_field_var(hist_data
, system
, event
, var
);
3935 if (field_var
&& !IS_ERR(field_var
)) {
3936 save_field_var(hist_data
, field_var
);
3937 hist_field
= field_var
->var
;
3941 * If no explicit system.event is specified, default to
3942 * looking for fields on the onmatch(system.event.xxx)
3945 if (!system
&& data
->handler
== HANDLER_ONMATCH
) {
3946 system
= data
->match_data
.event_system
;
3947 event
= data
->match_data
.event
;
3953 * At this point, we're looking at a field on another
3954 * event. Because we can't modify a hist trigger on
3955 * another event to add a variable for a field, we need
3956 * to create a new trigger on that event and create the
3957 * variable at the same time.
3959 hist_field
= create_field_var_hist(hist_data
, system
, event
, var
);
3960 if (IS_ERR(hist_field
))
3966 destroy_field_var(field_var
);
3971 static int trace_action_create(struct hist_trigger_data
*hist_data
,
3972 struct action_data
*data
)
3974 struct trace_array
*tr
= hist_data
->event_file
->tr
;
3975 char *event_name
, *param
, *system
= NULL
;
3976 struct hist_field
*hist_field
, *var_ref
;
3978 unsigned int field_pos
= 0;
3979 struct synth_event
*event
;
3980 char *synth_event_name
;
3981 int var_ref_idx
, ret
= 0;
3983 lockdep_assert_held(&event_mutex
);
3985 /* Sanity check to avoid out-of-bound write on 'data->var_ref_idx' */
3986 if (data
->n_params
> SYNTH_FIELDS_MAX
)
3989 if (data
->use_trace_keyword
)
3990 synth_event_name
= data
->synth_event_name
;
3992 synth_event_name
= data
->action_name
;
3994 event
= find_synth_event(synth_event_name
);
3996 hist_err(tr
, HIST_ERR_SYNTH_EVENT_NOT_FOUND
, errpos(synth_event_name
));
4002 for (i
= 0; i
< data
->n_params
; i
++) {
4005 p
= param
= kstrdup(data
->params
[i
], GFP_KERNEL
);
4011 system
= strsep(¶m
, ".");
4013 param
= (char *)system
;
4014 system
= event_name
= NULL
;
4016 event_name
= strsep(¶m
, ".");
4024 if (param
[0] == '$')
4025 hist_field
= trace_action_find_var(hist_data
, data
,
4029 hist_field
= trace_action_create_field_var(hist_data
,
4041 if (check_synth_field(event
, hist_field
, field_pos
) == 0) {
4042 var_ref
= create_var_ref(hist_data
, hist_field
,
4043 system
, event_name
);
4050 var_ref_idx
= find_var_ref_idx(hist_data
, var_ref
);
4051 if (WARN_ON(var_ref_idx
< 0)) {
4057 data
->var_ref_idx
[i
] = var_ref_idx
;
4064 hist_err(tr
, HIST_ERR_SYNTH_TYPE_MISMATCH
, errpos(param
));
4070 if (field_pos
!= event
->n_fields
) {
4071 hist_err(tr
, HIST_ERR_SYNTH_COUNT_MISMATCH
, errpos(event
->name
));
4076 data
->synth_event
= event
;
4085 static int action_create(struct hist_trigger_data
*hist_data
,
4086 struct action_data
*data
)
4088 struct trace_event_file
*file
= hist_data
->event_file
;
4089 struct trace_array
*tr
= file
->tr
;
4090 struct track_data
*track_data
;
4091 struct field_var
*field_var
;
4096 if (data
->action
== ACTION_TRACE
)
4097 return trace_action_create(hist_data
, data
);
4099 if (data
->action
== ACTION_SNAPSHOT
) {
4100 track_data
= track_data_alloc(hist_data
->key_size
, data
, hist_data
);
4101 if (IS_ERR(track_data
)) {
4102 ret
= PTR_ERR(track_data
);
4106 ret
= tracing_snapshot_cond_enable(file
->tr
, track_data
,
4107 cond_snapshot_update
);
4109 track_data_free(track_data
);
4114 if (data
->action
== ACTION_SAVE
) {
4115 if (hist_data
->n_save_vars
) {
4117 hist_err(tr
, HIST_ERR_TOO_MANY_SAVE_ACTIONS
, 0);
4121 for (i
= 0; i
< data
->n_params
; i
++) {
4122 param
= kstrdup(data
->params
[i
], GFP_KERNEL
);
4128 field_var
= create_target_field_var(hist_data
, NULL
, NULL
, param
);
4129 if (IS_ERR(field_var
)) {
4130 hist_err(tr
, HIST_ERR_FIELD_VAR_CREATE_FAIL
,
4132 ret
= PTR_ERR(field_var
);
4137 hist_data
->save_vars
[hist_data
->n_save_vars
++] = field_var
;
4138 if (field_var
->val
->flags
&
4139 (HIST_FIELD_FL_STRING
| HIST_FIELD_FL_STACKTRACE
))
4140 hist_data
->n_save_var_str
++;
/* onmatch has no handler-specific setup; create the action directly. */
static int onmatch_create(struct hist_trigger_data *hist_data,
			  struct action_data *data)
{
	return action_create(hist_data, data);
}
4154 static struct action_data
*onmatch_parse(struct trace_array
*tr
, char *str
)
4156 char *match_event
, *match_event_system
;
4157 struct action_data
*data
;
4160 data
= kzalloc(sizeof(*data
), GFP_KERNEL
);
4162 return ERR_PTR(-ENOMEM
);
4164 match_event
= strsep(&str
, ")");
4165 if (!match_event
|| !str
) {
4166 hist_err(tr
, HIST_ERR_NO_CLOSING_PAREN
, errpos(match_event
));
4170 match_event_system
= strsep(&match_event
, ".");
4172 hist_err(tr
, HIST_ERR_SUBSYS_NOT_FOUND
, errpos(match_event_system
));
4176 if (IS_ERR(event_file(tr
, match_event_system
, match_event
))) {
4177 hist_err(tr
, HIST_ERR_INVALID_SUBSYS_EVENT
, errpos(match_event
));
4181 data
->match_data
.event
= kstrdup(match_event
, GFP_KERNEL
);
4182 if (!data
->match_data
.event
) {
4187 data
->match_data
.event_system
= kstrdup(match_event_system
, GFP_KERNEL
);
4188 if (!data
->match_data
.event_system
) {
4193 ret
= action_parse(tr
, str
, data
, HANDLER_ONMATCH
);
4199 onmatch_destroy(data
);
4200 data
= ERR_PTR(ret
);
4204 static int create_hitcount_val(struct hist_trigger_data
*hist_data
)
4206 hist_data
->fields
[HITCOUNT_IDX
] =
4207 create_hist_field(hist_data
, NULL
, HIST_FIELD_FL_HITCOUNT
, NULL
);
4208 if (!hist_data
->fields
[HITCOUNT_IDX
])
4211 hist_data
->n_vals
++;
4212 hist_data
->n_fields
++;
4214 if (WARN_ON(hist_data
->n_vals
> TRACING_MAP_VALS_MAX
))
4220 static int __create_val_field(struct hist_trigger_data
*hist_data
,
4221 unsigned int val_idx
,
4222 struct trace_event_file
*file
,
4223 char *var_name
, char *field_str
,
4224 unsigned long flags
)
4226 struct hist_field
*hist_field
;
4227 int ret
= 0, n_subexprs
= 0;
4229 hist_field
= parse_expr(hist_data
, file
, field_str
, flags
, var_name
, &n_subexprs
);
4230 if (IS_ERR(hist_field
)) {
4231 ret
= PTR_ERR(hist_field
);
4235 /* values and variables should not have some modifiers */
4236 if (hist_field
->flags
& HIST_FIELD_FL_VAR
) {
4238 if (hist_field
->flags
& (HIST_FIELD_FL_GRAPH
| HIST_FIELD_FL_PERCENT
|
4239 HIST_FIELD_FL_BUCKET
| HIST_FIELD_FL_LOG2
))
4243 if (hist_field
->flags
& (HIST_FIELD_FL_GRAPH
| HIST_FIELD_FL_PERCENT
|
4244 HIST_FIELD_FL_BUCKET
| HIST_FIELD_FL_LOG2
|
4245 HIST_FIELD_FL_SYM
| HIST_FIELD_FL_SYM_OFFSET
|
4246 HIST_FIELD_FL_SYSCALL
| HIST_FIELD_FL_STACKTRACE
))
4250 hist_data
->fields
[val_idx
] = hist_field
;
4252 ++hist_data
->n_vals
;
4253 ++hist_data
->n_fields
;
4255 if (WARN_ON(hist_data
->n_vals
> TRACING_MAP_VALS_MAX
+ TRACING_MAP_VARS_MAX
))
4260 hist_err(file
->tr
, HIST_ERR_BAD_FIELD_MODIFIER
, errpos(field_str
));
4264 static int create_val_field(struct hist_trigger_data
*hist_data
,
4265 unsigned int val_idx
,
4266 struct trace_event_file
*file
,
4269 if (WARN_ON(val_idx
>= TRACING_MAP_VALS_MAX
))
4272 return __create_val_field(hist_data
, val_idx
, file
, NULL
, field_str
, 0);
/* Fallback command name returned when an element has no saved comm. */
static const char no_comm
[] = "(no comm)";
4277 static u64
hist_field_execname(struct hist_field
*hist_field
,
4278 struct tracing_map_elt
*elt
,
4279 struct trace_buffer
*buffer
,
4280 struct ring_buffer_event
*rbe
,
4283 struct hist_elt_data
*elt_data
;
4285 if (WARN_ON_ONCE(!elt
))
4286 return (u64
)(unsigned long)no_comm
;
4288 elt_data
= elt
->private_data
;
4290 if (WARN_ON_ONCE(!elt_data
->comm
))
4291 return (u64
)(unsigned long)no_comm
;
4293 return (u64
)(unsigned long)(elt_data
->comm
);
4296 static u64
hist_field_stack(struct hist_field
*hist_field
,
4297 struct tracing_map_elt
*elt
,
4298 struct trace_buffer
*buffer
,
4299 struct ring_buffer_event
*rbe
,
4302 u32 str_item
= *(u32
*)(event
+ hist_field
->field
->offset
);
4303 int str_loc
= str_item
& 0xffff;
4304 char *addr
= (char *)(event
+ str_loc
);
4306 return (u64
)(unsigned long)addr
;
4309 static u64
hist_fn_call(struct hist_field
*hist_field
,
4310 struct tracing_map_elt
*elt
,
4311 struct trace_buffer
*buffer
,
4312 struct ring_buffer_event
*rbe
,
4315 switch (hist_field
->fn_num
) {
4316 case HIST_FIELD_FN_VAR_REF
:
4317 return hist_field_var_ref(hist_field
, elt
, buffer
, rbe
, event
);
4318 case HIST_FIELD_FN_COUNTER
:
4319 return hist_field_counter(hist_field
, elt
, buffer
, rbe
, event
);
4320 case HIST_FIELD_FN_CONST
:
4321 return hist_field_const(hist_field
, elt
, buffer
, rbe
, event
);
4322 case HIST_FIELD_FN_LOG2
:
4323 return hist_field_log2(hist_field
, elt
, buffer
, rbe
, event
);
4324 case HIST_FIELD_FN_BUCKET
:
4325 return hist_field_bucket(hist_field
, elt
, buffer
, rbe
, event
);
4326 case HIST_FIELD_FN_TIMESTAMP
:
4327 return hist_field_timestamp(hist_field
, elt
, buffer
, rbe
, event
);
4328 case HIST_FIELD_FN_CPU
:
4329 return hist_field_cpu(hist_field
, elt
, buffer
, rbe
, event
);
4330 case HIST_FIELD_FN_STRING
:
4331 return hist_field_string(hist_field
, elt
, buffer
, rbe
, event
);
4332 case HIST_FIELD_FN_DYNSTRING
:
4333 return hist_field_dynstring(hist_field
, elt
, buffer
, rbe
, event
);
4334 case HIST_FIELD_FN_RELDYNSTRING
:
4335 return hist_field_reldynstring(hist_field
, elt
, buffer
, rbe
, event
);
4336 case HIST_FIELD_FN_PSTRING
:
4337 return hist_field_pstring(hist_field
, elt
, buffer
, rbe
, event
);
4338 case HIST_FIELD_FN_S64
:
4339 return hist_field_s64(hist_field
, elt
, buffer
, rbe
, event
);
4340 case HIST_FIELD_FN_U64
:
4341 return hist_field_u64(hist_field
, elt
, buffer
, rbe
, event
);
4342 case HIST_FIELD_FN_S32
:
4343 return hist_field_s32(hist_field
, elt
, buffer
, rbe
, event
);
4344 case HIST_FIELD_FN_U32
:
4345 return hist_field_u32(hist_field
, elt
, buffer
, rbe
, event
);
4346 case HIST_FIELD_FN_S16
:
4347 return hist_field_s16(hist_field
, elt
, buffer
, rbe
, event
);
4348 case HIST_FIELD_FN_U16
:
4349 return hist_field_u16(hist_field
, elt
, buffer
, rbe
, event
);
4350 case HIST_FIELD_FN_S8
:
4351 return hist_field_s8(hist_field
, elt
, buffer
, rbe
, event
);
4352 case HIST_FIELD_FN_U8
:
4353 return hist_field_u8(hist_field
, elt
, buffer
, rbe
, event
);
4354 case HIST_FIELD_FN_UMINUS
:
4355 return hist_field_unary_minus(hist_field
, elt
, buffer
, rbe
, event
);
4356 case HIST_FIELD_FN_MINUS
:
4357 return hist_field_minus(hist_field
, elt
, buffer
, rbe
, event
);
4358 case HIST_FIELD_FN_PLUS
:
4359 return hist_field_plus(hist_field
, elt
, buffer
, rbe
, event
);
4360 case HIST_FIELD_FN_DIV
:
4361 return hist_field_div(hist_field
, elt
, buffer
, rbe
, event
);
4362 case HIST_FIELD_FN_MULT
:
4363 return hist_field_mult(hist_field
, elt
, buffer
, rbe
, event
);
4364 case HIST_FIELD_FN_DIV_POWER2
:
4365 return div_by_power_of_two(hist_field
, elt
, buffer
, rbe
, event
);
4366 case HIST_FIELD_FN_DIV_NOT_POWER2
:
4367 return div_by_not_power_of_two(hist_field
, elt
, buffer
, rbe
, event
);
4368 case HIST_FIELD_FN_DIV_MULT_SHIFT
:
4369 return div_by_mult_and_shift(hist_field
, elt
, buffer
, rbe
, event
);
4370 case HIST_FIELD_FN_EXECNAME
:
4371 return hist_field_execname(hist_field
, elt
, buffer
, rbe
, event
);
4372 case HIST_FIELD_FN_STACK
:
4373 return hist_field_stack(hist_field
, elt
, buffer
, rbe
, event
);
4379 /* Convert a var that points to common_pid.execname to a string */
4380 static void update_var_execname(struct hist_field
*hist_field
)
4382 hist_field
->flags
= HIST_FIELD_FL_STRING
| HIST_FIELD_FL_VAR
|
4383 HIST_FIELD_FL_EXECNAME
;
4384 hist_field
->size
= MAX_FILTER_STR_VAL
;
4385 hist_field
->is_signed
= 0;
4387 kfree_const(hist_field
->type
);
4388 hist_field
->type
= "char[]";
4390 hist_field
->fn_num
= HIST_FIELD_FN_EXECNAME
;
4393 static int create_var_field(struct hist_trigger_data
*hist_data
,
4394 unsigned int val_idx
,
4395 struct trace_event_file
*file
,
4396 char *var_name
, char *expr_str
)
4398 struct trace_array
*tr
= hist_data
->event_file
->tr
;
4399 unsigned long flags
= 0;
4402 if (WARN_ON(val_idx
>= TRACING_MAP_VALS_MAX
+ TRACING_MAP_VARS_MAX
))
4405 if (find_var(hist_data
, file
, var_name
) && !hist_data
->remove
) {
4406 hist_err(tr
, HIST_ERR_DUPLICATE_VAR
, errpos(var_name
));
4410 flags
|= HIST_FIELD_FL_VAR
;
4411 hist_data
->n_vars
++;
4412 if (WARN_ON(hist_data
->n_vars
> TRACING_MAP_VARS_MAX
))
4415 ret
= __create_val_field(hist_data
, val_idx
, file
, var_name
, expr_str
, flags
);
4417 if (!ret
&& hist_data
->fields
[val_idx
]->flags
& HIST_FIELD_FL_EXECNAME
)
4418 update_var_execname(hist_data
->fields
[val_idx
]);
4420 if (!ret
&& hist_data
->fields
[val_idx
]->flags
&
4421 (HIST_FIELD_FL_STRING
| HIST_FIELD_FL_STACKTRACE
))
4422 hist_data
->fields
[val_idx
]->var_str_idx
= hist_data
->n_var_str
++;
4427 static int create_val_fields(struct hist_trigger_data
*hist_data
,
4428 struct trace_event_file
*file
)
4430 unsigned int i
, j
= 1, n_hitcount
= 0;
4431 char *fields_str
, *field_str
;
4434 ret
= create_hitcount_val(hist_data
);
4438 fields_str
= hist_data
->attrs
->vals_str
;
4442 for (i
= 0, j
= 1; i
< TRACING_MAP_VALS_MAX
&&
4443 j
< TRACING_MAP_VALS_MAX
; i
++) {
4444 field_str
= strsep(&fields_str
, ",");
4448 if (strcmp(field_str
, "hitcount") == 0) {
4453 ret
= create_val_field(hist_data
, j
++, file
, field_str
);
4458 if (fields_str
&& (strcmp(fields_str
, "hitcount") != 0))
4461 /* There is only raw hitcount but nohitcount suppresses it. */
4462 if (j
== 1 && hist_data
->attrs
->no_hitcount
) {
4463 hist_err(hist_data
->event_file
->tr
, HIST_ERR_NEED_NOHC_VAL
, 0);
4470 static int create_key_field(struct hist_trigger_data
*hist_data
,
4471 unsigned int key_idx
,
4472 unsigned int key_offset
,
4473 struct trace_event_file
*file
,
4476 struct trace_array
*tr
= hist_data
->event_file
->tr
;
4477 struct hist_field
*hist_field
= NULL
;
4478 unsigned long flags
= 0;
4479 unsigned int key_size
;
4480 int ret
= 0, n_subexprs
= 0;
4482 if (WARN_ON(key_idx
>= HIST_FIELDS_MAX
))
4485 flags
|= HIST_FIELD_FL_KEY
;
4487 if (strcmp(field_str
, "stacktrace") == 0) {
4488 flags
|= HIST_FIELD_FL_STACKTRACE
;
4489 key_size
= sizeof(unsigned long) * HIST_STACKTRACE_DEPTH
;
4490 hist_field
= create_hist_field(hist_data
, NULL
, flags
, NULL
);
4492 hist_field
= parse_expr(hist_data
, file
, field_str
, flags
,
4494 if (IS_ERR(hist_field
)) {
4495 ret
= PTR_ERR(hist_field
);
4499 if (field_has_hist_vars(hist_field
, 0)) {
4500 hist_err(tr
, HIST_ERR_INVALID_REF_KEY
, errpos(field_str
));
4501 destroy_hist_field(hist_field
, 0);
4506 key_size
= hist_field
->size
;
4509 hist_data
->fields
[key_idx
] = hist_field
;
4511 key_size
= ALIGN(key_size
, sizeof(u64
));
4512 hist_data
->fields
[key_idx
]->size
= key_size
;
4513 hist_data
->fields
[key_idx
]->offset
= key_offset
;
4515 hist_data
->key_size
+= key_size
;
4517 if (hist_data
->key_size
> HIST_KEY_SIZE_MAX
) {
4522 hist_data
->n_keys
++;
4523 hist_data
->n_fields
++;
4525 if (WARN_ON(hist_data
->n_keys
> TRACING_MAP_KEYS_MAX
))
4533 static int create_key_fields(struct hist_trigger_data
*hist_data
,
4534 struct trace_event_file
*file
)
4536 unsigned int i
, key_offset
= 0, n_vals
= hist_data
->n_vals
;
4537 char *fields_str
, *field_str
;
4540 fields_str
= hist_data
->attrs
->keys_str
;
4544 for (i
= n_vals
; i
< n_vals
+ TRACING_MAP_KEYS_MAX
; i
++) {
4545 field_str
= strsep(&fields_str
, ",");
4548 ret
= create_key_field(hist_data
, i
, key_offset
,
4563 static int create_var_fields(struct hist_trigger_data
*hist_data
,
4564 struct trace_event_file
*file
)
4566 unsigned int i
, j
= hist_data
->n_vals
;
4569 unsigned int n_vars
= hist_data
->attrs
->var_defs
.n_vars
;
4571 for (i
= 0; i
< n_vars
; i
++) {
4572 char *var_name
= hist_data
->attrs
->var_defs
.name
[i
];
4573 char *expr
= hist_data
->attrs
->var_defs
.expr
[i
];
4575 ret
= create_var_field(hist_data
, j
++, file
, var_name
, expr
);
4583 static void free_var_defs(struct hist_trigger_data
*hist_data
)
4587 for (i
= 0; i
< hist_data
->attrs
->var_defs
.n_vars
; i
++) {
4588 kfree(hist_data
->attrs
->var_defs
.name
[i
]);
4589 kfree(hist_data
->attrs
->var_defs
.expr
[i
]);
4592 hist_data
->attrs
->var_defs
.n_vars
= 0;
4595 static int parse_var_defs(struct hist_trigger_data
*hist_data
)
4597 struct trace_array
*tr
= hist_data
->event_file
->tr
;
4598 char *s
, *str
, *var_name
, *field_str
;
4599 unsigned int i
, j
, n_vars
= 0;
4602 for (i
= 0; i
< hist_data
->attrs
->n_assignments
; i
++) {
4603 str
= hist_data
->attrs
->assignment_str
[i
];
4604 for (j
= 0; j
< TRACING_MAP_VARS_MAX
; j
++) {
4605 field_str
= strsep(&str
, ",");
4609 var_name
= strsep(&field_str
, "=");
4610 if (!var_name
|| !field_str
) {
4611 hist_err(tr
, HIST_ERR_MALFORMED_ASSIGNMENT
,
4617 if (n_vars
== TRACING_MAP_VARS_MAX
) {
4618 hist_err(tr
, HIST_ERR_TOO_MANY_VARS
, errpos(var_name
));
4623 s
= kstrdup(var_name
, GFP_KERNEL
);
4628 hist_data
->attrs
->var_defs
.name
[n_vars
] = s
;
4630 s
= kstrdup(field_str
, GFP_KERNEL
);
4632 kfree(hist_data
->attrs
->var_defs
.name
[n_vars
]);
4633 hist_data
->attrs
->var_defs
.name
[n_vars
] = NULL
;
4637 hist_data
->attrs
->var_defs
.expr
[n_vars
++] = s
;
4639 hist_data
->attrs
->var_defs
.n_vars
= n_vars
;
4645 free_var_defs(hist_data
);
/* Build all fields: vals, then vars, then keys; var defs are
 * temporary parse state and are always freed afterwards. */
static int create_hist_fields(struct hist_trigger_data *hist_data,
			      struct trace_event_file *file)
{
	int ret;

	ret = parse_var_defs(hist_data);
	if (ret)
		return ret;

	ret = create_val_fields(hist_data, file);
	if (ret)
		goto out;

	ret = create_var_fields(hist_data, file);
	if (ret)
		goto out;

	ret = create_key_fields(hist_data, file);

 out:
	free_var_defs(hist_data);

	return ret;
}
4675 static int is_descending(struct trace_array
*tr
, const char *str
)
4680 if (strcmp(str
, "descending") == 0)
4683 if (strcmp(str
, "ascending") == 0)
4686 hist_err(tr
, HIST_ERR_INVALID_SORT_MODIFIER
, errpos((char *)str
));
4691 static int create_sort_keys(struct hist_trigger_data
*hist_data
)
4693 struct trace_array
*tr
= hist_data
->event_file
->tr
;
4694 char *fields_str
= hist_data
->attrs
->sort_key_str
;
4695 struct tracing_map_sort_key
*sort_key
;
4696 int descending
, ret
= 0;
4697 unsigned int i
, j
, k
;
4699 hist_data
->n_sort_keys
= 1; /* we always have at least one, hitcount */
4704 for (i
= 0; i
< TRACING_MAP_SORT_KEYS_MAX
; i
++) {
4705 struct hist_field
*hist_field
;
4706 char *field_str
, *field_name
;
4707 const char *test_name
;
4709 sort_key
= &hist_data
->sort_keys
[i
];
4711 field_str
= strsep(&fields_str
, ",");
4717 hist_err(tr
, HIST_ERR_EMPTY_SORT_FIELD
, errpos("sort="));
4721 if ((i
== TRACING_MAP_SORT_KEYS_MAX
- 1) && fields_str
) {
4722 hist_err(tr
, HIST_ERR_TOO_MANY_SORT_FIELDS
, errpos("sort="));
4727 field_name
= strsep(&field_str
, ".");
4728 if (!field_name
|| !*field_name
) {
4730 hist_err(tr
, HIST_ERR_EMPTY_SORT_FIELD
, errpos("sort="));
4734 if (strcmp(field_name
, "hitcount") == 0) {
4735 descending
= is_descending(tr
, field_str
);
4736 if (descending
< 0) {
4740 sort_key
->descending
= descending
;
4744 for (j
= 1, k
= 1; j
< hist_data
->n_fields
; j
++) {
4747 hist_field
= hist_data
->fields
[j
];
4748 if (hist_field
->flags
& HIST_FIELD_FL_VAR
)
4753 test_name
= hist_field_name(hist_field
, 0);
4755 if (strcmp(field_name
, test_name
) == 0) {
4756 sort_key
->field_idx
= idx
;
4757 descending
= is_descending(tr
, field_str
);
4758 if (descending
< 0) {
4762 sort_key
->descending
= descending
;
4766 if (j
== hist_data
->n_fields
) {
4768 hist_err(tr
, HIST_ERR_INVALID_SORT_FIELD
, errpos(field_name
));
4773 hist_data
->n_sort_keys
= i
;
4778 static void destroy_actions(struct hist_trigger_data
*hist_data
)
4782 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
4783 struct action_data
*data
= hist_data
->actions
[i
];
4785 if (data
->handler
== HANDLER_ONMATCH
)
4786 onmatch_destroy(data
);
4787 else if (data
->handler
== HANDLER_ONMAX
||
4788 data
->handler
== HANDLER_ONCHANGE
)
4789 track_data_destroy(hist_data
, data
);
4795 static int parse_actions(struct hist_trigger_data
*hist_data
)
4797 struct trace_array
*tr
= hist_data
->event_file
->tr
;
4798 struct action_data
*data
;
4804 for (i
= 0; i
< hist_data
->attrs
->n_actions
; i
++) {
4805 enum handler_id hid
= 0;
4808 str
= hist_data
->attrs
->action_str
[i
];
4810 if ((len
= str_has_prefix(str
, "onmatch(")))
4811 hid
= HANDLER_ONMATCH
;
4812 else if ((len
= str_has_prefix(str
, "onmax(")))
4813 hid
= HANDLER_ONMAX
;
4814 else if ((len
= str_has_prefix(str
, "onchange(")))
4815 hid
= HANDLER_ONCHANGE
;
4817 action_str
= str
+ len
;
4820 case HANDLER_ONMATCH
:
4821 data
= onmatch_parse(tr
, action_str
);
4824 case HANDLER_ONCHANGE
:
4825 data
= track_data_parse(hist_data
, action_str
, hid
);
4828 data
= ERR_PTR(-EINVAL
);
4833 ret
= PTR_ERR(data
);
4837 hist_data
->actions
[hist_data
->n_actions
++] = data
;
4843 static int create_actions(struct hist_trigger_data
*hist_data
)
4845 struct action_data
*data
;
4849 for (i
= 0; i
< hist_data
->attrs
->n_actions
; i
++) {
4850 data
= hist_data
->actions
[i
];
4852 if (data
->handler
== HANDLER_ONMATCH
) {
4853 ret
= onmatch_create(hist_data
, data
);
4856 } else if (data
->handler
== HANDLER_ONMAX
||
4857 data
->handler
== HANDLER_ONCHANGE
) {
4858 ret
= track_data_create(hist_data
, data
);
4870 static void print_actions(struct seq_file
*m
,
4871 struct hist_trigger_data
*hist_data
,
4872 struct tracing_map_elt
*elt
)
4876 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
4877 struct action_data
*data
= hist_data
->actions
[i
];
4879 if (data
->action
== ACTION_SNAPSHOT
)
4882 if (data
->handler
== HANDLER_ONMAX
||
4883 data
->handler
== HANDLER_ONCHANGE
)
4884 track_data_print(m
, hist_data
, elt
, data
);
4888 static void print_action_spec(struct seq_file
*m
,
4889 struct hist_trigger_data
*hist_data
,
4890 struct action_data
*data
)
4894 if (data
->action
== ACTION_SAVE
) {
4895 for (i
= 0; i
< hist_data
->n_save_vars
; i
++) {
4896 seq_printf(m
, "%s", hist_data
->save_vars
[i
]->var
->var
.name
);
4897 if (i
< hist_data
->n_save_vars
- 1)
4900 } else if (data
->action
== ACTION_TRACE
) {
4901 if (data
->use_trace_keyword
)
4902 seq_printf(m
, "%s", data
->synth_event_name
);
4903 for (i
= 0; i
< data
->n_params
; i
++) {
4904 if (i
|| data
->use_trace_keyword
)
4906 seq_printf(m
, "%s", data
->params
[i
]);
4911 static void print_track_data_spec(struct seq_file
*m
,
4912 struct hist_trigger_data
*hist_data
,
4913 struct action_data
*data
)
4915 if (data
->handler
== HANDLER_ONMAX
)
4916 seq_puts(m
, ":onmax(");
4917 else if (data
->handler
== HANDLER_ONCHANGE
)
4918 seq_puts(m
, ":onchange(");
4919 seq_printf(m
, "%s", data
->track_data
.var_str
);
4920 seq_printf(m
, ").%s(", data
->action_name
);
4922 print_action_spec(m
, hist_data
, data
);
4927 static void print_onmatch_spec(struct seq_file
*m
,
4928 struct hist_trigger_data
*hist_data
,
4929 struct action_data
*data
)
4931 seq_printf(m
, ":onmatch(%s.%s).", data
->match_data
.event_system
,
4932 data
->match_data
.event
);
4934 seq_printf(m
, "%s(", data
->action_name
);
4936 print_action_spec(m
, hist_data
, data
);
4941 static bool actions_match(struct hist_trigger_data
*hist_data
,
4942 struct hist_trigger_data
*hist_data_test
)
4946 if (hist_data
->n_actions
!= hist_data_test
->n_actions
)
4949 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
4950 struct action_data
*data
= hist_data
->actions
[i
];
4951 struct action_data
*data_test
= hist_data_test
->actions
[i
];
4952 char *action_name
, *action_name_test
;
4954 if (data
->handler
!= data_test
->handler
)
4956 if (data
->action
!= data_test
->action
)
4959 if (data
->n_params
!= data_test
->n_params
)
4962 for (j
= 0; j
< data
->n_params
; j
++) {
4963 if (strcmp(data
->params
[j
], data_test
->params
[j
]) != 0)
4967 if (data
->use_trace_keyword
)
4968 action_name
= data
->synth_event_name
;
4970 action_name
= data
->action_name
;
4972 if (data_test
->use_trace_keyword
)
4973 action_name_test
= data_test
->synth_event_name
;
4975 action_name_test
= data_test
->action_name
;
4977 if (strcmp(action_name
, action_name_test
) != 0)
4980 if (data
->handler
== HANDLER_ONMATCH
) {
4981 if (strcmp(data
->match_data
.event_system
,
4982 data_test
->match_data
.event_system
) != 0)
4984 if (strcmp(data
->match_data
.event
,
4985 data_test
->match_data
.event
) != 0)
4987 } else if (data
->handler
== HANDLER_ONMAX
||
4988 data
->handler
== HANDLER_ONCHANGE
) {
4989 if (strcmp(data
->track_data
.var_str
,
4990 data_test
->track_data
.var_str
) != 0)
4999 static void print_actions_spec(struct seq_file
*m
,
5000 struct hist_trigger_data
*hist_data
)
5004 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
5005 struct action_data
*data
= hist_data
->actions
[i
];
5007 if (data
->handler
== HANDLER_ONMATCH
)
5008 print_onmatch_spec(m
, hist_data
, data
);
5009 else if (data
->handler
== HANDLER_ONMAX
||
5010 data
->handler
== HANDLER_ONCHANGE
)
5011 print_track_data_spec(m
, hist_data
, data
);
5015 static void destroy_field_var_hists(struct hist_trigger_data
*hist_data
)
5019 for (i
= 0; i
< hist_data
->n_field_var_hists
; i
++) {
5020 kfree(hist_data
->field_var_hists
[i
]->cmd
);
5021 kfree(hist_data
->field_var_hists
[i
]);
5025 static void destroy_hist_data(struct hist_trigger_data
*hist_data
)
5030 destroy_hist_trigger_attrs(hist_data
->attrs
);
5031 destroy_hist_fields(hist_data
);
5032 tracing_map_destroy(hist_data
->map
);
5034 destroy_actions(hist_data
);
5035 destroy_field_vars(hist_data
);
5036 destroy_field_var_hists(hist_data
);
5041 static int create_tracing_map_fields(struct hist_trigger_data
*hist_data
)
5043 struct tracing_map
*map
= hist_data
->map
;
5044 struct ftrace_event_field
*field
;
5045 struct hist_field
*hist_field
;
5048 for_each_hist_field(i
, hist_data
) {
5049 hist_field
= hist_data
->fields
[i
];
5050 if (hist_field
->flags
& HIST_FIELD_FL_KEY
) {
5051 tracing_map_cmp_fn_t cmp_fn
;
5053 field
= hist_field
->field
;
5055 if (hist_field
->flags
& HIST_FIELD_FL_STACKTRACE
)
5056 cmp_fn
= tracing_map_cmp_none
;
5057 else if (!field
|| hist_field
->flags
& HIST_FIELD_FL_CPU
)
5058 cmp_fn
= tracing_map_cmp_num(hist_field
->size
,
5059 hist_field
->is_signed
);
5060 else if (is_string_field(field
))
5061 cmp_fn
= tracing_map_cmp_string
;
5063 cmp_fn
= tracing_map_cmp_num(field
->size
,
5065 idx
= tracing_map_add_key_field(map
,
5068 } else if (!(hist_field
->flags
& HIST_FIELD_FL_VAR
))
5069 idx
= tracing_map_add_sum_field(map
);
5074 if (hist_field
->flags
& HIST_FIELD_FL_VAR
) {
5075 idx
= tracing_map_add_var(map
);
5078 hist_field
->var
.idx
= idx
;
5079 hist_field
->var
.hist_data
= hist_data
;
5086 static struct hist_trigger_data
*
5087 create_hist_data(unsigned int map_bits
,
5088 struct hist_trigger_attrs
*attrs
,
5089 struct trace_event_file
*file
,
5092 const struct tracing_map_ops
*map_ops
= NULL
;
5093 struct hist_trigger_data
*hist_data
;
5096 hist_data
= kzalloc(sizeof(*hist_data
), GFP_KERNEL
);
5098 return ERR_PTR(-ENOMEM
);
5100 hist_data
->attrs
= attrs
;
5101 hist_data
->remove
= remove
;
5102 hist_data
->event_file
= file
;
5104 ret
= parse_actions(hist_data
);
5108 ret
= create_hist_fields(hist_data
, file
);
5112 ret
= create_sort_keys(hist_data
);
5116 map_ops
= &hist_trigger_elt_data_ops
;
5118 hist_data
->map
= tracing_map_create(map_bits
, hist_data
->key_size
,
5119 map_ops
, hist_data
);
5120 if (IS_ERR(hist_data
->map
)) {
5121 ret
= PTR_ERR(hist_data
->map
);
5122 hist_data
->map
= NULL
;
5126 ret
= create_tracing_map_fields(hist_data
);
5132 hist_data
->attrs
= NULL
;
5134 destroy_hist_data(hist_data
);
5136 hist_data
= ERR_PTR(ret
);
5141 static void hist_trigger_elt_update(struct hist_trigger_data
*hist_data
,
5142 struct tracing_map_elt
*elt
,
5143 struct trace_buffer
*buffer
, void *rec
,
5144 struct ring_buffer_event
*rbe
,
5147 struct hist_elt_data
*elt_data
;
5148 struct hist_field
*hist_field
;
5149 unsigned int i
, var_idx
;
5152 elt_data
= elt
->private_data
;
5153 elt_data
->var_ref_vals
= var_ref_vals
;
5155 for_each_hist_val_field(i
, hist_data
) {
5156 hist_field
= hist_data
->fields
[i
];
5157 hist_val
= hist_fn_call(hist_field
, elt
, buffer
, rbe
, rec
);
5158 if (hist_field
->flags
& HIST_FIELD_FL_VAR
) {
5159 var_idx
= hist_field
->var
.idx
;
5161 if (hist_field
->flags
&
5162 (HIST_FIELD_FL_STRING
| HIST_FIELD_FL_STACKTRACE
)) {
5163 unsigned int str_start
, var_str_idx
, idx
;
5164 char *str
, *val_str
;
5167 str_start
= hist_data
->n_field_var_str
+
5168 hist_data
->n_save_var_str
;
5169 var_str_idx
= hist_field
->var_str_idx
;
5170 idx
= str_start
+ var_str_idx
;
5172 str
= elt_data
->field_var_str
[idx
];
5173 val_str
= (char *)(uintptr_t)hist_val
;
5175 if (hist_field
->flags
& HIST_FIELD_FL_STRING
) {
5176 size
= min(hist_field
->size
, STR_VAR_LEN_MAX
);
5177 strscpy(str
, val_str
, size
);
5179 char *stack_start
= str
+ sizeof(unsigned long);
5182 e
= stack_trace_save((void *)stack_start
,
5183 HIST_STACKTRACE_DEPTH
,
5184 HIST_STACKTRACE_SKIP
);
5185 if (e
< HIST_STACKTRACE_DEPTH
- 1)
5186 ((unsigned long *)stack_start
)[e
] = 0;
5187 *((unsigned long *)str
) = e
;
5189 hist_val
= (u64
)(uintptr_t)str
;
5191 tracing_map_set_var(elt
, var_idx
, hist_val
);
5194 tracing_map_update_sum(elt
, i
, hist_val
);
5197 for_each_hist_key_field(i
, hist_data
) {
5198 hist_field
= hist_data
->fields
[i
];
5199 if (hist_field
->flags
& HIST_FIELD_FL_VAR
) {
5200 hist_val
= hist_fn_call(hist_field
, elt
, buffer
, rbe
, rec
);
5201 var_idx
= hist_field
->var
.idx
;
5202 tracing_map_set_var(elt
, var_idx
, hist_val
);
5206 update_field_vars(hist_data
, elt
, buffer
, rbe
, rec
);
5209 static inline void add_to_key(char *compound_key
, void *key
,
5210 struct hist_field
*key_field
, void *rec
)
5212 size_t size
= key_field
->size
;
5214 if (key_field
->flags
& HIST_FIELD_FL_STRING
) {
5215 struct ftrace_event_field
*field
;
5217 field
= key_field
->field
;
5218 if (field
->filter_type
== FILTER_DYN_STRING
||
5219 field
->filter_type
== FILTER_RDYN_STRING
)
5220 size
= *(u32
*)(rec
+ field
->offset
) >> 16;
5221 else if (field
->filter_type
== FILTER_STATIC_STRING
)
5224 /* ensure NULL-termination */
5225 if (size
> key_field
->size
- 1)
5226 size
= key_field
->size
- 1;
5228 strncpy(compound_key
+ key_field
->offset
, (char *)key
, size
);
5230 memcpy(compound_key
+ key_field
->offset
, key
, size
);
5234 hist_trigger_actions(struct hist_trigger_data
*hist_data
,
5235 struct tracing_map_elt
*elt
,
5236 struct trace_buffer
*buffer
, void *rec
,
5237 struct ring_buffer_event
*rbe
, void *key
,
5240 struct action_data
*data
;
5243 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
5244 data
= hist_data
->actions
[i
];
5245 data
->fn(hist_data
, elt
, buffer
, rec
, rbe
, key
, data
, var_ref_vals
);
5249 static void event_hist_trigger(struct event_trigger_data
*data
,
5250 struct trace_buffer
*buffer
, void *rec
,
5251 struct ring_buffer_event
*rbe
)
5253 struct hist_trigger_data
*hist_data
= data
->private_data
;
5254 bool use_compound_key
= (hist_data
->n_keys
> 1);
5255 unsigned long entries
[HIST_STACKTRACE_DEPTH
];
5256 u64 var_ref_vals
[TRACING_MAP_VARS_MAX
];
5257 char compound_key
[HIST_KEY_SIZE_MAX
];
5258 struct tracing_map_elt
*elt
= NULL
;
5259 struct hist_field
*key_field
;
5267 memset(compound_key
, 0, hist_data
->key_size
);
5269 for_each_hist_key_field(i
, hist_data
) {
5270 key_field
= hist_data
->fields
[i
];
5272 if (key_field
->flags
& HIST_FIELD_FL_STACKTRACE
) {
5273 memset(entries
, 0, HIST_STACKTRACE_SIZE
);
5274 if (key_field
->field
) {
5275 unsigned long *stack
, n_entries
;
5277 field_contents
= hist_fn_call(key_field
, elt
, buffer
, rbe
, rec
);
5278 stack
= (unsigned long *)(long)field_contents
;
5280 memcpy(entries
, ++stack
, n_entries
* sizeof(unsigned long));
5282 stack_trace_save(entries
, HIST_STACKTRACE_DEPTH
,
5283 HIST_STACKTRACE_SKIP
);
5287 field_contents
= hist_fn_call(key_field
, elt
, buffer
, rbe
, rec
);
5288 if (key_field
->flags
& HIST_FIELD_FL_STRING
) {
5289 key
= (void *)(unsigned long)field_contents
;
5290 use_compound_key
= true;
5292 key
= (void *)&field_contents
;
5295 if (use_compound_key
)
5296 add_to_key(compound_key
, key
, key_field
, rec
);
5299 if (use_compound_key
)
5302 if (hist_data
->n_var_refs
&&
5303 !resolve_var_refs(hist_data
, key
, var_ref_vals
, false))
5306 elt
= tracing_map_insert(hist_data
->map
, key
);
5310 hist_trigger_elt_update(hist_data
, elt
, buffer
, rec
, rbe
, var_ref_vals
);
5312 if (resolve_var_refs(hist_data
, key
, var_ref_vals
, true))
5313 hist_trigger_actions(hist_data
, elt
, buffer
, rec
, rbe
, key
, var_ref_vals
);
/* Print a saved stacktrace, one symbol per line, stopping at the
 * first zero entry. */
static void hist_trigger_stacktrace_print(struct seq_file *m,
					  unsigned long *stacktrace_entries,
					  unsigned int max_entries)
{
	unsigned int spaces = 8;
	unsigned int i;

	for (i = 0; i < max_entries; i++) {
		if (!stacktrace_entries[i])
			return;

		seq_printf(m, "%*c", 1 + spaces, ' ');
		seq_printf(m, "%pS\n", (void*)stacktrace_entries[i]);
	}
}
5332 static void hist_trigger_print_key(struct seq_file
*m
,
5333 struct hist_trigger_data
*hist_data
,
5335 struct tracing_map_elt
*elt
)
5337 struct hist_field
*key_field
;
5338 bool multiline
= false;
5339 const char *field_name
;
5345 for_each_hist_key_field(i
, hist_data
) {
5346 key_field
= hist_data
->fields
[i
];
5348 if (i
> hist_data
->n_vals
)
5351 field_name
= hist_field_name(key_field
, 0);
5353 if (key_field
->flags
& HIST_FIELD_FL_HEX
) {
5354 uval
= *(u64
*)(key
+ key_field
->offset
);
5355 seq_printf(m
, "%s: %llx", field_name
, uval
);
5356 } else if (key_field
->flags
& HIST_FIELD_FL_SYM
) {
5357 uval
= *(u64
*)(key
+ key_field
->offset
);
5358 seq_printf(m
, "%s: [%llx] %-45ps", field_name
,
5359 uval
, (void *)(uintptr_t)uval
);
5360 } else if (key_field
->flags
& HIST_FIELD_FL_SYM_OFFSET
) {
5361 uval
= *(u64
*)(key
+ key_field
->offset
);
5362 seq_printf(m
, "%s: [%llx] %-55pS", field_name
,
5363 uval
, (void *)(uintptr_t)uval
);
5364 } else if (key_field
->flags
& HIST_FIELD_FL_EXECNAME
) {
5365 struct hist_elt_data
*elt_data
= elt
->private_data
;
5368 if (WARN_ON_ONCE(!elt_data
))
5371 comm
= elt_data
->comm
;
5373 uval
= *(u64
*)(key
+ key_field
->offset
);
5374 seq_printf(m
, "%s: %-16s[%10llu]", field_name
,
5376 } else if (key_field
->flags
& HIST_FIELD_FL_SYSCALL
) {
5377 const char *syscall_name
;
5379 uval
= *(u64
*)(key
+ key_field
->offset
);
5380 syscall_name
= get_syscall_name(uval
);
5382 syscall_name
= "unknown_syscall";
5384 seq_printf(m
, "%s: %-30s[%3llu]", field_name
,
5385 syscall_name
, uval
);
5386 } else if (key_field
->flags
& HIST_FIELD_FL_STACKTRACE
) {
5387 if (key_field
->field
)
5388 seq_printf(m
, "%s.stacktrace", key_field
->field
->name
);
5390 seq_puts(m
, "common_stacktrace:\n");
5391 hist_trigger_stacktrace_print(m
,
5392 key
+ key_field
->offset
,
5393 HIST_STACKTRACE_DEPTH
);
5395 } else if (key_field
->flags
& HIST_FIELD_FL_LOG2
) {
5396 seq_printf(m
, "%s: ~ 2^%-2llu", field_name
,
5397 *(u64
*)(key
+ key_field
->offset
));
5398 } else if (key_field
->flags
& HIST_FIELD_FL_BUCKET
) {
5399 unsigned long buckets
= key_field
->buckets
;
5400 uval
= *(u64
*)(key
+ key_field
->offset
);
5401 seq_printf(m
, "%s: ~ %llu-%llu", field_name
,
5402 uval
, uval
+ buckets
-1);
5403 } else if (key_field
->flags
& HIST_FIELD_FL_STRING
) {
5404 seq_printf(m
, "%s: %-50s", field_name
,
5405 (char *)(key
+ key_field
->offset
));
5407 uval
= *(u64
*)(key
+ key_field
->offset
);
5408 seq_printf(m
, "%s: %10llu", field_name
, uval
);
5418 /* Get the 100 times of the percentage of @val in @total */
5419 static inline unsigned int __get_percentage(u64 val
, u64 total
)
5424 if (val
< (U64_MAX
/ 10000))
5425 return (unsigned int)div64_ul(val
* 10000, total
);
5427 total
= div64_u64(total
, 10000);
5431 return (unsigned int)div64_ul(val
, total
);
5433 return val
? UINT_MAX
: 0;
/* Character used to draw the .graph bar display. */
#define BAR_CHAR '#'
5438 static inline const char *__fill_bar_str(char *buf
, int size
, u64 val
, u64 max
)
5440 unsigned int len
= __get_percentage(val
, max
);
5443 if (len
== UINT_MAX
) {
5444 snprintf(buf
, size
, "[ERROR]");
5448 len
= len
* size
/ 10000;
5449 for (i
= 0; i
< len
&& i
< size
; i
++)
5458 struct hist_val_stat
{
5463 static void hist_trigger_print_val(struct seq_file
*m
, unsigned int idx
,
5464 const char *field_name
, unsigned long flags
,
5465 struct hist_val_stat
*stats
,
5466 struct tracing_map_elt
*elt
)
5468 u64 val
= tracing_map_read_sum(elt
, idx
);
5472 if (flags
& HIST_FIELD_FL_PERCENT
) {
5473 pc
= __get_percentage(val
, stats
[idx
].total
);
5475 seq_printf(m
, " %s (%%):[ERROR]", field_name
);
5477 seq_printf(m
, " %s (%%): %3u.%02u", field_name
,
5478 pc
/ 100, pc
% 100);
5479 } else if (flags
& HIST_FIELD_FL_GRAPH
) {
5480 seq_printf(m
, " %s: %20s", field_name
,
5481 __fill_bar_str(bar
, 20, val
, stats
[idx
].max
));
5482 } else if (flags
& HIST_FIELD_FL_HEX
) {
5483 seq_printf(m
, " %s: %10llx", field_name
, val
);
5485 seq_printf(m
, " %s: %10llu", field_name
, val
);
5489 static void hist_trigger_entry_print(struct seq_file
*m
,
5490 struct hist_trigger_data
*hist_data
,
5491 struct hist_val_stat
*stats
,
5493 struct tracing_map_elt
*elt
)
5495 const char *field_name
;
5496 unsigned int i
= HITCOUNT_IDX
;
5497 unsigned long flags
;
5499 hist_trigger_print_key(m
, hist_data
, key
, elt
);
5501 /* At first, show the raw hitcount if !nohitcount */
5502 if (!hist_data
->attrs
->no_hitcount
)
5503 hist_trigger_print_val(m
, i
, "hitcount", 0, stats
, elt
);
5505 for (i
= 1; i
< hist_data
->n_vals
; i
++) {
5506 field_name
= hist_field_name(hist_data
->fields
[i
], 0);
5507 flags
= hist_data
->fields
[i
]->flags
;
5508 if (flags
& HIST_FIELD_FL_VAR
|| flags
& HIST_FIELD_FL_EXPR
)
5512 hist_trigger_print_val(m
, i
, field_name
, flags
, stats
, elt
);
5515 print_actions(m
, hist_data
, elt
);
5520 static int print_entries(struct seq_file
*m
,
5521 struct hist_trigger_data
*hist_data
)
5523 struct tracing_map_sort_entry
**sort_entries
= NULL
;
5524 struct tracing_map
*map
= hist_data
->map
;
5525 int i
, j
, n_entries
;
5526 struct hist_val_stat
*stats
= NULL
;
5529 n_entries
= tracing_map_sort_entries(map
, hist_data
->sort_keys
,
5530 hist_data
->n_sort_keys
,
5535 /* Calculate the max and the total for each field if needed. */
5536 for (j
= 0; j
< hist_data
->n_vals
; j
++) {
5537 if (!(hist_data
->fields
[j
]->flags
&
5538 (HIST_FIELD_FL_PERCENT
| HIST_FIELD_FL_GRAPH
)))
5541 stats
= kcalloc(hist_data
->n_vals
, sizeof(*stats
),
5544 n_entries
= -ENOMEM
;
5548 for (i
= 0; i
< n_entries
; i
++) {
5549 val
= tracing_map_read_sum(sort_entries
[i
]->elt
, j
);
5550 stats
[j
].total
+= val
;
5551 if (stats
[j
].max
< val
)
5556 for (i
= 0; i
< n_entries
; i
++)
5557 hist_trigger_entry_print(m
, hist_data
, stats
,
5558 sort_entries
[i
]->key
,
5559 sort_entries
[i
]->elt
);
5563 tracing_map_destroy_sort_entries(sort_entries
, n_entries
);
5568 static void hist_trigger_show(struct seq_file
*m
,
5569 struct event_trigger_data
*data
, int n
)
5571 struct hist_trigger_data
*hist_data
;
5575 seq_puts(m
, "\n\n");
5577 seq_puts(m
, "# event histogram\n#\n# trigger info: ");
5578 data
->ops
->print(m
, data
);
5579 seq_puts(m
, "#\n\n");
5581 hist_data
= data
->private_data
;
5582 n_entries
= print_entries(m
, hist_data
);
5586 track_data_snapshot_print(m
, hist_data
);
5588 seq_printf(m
, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n",
5589 (u64
)atomic64_read(&hist_data
->map
->hits
),
5590 n_entries
, (u64
)atomic64_read(&hist_data
->map
->drops
));
5593 static int hist_show(struct seq_file
*m
, void *v
)
5595 struct event_trigger_data
*data
;
5596 struct trace_event_file
*event_file
;
5599 mutex_lock(&event_mutex
);
5601 event_file
= event_file_file(m
->private);
5602 if (unlikely(!event_file
)) {
5607 list_for_each_entry(data
, &event_file
->triggers
, list
) {
5608 if (data
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
)
5609 hist_trigger_show(m
, data
, n
++);
5613 mutex_unlock(&event_mutex
);
5618 static int event_hist_open(struct inode
*inode
, struct file
*file
)
5622 ret
= tracing_open_file_tr(inode
, file
);
5626 /* Clear private_data to avoid warning in single_open() */
5627 file
->private_data
= NULL
;
5628 return single_open(file
, hist_show
, file
);
5631 const struct file_operations event_hist_fops
= {
5632 .open
= event_hist_open
,
5634 .llseek
= seq_lseek
,
5635 .release
= tracing_single_release_file_tr
,
5638 #ifdef CONFIG_HIST_TRIGGERS_DEBUG
5639 static void hist_field_debug_show_flags(struct seq_file
*m
,
5640 unsigned long flags
)
5642 seq_puts(m
, " flags:\n");
5644 if (flags
& HIST_FIELD_FL_KEY
)
5645 seq_puts(m
, " HIST_FIELD_FL_KEY\n");
5646 else if (flags
& HIST_FIELD_FL_HITCOUNT
)
5647 seq_puts(m
, " VAL: HIST_FIELD_FL_HITCOUNT\n");
5648 else if (flags
& HIST_FIELD_FL_VAR
)
5649 seq_puts(m
, " HIST_FIELD_FL_VAR\n");
5650 else if (flags
& HIST_FIELD_FL_VAR_REF
)
5651 seq_puts(m
, " HIST_FIELD_FL_VAR_REF\n");
5653 seq_puts(m
, " VAL: normal u64 value\n");
5655 if (flags
& HIST_FIELD_FL_ALIAS
)
5656 seq_puts(m
, " HIST_FIELD_FL_ALIAS\n");
5657 else if (flags
& HIST_FIELD_FL_CONST
)
5658 seq_puts(m
, " HIST_FIELD_FL_CONST\n");
5661 static int hist_field_debug_show(struct seq_file
*m
,
5662 struct hist_field
*field
, unsigned long flags
)
5664 if ((field
->flags
& flags
) != flags
) {
5665 seq_printf(m
, "ERROR: bad flags - %lx\n", flags
);
5669 hist_field_debug_show_flags(m
, field
->flags
);
5671 seq_printf(m
, " ftrace_event_field name: %s\n",
5672 field
->field
->name
);
5674 if (field
->flags
& HIST_FIELD_FL_VAR
) {
5675 seq_printf(m
, " var.name: %s\n", field
->var
.name
);
5676 seq_printf(m
, " var.idx (into tracing_map_elt.vars[]): %u\n",
5680 if (field
->flags
& HIST_FIELD_FL_CONST
)
5681 seq_printf(m
, " constant: %llu\n", field
->constant
);
5683 if (field
->flags
& HIST_FIELD_FL_ALIAS
)
5684 seq_printf(m
, " var_ref_idx (into hist_data->var_refs[]): %u\n",
5685 field
->var_ref_idx
);
5687 if (field
->flags
& HIST_FIELD_FL_VAR_REF
) {
5688 seq_printf(m
, " name: %s\n", field
->name
);
5689 seq_printf(m
, " var.idx (into tracing_map_elt.vars[]): %u\n",
5691 seq_printf(m
, " var.hist_data: %p\n", field
->var
.hist_data
);
5692 seq_printf(m
, " var_ref_idx (into hist_data->var_refs[]): %u\n",
5693 field
->var_ref_idx
);
5695 seq_printf(m
, " system: %s\n", field
->system
);
5696 if (field
->event_name
)
5697 seq_printf(m
, " event_name: %s\n", field
->event_name
);
5700 seq_printf(m
, " type: %s\n", field
->type
);
5701 seq_printf(m
, " size: %u\n", field
->size
);
5702 seq_printf(m
, " is_signed: %u\n", field
->is_signed
);
5707 static int field_var_debug_show(struct seq_file
*m
,
5708 struct field_var
*field_var
, unsigned int i
,
5711 const char *vars_name
= save_vars
? "save_vars" : "field_vars";
5712 struct hist_field
*field
;
5715 seq_printf(m
, "\n hist_data->%s[%d]:\n", vars_name
, i
);
5717 field
= field_var
->var
;
5719 seq_printf(m
, "\n %s[%d].var:\n", vars_name
, i
);
5721 hist_field_debug_show_flags(m
, field
->flags
);
5722 seq_printf(m
, " var.name: %s\n", field
->var
.name
);
5723 seq_printf(m
, " var.idx (into tracing_map_elt.vars[]): %u\n",
5726 field
= field_var
->val
;
5728 seq_printf(m
, "\n %s[%d].val:\n", vars_name
, i
);
5730 seq_printf(m
, " ftrace_event_field name: %s\n",
5731 field
->field
->name
);
5737 seq_printf(m
, " type: %s\n", field
->type
);
5738 seq_printf(m
, " size: %u\n", field
->size
);
5739 seq_printf(m
, " is_signed: %u\n", field
->is_signed
);
5744 static int hist_action_debug_show(struct seq_file
*m
,
5745 struct action_data
*data
, int i
)
5749 if (data
->handler
== HANDLER_ONMAX
||
5750 data
->handler
== HANDLER_ONCHANGE
) {
5751 seq_printf(m
, "\n hist_data->actions[%d].track_data.var_ref:\n", i
);
5752 ret
= hist_field_debug_show(m
, data
->track_data
.var_ref
,
5753 HIST_FIELD_FL_VAR_REF
);
5757 seq_printf(m
, "\n hist_data->actions[%d].track_data.track_var:\n", i
);
5758 ret
= hist_field_debug_show(m
, data
->track_data
.track_var
,
5764 if (data
->handler
== HANDLER_ONMATCH
) {
5765 seq_printf(m
, "\n hist_data->actions[%d].match_data.event_system: %s\n",
5766 i
, data
->match_data
.event_system
);
5767 seq_printf(m
, " hist_data->actions[%d].match_data.event: %s\n",
5768 i
, data
->match_data
.event
);
5774 static int hist_actions_debug_show(struct seq_file
*m
,
5775 struct hist_trigger_data
*hist_data
)
5779 if (hist_data
->n_actions
)
5780 seq_puts(m
, "\n action tracking variables (for onmax()/onchange()/onmatch()):\n");
5782 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
5783 struct action_data
*action
= hist_data
->actions
[i
];
5785 ret
= hist_action_debug_show(m
, action
, i
);
5790 if (hist_data
->n_save_vars
)
5791 seq_puts(m
, "\n save action variables (save() params):\n");
5793 for (i
= 0; i
< hist_data
->n_save_vars
; i
++) {
5794 ret
= field_var_debug_show(m
, hist_data
->save_vars
[i
], i
, true);
5802 static void hist_trigger_debug_show(struct seq_file
*m
,
5803 struct event_trigger_data
*data
, int n
)
5805 struct hist_trigger_data
*hist_data
;
5809 seq_puts(m
, "\n\n");
5811 seq_puts(m
, "# event histogram\n#\n# trigger info: ");
5812 data
->ops
->print(m
, data
);
5813 seq_puts(m
, "#\n\n");
5815 hist_data
= data
->private_data
;
5817 seq_printf(m
, "hist_data: %p\n\n", hist_data
);
5818 seq_printf(m
, " n_vals: %u\n", hist_data
->n_vals
);
5819 seq_printf(m
, " n_keys: %u\n", hist_data
->n_keys
);
5820 seq_printf(m
, " n_fields: %u\n", hist_data
->n_fields
);
5822 seq_puts(m
, "\n val fields:\n\n");
5824 seq_puts(m
, " hist_data->fields[0]:\n");
5825 ret
= hist_field_debug_show(m
, hist_data
->fields
[0],
5826 HIST_FIELD_FL_HITCOUNT
);
5830 for (i
= 1; i
< hist_data
->n_vals
; i
++) {
5831 seq_printf(m
, "\n hist_data->fields[%d]:\n", i
);
5832 ret
= hist_field_debug_show(m
, hist_data
->fields
[i
], 0);
5837 seq_puts(m
, "\n key fields:\n");
5839 for (i
= hist_data
->n_vals
; i
< hist_data
->n_fields
; i
++) {
5840 seq_printf(m
, "\n hist_data->fields[%d]:\n", i
);
5841 ret
= hist_field_debug_show(m
, hist_data
->fields
[i
],
5847 if (hist_data
->n_var_refs
)
5848 seq_puts(m
, "\n variable reference fields:\n");
5850 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
5851 seq_printf(m
, "\n hist_data->var_refs[%d]:\n", i
);
5852 ret
= hist_field_debug_show(m
, hist_data
->var_refs
[i
],
5853 HIST_FIELD_FL_VAR_REF
);
5858 if (hist_data
->n_field_vars
)
5859 seq_puts(m
, "\n field variables:\n");
5861 for (i
= 0; i
< hist_data
->n_field_vars
; i
++) {
5862 ret
= field_var_debug_show(m
, hist_data
->field_vars
[i
], i
, false);
5867 ret
= hist_actions_debug_show(m
, hist_data
);
5872 static int hist_debug_show(struct seq_file
*m
, void *v
)
5874 struct event_trigger_data
*data
;
5875 struct trace_event_file
*event_file
;
5878 mutex_lock(&event_mutex
);
5880 event_file
= event_file_file(m
->private);
5881 if (unlikely(!event_file
)) {
5886 list_for_each_entry(data
, &event_file
->triggers
, list
) {
5887 if (data
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
)
5888 hist_trigger_debug_show(m
, data
, n
++);
5892 mutex_unlock(&event_mutex
);
5897 static int event_hist_debug_open(struct inode
*inode
, struct file
*file
)
5901 ret
= tracing_open_file_tr(inode
, file
);
5905 /* Clear private_data to avoid warning in single_open() */
5906 file
->private_data
= NULL
;
5907 return single_open(file
, hist_debug_show
, file
);
5910 const struct file_operations event_hist_debug_fops
= {
5911 .open
= event_hist_debug_open
,
5913 .llseek
= seq_lseek
,
5914 .release
= tracing_single_release_file_tr
,
5918 static void hist_field_print(struct seq_file
*m
, struct hist_field
*hist_field
)
5920 const char *field_name
= hist_field_name(hist_field
, 0);
5922 if (hist_field
->var
.name
)
5923 seq_printf(m
, "%s=", hist_field
->var
.name
);
5925 if (hist_field
->flags
& HIST_FIELD_FL_CPU
)
5926 seq_puts(m
, "common_cpu");
5927 else if (hist_field
->flags
& HIST_FIELD_FL_CONST
)
5928 seq_printf(m
, "%llu", hist_field
->constant
);
5929 else if (field_name
) {
5930 if (hist_field
->flags
& HIST_FIELD_FL_VAR_REF
||
5931 hist_field
->flags
& HIST_FIELD_FL_ALIAS
)
5933 seq_printf(m
, "%s", field_name
);
5934 } else if (hist_field
->flags
& HIST_FIELD_FL_TIMESTAMP
)
5935 seq_puts(m
, "common_timestamp");
5937 if (hist_field
->flags
) {
5938 if (!(hist_field
->flags
& HIST_FIELD_FL_VAR_REF
) &&
5939 !(hist_field
->flags
& HIST_FIELD_FL_EXPR
) &&
5940 !(hist_field
->flags
& HIST_FIELD_FL_STACKTRACE
)) {
5941 const char *flags
= get_hist_field_flags(hist_field
);
5944 seq_printf(m
, ".%s", flags
);
5947 if (hist_field
->buckets
)
5948 seq_printf(m
, "=%ld", hist_field
->buckets
);
5951 static int event_hist_trigger_print(struct seq_file
*m
,
5952 struct event_trigger_data
*data
)
5954 struct hist_trigger_data
*hist_data
= data
->private_data
;
5955 struct hist_field
*field
;
5956 bool have_var
= false;
5957 bool show_val
= false;
5960 seq_puts(m
, HIST_PREFIX
);
5963 seq_printf(m
, "%s:", data
->name
);
5965 seq_puts(m
, "keys=");
5967 for_each_hist_key_field(i
, hist_data
) {
5968 field
= hist_data
->fields
[i
];
5970 if (i
> hist_data
->n_vals
)
5973 if (field
->flags
& HIST_FIELD_FL_STACKTRACE
) {
5975 seq_printf(m
, "%s.stacktrace", field
->field
->name
);
5977 seq_puts(m
, "common_stacktrace");
5979 hist_field_print(m
, field
);
5982 seq_puts(m
, ":vals=");
5984 for_each_hist_val_field(i
, hist_data
) {
5985 field
= hist_data
->fields
[i
];
5986 if (field
->flags
& HIST_FIELD_FL_VAR
) {
5991 if (i
== HITCOUNT_IDX
) {
5992 if (hist_data
->attrs
->no_hitcount
)
5994 seq_puts(m
, "hitcount");
5998 hist_field_print(m
, field
);
6008 for_each_hist_val_field(i
, hist_data
) {
6009 field
= hist_data
->fields
[i
];
6011 if (field
->flags
& HIST_FIELD_FL_VAR
) {
6014 hist_field_print(m
, field
);
6019 seq_puts(m
, ":sort=");
6021 for (i
= 0; i
< hist_data
->n_sort_keys
; i
++) {
6022 struct tracing_map_sort_key
*sort_key
;
6023 unsigned int idx
, first_key_idx
;
6026 first_key_idx
= hist_data
->n_vals
- hist_data
->n_vars
;
6028 sort_key
= &hist_data
->sort_keys
[i
];
6029 idx
= sort_key
->field_idx
;
6031 if (WARN_ON(idx
>= HIST_FIELDS_MAX
))
6037 if (idx
== HITCOUNT_IDX
)
6038 seq_puts(m
, "hitcount");
6040 if (idx
>= first_key_idx
)
6041 idx
+= hist_data
->n_vars
;
6042 hist_field_print(m
, hist_data
->fields
[idx
]);
6045 if (sort_key
->descending
)
6046 seq_puts(m
, ".descending");
6048 seq_printf(m
, ":size=%u", (1 << hist_data
->map
->map_bits
));
6049 if (hist_data
->enable_timestamps
)
6050 seq_printf(m
, ":clock=%s", hist_data
->attrs
->clock
);
6051 if (hist_data
->attrs
->no_hitcount
)
6052 seq_puts(m
, ":nohitcount");
6054 print_actions_spec(m
, hist_data
);
6056 if (data
->filter_str
)
6057 seq_printf(m
, " if %s", data
->filter_str
);
6060 seq_puts(m
, " [paused]");
6062 seq_puts(m
, " [active]");
6069 static int event_hist_trigger_init(struct event_trigger_data
*data
)
6071 struct hist_trigger_data
*hist_data
= data
->private_data
;
6073 if (!data
->ref
&& hist_data
->attrs
->name
)
6074 save_named_trigger(hist_data
->attrs
->name
, data
);
6081 static void unregister_field_var_hists(struct hist_trigger_data
*hist_data
)
6083 struct trace_event_file
*file
;
6088 for (i
= 0; i
< hist_data
->n_field_var_hists
; i
++) {
6089 file
= hist_data
->field_var_hists
[i
]->hist_data
->event_file
;
6090 cmd
= hist_data
->field_var_hists
[i
]->cmd
;
6091 ret
= event_hist_trigger_parse(&trigger_hist_cmd
, file
,
6092 "!hist", "hist", cmd
);
6093 WARN_ON_ONCE(ret
< 0);
6097 static void event_hist_trigger_free(struct event_trigger_data
*data
)
6099 struct hist_trigger_data
*hist_data
= data
->private_data
;
6101 if (WARN_ON_ONCE(data
->ref
<= 0))
6107 del_named_trigger(data
);
6109 trigger_data_free(data
);
6111 remove_hist_vars(hist_data
);
6113 unregister_field_var_hists(hist_data
);
6115 destroy_hist_data(hist_data
);
6119 static struct event_trigger_ops event_hist_trigger_ops
= {
6120 .trigger
= event_hist_trigger
,
6121 .print
= event_hist_trigger_print
,
6122 .init
= event_hist_trigger_init
,
6123 .free
= event_hist_trigger_free
,
6126 static int event_hist_trigger_named_init(struct event_trigger_data
*data
)
6130 save_named_trigger(data
->named_data
->name
, data
);
6132 event_hist_trigger_init(data
->named_data
);
6137 static void event_hist_trigger_named_free(struct event_trigger_data
*data
)
6139 if (WARN_ON_ONCE(data
->ref
<= 0))
6142 event_hist_trigger_free(data
->named_data
);
6146 del_named_trigger(data
);
6147 trigger_data_free(data
);
6151 static struct event_trigger_ops event_hist_trigger_named_ops
= {
6152 .trigger
= event_hist_trigger
,
6153 .print
= event_hist_trigger_print
,
6154 .init
= event_hist_trigger_named_init
,
6155 .free
= event_hist_trigger_named_free
,
6158 static struct event_trigger_ops
*event_hist_get_trigger_ops(char *cmd
,
6161 return &event_hist_trigger_ops
;
6164 static void hist_clear(struct event_trigger_data
*data
)
6166 struct hist_trigger_data
*hist_data
= data
->private_data
;
6169 pause_named_trigger(data
);
6171 tracepoint_synchronize_unregister();
6173 tracing_map_clear(hist_data
->map
);
6176 unpause_named_trigger(data
);
6179 static bool compatible_field(struct ftrace_event_field
*field
,
6180 struct ftrace_event_field
*test_field
)
6182 if (field
== test_field
)
6184 if (field
== NULL
|| test_field
== NULL
)
6186 if (strcmp(field
->name
, test_field
->name
) != 0)
6188 if (strcmp(field
->type
, test_field
->type
) != 0)
6190 if (field
->size
!= test_field
->size
)
6192 if (field
->is_signed
!= test_field
->is_signed
)
6198 static bool hist_trigger_match(struct event_trigger_data
*data
,
6199 struct event_trigger_data
*data_test
,
6200 struct event_trigger_data
*named_data
,
6203 struct tracing_map_sort_key
*sort_key
, *sort_key_test
;
6204 struct hist_trigger_data
*hist_data
, *hist_data_test
;
6205 struct hist_field
*key_field
, *key_field_test
;
6208 if (named_data
&& (named_data
!= data_test
) &&
6209 (named_data
!= data_test
->named_data
))
6212 if (!named_data
&& is_named_trigger(data_test
))
6215 hist_data
= data
->private_data
;
6216 hist_data_test
= data_test
->private_data
;
6218 if (hist_data
->n_vals
!= hist_data_test
->n_vals
||
6219 hist_data
->n_fields
!= hist_data_test
->n_fields
||
6220 hist_data
->n_sort_keys
!= hist_data_test
->n_sort_keys
)
6223 if (!ignore_filter
) {
6224 if ((data
->filter_str
&& !data_test
->filter_str
) ||
6225 (!data
->filter_str
&& data_test
->filter_str
))
6229 for_each_hist_field(i
, hist_data
) {
6230 key_field
= hist_data
->fields
[i
];
6231 key_field_test
= hist_data_test
->fields
[i
];
6233 if (key_field
->flags
!= key_field_test
->flags
)
6235 if (!compatible_field(key_field
->field
, key_field_test
->field
))
6237 if (key_field
->offset
!= key_field_test
->offset
)
6239 if (key_field
->size
!= key_field_test
->size
)
6241 if (key_field
->is_signed
!= key_field_test
->is_signed
)
6243 if (!!key_field
->var
.name
!= !!key_field_test
->var
.name
)
6245 if (key_field
->var
.name
&&
6246 strcmp(key_field
->var
.name
, key_field_test
->var
.name
) != 0)
6250 for (i
= 0; i
< hist_data
->n_sort_keys
; i
++) {
6251 sort_key
= &hist_data
->sort_keys
[i
];
6252 sort_key_test
= &hist_data_test
->sort_keys
[i
];
6254 if (sort_key
->field_idx
!= sort_key_test
->field_idx
||
6255 sort_key
->descending
!= sort_key_test
->descending
)
6259 if (!ignore_filter
&& data
->filter_str
&&
6260 (strcmp(data
->filter_str
, data_test
->filter_str
) != 0))
6263 if (!actions_match(hist_data
, hist_data_test
))
6269 static bool existing_hist_update_only(char *glob
,
6270 struct event_trigger_data
*data
,
6271 struct trace_event_file
*file
)
6273 struct hist_trigger_data
*hist_data
= data
->private_data
;
6274 struct event_trigger_data
*test
, *named_data
= NULL
;
6275 bool updated
= false;
6277 if (!hist_data
->attrs
->pause
&& !hist_data
->attrs
->cont
&&
6278 !hist_data
->attrs
->clear
)
6281 if (hist_data
->attrs
->name
) {
6282 named_data
= find_named_trigger(hist_data
->attrs
->name
);
6284 if (!hist_trigger_match(data
, named_data
, named_data
,
6290 if (hist_data
->attrs
->name
&& !named_data
)
6293 list_for_each_entry(test
, &file
->triggers
, list
) {
6294 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6295 if (!hist_trigger_match(data
, test
, named_data
, false))
6297 if (hist_data
->attrs
->pause
)
6298 test
->paused
= true;
6299 else if (hist_data
->attrs
->cont
)
6300 test
->paused
= false;
6301 else if (hist_data
->attrs
->clear
)
6311 static int hist_register_trigger(char *glob
,
6312 struct event_trigger_data
*data
,
6313 struct trace_event_file
*file
)
6315 struct hist_trigger_data
*hist_data
= data
->private_data
;
6316 struct event_trigger_data
*test
, *named_data
= NULL
;
6317 struct trace_array
*tr
= file
->tr
;
6320 if (hist_data
->attrs
->name
) {
6321 named_data
= find_named_trigger(hist_data
->attrs
->name
);
6323 if (!hist_trigger_match(data
, named_data
, named_data
,
6325 hist_err(tr
, HIST_ERR_NAMED_MISMATCH
, errpos(hist_data
->attrs
->name
));
6332 if (hist_data
->attrs
->name
&& !named_data
)
6335 lockdep_assert_held(&event_mutex
);
6337 list_for_each_entry(test
, &file
->triggers
, list
) {
6338 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6339 if (hist_trigger_match(data
, test
, named_data
, false)) {
6340 hist_err(tr
, HIST_ERR_TRIGGER_EEXIST
, 0);
6347 if (hist_data
->attrs
->cont
|| hist_data
->attrs
->clear
) {
6348 hist_err(tr
, HIST_ERR_TRIGGER_ENOENT_CLEAR
, 0);
6353 if (hist_data
->attrs
->pause
)
6354 data
->paused
= true;
6357 data
->private_data
= named_data
->private_data
;
6358 set_named_trigger_data(data
, named_data
);
6359 data
->ops
= &event_hist_trigger_named_ops
;
6362 if (data
->ops
->init
) {
6363 ret
= data
->ops
->init(data
);
6368 if (hist_data
->enable_timestamps
) {
6369 char *clock
= hist_data
->attrs
->clock
;
6371 ret
= tracing_set_clock(file
->tr
, hist_data
->attrs
->clock
);
6373 hist_err(tr
, HIST_ERR_SET_CLOCK_FAIL
, errpos(clock
));
6377 tracing_set_filter_buffering(file
->tr
, true);
6381 destroy_hist_data(hist_data
);
6386 static int hist_trigger_enable(struct event_trigger_data
*data
,
6387 struct trace_event_file
*file
)
6391 list_add_tail_rcu(&data
->list
, &file
->triggers
);
6393 update_cond_flag(file
);
6395 if (trace_event_trigger_enable_disable(file
, 1) < 0) {
6396 list_del_rcu(&data
->list
);
6397 update_cond_flag(file
);
6404 static bool have_hist_trigger_match(struct event_trigger_data
*data
,
6405 struct trace_event_file
*file
)
6407 struct hist_trigger_data
*hist_data
= data
->private_data
;
6408 struct event_trigger_data
*test
, *named_data
= NULL
;
6411 lockdep_assert_held(&event_mutex
);
6413 if (hist_data
->attrs
->name
)
6414 named_data
= find_named_trigger(hist_data
->attrs
->name
);
6416 list_for_each_entry(test
, &file
->triggers
, list
) {
6417 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6418 if (hist_trigger_match(data
, test
, named_data
, false)) {
6428 static bool hist_trigger_check_refs(struct event_trigger_data
*data
,
6429 struct trace_event_file
*file
)
6431 struct hist_trigger_data
*hist_data
= data
->private_data
;
6432 struct event_trigger_data
*test
, *named_data
= NULL
;
6434 lockdep_assert_held(&event_mutex
);
6436 if (hist_data
->attrs
->name
)
6437 named_data
= find_named_trigger(hist_data
->attrs
->name
);
6439 list_for_each_entry(test
, &file
->triggers
, list
) {
6440 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6441 if (!hist_trigger_match(data
, test
, named_data
, false))
6443 hist_data
= test
->private_data
;
6444 if (check_var_refs(hist_data
))
6453 static void hist_unregister_trigger(char *glob
,
6454 struct event_trigger_data
*data
,
6455 struct trace_event_file
*file
)
6457 struct event_trigger_data
*test
= NULL
, *iter
, *named_data
= NULL
;
6458 struct hist_trigger_data
*hist_data
= data
->private_data
;
6460 lockdep_assert_held(&event_mutex
);
6462 if (hist_data
->attrs
->name
)
6463 named_data
= find_named_trigger(hist_data
->attrs
->name
);
6465 list_for_each_entry(iter
, &file
->triggers
, list
) {
6466 if (iter
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6467 if (!hist_trigger_match(data
, iter
, named_data
, false))
6470 list_del_rcu(&test
->list
);
6471 trace_event_trigger_enable_disable(file
, 0);
6472 update_cond_flag(file
);
6477 if (test
&& test
->ops
->free
)
6478 test
->ops
->free(test
);
6480 if (hist_data
->enable_timestamps
) {
6481 if (!hist_data
->remove
|| test
)
6482 tracing_set_filter_buffering(file
->tr
, false);
6486 static bool hist_file_check_refs(struct trace_event_file
*file
)
6488 struct hist_trigger_data
*hist_data
;
6489 struct event_trigger_data
*test
;
6491 lockdep_assert_held(&event_mutex
);
6493 list_for_each_entry(test
, &file
->triggers
, list
) {
6494 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6495 hist_data
= test
->private_data
;
6496 if (check_var_refs(hist_data
))
6504 static void hist_unreg_all(struct trace_event_file
*file
)
6506 struct event_trigger_data
*test
, *n
;
6507 struct hist_trigger_data
*hist_data
;
6508 struct synth_event
*se
;
6509 const char *se_name
;
6511 lockdep_assert_held(&event_mutex
);
6513 if (hist_file_check_refs(file
))
6516 list_for_each_entry_safe(test
, n
, &file
->triggers
, list
) {
6517 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6518 hist_data
= test
->private_data
;
6519 list_del_rcu(&test
->list
);
6520 trace_event_trigger_enable_disable(file
, 0);
6522 se_name
= trace_event_name(file
->event_call
);
6523 se
= find_synth_event(se_name
);
6527 update_cond_flag(file
);
6528 if (hist_data
->enable_timestamps
)
6529 tracing_set_filter_buffering(file
->tr
, false);
6530 if (test
->ops
->free
)
6531 test
->ops
->free(test
);
6536 static int event_hist_trigger_parse(struct event_command
*cmd_ops
,
6537 struct trace_event_file
*file
,
6538 char *glob
, char *cmd
,
6539 char *param_and_filter
)
6541 unsigned int hist_trigger_bits
= TRACING_MAP_BITS_DEFAULT
;
6542 struct event_trigger_data
*trigger_data
;
6543 struct hist_trigger_attrs
*attrs
;
6544 struct hist_trigger_data
*hist_data
;
6545 char *param
, *filter
, *p
, *start
;
6546 struct synth_event
*se
;
6547 const char *se_name
;
6551 lockdep_assert_held(&event_mutex
);
6558 last_cmd_set(file
, param_and_filter
);
6561 remove
= event_trigger_check_remove(glob
);
6563 if (event_trigger_empty_param(param_and_filter
))
6567 * separate the trigger from the filter (k:v [if filter])
6568 * allowing for whitespace in the trigger
6570 p
= param
= param_and_filter
;
6572 p
= strstr(p
, "if");
6575 if (p
== param_and_filter
)
6577 if (*(p
- 1) != ' ' && *(p
- 1) != '\t') {
6581 if (p
>= param_and_filter
+ strlen(param_and_filter
) - (sizeof("if") - 1) - 1)
6583 if (*(p
+ sizeof("if") - 1) != ' ' && *(p
+ sizeof("if") - 1) != '\t') {
6594 filter
= strstrip(p
);
6595 param
= strstrip(param
);
6599 * To simplify arithmetic expression parsing, replace occurrences of
6600 * '.sym-offset' modifier with '.symXoffset'
6602 start
= strstr(param
, ".sym-offset");
6605 start
= strstr(start
+ 11, ".sym-offset");
6608 attrs
= parse_hist_trigger_attrs(file
->tr
, param
);
6610 return PTR_ERR(attrs
);
6612 if (attrs
->map_bits
)
6613 hist_trigger_bits
= attrs
->map_bits
;
6615 hist_data
= create_hist_data(hist_trigger_bits
, attrs
, file
, remove
);
6616 if (IS_ERR(hist_data
)) {
6617 destroy_hist_trigger_attrs(attrs
);
6618 return PTR_ERR(hist_data
);
6621 trigger_data
= event_trigger_alloc(cmd_ops
, cmd
, param
, hist_data
);
6622 if (!trigger_data
) {
6627 ret
= event_trigger_set_filter(cmd_ops
, file
, filter
, trigger_data
);
6632 if (!have_hist_trigger_match(trigger_data
, file
))
6635 if (hist_trigger_check_refs(trigger_data
, file
)) {
6640 event_trigger_unregister(cmd_ops
, file
, glob
+1, trigger_data
);
6641 se_name
= trace_event_name(file
->event_call
);
6642 se
= find_synth_event(se_name
);
6649 if (existing_hist_update_only(glob
, trigger_data
, file
))
6652 ret
= event_trigger_register(cmd_ops
, file
, glob
, trigger_data
);
6656 if (get_named_trigger_data(trigger_data
))
6659 ret
= create_actions(hist_data
);
6663 if (has_hist_vars(hist_data
) || hist_data
->n_var_refs
) {
6664 ret
= save_hist_vars(hist_data
);
6669 ret
= tracing_map_init(hist_data
->map
);
6673 ret
= hist_trigger_enable(trigger_data
, file
);
6677 se_name
= trace_event_name(file
->event_call
);
6678 se
= find_synth_event(se_name
);
6682 if (ret
== 0 && glob
[0])
6687 event_trigger_unregister(cmd_ops
, file
, glob
+1, trigger_data
);
6689 event_trigger_reset_filter(cmd_ops
, trigger_data
);
6691 remove_hist_vars(hist_data
);
6693 kfree(trigger_data
);
6695 destroy_hist_data(hist_data
);
6699 static struct event_command trigger_hist_cmd
= {
6701 .trigger_type
= ETT_EVENT_HIST
,
6702 .flags
= EVENT_CMD_FL_NEEDS_REC
,
6703 .parse
= event_hist_trigger_parse
,
6704 .reg
= hist_register_trigger
,
6705 .unreg
= hist_unregister_trigger
,
6706 .unreg_all
= hist_unreg_all
,
6707 .get_trigger_ops
= event_hist_get_trigger_ops
,
6708 .set_filter
= set_trigger_filter
,
6711 __init
int register_trigger_hist_cmd(void)
6715 ret
= register_event_command(&trigger_hist_cmd
);
6722 hist_enable_trigger(struct event_trigger_data
*data
,
6723 struct trace_buffer
*buffer
, void *rec
,
6724 struct ring_buffer_event
*event
)
6726 struct enable_trigger_data
*enable_data
= data
->private_data
;
6727 struct event_trigger_data
*test
;
6729 list_for_each_entry_rcu(test
, &enable_data
->file
->triggers
, list
,
6730 lockdep_is_held(&event_mutex
)) {
6731 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6732 if (enable_data
->enable
)
6733 test
->paused
= false;
6735 test
->paused
= true;
6741 hist_enable_count_trigger(struct event_trigger_data
*data
,
6742 struct trace_buffer
*buffer
, void *rec
,
6743 struct ring_buffer_event
*event
)
6748 if (data
->count
!= -1)
6751 hist_enable_trigger(data
, buffer
, rec
, event
);
6754 static struct event_trigger_ops hist_enable_trigger_ops
= {
6755 .trigger
= hist_enable_trigger
,
6756 .print
= event_enable_trigger_print
,
6757 .init
= event_trigger_init
,
6758 .free
= event_enable_trigger_free
,
6761 static struct event_trigger_ops hist_enable_count_trigger_ops
= {
6762 .trigger
= hist_enable_count_trigger
,
6763 .print
= event_enable_trigger_print
,
6764 .init
= event_trigger_init
,
6765 .free
= event_enable_trigger_free
,
6768 static struct event_trigger_ops hist_disable_trigger_ops
= {
6769 .trigger
= hist_enable_trigger
,
6770 .print
= event_enable_trigger_print
,
6771 .init
= event_trigger_init
,
6772 .free
= event_enable_trigger_free
,
6775 static struct event_trigger_ops hist_disable_count_trigger_ops
= {
6776 .trigger
= hist_enable_count_trigger
,
6777 .print
= event_enable_trigger_print
,
6778 .init
= event_trigger_init
,
6779 .free
= event_enable_trigger_free
,
6782 static struct event_trigger_ops
*
6783 hist_enable_get_trigger_ops(char *cmd
, char *param
)
6785 struct event_trigger_ops
*ops
;
6788 enable
= (strcmp(cmd
, ENABLE_HIST_STR
) == 0);
6791 ops
= param
? &hist_enable_count_trigger_ops
:
6792 &hist_enable_trigger_ops
;
6794 ops
= param
? &hist_disable_count_trigger_ops
:
6795 &hist_disable_trigger_ops
;
6800 static void hist_enable_unreg_all(struct trace_event_file
*file
)
6802 struct event_trigger_data
*test
, *n
;
6804 list_for_each_entry_safe(test
, n
, &file
->triggers
, list
) {
6805 if (test
->cmd_ops
->trigger_type
== ETT_HIST_ENABLE
) {
6806 list_del_rcu(&test
->list
);
6807 update_cond_flag(file
);
6808 trace_event_trigger_enable_disable(file
, 0);
6809 if (test
->ops
->free
)
6810 test
->ops
->free(test
);
6815 static struct event_command trigger_hist_enable_cmd
= {
6816 .name
= ENABLE_HIST_STR
,
6817 .trigger_type
= ETT_HIST_ENABLE
,
6818 .parse
= event_enable_trigger_parse
,
6819 .reg
= event_enable_register_trigger
,
6820 .unreg
= event_enable_unregister_trigger
,
6821 .unreg_all
= hist_enable_unreg_all
,
6822 .get_trigger_ops
= hist_enable_get_trigger_ops
,
6823 .set_filter
= set_trigger_filter
,
6826 static struct event_command trigger_hist_disable_cmd
= {
6827 .name
= DISABLE_HIST_STR
,
6828 .trigger_type
= ETT_HIST_ENABLE
,
6829 .parse
= event_enable_trigger_parse
,
6830 .reg
= event_enable_register_trigger
,
6831 .unreg
= event_enable_unregister_trigger
,
6832 .unreg_all
= hist_enable_unreg_all
,
6833 .get_trigger_ops
= hist_enable_get_trigger_ops
,
6834 .set_filter
= set_trigger_filter
,
6837 static __init
void unregister_trigger_hist_enable_disable_cmds(void)
6839 unregister_event_command(&trigger_hist_enable_cmd
);
6840 unregister_event_command(&trigger_hist_disable_cmd
);
6843 __init
int register_trigger_hist_enable_disable_cmds(void)
6847 ret
= register_event_command(&trigger_hist_enable_cmd
);
6848 if (WARN_ON(ret
< 0))
6850 ret
= register_event_command(&trigger_hist_disable_cmd
);
6851 if (WARN_ON(ret
< 0))
6852 unregister_trigger_hist_enable_disable_cmds();