1 // SPDX-License-Identifier: GPL-2.0
3 * trace_events_hist - trace event hist triggers
5 * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
8 #include <linux/module.h>
9 #include <linux/kallsyms.h>
10 #include <linux/security.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/stacktrace.h>
14 #include <linux/rculist.h>
15 #include <linux/tracefs.h>
17 /* for gfp flag names */
18 #include <linux/trace_events.h>
19 #include <trace/events/mmflags.h>
21 #include "tracing_map.h"
22 #include "trace_synth.h"
25 C(NONE, "No error"), \
26 C(DUPLICATE_VAR, "Variable already defined"), \
27 C(VAR_NOT_UNIQUE, "Variable name not unique, need to use fully qualified name (subsys.event.var) for variable"), \
28 C(TOO_MANY_VARS, "Too many variables defined"), \
29 C(MALFORMED_ASSIGNMENT, "Malformed assignment"), \
30 C(NAMED_MISMATCH, "Named hist trigger doesn't match existing named trigger (includes variables)"), \
31 C(TRIGGER_EEXIST, "Hist trigger already exists"), \
32 C(TRIGGER_ENOENT_CLEAR, "Can't clear or continue a nonexistent hist trigger"), \
33 C(SET_CLOCK_FAIL, "Couldn't set trace_clock"), \
34 C(BAD_FIELD_MODIFIER, "Invalid field modifier"), \
35 C(TOO_MANY_SUBEXPR, "Too many subexpressions (3 max)"), \
36 C(TIMESTAMP_MISMATCH, "Timestamp units in expression don't match"), \
37 C(TOO_MANY_FIELD_VARS, "Too many field variables defined"), \
38 C(EVENT_FILE_NOT_FOUND, "Event file not found"), \
39 C(HIST_NOT_FOUND, "Matching event histogram not found"), \
40 C(HIST_CREATE_FAIL, "Couldn't create histogram for field"), \
41 C(SYNTH_VAR_NOT_FOUND, "Couldn't find synthetic variable"), \
42 C(SYNTH_EVENT_NOT_FOUND,"Couldn't find synthetic event"), \
43 C(SYNTH_TYPE_MISMATCH, "Param type doesn't match synthetic event field type"), \
44 C(SYNTH_COUNT_MISMATCH, "Param count doesn't match synthetic event field count"), \
45 C(FIELD_VAR_PARSE_FAIL, "Couldn't parse field variable"), \
46 C(VAR_CREATE_FIND_FAIL, "Couldn't create or find variable"), \
47 C(ONX_NOT_VAR, "For onmax(x) or onchange(x), x must be a variable"), \
48 C(ONX_VAR_NOT_FOUND, "Couldn't find onmax or onchange variable"), \
49 C(ONX_VAR_CREATE_FAIL, "Couldn't create onmax or onchange variable"), \
50 C(FIELD_VAR_CREATE_FAIL,"Couldn't create field variable"), \
51 C(TOO_MANY_PARAMS, "Too many action params"), \
52 C(PARAM_NOT_FOUND, "Couldn't find param"), \
53 C(INVALID_PARAM, "Invalid action param"), \
54 C(ACTION_NOT_FOUND, "No action found"), \
55 C(NO_SAVE_PARAMS, "No params found for save()"), \
56 C(TOO_MANY_SAVE_ACTIONS,"Can't have more than one save() action per hist"), \
57 C(ACTION_MISMATCH, "Handler doesn't support action"), \
58 C(NO_CLOSING_PAREN, "No closing paren found"), \
59 C(SUBSYS_NOT_FOUND, "Missing subsystem"), \
60 C(INVALID_SUBSYS_EVENT, "Invalid subsystem or event name"), \
61 C(INVALID_REF_KEY, "Using variable references in keys not supported"), \
62 C(VAR_NOT_FOUND, "Couldn't find variable"), \
63 C(FIELD_NOT_FOUND, "Couldn't find field"), \
64 C(EMPTY_ASSIGNMENT, "Empty assignment"), \
65 C(INVALID_SORT_MODIFIER,"Invalid sort modifier"), \
66 C(EMPTY_SORT_FIELD, "Empty sort field"), \
67 C(TOO_MANY_SORT_FIELDS, "Too many sort fields (Max = 2)"), \
68 C(INVALID_SORT_FIELD, "Sort field must be a key or a val"), \
69 C(INVALID_STR_OPERAND, "String type can not be an operand in expression"), \
70 C(EXPECT_NUMBER, "Expecting numeric literal"), \
71 C(UNARY_MINUS_SUBEXPR, "Unary minus not supported in sub-expressions"), \
72 C(DIVISION_BY_ZERO, "Division by zero"), \
73 C(NEED_NOHC_VAL, "Non-hitcount value is required for 'nohitcount'"),
76 #define C(a, b) HIST_ERR_##a
83 static const char *err_text
[] = { ERRORS
};
87 typedef u64 (*hist_field_fn_t
) (struct hist_field
*field
,
88 struct tracing_map_elt
*elt
,
89 struct trace_buffer
*buffer
,
90 struct ring_buffer_event
*rbe
,
93 #define HIST_FIELD_OPERANDS_MAX 2
94 #define HIST_FIELDS_MAX (TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX)
95 #define HIST_ACTIONS_MAX 8
96 #define HIST_CONST_DIGITS_MAX 21
97 #define HIST_DIV_SHIFT 20 /* For optimizing division by constants */
103 FIELD_OP_UNARY_MINUS
,
110 HIST_FIELD_FN_VAR_REF
,
111 HIST_FIELD_FN_COUNTER
,
114 HIST_FIELD_FN_BUCKET
,
115 HIST_FIELD_FN_TIMESTAMP
,
117 HIST_FIELD_FN_STRING
,
118 HIST_FIELD_FN_DYNSTRING
,
119 HIST_FIELD_FN_RELDYNSTRING
,
120 HIST_FIELD_FN_PSTRING
,
129 HIST_FIELD_FN_UMINUS
,
134 HIST_FIELD_FN_DIV_POWER2
,
135 HIST_FIELD_FN_DIV_NOT_POWER2
,
136 HIST_FIELD_FN_DIV_MULT_SHIFT
,
137 HIST_FIELD_FN_EXECNAME
,
142 * A hist_var (histogram variable) contains variable information for
143 * hist_fields having the HIST_FIELD_FL_VAR or HIST_FIELD_FL_VAR_REF
144 * flag set. A hist_var has a variable name e.g. ts0, and is
145 * associated with a given histogram trigger, as specified by
146 * hist_data. The hist_var idx is the unique index assigned to the
147 * variable by the hist trigger's tracing_map. The idx is what is
148 * used to set a variable's value and, by a variable reference, to
153 struct hist_trigger_data
*hist_data
;
158 struct ftrace_event_field
*field
;
160 unsigned long buckets
;
162 struct hist_field
*operands
[HIST_FIELD_OPERANDS_MAX
];
163 struct hist_trigger_data
*hist_data
;
164 enum hist_field_fn fn_num
;
168 unsigned int is_signed
;
171 * Variable fields contain variable-specific info in var.
174 enum field_op_id
operator;
179 * The name field is used for EXPR and VAR_REF fields. VAR
180 * fields contain the variable name in var.name.
185 * When a histogram trigger is hit, if it has any references
186 * to variables, the values of those variables are collected
187 * into a var_ref_vals array by resolve_var_refs(). The
188 * current value of each variable is read from the tracing_map
189 * using the hist field's hist_var.idx and entered into the
190 * var_ref_idx entry i.e. var_ref_vals[var_ref_idx].
192 unsigned int var_ref_idx
;
195 unsigned int var_str_idx
;
197 /* Numeric literals are represented as u64 */
199 /* Used to optimize division by constants */
203 static u64
hist_fn_call(struct hist_field
*hist_field
,
204 struct tracing_map_elt
*elt
,
205 struct trace_buffer
*buffer
,
206 struct ring_buffer_event
*rbe
,
209 static u64
hist_field_const(struct hist_field
*field
,
210 struct tracing_map_elt
*elt
,
211 struct trace_buffer
*buffer
,
212 struct ring_buffer_event
*rbe
,
215 return field
->constant
;
218 static u64
hist_field_counter(struct hist_field
*field
,
219 struct tracing_map_elt
*elt
,
220 struct trace_buffer
*buffer
,
221 struct ring_buffer_event
*rbe
,
227 static u64
hist_field_string(struct hist_field
*hist_field
,
228 struct tracing_map_elt
*elt
,
229 struct trace_buffer
*buffer
,
230 struct ring_buffer_event
*rbe
,
233 char *addr
= (char *)(event
+ hist_field
->field
->offset
);
235 return (u64
)(unsigned long)addr
;
238 static u64
hist_field_dynstring(struct hist_field
*hist_field
,
239 struct tracing_map_elt
*elt
,
240 struct trace_buffer
*buffer
,
241 struct ring_buffer_event
*rbe
,
244 u32 str_item
= *(u32
*)(event
+ hist_field
->field
->offset
);
245 int str_loc
= str_item
& 0xffff;
246 char *addr
= (char *)(event
+ str_loc
);
248 return (u64
)(unsigned long)addr
;
251 static u64
hist_field_reldynstring(struct hist_field
*hist_field
,
252 struct tracing_map_elt
*elt
,
253 struct trace_buffer
*buffer
,
254 struct ring_buffer_event
*rbe
,
257 u32
*item
= event
+ hist_field
->field
->offset
;
258 u32 str_item
= *item
;
259 int str_loc
= str_item
& 0xffff;
260 char *addr
= (char *)&item
[1] + str_loc
;
262 return (u64
)(unsigned long)addr
;
265 static u64
hist_field_pstring(struct hist_field
*hist_field
,
266 struct tracing_map_elt
*elt
,
267 struct trace_buffer
*buffer
,
268 struct ring_buffer_event
*rbe
,
271 char **addr
= (char **)(event
+ hist_field
->field
->offset
);
273 return (u64
)(unsigned long)*addr
;
276 static u64
hist_field_log2(struct hist_field
*hist_field
,
277 struct tracing_map_elt
*elt
,
278 struct trace_buffer
*buffer
,
279 struct ring_buffer_event
*rbe
,
282 struct hist_field
*operand
= hist_field
->operands
[0];
284 u64 val
= hist_fn_call(operand
, elt
, buffer
, rbe
, event
);
286 return (u64
) ilog2(roundup_pow_of_two(val
));
289 static u64
hist_field_bucket(struct hist_field
*hist_field
,
290 struct tracing_map_elt
*elt
,
291 struct trace_buffer
*buffer
,
292 struct ring_buffer_event
*rbe
,
295 struct hist_field
*operand
= hist_field
->operands
[0];
296 unsigned long buckets
= hist_field
->buckets
;
298 u64 val
= hist_fn_call(operand
, elt
, buffer
, rbe
, event
);
300 if (WARN_ON_ONCE(!buckets
))
304 val
= div64_ul(val
, buckets
);
306 val
= (u64
)((unsigned long)val
/ buckets
);
307 return val
* buckets
;
310 static u64
hist_field_plus(struct hist_field
*hist_field
,
311 struct tracing_map_elt
*elt
,
312 struct trace_buffer
*buffer
,
313 struct ring_buffer_event
*rbe
,
316 struct hist_field
*operand1
= hist_field
->operands
[0];
317 struct hist_field
*operand2
= hist_field
->operands
[1];
319 u64 val1
= hist_fn_call(operand1
, elt
, buffer
, rbe
, event
);
320 u64 val2
= hist_fn_call(operand2
, elt
, buffer
, rbe
, event
);
325 static u64
hist_field_minus(struct hist_field
*hist_field
,
326 struct tracing_map_elt
*elt
,
327 struct trace_buffer
*buffer
,
328 struct ring_buffer_event
*rbe
,
331 struct hist_field
*operand1
= hist_field
->operands
[0];
332 struct hist_field
*operand2
= hist_field
->operands
[1];
334 u64 val1
= hist_fn_call(operand1
, elt
, buffer
, rbe
, event
);
335 u64 val2
= hist_fn_call(operand2
, elt
, buffer
, rbe
, event
);
340 static u64
hist_field_div(struct hist_field
*hist_field
,
341 struct tracing_map_elt
*elt
,
342 struct trace_buffer
*buffer
,
343 struct ring_buffer_event
*rbe
,
346 struct hist_field
*operand1
= hist_field
->operands
[0];
347 struct hist_field
*operand2
= hist_field
->operands
[1];
349 u64 val1
= hist_fn_call(operand1
, elt
, buffer
, rbe
, event
);
350 u64 val2
= hist_fn_call(operand2
, elt
, buffer
, rbe
, event
);
352 /* Return -1 for the undefined case */
356 /* Use shift if the divisor is a power of 2 */
357 if (!(val2
& (val2
- 1)))
358 return val1
>> __ffs64(val2
);
360 return div64_u64(val1
, val2
);
363 static u64
div_by_power_of_two(struct hist_field
*hist_field
,
364 struct tracing_map_elt
*elt
,
365 struct trace_buffer
*buffer
,
366 struct ring_buffer_event
*rbe
,
369 struct hist_field
*operand1
= hist_field
->operands
[0];
370 struct hist_field
*operand2
= hist_field
->operands
[1];
372 u64 val1
= hist_fn_call(operand1
, elt
, buffer
, rbe
, event
);
374 return val1
>> __ffs64(operand2
->constant
);
377 static u64
div_by_not_power_of_two(struct hist_field
*hist_field
,
378 struct tracing_map_elt
*elt
,
379 struct trace_buffer
*buffer
,
380 struct ring_buffer_event
*rbe
,
383 struct hist_field
*operand1
= hist_field
->operands
[0];
384 struct hist_field
*operand2
= hist_field
->operands
[1];
386 u64 val1
= hist_fn_call(operand1
, elt
, buffer
, rbe
, event
);
388 return div64_u64(val1
, operand2
->constant
);
391 static u64
div_by_mult_and_shift(struct hist_field
*hist_field
,
392 struct tracing_map_elt
*elt
,
393 struct trace_buffer
*buffer
,
394 struct ring_buffer_event
*rbe
,
397 struct hist_field
*operand1
= hist_field
->operands
[0];
398 struct hist_field
*operand2
= hist_field
->operands
[1];
400 u64 val1
= hist_fn_call(operand1
, elt
, buffer
, rbe
, event
);
403 * If the divisor is a constant, do a multiplication and shift instead.
405 * Choose Z = some power of 2. If Y <= Z, then:
406 * X / Y = (X * (Z / Y)) / Z
408 * (Z / Y) is a constant (mult) which is calculated at parse time, so:
409 * X / Y = (X * mult) / Z
411 * The division by Z can be replaced by a shift since Z is a power of 2:
412 * X / Y = (X * mult) >> HIST_DIV_SHIFT
414 * As long, as X < Z the results will not be off by more than 1.
416 if (val1
< (1 << HIST_DIV_SHIFT
)) {
417 u64 mult
= operand2
->div_multiplier
;
419 return (val1
* mult
+ ((1 << HIST_DIV_SHIFT
) - 1)) >> HIST_DIV_SHIFT
;
422 return div64_u64(val1
, operand2
->constant
);
425 static u64
hist_field_mult(struct hist_field
*hist_field
,
426 struct tracing_map_elt
*elt
,
427 struct trace_buffer
*buffer
,
428 struct ring_buffer_event
*rbe
,
431 struct hist_field
*operand1
= hist_field
->operands
[0];
432 struct hist_field
*operand2
= hist_field
->operands
[1];
434 u64 val1
= hist_fn_call(operand1
, elt
, buffer
, rbe
, event
);
435 u64 val2
= hist_fn_call(operand2
, elt
, buffer
, rbe
, event
);
440 static u64
hist_field_unary_minus(struct hist_field
*hist_field
,
441 struct tracing_map_elt
*elt
,
442 struct trace_buffer
*buffer
,
443 struct ring_buffer_event
*rbe
,
446 struct hist_field
*operand
= hist_field
->operands
[0];
448 s64 sval
= (s64
)hist_fn_call(operand
, elt
, buffer
, rbe
, event
);
449 u64 val
= (u64
)-sval
;
454 #define DEFINE_HIST_FIELD_FN(type) \
455 static u64 hist_field_##type(struct hist_field *hist_field, \
456 struct tracing_map_elt *elt, \
457 struct trace_buffer *buffer, \
458 struct ring_buffer_event *rbe, \
461 type *addr = (type *)(event + hist_field->field->offset); \
463 return (u64)(unsigned long)*addr; \
466 DEFINE_HIST_FIELD_FN(s64
);
467 DEFINE_HIST_FIELD_FN(u64
);
468 DEFINE_HIST_FIELD_FN(s32
);
469 DEFINE_HIST_FIELD_FN(u32
);
470 DEFINE_HIST_FIELD_FN(s16
);
471 DEFINE_HIST_FIELD_FN(u16
);
472 DEFINE_HIST_FIELD_FN(s8
);
473 DEFINE_HIST_FIELD_FN(u8
);
475 #define for_each_hist_field(i, hist_data) \
476 for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)
478 #define for_each_hist_val_field(i, hist_data) \
479 for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)
481 #define for_each_hist_key_field(i, hist_data) \
482 for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)
484 #define HITCOUNT_IDX 0
485 #define HIST_KEY_SIZE_MAX (MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
487 enum hist_field_flags
{
488 HIST_FIELD_FL_HITCOUNT
= 1 << 0,
489 HIST_FIELD_FL_KEY
= 1 << 1,
490 HIST_FIELD_FL_STRING
= 1 << 2,
491 HIST_FIELD_FL_HEX
= 1 << 3,
492 HIST_FIELD_FL_SYM
= 1 << 4,
493 HIST_FIELD_FL_SYM_OFFSET
= 1 << 5,
494 HIST_FIELD_FL_EXECNAME
= 1 << 6,
495 HIST_FIELD_FL_SYSCALL
= 1 << 7,
496 HIST_FIELD_FL_STACKTRACE
= 1 << 8,
497 HIST_FIELD_FL_LOG2
= 1 << 9,
498 HIST_FIELD_FL_TIMESTAMP
= 1 << 10,
499 HIST_FIELD_FL_TIMESTAMP_USECS
= 1 << 11,
500 HIST_FIELD_FL_VAR
= 1 << 12,
501 HIST_FIELD_FL_EXPR
= 1 << 13,
502 HIST_FIELD_FL_VAR_REF
= 1 << 14,
503 HIST_FIELD_FL_CPU
= 1 << 15,
504 HIST_FIELD_FL_ALIAS
= 1 << 16,
505 HIST_FIELD_FL_BUCKET
= 1 << 17,
506 HIST_FIELD_FL_CONST
= 1 << 18,
507 HIST_FIELD_FL_PERCENT
= 1 << 19,
508 HIST_FIELD_FL_GRAPH
= 1 << 20,
513 char *name
[TRACING_MAP_VARS_MAX
];
514 char *expr
[TRACING_MAP_VARS_MAX
];
517 struct hist_trigger_attrs
{
528 unsigned int map_bits
;
530 char *assignment_str
[TRACING_MAP_VARS_MAX
];
531 unsigned int n_assignments
;
533 char *action_str
[HIST_ACTIONS_MAX
];
534 unsigned int n_actions
;
536 struct var_defs var_defs
;
540 struct hist_field
*var
;
541 struct hist_field
*val
;
544 struct field_var_hist
{
545 struct hist_trigger_data
*hist_data
;
549 struct hist_trigger_data
{
550 struct hist_field
*fields
[HIST_FIELDS_MAX
];
553 unsigned int n_fields
;
555 unsigned int n_var_str
;
556 unsigned int key_size
;
557 struct tracing_map_sort_key sort_keys
[TRACING_MAP_SORT_KEYS_MAX
];
558 unsigned int n_sort_keys
;
559 struct trace_event_file
*event_file
;
560 struct hist_trigger_attrs
*attrs
;
561 struct tracing_map
*map
;
562 bool enable_timestamps
;
564 struct hist_field
*var_refs
[TRACING_MAP_VARS_MAX
];
565 unsigned int n_var_refs
;
567 struct action_data
*actions
[HIST_ACTIONS_MAX
];
568 unsigned int n_actions
;
570 struct field_var
*field_vars
[SYNTH_FIELDS_MAX
];
571 unsigned int n_field_vars
;
572 unsigned int n_field_var_str
;
573 struct field_var_hist
*field_var_hists
[SYNTH_FIELDS_MAX
];
574 unsigned int n_field_var_hists
;
576 struct field_var
*save_vars
[SYNTH_FIELDS_MAX
];
577 unsigned int n_save_vars
;
578 unsigned int n_save_var_str
;
583 typedef void (*action_fn_t
) (struct hist_trigger_data
*hist_data
,
584 struct tracing_map_elt
*elt
,
585 struct trace_buffer
*buffer
, void *rec
,
586 struct ring_buffer_event
*rbe
, void *key
,
587 struct action_data
*data
, u64
*var_ref_vals
);
589 typedef bool (*check_track_val_fn_t
) (u64 track_val
, u64 var_val
);
604 enum handler_id handler
;
605 enum action_id action
;
609 unsigned int n_params
;
610 char *params
[SYNTH_FIELDS_MAX
];
613 * When a histogram trigger is hit, the values of any
614 * references to variables, including variables being passed
615 * as parameters to synthetic events, are collected into a
616 * var_ref_vals array. This var_ref_idx array is an array of
617 * indices into the var_ref_vals array, one for each synthetic
618 * event param, and is passed to the synthetic event
621 unsigned int var_ref_idx
[SYNTH_FIELDS_MAX
];
622 struct synth_event
*synth_event
;
623 bool use_trace_keyword
;
624 char *synth_event_name
;
634 * var_str contains the $-unstripped variable
635 * name referenced by var_ref, and used when
636 * printing the action. Because var_ref
637 * creation is deferred to create_actions(),
638 * we need a per-action way to save it until
639 * then, thus var_str.
644 * var_ref refers to the variable being
645 * tracked e.g onmax($var).
647 struct hist_field
*var_ref
;
650 * track_var contains the 'invisible' tracking
651 * variable created to keep the current
654 struct hist_field
*track_var
;
656 check_track_val_fn_t check_val
;
657 action_fn_t save_data
;
666 unsigned int key_len
;
668 struct tracing_map_elt elt
;
670 struct action_data
*action_data
;
671 struct hist_trigger_data
*hist_data
;
674 struct hist_elt_data
{
677 char **field_var_str
;
681 struct snapshot_context
{
682 struct tracing_map_elt
*elt
;
687 * Returns the specific division function to use if the divisor
688 * is constant. This avoids extra branches when the trigger is hit.
690 static enum hist_field_fn
hist_field_get_div_fn(struct hist_field
*divisor
)
692 u64 div
= divisor
->constant
;
694 if (!(div
& (div
- 1)))
695 return HIST_FIELD_FN_DIV_POWER2
;
697 /* If the divisor is too large, do a regular division */
698 if (div
> (1 << HIST_DIV_SHIFT
))
699 return HIST_FIELD_FN_DIV_NOT_POWER2
;
701 divisor
->div_multiplier
= div64_u64((u64
)(1 << HIST_DIV_SHIFT
), div
);
702 return HIST_FIELD_FN_DIV_MULT_SHIFT
;
705 static void track_data_free(struct track_data
*track_data
)
707 struct hist_elt_data
*elt_data
;
712 kfree(track_data
->key
);
714 elt_data
= track_data
->elt
.private_data
;
716 kfree(elt_data
->comm
);
723 static struct track_data
*track_data_alloc(unsigned int key_len
,
724 struct action_data
*action_data
,
725 struct hist_trigger_data
*hist_data
)
727 struct track_data
*data
= kzalloc(sizeof(*data
), GFP_KERNEL
);
728 struct hist_elt_data
*elt_data
;
731 return ERR_PTR(-ENOMEM
);
733 data
->key
= kzalloc(key_len
, GFP_KERNEL
);
735 track_data_free(data
);
736 return ERR_PTR(-ENOMEM
);
739 data
->key_len
= key_len
;
740 data
->action_data
= action_data
;
741 data
->hist_data
= hist_data
;
743 elt_data
= kzalloc(sizeof(*elt_data
), GFP_KERNEL
);
745 track_data_free(data
);
746 return ERR_PTR(-ENOMEM
);
749 data
->elt
.private_data
= elt_data
;
751 elt_data
->comm
= kzalloc(TASK_COMM_LEN
, GFP_KERNEL
);
752 if (!elt_data
->comm
) {
753 track_data_free(data
);
754 return ERR_PTR(-ENOMEM
);
760 #define HIST_PREFIX "hist:"
762 static char *last_cmd
;
763 static char last_cmd_loc
[MAX_FILTER_STR_VAL
];
765 static int errpos(char *str
)
767 if (!str
|| !last_cmd
)
770 return err_pos(last_cmd
, str
);
773 static void last_cmd_set(struct trace_event_file
*file
, char *str
)
775 const char *system
= NULL
, *name
= NULL
;
776 struct trace_event_call
*call
;
783 last_cmd
= kasprintf(GFP_KERNEL
, HIST_PREFIX
"%s", str
);
788 call
= file
->event_call
;
789 system
= call
->class->system
;
791 name
= trace_event_name(call
);
798 snprintf(last_cmd_loc
, MAX_FILTER_STR_VAL
, HIST_PREFIX
"%s:%s", system
, name
);
801 static void hist_err(struct trace_array
*tr
, u8 err_type
, u16 err_pos
)
806 tracing_log_err(tr
, last_cmd_loc
, last_cmd
, err_text
,
810 static void hist_err_clear(void)
814 last_cmd_loc
[0] = '\0';
817 typedef void (*synth_probe_func_t
) (void *__data
, u64
*var_ref_vals
,
818 unsigned int *var_ref_idx
);
820 static inline void trace_synth(struct synth_event
*event
, u64
*var_ref_vals
,
821 unsigned int *var_ref_idx
)
823 struct tracepoint
*tp
= event
->tp
;
825 if (unlikely(atomic_read(&tp
->key
.enabled
) > 0)) {
826 struct tracepoint_func
*probe_func_ptr
;
827 synth_probe_func_t probe_func
;
830 if (!(cpu_online(raw_smp_processor_id())))
833 probe_func_ptr
= rcu_dereference_sched((tp
)->funcs
);
834 if (probe_func_ptr
) {
836 probe_func
= probe_func_ptr
->func
;
837 __data
= probe_func_ptr
->data
;
838 probe_func(__data
, var_ref_vals
, var_ref_idx
);
839 } while ((++probe_func_ptr
)->func
);
844 static void action_trace(struct hist_trigger_data
*hist_data
,
845 struct tracing_map_elt
*elt
,
846 struct trace_buffer
*buffer
, void *rec
,
847 struct ring_buffer_event
*rbe
, void *key
,
848 struct action_data
*data
, u64
*var_ref_vals
)
850 struct synth_event
*event
= data
->synth_event
;
852 trace_synth(event
, var_ref_vals
, data
->var_ref_idx
);
855 struct hist_var_data
{
856 struct list_head list
;
857 struct hist_trigger_data
*hist_data
;
860 static u64
hist_field_timestamp(struct hist_field
*hist_field
,
861 struct tracing_map_elt
*elt
,
862 struct trace_buffer
*buffer
,
863 struct ring_buffer_event
*rbe
,
866 struct hist_trigger_data
*hist_data
= hist_field
->hist_data
;
867 struct trace_array
*tr
= hist_data
->event_file
->tr
;
869 u64 ts
= ring_buffer_event_time_stamp(buffer
, rbe
);
871 if (hist_data
->attrs
->ts_in_usecs
&& trace_clock_in_ns(tr
))
877 static u64
hist_field_cpu(struct hist_field
*hist_field
,
878 struct tracing_map_elt
*elt
,
879 struct trace_buffer
*buffer
,
880 struct ring_buffer_event
*rbe
,
883 int cpu
= smp_processor_id();
889 * check_field_for_var_ref - Check if a VAR_REF field references a variable
890 * @hist_field: The VAR_REF field to check
891 * @var_data: The hist trigger that owns the variable
892 * @var_idx: The trigger variable identifier
894 * Check the given VAR_REF field to see whether or not it references
895 * the given variable associated with the given trigger.
897 * Return: The VAR_REF field if it does reference the variable, NULL if not
899 static struct hist_field
*
900 check_field_for_var_ref(struct hist_field
*hist_field
,
901 struct hist_trigger_data
*var_data
,
902 unsigned int var_idx
)
904 WARN_ON(!(hist_field
&& hist_field
->flags
& HIST_FIELD_FL_VAR_REF
));
906 if (hist_field
&& hist_field
->var
.idx
== var_idx
&&
907 hist_field
->var
.hist_data
== var_data
)
914 * find_var_ref - Check if a trigger has a reference to a trigger variable
915 * @hist_data: The hist trigger that might have a reference to the variable
916 * @var_data: The hist trigger that owns the variable
917 * @var_idx: The trigger variable identifier
919 * Check the list of var_refs[] on the first hist trigger to see
920 * whether any of them are references to the variable on the second
923 * Return: The VAR_REF field referencing the variable if so, NULL if not
925 static struct hist_field
*find_var_ref(struct hist_trigger_data
*hist_data
,
926 struct hist_trigger_data
*var_data
,
927 unsigned int var_idx
)
929 struct hist_field
*hist_field
;
932 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
933 hist_field
= hist_data
->var_refs
[i
];
934 if (check_field_for_var_ref(hist_field
, var_data
, var_idx
))
942 * find_any_var_ref - Check if there is a reference to a given trigger variable
943 * @hist_data: The hist trigger
944 * @var_idx: The trigger variable identifier
946 * Check to see whether the given variable is currently referenced by
949 * The trigger the variable is defined on is explicitly excluded - the
950 * assumption being that a self-reference doesn't prevent a trigger
951 * from being removed.
953 * Return: The VAR_REF field referencing the variable if so, NULL if not
955 static struct hist_field
*find_any_var_ref(struct hist_trigger_data
*hist_data
,
956 unsigned int var_idx
)
958 struct trace_array
*tr
= hist_data
->event_file
->tr
;
959 struct hist_field
*found
= NULL
;
960 struct hist_var_data
*var_data
;
962 list_for_each_entry(var_data
, &tr
->hist_vars
, list
) {
963 if (var_data
->hist_data
== hist_data
)
965 found
= find_var_ref(var_data
->hist_data
, hist_data
, var_idx
);
974 * check_var_refs - Check if there is a reference to any of trigger's variables
975 * @hist_data: The hist trigger
977 * A trigger can define one or more variables. If any one of them is
978 * currently referenced by any other trigger, this function will
981 * Typically used to determine whether or not a trigger can be removed
982 * - if there are any references to a trigger's variables, it cannot.
984 * Return: True if there is a reference to any of trigger's variables
986 static bool check_var_refs(struct hist_trigger_data
*hist_data
)
988 struct hist_field
*field
;
992 for_each_hist_field(i
, hist_data
) {
993 field
= hist_data
->fields
[i
];
994 if (field
&& field
->flags
& HIST_FIELD_FL_VAR
) {
995 if (find_any_var_ref(hist_data
, field
->var
.idx
)) {
1005 static struct hist_var_data
*find_hist_vars(struct hist_trigger_data
*hist_data
)
1007 struct trace_array
*tr
= hist_data
->event_file
->tr
;
1008 struct hist_var_data
*var_data
, *found
= NULL
;
1010 list_for_each_entry(var_data
, &tr
->hist_vars
, list
) {
1011 if (var_data
->hist_data
== hist_data
) {
1020 static bool field_has_hist_vars(struct hist_field
*hist_field
,
1031 if (hist_field
->flags
& HIST_FIELD_FL_VAR
||
1032 hist_field
->flags
& HIST_FIELD_FL_VAR_REF
)
1035 for (i
= 0; i
< HIST_FIELD_OPERANDS_MAX
; i
++) {
1036 struct hist_field
*operand
;
1038 operand
= hist_field
->operands
[i
];
1039 if (field_has_hist_vars(operand
, level
+ 1))
1046 static bool has_hist_vars(struct hist_trigger_data
*hist_data
)
1048 struct hist_field
*hist_field
;
1051 for_each_hist_field(i
, hist_data
) {
1052 hist_field
= hist_data
->fields
[i
];
1053 if (field_has_hist_vars(hist_field
, 0))
1060 static int save_hist_vars(struct hist_trigger_data
*hist_data
)
1062 struct trace_array
*tr
= hist_data
->event_file
->tr
;
1063 struct hist_var_data
*var_data
;
1065 var_data
= find_hist_vars(hist_data
);
1069 if (tracing_check_open_get_tr(tr
))
1072 var_data
= kzalloc(sizeof(*var_data
), GFP_KERNEL
);
1074 trace_array_put(tr
);
1078 var_data
->hist_data
= hist_data
;
1079 list_add(&var_data
->list
, &tr
->hist_vars
);
1084 static void remove_hist_vars(struct hist_trigger_data
*hist_data
)
1086 struct trace_array
*tr
= hist_data
->event_file
->tr
;
1087 struct hist_var_data
*var_data
;
1089 var_data
= find_hist_vars(hist_data
);
1093 if (WARN_ON(check_var_refs(hist_data
)))
1096 list_del(&var_data
->list
);
1100 trace_array_put(tr
);
1103 static struct hist_field
*find_var_field(struct hist_trigger_data
*hist_data
,
1104 const char *var_name
)
1106 struct hist_field
*hist_field
, *found
= NULL
;
1109 for_each_hist_field(i
, hist_data
) {
1110 hist_field
= hist_data
->fields
[i
];
1111 if (hist_field
&& hist_field
->flags
& HIST_FIELD_FL_VAR
&&
1112 strcmp(hist_field
->var
.name
, var_name
) == 0) {
1121 static struct hist_field
*find_var(struct hist_trigger_data
*hist_data
,
1122 struct trace_event_file
*file
,
1123 const char *var_name
)
1125 struct hist_trigger_data
*test_data
;
1126 struct event_trigger_data
*test
;
1127 struct hist_field
*hist_field
;
1129 lockdep_assert_held(&event_mutex
);
1131 hist_field
= find_var_field(hist_data
, var_name
);
1135 list_for_each_entry(test
, &file
->triggers
, list
) {
1136 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
1137 test_data
= test
->private_data
;
1138 hist_field
= find_var_field(test_data
, var_name
);
1147 static struct trace_event_file
*find_var_file(struct trace_array
*tr
,
1152 struct hist_trigger_data
*var_hist_data
;
1153 struct hist_var_data
*var_data
;
1154 struct trace_event_file
*file
, *found
= NULL
;
1157 return find_event_file(tr
, system
, event_name
);
1159 list_for_each_entry(var_data
, &tr
->hist_vars
, list
) {
1160 var_hist_data
= var_data
->hist_data
;
1161 file
= var_hist_data
->event_file
;
1165 if (find_var_field(var_hist_data
, var_name
)) {
1167 hist_err(tr
, HIST_ERR_VAR_NOT_UNIQUE
, errpos(var_name
));
1178 static struct hist_field
*find_file_var(struct trace_event_file
*file
,
1179 const char *var_name
)
1181 struct hist_trigger_data
*test_data
;
1182 struct event_trigger_data
*test
;
1183 struct hist_field
*hist_field
;
1185 lockdep_assert_held(&event_mutex
);
1187 list_for_each_entry(test
, &file
->triggers
, list
) {
1188 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
1189 test_data
= test
->private_data
;
1190 hist_field
= find_var_field(test_data
, var_name
);
1199 static struct hist_field
*
1200 find_match_var(struct hist_trigger_data
*hist_data
, char *var_name
)
1202 struct trace_array
*tr
= hist_data
->event_file
->tr
;
1203 struct hist_field
*hist_field
, *found
= NULL
;
1204 struct trace_event_file
*file
;
1207 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
1208 struct action_data
*data
= hist_data
->actions
[i
];
1210 if (data
->handler
== HANDLER_ONMATCH
) {
1211 char *system
= data
->match_data
.event_system
;
1212 char *event_name
= data
->match_data
.event
;
1214 file
= find_var_file(tr
, system
, event_name
, var_name
);
1217 hist_field
= find_file_var(file
, var_name
);
1220 hist_err(tr
, HIST_ERR_VAR_NOT_UNIQUE
,
1222 return ERR_PTR(-EINVAL
);
1232 static struct hist_field
*find_event_var(struct hist_trigger_data
*hist_data
,
1237 struct trace_array
*tr
= hist_data
->event_file
->tr
;
1238 struct hist_field
*hist_field
= NULL
;
1239 struct trace_event_file
*file
;
1241 if (!system
|| !event_name
) {
1242 hist_field
= find_match_var(hist_data
, var_name
);
1243 if (IS_ERR(hist_field
))
1249 file
= find_var_file(tr
, system
, event_name
, var_name
);
1253 hist_field
= find_file_var(file
, var_name
);
1258 static u64
hist_field_var_ref(struct hist_field
*hist_field
,
1259 struct tracing_map_elt
*elt
,
1260 struct trace_buffer
*buffer
,
1261 struct ring_buffer_event
*rbe
,
1264 struct hist_elt_data
*elt_data
;
1267 if (WARN_ON_ONCE(!elt
))
1270 elt_data
= elt
->private_data
;
1271 var_val
= elt_data
->var_ref_vals
[hist_field
->var_ref_idx
];
1276 static bool resolve_var_refs(struct hist_trigger_data
*hist_data
, void *key
,
1277 u64
*var_ref_vals
, bool self
)
1279 struct hist_trigger_data
*var_data
;
1280 struct tracing_map_elt
*var_elt
;
1281 struct hist_field
*hist_field
;
1282 unsigned int i
, var_idx
;
1283 bool resolved
= true;
1286 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
1287 hist_field
= hist_data
->var_refs
[i
];
1288 var_idx
= hist_field
->var
.idx
;
1289 var_data
= hist_field
->var
.hist_data
;
1291 if (var_data
== NULL
) {
1296 if ((self
&& var_data
!= hist_data
) ||
1297 (!self
&& var_data
== hist_data
))
1300 var_elt
= tracing_map_lookup(var_data
->map
, key
);
1306 if (!tracing_map_var_set(var_elt
, var_idx
)) {
1311 if (self
|| !hist_field
->read_once
)
1312 var_val
= tracing_map_read_var(var_elt
, var_idx
);
1314 var_val
= tracing_map_read_var_once(var_elt
, var_idx
);
1316 var_ref_vals
[i
] = var_val
;
1322 static const char *hist_field_name(struct hist_field
*field
,
1325 const char *field_name
= "";
1327 if (WARN_ON_ONCE(!field
))
1334 field_name
= field
->field
->name
;
1335 else if (field
->flags
& HIST_FIELD_FL_LOG2
||
1336 field
->flags
& HIST_FIELD_FL_ALIAS
||
1337 field
->flags
& HIST_FIELD_FL_BUCKET
)
1338 field_name
= hist_field_name(field
->operands
[0], ++level
);
1339 else if (field
->flags
& HIST_FIELD_FL_CPU
)
1340 field_name
= "common_cpu";
1341 else if (field
->flags
& HIST_FIELD_FL_EXPR
||
1342 field
->flags
& HIST_FIELD_FL_VAR_REF
) {
1343 if (field
->system
) {
1344 static char full_name
[MAX_FILTER_STR_VAL
];
1346 strcat(full_name
, field
->system
);
1347 strcat(full_name
, ".");
1348 strcat(full_name
, field
->event_name
);
1349 strcat(full_name
, ".");
1350 strcat(full_name
, field
->name
);
1351 field_name
= full_name
;
1353 field_name
= field
->name
;
1354 } else if (field
->flags
& HIST_FIELD_FL_TIMESTAMP
)
1355 field_name
= "common_timestamp";
1356 else if (field
->flags
& HIST_FIELD_FL_STACKTRACE
) {
1358 field_name
= field
->field
->name
;
1360 field_name
= "common_stacktrace";
1361 } else if (field
->flags
& HIST_FIELD_FL_HITCOUNT
)
1362 field_name
= "hitcount";
1364 if (field_name
== NULL
)
1370 static enum hist_field_fn
select_value_fn(int field_size
, int field_is_signed
)
1372 switch (field_size
) {
1374 if (field_is_signed
)
1375 return HIST_FIELD_FN_S64
;
1377 return HIST_FIELD_FN_U64
;
1379 if (field_is_signed
)
1380 return HIST_FIELD_FN_S32
;
1382 return HIST_FIELD_FN_U32
;
1384 if (field_is_signed
)
1385 return HIST_FIELD_FN_S16
;
1387 return HIST_FIELD_FN_U16
;
1389 if (field_is_signed
)
1390 return HIST_FIELD_FN_S8
;
1392 return HIST_FIELD_FN_U8
;
1395 return HIST_FIELD_FN_NOP
;
1398 static int parse_map_size(char *str
)
1400 unsigned long size
, map_bits
;
1403 ret
= kstrtoul(str
, 0, &size
);
1407 map_bits
= ilog2(roundup_pow_of_two(size
));
1408 if (map_bits
< TRACING_MAP_BITS_MIN
||
1409 map_bits
> TRACING_MAP_BITS_MAX
)
/*
 * Free a hist_trigger_attrs and every string it owns.  Safe to call with
 * NULL.  All owned strings were allocated with kstrdup(), so plain kfree()
 * (a no-op on NULL members) is sufficient.
 */
static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs)
{
	unsigned int i;

	if (!attrs)
		return;

	for (i = 0; i < attrs->n_assignments; i++)
		kfree(attrs->assignment_str[i]);

	for (i = 0; i < attrs->n_actions; i++)
		kfree(attrs->action_str[i]);

	kfree(attrs->name);
	kfree(attrs->sort_key_str);
	kfree(attrs->keys_str);
	kfree(attrs->vals_str);
	kfree(attrs->clock);
	kfree(attrs);
}
/*
 * If @str is an action clause (onmatch(/onmax(/onchange(), save a copy of
 * it in attrs->action_str[] for later parsing.  Unrecognized strings are
 * silently ignored (return 0); only allocation failure returns an error.
 * Note: when the action table is already full, 0 is returned and the
 * action is dropped — presumably callers reject the excess elsewhere.
 */
static int parse_action(char *str, struct hist_trigger_attrs *attrs)
{
	int ret = 0;

	if (attrs->n_actions >= HIST_ACTIONS_MAX)
		return ret;

	if ((str_has_prefix(str, "onmatch(")) ||
	    (str_has_prefix(str, "onmax(")) ||
	    (str_has_prefix(str, "onchange("))) {
		attrs->action_str[attrs->n_actions] = kstrdup(str, GFP_KERNEL);
		if (!attrs->action_str[attrs->n_actions]) {
			ret = -ENOMEM;
			return ret;
		}
		attrs->n_actions++;
		ret = 0;
	}
	return ret;
}
/*
 * Parse one "lhs=rhs" clause of a hist trigger string and record it in
 * @attrs.  Recognized keys: key(s)=, val(s)/values=, sort=, name=,
 * clock= and size=.  Anything else is treated as a variable assignment
 * and stored verbatim in assignment_str[] for later processing.
 * Returns 0 on success or a negative errno.
 */
static int parse_assignment(struct trace_array *tr,
			    char *str, struct hist_trigger_attrs *attrs)
{
	int len, ret = 0;

	if ((len = str_has_prefix(str, "key=")) ||
	    (len = str_has_prefix(str, "keys="))) {
		/* Store only the RHS (str + len skips the prefix). */
		attrs->keys_str = kstrdup(str + len, GFP_KERNEL);
		if (!attrs->keys_str) {
			ret = -ENOMEM;
			goto out;
		}
	} else if ((len = str_has_prefix(str, "val=")) ||
		   (len = str_has_prefix(str, "vals=")) ||
		   (len = str_has_prefix(str, "values="))) {
		attrs->vals_str = kstrdup(str + len, GFP_KERNEL);
		if (!attrs->vals_str) {
			ret = -ENOMEM;
			goto out;
		}
	} else if ((len = str_has_prefix(str, "sort="))) {
		attrs->sort_key_str = kstrdup(str + len, GFP_KERNEL);
		if (!attrs->sort_key_str) {
			ret = -ENOMEM;
			goto out;
		}
	} else if (str_has_prefix(str, "name=")) {
		/* Keep the whole "name=..." string, prefix included. */
		attrs->name = kstrdup(str, GFP_KERNEL);
		if (!attrs->name) {
			ret = -ENOMEM;
			goto out;
		}
	} else if ((len = str_has_prefix(str, "clock="))) {
		str += len;

		/* Clock names may carry surrounding whitespace; strip it. */
		str = strstrip(str);
		attrs->clock = kstrdup(str, GFP_KERNEL);
		if (!attrs->clock) {
			ret = -ENOMEM;
			goto out;
		}
	} else if ((len = str_has_prefix(str, "size="))) {
		int map_bits = parse_map_size(str + len);

		if (map_bits < 0) {
			ret = map_bits;
			goto out;
		}
		attrs->map_bits = map_bits;
	} else {
		char *assignment;

		/* Unrecognized key: treat as a variable definition. */
		if (attrs->n_assignments == TRACING_MAP_VARS_MAX) {
			hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(str));
			ret = -EINVAL;
			goto out;
		}

		assignment = kstrdup(str, GFP_KERNEL);
		if (!assignment) {
			ret = -ENOMEM;
			goto out;
		}

		attrs->assignment_str[attrs->n_assignments++] = assignment;
	}
 out:
	return ret;
}
/*
 * Split a colon-separated hist trigger string into a newly allocated
 * hist_trigger_attrs.  "lhs=rhs" clauses go through parse_assignment(),
 * bare keywords (pause/cont/clear/nohitcount) set flags, and everything
 * else is handed to parse_action().  A key specification is mandatory;
 * the clock defaults to "global" when unspecified.
 *
 * Returns the attrs on success or an ERR_PTR(); on failure everything
 * allocated so far is released via destroy_hist_trigger_attrs().
 */
static struct hist_trigger_attrs *
parse_hist_trigger_attrs(struct trace_array *tr, char *trigger_str)
{
	struct hist_trigger_attrs *attrs;
	int ret = 0;

	attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return ERR_PTR(-ENOMEM);

	while (trigger_str) {
		char *str = strsep(&trigger_str, ":");
		char *rhs;

		rhs = strchr(str, '=');
		if (rhs) {
			/* Reject "lhs=" with an empty right-hand side. */
			if (!strlen(++rhs)) {
				ret = -EINVAL;
				hist_err(tr, HIST_ERR_EMPTY_ASSIGNMENT, errpos(str));
				goto free;
			}
			ret = parse_assignment(tr, str, attrs);
			if (ret)
				goto free;
		} else if (strcmp(str, "nohitcount") == 0 ||
			   strcmp(str, "NOHC") == 0)
			attrs->no_hitcount = true;
		else if (strcmp(str, "pause") == 0)
			attrs->pause = true;
		else if ((strcmp(str, "cont") == 0) ||
			 (strcmp(str, "continue") == 0))
			attrs->cont = true;
		else if (strcmp(str, "clear") == 0)
			attrs->clear = true;
		else {
			ret = parse_action(str, attrs);
			if (ret)
				goto free;
		}
	}

	/* A hist trigger without keys is meaningless. */
	if (!attrs->keys_str) {
		ret = -EINVAL;
		goto free;
	}

	if (!attrs->clock) {
		attrs->clock = kstrdup("global", GFP_KERNEL);
		if (!attrs->clock) {
			ret = -ENOMEM;
			goto free;
		}
	}

	return attrs;
 free:
	destroy_hist_trigger_attrs(attrs);

	return ERR_PTR(ret);
}
/*
 * Copy @task's command name into @comm (a TASK_COMM_LEN buffer).
 * PID 0 (idle) and impossible negative PIDs get placeholder names.
 */
static inline void save_comm(char *comm, struct task_struct *task)
{
	if (!task->pid) {
		strcpy(comm, "<idle>");
		return;
	}

	/* A negative PID should never happen; warn once and mark it. */
	if (WARN_ON_ONCE(task->pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	/*
	 * task->comm is NUL-terminated within TASK_COMM_LEN, so strncpy()
	 * cannot leave @comm unterminated here.
	 */
	strncpy(comm, task->comm, TASK_COMM_LEN);
}
/*
 * Free a per-element hist_elt_data: each field-variable string buffer,
 * the pointer array itself, the saved comm buffer, and the struct.
 */
static void hist_elt_data_free(struct hist_elt_data *elt_data)
{
	unsigned int i;

	for (i = 0; i < elt_data->n_field_var_str; i++)
		kfree(elt_data->field_var_str[i]);

	kfree(elt_data->field_var_str);

	kfree(elt_data->comm);
	kfree(elt_data);
}
1618 static void hist_trigger_elt_data_free(struct tracing_map_elt
*elt
)
1620 struct hist_elt_data
*elt_data
= elt
->private_data
;
1622 hist_elt_data_free(elt_data
);
/*
 * tracing_map elt_alloc callback: allocate the per-element private data.
 *
 * A comm buffer is allocated only when some field uses the execname
 * modifier, and one STR_VAR_LEN_MAX buffer is allocated per string
 * variable (field vars + save vars + plain string vars).  On any failure
 * everything allocated so far is released and a negative errno returned.
 */
static int hist_trigger_elt_data_alloc(struct tracing_map_elt *elt)
{
	struct hist_trigger_data *hist_data = elt->map->private_data;
	unsigned int size = TASK_COMM_LEN;
	struct hist_elt_data *elt_data;
	struct hist_field *hist_field;
	unsigned int i, n_str;

	elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
	if (!elt_data)
		return -ENOMEM;

	for_each_hist_field(i, hist_data) {
		hist_field = hist_data->fields[i];

		if (hist_field->flags & HIST_FIELD_FL_EXECNAME) {
			elt_data->comm = kzalloc(size, GFP_KERNEL);
			if (!elt_data->comm) {
				kfree(elt_data);
				return -ENOMEM;
			}
			break;
		}
	}

	n_str = hist_data->n_field_var_str + hist_data->n_save_var_str +
		hist_data->n_var_str;
	if (n_str > SYNTH_FIELDS_MAX) {
		hist_elt_data_free(elt_data);
		return -EINVAL;
	}

	/* String variable storage must stay u64-aligned. */
	BUILD_BUG_ON(STR_VAR_LEN_MAX & (sizeof(u64) - 1));

	size = STR_VAR_LEN_MAX;

	elt_data->field_var_str = kcalloc(n_str, sizeof(char *), GFP_KERNEL);
	if (!elt_data->field_var_str) {
		hist_elt_data_free(elt_data);
		return -EINVAL;
	}
	elt_data->n_field_var_str = n_str;

	for (i = 0; i < n_str; i++) {
		elt_data->field_var_str[i] = kzalloc(size, GFP_KERNEL);
		if (!elt_data->field_var_str[i]) {
			hist_elt_data_free(elt_data);
			return -ENOMEM;
		}
	}

	elt->private_data = elt_data;

	return 0;
}
/*
 * tracing_map elt_init callback: snapshot the current task's comm into
 * the element, if a comm buffer was allocated (execname modifier used).
 */
static void hist_trigger_elt_data_init(struct tracing_map_elt *elt)
{
	struct hist_elt_data *elt_data = elt->private_data;

	if (elt_data->comm)
		save_comm(elt_data->comm, current);
}
/* tracing_map element lifecycle callbacks for hist triggers. */
static const struct tracing_map_ops hist_trigger_elt_data_ops = {
	.elt_alloc	= hist_trigger_elt_data_alloc,
	.elt_free	= hist_trigger_elt_data_free,
	.elt_init	= hist_trigger_elt_data_init,
};
/*
 * Return the modifier-suffix string (".hex", ".sym", ...) for a field's
 * flags, or NULL when the field carries no printable modifier.  Only the
 * first matching flag is reported.
 */
static const char *get_hist_field_flags(struct hist_field *hist_field)
{
	const char *flags_str = NULL;

	if (hist_field->flags & HIST_FIELD_FL_HEX)
		flags_str = "hex";
	else if (hist_field->flags & HIST_FIELD_FL_SYM)
		flags_str = "sym";
	else if (hist_field->flags & HIST_FIELD_FL_SYM_OFFSET)
		flags_str = "sym-offset";
	else if (hist_field->flags & HIST_FIELD_FL_EXECNAME)
		flags_str = "execname";
	else if (hist_field->flags & HIST_FIELD_FL_SYSCALL)
		flags_str = "syscall";
	else if (hist_field->flags & HIST_FIELD_FL_LOG2)
		flags_str = "log2";
	else if (hist_field->flags & HIST_FIELD_FL_BUCKET)
		flags_str = "buckets";
	else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP_USECS)
		flags_str = "usecs";
	else if (hist_field->flags & HIST_FIELD_FL_PERCENT)
		flags_str = "percent";
	else if (hist_field->flags & HIST_FIELD_FL_GRAPH)
		flags_str = "graph";
	else if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
		flags_str = "stacktrace";

	return flags_str;
}
/*
 * Append a printable representation of one leaf field to @expr:
 * a '$' prefix for variable references, the literal value for constants,
 * then the field name and any ".modifier" suffix.  @expr must be large
 * enough (callers use a MAX_FILTER_STR_VAL buffer).
 */
static void expr_field_str(struct hist_field *field, char *expr)
{
	if (field->flags & HIST_FIELD_FL_VAR_REF)
		strcat(expr, "$");
	else if (field->flags & HIST_FIELD_FL_CONST) {
		char str[HIST_CONST_DIGITS_MAX];

		snprintf(str, HIST_CONST_DIGITS_MAX, "%llu", field->constant);
		strcat(expr, str);
	}

	strcat(expr, hist_field_name(field, 0));

	/* Var refs print no modifier suffix. */
	if (field->flags && !(field->flags & HIST_FIELD_FL_VAR_REF)) {
		const char *flags_str = get_hist_field_flags(field);

		if (flags_str) {
			strcat(expr, ".");
			strcat(expr, flags_str);
		}
	}
}
/*
 * Build a newly kzalloc'ed string representation of an expression tree.
 * Leaves print via expr_field_str(); unary minus prints as "-(subexpr)";
 * binary expressions print "op1<operator>op2".  Only one level of
 * sub-expression recursion is rendered (level > 1 returns NULL).
 * Caller frees the returned string.
 */
static char *expr_str(struct hist_field *field, unsigned int level)
{
	char *expr;

	if (level > 1)
		return NULL;

	expr = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
	if (!expr)
		return NULL;

	/* No operands: this is a leaf (field, constant or var ref). */
	if (!field->operands[0]) {
		expr_field_str(field, expr);
		return expr;
	}

	if (field->operator == FIELD_OP_UNARY_MINUS) {
		char *subexpr;

		strcat(expr, "-(");
		subexpr = expr_str(field->operands[0], ++level);
		if (!subexpr) {
			kfree(expr);
			return NULL;
		}
		strcat(expr, subexpr);
		strcat(expr, ")");

		kfree(subexpr);

		return expr;
	}

	expr_field_str(field->operands[0], expr);

	switch (field->operator) {
	case FIELD_OP_MINUS:
		strcat(expr, "-");
		break;
	case FIELD_OP_PLUS:
		strcat(expr, "+");
		break;
	case FIELD_OP_DIV:
		strcat(expr, "/");
		break;
	case FIELD_OP_MULT:
		strcat(expr, "*");
		break;
	default:
		kfree(expr);
		return NULL;
	}

	expr_field_str(field->operands[1], expr);

	return expr;
}
/*
 * Find the root (lowest-precedence, rightmost) arithmetic operator in
 * @str.  If field_op != FIELD_OP_NONE, *sep points to the root operator
 * of the expression tree to be evaluated.
 *
 * Returns the operator id (FIELD_OP_NONE when @str has no operator).
 */
static int contains_operator(char *str, char **sep)
{
	enum field_op_id field_op = FIELD_OP_NONE;
	char *minus_op, *plus_op, *div_op, *mult_op;

	/*
	 * Report the last occurrence of the operators first, so that the
	 * expression is evaluated left to right. This is important since
	 * subtraction and division are not associative.
	 *
	 * e.g
	 *	64/8/4/2 is 1, i.e 64/8/4/2 = ((64/8)/4)/2
	 *	14-7-5-2 is 0, i.e 14-7-5-2 = ((14-7)-5)-2
	 */

	/*
	 * First, find lower precedence addition and subtraction
	 * since the expression will be evaluated recursively.
	 */
	minus_op = strrchr(str, '-');
	if (minus_op) {
		/*
		 * Unary minus is not supported in sub-expressions. If
		 * present, it is always the next root operator.
		 */
		if (minus_op == str) {
			field_op = FIELD_OP_UNARY_MINUS;
			goto out;
		}

		field_op = FIELD_OP_MINUS;
	}

	plus_op = strrchr(str, '+');
	if (plus_op || minus_op) {
		/*
		 * For operators of the same precedence use to rightmost as the
		 * root, so that the expression is evaluated left to right.
		 */
		if (plus_op > minus_op)
			field_op = FIELD_OP_PLUS;
		goto out;
	}

	/*
	 * Multiplication and division have higher precedence than addition and
	 * subtraction.
	 */
	div_op = strrchr(str, '/');
	if (div_op)
		field_op = FIELD_OP_DIV;

	mult_op = strrchr(str, '*');
	/*
	 * For operators of the same precedence use to rightmost as the
	 * root, so that the expression is evaluated left to right.
	 */
	if (mult_op > div_op)
		field_op = FIELD_OP_MULT;
out:
	/* Report the root operator's position to the caller, if asked. */
	if (sep) {
		switch (field_op) {
		case FIELD_OP_UNARY_MINUS:
		case FIELD_OP_MINUS:
			*sep = minus_op;
			break;
		case FIELD_OP_PLUS:
			*sep = plus_op;
			break;
		case FIELD_OP_DIV:
			*sep = div_op;
			break;
		case FIELD_OP_MULT:
			*sep = mult_op;
			break;
		case FIELD_OP_NONE:
		default:
			*sep = NULL;
			break;
		}
	}

	return field_op;
}
1897 static void get_hist_field(struct hist_field
*hist_field
)
/*
 * Drop one reference on @hist_field and, when this was the final
 * reference, free all owned strings and the struct itself.
 */
static void __destroy_hist_field(struct hist_field *hist_field)
{
	/* Other holders remain; just drop our reference. */
	if (--hist_field->ref > 1)
		return;

	kfree(hist_field->var.name);
	kfree(hist_field->name);

	/* Can likely be a const */
	kfree_const(hist_field->type);

	kfree(hist_field->system);
	kfree(hist_field->event_name);

	kfree(hist_field);
}
/*
 * Recursively destroy @hist_field and its operand sub-fields.
 * Variable references are excluded: they are shared and torn down
 * separately by destroy_hist_fields().  @level bounds the recursion.
 */
static void destroy_hist_field(struct hist_field *hist_field,
			       unsigned int level)
{
	unsigned int i;

	if (level > 3)
		return;

	if (!hist_field)
		return;

	if (hist_field->flags & HIST_FIELD_FL_VAR_REF)
		return; /* var refs will be destroyed separately */

	for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++)
		destroy_hist_field(hist_field->operands[i], level + 1);

	__destroy_hist_field(hist_field);
}
1939 static struct hist_field
*create_hist_field(struct hist_trigger_data
*hist_data
,
1940 struct ftrace_event_field
*field
,
1941 unsigned long flags
,
1944 struct hist_field
*hist_field
;
1946 if (field
&& is_function_field(field
))
1949 hist_field
= kzalloc(sizeof(struct hist_field
), GFP_KERNEL
);
1953 hist_field
->ref
= 1;
1955 hist_field
->hist_data
= hist_data
;
1957 if (flags
& HIST_FIELD_FL_EXPR
|| flags
& HIST_FIELD_FL_ALIAS
)
1958 goto out
; /* caller will populate */
1960 if (flags
& HIST_FIELD_FL_VAR_REF
) {
1961 hist_field
->fn_num
= HIST_FIELD_FN_VAR_REF
;
1965 if (flags
& HIST_FIELD_FL_HITCOUNT
) {
1966 hist_field
->fn_num
= HIST_FIELD_FN_COUNTER
;
1967 hist_field
->size
= sizeof(u64
);
1968 hist_field
->type
= "u64";
1972 if (flags
& HIST_FIELD_FL_CONST
) {
1973 hist_field
->fn_num
= HIST_FIELD_FN_CONST
;
1974 hist_field
->size
= sizeof(u64
);
1975 hist_field
->type
= kstrdup("u64", GFP_KERNEL
);
1976 if (!hist_field
->type
)
1981 if (flags
& HIST_FIELD_FL_STACKTRACE
) {
1983 hist_field
->fn_num
= HIST_FIELD_FN_STACK
;
1985 hist_field
->fn_num
= HIST_FIELD_FN_NOP
;
1986 hist_field
->size
= HIST_STACKTRACE_SIZE
;
1987 hist_field
->type
= kstrdup_const("unsigned long[]", GFP_KERNEL
);
1988 if (!hist_field
->type
)
1993 if (flags
& (HIST_FIELD_FL_LOG2
| HIST_FIELD_FL_BUCKET
)) {
1994 unsigned long fl
= flags
& ~(HIST_FIELD_FL_LOG2
| HIST_FIELD_FL_BUCKET
);
1995 hist_field
->fn_num
= flags
& HIST_FIELD_FL_LOG2
? HIST_FIELD_FN_LOG2
:
1996 HIST_FIELD_FN_BUCKET
;
1997 hist_field
->operands
[0] = create_hist_field(hist_data
, field
, fl
, NULL
);
1998 if (!hist_field
->operands
[0])
2000 hist_field
->size
= hist_field
->operands
[0]->size
;
2001 hist_field
->type
= kstrdup_const(hist_field
->operands
[0]->type
, GFP_KERNEL
);
2002 if (!hist_field
->type
)
2007 if (flags
& HIST_FIELD_FL_TIMESTAMP
) {
2008 hist_field
->fn_num
= HIST_FIELD_FN_TIMESTAMP
;
2009 hist_field
->size
= sizeof(u64
);
2010 hist_field
->type
= "u64";
2014 if (flags
& HIST_FIELD_FL_CPU
) {
2015 hist_field
->fn_num
= HIST_FIELD_FN_CPU
;
2016 hist_field
->size
= sizeof(int);
2017 hist_field
->type
= "unsigned int";
2021 if (WARN_ON_ONCE(!field
))
2024 /* Pointers to strings are just pointers and dangerous to dereference */
2025 if (is_string_field(field
) &&
2026 (field
->filter_type
!= FILTER_PTR_STRING
)) {
2027 flags
|= HIST_FIELD_FL_STRING
;
2029 hist_field
->size
= MAX_FILTER_STR_VAL
;
2030 hist_field
->type
= kstrdup_const(field
->type
, GFP_KERNEL
);
2031 if (!hist_field
->type
)
2034 if (field
->filter_type
== FILTER_STATIC_STRING
) {
2035 hist_field
->fn_num
= HIST_FIELD_FN_STRING
;
2036 hist_field
->size
= field
->size
;
2037 } else if (field
->filter_type
== FILTER_DYN_STRING
) {
2038 hist_field
->fn_num
= HIST_FIELD_FN_DYNSTRING
;
2039 } else if (field
->filter_type
== FILTER_RDYN_STRING
)
2040 hist_field
->fn_num
= HIST_FIELD_FN_RELDYNSTRING
;
2042 hist_field
->fn_num
= HIST_FIELD_FN_PSTRING
;
2044 hist_field
->size
= field
->size
;
2045 hist_field
->is_signed
= field
->is_signed
;
2046 hist_field
->type
= kstrdup_const(field
->type
, GFP_KERNEL
);
2047 if (!hist_field
->type
)
2050 hist_field
->fn_num
= select_value_fn(field
->size
,
2052 if (hist_field
->fn_num
== HIST_FIELD_FN_NOP
) {
2053 destroy_hist_field(hist_field
, 0);
2058 hist_field
->field
= field
;
2059 hist_field
->flags
= flags
;
2062 hist_field
->var
.name
= kstrdup(var_name
, GFP_KERNEL
);
2063 if (!hist_field
->var
.name
)
2069 destroy_hist_field(hist_field
, 0);
/*
 * Tear down all fields of a trigger: first the regular field tree
 * (which skips var refs), then the shared var-ref table directly via
 * __destroy_hist_field().
 */
static void destroy_hist_fields(struct hist_trigger_data *hist_data)
{
	unsigned int i;

	for (i = 0; i < HIST_FIELDS_MAX; i++) {
		if (hist_data->fields[i]) {
			destroy_hist_field(hist_data->fields[i], 0);
			hist_data->fields[i] = NULL;
		}
	}

	for (i = 0; i < hist_data->n_var_refs; i++) {
		/* Every entry in var_refs[] must actually be a var ref. */
		WARN_ON(!(hist_data->var_refs[i]->flags & HIST_FIELD_FL_VAR_REF));
		__destroy_hist_field(hist_data->var_refs[i]);
		hist_data->var_refs[i] = NULL;
	}
}
/*
 * Initialize a VAR_REF field from the variable it references: copy the
 * variable's slot index, owning trigger, size/signedness, timestamp
 * flags, and duplicate the optional system/event qualifiers and name.
 * Returns 0 or -ENOMEM; on failure all duplicated strings are freed
 * and the corresponding pointers reset to NULL.
 */
static int init_var_ref(struct hist_field *ref_field,
			struct hist_field *var_field,
			char *system, char *event_name)
{
	int err = 0;

	ref_field->var.idx = var_field->var.idx;
	ref_field->var.hist_data = var_field->hist_data;
	ref_field->size = var_field->size;
	ref_field->is_signed = var_field->is_signed;
	/* Only the timestamp-related flags propagate to the reference. */
	ref_field->flags |= var_field->flags &
		(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);

	if (system) {
		ref_field->system = kstrdup(system, GFP_KERNEL);
		if (!ref_field->system)
			return -ENOMEM;
	}

	if (event_name) {
		ref_field->event_name = kstrdup(event_name, GFP_KERNEL);
		if (!ref_field->event_name) {
			err = -ENOMEM;
			goto free;
		}
	}

	/* Prefer the variable's declared name; fall back to field name. */
	if (var_field->var.name) {
		ref_field->name = kstrdup(var_field->var.name, GFP_KERNEL);
		if (!ref_field->name) {
			err = -ENOMEM;
			goto free;
		}
	} else if (var_field->name) {
		ref_field->name = kstrdup(var_field->name, GFP_KERNEL);
		if (!ref_field->name) {
			err = -ENOMEM;
			goto free;
		}
	}

	ref_field->type = kstrdup_const(var_field->type, GFP_KERNEL);
	if (!ref_field->type) {
		err = -ENOMEM;
		goto free;
	}
 out:
	return err;
 free:
	kfree(ref_field->system);
	ref_field->system = NULL;
	kfree(ref_field->event_name);
	ref_field->event_name = NULL;
	kfree(ref_field->name);
	ref_field->name = NULL;

	goto out;
}
2150 static int find_var_ref_idx(struct hist_trigger_data
*hist_data
,
2151 struct hist_field
*var_field
)
2153 struct hist_field
*ref_field
;
2156 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
2157 ref_field
= hist_data
->var_refs
[i
];
2158 if (ref_field
->var
.idx
== var_field
->var
.idx
&&
2159 ref_field
->var
.hist_data
== var_field
->hist_data
)
/*
 * create_var_ref - Create a variable reference and attach it to trigger
 * @hist_data: The trigger that will be referencing the variable
 * @var_field: The VAR field to create a reference to
 * @system: The optional system string
 * @event_name: The optional event_name string
 *
 * Given a variable hist_field, create a VAR_REF hist_field that
 * represents a reference to it.
 *
 * This function also adds the reference to the trigger that
 * now references the variable.
 *
 * Return: The VAR_REF field if successful, NULL if not
 */
static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data,
					 struct hist_field *var_field,
					 char *system, char *event_name)
{
	unsigned long flags = HIST_FIELD_FL_VAR_REF;
	struct hist_field *ref_field;
	int i;

	/* Check if the variable already exists */
	for (i = 0; i < hist_data->n_var_refs; i++) {
		ref_field = hist_data->var_refs[i];
		if (ref_field->var.idx == var_field->var.idx &&
		    ref_field->var.hist_data == var_field->hist_data) {
			/* Reuse the existing reference; bump its refcount. */
			get_hist_field(ref_field);
			return ref_field;
		}
	}
	/* Sanity check to avoid out-of-bound write on 'hist_data->var_refs' */
	if (hist_data->n_var_refs >= TRACING_MAP_VARS_MAX)
		return NULL;
	ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL);
	if (ref_field) {
		if (init_var_ref(ref_field, var_field, system, event_name)) {
			destroy_hist_field(ref_field, 0);
			return NULL;
		}

		hist_data->var_refs[hist_data->n_var_refs] = ref_field;
		ref_field->var_ref_idx = hist_data->n_var_refs++;
	}

	return ref_field;
}
/*
 * A variable reference is a '$' sigil followed by at least one more
 * character, e.g. "$lat".  NULL and a bare "$" are not references.
 */
static bool is_var_ref(char *var_name)
{
	return var_name && var_name[0] == '$' && strlen(var_name) >= 2;
}
/*
 * If @var_name was defined by this trigger as a simple alias for a plain
 * field (its expression contains no operator and is not itself a var
 * ref), return that field's name; otherwise NULL.
 */
static char *field_name_from_var(struct hist_trigger_data *hist_data,
				 char *var_name)
{
	char *name, *field;
	unsigned int i;

	for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) {
		name = hist_data->attrs->var_defs.name[i];

		if (strcmp(var_name, name) == 0) {
			field = hist_data->attrs->var_defs.expr[i];
			/* Only plain-field definitions qualify. */
			if (contains_operator(field, NULL) || is_var_ref(field))
				continue;
			return field;
		}
	}

	return NULL;
}
/*
 * Resolve "$var" as a local field-alias variable of this trigger.
 * If system/event qualifiers are given they must both be present and
 * match this trigger's own event; otherwise returns NULL so the caller
 * falls back to a cross-event variable reference.
 */
static char *local_field_var_ref(struct hist_trigger_data *hist_data,
				 char *system, char *event_name,
				 char *var_name)
{
	struct trace_event_call *call;

	if (system && event_name) {
		call = hist_data->event_file->event_call;

		if (strcmp(system, call->class->system) != 0)
			return NULL;

		if (strcmp(event_name, trace_event_name(call)) != 0)
			return NULL;
	}

	/* Either both qualifiers or neither. */
	if (!!system != !!event_name)
		return NULL;

	if (!is_var_ref(var_name))
		return NULL;

	var_name++;	/* skip the '$' sigil */

	return field_name_from_var(hist_data, var_name);
}
2270 static struct hist_field
*parse_var_ref(struct hist_trigger_data
*hist_data
,
2271 char *system
, char *event_name
,
2274 struct hist_field
*var_field
= NULL
, *ref_field
= NULL
;
2275 struct trace_array
*tr
= hist_data
->event_file
->tr
;
2277 if (!is_var_ref(var_name
))
2282 var_field
= find_event_var(hist_data
, system
, event_name
, var_name
);
2284 ref_field
= create_var_ref(hist_data
, var_field
,
2285 system
, event_name
);
2288 hist_err(tr
, HIST_ERR_VAR_NOT_FOUND
, errpos(var_name
));
/*
 * Parse "field_name[.modifier]" into the ftrace_event_field it names,
 * setting the matching HIST_FIELD_FL_* bits in *flags (and *buckets for
 * the "buckets=N" modifier).  Synthetic names (common_timestamp,
 * common_stacktrace, common_cpu, hitcount) set flags only and return
 * NULL; unknown modifiers or fields return ERR_PTR(-EINVAL).
 */
static struct ftrace_event_field *
parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
	    char *field_str, unsigned long *flags, unsigned long *buckets)
{
	struct ftrace_event_field *field = NULL;
	char *field_name, *modifier, *str;
	struct trace_array *tr = file->tr;

	/* Work on a copy: strsep() modifies the string. */
	modifier = str = kstrdup(field_str, GFP_KERNEL);
	if (!modifier)
		return ERR_PTR(-ENOMEM);

	field_name = strsep(&modifier, ".");
	if (modifier) {
		if (strcmp(modifier, "hex") == 0)
			*flags |= HIST_FIELD_FL_HEX;
		else if (strcmp(modifier, "sym") == 0)
			*flags |= HIST_FIELD_FL_SYM;
		/*
		 * 'sym-offset' occurrences in the trigger string are modified
		 * to 'symXoffset' to simplify arithmetic expression parsing.
		 */
		else if (strcmp(modifier, "symXoffset") == 0)
			*flags |= HIST_FIELD_FL_SYM_OFFSET;
		else if ((strcmp(modifier, "execname") == 0) &&
			 (strcmp(field_name, "common_pid") == 0))
			*flags |= HIST_FIELD_FL_EXECNAME;
		else if (strcmp(modifier, "syscall") == 0)
			*flags |= HIST_FIELD_FL_SYSCALL;
		else if (strcmp(modifier, "stacktrace") == 0)
			*flags |= HIST_FIELD_FL_STACKTRACE;
		else if (strcmp(modifier, "log2") == 0)
			*flags |= HIST_FIELD_FL_LOG2;
		else if (strcmp(modifier, "usecs") == 0)
			*flags |= HIST_FIELD_FL_TIMESTAMP_USECS;
		else if (strncmp(modifier, "bucket", 6) == 0) {
			int ret;

			/* Accept both "bucket=N" and "buckets=N". */
			modifier += 6;

			if (*modifier == 's')
				modifier++;
			if (*modifier != '=')
				goto error;
			modifier++;
			ret = kstrtoul(modifier, 0, buckets);
			if (ret || !(*buckets))
				goto error;
			*flags |= HIST_FIELD_FL_BUCKET;
		} else if (strncmp(modifier, "percent", 7) == 0) {
			/* percent/graph apply to values only, not keys/vars. */
			if (*flags & (HIST_FIELD_FL_VAR | HIST_FIELD_FL_KEY))
				goto error;
			*flags |= HIST_FIELD_FL_PERCENT;
		} else if (strncmp(modifier, "graph", 5) == 0) {
			if (*flags & (HIST_FIELD_FL_VAR | HIST_FIELD_FL_KEY))
				goto error;
			*flags |= HIST_FIELD_FL_GRAPH;
		} else {
 error:
			hist_err(tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(modifier));
			field = ERR_PTR(-EINVAL);
			goto out;
		}
	}

	if (strcmp(field_name, "common_timestamp") == 0) {
		*flags |= HIST_FIELD_FL_TIMESTAMP;
		hist_data->enable_timestamps = true;
		if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS)
			hist_data->attrs->ts_in_usecs = true;
	} else if (strcmp(field_name, "common_stacktrace") == 0) {
		*flags |= HIST_FIELD_FL_STACKTRACE;
	} else if (strcmp(field_name, "common_cpu") == 0)
		*flags |= HIST_FIELD_FL_CPU;
	else if (strcmp(field_name, "hitcount") == 0)
		*flags |= HIST_FIELD_FL_HITCOUNT;
	else {
		field = trace_find_event_field(file->event_call, field_name);
		if (!field || !field->size) {
			/*
			 * For backward compatibility, if field_name
			 * was "cpu" or "stacktrace", then we treat this
			 * the same as common_cpu and common_stacktrace
			 * respectively. This also works for "CPU", and
			 * "STACKTRACE".
			 */
			if (field && field->filter_type == FILTER_CPU) {
				*flags |= HIST_FIELD_FL_CPU;
			} else if (field && field->filter_type == FILTER_STACKTRACE) {
				*flags |= HIST_FIELD_FL_STACKTRACE;
			} else {
				hist_err(tr, HIST_ERR_FIELD_NOT_FOUND,
					 errpos(field_name));
				field = ERR_PTR(-EINVAL);
				goto out;
			}
		}
	}
 out:
	kfree(str);

	return field;
}
2397 static struct hist_field
*create_alias(struct hist_trigger_data
*hist_data
,
2398 struct hist_field
*var_ref
,
2401 struct hist_field
*alias
= NULL
;
2402 unsigned long flags
= HIST_FIELD_FL_ALIAS
| HIST_FIELD_FL_VAR
;
2404 alias
= create_hist_field(hist_data
, NULL
, flags
, var_name
);
2408 alias
->fn_num
= var_ref
->fn_num
;
2409 alias
->operands
[0] = var_ref
;
2411 if (init_var_ref(alias
, var_ref
, var_ref
->system
, var_ref
->event_name
)) {
2412 destroy_hist_field(alias
, 0);
2416 alias
->var_ref_idx
= var_ref
->var_ref_idx
;
2421 static struct hist_field
*parse_const(struct hist_trigger_data
*hist_data
,
2422 char *str
, char *var_name
,
2423 unsigned long *flags
)
2425 struct trace_array
*tr
= hist_data
->event_file
->tr
;
2426 struct hist_field
*field
= NULL
;
2429 if (kstrtoull(str
, 0, &constant
)) {
2430 hist_err(tr
, HIST_ERR_EXPECT_NUMBER
, errpos(str
));
2434 *flags
|= HIST_FIELD_FL_CONST
;
2435 field
= create_hist_field(hist_data
, NULL
, *flags
, var_name
);
2439 field
->constant
= constant
;
2444 static struct hist_field
*parse_atom(struct hist_trigger_data
*hist_data
,
2445 struct trace_event_file
*file
, char *str
,
2446 unsigned long *flags
, char *var_name
)
2448 char *s
, *ref_system
= NULL
, *ref_event
= NULL
, *ref_var
= str
;
2449 struct ftrace_event_field
*field
= NULL
;
2450 struct hist_field
*hist_field
= NULL
;
2451 unsigned long buckets
= 0;
2454 if (isdigit(str
[0])) {
2455 hist_field
= parse_const(hist_data
, str
, var_name
, flags
);
2463 s
= strchr(str
, '.');
2465 s
= strchr(++s
, '.');
2467 ref_system
= strsep(&str
, ".");
2472 ref_event
= strsep(&str
, ".");
2481 s
= local_field_var_ref(hist_data
, ref_system
, ref_event
, ref_var
);
2483 hist_field
= parse_var_ref(hist_data
, ref_system
,
2484 ref_event
, ref_var
);
2487 hist_field
= create_alias(hist_data
, hist_field
, var_name
);
2498 field
= parse_field(hist_data
, file
, str
, flags
, &buckets
);
2499 if (IS_ERR(field
)) {
2500 ret
= PTR_ERR(field
);
2504 hist_field
= create_hist_field(hist_data
, field
, *flags
, var_name
);
2509 hist_field
->buckets
= buckets
;
2513 return ERR_PTR(ret
);
2516 static struct hist_field
*parse_expr(struct hist_trigger_data
*hist_data
,
2517 struct trace_event_file
*file
,
2518 char *str
, unsigned long flags
,
2519 char *var_name
, unsigned int *n_subexprs
);
2521 static struct hist_field
*parse_unary(struct hist_trigger_data
*hist_data
,
2522 struct trace_event_file
*file
,
2523 char *str
, unsigned long flags
,
2524 char *var_name
, unsigned int *n_subexprs
)
2526 struct hist_field
*operand1
, *expr
= NULL
;
2527 unsigned long operand_flags
;
2531 /* Unary minus operator, increment n_subexprs */
2534 /* we support only -(xxx) i.e. explicit parens required */
2536 if (*n_subexprs
> 3) {
2537 hist_err(file
->tr
, HIST_ERR_TOO_MANY_SUBEXPR
, errpos(str
));
2542 str
++; /* skip leading '-' */
2544 s
= strchr(str
, '(');
2552 s
= strrchr(str
, ')');
2554 /* unary minus not supported in sub-expressions */
2555 if (*(s
+1) != '\0') {
2556 hist_err(file
->tr
, HIST_ERR_UNARY_MINUS_SUBEXPR
,
2564 ret
= -EINVAL
; /* no closing ')' */
2568 flags
|= HIST_FIELD_FL_EXPR
;
2569 expr
= create_hist_field(hist_data
, NULL
, flags
, var_name
);
2576 operand1
= parse_expr(hist_data
, file
, str
, operand_flags
, NULL
, n_subexprs
);
2577 if (IS_ERR(operand1
)) {
2578 ret
= PTR_ERR(operand1
);
2581 if (operand1
->flags
& HIST_FIELD_FL_STRING
) {
2582 /* String type can not be the operand of unary operator. */
2583 hist_err(file
->tr
, HIST_ERR_INVALID_STR_OPERAND
, errpos(str
));
2584 destroy_hist_field(operand1
, 0);
2589 expr
->flags
|= operand1
->flags
&
2590 (HIST_FIELD_FL_TIMESTAMP
| HIST_FIELD_FL_TIMESTAMP_USECS
);
2591 expr
->fn_num
= HIST_FIELD_FN_UMINUS
;
2592 expr
->operands
[0] = operand1
;
2593 expr
->size
= operand1
->size
;
2594 expr
->is_signed
= operand1
->is_signed
;
2595 expr
->operator = FIELD_OP_UNARY_MINUS
;
2596 expr
->name
= expr_str(expr
, 0);
2597 expr
->type
= kstrdup_const(operand1
->type
, GFP_KERNEL
);
2605 destroy_hist_field(expr
, 0);
2606 return ERR_PTR(ret
);
/*
 * If the operands are var refs, return pointers the
 * variable(s) referenced in var1 and var2, else NULL.
 *
 * Also validates that both operands agree on timestamp units
 * (usecs vs. raw); on mismatch logs HIST_ERR_TIMESTAMP_MISMATCH and
 * returns -EINVAL.
 */
static int check_expr_operands(struct trace_array *tr,
			       struct hist_field *operand1,
			       struct hist_field *operand2,
			       struct hist_field **var1,
			       struct hist_field **var2)
{
	unsigned long operand1_flags = operand1->flags;
	unsigned long operand2_flags = operand2->flags;

	/* For refs/aliases, check flags of the underlying variable. */
	if ((operand1_flags & HIST_FIELD_FL_VAR_REF) ||
	    (operand1_flags & HIST_FIELD_FL_ALIAS)) {
		struct hist_field *var;

		var = find_var_field(operand1->var.hist_data, operand1->name);
		if (!var)
			return -EINVAL;
		operand1_flags = var->flags;
		*var1 = var;
	}

	if ((operand2_flags & HIST_FIELD_FL_VAR_REF) ||
	    (operand2_flags & HIST_FIELD_FL_ALIAS)) {
		struct hist_field *var;

		var = find_var_field(operand2->var.hist_data, operand2->name);
		if (!var)
			return -EINVAL;
		operand2_flags = var->flags;
		*var2 = var;
	}

	if ((operand1_flags & HIST_FIELD_FL_TIMESTAMP_USECS) !=
	    (operand2_flags & HIST_FIELD_FL_TIMESTAMP_USECS)) {
		hist_err(tr, HIST_ERR_TIMESTAMP_MISMATCH, 0);
		return -EINVAL;
	}

	return 0;
}
2653 static struct hist_field
*parse_expr(struct hist_trigger_data
*hist_data
,
2654 struct trace_event_file
*file
,
2655 char *str
, unsigned long flags
,
2656 char *var_name
, unsigned int *n_subexprs
)
2658 struct hist_field
*operand1
= NULL
, *operand2
= NULL
, *expr
= NULL
;
2659 struct hist_field
*var1
= NULL
, *var2
= NULL
;
2660 unsigned long operand_flags
, operand2_flags
;
2661 int field_op
, ret
= -EINVAL
;
2662 char *sep
, *operand1_str
;
2663 enum hist_field_fn op_fn
;
2664 bool combine_consts
;
2666 if (*n_subexprs
> 3) {
2667 hist_err(file
->tr
, HIST_ERR_TOO_MANY_SUBEXPR
, errpos(str
));
2668 return ERR_PTR(-EINVAL
);
2671 field_op
= contains_operator(str
, &sep
);
2673 if (field_op
== FIELD_OP_NONE
)
2674 return parse_atom(hist_data
, file
, str
, &flags
, var_name
);
2676 if (field_op
== FIELD_OP_UNARY_MINUS
)
2677 return parse_unary(hist_data
, file
, str
, flags
, var_name
, n_subexprs
);
2679 /* Binary operator found, increment n_subexprs */
2682 /* Split the expression string at the root operator */
2684 return ERR_PTR(-EINVAL
);
2690 /* Binary operator requires both operands */
2691 if (*operand1_str
== '\0' || *str
== '\0')
2692 return ERR_PTR(-EINVAL
);
2696 /* LHS of string is an expression e.g. a+b in a+b+c */
2697 operand1
= parse_expr(hist_data
, file
, operand1_str
, operand_flags
, NULL
, n_subexprs
);
2698 if (IS_ERR(operand1
))
2699 return ERR_CAST(operand1
);
2701 if (operand1
->flags
& HIST_FIELD_FL_STRING
) {
2702 hist_err(file
->tr
, HIST_ERR_INVALID_STR_OPERAND
, errpos(operand1_str
));
2707 /* RHS of string is another expression e.g. c in a+b+c */
2709 operand2
= parse_expr(hist_data
, file
, str
, operand_flags
, NULL
, n_subexprs
);
2710 if (IS_ERR(operand2
)) {
2711 ret
= PTR_ERR(operand2
);
2714 if (operand2
->flags
& HIST_FIELD_FL_STRING
) {
2715 hist_err(file
->tr
, HIST_ERR_INVALID_STR_OPERAND
, errpos(str
));
2721 case FIELD_OP_MINUS
:
2722 op_fn
= HIST_FIELD_FN_MINUS
;
2725 op_fn
= HIST_FIELD_FN_PLUS
;
2728 op_fn
= HIST_FIELD_FN_DIV
;
2731 op_fn
= HIST_FIELD_FN_MULT
;
2738 ret
= check_expr_operands(file
->tr
, operand1
, operand2
, &var1
, &var2
);
2742 operand_flags
= var1
? var1
->flags
: operand1
->flags
;
2743 operand2_flags
= var2
? var2
->flags
: operand2
->flags
;
2746 * If both operands are constant, the expression can be
2747 * collapsed to a single constant.
2749 combine_consts
= operand_flags
& operand2_flags
& HIST_FIELD_FL_CONST
;
2751 flags
|= combine_consts
? HIST_FIELD_FL_CONST
: HIST_FIELD_FL_EXPR
;
2753 flags
|= operand1
->flags
&
2754 (HIST_FIELD_FL_TIMESTAMP
| HIST_FIELD_FL_TIMESTAMP_USECS
);
2756 expr
= create_hist_field(hist_data
, NULL
, flags
, var_name
);
2762 operand1
->read_once
= true;
2763 operand2
->read_once
= true;
2765 /* The operands are now owned and free'd by 'expr' */
2766 expr
->operands
[0] = operand1
;
2767 expr
->operands
[1] = operand2
;
2769 if (field_op
== FIELD_OP_DIV
&&
2770 operand2_flags
& HIST_FIELD_FL_CONST
) {
2771 u64 divisor
= var2
? var2
->constant
: operand2
->constant
;
2774 hist_err(file
->tr
, HIST_ERR_DIVISION_BY_ZERO
, errpos(str
));
2780 * Copy the divisor here so we don't have to look it up
2781 * later if this is a var ref
2783 operand2
->constant
= divisor
;
2784 op_fn
= hist_field_get_div_fn(operand2
);
2787 expr
->fn_num
= op_fn
;
2789 if (combine_consts
) {
2791 expr
->operands
[0] = var1
;
2793 expr
->operands
[1] = var2
;
2795 expr
->constant
= hist_fn_call(expr
, NULL
, NULL
, NULL
, NULL
);
2796 expr
->fn_num
= HIST_FIELD_FN_CONST
;
2798 expr
->operands
[0] = NULL
;
2799 expr
->operands
[1] = NULL
;
2802 * var refs won't be destroyed immediately
2803 * See: destroy_hist_field()
2805 destroy_hist_field(operand2
, 0);
2806 destroy_hist_field(operand1
, 0);
2808 expr
->name
= expr_str(expr
, 0);
2810 /* The operand sizes should be the same, so just pick one */
2811 expr
->size
= operand1
->size
;
2812 expr
->is_signed
= operand1
->is_signed
;
2814 expr
->operator = field_op
;
2815 expr
->type
= kstrdup_const(operand1
->type
, GFP_KERNEL
);
2821 expr
->name
= expr_str(expr
, 0);
2827 destroy_hist_field(operand2
, 0);
2829 destroy_hist_field(operand1
, 0);
2830 return ERR_PTR(ret
);
2833 destroy_hist_field(expr
, 0);
2834 return ERR_PTR(ret
);
2837 static char *find_trigger_filter(struct hist_trigger_data
*hist_data
,
2838 struct trace_event_file
*file
)
2840 struct event_trigger_data
*test
;
2842 lockdep_assert_held(&event_mutex
);
2844 list_for_each_entry(test
, &file
->triggers
, list
) {
2845 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
2846 if (test
->private_data
== hist_data
)
2847 return test
->filter_str
;
2854 static struct event_command trigger_hist_cmd
;
2855 static int event_hist_trigger_parse(struct event_command
*cmd_ops
,
2856 struct trace_event_file
*file
,
2857 char *glob
, char *cmd
,
2858 char *param_and_filter
);
2860 static bool compatible_keys(struct hist_trigger_data
*target_hist_data
,
2861 struct hist_trigger_data
*hist_data
,
2862 unsigned int n_keys
)
2864 struct hist_field
*target_hist_field
, *hist_field
;
2865 unsigned int n
, i
, j
;
2867 if (hist_data
->n_fields
- hist_data
->n_vals
!= n_keys
)
2870 i
= hist_data
->n_vals
;
2871 j
= target_hist_data
->n_vals
;
2873 for (n
= 0; n
< n_keys
; n
++) {
2874 hist_field
= hist_data
->fields
[i
+ n
];
2875 target_hist_field
= target_hist_data
->fields
[j
+ n
];
2877 if (strcmp(hist_field
->type
, target_hist_field
->type
) != 0)
2879 if (hist_field
->size
!= target_hist_field
->size
)
2881 if (hist_field
->is_signed
!= target_hist_field
->is_signed
)
2888 static struct hist_trigger_data
*
2889 find_compatible_hist(struct hist_trigger_data
*target_hist_data
,
2890 struct trace_event_file
*file
)
2892 struct hist_trigger_data
*hist_data
;
2893 struct event_trigger_data
*test
;
2894 unsigned int n_keys
;
2896 lockdep_assert_held(&event_mutex
);
2898 n_keys
= target_hist_data
->n_fields
- target_hist_data
->n_vals
;
2900 list_for_each_entry(test
, &file
->triggers
, list
) {
2901 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
2902 hist_data
= test
->private_data
;
2904 if (compatible_keys(target_hist_data
, hist_data
, n_keys
))
2912 static struct trace_event_file
*event_file(struct trace_array
*tr
,
2913 char *system
, char *event_name
)
2915 struct trace_event_file
*file
;
2917 file
= __find_event_file(tr
, system
, event_name
);
2919 return ERR_PTR(-EINVAL
);
2924 static struct hist_field
*
2925 find_synthetic_field_var(struct hist_trigger_data
*target_hist_data
,
2926 char *system
, char *event_name
, char *field_name
)
2928 struct hist_field
*event_var
;
2929 char *synthetic_name
;
2931 synthetic_name
= kzalloc(MAX_FILTER_STR_VAL
, GFP_KERNEL
);
2932 if (!synthetic_name
)
2933 return ERR_PTR(-ENOMEM
);
2935 strcpy(synthetic_name
, "synthetic_");
2936 strcat(synthetic_name
, field_name
);
2938 event_var
= find_event_var(target_hist_data
, system
, event_name
, synthetic_name
);
2940 kfree(synthetic_name
);
2946 * create_field_var_hist - Automatically create a histogram and var for a field
2947 * @target_hist_data: The target hist trigger
2948 * @subsys_name: Optional subsystem name
2949 * @event_name: Optional event name
2950 * @field_name: The name of the field (and the resulting variable)
2952 * Hist trigger actions fetch data from variables, not directly from
2953 * events. However, for convenience, users are allowed to directly
2954 * specify an event field in an action, which will be automatically
2955 * converted into a variable on their behalf.
2957 * If a user specifies a field on an event that isn't the event the
2958 * histogram currently being defined (the target event histogram), the
2959 * only way that can be accomplished is if a new hist trigger is
2960 * created and the field variable defined on that.
2962 * This function creates a new histogram compatible with the target
2963 * event (meaning a histogram with the same key as the target
2964 * histogram), and creates a variable for the specified field, but
2965 * with 'synthetic_' prepended to the variable name in order to avoid
2966 * collision with normal field variables.
2968 * Return: The variable created for the field.
2970 static struct hist_field
*
2971 create_field_var_hist(struct hist_trigger_data
*target_hist_data
,
2972 char *subsys_name
, char *event_name
, char *field_name
)
2974 struct trace_array
*tr
= target_hist_data
->event_file
->tr
;
2975 struct hist_trigger_data
*hist_data
;
2976 unsigned int i
, n
, first
= true;
2977 struct field_var_hist
*var_hist
;
2978 struct trace_event_file
*file
;
2979 struct hist_field
*key_field
;
2980 struct hist_field
*event_var
;
2985 if (target_hist_data
->n_field_var_hists
>= SYNTH_FIELDS_MAX
) {
2986 hist_err(tr
, HIST_ERR_TOO_MANY_FIELD_VARS
, errpos(field_name
));
2987 return ERR_PTR(-EINVAL
);
2990 file
= event_file(tr
, subsys_name
, event_name
);
2993 hist_err(tr
, HIST_ERR_EVENT_FILE_NOT_FOUND
, errpos(field_name
));
2994 ret
= PTR_ERR(file
);
2995 return ERR_PTR(ret
);
2999 * Look for a histogram compatible with target. We'll use the
3000 * found histogram specification to create a new matching
3001 * histogram with our variable on it. target_hist_data is not
3002 * yet a registered histogram so we can't use that.
3004 hist_data
= find_compatible_hist(target_hist_data
, file
);
3006 hist_err(tr
, HIST_ERR_HIST_NOT_FOUND
, errpos(field_name
));
3007 return ERR_PTR(-EINVAL
);
3010 /* See if a synthetic field variable has already been created */
3011 event_var
= find_synthetic_field_var(target_hist_data
, subsys_name
,
3012 event_name
, field_name
);
3013 if (!IS_ERR_OR_NULL(event_var
))
3016 var_hist
= kzalloc(sizeof(*var_hist
), GFP_KERNEL
);
3018 return ERR_PTR(-ENOMEM
);
3020 cmd
= kzalloc(MAX_FILTER_STR_VAL
, GFP_KERNEL
);
3023 return ERR_PTR(-ENOMEM
);
3026 /* Use the same keys as the compatible histogram */
3027 strcat(cmd
, "keys=");
3029 for_each_hist_key_field(i
, hist_data
) {
3030 key_field
= hist_data
->fields
[i
];
3033 strcat(cmd
, key_field
->field
->name
);
3037 /* Create the synthetic field variable specification */
3038 strcat(cmd
, ":synthetic_");
3039 strcat(cmd
, field_name
);
3041 strcat(cmd
, field_name
);
3043 /* Use the same filter as the compatible histogram */
3044 saved_filter
= find_trigger_filter(hist_data
, file
);
3046 strcat(cmd
, " if ");
3047 strcat(cmd
, saved_filter
);
3050 var_hist
->cmd
= kstrdup(cmd
, GFP_KERNEL
);
3051 if (!var_hist
->cmd
) {
3054 return ERR_PTR(-ENOMEM
);
3057 /* Save the compatible histogram information */
3058 var_hist
->hist_data
= hist_data
;
3060 /* Create the new histogram with our variable */
3061 ret
= event_hist_trigger_parse(&trigger_hist_cmd
, file
,
3065 kfree(var_hist
->cmd
);
3067 hist_err(tr
, HIST_ERR_HIST_CREATE_FAIL
, errpos(field_name
));
3068 return ERR_PTR(ret
);
3073 /* If we can't find the variable, something went wrong */
3074 event_var
= find_synthetic_field_var(target_hist_data
, subsys_name
,
3075 event_name
, field_name
);
3076 if (IS_ERR_OR_NULL(event_var
)) {
3077 kfree(var_hist
->cmd
);
3079 hist_err(tr
, HIST_ERR_SYNTH_VAR_NOT_FOUND
, errpos(field_name
));
3080 return ERR_PTR(-EINVAL
);
3083 n
= target_hist_data
->n_field_var_hists
;
3084 target_hist_data
->field_var_hists
[n
] = var_hist
;
3085 target_hist_data
->n_field_var_hists
++;
3090 static struct hist_field
*
3091 find_target_event_var(struct hist_trigger_data
*hist_data
,
3092 char *subsys_name
, char *event_name
, char *var_name
)
3094 struct trace_event_file
*file
= hist_data
->event_file
;
3095 struct hist_field
*hist_field
= NULL
;
3098 struct trace_event_call
*call
;
3103 call
= file
->event_call
;
3105 if (strcmp(subsys_name
, call
->class->system
) != 0)
3108 if (strcmp(event_name
, trace_event_name(call
)) != 0)
3112 hist_field
= find_var_field(hist_data
, var_name
);
3117 static inline void __update_field_vars(struct tracing_map_elt
*elt
,
3118 struct trace_buffer
*buffer
,
3119 struct ring_buffer_event
*rbe
,
3121 struct field_var
**field_vars
,
3122 unsigned int n_field_vars
,
3123 unsigned int field_var_str_start
)
3125 struct hist_elt_data
*elt_data
= elt
->private_data
;
3126 unsigned int i
, j
, var_idx
;
3129 /* Make sure stacktrace can fit in the string variable length */
3130 BUILD_BUG_ON((HIST_STACKTRACE_DEPTH
+ 1) * sizeof(long) >= STR_VAR_LEN_MAX
);
3132 for (i
= 0, j
= field_var_str_start
; i
< n_field_vars
; i
++) {
3133 struct field_var
*field_var
= field_vars
[i
];
3134 struct hist_field
*var
= field_var
->var
;
3135 struct hist_field
*val
= field_var
->val
;
3137 var_val
= hist_fn_call(val
, elt
, buffer
, rbe
, rec
);
3138 var_idx
= var
->var
.idx
;
3140 if (val
->flags
& (HIST_FIELD_FL_STRING
|
3141 HIST_FIELD_FL_STACKTRACE
)) {
3142 char *str
= elt_data
->field_var_str
[j
++];
3143 char *val_str
= (char *)(uintptr_t)var_val
;
3146 if (val
->flags
& HIST_FIELD_FL_STRING
) {
3147 size
= min(val
->size
, STR_VAR_LEN_MAX
);
3148 strscpy(str
, val_str
, size
);
3150 char *stack_start
= str
+ sizeof(unsigned long);
3153 e
= stack_trace_save((void *)stack_start
,
3154 HIST_STACKTRACE_DEPTH
,
3155 HIST_STACKTRACE_SKIP
);
3156 if (e
< HIST_STACKTRACE_DEPTH
- 1)
3157 ((unsigned long *)stack_start
)[e
] = 0;
3158 *((unsigned long *)str
) = e
;
3160 var_val
= (u64
)(uintptr_t)str
;
3162 tracing_map_set_var(elt
, var_idx
, var_val
);
3166 static void update_field_vars(struct hist_trigger_data
*hist_data
,
3167 struct tracing_map_elt
*elt
,
3168 struct trace_buffer
*buffer
,
3169 struct ring_buffer_event
*rbe
,
3172 __update_field_vars(elt
, buffer
, rbe
, rec
, hist_data
->field_vars
,
3173 hist_data
->n_field_vars
, 0);
3176 static void save_track_data_vars(struct hist_trigger_data
*hist_data
,
3177 struct tracing_map_elt
*elt
,
3178 struct trace_buffer
*buffer
, void *rec
,
3179 struct ring_buffer_event
*rbe
, void *key
,
3180 struct action_data
*data
, u64
*var_ref_vals
)
3182 __update_field_vars(elt
, buffer
, rbe
, rec
, hist_data
->save_vars
,
3183 hist_data
->n_save_vars
, hist_data
->n_field_var_str
);
3186 static struct hist_field
*create_var(struct hist_trigger_data
*hist_data
,
3187 struct trace_event_file
*file
,
3188 char *name
, int size
, const char *type
)
3190 struct hist_field
*var
;
3193 if (find_var(hist_data
, file
, name
) && !hist_data
->remove
) {
3194 var
= ERR_PTR(-EINVAL
);
3198 var
= kzalloc(sizeof(struct hist_field
), GFP_KERNEL
);
3200 var
= ERR_PTR(-ENOMEM
);
3204 idx
= tracing_map_add_var(hist_data
->map
);
3207 var
= ERR_PTR(-EINVAL
);
3212 var
->flags
= HIST_FIELD_FL_VAR
;
3214 var
->var
.hist_data
= var
->hist_data
= hist_data
;
3216 var
->var
.name
= kstrdup(name
, GFP_KERNEL
);
3217 var
->type
= kstrdup_const(type
, GFP_KERNEL
);
3218 if (!var
->var
.name
|| !var
->type
) {
3219 kfree_const(var
->type
);
3220 kfree(var
->var
.name
);
3222 var
= ERR_PTR(-ENOMEM
);
3228 static struct field_var
*create_field_var(struct hist_trigger_data
*hist_data
,
3229 struct trace_event_file
*file
,
3232 struct hist_field
*val
= NULL
, *var
= NULL
;
3233 unsigned long flags
= HIST_FIELD_FL_VAR
;
3234 struct trace_array
*tr
= file
->tr
;
3235 struct field_var
*field_var
;
3238 if (hist_data
->n_field_vars
>= SYNTH_FIELDS_MAX
) {
3239 hist_err(tr
, HIST_ERR_TOO_MANY_FIELD_VARS
, errpos(field_name
));
3244 val
= parse_atom(hist_data
, file
, field_name
, &flags
, NULL
);
3246 hist_err(tr
, HIST_ERR_FIELD_VAR_PARSE_FAIL
, errpos(field_name
));
3251 var
= create_var(hist_data
, file
, field_name
, val
->size
, val
->type
);
3253 hist_err(tr
, HIST_ERR_VAR_CREATE_FIND_FAIL
, errpos(field_name
));
3259 field_var
= kzalloc(sizeof(struct field_var
), GFP_KERNEL
);
3267 field_var
->var
= var
;
3268 field_var
->val
= val
;
3272 field_var
= ERR_PTR(ret
);
3277 * create_target_field_var - Automatically create a variable for a field
3278 * @target_hist_data: The target hist trigger
3279 * @subsys_name: Optional subsystem name
3280 * @event_name: Optional event name
3281 * @var_name: The name of the field (and the resulting variable)
3283 * Hist trigger actions fetch data from variables, not directly from
3284 * events. However, for convenience, users are allowed to directly
3285 * specify an event field in an action, which will be automatically
3286 * converted into a variable on their behalf.
3288 * This function creates a field variable with the name var_name on
3289 * the hist trigger currently being defined on the target event. If
3290 * subsys_name and event_name are specified, this function simply
3291 * verifies that they do in fact match the target event subsystem and
3294 * Return: The variable created for the field.
3296 static struct field_var
*
3297 create_target_field_var(struct hist_trigger_data
*target_hist_data
,
3298 char *subsys_name
, char *event_name
, char *var_name
)
3300 struct trace_event_file
*file
= target_hist_data
->event_file
;
3303 struct trace_event_call
*call
;
3308 call
= file
->event_call
;
3310 if (strcmp(subsys_name
, call
->class->system
) != 0)
3313 if (strcmp(event_name
, trace_event_name(call
)) != 0)
3317 return create_field_var(target_hist_data
, file
, var_name
);
3320 static bool check_track_val_max(u64 track_val
, u64 var_val
)
3322 if (var_val
<= track_val
)
3328 static bool check_track_val_changed(u64 track_val
, u64 var_val
)
3330 if (var_val
== track_val
)
3336 static u64
get_track_val(struct hist_trigger_data
*hist_data
,
3337 struct tracing_map_elt
*elt
,
3338 struct action_data
*data
)
3340 unsigned int track_var_idx
= data
->track_data
.track_var
->var
.idx
;
3343 track_val
= tracing_map_read_var(elt
, track_var_idx
);
3348 static void save_track_val(struct hist_trigger_data
*hist_data
,
3349 struct tracing_map_elt
*elt
,
3350 struct action_data
*data
, u64 var_val
)
3352 unsigned int track_var_idx
= data
->track_data
.track_var
->var
.idx
;
3354 tracing_map_set_var(elt
, track_var_idx
, var_val
);
3357 static void save_track_data(struct hist_trigger_data
*hist_data
,
3358 struct tracing_map_elt
*elt
,
3359 struct trace_buffer
*buffer
, void *rec
,
3360 struct ring_buffer_event
*rbe
, void *key
,
3361 struct action_data
*data
, u64
*var_ref_vals
)
3363 if (data
->track_data
.save_data
)
3364 data
->track_data
.save_data(hist_data
, elt
, buffer
, rec
, rbe
,
3365 key
, data
, var_ref_vals
);
3368 static bool check_track_val(struct tracing_map_elt
*elt
,
3369 struct action_data
*data
,
3372 struct hist_trigger_data
*hist_data
;
3375 hist_data
= data
->track_data
.track_var
->hist_data
;
3376 track_val
= get_track_val(hist_data
, elt
, data
);
3378 return data
->track_data
.check_val(track_val
, var_val
);
3381 #ifdef CONFIG_TRACER_SNAPSHOT
3382 static bool cond_snapshot_update(struct trace_array
*tr
, void *cond_data
)
3384 /* called with tr->max_lock held */
3385 struct track_data
*track_data
= tr
->cond_snapshot
->cond_data
;
3386 struct hist_elt_data
*elt_data
, *track_elt_data
;
3387 struct snapshot_context
*context
= cond_data
;
3388 struct action_data
*action
;
3394 action
= track_data
->action_data
;
3396 track_val
= get_track_val(track_data
->hist_data
, context
->elt
,
3397 track_data
->action_data
);
3399 if (!action
->track_data
.check_val(track_data
->track_val
, track_val
))
3402 track_data
->track_val
= track_val
;
3403 memcpy(track_data
->key
, context
->key
, track_data
->key_len
);
3405 elt_data
= context
->elt
->private_data
;
3406 track_elt_data
= track_data
->elt
.private_data
;
3408 strncpy(track_elt_data
->comm
, elt_data
->comm
, TASK_COMM_LEN
);
3410 track_data
->updated
= true;
3415 static void save_track_data_snapshot(struct hist_trigger_data
*hist_data
,
3416 struct tracing_map_elt
*elt
,
3417 struct trace_buffer
*buffer
, void *rec
,
3418 struct ring_buffer_event
*rbe
, void *key
,
3419 struct action_data
*data
,
3422 struct trace_event_file
*file
= hist_data
->event_file
;
3423 struct snapshot_context context
;
3428 tracing_snapshot_cond(file
->tr
, &context
);
3431 static void hist_trigger_print_key(struct seq_file
*m
,
3432 struct hist_trigger_data
*hist_data
,
3434 struct tracing_map_elt
*elt
);
3436 static struct action_data
*snapshot_action(struct hist_trigger_data
*hist_data
)
3440 if (!hist_data
->n_actions
)
3443 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
3444 struct action_data
*data
= hist_data
->actions
[i
];
3446 if (data
->action
== ACTION_SNAPSHOT
)
3453 static void track_data_snapshot_print(struct seq_file
*m
,
3454 struct hist_trigger_data
*hist_data
)
3456 struct trace_event_file
*file
= hist_data
->event_file
;
3457 struct track_data
*track_data
;
3458 struct action_data
*action
;
3460 track_data
= tracing_cond_snapshot_data(file
->tr
);
3464 if (!track_data
->updated
)
3467 action
= snapshot_action(hist_data
);
3471 seq_puts(m
, "\nSnapshot taken (see tracing/snapshot). Details:\n");
3472 seq_printf(m
, "\ttriggering value { %s(%s) }: %10llu",
3473 action
->handler
== HANDLER_ONMAX
? "onmax" : "onchange",
3474 action
->track_data
.var_str
, track_data
->track_val
);
3476 seq_puts(m
, "\ttriggered by event with key: ");
3477 hist_trigger_print_key(m
, hist_data
, track_data
->key
, &track_data
->elt
);
3481 static bool cond_snapshot_update(struct trace_array
*tr
, void *cond_data
)
/* !CONFIG_TRACER_SNAPSHOT stub: snapshot saving is compiled out. */
static void save_track_data_snapshot(struct hist_trigger_data *hist_data,
				     struct tracing_map_elt *elt,
				     struct trace_buffer *buffer, void *rec,
				     struct ring_buffer_event *rbe, void *key,
				     struct action_data *data,
				     u64 *var_ref_vals) {}
/* !CONFIG_TRACER_SNAPSHOT stub: nothing to print without snapshots. */
static void track_data_snapshot_print(struct seq_file *m,
				      struct hist_trigger_data *hist_data) {}
3493 #endif /* CONFIG_TRACER_SNAPSHOT */
3495 static void track_data_print(struct seq_file
*m
,
3496 struct hist_trigger_data
*hist_data
,
3497 struct tracing_map_elt
*elt
,
3498 struct action_data
*data
)
3500 u64 track_val
= get_track_val(hist_data
, elt
, data
);
3501 unsigned int i
, save_var_idx
;
3503 if (data
->handler
== HANDLER_ONMAX
)
3504 seq_printf(m
, "\n\tmax: %10llu", track_val
);
3505 else if (data
->handler
== HANDLER_ONCHANGE
)
3506 seq_printf(m
, "\n\tchanged: %10llu", track_val
);
3508 if (data
->action
== ACTION_SNAPSHOT
)
3511 for (i
= 0; i
< hist_data
->n_save_vars
; i
++) {
3512 struct hist_field
*save_val
= hist_data
->save_vars
[i
]->val
;
3513 struct hist_field
*save_var
= hist_data
->save_vars
[i
]->var
;
3516 save_var_idx
= save_var
->var
.idx
;
3518 val
= tracing_map_read_var(elt
, save_var_idx
);
3520 if (save_val
->flags
& HIST_FIELD_FL_STRING
) {
3521 seq_printf(m
, " %s: %-32s", save_var
->var
.name
,
3522 (char *)(uintptr_t)(val
));
3524 seq_printf(m
, " %s: %10llu", save_var
->var
.name
, val
);
3528 static void ontrack_action(struct hist_trigger_data
*hist_data
,
3529 struct tracing_map_elt
*elt
,
3530 struct trace_buffer
*buffer
, void *rec
,
3531 struct ring_buffer_event
*rbe
, void *key
,
3532 struct action_data
*data
, u64
*var_ref_vals
)
3534 u64 var_val
= var_ref_vals
[data
->track_data
.var_ref
->var_ref_idx
];
3536 if (check_track_val(elt
, data
, var_val
)) {
3537 save_track_val(hist_data
, elt
, data
, var_val
);
3538 save_track_data(hist_data
, elt
, buffer
, rec
, rbe
,
3539 key
, data
, var_ref_vals
);
3543 static void action_data_destroy(struct action_data
*data
)
3547 lockdep_assert_held(&event_mutex
);
3549 kfree(data
->action_name
);
3551 for (i
= 0; i
< data
->n_params
; i
++)
3552 kfree(data
->params
[i
]);
3554 if (data
->synth_event
)
3555 data
->synth_event
->ref
--;
3557 kfree(data
->synth_event_name
);
3562 static void track_data_destroy(struct hist_trigger_data
*hist_data
,
3563 struct action_data
*data
)
3565 struct trace_event_file
*file
= hist_data
->event_file
;
3567 destroy_hist_field(data
->track_data
.track_var
, 0);
3569 if (data
->action
== ACTION_SNAPSHOT
) {
3570 struct track_data
*track_data
;
3572 track_data
= tracing_cond_snapshot_data(file
->tr
);
3573 if (track_data
&& track_data
->hist_data
== hist_data
) {
3574 tracing_snapshot_cond_disable(file
->tr
);
3575 track_data_free(track_data
);
3579 kfree(data
->track_data
.var_str
);
3581 action_data_destroy(data
);
3584 static int action_create(struct hist_trigger_data
*hist_data
,
3585 struct action_data
*data
);
3587 static int track_data_create(struct hist_trigger_data
*hist_data
,
3588 struct action_data
*data
)
3590 struct hist_field
*var_field
, *ref_field
, *track_var
= NULL
;
3591 struct trace_event_file
*file
= hist_data
->event_file
;
3592 struct trace_array
*tr
= file
->tr
;
3593 char *track_data_var_str
;
3596 track_data_var_str
= data
->track_data
.var_str
;
3597 if (track_data_var_str
[0] != '$') {
3598 hist_err(tr
, HIST_ERR_ONX_NOT_VAR
, errpos(track_data_var_str
));
3601 track_data_var_str
++;
3603 var_field
= find_target_event_var(hist_data
, NULL
, NULL
, track_data_var_str
);
3605 hist_err(tr
, HIST_ERR_ONX_VAR_NOT_FOUND
, errpos(track_data_var_str
));
3609 ref_field
= create_var_ref(hist_data
, var_field
, NULL
, NULL
);
3613 data
->track_data
.var_ref
= ref_field
;
3615 if (data
->handler
== HANDLER_ONMAX
)
3616 track_var
= create_var(hist_data
, file
, "__max", sizeof(u64
), "u64");
3617 if (IS_ERR(track_var
)) {
3618 hist_err(tr
, HIST_ERR_ONX_VAR_CREATE_FAIL
, 0);
3619 ret
= PTR_ERR(track_var
);
3623 if (data
->handler
== HANDLER_ONCHANGE
)
3624 track_var
= create_var(hist_data
, file
, "__change", sizeof(u64
), "u64");
3625 if (IS_ERR(track_var
)) {
3626 hist_err(tr
, HIST_ERR_ONX_VAR_CREATE_FAIL
, 0);
3627 ret
= PTR_ERR(track_var
);
3630 data
->track_data
.track_var
= track_var
;
3632 ret
= action_create(hist_data
, data
);
3637 static int parse_action_params(struct trace_array
*tr
, char *params
,
3638 struct action_data
*data
)
3640 char *param
, *saved_param
;
3641 bool first_param
= true;
3645 if (data
->n_params
>= SYNTH_FIELDS_MAX
) {
3646 hist_err(tr
, HIST_ERR_TOO_MANY_PARAMS
, 0);
3651 param
= strsep(¶ms
, ",");
3653 hist_err(tr
, HIST_ERR_PARAM_NOT_FOUND
, 0);
3658 param
= strstrip(param
);
3659 if (strlen(param
) < 2) {
3660 hist_err(tr
, HIST_ERR_INVALID_PARAM
, errpos(param
));
3665 saved_param
= kstrdup(param
, GFP_KERNEL
);
3671 if (first_param
&& data
->use_trace_keyword
) {
3672 data
->synth_event_name
= saved_param
;
3673 first_param
= false;
3676 first_param
= false;
3678 data
->params
[data
->n_params
++] = saved_param
;
3684 static int action_parse(struct trace_array
*tr
, char *str
, struct action_data
*data
,
3685 enum handler_id handler
)
3692 hist_err(tr
, HIST_ERR_ACTION_NOT_FOUND
, 0);
3697 action_name
= strsep(&str
, "(");
3698 if (!action_name
|| !str
) {
3699 hist_err(tr
, HIST_ERR_ACTION_NOT_FOUND
, 0);
3704 if (str_has_prefix(action_name
, "save")) {
3705 char *params
= strsep(&str
, ")");
3708 hist_err(tr
, HIST_ERR_NO_SAVE_PARAMS
, 0);
3713 ret
= parse_action_params(tr
, params
, data
);
3717 if (handler
== HANDLER_ONMAX
)
3718 data
->track_data
.check_val
= check_track_val_max
;
3719 else if (handler
== HANDLER_ONCHANGE
)
3720 data
->track_data
.check_val
= check_track_val_changed
;
3722 hist_err(tr
, HIST_ERR_ACTION_MISMATCH
, errpos(action_name
));
3727 data
->track_data
.save_data
= save_track_data_vars
;
3728 data
->fn
= ontrack_action
;
3729 data
->action
= ACTION_SAVE
;
3730 } else if (str_has_prefix(action_name
, "snapshot")) {
3731 char *params
= strsep(&str
, ")");
3734 hist_err(tr
, HIST_ERR_NO_CLOSING_PAREN
, errpos(params
));
3739 if (handler
== HANDLER_ONMAX
)
3740 data
->track_data
.check_val
= check_track_val_max
;
3741 else if (handler
== HANDLER_ONCHANGE
)
3742 data
->track_data
.check_val
= check_track_val_changed
;
3744 hist_err(tr
, HIST_ERR_ACTION_MISMATCH
, errpos(action_name
));
3749 data
->track_data
.save_data
= save_track_data_snapshot
;
3750 data
->fn
= ontrack_action
;
3751 data
->action
= ACTION_SNAPSHOT
;
3753 char *params
= strsep(&str
, ")");
3755 if (str_has_prefix(action_name
, "trace"))
3756 data
->use_trace_keyword
= true;
3759 ret
= parse_action_params(tr
, params
, data
);
3764 if (handler
== HANDLER_ONMAX
)
3765 data
->track_data
.check_val
= check_track_val_max
;
3766 else if (handler
== HANDLER_ONCHANGE
)
3767 data
->track_data
.check_val
= check_track_val_changed
;
3769 if (handler
!= HANDLER_ONMATCH
) {
3770 data
->track_data
.save_data
= action_trace
;
3771 data
->fn
= ontrack_action
;
3773 data
->fn
= action_trace
;
3775 data
->action
= ACTION_TRACE
;
3778 data
->action_name
= kstrdup(action_name
, GFP_KERNEL
);
3779 if (!data
->action_name
) {
3784 data
->handler
= handler
;
3789 static struct action_data
*track_data_parse(struct hist_trigger_data
*hist_data
,
3790 char *str
, enum handler_id handler
)
3792 struct action_data
*data
;
3796 data
= kzalloc(sizeof(*data
), GFP_KERNEL
);
3798 return ERR_PTR(-ENOMEM
);
3800 var_str
= strsep(&str
, ")");
3801 if (!var_str
|| !str
) {
3806 data
->track_data
.var_str
= kstrdup(var_str
, GFP_KERNEL
);
3807 if (!data
->track_data
.var_str
) {
3812 ret
= action_parse(hist_data
->event_file
->tr
, str
, data
, handler
);
3818 track_data_destroy(hist_data
, data
);
3819 data
= ERR_PTR(ret
);
3823 static void onmatch_destroy(struct action_data
*data
)
3825 kfree(data
->match_data
.event
);
3826 kfree(data
->match_data
.event_system
);
3828 action_data_destroy(data
);
3831 static void destroy_field_var(struct field_var
*field_var
)
3836 destroy_hist_field(field_var
->var
, 0);
3837 destroy_hist_field(field_var
->val
, 0);
3842 static void destroy_field_vars(struct hist_trigger_data
*hist_data
)
3846 for (i
= 0; i
< hist_data
->n_field_vars
; i
++)
3847 destroy_field_var(hist_data
->field_vars
[i
]);
3849 for (i
= 0; i
< hist_data
->n_save_vars
; i
++)
3850 destroy_field_var(hist_data
->save_vars
[i
]);
3853 static void save_field_var(struct hist_trigger_data
*hist_data
,
3854 struct field_var
*field_var
)
3856 hist_data
->field_vars
[hist_data
->n_field_vars
++] = field_var
;
3858 /* Stack traces are saved in the string storage too */
3859 if (field_var
->val
->flags
& (HIST_FIELD_FL_STRING
| HIST_FIELD_FL_STACKTRACE
))
3860 hist_data
->n_field_var_str
++;
3864 static int check_synth_field(struct synth_event
*event
,
3865 struct hist_field
*hist_field
,
3866 unsigned int field_pos
)
3868 struct synth_field
*field
;
3870 if (field_pos
>= event
->n_fields
)
3873 field
= event
->fields
[field_pos
];
3876 * A dynamic string synth field can accept static or
3877 * dynamic. A static string synth field can only accept a
3878 * same-sized static string, which is checked for later.
3880 if (strstr(hist_field
->type
, "char[") && field
->is_string
3881 && field
->is_dynamic
)
3884 if (strstr(hist_field
->type
, "long[") && field
->is_stack
)
3887 if (strcmp(field
->type
, hist_field
->type
) != 0) {
3888 if (field
->size
!= hist_field
->size
||
3889 (!field
->is_string
&& field
->is_signed
!= hist_field
->is_signed
))
3896 static struct hist_field
*
3897 trace_action_find_var(struct hist_trigger_data
*hist_data
,
3898 struct action_data
*data
,
3899 char *system
, char *event
, char *var
)
3901 struct trace_array
*tr
= hist_data
->event_file
->tr
;
3902 struct hist_field
*hist_field
;
3904 var
++; /* skip '$' */
3906 hist_field
= find_target_event_var(hist_data
, system
, event
, var
);
3908 if (!system
&& data
->handler
== HANDLER_ONMATCH
) {
3909 system
= data
->match_data
.event_system
;
3910 event
= data
->match_data
.event
;
3913 hist_field
= find_event_var(hist_data
, system
, event
, var
);
3917 hist_err(tr
, HIST_ERR_PARAM_NOT_FOUND
, errpos(var
));
3922 static struct hist_field
*
3923 trace_action_create_field_var(struct hist_trigger_data
*hist_data
,
3924 struct action_data
*data
, char *system
,
3925 char *event
, char *var
)
3927 struct hist_field
*hist_field
= NULL
;
3928 struct field_var
*field_var
;
3931 * First try to create a field var on the target event (the
3932 * currently being defined). This will create a variable for
3933 * unqualified fields on the target event, or if qualified,
3934 * target fields that have qualified names matching the target.
3936 field_var
= create_target_field_var(hist_data
, system
, event
, var
);
3938 if (field_var
&& !IS_ERR(field_var
)) {
3939 save_field_var(hist_data
, field_var
);
3940 hist_field
= field_var
->var
;
3944 * If no explicit system.event is specified, default to
3945 * looking for fields on the onmatch(system.event.xxx)
3948 if (!system
&& data
->handler
== HANDLER_ONMATCH
) {
3949 system
= data
->match_data
.event_system
;
3950 event
= data
->match_data
.event
;
3956 * At this point, we're looking at a field on another
3957 * event. Because we can't modify a hist trigger on
3958 * another event to add a variable for a field, we need
3959 * to create a new trigger on that event and create the
3960 * variable at the same time.
3962 hist_field
= create_field_var_hist(hist_data
, system
, event
, var
);
3963 if (IS_ERR(hist_field
))
3969 destroy_field_var(field_var
);
3974 static int trace_action_create(struct hist_trigger_data
*hist_data
,
3975 struct action_data
*data
)
3977 struct trace_array
*tr
= hist_data
->event_file
->tr
;
3978 char *event_name
, *param
, *system
= NULL
;
3979 struct hist_field
*hist_field
, *var_ref
;
3981 unsigned int field_pos
= 0;
3982 struct synth_event
*event
;
3983 char *synth_event_name
;
3984 int var_ref_idx
, ret
= 0;
3986 lockdep_assert_held(&event_mutex
);
3988 /* Sanity check to avoid out-of-bound write on 'data->var_ref_idx' */
3989 if (data
->n_params
> SYNTH_FIELDS_MAX
)
3992 if (data
->use_trace_keyword
)
3993 synth_event_name
= data
->synth_event_name
;
3995 synth_event_name
= data
->action_name
;
3997 event
= find_synth_event(synth_event_name
);
3999 hist_err(tr
, HIST_ERR_SYNTH_EVENT_NOT_FOUND
, errpos(synth_event_name
));
4005 for (i
= 0; i
< data
->n_params
; i
++) {
4008 p
= param
= kstrdup(data
->params
[i
], GFP_KERNEL
);
4014 system
= strsep(¶m
, ".");
4016 param
= (char *)system
;
4017 system
= event_name
= NULL
;
4019 event_name
= strsep(¶m
, ".");
4027 if (param
[0] == '$')
4028 hist_field
= trace_action_find_var(hist_data
, data
,
4032 hist_field
= trace_action_create_field_var(hist_data
,
4044 if (check_synth_field(event
, hist_field
, field_pos
) == 0) {
4045 var_ref
= create_var_ref(hist_data
, hist_field
,
4046 system
, event_name
);
4053 var_ref_idx
= find_var_ref_idx(hist_data
, var_ref
);
4054 if (WARN_ON(var_ref_idx
< 0)) {
4060 data
->var_ref_idx
[i
] = var_ref_idx
;
4067 hist_err(tr
, HIST_ERR_SYNTH_TYPE_MISMATCH
, errpos(param
));
4073 if (field_pos
!= event
->n_fields
) {
4074 hist_err(tr
, HIST_ERR_SYNTH_COUNT_MISMATCH
, errpos(event
->name
));
4079 data
->synth_event
= event
;
4088 static int action_create(struct hist_trigger_data
*hist_data
,
4089 struct action_data
*data
)
4091 struct trace_event_file
*file
= hist_data
->event_file
;
4092 struct trace_array
*tr
= file
->tr
;
4093 struct track_data
*track_data
;
4094 struct field_var
*field_var
;
4099 if (data
->action
== ACTION_TRACE
)
4100 return trace_action_create(hist_data
, data
);
4102 if (data
->action
== ACTION_SNAPSHOT
) {
4103 track_data
= track_data_alloc(hist_data
->key_size
, data
, hist_data
);
4104 if (IS_ERR(track_data
)) {
4105 ret
= PTR_ERR(track_data
);
4109 ret
= tracing_snapshot_cond_enable(file
->tr
, track_data
,
4110 cond_snapshot_update
);
4112 track_data_free(track_data
);
4117 if (data
->action
== ACTION_SAVE
) {
4118 if (hist_data
->n_save_vars
) {
4120 hist_err(tr
, HIST_ERR_TOO_MANY_SAVE_ACTIONS
, 0);
4124 for (i
= 0; i
< data
->n_params
; i
++) {
4125 param
= kstrdup(data
->params
[i
], GFP_KERNEL
);
4131 field_var
= create_target_field_var(hist_data
, NULL
, NULL
, param
);
4132 if (IS_ERR(field_var
)) {
4133 hist_err(tr
, HIST_ERR_FIELD_VAR_CREATE_FAIL
,
4135 ret
= PTR_ERR(field_var
);
4140 hist_data
->save_vars
[hist_data
->n_save_vars
++] = field_var
;
4141 if (field_var
->val
->flags
&
4142 (HIST_FIELD_FL_STRING
| HIST_FIELD_FL_STACKTRACE
))
4143 hist_data
->n_save_var_str
++;
/* onmatch_create - onmatch actions need no extra setup beyond action_create. */
static int onmatch_create(struct hist_trigger_data *hist_data,
			  struct action_data *data)
{
	return action_create(hist_data, data);
}
4157 static struct action_data
*onmatch_parse(struct trace_array
*tr
, char *str
)
4159 char *match_event
, *match_event_system
;
4160 struct action_data
*data
;
4163 data
= kzalloc(sizeof(*data
), GFP_KERNEL
);
4165 return ERR_PTR(-ENOMEM
);
4167 match_event
= strsep(&str
, ")");
4168 if (!match_event
|| !str
) {
4169 hist_err(tr
, HIST_ERR_NO_CLOSING_PAREN
, errpos(match_event
));
4173 match_event_system
= strsep(&match_event
, ".");
4175 hist_err(tr
, HIST_ERR_SUBSYS_NOT_FOUND
, errpos(match_event_system
));
4179 if (IS_ERR(event_file(tr
, match_event_system
, match_event
))) {
4180 hist_err(tr
, HIST_ERR_INVALID_SUBSYS_EVENT
, errpos(match_event
));
4184 data
->match_data
.event
= kstrdup(match_event
, GFP_KERNEL
);
4185 if (!data
->match_data
.event
) {
4190 data
->match_data
.event_system
= kstrdup(match_event_system
, GFP_KERNEL
);
4191 if (!data
->match_data
.event_system
) {
4196 ret
= action_parse(tr
, str
, data
, HANDLER_ONMATCH
);
4202 onmatch_destroy(data
);
4203 data
= ERR_PTR(ret
);
4207 static int create_hitcount_val(struct hist_trigger_data
*hist_data
)
4209 hist_data
->fields
[HITCOUNT_IDX
] =
4210 create_hist_field(hist_data
, NULL
, HIST_FIELD_FL_HITCOUNT
, NULL
);
4211 if (!hist_data
->fields
[HITCOUNT_IDX
])
4214 hist_data
->n_vals
++;
4215 hist_data
->n_fields
++;
4217 if (WARN_ON(hist_data
->n_vals
> TRACING_MAP_VALS_MAX
))
4223 static int __create_val_field(struct hist_trigger_data
*hist_data
,
4224 unsigned int val_idx
,
4225 struct trace_event_file
*file
,
4226 char *var_name
, char *field_str
,
4227 unsigned long flags
)
4229 struct hist_field
*hist_field
;
4230 int ret
= 0, n_subexprs
= 0;
4232 hist_field
= parse_expr(hist_data
, file
, field_str
, flags
, var_name
, &n_subexprs
);
4233 if (IS_ERR(hist_field
)) {
4234 ret
= PTR_ERR(hist_field
);
4238 /* values and variables should not have some modifiers */
4239 if (hist_field
->flags
& HIST_FIELD_FL_VAR
) {
4241 if (hist_field
->flags
& (HIST_FIELD_FL_GRAPH
| HIST_FIELD_FL_PERCENT
|
4242 HIST_FIELD_FL_BUCKET
| HIST_FIELD_FL_LOG2
))
4246 if (hist_field
->flags
& (HIST_FIELD_FL_GRAPH
| HIST_FIELD_FL_PERCENT
|
4247 HIST_FIELD_FL_BUCKET
| HIST_FIELD_FL_LOG2
|
4248 HIST_FIELD_FL_SYM
| HIST_FIELD_FL_SYM_OFFSET
|
4249 HIST_FIELD_FL_SYSCALL
| HIST_FIELD_FL_STACKTRACE
))
4253 hist_data
->fields
[val_idx
] = hist_field
;
4255 ++hist_data
->n_vals
;
4256 ++hist_data
->n_fields
;
4258 if (WARN_ON(hist_data
->n_vals
> TRACING_MAP_VALS_MAX
+ TRACING_MAP_VARS_MAX
))
4263 hist_err(file
->tr
, HIST_ERR_BAD_FIELD_MODIFIER
, errpos(field_str
));
4267 static int create_val_field(struct hist_trigger_data
*hist_data
,
4268 unsigned int val_idx
,
4269 struct trace_event_file
*file
,
4272 if (WARN_ON(val_idx
>= TRACING_MAP_VALS_MAX
))
4275 return __create_val_field(hist_data
, val_idx
, file
, NULL
, field_str
, 0);
/* Fallback comm string returned when an element has no recorded task name. */
static const char no_comm[] = "(no comm)";
4280 static u64
hist_field_execname(struct hist_field
*hist_field
,
4281 struct tracing_map_elt
*elt
,
4282 struct trace_buffer
*buffer
,
4283 struct ring_buffer_event
*rbe
,
4286 struct hist_elt_data
*elt_data
;
4288 if (WARN_ON_ONCE(!elt
))
4289 return (u64
)(unsigned long)no_comm
;
4291 elt_data
= elt
->private_data
;
4293 if (WARN_ON_ONCE(!elt_data
->comm
))
4294 return (u64
)(unsigned long)no_comm
;
4296 return (u64
)(unsigned long)(elt_data
->comm
);
4299 static u64
hist_field_stack(struct hist_field
*hist_field
,
4300 struct tracing_map_elt
*elt
,
4301 struct trace_buffer
*buffer
,
4302 struct ring_buffer_event
*rbe
,
4305 u32 str_item
= *(u32
*)(event
+ hist_field
->field
->offset
);
4306 int str_loc
= str_item
& 0xffff;
4307 char *addr
= (char *)(event
+ str_loc
);
4309 return (u64
)(unsigned long)addr
;
4312 static u64
hist_fn_call(struct hist_field
*hist_field
,
4313 struct tracing_map_elt
*elt
,
4314 struct trace_buffer
*buffer
,
4315 struct ring_buffer_event
*rbe
,
4318 switch (hist_field
->fn_num
) {
4319 case HIST_FIELD_FN_VAR_REF
:
4320 return hist_field_var_ref(hist_field
, elt
, buffer
, rbe
, event
);
4321 case HIST_FIELD_FN_COUNTER
:
4322 return hist_field_counter(hist_field
, elt
, buffer
, rbe
, event
);
4323 case HIST_FIELD_FN_CONST
:
4324 return hist_field_const(hist_field
, elt
, buffer
, rbe
, event
);
4325 case HIST_FIELD_FN_LOG2
:
4326 return hist_field_log2(hist_field
, elt
, buffer
, rbe
, event
);
4327 case HIST_FIELD_FN_BUCKET
:
4328 return hist_field_bucket(hist_field
, elt
, buffer
, rbe
, event
);
4329 case HIST_FIELD_FN_TIMESTAMP
:
4330 return hist_field_timestamp(hist_field
, elt
, buffer
, rbe
, event
);
4331 case HIST_FIELD_FN_CPU
:
4332 return hist_field_cpu(hist_field
, elt
, buffer
, rbe
, event
);
4333 case HIST_FIELD_FN_STRING
:
4334 return hist_field_string(hist_field
, elt
, buffer
, rbe
, event
);
4335 case HIST_FIELD_FN_DYNSTRING
:
4336 return hist_field_dynstring(hist_field
, elt
, buffer
, rbe
, event
);
4337 case HIST_FIELD_FN_RELDYNSTRING
:
4338 return hist_field_reldynstring(hist_field
, elt
, buffer
, rbe
, event
);
4339 case HIST_FIELD_FN_PSTRING
:
4340 return hist_field_pstring(hist_field
, elt
, buffer
, rbe
, event
);
4341 case HIST_FIELD_FN_S64
:
4342 return hist_field_s64(hist_field
, elt
, buffer
, rbe
, event
);
4343 case HIST_FIELD_FN_U64
:
4344 return hist_field_u64(hist_field
, elt
, buffer
, rbe
, event
);
4345 case HIST_FIELD_FN_S32
:
4346 return hist_field_s32(hist_field
, elt
, buffer
, rbe
, event
);
4347 case HIST_FIELD_FN_U32
:
4348 return hist_field_u32(hist_field
, elt
, buffer
, rbe
, event
);
4349 case HIST_FIELD_FN_S16
:
4350 return hist_field_s16(hist_field
, elt
, buffer
, rbe
, event
);
4351 case HIST_FIELD_FN_U16
:
4352 return hist_field_u16(hist_field
, elt
, buffer
, rbe
, event
);
4353 case HIST_FIELD_FN_S8
:
4354 return hist_field_s8(hist_field
, elt
, buffer
, rbe
, event
);
4355 case HIST_FIELD_FN_U8
:
4356 return hist_field_u8(hist_field
, elt
, buffer
, rbe
, event
);
4357 case HIST_FIELD_FN_UMINUS
:
4358 return hist_field_unary_minus(hist_field
, elt
, buffer
, rbe
, event
);
4359 case HIST_FIELD_FN_MINUS
:
4360 return hist_field_minus(hist_field
, elt
, buffer
, rbe
, event
);
4361 case HIST_FIELD_FN_PLUS
:
4362 return hist_field_plus(hist_field
, elt
, buffer
, rbe
, event
);
4363 case HIST_FIELD_FN_DIV
:
4364 return hist_field_div(hist_field
, elt
, buffer
, rbe
, event
);
4365 case HIST_FIELD_FN_MULT
:
4366 return hist_field_mult(hist_field
, elt
, buffer
, rbe
, event
);
4367 case HIST_FIELD_FN_DIV_POWER2
:
4368 return div_by_power_of_two(hist_field
, elt
, buffer
, rbe
, event
);
4369 case HIST_FIELD_FN_DIV_NOT_POWER2
:
4370 return div_by_not_power_of_two(hist_field
, elt
, buffer
, rbe
, event
);
4371 case HIST_FIELD_FN_DIV_MULT_SHIFT
:
4372 return div_by_mult_and_shift(hist_field
, elt
, buffer
, rbe
, event
);
4373 case HIST_FIELD_FN_EXECNAME
:
4374 return hist_field_execname(hist_field
, elt
, buffer
, rbe
, event
);
4375 case HIST_FIELD_FN_STACK
:
4376 return hist_field_stack(hist_field
, elt
, buffer
, rbe
, event
);
4382 /* Convert a var that points to common_pid.execname to a string */
4383 static void update_var_execname(struct hist_field
*hist_field
)
4385 hist_field
->flags
= HIST_FIELD_FL_STRING
| HIST_FIELD_FL_VAR
|
4386 HIST_FIELD_FL_EXECNAME
;
4387 hist_field
->size
= MAX_FILTER_STR_VAL
;
4388 hist_field
->is_signed
= 0;
4390 kfree_const(hist_field
->type
);
4391 hist_field
->type
= "char[]";
4393 hist_field
->fn_num
= HIST_FIELD_FN_EXECNAME
;
4396 static int create_var_field(struct hist_trigger_data
*hist_data
,
4397 unsigned int val_idx
,
4398 struct trace_event_file
*file
,
4399 char *var_name
, char *expr_str
)
4401 struct trace_array
*tr
= hist_data
->event_file
->tr
;
4402 unsigned long flags
= 0;
4405 if (WARN_ON(val_idx
>= TRACING_MAP_VALS_MAX
+ TRACING_MAP_VARS_MAX
))
4408 if (find_var(hist_data
, file
, var_name
) && !hist_data
->remove
) {
4409 hist_err(tr
, HIST_ERR_DUPLICATE_VAR
, errpos(var_name
));
4413 flags
|= HIST_FIELD_FL_VAR
;
4414 hist_data
->n_vars
++;
4415 if (WARN_ON(hist_data
->n_vars
> TRACING_MAP_VARS_MAX
))
4418 ret
= __create_val_field(hist_data
, val_idx
, file
, var_name
, expr_str
, flags
);
4420 if (!ret
&& hist_data
->fields
[val_idx
]->flags
& HIST_FIELD_FL_EXECNAME
)
4421 update_var_execname(hist_data
->fields
[val_idx
]);
4423 if (!ret
&& hist_data
->fields
[val_idx
]->flags
&
4424 (HIST_FIELD_FL_STRING
| HIST_FIELD_FL_STACKTRACE
))
4425 hist_data
->fields
[val_idx
]->var_str_idx
= hist_data
->n_var_str
++;
4430 static int create_val_fields(struct hist_trigger_data
*hist_data
,
4431 struct trace_event_file
*file
)
4433 unsigned int i
, j
= 1, n_hitcount
= 0;
4434 char *fields_str
, *field_str
;
4437 ret
= create_hitcount_val(hist_data
);
4441 fields_str
= hist_data
->attrs
->vals_str
;
4445 for (i
= 0, j
= 1; i
< TRACING_MAP_VALS_MAX
&&
4446 j
< TRACING_MAP_VALS_MAX
; i
++) {
4447 field_str
= strsep(&fields_str
, ",");
4451 if (strcmp(field_str
, "hitcount") == 0) {
4456 ret
= create_val_field(hist_data
, j
++, file
, field_str
);
4461 if (fields_str
&& (strcmp(fields_str
, "hitcount") != 0))
4464 /* There is only raw hitcount but nohitcount suppresses it. */
4465 if (j
== 1 && hist_data
->attrs
->no_hitcount
) {
4466 hist_err(hist_data
->event_file
->tr
, HIST_ERR_NEED_NOHC_VAL
, 0);
4473 static int create_key_field(struct hist_trigger_data
*hist_data
,
4474 unsigned int key_idx
,
4475 unsigned int key_offset
,
4476 struct trace_event_file
*file
,
4479 struct trace_array
*tr
= hist_data
->event_file
->tr
;
4480 struct hist_field
*hist_field
= NULL
;
4481 unsigned long flags
= 0;
4482 unsigned int key_size
;
4483 int ret
= 0, n_subexprs
= 0;
4485 if (WARN_ON(key_idx
>= HIST_FIELDS_MAX
))
4488 flags
|= HIST_FIELD_FL_KEY
;
4490 if (strcmp(field_str
, "stacktrace") == 0) {
4491 flags
|= HIST_FIELD_FL_STACKTRACE
;
4492 key_size
= sizeof(unsigned long) * HIST_STACKTRACE_DEPTH
;
4493 hist_field
= create_hist_field(hist_data
, NULL
, flags
, NULL
);
4495 hist_field
= parse_expr(hist_data
, file
, field_str
, flags
,
4497 if (IS_ERR(hist_field
)) {
4498 ret
= PTR_ERR(hist_field
);
4502 if (field_has_hist_vars(hist_field
, 0)) {
4503 hist_err(tr
, HIST_ERR_INVALID_REF_KEY
, errpos(field_str
));
4504 destroy_hist_field(hist_field
, 0);
4509 key_size
= hist_field
->size
;
4512 hist_data
->fields
[key_idx
] = hist_field
;
4514 key_size
= ALIGN(key_size
, sizeof(u64
));
4515 hist_data
->fields
[key_idx
]->size
= key_size
;
4516 hist_data
->fields
[key_idx
]->offset
= key_offset
;
4518 hist_data
->key_size
+= key_size
;
4520 if (hist_data
->key_size
> HIST_KEY_SIZE_MAX
) {
4525 hist_data
->n_keys
++;
4526 hist_data
->n_fields
++;
4528 if (WARN_ON(hist_data
->n_keys
> TRACING_MAP_KEYS_MAX
))
4536 static int create_key_fields(struct hist_trigger_data
*hist_data
,
4537 struct trace_event_file
*file
)
4539 unsigned int i
, key_offset
= 0, n_vals
= hist_data
->n_vals
;
4540 char *fields_str
, *field_str
;
4543 fields_str
= hist_data
->attrs
->keys_str
;
4547 for (i
= n_vals
; i
< n_vals
+ TRACING_MAP_KEYS_MAX
; i
++) {
4548 field_str
= strsep(&fields_str
, ",");
4551 ret
= create_key_field(hist_data
, i
, key_offset
,
4566 static int create_var_fields(struct hist_trigger_data
*hist_data
,
4567 struct trace_event_file
*file
)
4569 unsigned int i
, j
= hist_data
->n_vals
;
4572 unsigned int n_vars
= hist_data
->attrs
->var_defs
.n_vars
;
4574 for (i
= 0; i
< n_vars
; i
++) {
4575 char *var_name
= hist_data
->attrs
->var_defs
.name
[i
];
4576 char *expr
= hist_data
->attrs
->var_defs
.expr
[i
];
4578 ret
= create_var_field(hist_data
, j
++, file
, var_name
, expr
);
4586 static void free_var_defs(struct hist_trigger_data
*hist_data
)
4590 for (i
= 0; i
< hist_data
->attrs
->var_defs
.n_vars
; i
++) {
4591 kfree(hist_data
->attrs
->var_defs
.name
[i
]);
4592 kfree(hist_data
->attrs
->var_defs
.expr
[i
]);
4595 hist_data
->attrs
->var_defs
.n_vars
= 0;
4598 static int parse_var_defs(struct hist_trigger_data
*hist_data
)
4600 struct trace_array
*tr
= hist_data
->event_file
->tr
;
4601 char *s
, *str
, *var_name
, *field_str
;
4602 unsigned int i
, j
, n_vars
= 0;
4605 for (i
= 0; i
< hist_data
->attrs
->n_assignments
; i
++) {
4606 str
= hist_data
->attrs
->assignment_str
[i
];
4607 for (j
= 0; j
< TRACING_MAP_VARS_MAX
; j
++) {
4608 field_str
= strsep(&str
, ",");
4612 var_name
= strsep(&field_str
, "=");
4613 if (!var_name
|| !field_str
) {
4614 hist_err(tr
, HIST_ERR_MALFORMED_ASSIGNMENT
,
4620 if (n_vars
== TRACING_MAP_VARS_MAX
) {
4621 hist_err(tr
, HIST_ERR_TOO_MANY_VARS
, errpos(var_name
));
4626 s
= kstrdup(var_name
, GFP_KERNEL
);
4631 hist_data
->attrs
->var_defs
.name
[n_vars
] = s
;
4633 s
= kstrdup(field_str
, GFP_KERNEL
);
4635 kfree(hist_data
->attrs
->var_defs
.name
[n_vars
]);
4636 hist_data
->attrs
->var_defs
.name
[n_vars
] = NULL
;
4640 hist_data
->attrs
->var_defs
.expr
[n_vars
++] = s
;
4642 hist_data
->attrs
->var_defs
.n_vars
= n_vars
;
4648 free_var_defs(hist_data
);
/*
 * Build every hist field in order: variable defs, then values, then
 * variables, then keys.  The parsed var-def strings are freed whether
 * or not creation succeeded.
 */
static int create_hist_fields(struct hist_trigger_data *hist_data,
			      struct trace_event_file *file)
{
	int ret;

	ret = parse_var_defs(hist_data);
	if (ret)
		return ret;

	ret = create_val_fields(hist_data, file);
	if (ret)
		goto out;

	ret = create_var_fields(hist_data, file);
	if (ret)
		goto out;

	ret = create_key_fields(hist_data, file);

 out:
	free_var_defs(hist_data);

	return ret;
}
4678 static int is_descending(struct trace_array
*tr
, const char *str
)
4683 if (strcmp(str
, "descending") == 0)
4686 if (strcmp(str
, "ascending") == 0)
4689 hist_err(tr
, HIST_ERR_INVALID_SORT_MODIFIER
, errpos((char *)str
));
4694 static int create_sort_keys(struct hist_trigger_data
*hist_data
)
4696 struct trace_array
*tr
= hist_data
->event_file
->tr
;
4697 char *fields_str
= hist_data
->attrs
->sort_key_str
;
4698 struct tracing_map_sort_key
*sort_key
;
4699 int descending
, ret
= 0;
4700 unsigned int i
, j
, k
;
4702 hist_data
->n_sort_keys
= 1; /* we always have at least one, hitcount */
4707 for (i
= 0; i
< TRACING_MAP_SORT_KEYS_MAX
; i
++) {
4708 struct hist_field
*hist_field
;
4709 char *field_str
, *field_name
;
4710 const char *test_name
;
4712 sort_key
= &hist_data
->sort_keys
[i
];
4714 field_str
= strsep(&fields_str
, ",");
4720 hist_err(tr
, HIST_ERR_EMPTY_SORT_FIELD
, errpos("sort="));
4724 if ((i
== TRACING_MAP_SORT_KEYS_MAX
- 1) && fields_str
) {
4725 hist_err(tr
, HIST_ERR_TOO_MANY_SORT_FIELDS
, errpos("sort="));
4730 field_name
= strsep(&field_str
, ".");
4731 if (!field_name
|| !*field_name
) {
4733 hist_err(tr
, HIST_ERR_EMPTY_SORT_FIELD
, errpos("sort="));
4737 if (strcmp(field_name
, "hitcount") == 0) {
4738 descending
= is_descending(tr
, field_str
);
4739 if (descending
< 0) {
4743 sort_key
->descending
= descending
;
4747 for (j
= 1, k
= 1; j
< hist_data
->n_fields
; j
++) {
4750 hist_field
= hist_data
->fields
[j
];
4751 if (hist_field
->flags
& HIST_FIELD_FL_VAR
)
4756 test_name
= hist_field_name(hist_field
, 0);
4758 if (strcmp(field_name
, test_name
) == 0) {
4759 sort_key
->field_idx
= idx
;
4760 descending
= is_descending(tr
, field_str
);
4761 if (descending
< 0) {
4765 sort_key
->descending
= descending
;
4769 if (j
== hist_data
->n_fields
) {
4771 hist_err(tr
, HIST_ERR_INVALID_SORT_FIELD
, errpos(field_name
));
4776 hist_data
->n_sort_keys
= i
;
4781 static void destroy_actions(struct hist_trigger_data
*hist_data
)
4785 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
4786 struct action_data
*data
= hist_data
->actions
[i
];
4788 if (data
->handler
== HANDLER_ONMATCH
)
4789 onmatch_destroy(data
);
4790 else if (data
->handler
== HANDLER_ONMAX
||
4791 data
->handler
== HANDLER_ONCHANGE
)
4792 track_data_destroy(hist_data
, data
);
4798 static int parse_actions(struct hist_trigger_data
*hist_data
)
4800 struct trace_array
*tr
= hist_data
->event_file
->tr
;
4801 struct action_data
*data
;
4807 for (i
= 0; i
< hist_data
->attrs
->n_actions
; i
++) {
4808 enum handler_id hid
= 0;
4811 str
= hist_data
->attrs
->action_str
[i
];
4813 if ((len
= str_has_prefix(str
, "onmatch(")))
4814 hid
= HANDLER_ONMATCH
;
4815 else if ((len
= str_has_prefix(str
, "onmax(")))
4816 hid
= HANDLER_ONMAX
;
4817 else if ((len
= str_has_prefix(str
, "onchange(")))
4818 hid
= HANDLER_ONCHANGE
;
4820 action_str
= str
+ len
;
4823 case HANDLER_ONMATCH
:
4824 data
= onmatch_parse(tr
, action_str
);
4827 case HANDLER_ONCHANGE
:
4828 data
= track_data_parse(hist_data
, action_str
, hid
);
4831 data
= ERR_PTR(-EINVAL
);
4836 ret
= PTR_ERR(data
);
4840 hist_data
->actions
[hist_data
->n_actions
++] = data
;
4846 static int create_actions(struct hist_trigger_data
*hist_data
)
4848 struct action_data
*data
;
4852 for (i
= 0; i
< hist_data
->attrs
->n_actions
; i
++) {
4853 data
= hist_data
->actions
[i
];
4855 if (data
->handler
== HANDLER_ONMATCH
) {
4856 ret
= onmatch_create(hist_data
, data
);
4859 } else if (data
->handler
== HANDLER_ONMAX
||
4860 data
->handler
== HANDLER_ONCHANGE
) {
4861 ret
= track_data_create(hist_data
, data
);
4873 static void print_actions(struct seq_file
*m
,
4874 struct hist_trigger_data
*hist_data
,
4875 struct tracing_map_elt
*elt
)
4879 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
4880 struct action_data
*data
= hist_data
->actions
[i
];
4882 if (data
->action
== ACTION_SNAPSHOT
)
4885 if (data
->handler
== HANDLER_ONMAX
||
4886 data
->handler
== HANDLER_ONCHANGE
)
4887 track_data_print(m
, hist_data
, elt
, data
);
4891 static void print_action_spec(struct seq_file
*m
,
4892 struct hist_trigger_data
*hist_data
,
4893 struct action_data
*data
)
4897 if (data
->action
== ACTION_SAVE
) {
4898 for (i
= 0; i
< hist_data
->n_save_vars
; i
++) {
4899 seq_printf(m
, "%s", hist_data
->save_vars
[i
]->var
->var
.name
);
4900 if (i
< hist_data
->n_save_vars
- 1)
4903 } else if (data
->action
== ACTION_TRACE
) {
4904 if (data
->use_trace_keyword
)
4905 seq_printf(m
, "%s", data
->synth_event_name
);
4906 for (i
= 0; i
< data
->n_params
; i
++) {
4907 if (i
|| data
->use_trace_keyword
)
4909 seq_printf(m
, "%s", data
->params
[i
]);
4914 static void print_track_data_spec(struct seq_file
*m
,
4915 struct hist_trigger_data
*hist_data
,
4916 struct action_data
*data
)
4918 if (data
->handler
== HANDLER_ONMAX
)
4919 seq_puts(m
, ":onmax(");
4920 else if (data
->handler
== HANDLER_ONCHANGE
)
4921 seq_puts(m
, ":onchange(");
4922 seq_printf(m
, "%s", data
->track_data
.var_str
);
4923 seq_printf(m
, ").%s(", data
->action_name
);
4925 print_action_spec(m
, hist_data
, data
);
4930 static void print_onmatch_spec(struct seq_file
*m
,
4931 struct hist_trigger_data
*hist_data
,
4932 struct action_data
*data
)
4934 seq_printf(m
, ":onmatch(%s.%s).", data
->match_data
.event_system
,
4935 data
->match_data
.event
);
4937 seq_printf(m
, "%s(", data
->action_name
);
4939 print_action_spec(m
, hist_data
, data
);
4944 static bool actions_match(struct hist_trigger_data
*hist_data
,
4945 struct hist_trigger_data
*hist_data_test
)
4949 if (hist_data
->n_actions
!= hist_data_test
->n_actions
)
4952 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
4953 struct action_data
*data
= hist_data
->actions
[i
];
4954 struct action_data
*data_test
= hist_data_test
->actions
[i
];
4955 char *action_name
, *action_name_test
;
4957 if (data
->handler
!= data_test
->handler
)
4959 if (data
->action
!= data_test
->action
)
4962 if (data
->n_params
!= data_test
->n_params
)
4965 for (j
= 0; j
< data
->n_params
; j
++) {
4966 if (strcmp(data
->params
[j
], data_test
->params
[j
]) != 0)
4970 if (data
->use_trace_keyword
)
4971 action_name
= data
->synth_event_name
;
4973 action_name
= data
->action_name
;
4975 if (data_test
->use_trace_keyword
)
4976 action_name_test
= data_test
->synth_event_name
;
4978 action_name_test
= data_test
->action_name
;
4980 if (strcmp(action_name
, action_name_test
) != 0)
4983 if (data
->handler
== HANDLER_ONMATCH
) {
4984 if (strcmp(data
->match_data
.event_system
,
4985 data_test
->match_data
.event_system
) != 0)
4987 if (strcmp(data
->match_data
.event
,
4988 data_test
->match_data
.event
) != 0)
4990 } else if (data
->handler
== HANDLER_ONMAX
||
4991 data
->handler
== HANDLER_ONCHANGE
) {
4992 if (strcmp(data
->track_data
.var_str
,
4993 data_test
->track_data
.var_str
) != 0)
5002 static void print_actions_spec(struct seq_file
*m
,
5003 struct hist_trigger_data
*hist_data
)
5007 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
5008 struct action_data
*data
= hist_data
->actions
[i
];
5010 if (data
->handler
== HANDLER_ONMATCH
)
5011 print_onmatch_spec(m
, hist_data
, data
);
5012 else if (data
->handler
== HANDLER_ONMAX
||
5013 data
->handler
== HANDLER_ONCHANGE
)
5014 print_track_data_spec(m
, hist_data
, data
);
5018 static void destroy_field_var_hists(struct hist_trigger_data
*hist_data
)
5022 for (i
= 0; i
< hist_data
->n_field_var_hists
; i
++) {
5023 kfree(hist_data
->field_var_hists
[i
]->cmd
);
5024 kfree(hist_data
->field_var_hists
[i
]);
5028 static void destroy_hist_data(struct hist_trigger_data
*hist_data
)
5033 destroy_hist_trigger_attrs(hist_data
->attrs
);
5034 destroy_hist_fields(hist_data
);
5035 tracing_map_destroy(hist_data
->map
);
5037 destroy_actions(hist_data
);
5038 destroy_field_vars(hist_data
);
5039 destroy_field_var_hists(hist_data
);
5044 static int create_tracing_map_fields(struct hist_trigger_data
*hist_data
)
5046 struct tracing_map
*map
= hist_data
->map
;
5047 struct ftrace_event_field
*field
;
5048 struct hist_field
*hist_field
;
5051 for_each_hist_field(i
, hist_data
) {
5052 hist_field
= hist_data
->fields
[i
];
5053 if (hist_field
->flags
& HIST_FIELD_FL_KEY
) {
5054 tracing_map_cmp_fn_t cmp_fn
;
5056 field
= hist_field
->field
;
5058 if (hist_field
->flags
& HIST_FIELD_FL_STACKTRACE
)
5059 cmp_fn
= tracing_map_cmp_none
;
5060 else if (!field
|| hist_field
->flags
& HIST_FIELD_FL_CPU
)
5061 cmp_fn
= tracing_map_cmp_num(hist_field
->size
,
5062 hist_field
->is_signed
);
5063 else if (is_string_field(field
))
5064 cmp_fn
= tracing_map_cmp_string
;
5066 cmp_fn
= tracing_map_cmp_num(field
->size
,
5068 idx
= tracing_map_add_key_field(map
,
5071 } else if (!(hist_field
->flags
& HIST_FIELD_FL_VAR
))
5072 idx
= tracing_map_add_sum_field(map
);
5077 if (hist_field
->flags
& HIST_FIELD_FL_VAR
) {
5078 idx
= tracing_map_add_var(map
);
5081 hist_field
->var
.idx
= idx
;
5082 hist_field
->var
.hist_data
= hist_data
;
5089 static struct hist_trigger_data
*
5090 create_hist_data(unsigned int map_bits
,
5091 struct hist_trigger_attrs
*attrs
,
5092 struct trace_event_file
*file
,
5095 const struct tracing_map_ops
*map_ops
= NULL
;
5096 struct hist_trigger_data
*hist_data
;
5099 hist_data
= kzalloc(sizeof(*hist_data
), GFP_KERNEL
);
5101 return ERR_PTR(-ENOMEM
);
5103 hist_data
->attrs
= attrs
;
5104 hist_data
->remove
= remove
;
5105 hist_data
->event_file
= file
;
5107 ret
= parse_actions(hist_data
);
5111 ret
= create_hist_fields(hist_data
, file
);
5115 ret
= create_sort_keys(hist_data
);
5119 map_ops
= &hist_trigger_elt_data_ops
;
5121 hist_data
->map
= tracing_map_create(map_bits
, hist_data
->key_size
,
5122 map_ops
, hist_data
);
5123 if (IS_ERR(hist_data
->map
)) {
5124 ret
= PTR_ERR(hist_data
->map
);
5125 hist_data
->map
= NULL
;
5129 ret
= create_tracing_map_fields(hist_data
);
5135 hist_data
->attrs
= NULL
;
5137 destroy_hist_data(hist_data
);
5139 hist_data
= ERR_PTR(ret
);
5144 static void hist_trigger_elt_update(struct hist_trigger_data
*hist_data
,
5145 struct tracing_map_elt
*elt
,
5146 struct trace_buffer
*buffer
, void *rec
,
5147 struct ring_buffer_event
*rbe
,
5150 struct hist_elt_data
*elt_data
;
5151 struct hist_field
*hist_field
;
5152 unsigned int i
, var_idx
;
5155 elt_data
= elt
->private_data
;
5156 elt_data
->var_ref_vals
= var_ref_vals
;
5158 for_each_hist_val_field(i
, hist_data
) {
5159 hist_field
= hist_data
->fields
[i
];
5160 hist_val
= hist_fn_call(hist_field
, elt
, buffer
, rbe
, rec
);
5161 if (hist_field
->flags
& HIST_FIELD_FL_VAR
) {
5162 var_idx
= hist_field
->var
.idx
;
5164 if (hist_field
->flags
&
5165 (HIST_FIELD_FL_STRING
| HIST_FIELD_FL_STACKTRACE
)) {
5166 unsigned int str_start
, var_str_idx
, idx
;
5167 char *str
, *val_str
;
5170 str_start
= hist_data
->n_field_var_str
+
5171 hist_data
->n_save_var_str
;
5172 var_str_idx
= hist_field
->var_str_idx
;
5173 idx
= str_start
+ var_str_idx
;
5175 str
= elt_data
->field_var_str
[idx
];
5176 val_str
= (char *)(uintptr_t)hist_val
;
5178 if (hist_field
->flags
& HIST_FIELD_FL_STRING
) {
5179 size
= min(hist_field
->size
, STR_VAR_LEN_MAX
);
5180 strscpy(str
, val_str
, size
);
5182 char *stack_start
= str
+ sizeof(unsigned long);
5185 e
= stack_trace_save((void *)stack_start
,
5186 HIST_STACKTRACE_DEPTH
,
5187 HIST_STACKTRACE_SKIP
);
5188 if (e
< HIST_STACKTRACE_DEPTH
- 1)
5189 ((unsigned long *)stack_start
)[e
] = 0;
5190 *((unsigned long *)str
) = e
;
5192 hist_val
= (u64
)(uintptr_t)str
;
5194 tracing_map_set_var(elt
, var_idx
, hist_val
);
5197 tracing_map_update_sum(elt
, i
, hist_val
);
5200 for_each_hist_key_field(i
, hist_data
) {
5201 hist_field
= hist_data
->fields
[i
];
5202 if (hist_field
->flags
& HIST_FIELD_FL_VAR
) {
5203 hist_val
= hist_fn_call(hist_field
, elt
, buffer
, rbe
, rec
);
5204 var_idx
= hist_field
->var
.idx
;
5205 tracing_map_set_var(elt
, var_idx
, hist_val
);
5209 update_field_vars(hist_data
, elt
, buffer
, rbe
, rec
);
5212 static inline void add_to_key(char *compound_key
, void *key
,
5213 struct hist_field
*key_field
, void *rec
)
5215 size_t size
= key_field
->size
;
5217 if (key_field
->flags
& HIST_FIELD_FL_STRING
) {
5218 struct ftrace_event_field
*field
;
5220 field
= key_field
->field
;
5221 if (field
->filter_type
== FILTER_DYN_STRING
||
5222 field
->filter_type
== FILTER_RDYN_STRING
)
5223 size
= *(u32
*)(rec
+ field
->offset
) >> 16;
5224 else if (field
->filter_type
== FILTER_STATIC_STRING
)
5227 /* ensure NULL-termination */
5228 if (size
> key_field
->size
- 1)
5229 size
= key_field
->size
- 1;
5231 strncpy(compound_key
+ key_field
->offset
, (char *)key
, size
);
5233 memcpy(compound_key
+ key_field
->offset
, key
, size
);
5237 hist_trigger_actions(struct hist_trigger_data
*hist_data
,
5238 struct tracing_map_elt
*elt
,
5239 struct trace_buffer
*buffer
, void *rec
,
5240 struct ring_buffer_event
*rbe
, void *key
,
5243 struct action_data
*data
;
5246 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
5247 data
= hist_data
->actions
[i
];
5248 data
->fn(hist_data
, elt
, buffer
, rec
, rbe
, key
, data
, var_ref_vals
);
5252 static void event_hist_trigger(struct event_trigger_data
*data
,
5253 struct trace_buffer
*buffer
, void *rec
,
5254 struct ring_buffer_event
*rbe
)
5256 struct hist_trigger_data
*hist_data
= data
->private_data
;
5257 bool use_compound_key
= (hist_data
->n_keys
> 1);
5258 unsigned long entries
[HIST_STACKTRACE_DEPTH
];
5259 u64 var_ref_vals
[TRACING_MAP_VARS_MAX
];
5260 char compound_key
[HIST_KEY_SIZE_MAX
];
5261 struct tracing_map_elt
*elt
= NULL
;
5262 struct hist_field
*key_field
;
5270 memset(compound_key
, 0, hist_data
->key_size
);
5272 for_each_hist_key_field(i
, hist_data
) {
5273 key_field
= hist_data
->fields
[i
];
5275 if (key_field
->flags
& HIST_FIELD_FL_STACKTRACE
) {
5276 memset(entries
, 0, HIST_STACKTRACE_SIZE
);
5277 if (key_field
->field
) {
5278 unsigned long *stack
, n_entries
;
5280 field_contents
= hist_fn_call(key_field
, elt
, buffer
, rbe
, rec
);
5281 stack
= (unsigned long *)(long)field_contents
;
5283 memcpy(entries
, ++stack
, n_entries
* sizeof(unsigned long));
5285 stack_trace_save(entries
, HIST_STACKTRACE_DEPTH
,
5286 HIST_STACKTRACE_SKIP
);
5290 field_contents
= hist_fn_call(key_field
, elt
, buffer
, rbe
, rec
);
5291 if (key_field
->flags
& HIST_FIELD_FL_STRING
) {
5292 key
= (void *)(unsigned long)field_contents
;
5293 use_compound_key
= true;
5295 key
= (void *)&field_contents
;
5298 if (use_compound_key
)
5299 add_to_key(compound_key
, key
, key_field
, rec
);
5302 if (use_compound_key
)
5305 if (hist_data
->n_var_refs
&&
5306 !resolve_var_refs(hist_data
, key
, var_ref_vals
, false))
5309 elt
= tracing_map_insert(hist_data
->map
, key
);
5313 hist_trigger_elt_update(hist_data
, elt
, buffer
, rec
, rbe
, var_ref_vals
);
5315 if (resolve_var_refs(hist_data
, key
, var_ref_vals
, true))
5316 hist_trigger_actions(hist_data
, elt
, buffer
, rec
, rbe
, key
, var_ref_vals
);
/* Print stored stacktrace entries, one symbol per line, stopping at 0. */
static void hist_trigger_stacktrace_print(struct seq_file *m,
					  unsigned long *stacktrace_entries,
					  unsigned int max_entries)
{
	unsigned int spaces = 8;
	unsigned int i;

	for (i = 0; i < max_entries; i++) {
		if (!stacktrace_entries[i])
			return;

		seq_printf(m, "%*c", 1 + spaces, ' ');
		seq_printf(m, "%pS\n", (void*)stacktrace_entries[i]);
	}
}
5335 static void hist_trigger_print_key(struct seq_file
*m
,
5336 struct hist_trigger_data
*hist_data
,
5338 struct tracing_map_elt
*elt
)
5340 struct hist_field
*key_field
;
5341 bool multiline
= false;
5342 const char *field_name
;
5348 for_each_hist_key_field(i
, hist_data
) {
5349 key_field
= hist_data
->fields
[i
];
5351 if (i
> hist_data
->n_vals
)
5354 field_name
= hist_field_name(key_field
, 0);
5356 if (key_field
->flags
& HIST_FIELD_FL_HEX
) {
5357 uval
= *(u64
*)(key
+ key_field
->offset
);
5358 seq_printf(m
, "%s: %llx", field_name
, uval
);
5359 } else if (key_field
->flags
& HIST_FIELD_FL_SYM
) {
5360 uval
= *(u64
*)(key
+ key_field
->offset
);
5361 seq_printf(m
, "%s: [%llx] %-45ps", field_name
,
5362 uval
, (void *)(uintptr_t)uval
);
5363 } else if (key_field
->flags
& HIST_FIELD_FL_SYM_OFFSET
) {
5364 uval
= *(u64
*)(key
+ key_field
->offset
);
5365 seq_printf(m
, "%s: [%llx] %-55pS", field_name
,
5366 uval
, (void *)(uintptr_t)uval
);
5367 } else if (key_field
->flags
& HIST_FIELD_FL_EXECNAME
) {
5368 struct hist_elt_data
*elt_data
= elt
->private_data
;
5371 if (WARN_ON_ONCE(!elt_data
))
5374 comm
= elt_data
->comm
;
5376 uval
= *(u64
*)(key
+ key_field
->offset
);
5377 seq_printf(m
, "%s: %-16s[%10llu]", field_name
,
5379 } else if (key_field
->flags
& HIST_FIELD_FL_SYSCALL
) {
5380 const char *syscall_name
;
5382 uval
= *(u64
*)(key
+ key_field
->offset
);
5383 syscall_name
= get_syscall_name(uval
);
5385 syscall_name
= "unknown_syscall";
5387 seq_printf(m
, "%s: %-30s[%3llu]", field_name
,
5388 syscall_name
, uval
);
5389 } else if (key_field
->flags
& HIST_FIELD_FL_STACKTRACE
) {
5390 if (key_field
->field
)
5391 seq_printf(m
, "%s.stacktrace", key_field
->field
->name
);
5393 seq_puts(m
, "common_stacktrace:\n");
5394 hist_trigger_stacktrace_print(m
,
5395 key
+ key_field
->offset
,
5396 HIST_STACKTRACE_DEPTH
);
5398 } else if (key_field
->flags
& HIST_FIELD_FL_LOG2
) {
5399 seq_printf(m
, "%s: ~ 2^%-2llu", field_name
,
5400 *(u64
*)(key
+ key_field
->offset
));
5401 } else if (key_field
->flags
& HIST_FIELD_FL_BUCKET
) {
5402 unsigned long buckets
= key_field
->buckets
;
5403 uval
= *(u64
*)(key
+ key_field
->offset
);
5404 seq_printf(m
, "%s: ~ %llu-%llu", field_name
,
5405 uval
, uval
+ buckets
-1);
5406 } else if (key_field
->flags
& HIST_FIELD_FL_STRING
) {
5407 seq_printf(m
, "%s: %-50s", field_name
,
5408 (char *)(key
+ key_field
->offset
));
5410 uval
= *(u64
*)(key
+ key_field
->offset
);
5411 seq_printf(m
, "%s: %10llu", field_name
, uval
);
5421 /* Get the 100 times of the percentage of @val in @total */
5422 static inline unsigned int __get_percentage(u64 val
, u64 total
)
5427 if (val
< (U64_MAX
/ 10000))
5428 return (unsigned int)div64_ul(val
* 10000, total
);
5430 total
= div64_u64(total
, 10000);
5434 return (unsigned int)div64_ul(val
, total
);
5436 return val
? UINT_MAX
: 0;
5439 #define BAR_CHAR '#'
5441 static inline const char *__fill_bar_str(char *buf
, int size
, u64 val
, u64 max
)
5443 unsigned int len
= __get_percentage(val
, max
);
5446 if (len
== UINT_MAX
) {
5447 snprintf(buf
, size
, "[ERROR]");
5451 len
= len
* size
/ 10000;
5452 for (i
= 0; i
< len
&& i
< size
; i
++)
/* Per-field running statistics (max/total) used by .percent/.graph. */
5461 struct hist_val_stat
{
/*
 * Print one value field of a histogram entry to seq_file @m.
 * @idx indexes both hist_data->fields[] and @stats; @flags selects
 * the output format: .percent (FL_PERCENT), .graph bar (FL_GRAPH),
 * hex (FL_HEX), or the default decimal.
 */
5466 static void hist_trigger_print_val(struct seq_file
*m
, unsigned int idx
,
5467 const char *field_name
, unsigned long flags
,
5468 struct hist_val_stat
*stats
,
5469 struct tracing_map_elt
*elt
)
5471 u64 val
= tracing_map_read_sum(elt
, idx
);
5475 if (flags
& HIST_FIELD_FL_PERCENT
) {
/* pc is percentage * 100; UINT_MAX from __get_percentage => error. */
5476 pc
= __get_percentage(val
, stats
[idx
].total
);
5478 seq_printf(m
, " %s (%%):[ERROR]", field_name
);
5480 seq_printf(m
, " %s (%%): %3u.%02u", field_name
,
5481 pc
/ 100, pc
% 100);
5482 } else if (flags
& HIST_FIELD_FL_GRAPH
) {
/* 20-character bar scaled against the field's max value. */
5483 seq_printf(m
, " %s: %20s", field_name
,
5484 __fill_bar_str(bar
, 20, val
, stats
[idx
].max
));
5485 } else if (flags
& HIST_FIELD_FL_HEX
) {
5486 seq_printf(m
, " %s: %10llx", field_name
, val
);
5488 seq_printf(m
, " %s: %10llu", field_name
, val
);
/*
 * Print one complete histogram entry: the key, then the hitcount
 * (unless suppressed by :nohitcount), then every non-variable,
 * non-expression value field, then any attached actions.
 */
5492 static void hist_trigger_entry_print(struct seq_file
*m
,
5493 struct hist_trigger_data
*hist_data
,
5494 struct hist_val_stat
*stats
,
5496 struct tracing_map_elt
*elt
)
5498 const char *field_name
;
5499 unsigned int i
= HITCOUNT_IDX
;
5500 unsigned long flags
;
5502 hist_trigger_print_key(m
, hist_data
, key
, elt
);
5504 /* At first, show the raw hitcount if !nohitcount */
5505 if (!hist_data
->attrs
->no_hitcount
)
5506 hist_trigger_print_val(m
, i
, "hitcount", 0, stats
, elt
);
/* Remaining value fields start at index 1 (0 is the hitcount). */
5508 for (i
= 1; i
< hist_data
->n_vals
; i
++) {
5509 field_name
= hist_field_name(hist_data
->fields
[i
], 0);
5510 flags
= hist_data
->fields
[i
]->flags
;
/* Variables and expressions are not displayed per entry. */
5511 if (flags
& HIST_FIELD_FL_VAR
|| flags
& HIST_FIELD_FL_EXPR
)
5515 hist_trigger_print_val(m
, i
, field_name
, flags
, stats
, elt
);
5518 print_actions(m
, hist_data
, elt
);
5523 static int print_entries(struct seq_file
*m
,
5524 struct hist_trigger_data
*hist_data
)
5526 struct tracing_map_sort_entry
**sort_entries
= NULL
;
5527 struct tracing_map
*map
= hist_data
->map
;
5528 int i
, j
, n_entries
;
5529 struct hist_val_stat
*stats
= NULL
;
5532 n_entries
= tracing_map_sort_entries(map
, hist_data
->sort_keys
,
5533 hist_data
->n_sort_keys
,
5538 /* Calculate the max and the total for each field if needed. */
5539 for (j
= 0; j
< hist_data
->n_vals
; j
++) {
5540 if (!(hist_data
->fields
[j
]->flags
&
5541 (HIST_FIELD_FL_PERCENT
| HIST_FIELD_FL_GRAPH
)))
5544 stats
= kcalloc(hist_data
->n_vals
, sizeof(*stats
),
5547 n_entries
= -ENOMEM
;
5551 for (i
= 0; i
< n_entries
; i
++) {
5552 val
= tracing_map_read_sum(sort_entries
[i
]->elt
, j
);
5553 stats
[j
].total
+= val
;
5554 if (stats
[j
].max
< val
)
5559 for (i
= 0; i
< n_entries
; i
++)
5560 hist_trigger_entry_print(m
, hist_data
, stats
,
5561 sort_entries
[i
]->key
,
5562 sort_entries
[i
]->elt
);
5566 tracing_map_destroy_sort_entries(sort_entries
, n_entries
);
/*
 * Emit one whole histogram to seq_file @m: a header with the trigger
 * info line, the sorted entries (via print_entries()), any snapshot
 * from onmax()/onchange() actions, and the hits/entries/drops totals.
 * @n is the trigger's ordinal; visible use here is only as a flag for
 * the blank-line separator between consecutive histograms.
 */
5571 static void hist_trigger_show(struct seq_file
*m
,
5572 struct event_trigger_data
*data
, int n
)
5574 struct hist_trigger_data
*hist_data
;
5578 seq_puts(m
, "\n\n");
5580 seq_puts(m
, "# event histogram\n#\n# trigger info: ");
5581 data
->ops
->print(m
, data
);
5582 seq_puts(m
, "#\n\n");
5584 hist_data
= data
->private_data
;
5585 n_entries
= print_entries(m
, hist_data
);
5589 track_data_snapshot_print(m
, hist_data
);
5591 seq_printf(m
, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n",
5592 (u64
)atomic64_read(&hist_data
->map
->hits
),
5593 n_entries
, (u64
)atomic64_read(&hist_data
->map
->drops
));
/*
 * seq_file show callback for the per-event "hist" file: under
 * event_mutex, resolve the event file from m->private and print every
 * ETT_EVENT_HIST trigger attached to it via hist_trigger_show().
 * NOTE(review): extraction dropped lines 5606-5609 and 5613-5615
 * (presumably the !event_file error path and the return) — verify.
 */
5596 static int hist_show(struct seq_file
*m
, void *v
)
5598 struct event_trigger_data
*data
;
5599 struct trace_event_file
*event_file
;
/* event_mutex protects the file's trigger list during iteration. */
5602 mutex_lock(&event_mutex
);
5604 event_file
= event_file_file(m
->private);
5605 if (unlikely(!event_file
)) {
5610 list_for_each_entry(data
, &event_file
->triggers
, list
) {
5611 if (data
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
)
5612 hist_trigger_show(m
, data
, n
++);
5616 mutex_unlock(&event_mutex
);
/*
 * open() for the "hist" tracefs file: pin the trace instance via
 * tracing_open_file_tr(), then hand off to single_open() with
 * hist_show as the show callback and @file as its private data.
 */
5621 static int event_hist_open(struct inode
*inode
, struct file
*file
)
5625 ret
= tracing_open_file_tr(inode
, file
);
5629 /* Clear private_data to avoid warning in single_open() */
5630 file
->private_data
= NULL
;
5631 return single_open(file
, hist_show
, file
);
/* File operations for the per-event "hist" tracefs file. */
5634 const struct file_operations event_hist_fops
= {
5635 .open
= event_hist_open
,
5637 .llseek
= seq_lseek
,
/* Paired with tracing_open_file_tr() in event_hist_open(). */
5638 .release
= tracing_single_release_file_tr
,
5641 #ifdef CONFIG_HIST_TRIGGERS_DEBUG
/*
 * Debug helper: print a human-readable decoding of a hist_field's
 * primary flag (key/hitcount/var/var-ref, else "normal u64 value")
 * followed by the secondary alias/const flags, to seq_file @m.
 */
5642 static void hist_field_debug_show_flags(struct seq_file
*m
,
5643 unsigned long flags
)
5645 seq_puts(m
, " flags:\n");
5647 if (flags
& HIST_FIELD_FL_KEY
)
5648 seq_puts(m
, " HIST_FIELD_FL_KEY\n");
5649 else if (flags
& HIST_FIELD_FL_HITCOUNT
)
5650 seq_puts(m
, " VAL: HIST_FIELD_FL_HITCOUNT\n");
5651 else if (flags
& HIST_FIELD_FL_VAR
)
5652 seq_puts(m
, " HIST_FIELD_FL_VAR\n");
5653 else if (flags
& HIST_FIELD_FL_VAR_REF
)
5654 seq_puts(m
, " HIST_FIELD_FL_VAR_REF\n");
5656 seq_puts(m
, " VAL: normal u64 value\n");
/* Alias/const are orthogonal to the primary flag decoded above. */
5658 if (flags
& HIST_FIELD_FL_ALIAS
)
5659 seq_puts(m
, " HIST_FIELD_FL_ALIAS\n");
5660 else if (flags
& HIST_FIELD_FL_CONST
)
5661 seq_puts(m
, " HIST_FIELD_FL_CONST\n");
5664 static int hist_field_debug_show(struct seq_file
*m
,
5665 struct hist_field
*field
, unsigned long flags
)
5667 if ((field
->flags
& flags
) != flags
) {
5668 seq_printf(m
, "ERROR: bad flags - %lx\n", flags
);
5672 hist_field_debug_show_flags(m
, field
->flags
);
5674 seq_printf(m
, " ftrace_event_field name: %s\n",
5675 field
->field
->name
);
5677 if (field
->flags
& HIST_FIELD_FL_VAR
) {
5678 seq_printf(m
, " var.name: %s\n", field
->var
.name
);
5679 seq_printf(m
, " var.idx (into tracing_map_elt.vars[]): %u\n",
5683 if (field
->flags
& HIST_FIELD_FL_CONST
)
5684 seq_printf(m
, " constant: %llu\n", field
->constant
);
5686 if (field
->flags
& HIST_FIELD_FL_ALIAS
)
5687 seq_printf(m
, " var_ref_idx (into hist_data->var_refs[]): %u\n",
5688 field
->var_ref_idx
);
5690 if (field
->flags
& HIST_FIELD_FL_VAR_REF
) {
5691 seq_printf(m
, " name: %s\n", field
->name
);
5692 seq_printf(m
, " var.idx (into tracing_map_elt.vars[]): %u\n",
5694 seq_printf(m
, " var.hist_data: %p\n", field
->var
.hist_data
);
5695 seq_printf(m
, " var_ref_idx (into hist_data->var_refs[]): %u\n",
5696 field
->var_ref_idx
);
5698 seq_printf(m
, " system: %s\n", field
->system
);
5699 if (field
->event_name
)
5700 seq_printf(m
, " event_name: %s\n", field
->event_name
);
5703 seq_printf(m
, " type: %s\n", field
->type
);
5704 seq_printf(m
, " size: %u\n", field
->size
);
5705 seq_printf(m
, " is_signed: %u\n", field
->is_signed
);
5710 static int field_var_debug_show(struct seq_file
*m
,
5711 struct field_var
*field_var
, unsigned int i
,
5714 const char *vars_name
= save_vars
? "save_vars" : "field_vars";
5715 struct hist_field
*field
;
5718 seq_printf(m
, "\n hist_data->%s[%d]:\n", vars_name
, i
);
5720 field
= field_var
->var
;
5722 seq_printf(m
, "\n %s[%d].var:\n", vars_name
, i
);
5724 hist_field_debug_show_flags(m
, field
->flags
);
5725 seq_printf(m
, " var.name: %s\n", field
->var
.name
);
5726 seq_printf(m
, " var.idx (into tracing_map_elt.vars[]): %u\n",
5729 field
= field_var
->val
;
5731 seq_printf(m
, "\n %s[%d].val:\n", vars_name
, i
);
5733 seq_printf(m
, " ftrace_event_field name: %s\n",
5734 field
->field
->name
);
5740 seq_printf(m
, " type: %s\n", field
->type
);
5741 seq_printf(m
, " size: %u\n", field
->size
);
5742 seq_printf(m
, " is_signed: %u\n", field
->is_signed
);
5747 static int hist_action_debug_show(struct seq_file
*m
,
5748 struct action_data
*data
, int i
)
5752 if (data
->handler
== HANDLER_ONMAX
||
5753 data
->handler
== HANDLER_ONCHANGE
) {
5754 seq_printf(m
, "\n hist_data->actions[%d].track_data.var_ref:\n", i
);
5755 ret
= hist_field_debug_show(m
, data
->track_data
.var_ref
,
5756 HIST_FIELD_FL_VAR_REF
);
5760 seq_printf(m
, "\n hist_data->actions[%d].track_data.track_var:\n", i
);
5761 ret
= hist_field_debug_show(m
, data
->track_data
.track_var
,
5767 if (data
->handler
== HANDLER_ONMATCH
) {
5768 seq_printf(m
, "\n hist_data->actions[%d].match_data.event_system: %s\n",
5769 i
, data
->match_data
.event_system
);
5770 seq_printf(m
, " hist_data->actions[%d].match_data.event: %s\n",
5771 i
, data
->match_data
.event
);
5777 static int hist_actions_debug_show(struct seq_file
*m
,
5778 struct hist_trigger_data
*hist_data
)
5782 if (hist_data
->n_actions
)
5783 seq_puts(m
, "\n action tracking variables (for onmax()/onchange()/onmatch()):\n");
5785 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
5786 struct action_data
*action
= hist_data
->actions
[i
];
5788 ret
= hist_action_debug_show(m
, action
, i
);
5793 if (hist_data
->n_save_vars
)
5794 seq_puts(m
, "\n save action variables (save() params):\n");
5796 for (i
= 0; i
< hist_data
->n_save_vars
; i
++) {
5797 ret
= field_var_debug_show(m
, hist_data
->save_vars
[i
], i
, true);
5805 static void hist_trigger_debug_show(struct seq_file
*m
,
5806 struct event_trigger_data
*data
, int n
)
5808 struct hist_trigger_data
*hist_data
;
5812 seq_puts(m
, "\n\n");
5814 seq_puts(m
, "# event histogram\n#\n# trigger info: ");
5815 data
->ops
->print(m
, data
);
5816 seq_puts(m
, "#\n\n");
5818 hist_data
= data
->private_data
;
5820 seq_printf(m
, "hist_data: %p\n\n", hist_data
);
5821 seq_printf(m
, " n_vals: %u\n", hist_data
->n_vals
);
5822 seq_printf(m
, " n_keys: %u\n", hist_data
->n_keys
);
5823 seq_printf(m
, " n_fields: %u\n", hist_data
->n_fields
);
5825 seq_puts(m
, "\n val fields:\n\n");
5827 seq_puts(m
, " hist_data->fields[0]:\n");
5828 ret
= hist_field_debug_show(m
, hist_data
->fields
[0],
5829 HIST_FIELD_FL_HITCOUNT
);
5833 for (i
= 1; i
< hist_data
->n_vals
; i
++) {
5834 seq_printf(m
, "\n hist_data->fields[%d]:\n", i
);
5835 ret
= hist_field_debug_show(m
, hist_data
->fields
[i
], 0);
5840 seq_puts(m
, "\n key fields:\n");
5842 for (i
= hist_data
->n_vals
; i
< hist_data
->n_fields
; i
++) {
5843 seq_printf(m
, "\n hist_data->fields[%d]:\n", i
);
5844 ret
= hist_field_debug_show(m
, hist_data
->fields
[i
],
5850 if (hist_data
->n_var_refs
)
5851 seq_puts(m
, "\n variable reference fields:\n");
5853 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
5854 seq_printf(m
, "\n hist_data->var_refs[%d]:\n", i
);
5855 ret
= hist_field_debug_show(m
, hist_data
->var_refs
[i
],
5856 HIST_FIELD_FL_VAR_REF
);
5861 if (hist_data
->n_field_vars
)
5862 seq_puts(m
, "\n field variables:\n");
5864 for (i
= 0; i
< hist_data
->n_field_vars
; i
++) {
5865 ret
= field_var_debug_show(m
, hist_data
->field_vars
[i
], i
, false);
5870 ret
= hist_actions_debug_show(m
, hist_data
);
/*
 * seq_file show callback for "hist_debug": same event-file resolution
 * and trigger-list walk as hist_show(), but dumps internal hist_data
 * state via hist_trigger_debug_show() instead of the histogram itself.
 */
5875 static int hist_debug_show(struct seq_file
*m
, void *v
)
5877 struct event_trigger_data
*data
;
5878 struct trace_event_file
*event_file
;
/* event_mutex protects the file's trigger list during iteration. */
5881 mutex_lock(&event_mutex
);
5883 event_file
= event_file_file(m
->private);
5884 if (unlikely(!event_file
)) {
5889 list_for_each_entry(data
, &event_file
->triggers
, list
) {
5890 if (data
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
)
5891 hist_trigger_debug_show(m
, data
, n
++);
5895 mutex_unlock(&event_mutex
);
/*
 * open() for the "hist_debug" tracefs file; mirrors event_hist_open()
 * but wires single_open() to hist_debug_show.
 */
5900 static int event_hist_debug_open(struct inode
*inode
, struct file
*file
)
5904 ret
= tracing_open_file_tr(inode
, file
);
5908 /* Clear private_data to avoid warning in single_open() */
5909 file
->private_data
= NULL
;
5910 return single_open(file
, hist_debug_show
, file
);
/* File operations for the per-event "hist_debug" tracefs file. */
5913 const struct file_operations event_hist_debug_fops
= {
5914 .open
= event_hist_debug_open
,
5916 .llseek
= seq_lseek
,
/* Paired with tracing_open_file_tr() in event_hist_debug_open(). */
5917 .release
= tracing_single_release_file_tr
,
5921 static void hist_field_print(struct seq_file
*m
, struct hist_field
*hist_field
)
5923 const char *field_name
= hist_field_name(hist_field
, 0);
5925 if (hist_field
->var
.name
)
5926 seq_printf(m
, "%s=", hist_field
->var
.name
);
5928 if (hist_field
->flags
& HIST_FIELD_FL_CPU
)
5929 seq_puts(m
, "common_cpu");
5930 else if (hist_field
->flags
& HIST_FIELD_FL_CONST
)
5931 seq_printf(m
, "%llu", hist_field
->constant
);
5932 else if (field_name
) {
5933 if (hist_field
->flags
& HIST_FIELD_FL_VAR_REF
||
5934 hist_field
->flags
& HIST_FIELD_FL_ALIAS
)
5936 seq_printf(m
, "%s", field_name
);
5937 } else if (hist_field
->flags
& HIST_FIELD_FL_TIMESTAMP
)
5938 seq_puts(m
, "common_timestamp");
5940 if (hist_field
->flags
) {
5941 if (!(hist_field
->flags
& HIST_FIELD_FL_VAR_REF
) &&
5942 !(hist_field
->flags
& HIST_FIELD_FL_EXPR
) &&
5943 !(hist_field
->flags
& HIST_FIELD_FL_STACKTRACE
)) {
5944 const char *flags
= get_hist_field_flags(hist_field
);
5947 seq_printf(m
, ".%s", flags
);
5950 if (hist_field
->buckets
)
5951 seq_printf(m
, "=%ld", hist_field
->buckets
);
5954 static int event_hist_trigger_print(struct seq_file
*m
,
5955 struct event_trigger_data
*data
)
5957 struct hist_trigger_data
*hist_data
= data
->private_data
;
5958 struct hist_field
*field
;
5959 bool have_var
= false;
5960 bool show_val
= false;
5963 seq_puts(m
, HIST_PREFIX
);
5966 seq_printf(m
, "%s:", data
->name
);
5968 seq_puts(m
, "keys=");
5970 for_each_hist_key_field(i
, hist_data
) {
5971 field
= hist_data
->fields
[i
];
5973 if (i
> hist_data
->n_vals
)
5976 if (field
->flags
& HIST_FIELD_FL_STACKTRACE
) {
5978 seq_printf(m
, "%s.stacktrace", field
->field
->name
);
5980 seq_puts(m
, "common_stacktrace");
5982 hist_field_print(m
, field
);
5985 seq_puts(m
, ":vals=");
5987 for_each_hist_val_field(i
, hist_data
) {
5988 field
= hist_data
->fields
[i
];
5989 if (field
->flags
& HIST_FIELD_FL_VAR
) {
5994 if (i
== HITCOUNT_IDX
) {
5995 if (hist_data
->attrs
->no_hitcount
)
5997 seq_puts(m
, "hitcount");
6001 hist_field_print(m
, field
);
6011 for_each_hist_val_field(i
, hist_data
) {
6012 field
= hist_data
->fields
[i
];
6014 if (field
->flags
& HIST_FIELD_FL_VAR
) {
6017 hist_field_print(m
, field
);
6022 seq_puts(m
, ":sort=");
6024 for (i
= 0; i
< hist_data
->n_sort_keys
; i
++) {
6025 struct tracing_map_sort_key
*sort_key
;
6026 unsigned int idx
, first_key_idx
;
6029 first_key_idx
= hist_data
->n_vals
- hist_data
->n_vars
;
6031 sort_key
= &hist_data
->sort_keys
[i
];
6032 idx
= sort_key
->field_idx
;
6034 if (WARN_ON(idx
>= HIST_FIELDS_MAX
))
6040 if (idx
== HITCOUNT_IDX
)
6041 seq_puts(m
, "hitcount");
6043 if (idx
>= first_key_idx
)
6044 idx
+= hist_data
->n_vars
;
6045 hist_field_print(m
, hist_data
->fields
[idx
]);
6048 if (sort_key
->descending
)
6049 seq_puts(m
, ".descending");
6051 seq_printf(m
, ":size=%u", (1 << hist_data
->map
->map_bits
));
6052 if (hist_data
->enable_timestamps
)
6053 seq_printf(m
, ":clock=%s", hist_data
->attrs
->clock
);
6054 if (hist_data
->attrs
->no_hitcount
)
6055 seq_puts(m
, ":nohitcount");
6057 print_actions_spec(m
, hist_data
);
6059 if (data
->filter_str
)
6060 seq_printf(m
, " if %s", data
->filter_str
);
6063 seq_puts(m
, " [paused]");
6065 seq_puts(m
, " [active]");
/*
 * .init op for an (unnamed-ops) hist trigger: on the first reference
 * (data->ref == 0) of a trigger that carries a name attribute,
 * register it in the global named-trigger table.
 */
6072 static int event_hist_trigger_init(struct event_trigger_data
*data
)
6074 struct hist_trigger_data
*hist_data
= data
->private_data
;
6076 if (!data
->ref
&& hist_data
->attrs
->name
)
6077 save_named_trigger(hist_data
->attrs
->name
, data
)
;
/*
 * Tear down the auxiliary histograms created for field variables:
 * for each entry in hist_data->field_var_hists[], replay its original
 * command string through event_hist_trigger_parse() with the "!hist"
 * removal glob, which unregisters that sub-histogram.
 */
6084 static void unregister_field_var_hists(struct hist_trigger_data
*hist_data
)
6086 struct trace_event_file
*file
;
6091 for (i
= 0; i
< hist_data
->n_field_var_hists
; i
++) {
6092 file
= hist_data
->field_var_hists
[i
]->hist_data
->event_file
;
6093 cmd
= hist_data
->field_var_hists
[i
]->cmd
;
/* "!hist" glob => parse in removal mode for this saved command. */
6094 ret
= event_hist_trigger_parse(&trigger_hist_cmd
, file
,
6095 "!hist", "hist", cmd
);
/* Removal of a previously-created trigger should never fail. */
6096 WARN_ON_ONCE(ret
< 0);
/*
 * .free op for a hist trigger: drop a reference and, when it was the
 * last one, unregister the name, free the trigger data, remove the
 * histogram's variables, unregister field-variable sub-histograms and
 * finally destroy the hist_data itself.
 * NOTE(review): the ref-decrement and last-ref conditional lines
 * (orig 6105-6109, 6111, 6113, 6115, 6117) were dropped by the
 * extraction — the calls below run only on the last reference.
 */
6100 static void event_hist_trigger_free(struct event_trigger_data
*data
)
6102 struct hist_trigger_data
*hist_data
= data
->private_data
;
/* Freeing with a non-positive refcount indicates a logic bug. */
6104 if (WARN_ON_ONCE(data
->ref
<= 0))
6110 del_named_trigger(data
);
6112 trigger_data_free(data
);
6114 remove_hist_vars(hist_data
);
6116 unregister_field_var_hists(hist_data
);
6118 destroy_hist_data(hist_data
);
/* Trigger ops for an ordinary (non-named) hist trigger. */
6122 static struct event_trigger_ops event_hist_trigger_ops
= {
6123 .trigger
= event_hist_trigger
,
6124 .print
= event_hist_trigger_print
,
6125 .init
= event_hist_trigger_init
,
6126 .free
= event_hist_trigger_free
,
/*
 * .init op for a trigger that shares a named trigger: register this
 * instance under the named trigger's name, then initialize the shared
 * named_data through the ordinary init path.
 */
6129 static int event_hist_trigger_named_init(struct event_trigger_data
*data
)
6133 save_named_trigger(data
->named_data
->name
, data
);
6135 event_hist_trigger_init(data
->named_data
);
/*
 * .free op for a trigger sharing a named trigger: release the shared
 * named_data via the ordinary free path, then drop this instance's
 * name registration and its own trigger data.
 */
6140 static void event_hist_trigger_named_free(struct event_trigger_data
*data
)
/* Freeing with a non-positive refcount indicates a logic bug. */
6142 if (WARN_ON_ONCE(data
->ref
<= 0))
6145 event_hist_trigger_free(data
->named_data
);
6149 del_named_trigger(data
);
6150 trigger_data_free(data
);
/* Trigger ops variant for triggers attached to a shared named trigger. */
6154 static struct event_trigger_ops event_hist_trigger_named_ops
= {
6155 .trigger
= event_hist_trigger
,
6156 .print
= event_hist_trigger_print
,
6157 .init
= event_hist_trigger_named_init
,
6158 .free
= event_hist_trigger_named_free
,
/*
 * .get_trigger_ops for the hist command: always the plain ops;
 * hist_register_trigger() swaps in the named variant when needed.
 */
6161 static struct event_trigger_ops
*event_hist_get_trigger_ops(char *cmd
,
6164 return &event_hist_trigger_ops
;
/*
 * Implement the ":clear" attribute: pause the (named) trigger, wait
 * for in-flight tracepoint probes to finish, wipe the tracing map,
 * then unpause.
 */
6167 static void hist_clear(struct event_trigger_data
*data
)
6169 struct hist_trigger_data
*hist_data
= data
->private_data
;
6172 pause_named_trigger(data
);
/* Ensure no probe is still writing into the map before clearing. */
6174 tracepoint_synchronize_unregister();
6176 tracing_map_clear(hist_data
->map
);
6179 unpause_named_trigger(data
);
/*
 * Two ftrace_event_fields are compatible when they are the same
 * object, or agree on name, type string, size and signedness.
 * (The extraction dropped the "return true/false" lines that follow
 * each comparison — orig 6186, 6188, 6190, 6192, 6194, 6196+.)
 */
6182 static bool compatible_field(struct ftrace_event_field
*field
,
6183 struct ftrace_event_field
*test_field
)
/* Identity: trivially compatible. */
6185 if (field
== test_field
)
/* Either side NULL (e.g. synthetic fields) => not comparable. */
6187 if (field
== NULL
|| test_field
== NULL
)
6189 if (strcmp(field
->name
, test_field
->name
) != 0)
6191 if (strcmp(field
->type
, test_field
->type
) != 0)
6193 if (field
->size
!= test_field
->size
)
6195 if (field
->is_signed
!= test_field
->is_signed
)
6201 static bool hist_trigger_match(struct event_trigger_data
*data
,
6202 struct event_trigger_data
*data_test
,
6203 struct event_trigger_data
*named_data
,
6206 struct tracing_map_sort_key
*sort_key
, *sort_key_test
;
6207 struct hist_trigger_data
*hist_data
, *hist_data_test
;
6208 struct hist_field
*key_field
, *key_field_test
;
6211 if (named_data
&& (named_data
!= data_test
) &&
6212 (named_data
!= data_test
->named_data
))
6215 if (!named_data
&& is_named_trigger(data_test
))
6218 hist_data
= data
->private_data
;
6219 hist_data_test
= data_test
->private_data
;
6221 if (hist_data
->n_vals
!= hist_data_test
->n_vals
||
6222 hist_data
->n_fields
!= hist_data_test
->n_fields
||
6223 hist_data
->n_sort_keys
!= hist_data_test
->n_sort_keys
)
6226 if (!ignore_filter
) {
6227 if ((data
->filter_str
&& !data_test
->filter_str
) ||
6228 (!data
->filter_str
&& data_test
->filter_str
))
6232 for_each_hist_field(i
, hist_data
) {
6233 key_field
= hist_data
->fields
[i
];
6234 key_field_test
= hist_data_test
->fields
[i
];
6236 if (key_field
->flags
!= key_field_test
->flags
)
6238 if (!compatible_field(key_field
->field
, key_field_test
->field
))
6240 if (key_field
->offset
!= key_field_test
->offset
)
6242 if (key_field
->size
!= key_field_test
->size
)
6244 if (key_field
->is_signed
!= key_field_test
->is_signed
)
6246 if (!!key_field
->var
.name
!= !!key_field_test
->var
.name
)
6248 if (key_field
->var
.name
&&
6249 strcmp(key_field
->var
.name
, key_field_test
->var
.name
) != 0)
6253 for (i
= 0; i
< hist_data
->n_sort_keys
; i
++) {
6254 sort_key
= &hist_data
->sort_keys
[i
];
6255 sort_key_test
= &hist_data_test
->sort_keys
[i
];
6257 if (sort_key
->field_idx
!= sort_key_test
->field_idx
||
6258 sort_key
->descending
!= sort_key_test
->descending
)
6262 if (!ignore_filter
&& data
->filter_str
&&
6263 (strcmp(data
->filter_str
, data_test
->filter_str
) != 0))
6266 if (!actions_match(hist_data
, hist_data_test
))
6272 static bool existing_hist_update_only(char *glob
,
6273 struct event_trigger_data
*data
,
6274 struct trace_event_file
*file
)
6276 struct hist_trigger_data
*hist_data
= data
->private_data
;
6277 struct event_trigger_data
*test
, *named_data
= NULL
;
6278 bool updated
= false;
6280 if (!hist_data
->attrs
->pause
&& !hist_data
->attrs
->cont
&&
6281 !hist_data
->attrs
->clear
)
6284 if (hist_data
->attrs
->name
) {
6285 named_data
= find_named_trigger(hist_data
->attrs
->name
);
6287 if (!hist_trigger_match(data
, named_data
, named_data
,
6293 if (hist_data
->attrs
->name
&& !named_data
)
6296 list_for_each_entry(test
, &file
->triggers
, list
) {
6297 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6298 if (!hist_trigger_match(data
, test
, named_data
, false))
6300 if (hist_data
->attrs
->pause
)
6301 test
->paused
= true;
6302 else if (hist_data
->attrs
->cont
)
6303 test
->paused
= false;
6304 else if (hist_data
->attrs
->clear
)
6314 static int hist_register_trigger(char *glob
,
6315 struct event_trigger_data
*data
,
6316 struct trace_event_file
*file
)
6318 struct hist_trigger_data
*hist_data
= data
->private_data
;
6319 struct event_trigger_data
*test
, *named_data
= NULL
;
6320 struct trace_array
*tr
= file
->tr
;
6323 if (hist_data
->attrs
->name
) {
6324 named_data
= find_named_trigger(hist_data
->attrs
->name
);
6326 if (!hist_trigger_match(data
, named_data
, named_data
,
6328 hist_err(tr
, HIST_ERR_NAMED_MISMATCH
, errpos(hist_data
->attrs
->name
));
6335 if (hist_data
->attrs
->name
&& !named_data
)
6338 lockdep_assert_held(&event_mutex
);
6340 list_for_each_entry(test
, &file
->triggers
, list
) {
6341 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6342 if (hist_trigger_match(data
, test
, named_data
, false)) {
6343 hist_err(tr
, HIST_ERR_TRIGGER_EEXIST
, 0);
6350 if (hist_data
->attrs
->cont
|| hist_data
->attrs
->clear
) {
6351 hist_err(tr
, HIST_ERR_TRIGGER_ENOENT_CLEAR
, 0);
6356 if (hist_data
->attrs
->pause
)
6357 data
->paused
= true;
6360 data
->private_data
= named_data
->private_data
;
6361 set_named_trigger_data(data
, named_data
);
6362 data
->ops
= &event_hist_trigger_named_ops
;
6365 if (data
->ops
->init
) {
6366 ret
= data
->ops
->init(data
);
6371 if (hist_data
->enable_timestamps
) {
6372 char *clock
= hist_data
->attrs
->clock
;
6374 ret
= tracing_set_clock(file
->tr
, hist_data
->attrs
->clock
);
6376 hist_err(tr
, HIST_ERR_SET_CLOCK_FAIL
, errpos(clock
));
6380 tracing_set_filter_buffering(file
->tr
, true);
6384 destroy_hist_data(hist_data
);
/*
 * Attach @data to @file's trigger list and enable the event; on
 * enable failure, roll back the list insertion and condition flag.
 */
6389 static int hist_trigger_enable(struct event_trigger_data
*data
,
6390 struct trace_event_file
*file
)
/* RCU add: readers may already be walking file->triggers. */
6394 list_add_tail_rcu(&data
->list
, &file
->triggers
);
6396 update_cond_flag(file
);
6398 if (trace_event_trigger_enable_disable(file
, 1) < 0) {
/* Roll back the insertion if the event could not be enabled. */
6399 list_del_rcu(&data
->list
);
6400 update_cond_flag(file
);
/*
 * Return whether @file already has an ETT_EVENT_HIST trigger that
 * matches @data (used by the removal path to locate the victim).
 * Must run under event_mutex (asserted below).
 */
6407 static bool have_hist_trigger_match(struct event_trigger_data
*data
,
6408 struct trace_event_file
*file
)
6410 struct hist_trigger_data
*hist_data
= data
->private_data
;
6411 struct event_trigger_data
*test
, *named_data
= NULL
;
6414 lockdep_assert_held(&event_mutex
);
/* Named triggers match against the canonical named instance. */
6416 if (hist_data
->attrs
->name
)
6417 named_data
= find_named_trigger(hist_data
->attrs
->name
);
6419 list_for_each_entry(test
, &file
->triggers
, list
) {
6420 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6421 if (hist_trigger_match(data
, test
, named_data
, false)) {
/*
 * Return whether the matching existing trigger on @file still has
 * live variable references (check_var_refs()), in which case it must
 * not be removed.  Must run under event_mutex (asserted below).
 */
6431 static bool hist_trigger_check_refs(struct event_trigger_data
*data
,
6432 struct trace_event_file
*file
)
6434 struct hist_trigger_data
*hist_data
= data
->private_data
;
6435 struct event_trigger_data
*test
, *named_data
= NULL
;
6437 lockdep_assert_held(&event_mutex
);
/* Named triggers match against the canonical named instance. */
6439 if (hist_data
->attrs
->name
)
6440 named_data
= find_named_trigger(hist_data
->attrs
->name
);
6442 list_for_each_entry(test
, &file
->triggers
, list
) {
6443 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6444 if (!hist_trigger_match(data
, test
, named_data
, false))
/* Check the matching trigger's own hist_data for live var refs. */
6446 hist_data
= test
->private_data
;
6447 if (check_var_refs(hist_data
))
6456 static void hist_unregister_trigger(char *glob
,
6457 struct event_trigger_data
*data
,
6458 struct trace_event_file
*file
)
6460 struct event_trigger_data
*test
= NULL
, *iter
, *named_data
= NULL
;
6461 struct hist_trigger_data
*hist_data
= data
->private_data
;
6463 lockdep_assert_held(&event_mutex
);
6465 if (hist_data
->attrs
->name
)
6466 named_data
= find_named_trigger(hist_data
->attrs
->name
);
6468 list_for_each_entry(iter
, &file
->triggers
, list
) {
6469 if (iter
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6470 if (!hist_trigger_match(data
, iter
, named_data
, false))
6473 list_del_rcu(&test
->list
);
6474 trace_event_trigger_enable_disable(file
, 0);
6475 update_cond_flag(file
);
6480 if (test
&& test
->ops
->free
)
6481 test
->ops
->free(test
);
6483 if (hist_data
->enable_timestamps
) {
6484 if (!hist_data
->remove
|| test
)
6485 tracing_set_filter_buffering(file
->tr
, false);
/*
 * Return whether ANY hist trigger on @file has live variable
 * references; used by hist_unreg_all() to refuse bulk removal.
 * Must run under event_mutex (asserted below).
 */
6489 static bool hist_file_check_refs(struct trace_event_file
*file
)
6491 struct hist_trigger_data
*hist_data
;
6492 struct event_trigger_data
*test
;
6494 lockdep_assert_held(&event_mutex
);
6496 list_for_each_entry(test
, &file
->triggers
, list
) {
6497 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6498 hist_data
= test
->private_data
;
6499 if (check_var_refs(hist_data
))
6507 static void hist_unreg_all(struct trace_event_file
*file
)
6509 struct event_trigger_data
*test
, *n
;
6510 struct hist_trigger_data
*hist_data
;
6511 struct synth_event
*se
;
6512 const char *se_name
;
6514 lockdep_assert_held(&event_mutex
);
6516 if (hist_file_check_refs(file
))
6519 list_for_each_entry_safe(test
, n
, &file
->triggers
, list
) {
6520 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6521 hist_data
= test
->private_data
;
6522 list_del_rcu(&test
->list
);
6523 trace_event_trigger_enable_disable(file
, 0);
6525 se_name
= trace_event_name(file
->event_call
);
6526 se
= find_synth_event(se_name
);
6530 update_cond_flag(file
);
6531 if (hist_data
->enable_timestamps
)
6532 tracing_set_filter_buffering(file
->tr
, false);
6533 if (test
->ops
->free
)
6534 test
->ops
->free(test
);
6539 static int event_hist_trigger_parse(struct event_command
*cmd_ops
,
6540 struct trace_event_file
*file
,
6541 char *glob
, char *cmd
,
6542 char *param_and_filter
)
6544 unsigned int hist_trigger_bits
= TRACING_MAP_BITS_DEFAULT
;
6545 struct event_trigger_data
*trigger_data
;
6546 struct hist_trigger_attrs
*attrs
;
6547 struct hist_trigger_data
*hist_data
;
6548 char *param
, *filter
, *p
, *start
;
6549 struct synth_event
*se
;
6550 const char *se_name
;
6554 lockdep_assert_held(&event_mutex
);
6561 last_cmd_set(file
, param_and_filter
);
6564 remove
= event_trigger_check_remove(glob
);
6566 if (event_trigger_empty_param(param_and_filter
))
6570 * separate the trigger from the filter (k:v [if filter])
6571 * allowing for whitespace in the trigger
6573 p
= param
= param_and_filter
;
6575 p
= strstr(p
, "if");
6578 if (p
== param_and_filter
)
6580 if (*(p
- 1) != ' ' && *(p
- 1) != '\t') {
6584 if (p
>= param_and_filter
+ strlen(param_and_filter
) - (sizeof("if") - 1) - 1)
6586 if (*(p
+ sizeof("if") - 1) != ' ' && *(p
+ sizeof("if") - 1) != '\t') {
6597 filter
= strstrip(p
);
6598 param
= strstrip(param
);
6602 * To simplify arithmetic expression parsing, replace occurrences of
6603 * '.sym-offset' modifier with '.symXoffset'
6605 start
= strstr(param
, ".sym-offset");
6608 start
= strstr(start
+ 11, ".sym-offset");
6611 attrs
= parse_hist_trigger_attrs(file
->tr
, param
);
6613 return PTR_ERR(attrs
);
6615 if (attrs
->map_bits
)
6616 hist_trigger_bits
= attrs
->map_bits
;
6618 hist_data
= create_hist_data(hist_trigger_bits
, attrs
, file
, remove
);
6619 if (IS_ERR(hist_data
)) {
6620 destroy_hist_trigger_attrs(attrs
);
6621 return PTR_ERR(hist_data
);
6624 trigger_data
= event_trigger_alloc(cmd_ops
, cmd
, param
, hist_data
);
6625 if (!trigger_data
) {
6630 ret
= event_trigger_set_filter(cmd_ops
, file
, filter
, trigger_data
);
6635 if (!have_hist_trigger_match(trigger_data
, file
))
6638 if (hist_trigger_check_refs(trigger_data
, file
)) {
6643 event_trigger_unregister(cmd_ops
, file
, glob
+1, trigger_data
);
6644 se_name
= trace_event_name(file
->event_call
);
6645 se
= find_synth_event(se_name
);
6652 if (existing_hist_update_only(glob
, trigger_data
, file
))
6655 ret
= event_trigger_register(cmd_ops
, file
, glob
, trigger_data
);
6659 if (get_named_trigger_data(trigger_data
))
6662 ret
= create_actions(hist_data
);
6666 if (has_hist_vars(hist_data
) || hist_data
->n_var_refs
) {
6667 ret
= save_hist_vars(hist_data
);
6672 ret
= tracing_map_init(hist_data
->map
);
6676 ret
= hist_trigger_enable(trigger_data
, file
);
6680 se_name
= trace_event_name(file
->event_call
);
6681 se
= find_synth_event(se_name
);
6685 if (ret
== 0 && glob
[0])
6690 event_trigger_unregister(cmd_ops
, file
, glob
+1, trigger_data
);
6692 event_trigger_reset_filter(cmd_ops
, trigger_data
);
6694 remove_hist_vars(hist_data
);
6696 kfree(trigger_data
);
6698 destroy_hist_data(hist_data
);
/* The "hist" event command definition registered with the trigger core. */
6702 static struct event_command trigger_hist_cmd
= {
6704 .trigger_type
= ETT_EVENT_HIST
,
/* Needs the trace record contents, not just the event firing. */
6705 .flags
= EVENT_CMD_FL_NEEDS_REC
,
6706 .parse
= event_hist_trigger_parse
,
6707 .reg
= hist_register_trigger
,
6708 .unreg
= hist_unregister_trigger
,
6709 .unreg_all
= hist_unreg_all
,
6710 .get_trigger_ops
= event_hist_get_trigger_ops
,
6711 .set_filter
= set_trigger_filter
,
/* Boot-time registration of the "hist" trigger command. */
6714 __init
int register_trigger_hist_cmd(void)
6718 ret
= register_event_command(&trigger_hist_cmd
);
/*
 * .trigger op for enable_hist/disable_hist: walk the TARGET event
 * file's trigger list (held in enable_data->file) and pause or
 * unpause every hist trigger on it, per enable_data->enable.
 * (Return type line — orig 6724 — was dropped by the extraction.)
 */
6725 hist_enable_trigger(struct event_trigger_data
*data
,
6726 struct trace_buffer
*buffer
, void *rec
,
6727 struct ring_buffer_event
*event
)
6729 struct enable_trigger_data
*enable_data
= data
->private_data
;
6730 struct event_trigger_data
*test
;
/* RCU iteration; lockdep accepts holders of event_mutex as writers. */
6732 list_for_each_entry_rcu(test
, &enable_data
->file
->triggers
, list
,
6733 lockdep_is_held(&event_mutex
)) {
6734 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6735 if (enable_data
->enable
)
6736 test
->paused
= false;
6738 test
->paused
= true;
/*
 * Counted variant of hist_enable_trigger: fires only while
 * data->count is non-zero (-1 means unlimited); the decrement lines
 * (orig 6747-6753) were dropped by the extraction — verify upstream.
 */
6744 hist_enable_count_trigger(struct event_trigger_data
*data
,
6745 struct trace_buffer
*buffer
, void *rec
,
6746 struct ring_buffer_event
*event
)
6751 if (data
->count
!= -1)
6754 hist_enable_trigger(data
, buffer
, rec
, event
);
/*
 * The four enable_hist/disable_hist ops variants: {enable,disable}
 * x {unconditional, counted}.  enable vs disable is distinguished by
 * the command name in hist_enable_get_trigger_ops(), not by .trigger.
 */
6757 static struct event_trigger_ops hist_enable_trigger_ops
= {
6758 .trigger
= hist_enable_trigger
,
6759 .print
= event_enable_trigger_print
,
6760 .init
= event_trigger_init
,
6761 .free
= event_enable_trigger_free
,
/* Counted enable: fires at most data->count times. */
6764 static struct event_trigger_ops hist_enable_count_trigger_ops
= {
6765 .trigger
= hist_enable_count_trigger
,
6766 .print
= event_enable_trigger_print
,
6767 .init
= event_trigger_init
,
6768 .free
= event_enable_trigger_free
,
/* Unconditional disable. */
6771 static struct event_trigger_ops hist_disable_trigger_ops
= {
6772 .trigger
= hist_enable_trigger
,
6773 .print
= event_enable_trigger_print
,
6774 .init
= event_trigger_init
,
6775 .free
= event_enable_trigger_free
,
/* Counted disable. */
6778 static struct event_trigger_ops hist_disable_count_trigger_ops
= {
6779 .trigger
= hist_enable_count_trigger
,
6780 .print
= event_enable_trigger_print
,
6781 .init
= event_trigger_init
,
6782 .free
= event_enable_trigger_free
,
/*
 * Select the ops variant for an enable_hist/disable_hist command:
 * the command name decides enable vs disable, and the presence of a
 * count parameter (@param non-NULL) selects the counted variant.
 */
6785 static struct event_trigger_ops
*
6786 hist_enable_get_trigger_ops(char *cmd
, char *param
)
6788 struct event_trigger_ops
*ops
;
6791 enable
= (strcmp(cmd
, ENABLE_HIST_STR
) == 0);
6794 ops
= param
? &hist_enable_count_trigger_ops
:
6795 &hist_enable_trigger_ops
;
6797 ops
= param
? &hist_disable_count_trigger_ops
:
6798 &hist_disable_trigger_ops
;
/*
 * .unreg_all for enable_hist/disable_hist: remove every
 * ETT_HIST_ENABLE trigger from @file, disabling the event and
 * freeing each trigger as it goes.  Safe iteration because entries
 * are deleted while walking.
 */
6803 static void hist_enable_unreg_all(struct trace_event_file
*file
)
6805 struct event_trigger_data
*test
, *n
;
6807 list_for_each_entry_safe(test
, n
, &file
->triggers
, list
) {
6808 if (test
->cmd_ops
->trigger_type
== ETT_HIST_ENABLE
) {
6809 list_del_rcu(&test
->list
);
6810 update_cond_flag(file
);
6811 trace_event_trigger_enable_disable(file
, 0);
6812 if (test
->ops
->free
)
6813 test
->ops
->free(test
);
/*
 * The enable_hist and disable_hist command definitions.  They share
 * parse/reg/unreg/unreg_all/get_trigger_ops/set_filter; only .name
 * differs, which hist_enable_get_trigger_ops() uses to pick the
 * enable vs disable ops variant.
 */
6818 static struct event_command trigger_hist_enable_cmd
= {
6819 .name
= ENABLE_HIST_STR
,
6820 .trigger_type
= ETT_HIST_ENABLE
,
6821 .parse
= event_enable_trigger_parse
,
6822 .reg
= event_enable_register_trigger
,
6823 .unreg
= event_enable_unregister_trigger
,
6824 .unreg_all
= hist_enable_unreg_all
,
6825 .get_trigger_ops
= hist_enable_get_trigger_ops
,
6826 .set_filter
= set_trigger_filter
,
/* disable_hist: identical wiring under the disable name. */
6829 static struct event_command trigger_hist_disable_cmd
= {
6830 .name
= DISABLE_HIST_STR
,
6831 .trigger_type
= ETT_HIST_ENABLE
,
6832 .parse
= event_enable_trigger_parse
,
6833 .reg
= event_enable_register_trigger
,
6834 .unreg
= event_enable_unregister_trigger
,
6835 .unreg_all
= hist_enable_unreg_all
,
6836 .get_trigger_ops
= hist_enable_get_trigger_ops
,
6837 .set_filter
= set_trigger_filter
,
/* Roll back both command registrations (used on partial failure). */
6840 static __init
void unregister_trigger_hist_enable_disable_cmds(void)
6842 unregister_event_command(&trigger_hist_enable_cmd
);
6843 unregister_event_command(&trigger_hist_disable_cmd
);
/*
 * Boot-time registration of enable_hist and disable_hist; if the
 * second registration fails, both are unregistered to keep the
 * command set consistent.
 */
6846 __init
int register_trigger_hist_enable_disable_cmds(void)
6850 ret
= register_event_command(&trigger_hist_enable_cmd
);
6851 if (WARN_ON(ret
< 0))
6853 ret
= register_event_command(&trigger_hist_disable_cmd
);
6854 if (WARN_ON(ret
< 0))
6855 unregister_trigger_hist_enable_disable_cmds();