/*
 * trace_events_hist - trace event hist triggers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
 */
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/rculist.h>

#include "tracing_map.h"
#include "trace.h"
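/*
 * Usage sketch (illustrative only, not part of this file; assumes the
 * standard tracefs layout): a hist trigger is written to an event's
 * 'trigger' file and the aggregated table is read back from its 'hist'
 * file, e.g.:
 *
 *   # echo 'hist:keys=common_pid.execname:vals=bytes_req:sort=hitcount' \
 *         > /sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
 *   # cat /sys/kernel/debug/tracing/events/kmem/kmalloc/hist
 *
 * The keys=/vals=/sort=/size= attributes are parsed by
 * parse_hist_trigger_attrs() below.
 */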
typedef u64 (*hist_field_fn_t) (struct hist_field *field, void *event);

struct hist_field {
        struct ftrace_event_field       *field;
        unsigned long                   flags;
        hist_field_fn_t                 fn;
        unsigned int                    size;
        unsigned int                    offset;
};
static u64 hist_field_none(struct hist_field *field, void *event)
{
        return 0;
}

static u64 hist_field_counter(struct hist_field *field, void *event)
{
        return 1;
}

static u64 hist_field_string(struct hist_field *hist_field, void *event)
{
        char *addr = (char *)(event + hist_field->field->offset);

        return (u64)(unsigned long)addr;
}

static u64 hist_field_dynstring(struct hist_field *hist_field, void *event)
{
        u32 str_item = *(u32 *)(event + hist_field->field->offset);
        int str_loc = str_item & 0xffff;        /* low 16 bits: offset of the string in the record */
        char *addr = (char *)(event + str_loc);

        return (u64)(unsigned long)addr;
}

static u64 hist_field_pstring(struct hist_field *hist_field, void *event)
{
        char **addr = (char **)(event + hist_field->field->offset);

        return (u64)(unsigned long)*addr;
}

static u64 hist_field_log2(struct hist_field *hist_field, void *event)
{
        u64 val = *(u64 *)(event + hist_field->field->offset);

        return (u64) ilog2(roundup_pow_of_two(val));
}
#define DEFINE_HIST_FIELD_FN(type)                                      \
static u64 hist_field_##type(struct hist_field *hist_field, void *event)\
{                                                                       \
        type *addr = (type *)(event + hist_field->field->offset);      \
                                                                        \
        return (u64)(unsigned long)*addr;                               \
}

DEFINE_HIST_FIELD_FN(s64);
DEFINE_HIST_FIELD_FN(u64);
DEFINE_HIST_FIELD_FN(s32);
DEFINE_HIST_FIELD_FN(u32);
DEFINE_HIST_FIELD_FN(s16);
DEFINE_HIST_FIELD_FN(u16);
DEFINE_HIST_FIELD_FN(s8);
DEFINE_HIST_FIELD_FN(u8);
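/*
 * For reference, DEFINE_HIST_FIELD_FN(u32) above expands to roughly:
 *
 *   static u64 hist_field_u32(struct hist_field *hist_field, void *event)
 *   {
 *           u32 *addr = (u32 *)(event + hist_field->field->offset);
 *
 *           return (u64)(unsigned long)*addr;
 *   }
 *
 * i.e. one fetch-and-widen helper per fixed-size integer field type,
 * selected later by select_value_fn().
 */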
#define for_each_hist_field(i, hist_data)       \
        for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)

#define for_each_hist_val_field(i, hist_data)   \
        for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)

#define for_each_hist_key_field(i, hist_data)   \
        for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)

#define HIST_STACKTRACE_DEPTH   16
#define HIST_STACKTRACE_SIZE    (HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
#define HIST_STACKTRACE_SKIP    5

#define HITCOUNT_IDX            0
#define HIST_KEY_SIZE_MAX       (MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
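/*
 * Worked sizes (illustrative, assuming a 64-bit kernel where
 * sizeof(unsigned long) == 8): HIST_STACKTRACE_SIZE is 16 * 8 = 128 bytes,
 * and with MAX_FILTER_STR_VAL of 256 that puts HIST_KEY_SIZE_MAX at 384
 * bytes for a compound key.
 */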
enum hist_field_flags {
        HIST_FIELD_FL_HITCOUNT          = 1,
        HIST_FIELD_FL_KEY               = 2,
        HIST_FIELD_FL_STRING            = 4,
        HIST_FIELD_FL_HEX               = 8,
        HIST_FIELD_FL_SYM               = 16,
        HIST_FIELD_FL_SYM_OFFSET        = 32,
        HIST_FIELD_FL_EXECNAME          = 64,
        HIST_FIELD_FL_SYSCALL           = 128,
        HIST_FIELD_FL_STACKTRACE        = 256,
        HIST_FIELD_FL_LOG2              = 512,
};
struct hist_trigger_attrs {
        char            *keys_str;
        char            *vals_str;
        char            *sort_key_str;
        char            *name;
        bool            pause;
        bool            cont;
        bool            clear;
        unsigned int    map_bits;
};

struct hist_trigger_data {
        struct hist_field               *fields[TRACING_MAP_FIELDS_MAX];
        unsigned int                    n_vals;
        unsigned int                    n_keys;
        unsigned int                    n_fields;
        unsigned int                    key_size;
        struct tracing_map_sort_key     sort_keys[TRACING_MAP_SORT_KEYS_MAX];
        unsigned int                    n_sort_keys;
        struct trace_event_file         *event_file;
        struct hist_trigger_attrs       *attrs;
        struct tracing_map              *map;
};
static hist_field_fn_t select_value_fn(int field_size, int field_is_signed)
{
        hist_field_fn_t fn = NULL;

        switch (field_size) {
        /*
         * Cases for 8-, 4-, 2- and 1-byte fields pick the matching
         * hist_field_{s,u}{64,32,16,8}() helper based on field_is_signed.
         */
        }

        return fn;
}
static int parse_map_size(char *str)
{
        unsigned long size, map_bits;
        int ret;

        ret = kstrtoul(str, 0, &size);
        if (ret)
                return ret;

        map_bits = ilog2(roundup_pow_of_two(size));
        if (map_bits < TRACING_MAP_BITS_MIN ||
            map_bits > TRACING_MAP_BITS_MAX)
                return -EINVAL;

        return map_bits;
}
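/*
 * Example (illustrative): a trigger attribute of 'size=4096' parses to
 * size == 4096, and ilog2(roundup_pow_of_two(4096)) == 12, so the map is
 * sized at 2^12 buckets, provided 12 falls within
 * [TRACING_MAP_BITS_MIN, TRACING_MAP_BITS_MAX].
 */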
static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs)
{
        if (!attrs)
                return;

        kfree(attrs->name);
        kfree(attrs->sort_key_str);
        kfree(attrs->keys_str);
        kfree(attrs->vals_str);
        kfree(attrs);
}
static struct hist_trigger_attrs *parse_hist_trigger_attrs(char *trigger_str)
{
        struct hist_trigger_attrs *attrs;
        int ret = 0;

        attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
        if (!attrs)
                return ERR_PTR(-ENOMEM);

        while (trigger_str) {
                char *str = strsep(&trigger_str, ":");

                if ((strncmp(str, "key=", strlen("key=")) == 0) ||
                    (strncmp(str, "keys=", strlen("keys=")) == 0))
                        attrs->keys_str = kstrdup(str, GFP_KERNEL);
                else if ((strncmp(str, "val=", strlen("val=")) == 0) ||
                         (strncmp(str, "vals=", strlen("vals=")) == 0) ||
                         (strncmp(str, "values=", strlen("values=")) == 0))
                        attrs->vals_str = kstrdup(str, GFP_KERNEL);
                else if (strncmp(str, "sort=", strlen("sort=")) == 0)
                        attrs->sort_key_str = kstrdup(str, GFP_KERNEL);
                else if (strncmp(str, "name=", strlen("name=")) == 0)
                        attrs->name = kstrdup(str, GFP_KERNEL);
                else if (strcmp(str, "pause") == 0)
                        attrs->pause = true;
                else if ((strcmp(str, "cont") == 0) ||
                         (strcmp(str, "continue") == 0))
                        attrs->cont = true;
                else if (strcmp(str, "clear") == 0)
                        attrs->clear = true;
                else if (strncmp(str, "size=", strlen("size=")) == 0) {
                        int map_bits = parse_map_size(str);

                        if (map_bits < 0) {
                                ret = map_bits;
                                goto free;
                        }
                        attrs->map_bits = map_bits;
                } else {
                        ret = -EINVAL;
                        goto free;
                }
        }

        if (!attrs->keys_str) {
                ret = -EINVAL;
                goto free;
        }

        return attrs;
 free:
        destroy_hist_trigger_attrs(attrs);

        return ERR_PTR(ret);
}
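/*
 * Parsing example (illustrative): for the trigger string
 * 'keys=common_pid.execname:vals=bytes_req:sort=bytes_req.descending:size=2048'
 * the loop above splits on ':' and leaves
 *   attrs->keys_str     = "keys=common_pid.execname"
 *   attrs->vals_str     = "vals=bytes_req"
 *   attrs->sort_key_str = "sort=bytes_req.descending"
 *   attrs->map_bits     = 11
 * The leading 'keys='/'vals='/'sort=' prefixes are stripped later by the
 * strsep(&fields_str, "=") calls in create_val_fields()/create_key_fields().
 */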
static inline void save_comm(char *comm, struct task_struct *task)
{
        if (task->pid == 0) {
                strcpy(comm, "<idle>");
                return;
        }

        if (WARN_ON_ONCE(task->pid < 0)) {
                strcpy(comm, "<XXX>");
                return;
        }

        memcpy(comm, task->comm, TASK_COMM_LEN);
}
static void hist_trigger_elt_comm_free(struct tracing_map_elt *elt)
{
        kfree((char *)elt->private_data);
}

static int hist_trigger_elt_comm_alloc(struct tracing_map_elt *elt)
{
        struct hist_trigger_data *hist_data = elt->map->private_data;
        struct hist_field *key_field;
        unsigned int i;

        for_each_hist_key_field(i, hist_data) {
                key_field = hist_data->fields[i];

                if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
                        unsigned int size = TASK_COMM_LEN + 1;

                        elt->private_data = kzalloc(size, GFP_KERNEL);
                        if (!elt->private_data)
                                return -ENOMEM;
                        break;
                }
        }

        return 0;
}

static void hist_trigger_elt_comm_copy(struct tracing_map_elt *to,
                                       struct tracing_map_elt *from)
{
        char *comm_from = from->private_data;
        char *comm_to = to->private_data;

        if (comm_from)
                memcpy(comm_to, comm_from, TASK_COMM_LEN + 1);
}

static void hist_trigger_elt_comm_init(struct tracing_map_elt *elt)
{
        char *comm = elt->private_data;

        if (comm)
                save_comm(comm, current);
}

static const struct tracing_map_ops hist_trigger_elt_comm_ops = {
        .elt_alloc      = hist_trigger_elt_comm_alloc,
        .elt_copy       = hist_trigger_elt_comm_copy,
        .elt_free       = hist_trigger_elt_comm_free,
        .elt_init       = hist_trigger_elt_comm_init,
};
static void destroy_hist_field(struct hist_field *hist_field)
{
        kfree(hist_field);
}

static struct hist_field *create_hist_field(struct ftrace_event_field *field,
                                            unsigned long flags)
{
        struct hist_field *hist_field;

        if (field && is_function_field(field))
                return NULL;

        hist_field = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
        if (!hist_field)
                return NULL;

        if (flags & HIST_FIELD_FL_HITCOUNT) {
                hist_field->fn = hist_field_counter;
                goto out;
        }

        if (flags & HIST_FIELD_FL_STACKTRACE) {
                hist_field->fn = hist_field_none;
                goto out;
        }

        if (flags & HIST_FIELD_FL_LOG2) {
                hist_field->fn = hist_field_log2;
                goto out;
        }

        if (WARN_ON_ONCE(!field))
                goto out;

        if (is_string_field(field)) {
                flags |= HIST_FIELD_FL_STRING;

                if (field->filter_type == FILTER_STATIC_STRING)
                        hist_field->fn = hist_field_string;
                else if (field->filter_type == FILTER_DYN_STRING)
                        hist_field->fn = hist_field_dynstring;
                else
                        hist_field->fn = hist_field_pstring;
        } else {
                hist_field->fn = select_value_fn(field->size,
                                                 field->is_signed);
                if (!hist_field->fn) {
                        destroy_hist_field(hist_field);
                        return NULL;
                }
        }
 out:
        hist_field->field = field;
        hist_field->flags = flags;

        return hist_field;
}
static void destroy_hist_fields(struct hist_trigger_data *hist_data)
{
        unsigned int i;

        for (i = 0; i < TRACING_MAP_FIELDS_MAX; i++) {
                if (hist_data->fields[i]) {
                        destroy_hist_field(hist_data->fields[i]);
                        hist_data->fields[i] = NULL;
                }
        }
}

static int create_hitcount_val(struct hist_trigger_data *hist_data)
{
        hist_data->fields[HITCOUNT_IDX] =
                create_hist_field(NULL, HIST_FIELD_FL_HITCOUNT);
        if (!hist_data->fields[HITCOUNT_IDX])
                return -ENOMEM;

        hist_data->n_vals++;

        if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
                return -EINVAL;

        return 0;
}
static int create_val_field(struct hist_trigger_data *hist_data,
                            unsigned int val_idx,
                            struct trace_event_file *file,
                            char *field_str)
{
        struct ftrace_event_field *field = NULL;
        unsigned long flags = 0;
        char *field_name;

        if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX))
                return -EINVAL;

        field_name = strsep(&field_str, ".");
        if (field_str) {
                if (strcmp(field_str, "hex") == 0)
                        flags |= HIST_FIELD_FL_HEX;
                else
                        return -EINVAL;
        }

        field = trace_find_event_field(file->event_call, field_name);
        if (!field)
                return -EINVAL;

        hist_data->fields[val_idx] = create_hist_field(field, flags);
        if (!hist_data->fields[val_idx])
                return -ENOMEM;

        hist_data->n_vals++;

        if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
                return -EINVAL;

        return 0;
}
static int create_val_fields(struct hist_trigger_data *hist_data,
                             struct trace_event_file *file)
{
        char *fields_str, *field_str;
        unsigned int i, j;
        int ret;

        ret = create_hitcount_val(hist_data);
        if (ret)
                goto out;

        fields_str = hist_data->attrs->vals_str;
        if (!fields_str)
                goto out;

        strsep(&fields_str, "=");
        if (!fields_str)
                goto out;

        for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX &&
                     j < TRACING_MAP_VALS_MAX; i++) {
                field_str = strsep(&fields_str, ",");
                if (!field_str)
                        break;
                if (strcmp(field_str, "hitcount") == 0)
                        continue;
                ret = create_val_field(hist_data, j++, file, field_str);
                if (ret)
                        goto out;
        }

        if (fields_str && (strcmp(fields_str, "hitcount") != 0))
                ret = -EINVAL;
 out:
        return ret;
}
static int create_key_field(struct hist_trigger_data *hist_data,
                            unsigned int key_idx,
                            unsigned int key_offset,
                            struct trace_event_file *file,
                            char *field_str)
{
        struct ftrace_event_field *field = NULL;
        unsigned long flags = 0;
        unsigned int key_size;
        int ret = 0;

        if (WARN_ON(key_idx >= TRACING_MAP_FIELDS_MAX))
                return -EINVAL;

        flags |= HIST_FIELD_FL_KEY;

        if (strcmp(field_str, "stacktrace") == 0) {
                flags |= HIST_FIELD_FL_STACKTRACE;
                key_size = sizeof(unsigned long) * HIST_STACKTRACE_DEPTH;
        } else {
                char *field_name = strsep(&field_str, ".");

                if (field_str) {
                        if (strcmp(field_str, "hex") == 0)
                                flags |= HIST_FIELD_FL_HEX;
                        else if (strcmp(field_str, "sym") == 0)
                                flags |= HIST_FIELD_FL_SYM;
                        else if (strcmp(field_str, "sym-offset") == 0)
                                flags |= HIST_FIELD_FL_SYM_OFFSET;
                        else if ((strcmp(field_str, "execname") == 0) &&
                                 (strcmp(field_name, "common_pid") == 0))
                                flags |= HIST_FIELD_FL_EXECNAME;
                        else if (strcmp(field_str, "syscall") == 0)
                                flags |= HIST_FIELD_FL_SYSCALL;
                        else if (strcmp(field_str, "log2") == 0)
                                flags |= HIST_FIELD_FL_LOG2;
                        else
                                return -EINVAL;
                }

                field = trace_find_event_field(file->event_call, field_name);
                if (!field)
                        return -EINVAL;

                if (is_string_field(field))
                        key_size = MAX_FILTER_STR_VAL;
                else
                        key_size = field->size;
        }

        hist_data->fields[key_idx] = create_hist_field(field, flags);
        if (!hist_data->fields[key_idx]) {
                ret = -ENOMEM;
                goto out;
        }

        key_size = ALIGN(key_size, sizeof(u64));
        hist_data->fields[key_idx]->size = key_size;
        hist_data->fields[key_idx]->offset = key_offset;
        hist_data->key_size += key_size;
        if (hist_data->key_size > HIST_KEY_SIZE_MAX) {
                ret = -EINVAL;
                goto out;
        }

        hist_data->n_keys++;

        if (WARN_ON(hist_data->n_keys > TRACING_MAP_KEYS_MAX))
                return -EINVAL;

        ret = key_size;
 out:
        return ret;
}
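/*
 * Key modifier examples (illustrative): 'keys=call_site.sym' resolves the
 * key to a kernel symbol when printed, 'keys=common_pid.execname' attaches
 * the saved comm via the tracing_map_ops above, 'keys=id.syscall' prints a
 * syscall name, 'keys=bytes_req.log2' buckets values by power of two, and
 * 'keys=stacktrace' uses the kernel stack trace itself as the key.
 */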
static int create_key_fields(struct hist_trigger_data *hist_data,
                             struct trace_event_file *file)
{
        unsigned int i, key_offset = 0, n_vals = hist_data->n_vals;
        char *fields_str, *field_str;
        int ret = -EINVAL;

        fields_str = hist_data->attrs->keys_str;
        if (!fields_str)
                goto out;

        strsep(&fields_str, "=");

        for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) {
                field_str = strsep(&fields_str, ",");
                if (!field_str)
                        break;
                ret = create_key_field(hist_data, i, key_offset,
                                       file, field_str);
                if (ret < 0)
                        goto out;
                key_offset += ret;
        }
        ret = 0;
 out:
        return ret;
}

static int create_hist_fields(struct hist_trigger_data *hist_data,
                              struct trace_event_file *file)
{
        int ret;

        ret = create_val_fields(hist_data, file);
        if (ret)
                goto out;

        ret = create_key_fields(hist_data, file);
        if (ret)
                goto out;

        hist_data->n_fields = hist_data->n_vals + hist_data->n_keys;
 out:
        return ret;
}

static int is_descending(const char *str)
{
        if (!str)
                return 0;

        if (strcmp(str, "descending") == 0)
                return 1;

        if (strcmp(str, "ascending") == 0)
                return 0;

        return -EINVAL;
}
static int create_sort_keys(struct hist_trigger_data *hist_data)
{
        char *fields_str = hist_data->attrs->sort_key_str;
        struct ftrace_event_field *field = NULL;
        struct tracing_map_sort_key *sort_key;
        int descending, ret = 0;
        unsigned int i, j;

        hist_data->n_sort_keys = 1; /* we always have at least one, hitcount */

        if (!fields_str)
                goto out;

        strsep(&fields_str, "=");
        if (!fields_str) {
                ret = -EINVAL;
                goto out;
        }

        for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
                char *field_str, *field_name;

                sort_key = &hist_data->sort_keys[i];

                field_str = strsep(&fields_str, ",");
                if (!field_str)
                        break;

                if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) {
                        ret = -EINVAL;
                        break;
                }

                field_name = strsep(&field_str, ".");

                if (strcmp(field_name, "hitcount") == 0) {
                        descending = is_descending(field_str);
                        if (descending < 0) {
                                ret = descending;
                                break;
                        }
                        sort_key->descending = descending;
                        continue;
                }

                for (j = 1; j < hist_data->n_fields; j++) {
                        field = hist_data->fields[j]->field;
                        if (field && (strcmp(field_name, field->name) == 0)) {
                                sort_key->field_idx = j;
                                descending = is_descending(field_str);
                                if (descending < 0) {
                                        ret = descending;
                                        goto out;
                                }
                                sort_key->descending = descending;
                                break;
                        }
                }
                if (j == hist_data->n_fields) {
                        ret = -EINVAL;
                        break;
                }
        }
        hist_data->n_sort_keys = i;
 out:
        return ret;
}
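/*
 * Sort key examples (illustrative): 'sort=hitcount.descending' sorts the
 * displayed entries by hit count, largest first, while a plain
 * 'sort=bytes_req' sorts ascending on that value field; with no sort=
 * attribute at all the single implicit hitcount sort key set up above is
 * used.
 */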
static void destroy_hist_data(struct hist_trigger_data *hist_data)
{
        destroy_hist_trigger_attrs(hist_data->attrs);
        destroy_hist_fields(hist_data);
        tracing_map_destroy(hist_data->map);

        kfree(hist_data);
}

static int create_tracing_map_fields(struct hist_trigger_data *hist_data)
{
        struct tracing_map *map = hist_data->map;
        struct ftrace_event_field *field;
        struct hist_field *hist_field;
        unsigned int i;
        int idx;

        for_each_hist_field(i, hist_data) {
                hist_field = hist_data->fields[i];

                if (hist_field->flags & HIST_FIELD_FL_KEY) {
                        tracing_map_cmp_fn_t cmp_fn;

                        field = hist_field->field;

                        if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
                                cmp_fn = tracing_map_cmp_none;
                        else if (is_string_field(field))
                                cmp_fn = tracing_map_cmp_string;
                        else
                                cmp_fn = tracing_map_cmp_num(field->size,
                                                             field->is_signed);
                        idx = tracing_map_add_key_field(map,
                                                        hist_field->offset,
                                                        cmp_fn);
                } else
                        idx = tracing_map_add_sum_field(map);

                if (idx < 0)
                        return idx;
        }

        return 0;
}

static bool need_tracing_map_ops(struct hist_trigger_data *hist_data)
{
        struct hist_field *key_field;
        unsigned int i;

        for_each_hist_key_field(i, hist_data) {
                key_field = hist_data->fields[i];

                if (key_field->flags & HIST_FIELD_FL_EXECNAME)
                        return true;
        }

        return false;
}
static struct hist_trigger_data *
create_hist_data(unsigned int map_bits,
                 struct hist_trigger_attrs *attrs,
                 struct trace_event_file *file)
{
        const struct tracing_map_ops *map_ops = NULL;
        struct hist_trigger_data *hist_data;
        int ret = 0;

        hist_data = kzalloc(sizeof(*hist_data), GFP_KERNEL);
        if (!hist_data)
                return ERR_PTR(-ENOMEM);

        hist_data->attrs = attrs;

        ret = create_hist_fields(hist_data, file);
        if (ret)
                goto free;

        ret = create_sort_keys(hist_data);
        if (ret)
                goto free;

        if (need_tracing_map_ops(hist_data))
                map_ops = &hist_trigger_elt_comm_ops;

        hist_data->map = tracing_map_create(map_bits, hist_data->key_size,
                                            map_ops, hist_data);
        if (IS_ERR(hist_data->map)) {
                ret = PTR_ERR(hist_data->map);
                hist_data->map = NULL;
                goto free;
        }

        ret = create_tracing_map_fields(hist_data);
        if (ret)
                goto free;

        ret = tracing_map_init(hist_data->map);
        if (ret)
                goto free;

        hist_data->event_file = file;
 out:
        return hist_data;
 free:
        hist_data->attrs = NULL;

        destroy_hist_data(hist_data);

        hist_data = ERR_PTR(ret);

        goto out;
}
static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
                                    struct tracing_map_elt *elt,
                                    void *rec)
{
        struct hist_field *hist_field;
        unsigned int i;
        u64 hist_val;

        for_each_hist_val_field(i, hist_data) {
                hist_field = hist_data->fields[i];
                hist_val = hist_field->fn(hist_field, rec);
                tracing_map_update_sum(elt, i, hist_val);
        }
}

static inline void add_to_key(char *compound_key, void *key,
                              struct hist_field *key_field, void *rec)
{
        size_t size = key_field->size;

        if (key_field->flags & HIST_FIELD_FL_STRING) {
                struct ftrace_event_field *field;

                field = key_field->field;
                if (field->filter_type == FILTER_DYN_STRING)
                        size = *(u32 *)(rec + field->offset) >> 16;
                else if (field->filter_type == FILTER_PTR_STRING)
                        size = strlen(key);
                else if (field->filter_type == FILTER_STATIC_STRING)
                        size = field->size;

                /* ensure NULL-termination */
                if (size > key_field->size - 1)
                        size = key_field->size - 1;
        }

        memcpy(compound_key + key_field->offset, key, size);
}
static void event_hist_trigger(struct event_trigger_data *data, void *rec)
{
        struct hist_trigger_data *hist_data = data->private_data;
        bool use_compound_key = (hist_data->n_keys > 1);
        unsigned long entries[HIST_STACKTRACE_DEPTH];
        char compound_key[HIST_KEY_SIZE_MAX];
        struct stack_trace stacktrace;
        struct hist_field *key_field;
        struct tracing_map_elt *elt;
        u64 field_contents;
        void *key = NULL;
        unsigned int i;

        memset(compound_key, 0, hist_data->key_size);

        for_each_hist_key_field(i, hist_data) {
                key_field = hist_data->fields[i];

                if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
                        stacktrace.max_entries = HIST_STACKTRACE_DEPTH;
                        stacktrace.entries = entries;
                        stacktrace.nr_entries = 0;
                        stacktrace.skip = HIST_STACKTRACE_SKIP;

                        memset(stacktrace.entries, 0, HIST_STACKTRACE_SIZE);
                        save_stack_trace(&stacktrace);

                        key = entries;
                } else {
                        field_contents = key_field->fn(key_field, rec);
                        if (key_field->flags & HIST_FIELD_FL_STRING) {
                                key = (void *)(unsigned long)field_contents;
                                use_compound_key = true;
                        } else
                                key = (void *)&field_contents;
                }

                if (use_compound_key)
                        add_to_key(compound_key, key, key_field, rec);
        }

        if (use_compound_key)
                key = compound_key;

        elt = tracing_map_insert(hist_data->map, key);
        if (elt)
                hist_trigger_elt_update(hist_data, elt, rec);
}
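/*
 * Per-event flow (illustrative): for a trigger with
 * 'keys=common_pid.execname,call_site.sym', each hit evaluates both key
 * fields, packs them at their assigned offsets into compound_key[], looks
 * the key up (or inserts it) with tracing_map_insert(), and then
 * hist_trigger_elt_update() adds this event's values into the element's
 * running sums.
 */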
static void hist_trigger_stacktrace_print(struct seq_file *m,
                                          unsigned long *stacktrace_entries,
                                          unsigned int max_entries)
{
        char str[KSYM_SYMBOL_LEN];
        unsigned int spaces = 8;
        unsigned int i;

        for (i = 0; i < max_entries; i++) {
                if (stacktrace_entries[i] == ULONG_MAX)
                        return;

                seq_printf(m, "%*c", 1 + spaces, ' ');
                sprint_symbol(str, stacktrace_entries[i]);
                seq_printf(m, "%s\n", str);
        }
}
static void
hist_trigger_entry_print(struct seq_file *m,
                         struct hist_trigger_data *hist_data, void *key,
                         struct tracing_map_elt *elt)
{
        struct hist_field *key_field;
        char str[KSYM_SYMBOL_LEN];
        bool multiline = false;
        unsigned int i;
        u64 uval;

        seq_puts(m, "{ ");

        for_each_hist_key_field(i, hist_data) {
                key_field = hist_data->fields[i];

                if (i > hist_data->n_vals)
                        seq_puts(m, ", ");

                if (key_field->flags & HIST_FIELD_FL_HEX) {
                        uval = *(u64 *)(key + key_field->offset);
                        seq_printf(m, "%s: %llx",
                                   key_field->field->name, uval);
                } else if (key_field->flags & HIST_FIELD_FL_SYM) {
                        uval = *(u64 *)(key + key_field->offset);
                        sprint_symbol_no_offset(str, uval);
                        seq_printf(m, "%s: [%llx] %-45s",
                                   key_field->field->name, uval, str);
                } else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) {
                        uval = *(u64 *)(key + key_field->offset);
                        sprint_symbol(str, uval);
                        seq_printf(m, "%s: [%llx] %-55s",
                                   key_field->field->name, uval, str);
                } else if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
                        char *comm = elt->private_data;

                        uval = *(u64 *)(key + key_field->offset);
                        seq_printf(m, "%s: %-16s[%10llu]",
                                   key_field->field->name, comm, uval);
                } else if (key_field->flags & HIST_FIELD_FL_SYSCALL) {
                        const char *syscall_name;

                        uval = *(u64 *)(key + key_field->offset);
                        syscall_name = get_syscall_name(uval);
                        if (!syscall_name)
                                syscall_name = "unknown_syscall";

                        seq_printf(m, "%s: %-30s[%3llu]",
                                   key_field->field->name, syscall_name, uval);
                } else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
                        seq_puts(m, "stacktrace:\n");
                        hist_trigger_stacktrace_print(m,
                                                      key + key_field->offset,
                                                      HIST_STACKTRACE_DEPTH);
                        multiline = true;
                } else if (key_field->flags & HIST_FIELD_FL_LOG2) {
                        seq_printf(m, "%s: ~ 2^%-2llu", key_field->field->name,
                                   *(u64 *)(key + key_field->offset));
                } else if (key_field->flags & HIST_FIELD_FL_STRING) {
                        seq_printf(m, "%s: %-50s", key_field->field->name,
                                   (char *)(key + key_field->offset));
                } else {
                        uval = *(u64 *)(key + key_field->offset);
                        seq_printf(m, "%s: %10llu", key_field->field->name,
                                   uval);
                }
        }

        if (!multiline)
                seq_puts(m, " ");

        seq_puts(m, "}");

        seq_printf(m, " hitcount: %10llu",
                   tracing_map_read_sum(elt, HITCOUNT_IDX));

        for (i = 1; i < hist_data->n_vals; i++) {
                if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) {
                        seq_printf(m, " %s: %10llx",
                                   hist_data->fields[i]->field->name,
                                   tracing_map_read_sum(elt, i));
                } else {
                        seq_printf(m, " %s: %10llu",
                                   hist_data->fields[i]->field->name,
                                   tracing_map_read_sum(elt, i));
                }
        }

        seq_puts(m, "\n");
}
1038 static int print_entries(struct seq_file
*m
,
1039 struct hist_trigger_data
*hist_data
)
1041 struct tracing_map_sort_entry
**sort_entries
= NULL
;
1042 struct tracing_map
*map
= hist_data
->map
;
1045 n_entries
= tracing_map_sort_entries(map
, hist_data
->sort_keys
,
1046 hist_data
->n_sort_keys
,
1051 for (i
= 0; i
< n_entries
; i
++)
1052 hist_trigger_entry_print(m
, hist_data
,
1053 sort_entries
[i
]->key
,
1054 sort_entries
[i
]->elt
);
1056 tracing_map_destroy_sort_entries(sort_entries
, n_entries
);
1061 static void hist_trigger_show(struct seq_file
*m
,
1062 struct event_trigger_data
*data
, int n
)
1064 struct hist_trigger_data
*hist_data
;
1065 int n_entries
, ret
= 0;
1068 seq_puts(m
, "\n\n");
1070 seq_puts(m
, "# event histogram\n#\n# trigger info: ");
1071 data
->ops
->print(m
, data
->ops
, data
);
1072 seq_puts(m
, "#\n\n");
1074 hist_data
= data
->private_data
;
1075 n_entries
= print_entries(m
, hist_data
);
1076 if (n_entries
< 0) {
1081 seq_printf(m
, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n",
1082 (u64
)atomic64_read(&hist_data
->map
->hits
),
1083 n_entries
, (u64
)atomic64_read(&hist_data
->map
->drops
));
1086 static int hist_show(struct seq_file
*m
, void *v
)
1088 struct event_trigger_data
*data
;
1089 struct trace_event_file
*event_file
;
1092 mutex_lock(&event_mutex
);
1094 event_file
= event_file_data(m
->private);
1095 if (unlikely(!event_file
)) {
1100 list_for_each_entry_rcu(data
, &event_file
->triggers
, list
) {
1101 if (data
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
)
1102 hist_trigger_show(m
, data
, n
++);
1106 mutex_unlock(&event_mutex
);
1111 static int event_hist_open(struct inode
*inode
, struct file
*file
)
1113 return single_open(file
, hist_show
, file
);
1116 const struct file_operations event_hist_fops
= {
1117 .open
= event_hist_open
,
1119 .llseek
= seq_lseek
,
1120 .release
= single_release
,
1123 static const char *get_hist_field_flags(struct hist_field
*hist_field
)
1125 const char *flags_str
= NULL
;
1127 if (hist_field
->flags
& HIST_FIELD_FL_HEX
)
1129 else if (hist_field
->flags
& HIST_FIELD_FL_SYM
)
1131 else if (hist_field
->flags
& HIST_FIELD_FL_SYM_OFFSET
)
1132 flags_str
= "sym-offset";
1133 else if (hist_field
->flags
& HIST_FIELD_FL_EXECNAME
)
1134 flags_str
= "execname";
1135 else if (hist_field
->flags
& HIST_FIELD_FL_SYSCALL
)
1136 flags_str
= "syscall";
1137 else if (hist_field
->flags
& HIST_FIELD_FL_LOG2
)
1143 static void hist_field_print(struct seq_file
*m
, struct hist_field
*hist_field
)
1145 seq_printf(m
, "%s", hist_field
->field
->name
);
1146 if (hist_field
->flags
) {
1147 const char *flags_str
= get_hist_field_flags(hist_field
);
1150 seq_printf(m
, ".%s", flags_str
);
1154 static int event_hist_trigger_print(struct seq_file
*m
,
1155 struct event_trigger_ops
*ops
,
1156 struct event_trigger_data
*data
)
1158 struct hist_trigger_data
*hist_data
= data
->private_data
;
1159 struct hist_field
*key_field
;
1162 seq_puts(m
, "hist:");
1165 seq_printf(m
, "%s:", data
->name
);
1167 seq_puts(m
, "keys=");
1169 for_each_hist_key_field(i
, hist_data
) {
1170 key_field
= hist_data
->fields
[i
];
1172 if (i
> hist_data
->n_vals
)
1175 if (key_field
->flags
& HIST_FIELD_FL_STACKTRACE
)
1176 seq_puts(m
, "stacktrace");
1178 hist_field_print(m
, key_field
);
1181 seq_puts(m
, ":vals=");
1183 for_each_hist_val_field(i
, hist_data
) {
1184 if (i
== HITCOUNT_IDX
)
1185 seq_puts(m
, "hitcount");
1188 hist_field_print(m
, hist_data
->fields
[i
]);
1192 seq_puts(m
, ":sort=");
1194 for (i
= 0; i
< hist_data
->n_sort_keys
; i
++) {
1195 struct tracing_map_sort_key
*sort_key
;
1197 sort_key
= &hist_data
->sort_keys
[i
];
1202 if (sort_key
->field_idx
== HITCOUNT_IDX
)
1203 seq_puts(m
, "hitcount");
1205 unsigned int idx
= sort_key
->field_idx
;
1207 if (WARN_ON(idx
>= TRACING_MAP_FIELDS_MAX
))
1210 hist_field_print(m
, hist_data
->fields
[idx
]);
1213 if (sort_key
->descending
)
1214 seq_puts(m
, ".descending");
1217 seq_printf(m
, ":size=%u", (1 << hist_data
->map
->map_bits
));
1219 if (data
->filter_str
)
1220 seq_printf(m
, " if %s", data
->filter_str
);
1223 seq_puts(m
, " [paused]");
1225 seq_puts(m
, " [active]");
1232 static int event_hist_trigger_init(struct event_trigger_ops
*ops
,
1233 struct event_trigger_data
*data
)
1235 struct hist_trigger_data
*hist_data
= data
->private_data
;
1237 if (!data
->ref
&& hist_data
->attrs
->name
)
1238 save_named_trigger(hist_data
->attrs
->name
, data
);
1245 static void event_hist_trigger_free(struct event_trigger_ops
*ops
,
1246 struct event_trigger_data
*data
)
1248 struct hist_trigger_data
*hist_data
= data
->private_data
;
1250 if (WARN_ON_ONCE(data
->ref
<= 0))
1256 del_named_trigger(data
);
1257 trigger_data_free(data
);
1258 destroy_hist_data(hist_data
);
1262 static struct event_trigger_ops event_hist_trigger_ops
= {
1263 .func
= event_hist_trigger
,
1264 .print
= event_hist_trigger_print
,
1265 .init
= event_hist_trigger_init
,
1266 .free
= event_hist_trigger_free
,
1269 static int event_hist_trigger_named_init(struct event_trigger_ops
*ops
,
1270 struct event_trigger_data
*data
)
1274 save_named_trigger(data
->named_data
->name
, data
);
1276 event_hist_trigger_init(ops
, data
->named_data
);
1281 static void event_hist_trigger_named_free(struct event_trigger_ops
*ops
,
1282 struct event_trigger_data
*data
)
1284 if (WARN_ON_ONCE(data
->ref
<= 0))
1287 event_hist_trigger_free(ops
, data
->named_data
);
1291 del_named_trigger(data
);
1292 trigger_data_free(data
);
1296 static struct event_trigger_ops event_hist_trigger_named_ops
= {
1297 .func
= event_hist_trigger
,
1298 .print
= event_hist_trigger_print
,
1299 .init
= event_hist_trigger_named_init
,
1300 .free
= event_hist_trigger_named_free
,
1303 static struct event_trigger_ops
*event_hist_get_trigger_ops(char *cmd
,
1306 return &event_hist_trigger_ops
;
1309 static void hist_clear(struct event_trigger_data
*data
)
1311 struct hist_trigger_data
*hist_data
= data
->private_data
;
1314 pause_named_trigger(data
);
1316 synchronize_sched();
1318 tracing_map_clear(hist_data
->map
);
1321 unpause_named_trigger(data
);
1324 static bool compatible_field(struct ftrace_event_field
*field
,
1325 struct ftrace_event_field
*test_field
)
1327 if (field
== test_field
)
1329 if (field
== NULL
|| test_field
== NULL
)
1331 if (strcmp(field
->name
, test_field
->name
) != 0)
1333 if (strcmp(field
->type
, test_field
->type
) != 0)
1335 if (field
->size
!= test_field
->size
)
1337 if (field
->is_signed
!= test_field
->is_signed
)
1343 static bool hist_trigger_match(struct event_trigger_data
*data
,
1344 struct event_trigger_data
*data_test
,
1345 struct event_trigger_data
*named_data
,
1348 struct tracing_map_sort_key
*sort_key
, *sort_key_test
;
1349 struct hist_trigger_data
*hist_data
, *hist_data_test
;
1350 struct hist_field
*key_field
, *key_field_test
;
1353 if (named_data
&& (named_data
!= data_test
) &&
1354 (named_data
!= data_test
->named_data
))
1357 if (!named_data
&& is_named_trigger(data_test
))
1360 hist_data
= data
->private_data
;
1361 hist_data_test
= data_test
->private_data
;
1363 if (hist_data
->n_vals
!= hist_data_test
->n_vals
||
1364 hist_data
->n_fields
!= hist_data_test
->n_fields
||
1365 hist_data
->n_sort_keys
!= hist_data_test
->n_sort_keys
)
1368 if (!ignore_filter
) {
1369 if ((data
->filter_str
&& !data_test
->filter_str
) ||
1370 (!data
->filter_str
&& data_test
->filter_str
))
1374 for_each_hist_field(i
, hist_data
) {
1375 key_field
= hist_data
->fields
[i
];
1376 key_field_test
= hist_data_test
->fields
[i
];
1378 if (key_field
->flags
!= key_field_test
->flags
)
1380 if (!compatible_field(key_field
->field
, key_field_test
->field
))
1382 if (key_field
->offset
!= key_field_test
->offset
)
1386 for (i
= 0; i
< hist_data
->n_sort_keys
; i
++) {
1387 sort_key
= &hist_data
->sort_keys
[i
];
1388 sort_key_test
= &hist_data_test
->sort_keys
[i
];
1390 if (sort_key
->field_idx
!= sort_key_test
->field_idx
||
1391 sort_key
->descending
!= sort_key_test
->descending
)
1395 if (!ignore_filter
&& data
->filter_str
&&
1396 (strcmp(data
->filter_str
, data_test
->filter_str
) != 0))
1402 static int hist_register_trigger(char *glob
, struct event_trigger_ops
*ops
,
1403 struct event_trigger_data
*data
,
1404 struct trace_event_file
*file
)
1406 struct hist_trigger_data
*hist_data
= data
->private_data
;
1407 struct event_trigger_data
*test
, *named_data
= NULL
;
1410 if (hist_data
->attrs
->name
) {
1411 named_data
= find_named_trigger(hist_data
->attrs
->name
);
1413 if (!hist_trigger_match(data
, named_data
, named_data
,
1421 if (hist_data
->attrs
->name
&& !named_data
)
1424 list_for_each_entry_rcu(test
, &file
->triggers
, list
) {
1425 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
1426 if (!hist_trigger_match(data
, test
, named_data
, false))
1428 if (hist_data
->attrs
->pause
)
1429 test
->paused
= true;
1430 else if (hist_data
->attrs
->cont
)
1431 test
->paused
= false;
1432 else if (hist_data
->attrs
->clear
)
1440 if (hist_data
->attrs
->cont
|| hist_data
->attrs
->clear
) {
1445 if (hist_data
->attrs
->pause
)
1446 data
->paused
= true;
1449 destroy_hist_data(data
->private_data
);
1450 data
->private_data
= named_data
->private_data
;
1451 set_named_trigger_data(data
, named_data
);
1452 data
->ops
= &event_hist_trigger_named_ops
;
1455 if (data
->ops
->init
) {
1456 ret
= data
->ops
->init(data
->ops
, data
);
1461 list_add_rcu(&data
->list
, &file
->triggers
);
1464 update_cond_flag(file
);
1466 if (trace_event_trigger_enable_disable(file
, 1) < 0) {
1467 list_del_rcu(&data
->list
);
1468 update_cond_flag(file
);
1475 static void hist_unregister_trigger(char *glob
, struct event_trigger_ops
*ops
,
1476 struct event_trigger_data
*data
,
1477 struct trace_event_file
*file
)
1479 struct hist_trigger_data
*hist_data
= data
->private_data
;
1480 struct event_trigger_data
*test
, *named_data
= NULL
;
1481 bool unregistered
= false;
1483 if (hist_data
->attrs
->name
)
1484 named_data
= find_named_trigger(hist_data
->attrs
->name
);
1486 list_for_each_entry_rcu(test
, &file
->triggers
, list
) {
1487 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
1488 if (!hist_trigger_match(data
, test
, named_data
, false))
1490 unregistered
= true;
1491 list_del_rcu(&test
->list
);
1492 trace_event_trigger_enable_disable(file
, 0);
1493 update_cond_flag(file
);
1498 if (unregistered
&& test
->ops
->free
)
1499 test
->ops
->free(test
->ops
, test
);
1502 static void hist_unreg_all(struct trace_event_file
*file
)
1504 struct event_trigger_data
*test
, *n
;
1506 list_for_each_entry_safe(test
, n
, &file
->triggers
, list
) {
1507 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
1508 list_del_rcu(&test
->list
);
1509 trace_event_trigger_enable_disable(file
, 0);
1510 update_cond_flag(file
);
1511 if (test
->ops
->free
)
1512 test
->ops
->free(test
->ops
, test
);
1517 static int event_hist_trigger_func(struct event_command
*cmd_ops
,
1518 struct trace_event_file
*file
,
1519 char *glob
, char *cmd
, char *param
)
1521 unsigned int hist_trigger_bits
= TRACING_MAP_BITS_DEFAULT
;
1522 struct event_trigger_data
*trigger_data
;
1523 struct hist_trigger_attrs
*attrs
;
1524 struct event_trigger_ops
*trigger_ops
;
1525 struct hist_trigger_data
*hist_data
;
1532 /* separate the trigger from the filter (k:v [if filter]) */
1533 trigger
= strsep(¶m
, " \t");
1537 attrs
= parse_hist_trigger_attrs(trigger
);
1539 return PTR_ERR(attrs
);
1541 if (attrs
->map_bits
)
1542 hist_trigger_bits
= attrs
->map_bits
;
1544 hist_data
= create_hist_data(hist_trigger_bits
, attrs
, file
);
1545 if (IS_ERR(hist_data
)) {
1546 destroy_hist_trigger_attrs(attrs
);
1547 return PTR_ERR(hist_data
);
1550 trigger_ops
= cmd_ops
->get_trigger_ops(cmd
, trigger
);
1553 trigger_data
= kzalloc(sizeof(*trigger_data
), GFP_KERNEL
);
1557 trigger_data
->count
= -1;
1558 trigger_data
->ops
= trigger_ops
;
1559 trigger_data
->cmd_ops
= cmd_ops
;
1561 INIT_LIST_HEAD(&trigger_data
->list
);
1562 RCU_INIT_POINTER(trigger_data
->filter
, NULL
);
1564 trigger_data
->private_data
= hist_data
;
1566 /* if param is non-empty, it's supposed to be a filter */
1567 if (param
&& cmd_ops
->set_filter
) {
1568 ret
= cmd_ops
->set_filter(param
, trigger_data
, file
);
1573 if (glob
[0] == '!') {
1574 cmd_ops
->unreg(glob
+1, trigger_ops
, trigger_data
, file
);
1579 ret
= cmd_ops
->reg(glob
, trigger_ops
, trigger_data
, file
);
1581 * The above returns on success the # of triggers registered,
1582 * but if it didn't register any it returns zero. Consider no
1583 * triggers registered a failure too.
1586 if (!(attrs
->pause
|| attrs
->cont
|| attrs
->clear
))
1591 /* Just return zero, not the number of registered triggers */
1596 if (cmd_ops
->set_filter
)
1597 cmd_ops
->set_filter(NULL
, trigger_data
, NULL
);
1599 kfree(trigger_data
);
1601 destroy_hist_data(hist_data
);
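/*
 * Command examples (illustrative): the full command written to the event's
 * 'trigger' file may carry a filter after ' if ', e.g.
 *
 *   # echo 'hist:keys=call_site.sym:vals=bytes_req:sort=bytes_req.descending if bytes_req > 256' \
 *         > events/kmem/kmalloc/trigger
 *
 * and a leading '!' on an otherwise matching 'hist:...' string removes the
 * trigger again via the cmd_ops->unreg() path above.
 */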
static struct event_command trigger_hist_cmd = {
        .name                   = "hist",
        .trigger_type           = ETT_EVENT_HIST,
        .flags                  = EVENT_CMD_FL_NEEDS_REC,
        .func                   = event_hist_trigger_func,
        .reg                    = hist_register_trigger,
        .unreg                  = hist_unregister_trigger,
        .unreg_all              = hist_unreg_all,
        .get_trigger_ops        = event_hist_get_trigger_ops,
        .set_filter             = set_trigger_filter,
};

__init int register_trigger_hist_cmd(void)
{
        int ret;

        ret = register_event_command(&trigger_hist_cmd);
        WARN_ON(ret < 0);

        return ret;
}
static void
hist_enable_trigger(struct event_trigger_data *data, void *rec)
{
        struct enable_trigger_data *enable_data = data->private_data;
        struct event_trigger_data *test;

        list_for_each_entry_rcu(test, &enable_data->file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        if (enable_data->enable)
                                test->paused = false;
                        else
                                test->paused = true;
                }
        }
}

static void
hist_enable_count_trigger(struct event_trigger_data *data, void *rec)
{
        if (!data->count)
                return;

        if (data->count != -1)
                (data->count)--;

        hist_enable_trigger(data, rec);
}
static struct event_trigger_ops hist_enable_trigger_ops = {
        .func                   = hist_enable_trigger,
        .print                  = event_enable_trigger_print,
        .init                   = event_trigger_init,
        .free                   = event_enable_trigger_free,
};

static struct event_trigger_ops hist_enable_count_trigger_ops = {
        .func                   = hist_enable_count_trigger,
        .print                  = event_enable_trigger_print,
        .init                   = event_trigger_init,
        .free                   = event_enable_trigger_free,
};

static struct event_trigger_ops hist_disable_trigger_ops = {
        .func                   = hist_enable_trigger,
        .print                  = event_enable_trigger_print,
        .init                   = event_trigger_init,
        .free                   = event_enable_trigger_free,
};

static struct event_trigger_ops hist_disable_count_trigger_ops = {
        .func                   = hist_enable_count_trigger,
        .print                  = event_enable_trigger_print,
        .init                   = event_trigger_init,
        .free                   = event_enable_trigger_free,
};
static struct event_trigger_ops *
hist_enable_get_trigger_ops(char *cmd, char *param)
{
        struct event_trigger_ops *ops;
        bool enable;

        enable = (strcmp(cmd, ENABLE_HIST_STR) == 0);

        if (enable)
                ops = param ? &hist_enable_count_trigger_ops :
                        &hist_enable_trigger_ops;
        else
                ops = param ? &hist_disable_count_trigger_ops :
                        &hist_disable_trigger_ops;

        return ops;
}
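/*
 * Usage sketch (illustrative): the enable_hist/disable_hist triggers pause
 * and unpause an existing hist trigger on another event, e.g.
 *
 *   # echo 'enable_hist:sched:sched_switch' > events/sched/sched_wakeup/trigger
 *
 * which, via hist_enable_trigger() above, clears 'paused' on the
 * sched_switch hist trigger each time a sched_wakeup event fires; an
 * optional trailing ':N' count selects the _count_trigger_ops variants.
 */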
static void hist_enable_unreg_all(struct trace_event_file *file)
{
        struct event_trigger_data *test, *n;

        list_for_each_entry_safe(test, n, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_HIST_ENABLE) {
                        list_del_rcu(&test->list);
                        update_cond_flag(file);
                        trace_event_trigger_enable_disable(file, 0);
                        if (test->ops->free)
                                test->ops->free(test->ops, test);
                }
        }
}
static struct event_command trigger_hist_enable_cmd = {
        .name                   = ENABLE_HIST_STR,
        .trigger_type           = ETT_HIST_ENABLE,
        .func                   = event_enable_trigger_func,
        .reg                    = event_enable_register_trigger,
        .unreg                  = event_enable_unregister_trigger,
        .unreg_all              = hist_enable_unreg_all,
        .get_trigger_ops        = hist_enable_get_trigger_ops,
        .set_filter             = set_trigger_filter,
};

static struct event_command trigger_hist_disable_cmd = {
        .name                   = DISABLE_HIST_STR,
        .trigger_type           = ETT_HIST_ENABLE,
        .func                   = event_enable_trigger_func,
        .reg                    = event_enable_register_trigger,
        .unreg                  = event_enable_unregister_trigger,
        .unreg_all              = hist_enable_unreg_all,
        .get_trigger_ops        = hist_enable_get_trigger_ops,
        .set_filter             = set_trigger_filter,
};

static __init void unregister_trigger_hist_enable_disable_cmds(void)
{
        unregister_event_command(&trigger_hist_enable_cmd);
        unregister_event_command(&trigger_hist_disable_cmd);
}

__init int register_trigger_hist_enable_disable_cmds(void)
{
        int ret;

        ret = register_event_command(&trigger_hist_enable_cmd);
        if (WARN_ON(ret < 0))
                return ret;
        ret = register_event_command(&trigger_hist_disable_cmd);
        if (WARN_ON(ret < 0))
                unregister_trigger_hist_enable_disable_cmds();

        return ret;
}