powerpc/powernv: Report size of OPAL memcons log
[linux/fpc-iii.git] / kernel / trace / trace_events_hist.c
blobf3a960ed75a197ffd0e519501b74bade3f227ce3
1 /*
2 * trace_events_hist - trace event hist triggers
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
17 #include <linux/module.h>
18 #include <linux/kallsyms.h>
19 #include <linux/mutex.h>
20 #include <linux/slab.h>
21 #include <linux/stacktrace.h>
23 #include "tracing_map.h"
24 #include "trace.h"
26 struct hist_field;
28 typedef u64 (*hist_field_fn_t) (struct hist_field *field, void *event);
/*
 * One key or value field of a hist trigger: the backing event field
 * (if any), modifier flags, and the accessor used to pull its value
 * out of a raw trace record.
 */
30 struct hist_field {
/* Backing event field; NULL for synthesized fields (hitcount, stacktrace). */
31 struct ftrace_event_field *field;
/* HIST_FIELD_FL_* role/modifier bits. */
32 unsigned long flags;
/* Accessor returning this field's value from a raw event record. */
33 hist_field_fn_t fn;
/* Key size in bytes and byte offset within a compound key (keys only). */
34 unsigned int size;
35 unsigned int offset;
38 static u64 hist_field_none(struct hist_field *field, void *event)
40 return 0;
43 static u64 hist_field_counter(struct hist_field *field, void *event)
45 return 1;
48 static u64 hist_field_string(struct hist_field *hist_field, void *event)
50 char *addr = (char *)(event + hist_field->field->offset);
52 return (u64)(unsigned long)addr;
/*
 * Accessor for FILTER_DYN_STRING (__data_loc) fields: the u32 at the
 * field offset packs the string's record-relative offset in its low 16
 * bits (the high 16 bits hold the length, unused here).
 */
55 static u64 hist_field_dynstring(struct hist_field *hist_field, void *event)
57 u32 str_item = *(u32 *)(event + hist_field->field->offset);
58 int str_loc = str_item & 0xffff;
59 char *addr = (char *)(event + str_loc);
61 return (u64)(unsigned long)addr;
64 static u64 hist_field_pstring(struct hist_field *hist_field, void *event)
66 char **addr = (char **)(event + hist_field->field->offset);
68 return (u64)(unsigned long)*addr;
/*
 * Accessor for .log2-modified keys: bucket the value by power of two,
 * i.e. return ceil(log2(val)).
 * NOTE(review): unconditionally reads a u64 at the field offset —
 * assumes the underlying field is 64-bit; confirm for smaller fields.
 */
71 static u64 hist_field_log2(struct hist_field *hist_field, void *event)
73 u64 val = *(u64 *)(event + hist_field->field->offset);
75 return (u64) ilog2(roundup_pow_of_two(val));
/*
 * Generate a typed accessor hist_field_<type>() that reads a <type>
 * value at the field's offset in the record and widens it to u64.
 */
78 #define DEFINE_HIST_FIELD_FN(type) \
79 static u64 hist_field_##type(struct hist_field *hist_field, void *event)\
80 { \
81 type *addr = (type *)(event + hist_field->field->offset); \
83 return (u64)(unsigned long)*addr; \
/* Instantiate accessors for every numeric size/signedness combination. */
86 DEFINE_HIST_FIELD_FN(s64);
87 DEFINE_HIST_FIELD_FN(u64);
88 DEFINE_HIST_FIELD_FN(s32);
89 DEFINE_HIST_FIELD_FN(u32);
90 DEFINE_HIST_FIELD_FN(s16);
91 DEFINE_HIST_FIELD_FN(u16);
92 DEFINE_HIST_FIELD_FN(s8);
93 DEFINE_HIST_FIELD_FN(u8);
/* Iterate over all fields: values first ([0, n_vals)), then keys. */
95 #define for_each_hist_field(i, hist_data) \
96 for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)
/* Iterate over value fields only. */
98 #define for_each_hist_val_field(i, hist_data) \
99 for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)
/* Iterate over key fields only (they follow the values in fields[]). */
101 #define for_each_hist_key_field(i, hist_data) \
102 for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)
/* Fixed stacktrace-key geometry; skip hides the tracing internals. */
104 #define HIST_STACKTRACE_DEPTH 16
105 #define HIST_STACKTRACE_SIZE (HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
106 #define HIST_STACKTRACE_SKIP 5
/* fields[] slot of the implicit hitcount value. */
108 #define HITCOUNT_IDX 0
/* Upper bound on the total (compound) key size. */
109 #define HIST_KEY_SIZE_MAX (MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
/*
 * Bit flags describing a hist field's role (hitcount/key) and any
 * command-line modifier (.hex, .sym, .execname, ...) applied to it.
 */
111 enum hist_field_flags {
112 HIST_FIELD_FL_HITCOUNT = 1,
113 HIST_FIELD_FL_KEY = 2,
114 HIST_FIELD_FL_STRING = 4,
115 HIST_FIELD_FL_HEX = 8,
116 HIST_FIELD_FL_SYM = 16,
117 HIST_FIELD_FL_SYM_OFFSET = 32,
118 HIST_FIELD_FL_EXECNAME = 64,
119 HIST_FIELD_FL_SYSCALL = 128,
120 HIST_FIELD_FL_STACKTRACE = 256,
121 HIST_FIELD_FL_LOG2 = 512,
/*
 * Parsed form of the hist trigger command string; the *_str members are
 * kstrdup'd copies of the corresponding "keys="/"vals="/"sort="/"name="
 * clauses, freed by destroy_hist_trigger_attrs().
 */
124 struct hist_trigger_attrs {
125 char *keys_str;
126 char *vals_str;
127 char *sort_key_str;
128 char *name;
/* pause/cont/clear command variants. */
129 bool pause;
130 bool cont;
131 bool clear;
/* log2 of the requested map size ("size=" clause), 0 if unspecified. */
132 unsigned int map_bits;
/*
 * Everything belonging to one hist trigger instance: its fields
 * (values first, then keys), sort keys, owning event file, parsed
 * attributes, and the backing tracing_map.
 */
135 struct hist_trigger_data {
136 struct hist_field *fields[TRACING_MAP_FIELDS_MAX];
137 unsigned int n_vals;
138 unsigned int n_keys;
/* n_fields == n_vals + n_keys. */
139 unsigned int n_fields;
/* Total compound key size in bytes (sum of aligned key sizes). */
140 unsigned int key_size;
141 struct tracing_map_sort_key sort_keys[TRACING_MAP_SORT_KEYS_MAX];
142 unsigned int n_sort_keys;
143 struct trace_event_file *event_file;
144 struct hist_trigger_attrs *attrs;
145 struct tracing_map *map;
148 static hist_field_fn_t select_value_fn(int field_size, int field_is_signed)
150 hist_field_fn_t fn = NULL;
152 switch (field_size) {
153 case 8:
154 if (field_is_signed)
155 fn = hist_field_s64;
156 else
157 fn = hist_field_u64;
158 break;
159 case 4:
160 if (field_is_signed)
161 fn = hist_field_s32;
162 else
163 fn = hist_field_u32;
164 break;
165 case 2:
166 if (field_is_signed)
167 fn = hist_field_s16;
168 else
169 fn = hist_field_u16;
170 break;
171 case 1:
172 if (field_is_signed)
173 fn = hist_field_s8;
174 else
175 fn = hist_field_u8;
176 break;
179 return fn;
182 static int parse_map_size(char *str)
184 unsigned long size, map_bits;
185 int ret;
187 strsep(&str, "=");
188 if (!str) {
189 ret = -EINVAL;
190 goto out;
193 ret = kstrtoul(str, 0, &size);
194 if (ret)
195 goto out;
197 map_bits = ilog2(roundup_pow_of_two(size));
198 if (map_bits < TRACING_MAP_BITS_MIN ||
199 map_bits > TRACING_MAP_BITS_MAX)
200 ret = -EINVAL;
201 else
202 ret = map_bits;
203 out:
204 return ret;
207 static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs)
209 if (!attrs)
210 return;
212 kfree(attrs->name);
213 kfree(attrs->sort_key_str);
214 kfree(attrs->keys_str);
215 kfree(attrs->vals_str);
216 kfree(attrs);
/*
 * Parse the colon-separated hist trigger command string into a freshly
 * allocated hist_trigger_attrs.  "keys=" is mandatory; any clause that
 * matches nothing is an error.  Returns the attrs or an ERR_PTR; on
 * error everything allocated so far is freed.
 */
219 static struct hist_trigger_attrs *parse_hist_trigger_attrs(char *trigger_str)
221 struct hist_trigger_attrs *attrs;
222 int ret = 0;
224 attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
225 if (!attrs)
226 return ERR_PTR(-ENOMEM);
/* Consume one ':'-separated clause per iteration. */
228 while (trigger_str) {
229 char *str = strsep(&trigger_str, ":");
/* Note: the full "key=..." string (including prefix) is duplicated;
 * the later create_*_fields() parsers strip the prefix themselves. */
231 if ((strncmp(str, "key=", strlen("key=")) == 0) ||
232 (strncmp(str, "keys=", strlen("keys=")) == 0))
233 attrs->keys_str = kstrdup(str, GFP_KERNEL);
234 else if ((strncmp(str, "val=", strlen("val=")) == 0) ||
235 (strncmp(str, "vals=", strlen("vals=")) == 0) ||
236 (strncmp(str, "values=", strlen("values=")) == 0))
237 attrs->vals_str = kstrdup(str, GFP_KERNEL);
238 else if (strncmp(str, "sort=", strlen("sort=")) == 0)
239 attrs->sort_key_str = kstrdup(str, GFP_KERNEL);
240 else if (strncmp(str, "name=", strlen("name=")) == 0)
241 attrs->name = kstrdup(str, GFP_KERNEL);
242 else if (strcmp(str, "pause") == 0)
243 attrs->pause = true;
244 else if ((strcmp(str, "cont") == 0) ||
245 (strcmp(str, "continue") == 0))
246 attrs->cont = true;
247 else if (strcmp(str, "clear") == 0)
248 attrs->clear = true;
249 else if (strncmp(str, "size=", strlen("size=")) == 0) {
250 int map_bits = parse_map_size(str);
252 if (map_bits < 0) {
253 ret = map_bits;
254 goto free;
256 attrs->map_bits = map_bits;
/* Unrecognized clause: reject the whole command. */
257 } else {
258 ret = -EINVAL;
259 goto free;
/* A hist trigger without keys is meaningless. */
263 if (!attrs->keys_str) {
264 ret = -EINVAL;
265 goto free;
268 return attrs;
269 free:
270 destroy_hist_trigger_attrs(attrs);
272 return ERR_PTR(ret);
/*
 * Copy @task's comm into @comm (a TASK_COMM_LEN + 1 byte buffer),
 * substituting fixed markers for the idle task and for (warned-about)
 * negative PIDs.
 */
275 static inline void save_comm(char *comm, struct task_struct *task)
277 if (!task->pid) {
278 strcpy(comm, "<idle>");
279 return;
282 if (WARN_ON_ONCE(task->pid < 0)) {
283 strcpy(comm, "<XXX>");
284 return;
/* Destination is one byte larger than TASK_COMM_LEN and was zeroed
 * at allocation, so the copy stays NUL-terminated. */
287 memcpy(comm, task->comm, TASK_COMM_LEN);
290 static void hist_trigger_elt_comm_free(struct tracing_map_elt *elt)
292 kfree((char *)elt->private_data);
/*
 * tracing_map elt_alloc callback: if any key uses the .execname
 * modifier, attach a zeroed comm buffer (TASK_COMM_LEN + 1) to the
 * element.  Returns 0 or -ENOMEM.
 */
295 static int hist_trigger_elt_comm_alloc(struct tracing_map_elt *elt)
297 struct hist_trigger_data *hist_data = elt->map->private_data;
298 struct hist_field *key_field;
299 unsigned int i;
301 for_each_hist_key_field(i, hist_data) {
302 key_field = hist_data->fields[i];
304 if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
/* +1 leaves room for a guaranteed NUL terminator. */
305 unsigned int size = TASK_COMM_LEN + 1;
307 elt->private_data = kzalloc(size, GFP_KERNEL);
308 if (!elt->private_data)
309 return -ENOMEM;
/* One buffer per element is enough, however many execname keys. */
310 break;
314 return 0;
317 static void hist_trigger_elt_comm_copy(struct tracing_map_elt *to,
318 struct tracing_map_elt *from)
320 char *comm_from = from->private_data;
321 char *comm_to = to->private_data;
323 if (comm_from)
324 memcpy(comm_to, comm_from, TASK_COMM_LEN + 1);
327 static void hist_trigger_elt_comm_init(struct tracing_map_elt *elt)
329 char *comm = elt->private_data;
331 if (comm)
332 save_comm(comm, current);
/* Element callbacks used only when an .execname key needs comm tracking. */
335 static const struct tracing_map_ops hist_trigger_elt_comm_ops = {
336 .elt_alloc = hist_trigger_elt_comm_alloc,
337 .elt_copy = hist_trigger_elt_comm_copy,
338 .elt_free = hist_trigger_elt_comm_free,
339 .elt_init = hist_trigger_elt_comm_init,
/* Free a single hist_field (the backing event field is not owned). */
342 static void destroy_hist_field(struct hist_field *hist_field)
344 kfree(hist_field);
/*
 * Allocate a hist_field for @field with modifier @flags and select the
 * appropriate value-accessor function.  @field may be NULL for
 * synthesized fields (hitcount, stacktrace).  Returns NULL on failure
 * or for unsupported (function-type, odd-sized) fields.
 */
347 static struct hist_field *create_hist_field(struct ftrace_event_field *field,
348 unsigned long flags)
350 struct hist_field *hist_field;
/* Function-type fields can't be used as keys or values. */
352 if (field && is_function_field(field))
353 return NULL;
355 hist_field = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
356 if (!hist_field)
357 return NULL;
/* Synthesized fields pick fixed accessors and skip field inspection. */
359 if (flags & HIST_FIELD_FL_HITCOUNT) {
360 hist_field->fn = hist_field_counter;
361 goto out;
364 if (flags & HIST_FIELD_FL_STACKTRACE) {
365 hist_field->fn = hist_field_none;
366 goto out;
369 if (flags & HIST_FIELD_FL_LOG2) {
370 hist_field->fn = hist_field_log2;
371 goto out;
/* From here on a real event field is required. */
374 if (WARN_ON_ONCE(!field))
375 goto out;
377 if (is_string_field(field)) {
378 flags |= HIST_FIELD_FL_STRING;
/* Pick the accessor matching the string's storage scheme. */
380 if (field->filter_type == FILTER_STATIC_STRING)
381 hist_field->fn = hist_field_string;
382 else if (field->filter_type == FILTER_DYN_STRING)
383 hist_field->fn = hist_field_dynstring;
384 else
385 hist_field->fn = hist_field_pstring;
386 } else {
387 hist_field->fn = select_value_fn(field->size,
388 field->is_signed);
389 if (!hist_field->fn) {
390 destroy_hist_field(hist_field);
391 return NULL;
394 out:
395 hist_field->field = field;
396 hist_field->flags = flags;
398 return hist_field;
401 static void destroy_hist_fields(struct hist_trigger_data *hist_data)
403 unsigned int i;
405 for (i = 0; i < TRACING_MAP_FIELDS_MAX; i++) {
406 if (hist_data->fields[i]) {
407 destroy_hist_field(hist_data->fields[i]);
408 hist_data->fields[i] = NULL;
/*
 * Install the implicit hitcount value in slot HITCOUNT_IDX; every hist
 * trigger has it regardless of the "vals=" clause.  Returns 0 or a
 * negative errno.
 */
413 static int create_hitcount_val(struct hist_trigger_data *hist_data)
415 hist_data->fields[HITCOUNT_IDX] =
416 create_hist_field(NULL, HIST_FIELD_FL_HITCOUNT);
417 if (!hist_data->fields[HITCOUNT_IDX])
418 return -ENOMEM;
420 hist_data->n_vals++;
422 if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
423 return -EINVAL;
425 return 0;
/*
 * Parse one "vals=" entry ("field" or "field.hex") and install it as
 * value slot @val_idx.  Returns 0 or a negative errno.
 */
428 static int create_val_field(struct hist_trigger_data *hist_data,
429 unsigned int val_idx,
430 struct trace_event_file *file,
431 char *field_str)
433 struct ftrace_event_field *field = NULL;
434 unsigned long flags = 0;
435 char *field_name;
436 int ret = 0;
438 if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX))
439 return -EINVAL;
/* Split "name.modifier"; values only accept the .hex modifier. */
441 field_name = strsep(&field_str, ".");
442 if (field_str) {
443 if (strcmp(field_str, "hex") == 0)
444 flags |= HIST_FIELD_FL_HEX;
445 else {
446 ret = -EINVAL;
447 goto out;
451 field = trace_find_event_field(file->event_call, field_name);
452 if (!field) {
453 ret = -EINVAL;
454 goto out;
457 hist_data->fields[val_idx] = create_hist_field(field, flags);
458 if (!hist_data->fields[val_idx]) {
459 ret = -ENOMEM;
460 goto out;
463 ++hist_data->n_vals;
465 if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
466 ret = -EINVAL;
467 out:
468 return ret;
/*
 * Create all value fields: the implicit hitcount first, then each
 * comma-separated entry of the "vals=" clause (explicit "hitcount"
 * entries are skipped since it's already present).
 */
471 static int create_val_fields(struct hist_trigger_data *hist_data,
472 struct trace_event_file *file)
474 char *fields_str, *field_str;
475 unsigned int i, j;
476 int ret;
478 ret = create_hitcount_val(hist_data);
479 if (ret)
480 goto out;
/* "vals=" is optional; hitcount alone is a valid value set. */
482 fields_str = hist_data->attrs->vals_str;
483 if (!fields_str)
484 goto out;
486 strsep(&fields_str, "=");
487 if (!fields_str)
488 goto out;
/* j starts at 1 because slot 0 is the hitcount. */
490 for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX &&
491 j < TRACING_MAP_VALS_MAX; i++) {
492 field_str = strsep(&fields_str, ",");
493 if (!field_str)
494 break;
495 if (strcmp(field_str, "hitcount") == 0)
496 continue;
497 ret = create_val_field(hist_data, j++, file, field_str);
498 if (ret)
499 goto out;
/* Leftover input means too many values were specified. */
501 if (fields_str && (strcmp(fields_str, "hitcount") != 0))
502 ret = -EINVAL;
503 out:
504 return ret;
/*
 * Parse one "keys=" entry ("stacktrace" or "field[.modifier]") and
 * install it as key slot @key_idx at byte offset @key_offset within
 * the compound key.  Returns the key's (u64-aligned) size on success
 * or a negative errno.
 */
507 static int create_key_field(struct hist_trigger_data *hist_data,
508 unsigned int key_idx,
509 unsigned int key_offset,
510 struct trace_event_file *file,
511 char *field_str)
513 struct ftrace_event_field *field = NULL;
514 unsigned long flags = 0;
515 unsigned int key_size;
516 int ret = 0;
518 if (WARN_ON(key_idx >= TRACING_MAP_FIELDS_MAX))
519 return -EINVAL;
521 flags |= HIST_FIELD_FL_KEY;
523 if (strcmp(field_str, "stacktrace") == 0) {
524 flags |= HIST_FIELD_FL_STACKTRACE;
525 key_size = sizeof(unsigned long) * HIST_STACKTRACE_DEPTH;
526 } else {
527 char *field_name = strsep(&field_str, ".");
/* Keys accept the full modifier set; .execname only with common_pid. */
529 if (field_str) {
530 if (strcmp(field_str, "hex") == 0)
531 flags |= HIST_FIELD_FL_HEX;
532 else if (strcmp(field_str, "sym") == 0)
533 flags |= HIST_FIELD_FL_SYM;
534 else if (strcmp(field_str, "sym-offset") == 0)
535 flags |= HIST_FIELD_FL_SYM_OFFSET;
536 else if ((strcmp(field_str, "execname") == 0) &&
537 (strcmp(field_name, "common_pid") == 0))
538 flags |= HIST_FIELD_FL_EXECNAME;
539 else if (strcmp(field_str, "syscall") == 0)
540 flags |= HIST_FIELD_FL_SYSCALL;
541 else if (strcmp(field_str, "log2") == 0)
542 flags |= HIST_FIELD_FL_LOG2;
543 else {
544 ret = -EINVAL;
545 goto out;
549 field = trace_find_event_field(file->event_call, field_name);
550 if (!field) {
551 ret = -EINVAL;
552 goto out;
/* String keys reserve the maximum filter-string size. */
555 if (is_string_field(field))
556 key_size = MAX_FILTER_STR_VAL;
557 else
558 key_size = field->size;
561 hist_data->fields[key_idx] = create_hist_field(field, flags);
562 if (!hist_data->fields[key_idx]) {
563 ret = -ENOMEM;
564 goto out;
/* Align each key part so compound-key comparisons stay well-defined. */
567 key_size = ALIGN(key_size, sizeof(u64));
568 hist_data->fields[key_idx]->size = key_size;
569 hist_data->fields[key_idx]->offset = key_offset;
570 hist_data->key_size += key_size;
571 if (hist_data->key_size > HIST_KEY_SIZE_MAX) {
572 ret = -EINVAL;
573 goto out;
576 hist_data->n_keys++;
578 if (WARN_ON(hist_data->n_keys > TRACING_MAP_KEYS_MAX))
579 return -EINVAL;
/* Success: report this key's size so the caller can advance the offset. */
581 ret = key_size;
582 out:
583 return ret;
/*
 * Create all key fields from the mandatory "keys=" clause; key slots
 * follow the value slots in fields[], and each key's returned size
 * advances the compound-key offset.
 */
586 static int create_key_fields(struct hist_trigger_data *hist_data,
587 struct trace_event_file *file)
589 unsigned int i, key_offset = 0, n_vals = hist_data->n_vals;
590 char *fields_str, *field_str;
591 int ret = -EINVAL;
593 fields_str = hist_data->attrs->keys_str;
594 if (!fields_str)
595 goto out;
597 strsep(&fields_str, "=");
598 if (!fields_str)
599 goto out;
601 for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) {
602 field_str = strsep(&fields_str, ",");
603 if (!field_str)
604 break;
605 ret = create_key_field(hist_data, i, key_offset,
606 file, field_str);
607 if (ret < 0)
608 goto out;
/* create_key_field() returns the aligned size of the key it added. */
609 key_offset += ret;
/* Leftover input means more keys than TRACING_MAP_KEYS_MAX. */
611 if (fields_str) {
612 ret = -EINVAL;
613 goto out;
615 ret = 0;
616 out:
617 return ret;
/*
 * Build the complete field set (values then keys) and record the
 * total field count.  Returns 0 or a negative errno.
 */
620 static int create_hist_fields(struct hist_trigger_data *hist_data,
621 struct trace_event_file *file)
623 int ret;
625 ret = create_val_fields(hist_data, file);
626 if (ret)
627 goto out;
629 ret = create_key_fields(hist_data, file);
630 if (ret)
631 goto out;
633 hist_data->n_fields = hist_data->n_vals + hist_data->n_keys;
634 out:
635 return ret;
/*
 * Interpret an optional sort-direction modifier: NULL (no modifier)
 * and "ascending" both mean ascending (0), "descending" means
 * descending (1), anything else is -EINVAL.
 */
static int is_descending(const char *str)
{
	if (!str)
		return 0;

	if (strcmp(str, "descending") == 0)
		return 1;

	return (strcmp(str, "ascending") == 0) ? 0 : -EINVAL;
}
/*
 * Parse the optional "sort=" clause into hist_data->sort_keys[].
 * Each entry is "hitcount" or a field name, optionally suffixed with
 * ".ascending"/".descending".  Without a clause, sorting defaults to
 * hitcount ascending.  Returns 0 or a negative errno.
 */
652 static int create_sort_keys(struct hist_trigger_data *hist_data)
654 char *fields_str = hist_data->attrs->sort_key_str;
655 struct ftrace_event_field *field = NULL;
656 struct tracing_map_sort_key *sort_key;
657 int descending, ret = 0;
658 unsigned int i, j;
660 hist_data->n_sort_keys = 1; /* we always have at least one, hitcount */
662 if (!fields_str)
663 goto out;
665 strsep(&fields_str, "=");
666 if (!fields_str) {
667 ret = -EINVAL;
668 goto out;
671 for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
672 char *field_str, *field_name;
674 sort_key = &hist_data->sort_keys[i];
676 field_str = strsep(&fields_str, ",");
677 if (!field_str) {
/* An empty clause ("sort=") is an error; running out later is fine. */
678 if (i == 0)
679 ret = -EINVAL;
680 break;
/* More entries remain but all slots are used: reject. */
683 if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) {
684 ret = -EINVAL;
685 break;
688 field_name = strsep(&field_str, ".");
689 if (!field_name) {
690 ret = -EINVAL;
691 break;
/* "hitcount" sorts on the implicit count (field_idx stays 0). */
694 if (strcmp(field_name, "hitcount") == 0) {
695 descending = is_descending(field_str);
696 if (descending < 0) {
697 ret = descending;
698 break;
700 sort_key->descending = descending;
701 continue;
/* Otherwise match the name against the trigger's non-hitcount fields. */
704 for (j = 1; j < hist_data->n_fields; j++) {
705 field = hist_data->fields[j]->field;
706 if (field && (strcmp(field_name, field->name) == 0)) {
707 sort_key->field_idx = j;
708 descending = is_descending(field_str);
709 if (descending < 0) {
710 ret = descending;
711 goto out;
713 sort_key->descending = descending;
714 break;
717 if (j == hist_data->n_fields) {
718 ret = -EINVAL;
719 break;
/* i is the number of sort keys successfully parsed above. */
722 hist_data->n_sort_keys = i;
723 out:
724 return ret;
/* Tear down a hist trigger: attrs, fields, backing map, then the struct. */
727 static void destroy_hist_data(struct hist_trigger_data *hist_data)
729 destroy_hist_trigger_attrs(hist_data->attrs);
730 destroy_hist_fields(hist_data);
731 tracing_map_destroy(hist_data->map);
732 kfree(hist_data);
/*
 * Register each hist field with the tracing_map: keys get a comparator
 * matched to their type, values become sum fields.  Returns 0 or the
 * negative error from the map layer.
 */
735 static int create_tracing_map_fields(struct hist_trigger_data *hist_data)
737 struct tracing_map *map = hist_data->map;
738 struct ftrace_event_field *field;
739 struct hist_field *hist_field;
740 int i, idx;
742 for_each_hist_field(i, hist_data) {
743 hist_field = hist_data->fields[i];
744 if (hist_field->flags & HIST_FIELD_FL_KEY) {
745 tracing_map_cmp_fn_t cmp_fn;
747 field = hist_field->field;
/* Stacktrace keys are only hashed/memcmp'd, never sort-compared. */
749 if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
750 cmp_fn = tracing_map_cmp_none;
751 else if (is_string_field(field))
752 cmp_fn = tracing_map_cmp_string;
753 else
754 cmp_fn = tracing_map_cmp_num(field->size,
755 field->is_signed);
756 idx = tracing_map_add_key_field(map,
757 hist_field->offset,
758 cmp_fn);
760 } else
761 idx = tracing_map_add_sum_field(map);
763 if (idx < 0)
764 return idx;
767 return 0;
770 static bool need_tracing_map_ops(struct hist_trigger_data *hist_data)
772 struct hist_field *key_field;
773 unsigned int i;
775 for_each_hist_key_field(i, hist_data) {
776 key_field = hist_data->fields[i];
778 if (key_field->flags & HIST_FIELD_FL_EXECNAME)
779 return true;
782 return false;
/*
 * Assemble a complete hist trigger: parse fields and sort keys from
 * @attrs, create and initialize the backing tracing_map of 2^map_bits
 * entries.  Takes ownership of @attrs only on success; on failure the
 * caller still owns it (attrs is detached before destroy).  Returns
 * the trigger data or an ERR_PTR.
 */
785 static struct hist_trigger_data *
786 create_hist_data(unsigned int map_bits,
787 struct hist_trigger_attrs *attrs,
788 struct trace_event_file *file)
790 const struct tracing_map_ops *map_ops = NULL;
791 struct hist_trigger_data *hist_data;
792 int ret = 0;
794 hist_data = kzalloc(sizeof(*hist_data), GFP_KERNEL);
795 if (!hist_data)
796 return ERR_PTR(-ENOMEM);
798 hist_data->attrs = attrs;
800 ret = create_hist_fields(hist_data, file);
801 if (ret)
802 goto free;
804 ret = create_sort_keys(hist_data);
805 if (ret)
806 goto free;
808 if (need_tracing_map_ops(hist_data))
809 map_ops = &hist_trigger_elt_comm_ops;
811 hist_data->map = tracing_map_create(map_bits, hist_data->key_size,
812 map_ops, hist_data);
813 if (IS_ERR(hist_data->map)) {
814 ret = PTR_ERR(hist_data->map);
/* Clear the ERR_PTR so destroy_hist_data() won't destroy garbage. */
815 hist_data->map = NULL;
816 goto free;
819 ret = create_tracing_map_fields(hist_data);
820 if (ret)
821 goto free;
823 ret = tracing_map_init(hist_data->map);
824 if (ret)
825 goto free;
827 hist_data->event_file = file;
828 out:
829 return hist_data;
830 free:
/* Detach attrs: the caller frees them on the error path. */
831 hist_data->attrs = NULL;
833 destroy_hist_data(hist_data);
835 hist_data = ERR_PTR(ret);
837 goto out;
840 static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
841 struct tracing_map_elt *elt,
842 void *rec)
844 struct hist_field *hist_field;
845 unsigned int i;
846 u64 hist_val;
848 for_each_hist_val_field(i, hist_data) {
849 hist_field = hist_data->fields[i];
850 hist_val = hist_field->fn(hist_field, rec);
851 tracing_map_update_sum(elt, i, hist_val);
/*
 * Append one key field's value to the compound key buffer at its
 * assigned offset.  String keys copy only the actual string length
 * (capped to leave room for a NUL) so trailing garbage can't perturb
 * key comparisons.
 */
855 static inline void add_to_key(char *compound_key, void *key,
856 struct hist_field *key_field, void *rec)
858 size_t size = key_field->size;
860 if (key_field->flags & HIST_FIELD_FL_STRING) {
861 struct ftrace_event_field *field;
863 field = key_field->field;
/* __data_loc strings keep their length in the u32's high 16 bits. */
864 if (field->filter_type == FILTER_DYN_STRING)
865 size = *(u32 *)(rec + field->offset) >> 16;
866 else if (field->filter_type == FILTER_PTR_STRING)
867 size = strlen(key);
868 else if (field->filter_type == FILTER_STATIC_STRING)
869 size = field->size;
871 /* ensure NULL-termination */
872 if (size > key_field->size - 1)
873 size = key_field->size - 1;
876 memcpy(compound_key + key_field->offset, key, size);
/*
 * Per-event hot path: build the lookup key (single field, compound, or
 * stacktrace), insert/find the map element, and accumulate this
 * event's values into it.  @rec is the raw trace record.
 */
879 static void event_hist_trigger(struct event_trigger_data *data, void *rec)
881 struct hist_trigger_data *hist_data = data->private_data;
882 bool use_compound_key = (hist_data->n_keys > 1);
883 unsigned long entries[HIST_STACKTRACE_DEPTH];
884 char compound_key[HIST_KEY_SIZE_MAX];
885 struct stack_trace stacktrace;
886 struct hist_field *key_field;
887 struct tracing_map_elt *elt;
888 u64 field_contents;
889 void *key = NULL;
890 unsigned int i;
892 memset(compound_key, 0, hist_data->key_size);
894 for_each_hist_key_field(i, hist_data) {
895 key_field = hist_data->fields[i];
897 if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
898 stacktrace.max_entries = HIST_STACKTRACE_DEPTH;
899 stacktrace.entries = entries;
900 stacktrace.nr_entries = 0;
901 stacktrace.skip = HIST_STACKTRACE_SKIP;
/* Zero first so unused tail entries hash consistently. */
903 memset(stacktrace.entries, 0, HIST_STACKTRACE_SIZE);
904 save_stack_trace(&stacktrace);
906 key = entries;
907 } else {
908 field_contents = key_field->fn(key_field, rec);
909 if (key_field->flags & HIST_FIELD_FL_STRING) {
910 key = (void *)(unsigned long)field_contents;
/* String keys always go through the padded compound buffer. */
911 use_compound_key = true;
912 } else
913 key = (void *)&field_contents;
916 if (use_compound_key)
917 add_to_key(compound_key, key, key_field, rec);
920 if (use_compound_key)
921 key = compound_key;
923 elt = tracing_map_insert(hist_data->map, key);
/* NULL means the map is full (drops are counted by the map). */
924 if (elt)
925 hist_trigger_elt_update(hist_data, elt, rec);
/*
 * Print a saved stacktrace key, one symbolized frame per line,
 * stopping at the ULONG_MAX terminator (or max_entries).
 */
928 static void hist_trigger_stacktrace_print(struct seq_file *m,
929 unsigned long *stacktrace_entries,
930 unsigned int max_entries)
932 char str[KSYM_SYMBOL_LEN];
933 unsigned int spaces = 8;
934 unsigned int i;
936 for (i = 0; i < max_entries; i++) {
937 if (stacktrace_entries[i] == ULONG_MAX)
938 return;
940 seq_printf(m, "%*c", 1 + spaces, ' ');
941 sprint_symbol(str, stacktrace_entries[i]);
942 seq_printf(m, "%s\n", str);
/*
 * Print one histogram entry: "{ key[, key...] } hitcount: N val: M...",
 * rendering each key according to its modifier flags (hex, sym,
 * execname, syscall, stacktrace, log2, string, or plain decimal).
 */
946 static void
947 hist_trigger_entry_print(struct seq_file *m,
948 struct hist_trigger_data *hist_data, void *key,
949 struct tracing_map_elt *elt)
951 struct hist_field *key_field;
952 char str[KSYM_SYMBOL_LEN];
953 bool multiline = false;
954 unsigned int i;
955 u64 uval;
957 seq_puts(m, "{ ");
959 for_each_hist_key_field(i, hist_data) {
960 key_field = hist_data->fields[i];
/* Keys start at index n_vals, so this separates 2nd and later keys. */
962 if (i > hist_data->n_vals)
963 seq_puts(m, ", ");
965 if (key_field->flags & HIST_FIELD_FL_HEX) {
966 uval = *(u64 *)(key + key_field->offset);
967 seq_printf(m, "%s: %llx",
968 key_field->field->name, uval);
969 } else if (key_field->flags & HIST_FIELD_FL_SYM) {
970 uval = *(u64 *)(key + key_field->offset);
971 sprint_symbol_no_offset(str, uval);
972 seq_printf(m, "%s: [%llx] %-45s",
973 key_field->field->name, uval, str);
974 } else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) {
975 uval = *(u64 *)(key + key_field->offset);
976 sprint_symbol(str, uval);
977 seq_printf(m, "%s: [%llx] %-55s",
978 key_field->field->name, uval, str);
979 } else if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
/* Comm saved at element init by hist_trigger_elt_comm_init(). */
980 char *comm = elt->private_data;
982 uval = *(u64 *)(key + key_field->offset);
983 seq_printf(m, "%s: %-16s[%10llu]",
984 key_field->field->name, comm, uval);
985 } else if (key_field->flags & HIST_FIELD_FL_SYSCALL) {
986 const char *syscall_name;
988 uval = *(u64 *)(key + key_field->offset);
989 syscall_name = get_syscall_name(uval);
990 if (!syscall_name)
991 syscall_name = "unknown_syscall";
993 seq_printf(m, "%s: %-30s[%3llu]",
994 key_field->field->name, syscall_name, uval);
995 } else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
996 seq_puts(m, "stacktrace:\n");
997 hist_trigger_stacktrace_print(m,
998 key + key_field->offset,
999 HIST_STACKTRACE_DEPTH);
1000 multiline = true;
1001 } else if (key_field->flags & HIST_FIELD_FL_LOG2) {
1002 seq_printf(m, "%s: ~ 2^%-2llu", key_field->field->name,
1003 *(u64 *)(key + key_field->offset));
1004 } else if (key_field->flags & HIST_FIELD_FL_STRING) {
1005 seq_printf(m, "%s: %-50s", key_field->field->name,
1006 (char *)(key + key_field->offset));
1007 } else {
1008 uval = *(u64 *)(key + key_field->offset);
1009 seq_printf(m, "%s: %10llu", key_field->field->name,
1010 uval);
/* Stacktrace output already ends in a newline; skip the pad space. */
1014 if (!multiline)
1015 seq_puts(m, " ");
1017 seq_puts(m, "}");
1019 seq_printf(m, " hitcount: %10llu",
1020 tracing_map_read_sum(elt, HITCOUNT_IDX));
/* Remaining values start at 1; slot 0 (hitcount) is printed above. */
1022 for (i = 1; i < hist_data->n_vals; i++) {
1023 if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) {
1024 seq_printf(m, " %s: %10llx",
1025 hist_data->fields[i]->field->name,
1026 tracing_map_read_sum(elt, i));
1027 } else {
1028 seq_printf(m, " %s: %10llu",
1029 hist_data->fields[i]->field->name,
1030 tracing_map_read_sum(elt, i));
1034 seq_puts(m, "\n");
/*
 * Sort the map's entries by the configured sort keys and print each
 * one.  Returns the number of entries printed or a negative errno.
 */
1037 static int print_entries(struct seq_file *m,
1038 struct hist_trigger_data *hist_data)
1040 struct tracing_map_sort_entry **sort_entries = NULL;
1041 struct tracing_map *map = hist_data->map;
1042 int i, n_entries;
1044 n_entries = tracing_map_sort_entries(map, hist_data->sort_keys,
1045 hist_data->n_sort_keys,
1046 &sort_entries);
1047 if (n_entries < 0)
1048 return n_entries;
1050 for (i = 0; i < n_entries; i++)
1051 hist_trigger_entry_print(m, hist_data,
1052 sort_entries[i]->key,
1053 sort_entries[i]->elt);
/* The sorted snapshot is a copy; free it once printed. */
1055 tracing_map_destroy_sort_entries(sort_entries, n_entries);
1057 return n_entries;
/*
 * Print one trigger's full histogram: header with the trigger info
 * line, the sorted entries, then hits/entries/drops totals.  @n > 0
 * separates multiple triggers on the same event with a blank gap.
 */
1060 static void hist_trigger_show(struct seq_file *m,
1061 struct event_trigger_data *data, int n)
1063 struct hist_trigger_data *hist_data;
1064 int n_entries, ret = 0;
1066 if (n > 0)
1067 seq_puts(m, "\n\n");
1069 seq_puts(m, "# event histogram\n#\n# trigger info: ");
1070 data->ops->print(m, data->ops, data);
1071 seq_puts(m, "#\n\n");
1073 hist_data = data->private_data;
1074 n_entries = print_entries(m, hist_data);
1075 if (n_entries < 0) {
1076 ret = n_entries;
1077 n_entries = 0;
1080 seq_printf(m, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n",
1081 (u64)atomic64_read(&hist_data->map->hits),
1082 n_entries, (u64)atomic64_read(&hist_data->map->drops));
/*
 * seq_file show callback for the per-event "hist" file: print every
 * hist trigger attached to the event, under event_mutex.
 */
1085 static int hist_show(struct seq_file *m, void *v)
1087 struct event_trigger_data *data;
1088 struct trace_event_file *event_file;
1089 int n = 0, ret = 0;
1091 mutex_lock(&event_mutex);
/* The event may have vanished while the file was open. */
1093 event_file = event_file_data(m->private);
1094 if (unlikely(!event_file)) {
1095 ret = -ENODEV;
1096 goto out_unlock;
1099 list_for_each_entry_rcu(data, &event_file->triggers, list) {
1100 if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
1101 hist_trigger_show(m, data, n++);
1104 out_unlock:
1105 mutex_unlock(&event_mutex);
1107 return ret;
/* open() for the "hist" file: route reads through hist_show(). */
1110 static int event_hist_open(struct inode *inode, struct file *file)
1112 return single_open(file, hist_show, file);
/* File operations for the per-event "hist" tracefs file. */
1115 const struct file_operations event_hist_fops = {
1116 .open = event_hist_open,
1117 .read = seq_read,
1118 .llseek = seq_lseek,
1119 .release = single_release,
1122 static const char *get_hist_field_flags(struct hist_field *hist_field)
1124 const char *flags_str = NULL;
1126 if (hist_field->flags & HIST_FIELD_FL_HEX)
1127 flags_str = "hex";
1128 else if (hist_field->flags & HIST_FIELD_FL_SYM)
1129 flags_str = "sym";
1130 else if (hist_field->flags & HIST_FIELD_FL_SYM_OFFSET)
1131 flags_str = "sym-offset";
1132 else if (hist_field->flags & HIST_FIELD_FL_EXECNAME)
1133 flags_str = "execname";
1134 else if (hist_field->flags & HIST_FIELD_FL_SYSCALL)
1135 flags_str = "syscall";
1136 else if (hist_field->flags & HIST_FIELD_FL_LOG2)
1137 flags_str = "log2";
1139 return flags_str;
1142 static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
1144 seq_printf(m, "%s", hist_field->field->name);
1145 if (hist_field->flags) {
1146 const char *flags_str = get_hist_field_flags(hist_field);
1148 if (flags_str)
1149 seq_printf(m, ".%s", flags_str);
/*
 * Reconstruct and print the trigger's command line
 * ("hist:[name:]keys=...:vals=...:sort=...:size=... [if ...] [state]")
 * for the trigger info header and the trigger file.
 */
1153 static int event_hist_trigger_print(struct seq_file *m,
1154 struct event_trigger_ops *ops,
1155 struct event_trigger_data *data)
1157 struct hist_trigger_data *hist_data = data->private_data;
1158 struct hist_field *key_field;
1159 unsigned int i;
1161 seq_puts(m, "hist:");
1163 if (data->name)
1164 seq_printf(m, "%s:", data->name);
1166 seq_puts(m, "keys=");
1168 for_each_hist_key_field(i, hist_data) {
1169 key_field = hist_data->fields[i];
/* Keys start at index n_vals; comma-separate 2nd and later keys. */
1171 if (i > hist_data->n_vals)
1172 seq_puts(m, ",");
1174 if (key_field->flags & HIST_FIELD_FL_STACKTRACE)
1175 seq_puts(m, "stacktrace");
1176 else
1177 hist_field_print(m, key_field);
1180 seq_puts(m, ":vals=");
1182 for_each_hist_val_field(i, hist_data) {
1183 if (i == HITCOUNT_IDX)
1184 seq_puts(m, "hitcount");
1185 else {
1186 seq_puts(m, ",");
1187 hist_field_print(m, hist_data->fields[i]);
1191 seq_puts(m, ":sort=");
1193 for (i = 0; i < hist_data->n_sort_keys; i++) {
1194 struct tracing_map_sort_key *sort_key;
1196 sort_key = &hist_data->sort_keys[i];
1198 if (i > 0)
1199 seq_puts(m, ",");
1201 if (sort_key->field_idx == HITCOUNT_IDX)
1202 seq_puts(m, "hitcount");
1203 else {
1204 unsigned int idx = sort_key->field_idx;
1206 if (WARN_ON(idx >= TRACING_MAP_FIELDS_MAX))
1207 return -EINVAL;
1209 hist_field_print(m, hist_data->fields[idx]);
1212 if (sort_key->descending)
1213 seq_puts(m, ".descending");
/* Report the actual (rounded/defaulted) map size, not the request. */
1216 seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits));
1218 if (data->filter_str)
1219 seq_printf(m, " if %s", data->filter_str);
1221 if (data->paused)
1222 seq_puts(m, " [paused]");
1223 else
1224 seq_puts(m, " [active]");
1226 seq_putc(m, '\n');
1228 return 0;
/*
 * Trigger init callback: register a named trigger on first reference
 * and take a reference on the trigger data.
 */
1231 static int event_hist_trigger_init(struct event_trigger_ops *ops,
1232 struct event_trigger_data *data)
1234 struct hist_trigger_data *hist_data = data->private_data;
1236 if (!data->ref && hist_data->attrs->name)
1237 save_named_trigger(hist_data->attrs->name, data);
1239 data->ref++;
1241 return 0;
/*
 * Trigger free callback: drop a reference; on the last one, unregister
 * the name (if any) and free both the trigger and its hist data.
 */
1244 static void event_hist_trigger_free(struct event_trigger_ops *ops,
1245 struct event_trigger_data *data)
1247 struct hist_trigger_data *hist_data = data->private_data;
1249 if (WARN_ON_ONCE(data->ref <= 0))
1250 return;
1252 data->ref--;
1253 if (!data->ref) {
1254 if (data->name)
1255 del_named_trigger(data);
1256 trigger_data_free(data);
1257 destroy_hist_data(hist_data);
/* Ops for an ordinary (primary, possibly named) hist trigger. */
1261 static struct event_trigger_ops event_hist_trigger_ops = {
1262 .func = event_hist_trigger,
1263 .print = event_hist_trigger_print,
1264 .init = event_hist_trigger_init,
1265 .free = event_hist_trigger_free,
/*
 * Init callback for a trigger that references an existing named
 * trigger: take a local reference, register under the shared name, and
 * forward init to the primary named trigger's data.
 */
1268 static int event_hist_trigger_named_init(struct event_trigger_ops *ops,
1269 struct event_trigger_data *data)
1271 data->ref++;
1273 save_named_trigger(data->named_data->name, data);
1275 event_hist_trigger_init(ops, data->named_data);
1277 return 0;
/*
 * Free callback for a named-reference trigger: release the primary
 * named trigger's reference first, then this trigger's own state on
 * its last local reference.
 */
1280 static void event_hist_trigger_named_free(struct event_trigger_ops *ops,
1281 struct event_trigger_data *data)
1283 if (WARN_ON_ONCE(data->ref <= 0))
1284 return;
1286 event_hist_trigger_free(ops, data->named_data);
1288 data->ref--;
1289 if (!data->ref) {
1290 del_named_trigger(data);
1291 trigger_data_free(data);
/* Ops for a trigger that shares another trigger's histogram by name. */
1295 static struct event_trigger_ops event_hist_trigger_named_ops = {
1296 .func = event_hist_trigger,
1297 .print = event_hist_trigger_print,
1298 .init = event_hist_trigger_named_init,
1299 .free = event_hist_trigger_named_free,
/* All hist commands start with the ordinary ops; named-ref ops are
 * swapped in later during registration when a named match is found. */
1302 static struct event_trigger_ops *event_hist_get_trigger_ops(char *cmd,
1303 char *param)
1305 return &event_hist_trigger_ops;
/*
 * Reset a trigger's histogram.  Named triggers are paused around the
 * clear, with synchronize_sched() ensuring no tracer is still writing
 * into the map while it's being cleared.
 */
1308 static void hist_clear(struct event_trigger_data *data)
1310 struct hist_trigger_data *hist_data = data->private_data;
1312 if (data->name)
1313 pause_named_trigger(data);
1315 synchronize_sched();
1317 tracing_map_clear(hist_data->map);
1319 if (data->name)
1320 unpause_named_trigger(data);
1323 static bool compatible_field(struct ftrace_event_field *field,
1324 struct ftrace_event_field *test_field)
1326 if (field == test_field)
1327 return true;
1328 if (field == NULL || test_field == NULL)
1329 return false;
1330 if (strcmp(field->name, test_field->name) != 0)
1331 return false;
1332 if (strcmp(field->type, test_field->type) != 0)
1333 return false;
1334 if (field->size != test_field->size)
1335 return false;
1336 if (field->is_signed != test_field->is_signed)
1337 return false;
1339 return true;
1342 static bool hist_trigger_match(struct event_trigger_data *data,
1343 struct event_trigger_data *data_test,
1344 struct event_trigger_data *named_data,
1345 bool ignore_filter)
1347 struct tracing_map_sort_key *sort_key, *sort_key_test;
1348 struct hist_trigger_data *hist_data, *hist_data_test;
1349 struct hist_field *key_field, *key_field_test;
1350 unsigned int i;
1352 if (named_data && (named_data != data_test) &&
1353 (named_data != data_test->named_data))
1354 return false;
1356 if (!named_data && is_named_trigger(data_test))
1357 return false;
1359 hist_data = data->private_data;
1360 hist_data_test = data_test->private_data;
1362 if (hist_data->n_vals != hist_data_test->n_vals ||
1363 hist_data->n_fields != hist_data_test->n_fields ||
1364 hist_data->n_sort_keys != hist_data_test->n_sort_keys)
1365 return false;
1367 if (!ignore_filter) {
1368 if ((data->filter_str && !data_test->filter_str) ||
1369 (!data->filter_str && data_test->filter_str))
1370 return false;
1373 for_each_hist_field(i, hist_data) {
1374 key_field = hist_data->fields[i];
1375 key_field_test = hist_data_test->fields[i];
1377 if (key_field->flags != key_field_test->flags)
1378 return false;
1379 if (!compatible_field(key_field->field, key_field_test->field))
1380 return false;
1381 if (key_field->offset != key_field_test->offset)
1382 return false;
1385 for (i = 0; i < hist_data->n_sort_keys; i++) {
1386 sort_key = &hist_data->sort_keys[i];
1387 sort_key_test = &hist_data_test->sort_keys[i];
1389 if (sort_key->field_idx != sort_key_test->field_idx ||
1390 sort_key->descending != sort_key_test->descending)
1391 return false;
1394 if (!ignore_filter && data->filter_str &&
1395 (strcmp(data->filter_str, data_test->filter_str) != 0))
1396 return false;
1398 return true;
1401 static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
1402 struct event_trigger_data *data,
1403 struct trace_event_file *file)
1405 struct hist_trigger_data *hist_data = data->private_data;
1406 struct event_trigger_data *test, *named_data = NULL;
1407 int ret = 0;
1409 if (hist_data->attrs->name) {
1410 named_data = find_named_trigger(hist_data->attrs->name);
1411 if (named_data) {
1412 if (!hist_trigger_match(data, named_data, named_data,
1413 true)) {
1414 ret = -EINVAL;
1415 goto out;
1420 if (hist_data->attrs->name && !named_data)
1421 goto new;
1423 list_for_each_entry_rcu(test, &file->triggers, list) {
1424 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
1425 if (!hist_trigger_match(data, test, named_data, false))
1426 continue;
1427 if (hist_data->attrs->pause)
1428 test->paused = true;
1429 else if (hist_data->attrs->cont)
1430 test->paused = false;
1431 else if (hist_data->attrs->clear)
1432 hist_clear(test);
1433 else
1434 ret = -EEXIST;
1435 goto out;
1438 new:
1439 if (hist_data->attrs->cont || hist_data->attrs->clear) {
1440 ret = -ENOENT;
1441 goto out;
1444 if (hist_data->attrs->pause)
1445 data->paused = true;
1447 if (named_data) {
1448 destroy_hist_data(data->private_data);
1449 data->private_data = named_data->private_data;
1450 set_named_trigger_data(data, named_data);
1451 data->ops = &event_hist_trigger_named_ops;
1454 if (data->ops->init) {
1455 ret = data->ops->init(data->ops, data);
1456 if (ret < 0)
1457 goto out;
1460 list_add_rcu(&data->list, &file->triggers);
1461 ret++;
1463 update_cond_flag(file);
1465 if (trace_event_trigger_enable_disable(file, 1) < 0) {
1466 list_del_rcu(&data->list);
1467 update_cond_flag(file);
1468 ret--;
1470 out:
1471 return ret;
1474 static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
1475 struct event_trigger_data *data,
1476 struct trace_event_file *file)
1478 struct hist_trigger_data *hist_data = data->private_data;
1479 struct event_trigger_data *test, *named_data = NULL;
1480 bool unregistered = false;
1482 if (hist_data->attrs->name)
1483 named_data = find_named_trigger(hist_data->attrs->name);
1485 list_for_each_entry_rcu(test, &file->triggers, list) {
1486 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
1487 if (!hist_trigger_match(data, test, named_data, false))
1488 continue;
1489 unregistered = true;
1490 list_del_rcu(&test->list);
1491 trace_event_trigger_enable_disable(file, 0);
1492 update_cond_flag(file);
1493 break;
1497 if (unregistered && test->ops->free)
1498 test->ops->free(test->ops, test);
1501 static void hist_unreg_all(struct trace_event_file *file)
1503 struct event_trigger_data *test, *n;
1505 list_for_each_entry_safe(test, n, &file->triggers, list) {
1506 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
1507 list_del_rcu(&test->list);
1508 trace_event_trigger_enable_disable(file, 0);
1509 update_cond_flag(file);
1510 if (test->ops->free)
1511 test->ops->free(test->ops, test);
1516 static int event_hist_trigger_func(struct event_command *cmd_ops,
1517 struct trace_event_file *file,
1518 char *glob, char *cmd, char *param)
1520 unsigned int hist_trigger_bits = TRACING_MAP_BITS_DEFAULT;
1521 struct event_trigger_data *trigger_data;
1522 struct hist_trigger_attrs *attrs;
1523 struct event_trigger_ops *trigger_ops;
1524 struct hist_trigger_data *hist_data;
1525 char *trigger;
1526 int ret = 0;
1528 if (!param)
1529 return -EINVAL;
1531 /* separate the trigger from the filter (k:v [if filter]) */
1532 trigger = strsep(&param, " \t");
1533 if (!trigger)
1534 return -EINVAL;
1536 attrs = parse_hist_trigger_attrs(trigger);
1537 if (IS_ERR(attrs))
1538 return PTR_ERR(attrs);
1540 if (attrs->map_bits)
1541 hist_trigger_bits = attrs->map_bits;
1543 hist_data = create_hist_data(hist_trigger_bits, attrs, file);
1544 if (IS_ERR(hist_data)) {
1545 destroy_hist_trigger_attrs(attrs);
1546 return PTR_ERR(hist_data);
1549 trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
1551 ret = -ENOMEM;
1552 trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
1553 if (!trigger_data)
1554 goto out_free;
1556 trigger_data->count = -1;
1557 trigger_data->ops = trigger_ops;
1558 trigger_data->cmd_ops = cmd_ops;
1560 INIT_LIST_HEAD(&trigger_data->list);
1561 RCU_INIT_POINTER(trigger_data->filter, NULL);
1563 trigger_data->private_data = hist_data;
1565 /* if param is non-empty, it's supposed to be a filter */
1566 if (param && cmd_ops->set_filter) {
1567 ret = cmd_ops->set_filter(param, trigger_data, file);
1568 if (ret < 0)
1569 goto out_free;
1572 if (glob[0] == '!') {
1573 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
1574 ret = 0;
1575 goto out_free;
1578 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
1580 * The above returns on success the # of triggers registered,
1581 * but if it didn't register any it returns zero. Consider no
1582 * triggers registered a failure too.
1584 if (!ret) {
1585 if (!(attrs->pause || attrs->cont || attrs->clear))
1586 ret = -ENOENT;
1587 goto out_free;
1588 } else if (ret < 0)
1589 goto out_free;
1590 /* Just return zero, not the number of registered triggers */
1591 ret = 0;
1592 out:
1593 return ret;
1594 out_free:
1595 if (cmd_ops->set_filter)
1596 cmd_ops->set_filter(NULL, trigger_data, NULL);
1598 kfree(trigger_data);
1600 destroy_hist_data(hist_data);
1601 goto out;
1604 static struct event_command trigger_hist_cmd = {
1605 .name = "hist",
1606 .trigger_type = ETT_EVENT_HIST,
1607 .flags = EVENT_CMD_FL_NEEDS_REC,
1608 .func = event_hist_trigger_func,
1609 .reg = hist_register_trigger,
1610 .unreg = hist_unregister_trigger,
1611 .unreg_all = hist_unreg_all,
1612 .get_trigger_ops = event_hist_get_trigger_ops,
1613 .set_filter = set_trigger_filter,
1616 __init int register_trigger_hist_cmd(void)
1618 int ret;
1620 ret = register_event_command(&trigger_hist_cmd);
1621 WARN_ON(ret < 0);
1623 return ret;
1626 static void
1627 hist_enable_trigger(struct event_trigger_data *data, void *rec)
1629 struct enable_trigger_data *enable_data = data->private_data;
1630 struct event_trigger_data *test;
1632 list_for_each_entry_rcu(test, &enable_data->file->triggers, list) {
1633 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
1634 if (enable_data->enable)
1635 test->paused = false;
1636 else
1637 test->paused = true;
1642 static void
1643 hist_enable_count_trigger(struct event_trigger_data *data, void *rec)
1645 if (!data->count)
1646 return;
1648 if (data->count != -1)
1649 (data->count)--;
1651 hist_enable_trigger(data, rec);
1654 static struct event_trigger_ops hist_enable_trigger_ops = {
1655 .func = hist_enable_trigger,
1656 .print = event_enable_trigger_print,
1657 .init = event_trigger_init,
1658 .free = event_enable_trigger_free,
1661 static struct event_trigger_ops hist_enable_count_trigger_ops = {
1662 .func = hist_enable_count_trigger,
1663 .print = event_enable_trigger_print,
1664 .init = event_trigger_init,
1665 .free = event_enable_trigger_free,
1668 static struct event_trigger_ops hist_disable_trigger_ops = {
1669 .func = hist_enable_trigger,
1670 .print = event_enable_trigger_print,
1671 .init = event_trigger_init,
1672 .free = event_enable_trigger_free,
1675 static struct event_trigger_ops hist_disable_count_trigger_ops = {
1676 .func = hist_enable_count_trigger,
1677 .print = event_enable_trigger_print,
1678 .init = event_trigger_init,
1679 .free = event_enable_trigger_free,
1682 static struct event_trigger_ops *
1683 hist_enable_get_trigger_ops(char *cmd, char *param)
1685 struct event_trigger_ops *ops;
1686 bool enable;
1688 enable = (strcmp(cmd, ENABLE_HIST_STR) == 0);
1690 if (enable)
1691 ops = param ? &hist_enable_count_trigger_ops :
1692 &hist_enable_trigger_ops;
1693 else
1694 ops = param ? &hist_disable_count_trigger_ops :
1695 &hist_disable_trigger_ops;
1697 return ops;
1700 static void hist_enable_unreg_all(struct trace_event_file *file)
1702 struct event_trigger_data *test, *n;
1704 list_for_each_entry_safe(test, n, &file->triggers, list) {
1705 if (test->cmd_ops->trigger_type == ETT_HIST_ENABLE) {
1706 list_del_rcu(&test->list);
1707 update_cond_flag(file);
1708 trace_event_trigger_enable_disable(file, 0);
1709 if (test->ops->free)
1710 test->ops->free(test->ops, test);
1715 static struct event_command trigger_hist_enable_cmd = {
1716 .name = ENABLE_HIST_STR,
1717 .trigger_type = ETT_HIST_ENABLE,
1718 .func = event_enable_trigger_func,
1719 .reg = event_enable_register_trigger,
1720 .unreg = event_enable_unregister_trigger,
1721 .unreg_all = hist_enable_unreg_all,
1722 .get_trigger_ops = hist_enable_get_trigger_ops,
1723 .set_filter = set_trigger_filter,
1726 static struct event_command trigger_hist_disable_cmd = {
1727 .name = DISABLE_HIST_STR,
1728 .trigger_type = ETT_HIST_ENABLE,
1729 .func = event_enable_trigger_func,
1730 .reg = event_enable_register_trigger,
1731 .unreg = event_enable_unregister_trigger,
1732 .unreg_all = hist_enable_unreg_all,
1733 .get_trigger_ops = hist_enable_get_trigger_ops,
1734 .set_filter = set_trigger_filter,
1737 static __init void unregister_trigger_hist_enable_disable_cmds(void)
1739 unregister_event_command(&trigger_hist_enable_cmd);
1740 unregister_event_command(&trigger_hist_disable_cmd);
1743 __init int register_trigger_hist_enable_disable_cmds(void)
1745 int ret;
1747 ret = register_event_command(&trigger_hist_enable_cmd);
1748 if (WARN_ON(ret < 0))
1749 return ret;
1750 ret = register_event_command(&trigger_hist_disable_cmd);
1751 if (WARN_ON(ret < 0))
1752 unregister_trigger_hist_enable_disable_cmds();
1754 return ret;