1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/hw_breakpoint.h>
4 #include <linux/list_sort.h>
5 #include <linux/zalloc.h>
14 #include <subcmd/parse-options.h>
15 #include "parse-events.h"
19 #include <api/fs/tracing_path.h>
20 #include <perf/cpumap.h>
21 #include <util/parse-events-bison.h>
22 #include <util/parse-events-flex.h>
26 #include "util/parse-branch-options.h"
27 #include "util/evsel_config.h"
28 #include "util/event.h"
29 #include "util/bpf-filter.h"
30 #include "util/util.h"
31 #include "tracepoint.h"
33 #define MAX_NAME_LEN 100
35 static int get_config_terms(const struct parse_events_terms
*head_config
,
36 struct list_head
*head_terms
);
37 static int parse_events_terms__copy(const struct parse_events_terms
*src
,
38 struct parse_events_terms
*dest
);
40 const struct event_symbol event_symbols_hw
[PERF_COUNT_HW_MAX
] = {
41 [PERF_COUNT_HW_CPU_CYCLES
] = {
42 .symbol
= "cpu-cycles",
45 [PERF_COUNT_HW_INSTRUCTIONS
] = {
46 .symbol
= "instructions",
49 [PERF_COUNT_HW_CACHE_REFERENCES
] = {
50 .symbol
= "cache-references",
53 [PERF_COUNT_HW_CACHE_MISSES
] = {
54 .symbol
= "cache-misses",
57 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS
] = {
58 .symbol
= "branch-instructions",
61 [PERF_COUNT_HW_BRANCH_MISSES
] = {
62 .symbol
= "branch-misses",
65 [PERF_COUNT_HW_BUS_CYCLES
] = {
66 .symbol
= "bus-cycles",
69 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
] = {
70 .symbol
= "stalled-cycles-frontend",
71 .alias
= "idle-cycles-frontend",
73 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND
] = {
74 .symbol
= "stalled-cycles-backend",
75 .alias
= "idle-cycles-backend",
77 [PERF_COUNT_HW_REF_CPU_CYCLES
] = {
78 .symbol
= "ref-cycles",
83 const struct event_symbol event_symbols_sw
[PERF_COUNT_SW_MAX
] = {
84 [PERF_COUNT_SW_CPU_CLOCK
] = {
85 .symbol
= "cpu-clock",
88 [PERF_COUNT_SW_TASK_CLOCK
] = {
89 .symbol
= "task-clock",
92 [PERF_COUNT_SW_PAGE_FAULTS
] = {
93 .symbol
= "page-faults",
96 [PERF_COUNT_SW_CONTEXT_SWITCHES
] = {
97 .symbol
= "context-switches",
100 [PERF_COUNT_SW_CPU_MIGRATIONS
] = {
101 .symbol
= "cpu-migrations",
102 .alias
= "migrations",
104 [PERF_COUNT_SW_PAGE_FAULTS_MIN
] = {
105 .symbol
= "minor-faults",
108 [PERF_COUNT_SW_PAGE_FAULTS_MAJ
] = {
109 .symbol
= "major-faults",
112 [PERF_COUNT_SW_ALIGNMENT_FAULTS
] = {
113 .symbol
= "alignment-faults",
116 [PERF_COUNT_SW_EMULATION_FAULTS
] = {
117 .symbol
= "emulation-faults",
120 [PERF_COUNT_SW_DUMMY
] = {
124 [PERF_COUNT_SW_BPF_OUTPUT
] = {
125 .symbol
= "bpf-output",
128 [PERF_COUNT_SW_CGROUP_SWITCHES
] = {
129 .symbol
= "cgroup-switches",
134 const char *event_type(int type
)
137 case PERF_TYPE_HARDWARE
:
140 case PERF_TYPE_SOFTWARE
:
143 case PERF_TYPE_TRACEPOINT
:
146 case PERF_TYPE_HW_CACHE
:
147 return "hardware-cache";
156 static char *get_config_str(const struct parse_events_terms
*head_terms
,
157 enum parse_events__term_type type_term
)
159 struct parse_events_term
*term
;
164 list_for_each_entry(term
, &head_terms
->terms
, list
)
165 if (term
->type_term
== type_term
)
166 return term
->val
.str
;
171 static char *get_config_metric_id(const struct parse_events_terms
*head_terms
)
173 return get_config_str(head_terms
, PARSE_EVENTS__TERM_TYPE_METRIC_ID
);
176 static char *get_config_name(const struct parse_events_terms
*head_terms
)
178 return get_config_str(head_terms
, PARSE_EVENTS__TERM_TYPE_NAME
);
182 * fix_raw - For each raw term see if there is an event (aka alias) in pmu that
183 * matches the raw's string value. If the string value matches an
184 * event then change the term to be an event, if not then change it to
185 * be a config term. For example, "read" may be an event of the PMU or
186 * a raw hex encoding of 0xead. The fix-up is done late so the PMU of
187 * the event can be determined and we don't need to scan all PMUs
189 * @config_terms: the list of terms that may contain a raw term.
190 * @pmu: the PMU to scan for events from.
192 static void fix_raw(struct parse_events_terms
*config_terms
, struct perf_pmu
*pmu
)
194 struct parse_events_term
*term
;
196 list_for_each_entry(term
, &config_terms
->terms
, list
) {
199 if (term
->type_term
!= PARSE_EVENTS__TERM_TYPE_RAW
)
202 if (perf_pmu__have_event(pmu
, term
->val
.str
)) {
203 zfree(&term
->config
);
204 term
->config
= term
->val
.str
;
205 term
->type_val
= PARSE_EVENTS__TERM_TYPE_NUM
;
206 term
->type_term
= PARSE_EVENTS__TERM_TYPE_USER
;
208 term
->no_value
= true;
212 zfree(&term
->config
);
213 term
->config
= strdup("config");
215 num
= strtoull(term
->val
.str
+ 1, NULL
, 16);
218 term
->type_val
= PARSE_EVENTS__TERM_TYPE_NUM
;
219 term
->type_term
= PARSE_EVENTS__TERM_TYPE_CONFIG
;
221 term
->no_value
= false;
225 static struct evsel
*
226 __add_event(struct list_head
*list
, int *idx
,
227 struct perf_event_attr
*attr
,
229 const char *name
, const char *metric_id
, struct perf_pmu
*pmu
,
230 struct list_head
*config_terms
, bool auto_merge_stats
,
231 struct perf_cpu_map
*cpu_list
, u64 alternate_hw_config
)
234 struct perf_cpu_map
*cpus
= perf_cpu_map__is_empty(cpu_list
) && pmu
? pmu
->cpus
: cpu_list
;
236 cpus
= perf_cpu_map__get(cpus
);
238 perf_pmu__warn_invalid_formats(pmu
);
240 if (pmu
&& (attr
->type
== PERF_TYPE_RAW
|| attr
->type
>= PERF_TYPE_MAX
)) {
241 perf_pmu__warn_invalid_config(pmu
, attr
->config
, name
,
242 PERF_PMU_FORMAT_VALUE_CONFIG
, "config");
243 perf_pmu__warn_invalid_config(pmu
, attr
->config1
, name
,
244 PERF_PMU_FORMAT_VALUE_CONFIG1
, "config1");
245 perf_pmu__warn_invalid_config(pmu
, attr
->config2
, name
,
246 PERF_PMU_FORMAT_VALUE_CONFIG2
, "config2");
247 perf_pmu__warn_invalid_config(pmu
, attr
->config3
, name
,
248 PERF_PMU_FORMAT_VALUE_CONFIG3
, "config3");
251 event_attr_init(attr
);
253 evsel
= evsel__new_idx(attr
, *idx
);
255 perf_cpu_map__put(cpus
);
260 evsel
->core
.cpus
= cpus
;
261 evsel
->core
.own_cpus
= perf_cpu_map__get(cpus
);
262 evsel
->core
.requires_cpu
= pmu
? pmu
->is_uncore
: false;
263 evsel
->core
.is_pmu_core
= pmu
? pmu
->is_core
: false;
264 evsel
->auto_merge_stats
= auto_merge_stats
;
266 evsel
->alternate_hw_config
= alternate_hw_config
;
269 evsel
->name
= strdup(name
);
272 evsel
->metric_id
= strdup(metric_id
);
275 list_splice_init(config_terms
, &evsel
->config_terms
);
278 list_add_tail(&evsel
->core
.node
, list
);
283 struct evsel
*parse_events__add_event(int idx
, struct perf_event_attr
*attr
,
284 const char *name
, const char *metric_id
,
285 struct perf_pmu
*pmu
)
287 return __add_event(/*list=*/NULL
, &idx
, attr
, /*init_attr=*/false, name
,
288 metric_id
, pmu
, /*config_terms=*/NULL
,
289 /*auto_merge_stats=*/false, /*cpu_list=*/NULL
,
290 /*alternate_hw_config=*/PERF_COUNT_HW_MAX
);
293 static int add_event(struct list_head
*list
, int *idx
,
294 struct perf_event_attr
*attr
, const char *name
,
295 const char *metric_id
, struct list_head
*config_terms
,
296 u64 alternate_hw_config
)
298 return __add_event(list
, idx
, attr
, /*init_attr*/true, name
, metric_id
,
299 /*pmu=*/NULL
, config_terms
,
300 /*auto_merge_stats=*/false, /*cpu_list=*/NULL
,
301 alternate_hw_config
) ? 0 : -ENOMEM
;
305 * parse_aliases - search names for entries beginning or equalling str ignoring
306 * case. If multiple entries in names match str then the longest
308 * @str: The needle to look for.
309 * @names: The haystack to search.
310 * @size: The size of the haystack.
311 * @longest: Out argument giving the length of the matching entry.
313 static int parse_aliases(const char *str
, const char *const names
[][EVSEL__MAX_ALIASES
], int size
,
317 for (int i
= 0; i
< size
; i
++) {
318 for (int j
= 0; j
< EVSEL__MAX_ALIASES
&& names
[i
][j
]; j
++) {
319 int n
= strlen(names
[i
][j
]);
321 if (n
> *longest
&& !strncasecmp(str
, names
[i
][j
], n
))
331 typedef int config_term_func_t(struct perf_event_attr
*attr
,
332 struct parse_events_term
*term
,
333 struct parse_events_error
*err
);
334 static int config_term_common(struct perf_event_attr
*attr
,
335 struct parse_events_term
*term
,
336 struct parse_events_error
*err
);
337 static int config_attr(struct perf_event_attr
*attr
,
338 const struct parse_events_terms
*head
,
339 struct parse_events_error
*err
,
340 config_term_func_t config_term
);
343 * parse_events__decode_legacy_cache - Search name for the legacy cache event
344 * name composed of 1, 2 or 3 hyphen
345 * separated sections. The first section is
346 * the cache type while the others are the
347 * optional op and optional result. To make
348 * life hard the names in the table also
349 * contain hyphens and the longest name
350 * should always be selected.
352 int parse_events__decode_legacy_cache(const char *name
, int extended_pmu_type
, __u64
*config
)
354 int len
, cache_type
= -1, cache_op
= -1, cache_result
= -1;
355 const char *name_end
= &name
[strlen(name
) + 1];
356 const char *str
= name
;
358 cache_type
= parse_aliases(str
, evsel__hw_cache
, PERF_COUNT_HW_CACHE_MAX
, &len
);
359 if (cache_type
== -1)
363 if (str
< name_end
) {
364 cache_op
= parse_aliases(str
, evsel__hw_cache_op
,
365 PERF_COUNT_HW_CACHE_OP_MAX
, &len
);
367 if (!evsel__is_cache_op_valid(cache_type
, cache_op
))
371 cache_result
= parse_aliases(str
, evsel__hw_cache_result
,
372 PERF_COUNT_HW_CACHE_RESULT_MAX
, &len
);
373 if (cache_result
>= 0)
377 if (str
< name_end
) {
379 cache_op
= parse_aliases(str
, evsel__hw_cache_op
,
380 PERF_COUNT_HW_CACHE_OP_MAX
, &len
);
382 if (!evsel__is_cache_op_valid(cache_type
, cache_op
))
385 } else if (cache_result
< 0) {
386 cache_result
= parse_aliases(str
, evsel__hw_cache_result
,
387 PERF_COUNT_HW_CACHE_RESULT_MAX
, &len
);
392 * Fall back to reads:
395 cache_op
= PERF_COUNT_HW_CACHE_OP_READ
;
398 * Fall back to accesses:
400 if (cache_result
== -1)
401 cache_result
= PERF_COUNT_HW_CACHE_RESULT_ACCESS
;
403 *config
= cache_type
| (cache_op
<< 8) | (cache_result
<< 16);
404 if (perf_pmus__supports_extended_type())
405 *config
|= (__u64
)extended_pmu_type
<< PERF_PMU_TYPE_SHIFT
;
410 * parse_events__filter_pmu - returns false if a wildcard PMU should be
411 * considered, true if it should be filtered.
413 bool parse_events__filter_pmu(const struct parse_events_state
*parse_state
,
414 const struct perf_pmu
*pmu
)
416 if (parse_state
->pmu_filter
== NULL
)
419 return strcmp(parse_state
->pmu_filter
, pmu
->name
) != 0;
422 static int parse_events_add_pmu(struct parse_events_state
*parse_state
,
423 struct list_head
*list
, struct perf_pmu
*pmu
,
424 const struct parse_events_terms
*const_parsed_terms
,
425 bool auto_merge_stats
, u64 alternate_hw_config
);
427 int parse_events_add_cache(struct list_head
*list
, int *idx
, const char *name
,
428 struct parse_events_state
*parse_state
,
429 struct parse_events_terms
*parsed_terms
)
431 struct perf_pmu
*pmu
= NULL
;
432 bool found_supported
= false;
433 const char *config_name
= get_config_name(parsed_terms
);
434 const char *metric_id
= get_config_metric_id(parsed_terms
);
436 while ((pmu
= perf_pmus__scan(pmu
)) != NULL
) {
437 LIST_HEAD(config_terms
);
438 struct perf_event_attr attr
;
441 if (parse_events__filter_pmu(parse_state
, pmu
))
444 if (perf_pmu__have_event(pmu
, name
)) {
446 * The PMU has the event so add as not a legacy cache
449 ret
= parse_events_add_pmu(parse_state
, list
, pmu
,
451 perf_pmu__auto_merge_stats(pmu
),
452 /*alternate_hw_config=*/PERF_COUNT_HW_MAX
);
459 /* Legacy cache events are only supported by core PMUs. */
463 memset(&attr
, 0, sizeof(attr
));
464 attr
.type
= PERF_TYPE_HW_CACHE
;
466 ret
= parse_events__decode_legacy_cache(name
, pmu
->type
, &attr
.config
);
470 found_supported
= true;
473 if (config_attr(&attr
, parsed_terms
, parse_state
->error
,
477 if (get_config_terms(parsed_terms
, &config_terms
))
481 if (__add_event(list
, idx
, &attr
, /*init_attr*/true, config_name
?: name
,
482 metric_id
, pmu
, &config_terms
, /*auto_merge_stats=*/false,
484 /*alternate_hw_config=*/PERF_COUNT_HW_MAX
) == NULL
)
487 free_config_terms(&config_terms
);
489 return found_supported
? 0 : -EINVAL
;
492 #ifdef HAVE_LIBTRACEEVENT
493 static void tracepoint_error(struct parse_events_error
*e
, int err
,
494 const char *sys
, const char *name
, int column
)
503 * We get error directly from syscall errno ( > 0),
504 * or from encoded pointer's error ( < 0).
510 str
= "can't access trace events";
513 str
= "unknown tracepoint";
516 str
= "failed to add tracepoint";
520 tracing_path__strerror_open_tp(err
, help
, sizeof(help
), sys
, name
);
521 parse_events_error__handle(e
, column
, strdup(str
), strdup(help
));
524 static int add_tracepoint(struct parse_events_state
*parse_state
,
525 struct list_head
*list
,
526 const char *sys_name
, const char *evt_name
,
527 struct parse_events_error
*err
,
528 struct parse_events_terms
*head_config
, void *loc_
)
531 struct evsel
*evsel
= evsel__newtp_idx(sys_name
, evt_name
, parse_state
->idx
++,
532 !parse_state
->fake_tp
);
535 tracepoint_error(err
, PTR_ERR(evsel
), sys_name
, evt_name
, loc
->first_column
);
536 return PTR_ERR(evsel
);
540 LIST_HEAD(config_terms
);
542 if (get_config_terms(head_config
, &config_terms
))
544 list_splice(&config_terms
, &evsel
->config_terms
);
547 list_add_tail(&evsel
->core
.node
, list
);
551 static int add_tracepoint_multi_event(struct parse_events_state
*parse_state
,
552 struct list_head
*list
,
553 const char *sys_name
, const char *evt_name
,
554 struct parse_events_error
*err
,
555 struct parse_events_terms
*head_config
, YYLTYPE
*loc
)
558 struct dirent
*evt_ent
;
560 int ret
= 0, found
= 0;
562 evt_path
= get_events_file(sys_name
);
564 tracepoint_error(err
, errno
, sys_name
, evt_name
, loc
->first_column
);
567 evt_dir
= opendir(evt_path
);
569 put_events_file(evt_path
);
570 tracepoint_error(err
, errno
, sys_name
, evt_name
, loc
->first_column
);
574 while (!ret
&& (evt_ent
= readdir(evt_dir
))) {
575 if (!strcmp(evt_ent
->d_name
, ".")
576 || !strcmp(evt_ent
->d_name
, "..")
577 || !strcmp(evt_ent
->d_name
, "enable")
578 || !strcmp(evt_ent
->d_name
, "filter"))
581 if (!strglobmatch(evt_ent
->d_name
, evt_name
))
586 ret
= add_tracepoint(parse_state
, list
, sys_name
, evt_ent
->d_name
,
587 err
, head_config
, loc
);
591 tracepoint_error(err
, ENOENT
, sys_name
, evt_name
, loc
->first_column
);
595 put_events_file(evt_path
);
600 static int add_tracepoint_event(struct parse_events_state
*parse_state
,
601 struct list_head
*list
,
602 const char *sys_name
, const char *evt_name
,
603 struct parse_events_error
*err
,
604 struct parse_events_terms
*head_config
, YYLTYPE
*loc
)
606 return strpbrk(evt_name
, "*?") ?
607 add_tracepoint_multi_event(parse_state
, list
, sys_name
, evt_name
,
608 err
, head_config
, loc
) :
609 add_tracepoint(parse_state
, list
, sys_name
, evt_name
,
610 err
, head_config
, loc
);
613 static int add_tracepoint_multi_sys(struct parse_events_state
*parse_state
,
614 struct list_head
*list
,
615 const char *sys_name
, const char *evt_name
,
616 struct parse_events_error
*err
,
617 struct parse_events_terms
*head_config
, YYLTYPE
*loc
)
619 struct dirent
*events_ent
;
623 events_dir
= tracing_events__opendir();
625 tracepoint_error(err
, errno
, sys_name
, evt_name
, loc
->first_column
);
629 while (!ret
&& (events_ent
= readdir(events_dir
))) {
630 if (!strcmp(events_ent
->d_name
, ".")
631 || !strcmp(events_ent
->d_name
, "..")
632 || !strcmp(events_ent
->d_name
, "enable")
633 || !strcmp(events_ent
->d_name
, "header_event")
634 || !strcmp(events_ent
->d_name
, "header_page"))
637 if (!strglobmatch(events_ent
->d_name
, sys_name
))
640 ret
= add_tracepoint_event(parse_state
, list
, events_ent
->d_name
,
641 evt_name
, err
, head_config
, loc
);
644 closedir(events_dir
);
647 #endif /* HAVE_LIBTRACEEVENT */
649 size_t default_breakpoint_len(void)
651 #if defined(__i386__)
655 struct perf_env env
= {};
657 perf_env__init(&env
);
658 len
= perf_env__kernel_is_64_bit(&env
) ? sizeof(u64
) : sizeof(long);
659 perf_env__exit(&env
);
662 #elif defined(__aarch64__)
670 parse_breakpoint_type(const char *type
, struct perf_event_attr
*attr
)
674 for (i
= 0; i
< 3; i
++) {
675 if (!type
|| !type
[i
])
678 #define CHECK_SET_TYPE(bit) \
680 if (attr->bp_type & bit) \
683 attr->bp_type |= bit; \
688 CHECK_SET_TYPE(HW_BREAKPOINT_R
);
691 CHECK_SET_TYPE(HW_BREAKPOINT_W
);
694 CHECK_SET_TYPE(HW_BREAKPOINT_X
);
701 #undef CHECK_SET_TYPE
703 if (!attr
->bp_type
) /* Default */
704 attr
->bp_type
= HW_BREAKPOINT_R
| HW_BREAKPOINT_W
;
709 int parse_events_add_breakpoint(struct parse_events_state
*parse_state
,
710 struct list_head
*list
,
711 u64 addr
, char *type
, u64 len
,
712 struct parse_events_terms
*head_config
)
714 struct perf_event_attr attr
;
715 LIST_HEAD(config_terms
);
718 memset(&attr
, 0, sizeof(attr
));
721 if (parse_breakpoint_type(type
, &attr
))
724 /* Provide some defaults if len is not specified */
726 if (attr
.bp_type
== HW_BREAKPOINT_X
)
727 len
= default_breakpoint_len();
729 len
= HW_BREAKPOINT_LEN_4
;
734 attr
.type
= PERF_TYPE_BREAKPOINT
;
735 attr
.sample_period
= 1;
738 if (config_attr(&attr
, head_config
, parse_state
->error
,
742 if (get_config_terms(head_config
, &config_terms
))
746 name
= get_config_name(head_config
);
748 return add_event(list
, &parse_state
->idx
, &attr
, name
, /*metric_id=*/NULL
,
749 &config_terms
, /*alternate_hw_config=*/PERF_COUNT_HW_MAX
);
752 static int check_type_val(struct parse_events_term
*term
,
753 struct parse_events_error
*err
,
754 enum parse_events__term_val_type type
)
756 if (type
== term
->type_val
)
760 parse_events_error__handle(err
, term
->err_val
,
761 type
== PARSE_EVENTS__TERM_TYPE_NUM
762 ? strdup("expected numeric value")
763 : strdup("expected string value"),
769 static bool config_term_shrinked
;
771 const char *parse_events__term_type_str(enum parse_events__term_type term_type
)
774 * Update according to parse-events.l
776 static const char *config_term_names
[__PARSE_EVENTS__TERM_TYPE_NR
] = {
777 [PARSE_EVENTS__TERM_TYPE_USER
] = "<sysfs term>",
778 [PARSE_EVENTS__TERM_TYPE_CONFIG
] = "config",
779 [PARSE_EVENTS__TERM_TYPE_CONFIG1
] = "config1",
780 [PARSE_EVENTS__TERM_TYPE_CONFIG2
] = "config2",
781 [PARSE_EVENTS__TERM_TYPE_CONFIG3
] = "config3",
782 [PARSE_EVENTS__TERM_TYPE_NAME
] = "name",
783 [PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD
] = "period",
784 [PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ
] = "freq",
785 [PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE
] = "branch_type",
786 [PARSE_EVENTS__TERM_TYPE_TIME
] = "time",
787 [PARSE_EVENTS__TERM_TYPE_CALLGRAPH
] = "call-graph",
788 [PARSE_EVENTS__TERM_TYPE_STACKSIZE
] = "stack-size",
789 [PARSE_EVENTS__TERM_TYPE_NOINHERIT
] = "no-inherit",
790 [PARSE_EVENTS__TERM_TYPE_INHERIT
] = "inherit",
791 [PARSE_EVENTS__TERM_TYPE_MAX_STACK
] = "max-stack",
792 [PARSE_EVENTS__TERM_TYPE_MAX_EVENTS
] = "nr",
793 [PARSE_EVENTS__TERM_TYPE_OVERWRITE
] = "overwrite",
794 [PARSE_EVENTS__TERM_TYPE_NOOVERWRITE
] = "no-overwrite",
795 [PARSE_EVENTS__TERM_TYPE_DRV_CFG
] = "driver-config",
796 [PARSE_EVENTS__TERM_TYPE_PERCORE
] = "percore",
797 [PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT
] = "aux-output",
798 [PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE
] = "aux-sample-size",
799 [PARSE_EVENTS__TERM_TYPE_METRIC_ID
] = "metric-id",
800 [PARSE_EVENTS__TERM_TYPE_RAW
] = "raw",
801 [PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE
] = "legacy-cache",
802 [PARSE_EVENTS__TERM_TYPE_HARDWARE
] = "hardware",
804 if ((unsigned int)term_type
>= __PARSE_EVENTS__TERM_TYPE_NR
)
805 return "unknown term";
807 return config_term_names
[term_type
];
811 config_term_avail(enum parse_events__term_type term_type
, struct parse_events_error
*err
)
815 if (term_type
< 0 || term_type
>= __PARSE_EVENTS__TERM_TYPE_NR
) {
816 parse_events_error__handle(err
, -1,
817 strdup("Invalid term_type"), NULL
);
820 if (!config_term_shrinked
)
824 case PARSE_EVENTS__TERM_TYPE_CONFIG
:
825 case PARSE_EVENTS__TERM_TYPE_CONFIG1
:
826 case PARSE_EVENTS__TERM_TYPE_CONFIG2
:
827 case PARSE_EVENTS__TERM_TYPE_CONFIG3
:
828 case PARSE_EVENTS__TERM_TYPE_NAME
:
829 case PARSE_EVENTS__TERM_TYPE_METRIC_ID
:
830 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD
:
831 case PARSE_EVENTS__TERM_TYPE_PERCORE
:
833 case PARSE_EVENTS__TERM_TYPE_USER
:
834 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ
:
835 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE
:
836 case PARSE_EVENTS__TERM_TYPE_TIME
:
837 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH
:
838 case PARSE_EVENTS__TERM_TYPE_STACKSIZE
:
839 case PARSE_EVENTS__TERM_TYPE_NOINHERIT
:
840 case PARSE_EVENTS__TERM_TYPE_INHERIT
:
841 case PARSE_EVENTS__TERM_TYPE_MAX_STACK
:
842 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS
:
843 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE
:
844 case PARSE_EVENTS__TERM_TYPE_OVERWRITE
:
845 case PARSE_EVENTS__TERM_TYPE_DRV_CFG
:
846 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT
:
847 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE
:
848 case PARSE_EVENTS__TERM_TYPE_RAW
:
849 case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE
:
850 case PARSE_EVENTS__TERM_TYPE_HARDWARE
:
855 /* term_type is validated so indexing is safe */
856 if (asprintf(&err_str
, "'%s' is not usable in 'perf stat'",
857 parse_events__term_type_str(term_type
)) >= 0)
858 parse_events_error__handle(err
, -1, err_str
, NULL
);
863 void parse_events__shrink_config_terms(void)
865 config_term_shrinked
= true;
868 static int config_term_common(struct perf_event_attr
*attr
,
869 struct parse_events_term
*term
,
870 struct parse_events_error
*err
)
872 #define CHECK_TYPE_VAL(type) \
874 if (check_type_val(term, err, PARSE_EVENTS__TERM_TYPE_ ## type)) \
878 switch (term
->type_term
) {
879 case PARSE_EVENTS__TERM_TYPE_CONFIG
:
881 attr
->config
= term
->val
.num
;
883 case PARSE_EVENTS__TERM_TYPE_CONFIG1
:
885 attr
->config1
= term
->val
.num
;
887 case PARSE_EVENTS__TERM_TYPE_CONFIG2
:
889 attr
->config2
= term
->val
.num
;
891 case PARSE_EVENTS__TERM_TYPE_CONFIG3
:
893 attr
->config3
= term
->val
.num
;
895 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD
:
898 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ
:
901 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE
:
903 if (strcmp(term
->val
.str
, "no") &&
904 parse_branch_str(term
->val
.str
,
905 &attr
->branch_sample_type
)) {
906 parse_events_error__handle(err
, term
->err_val
,
907 strdup("invalid branch sample type"),
912 case PARSE_EVENTS__TERM_TYPE_TIME
:
914 if (term
->val
.num
> 1) {
915 parse_events_error__handle(err
, term
->err_val
,
916 strdup("expected 0 or 1"),
921 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH
:
924 case PARSE_EVENTS__TERM_TYPE_STACKSIZE
:
927 case PARSE_EVENTS__TERM_TYPE_INHERIT
:
930 case PARSE_EVENTS__TERM_TYPE_NOINHERIT
:
933 case PARSE_EVENTS__TERM_TYPE_OVERWRITE
:
936 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE
:
939 case PARSE_EVENTS__TERM_TYPE_NAME
:
942 case PARSE_EVENTS__TERM_TYPE_METRIC_ID
:
945 case PARSE_EVENTS__TERM_TYPE_RAW
:
948 case PARSE_EVENTS__TERM_TYPE_MAX_STACK
:
951 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS
:
954 case PARSE_EVENTS__TERM_TYPE_PERCORE
:
956 if ((unsigned int)term
->val
.num
> 1) {
957 parse_events_error__handle(err
, term
->err_val
,
958 strdup("expected 0 or 1"),
963 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT
:
966 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE
:
968 if (term
->val
.num
> UINT_MAX
) {
969 parse_events_error__handle(err
, term
->err_val
,
975 case PARSE_EVENTS__TERM_TYPE_DRV_CFG
:
976 case PARSE_EVENTS__TERM_TYPE_USER
:
977 case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE
:
978 case PARSE_EVENTS__TERM_TYPE_HARDWARE
:
980 parse_events_error__handle(err
, term
->err_term
,
981 strdup(parse_events__term_type_str(term
->type_term
)),
982 parse_events_formats_error_string(NULL
));
987 * Check term availability after basic checking so
988 * PARSE_EVENTS__TERM_TYPE_USER can be found and filtered.
990 * If check availability at the entry of this function,
991 * user will see "'<sysfs term>' is not usable in 'perf stat'"
992 * if an invalid config term is provided for legacy events
993 * (for example, instructions/badterm/...), which is confusing.
995 if (!config_term_avail(term
->type_term
, err
))
998 #undef CHECK_TYPE_VAL
1001 static int config_term_pmu(struct perf_event_attr
*attr
,
1002 struct parse_events_term
*term
,
1003 struct parse_events_error
*err
)
1005 if (term
->type_term
== PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE
) {
1006 struct perf_pmu
*pmu
= perf_pmus__find_by_type(attr
->type
);
1011 if (asprintf(&err_str
, "Failed to find PMU for type %d", attr
->type
) >= 0)
1012 parse_events_error__handle(err
, term
->err_term
,
1013 err_str
, /*help=*/NULL
);
1017 * Rewrite the PMU event to a legacy cache one unless the PMU
1018 * doesn't support legacy cache events or the event is present
1021 if (perf_pmu__supports_legacy_cache(pmu
) &&
1022 !perf_pmu__have_event(pmu
, term
->config
)) {
1023 attr
->type
= PERF_TYPE_HW_CACHE
;
1024 return parse_events__decode_legacy_cache(term
->config
, pmu
->type
,
1027 term
->type_term
= PARSE_EVENTS__TERM_TYPE_USER
;
1028 term
->no_value
= true;
1031 if (term
->type_term
== PARSE_EVENTS__TERM_TYPE_HARDWARE
) {
1032 struct perf_pmu
*pmu
= perf_pmus__find_by_type(attr
->type
);
1037 if (asprintf(&err_str
, "Failed to find PMU for type %d", attr
->type
) >= 0)
1038 parse_events_error__handle(err
, term
->err_term
,
1039 err_str
, /*help=*/NULL
);
1043 * If the PMU has a sysfs or json event prefer it over
1044 * legacy. ARM requires this.
1046 if (perf_pmu__have_event(pmu
, term
->config
)) {
1047 term
->type_term
= PARSE_EVENTS__TERM_TYPE_USER
;
1048 term
->no_value
= true;
1049 term
->alternate_hw_config
= true;
1051 attr
->type
= PERF_TYPE_HARDWARE
;
1052 attr
->config
= term
->val
.num
;
1053 if (perf_pmus__supports_extended_type())
1054 attr
->config
|= (__u64
)pmu
->type
<< PERF_PMU_TYPE_SHIFT
;
1058 if (term
->type_term
== PARSE_EVENTS__TERM_TYPE_USER
||
1059 term
->type_term
== PARSE_EVENTS__TERM_TYPE_DRV_CFG
) {
1061 * Always succeed for sysfs terms, as we don't know
1062 * at this point what type they need to have.
1066 return config_term_common(attr
, term
, err
);
1069 #ifdef HAVE_LIBTRACEEVENT
1070 static int config_term_tracepoint(struct perf_event_attr
*attr
,
1071 struct parse_events_term
*term
,
1072 struct parse_events_error
*err
)
1074 switch (term
->type_term
) {
1075 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH
:
1076 case PARSE_EVENTS__TERM_TYPE_STACKSIZE
:
1077 case PARSE_EVENTS__TERM_TYPE_INHERIT
:
1078 case PARSE_EVENTS__TERM_TYPE_NOINHERIT
:
1079 case PARSE_EVENTS__TERM_TYPE_MAX_STACK
:
1080 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS
:
1081 case PARSE_EVENTS__TERM_TYPE_OVERWRITE
:
1082 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE
:
1083 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT
:
1084 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE
:
1085 return config_term_common(attr
, term
, err
);
1086 case PARSE_EVENTS__TERM_TYPE_USER
:
1087 case PARSE_EVENTS__TERM_TYPE_CONFIG
:
1088 case PARSE_EVENTS__TERM_TYPE_CONFIG1
:
1089 case PARSE_EVENTS__TERM_TYPE_CONFIG2
:
1090 case PARSE_EVENTS__TERM_TYPE_CONFIG3
:
1091 case PARSE_EVENTS__TERM_TYPE_NAME
:
1092 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD
:
1093 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ
:
1094 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE
:
1095 case PARSE_EVENTS__TERM_TYPE_TIME
:
1096 case PARSE_EVENTS__TERM_TYPE_DRV_CFG
:
1097 case PARSE_EVENTS__TERM_TYPE_PERCORE
:
1098 case PARSE_EVENTS__TERM_TYPE_METRIC_ID
:
1099 case PARSE_EVENTS__TERM_TYPE_RAW
:
1100 case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE
:
1101 case PARSE_EVENTS__TERM_TYPE_HARDWARE
:
1104 parse_events_error__handle(err
, term
->err_term
,
1105 strdup(parse_events__term_type_str(term
->type_term
)),
1106 strdup("valid terms: call-graph,stack-size\n")
1116 static int config_attr(struct perf_event_attr
*attr
,
1117 const struct parse_events_terms
*head
,
1118 struct parse_events_error
*err
,
1119 config_term_func_t config_term
)
1121 struct parse_events_term
*term
;
1123 list_for_each_entry(term
, &head
->terms
, list
)
1124 if (config_term(attr
, term
, err
))
1130 static int get_config_terms(const struct parse_events_terms
*head_config
,
1131 struct list_head
*head_terms
)
1133 #define ADD_CONFIG_TERM(__type, __weak) \
1134 struct evsel_config_term *__t; \
1136 __t = zalloc(sizeof(*__t)); \
1140 INIT_LIST_HEAD(&__t->list); \
1141 __t->type = EVSEL__CONFIG_TERM_ ## __type; \
1142 __t->weak = __weak; \
1143 list_add_tail(&__t->list, head_terms)
1145 #define ADD_CONFIG_TERM_VAL(__type, __name, __val, __weak) \
1147 ADD_CONFIG_TERM(__type, __weak); \
1148 __t->val.__name = __val; \
1151 #define ADD_CONFIG_TERM_STR(__type, __val, __weak) \
1153 ADD_CONFIG_TERM(__type, __weak); \
1154 __t->val.str = strdup(__val); \
1155 if (!__t->val.str) { \
1159 __t->free_str = true; \
1162 struct parse_events_term
*term
;
1164 list_for_each_entry(term
, &head_config
->terms
, list
) {
1165 switch (term
->type_term
) {
1166 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD
:
1167 ADD_CONFIG_TERM_VAL(PERIOD
, period
, term
->val
.num
, term
->weak
);
1169 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ
:
1170 ADD_CONFIG_TERM_VAL(FREQ
, freq
, term
->val
.num
, term
->weak
);
1172 case PARSE_EVENTS__TERM_TYPE_TIME
:
1173 ADD_CONFIG_TERM_VAL(TIME
, time
, term
->val
.num
, term
->weak
);
1175 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH
:
1176 ADD_CONFIG_TERM_STR(CALLGRAPH
, term
->val
.str
, term
->weak
);
1178 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE
:
1179 ADD_CONFIG_TERM_STR(BRANCH
, term
->val
.str
, term
->weak
);
1181 case PARSE_EVENTS__TERM_TYPE_STACKSIZE
:
1182 ADD_CONFIG_TERM_VAL(STACK_USER
, stack_user
,
1183 term
->val
.num
, term
->weak
);
1185 case PARSE_EVENTS__TERM_TYPE_INHERIT
:
1186 ADD_CONFIG_TERM_VAL(INHERIT
, inherit
,
1187 term
->val
.num
? 1 : 0, term
->weak
);
1189 case PARSE_EVENTS__TERM_TYPE_NOINHERIT
:
1190 ADD_CONFIG_TERM_VAL(INHERIT
, inherit
,
1191 term
->val
.num
? 0 : 1, term
->weak
);
1193 case PARSE_EVENTS__TERM_TYPE_MAX_STACK
:
1194 ADD_CONFIG_TERM_VAL(MAX_STACK
, max_stack
,
1195 term
->val
.num
, term
->weak
);
1197 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS
:
1198 ADD_CONFIG_TERM_VAL(MAX_EVENTS
, max_events
,
1199 term
->val
.num
, term
->weak
);
1201 case PARSE_EVENTS__TERM_TYPE_OVERWRITE
:
1202 ADD_CONFIG_TERM_VAL(OVERWRITE
, overwrite
,
1203 term
->val
.num
? 1 : 0, term
->weak
);
1205 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE
:
1206 ADD_CONFIG_TERM_VAL(OVERWRITE
, overwrite
,
1207 term
->val
.num
? 0 : 1, term
->weak
);
1209 case PARSE_EVENTS__TERM_TYPE_DRV_CFG
:
1210 ADD_CONFIG_TERM_STR(DRV_CFG
, term
->val
.str
, term
->weak
);
1212 case PARSE_EVENTS__TERM_TYPE_PERCORE
:
1213 ADD_CONFIG_TERM_VAL(PERCORE
, percore
,
1214 term
->val
.num
? true : false, term
->weak
);
1216 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT
:
1217 ADD_CONFIG_TERM_VAL(AUX_OUTPUT
, aux_output
,
1218 term
->val
.num
? 1 : 0, term
->weak
);
1220 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE
:
1221 ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE
, aux_sample_size
,
1222 term
->val
.num
, term
->weak
);
1224 case PARSE_EVENTS__TERM_TYPE_USER
:
1225 case PARSE_EVENTS__TERM_TYPE_CONFIG
:
1226 case PARSE_EVENTS__TERM_TYPE_CONFIG1
:
1227 case PARSE_EVENTS__TERM_TYPE_CONFIG2
:
1228 case PARSE_EVENTS__TERM_TYPE_CONFIG3
:
1229 case PARSE_EVENTS__TERM_TYPE_NAME
:
1230 case PARSE_EVENTS__TERM_TYPE_METRIC_ID
:
1231 case PARSE_EVENTS__TERM_TYPE_RAW
:
1232 case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE
:
1233 case PARSE_EVENTS__TERM_TYPE_HARDWARE
:
1242 * Add EVSEL__CONFIG_TERM_CFG_CHG where cfg_chg will have a bit set for
1243 * each bit of attr->config that the user has changed.
1245 static int get_config_chgs(struct perf_pmu
*pmu
, struct parse_events_terms
*head_config
,
1246 struct list_head
*head_terms
)
1248 struct parse_events_term
*term
;
1252 list_for_each_entry(term
, &head_config
->terms
, list
) {
1253 switch (term
->type_term
) {
1254 case PARSE_EVENTS__TERM_TYPE_USER
:
1255 type
= perf_pmu__format_type(pmu
, term
->config
);
1256 if (type
!= PERF_PMU_FORMAT_VALUE_CONFIG
)
1258 bits
|= perf_pmu__format_bits(pmu
, term
->config
);
1260 case PARSE_EVENTS__TERM_TYPE_CONFIG
:
1263 case PARSE_EVENTS__TERM_TYPE_CONFIG1
:
1264 case PARSE_EVENTS__TERM_TYPE_CONFIG2
:
1265 case PARSE_EVENTS__TERM_TYPE_CONFIG3
:
1266 case PARSE_EVENTS__TERM_TYPE_NAME
:
1267 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD
:
1268 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ
:
1269 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE
:
1270 case PARSE_EVENTS__TERM_TYPE_TIME
:
1271 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH
:
1272 case PARSE_EVENTS__TERM_TYPE_STACKSIZE
:
1273 case PARSE_EVENTS__TERM_TYPE_NOINHERIT
:
1274 case PARSE_EVENTS__TERM_TYPE_INHERIT
:
1275 case PARSE_EVENTS__TERM_TYPE_MAX_STACK
:
1276 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS
:
1277 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE
:
1278 case PARSE_EVENTS__TERM_TYPE_OVERWRITE
:
1279 case PARSE_EVENTS__TERM_TYPE_DRV_CFG
:
1280 case PARSE_EVENTS__TERM_TYPE_PERCORE
:
1281 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT
:
1282 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE
:
1283 case PARSE_EVENTS__TERM_TYPE_METRIC_ID
:
1284 case PARSE_EVENTS__TERM_TYPE_RAW
:
1285 case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE
:
1286 case PARSE_EVENTS__TERM_TYPE_HARDWARE
:
1293 ADD_CONFIG_TERM_VAL(CFG_CHG
, cfg_chg
, bits
, false);
1295 #undef ADD_CONFIG_TERM
1299 int parse_events_add_tracepoint(struct parse_events_state
*parse_state
,
1300 struct list_head
*list
,
1301 const char *sys
, const char *event
,
1302 struct parse_events_error
*err
,
1303 struct parse_events_terms
*head_config
, void *loc_
)
1305 YYLTYPE
*loc
= loc_
;
1306 #ifdef HAVE_LIBTRACEEVENT
1308 struct perf_event_attr attr
;
1310 if (config_attr(&attr
, head_config
, err
,
1311 config_term_tracepoint
))
1315 if (strpbrk(sys
, "*?"))
1316 return add_tracepoint_multi_sys(parse_state
, list
, sys
, event
,
1317 err
, head_config
, loc
);
1319 return add_tracepoint_event(parse_state
, list
, sys
, event
,
1320 err
, head_config
, loc
);
1327 parse_events_error__handle(err
, loc
->first_column
, strdup("unsupported tracepoint"),
1328 strdup("libtraceevent is necessary for tracepoint support"));
1333 static int __parse_events_add_numeric(struct parse_events_state
*parse_state
,
1334 struct list_head
*list
,
1335 struct perf_pmu
*pmu
, u32 type
, u32 extended_type
,
1336 u64 config
, const struct parse_events_terms
*head_config
)
1338 struct perf_event_attr attr
;
1339 LIST_HEAD(config_terms
);
1340 const char *name
, *metric_id
;
1343 memset(&attr
, 0, sizeof(attr
));
1345 attr
.config
= config
;
1346 if (extended_type
&& (type
== PERF_TYPE_HARDWARE
|| type
== PERF_TYPE_HW_CACHE
)) {
1347 assert(perf_pmus__supports_extended_type());
1348 attr
.config
|= (u64
)extended_type
<< PERF_PMU_TYPE_SHIFT
;
1352 if (config_attr(&attr
, head_config
, parse_state
->error
,
1353 config_term_common
))
1356 if (get_config_terms(head_config
, &config_terms
))
1360 name
= get_config_name(head_config
);
1361 metric_id
= get_config_metric_id(head_config
);
1362 ret
= __add_event(list
, &parse_state
->idx
, &attr
, /*init_attr*/true, name
,
1363 metric_id
, pmu
, &config_terms
, /*auto_merge_stats=*/false,
1364 /*cpu_list=*/NULL
, /*alternate_hw_config=*/PERF_COUNT_HW_MAX
1365 ) == NULL
? -ENOMEM
: 0;
1366 free_config_terms(&config_terms
);
1370 int parse_events_add_numeric(struct parse_events_state
*parse_state
,
1371 struct list_head
*list
,
1372 u32 type
, u64 config
,
1373 const struct parse_events_terms
*head_config
,
1376 struct perf_pmu
*pmu
= NULL
;
1377 bool found_supported
= false;
1379 /* Wildcards on numeric values are only supported by core PMUs. */
1380 if (wildcard
&& perf_pmus__supports_extended_type()) {
1381 while ((pmu
= perf_pmus__scan_core(pmu
)) != NULL
) {
1384 found_supported
= true;
1385 if (parse_events__filter_pmu(parse_state
, pmu
))
1388 ret
= __parse_events_add_numeric(parse_state
, list
, pmu
,
1390 config
, head_config
);
1394 if (found_supported
)
1397 return __parse_events_add_numeric(parse_state
, list
, perf_pmus__find_by_type(type
),
1398 type
, /*extended_type=*/0, config
, head_config
);
1401 static bool config_term_percore(struct list_head
*config_terms
)
1403 struct evsel_config_term
*term
;
1405 list_for_each_entry(term
, config_terms
, list
) {
1406 if (term
->type
== EVSEL__CONFIG_TERM_PERCORE
)
1407 return term
->val
.percore
;
1413 static int parse_events_add_pmu(struct parse_events_state
*parse_state
,
1414 struct list_head
*list
, struct perf_pmu
*pmu
,
1415 const struct parse_events_terms
*const_parsed_terms
,
1416 bool auto_merge_stats
, u64 alternate_hw_config
)
1418 struct perf_event_attr attr
;
1419 struct perf_pmu_info info
;
1420 struct evsel
*evsel
;
1421 struct parse_events_error
*err
= parse_state
->error
;
1422 LIST_HEAD(config_terms
);
1423 struct parse_events_terms parsed_terms
;
1424 bool alias_rewrote_terms
= false;
1429 strbuf_init(&sb
, /*hint=*/ 0);
1430 if (pmu
->selectable
&& const_parsed_terms
&&
1431 list_empty(&const_parsed_terms
->terms
)) {
1432 strbuf_addf(&sb
, "%s//", pmu
->name
);
1434 strbuf_addf(&sb
, "%s/", pmu
->name
);
1435 parse_events_terms__to_strbuf(const_parsed_terms
, &sb
);
1436 strbuf_addch(&sb
, '/');
1438 fprintf(stderr
, "Attempt to add: %s\n", sb
.buf
);
1439 strbuf_release(&sb
);
1442 memset(&attr
, 0, sizeof(attr
));
1443 if (pmu
->perf_event_attr_init_default
)
1444 pmu
->perf_event_attr_init_default(pmu
, &attr
);
1446 attr
.type
= pmu
->type
;
1448 if (!const_parsed_terms
|| list_empty(&const_parsed_terms
->terms
)) {
1449 evsel
= __add_event(list
, &parse_state
->idx
, &attr
,
1450 /*init_attr=*/true, /*name=*/NULL
,
1451 /*metric_id=*/NULL
, pmu
,
1452 /*config_terms=*/NULL
, auto_merge_stats
,
1453 /*cpu_list=*/NULL
, alternate_hw_config
);
1454 return evsel
? 0 : -ENOMEM
;
1457 parse_events_terms__init(&parsed_terms
);
1458 if (const_parsed_terms
) {
1459 int ret
= parse_events_terms__copy(const_parsed_terms
, &parsed_terms
);
1464 fix_raw(&parsed_terms
, pmu
);
1466 /* Configure attr/terms with a known PMU, this will set hardcoded terms. */
1467 if (config_attr(&attr
, &parsed_terms
, parse_state
->error
, config_term_pmu
)) {
1468 parse_events_terms__exit(&parsed_terms
);
1472 /* Look for event names in the terms and rewrite into format based terms. */
1473 if (perf_pmu__check_alias(pmu
, &parsed_terms
,
1474 &info
, &alias_rewrote_terms
,
1475 &alternate_hw_config
, err
)) {
1476 parse_events_terms__exit(&parsed_terms
);
1483 strbuf_init(&sb
, /*hint=*/ 0);
1484 parse_events_terms__to_strbuf(&parsed_terms
, &sb
);
1485 fprintf(stderr
, "..after resolving event: %s/%s/\n", pmu
->name
, sb
.buf
);
1486 strbuf_release(&sb
);
1489 /* Configure attr/terms again if an alias was expanded. */
1490 if (alias_rewrote_terms
&&
1491 config_attr(&attr
, &parsed_terms
, parse_state
->error
, config_term_pmu
)) {
1492 parse_events_terms__exit(&parsed_terms
);
1496 if (get_config_terms(&parsed_terms
, &config_terms
)) {
1497 parse_events_terms__exit(&parsed_terms
);
1502 * When using default config, record which bits of attr->config were
1503 * changed by the user.
1505 if (pmu
->perf_event_attr_init_default
&&
1506 get_config_chgs(pmu
, &parsed_terms
, &config_terms
)) {
1507 parse_events_terms__exit(&parsed_terms
);
1511 /* Skip configuring hard coded terms that were applied by config_attr. */
1512 if (perf_pmu__config(pmu
, &attr
, &parsed_terms
, /*apply_hardcoded=*/false,
1513 parse_state
->error
)) {
1514 free_config_terms(&config_terms
);
1515 parse_events_terms__exit(&parsed_terms
);
1519 evsel
= __add_event(list
, &parse_state
->idx
, &attr
, /*init_attr=*/true,
1520 get_config_name(&parsed_terms
),
1521 get_config_metric_id(&parsed_terms
), pmu
,
1522 &config_terms
, auto_merge_stats
, /*cpu_list=*/NULL
,
1523 alternate_hw_config
);
1525 parse_events_terms__exit(&parsed_terms
);
1530 evsel
->use_config_name
= true;
1532 evsel
->percore
= config_term_percore(&evsel
->config_terms
);
1534 parse_events_terms__exit(&parsed_terms
);
1535 free((char *)evsel
->unit
);
1536 evsel
->unit
= strdup(info
.unit
);
1537 evsel
->scale
= info
.scale
;
1538 evsel
->per_pkg
= info
.per_pkg
;
1539 evsel
->snapshot
= info
.snapshot
;
1543 int parse_events_multi_pmu_add(struct parse_events_state
*parse_state
,
1544 const char *event_name
, u64 hw_config
,
1545 const struct parse_events_terms
*const_parsed_terms
,
1546 struct list_head
**listp
, void *loc_
)
1548 struct parse_events_term
*term
;
1549 struct list_head
*list
= NULL
;
1550 struct perf_pmu
*pmu
= NULL
;
1551 YYLTYPE
*loc
= loc_
;
1554 struct parse_events_terms parsed_terms
;
1558 parse_events_terms__init(&parsed_terms
);
1559 if (const_parsed_terms
) {
1560 int ret
= parse_events_terms__copy(const_parsed_terms
, &parsed_terms
);
1566 config
= strdup(event_name
);
1570 if (parse_events_term__num(&term
,
1571 PARSE_EVENTS__TERM_TYPE_USER
,
1572 config
, /*num=*/1, /*novalue=*/true,
1573 loc
, /*loc_val=*/NULL
) < 0) {
1577 list_add_tail(&term
->list
, &parsed_terms
.terms
);
1579 /* Add it for all PMUs that support the alias */
1580 list
= malloc(sizeof(struct list_head
));
1584 INIT_LIST_HEAD(list
);
1586 while ((pmu
= perf_pmus__scan(pmu
)) != NULL
) {
1587 bool auto_merge_stats
;
1589 if (parse_events__filter_pmu(parse_state
, pmu
))
1592 if (!perf_pmu__have_event(pmu
, event_name
))
1595 auto_merge_stats
= perf_pmu__auto_merge_stats(pmu
);
1596 if (!parse_events_add_pmu(parse_state
, list
, pmu
,
1597 &parsed_terms
, auto_merge_stats
, hw_config
)) {
1600 strbuf_init(&sb
, /*hint=*/ 0);
1601 parse_events_terms__to_strbuf(&parsed_terms
, &sb
);
1602 pr_debug("%s -> %s/%s/\n", event_name
, pmu
->name
, sb
.buf
);
1603 strbuf_release(&sb
);
1608 if (parse_state
->fake_pmu
) {
1609 if (!parse_events_add_pmu(parse_state
, list
, perf_pmus__fake_pmu(), &parsed_terms
,
1610 /*auto_merge_stats=*/true, hw_config
)) {
1613 strbuf_init(&sb
, /*hint=*/ 0);
1614 parse_events_terms__to_strbuf(&parsed_terms
, &sb
);
1615 pr_debug("%s -> fake/%s/\n", event_name
, sb
.buf
);
1616 strbuf_release(&sb
);
1622 parse_events_terms__exit(&parsed_terms
);
1631 int parse_events_multi_pmu_add_or_add_pmu(struct parse_events_state
*parse_state
,
1632 const char *event_or_pmu
,
1633 const struct parse_events_terms
*const_parsed_terms
,
1634 struct list_head
**listp
,
1637 YYLTYPE
*loc
= loc_
;
1638 struct perf_pmu
*pmu
;
1642 *listp
= malloc(sizeof(**listp
));
1646 INIT_LIST_HEAD(*listp
);
1648 /* Attempt to add to list assuming event_or_pmu is a PMU name. */
1649 pmu
= perf_pmus__find(event_or_pmu
);
1650 if (pmu
&& !parse_events_add_pmu(parse_state
, *listp
, pmu
, const_parsed_terms
,
1651 /*auto_merge_stats=*/false,
1652 /*alternate_hw_config=*/PERF_COUNT_HW_MAX
))
1655 if (parse_state
->fake_pmu
) {
1656 if (!parse_events_add_pmu(parse_state
, *listp
, perf_pmus__fake_pmu(),
1658 /*auto_merge_stats=*/false,
1659 /*alternate_hw_config=*/PERF_COUNT_HW_MAX
))
1664 /* Failed to add, try wildcard expansion of event_or_pmu as a PMU name. */
1665 while ((pmu
= perf_pmus__scan(pmu
)) != NULL
) {
1666 if (!parse_events__filter_pmu(parse_state
, pmu
) &&
1667 perf_pmu__match(pmu
, event_or_pmu
)) {
1668 bool auto_merge_stats
= perf_pmu__auto_merge_stats(pmu
);
1670 if (!parse_events_add_pmu(parse_state
, *listp
, pmu
,
1673 /*alternate_hw_config=*/PERF_COUNT_HW_MAX
)) {
1675 parse_state
->wild_card_pmus
= true;
1682 /* Failure to add, assume event_or_pmu is an event name. */
1684 if (!parse_events_multi_pmu_add(parse_state
, event_or_pmu
, PERF_COUNT_HW_MAX
,
1685 const_parsed_terms
, listp
, loc
))
1688 if (asprintf(&help
, "Unable to find PMU or event on a PMU of '%s'", event_or_pmu
) < 0)
1690 parse_events_error__handle(parse_state
->error
, loc
->first_column
,
1691 strdup("Bad event or PMU"),
1697 void parse_events__set_leader(char *name
, struct list_head
*list
)
1699 struct evsel
*leader
;
1701 if (list_empty(list
)) {
1702 WARN_ONCE(true, "WARNING: failed to set leader: empty list");
1706 leader
= list_first_entry(list
, struct evsel
, core
.node
);
1707 __perf_evlist__set_leader(list
, &leader
->core
);
1708 zfree(&leader
->group_name
);
1709 leader
->group_name
= name
;
1712 static int parse_events__modifier_list(struct parse_events_state
*parse_state
,
1714 struct list_head
*list
,
1715 struct parse_events_modifier mod
,
1718 struct evsel
*evsel
;
1720 if (!group
&& mod
.weak
) {
1721 parse_events_error__handle(parse_state
->error
, loc
->first_column
,
1722 strdup("Weak modifier is for use with groups"), NULL
);
1726 __evlist__for_each_entry(list
, evsel
) {
1727 /* Translate modifiers into the equivalent evsel excludes. */
1728 int eu
= group
? evsel
->core
.attr
.exclude_user
: 0;
1729 int ek
= group
? evsel
->core
.attr
.exclude_kernel
: 0;
1730 int eh
= group
? evsel
->core
.attr
.exclude_hv
: 0;
1731 int eH
= group
? evsel
->core
.attr
.exclude_host
: 0;
1732 int eG
= group
? evsel
->core
.attr
.exclude_guest
: 0;
1733 int exclude
= eu
| ek
| eh
;
1734 int exclude_GH
= group
? evsel
->exclude_GH
: 0;
1738 exclude
= eu
= ek
= eh
= 1;
1739 if (!exclude_GH
&& !perf_guest
&& exclude_GH_default
)
1745 exclude
= eu
= ek
= eh
= 1;
1748 if (mod
.hypervisor
) {
1750 exclude
= eu
= ek
= eh
= 1;
1755 exclude_GH
= eG
= eH
= 1;
1760 exclude_GH
= eG
= eH
= 1;
1763 evsel
->core
.attr
.exclude_user
= eu
;
1764 evsel
->core
.attr
.exclude_kernel
= ek
;
1765 evsel
->core
.attr
.exclude_hv
= eh
;
1766 evsel
->core
.attr
.exclude_host
= eH
;
1767 evsel
->core
.attr
.exclude_guest
= eG
;
1768 evsel
->exclude_GH
= exclude_GH
;
1770 /* Simple modifiers copied to the evsel. */
1772 u8 precise
= evsel
->core
.attr
.precise_ip
+ mod
.precise
;
1776 * 0 - SAMPLE_IP can have arbitrary skid
1777 * 1 - SAMPLE_IP must have constant skid
1778 * 2 - SAMPLE_IP requested to have 0 skid
1779 * 3 - SAMPLE_IP must have 0 skid
1781 * See also PERF_RECORD_MISC_EXACT_IP
1787 "Maximum combined precise value is 3, adding precision to \"%s\"",
1788 evsel__name(evsel
)) > 0) {
1789 parse_events_error__handle(parse_state
->error
,
1795 evsel
->core
.attr
.precise_ip
= precise
;
1797 if (mod
.precise_max
)
1798 evsel
->precise_max
= 1;
1800 evsel
->core
.attr
.exclude_idle
= 1;
1801 if (mod
.sample_read
)
1802 evsel
->sample_read
= 1;
1803 if (mod
.pinned
&& evsel__is_group_leader(evsel
))
1804 evsel
->core
.attr
.pinned
= 1;
1805 if (mod
.exclusive
&& evsel__is_group_leader(evsel
))
1806 evsel
->core
.attr
.exclusive
= 1;
1808 evsel
->weak_group
= true;
1810 evsel
->bpf_counter
= true;
1812 evsel
->retire_lat
= true;
1817 int parse_events__modifier_group(struct parse_events_state
*parse_state
, void *loc
,
1818 struct list_head
*list
,
1819 struct parse_events_modifier mod
)
1821 return parse_events__modifier_list(parse_state
, loc
, list
, mod
, /*group=*/true);
1824 int parse_events__modifier_event(struct parse_events_state
*parse_state
, void *loc
,
1825 struct list_head
*list
,
1826 struct parse_events_modifier mod
)
1828 return parse_events__modifier_list(parse_state
, loc
, list
, mod
, /*group=*/false);
1831 int parse_events__set_default_name(struct list_head
*list
, char *name
)
1833 struct evsel
*evsel
;
1834 bool used_name
= false;
1836 __evlist__for_each_entry(list
, evsel
) {
1838 evsel
->name
= used_name
? strdup(name
) : name
;
1849 static int parse_events__scanner(const char *str
,
1851 struct parse_events_state
*parse_state
)
1853 YY_BUFFER_STATE buffer
;
1857 ret
= parse_events_lex_init_extra(parse_state
, &scanner
);
1862 buffer
= parse_events__scan_string(str
, scanner
);
1864 parse_events_set_in(input
, scanner
);
1867 parse_events_debug
= 1;
1868 parse_events_set_debug(1, scanner
);
1870 ret
= parse_events_parse(parse_state
, scanner
);
1873 parse_events__flush_buffer(buffer
, scanner
);
1874 parse_events__delete_buffer(buffer
, scanner
);
1876 parse_events_lex_destroy(scanner
);
1881 * parse event config string, return a list of event terms.
1883 int parse_events_terms(struct parse_events_terms
*terms
, const char *str
, FILE *input
)
1885 struct parse_events_state parse_state
= {
1887 .stoken
= PE_START_TERMS
,
1891 ret
= parse_events__scanner(str
, input
, &parse_state
);
1893 list_splice(&parse_state
.terms
->terms
, &terms
->terms
);
1895 zfree(&parse_state
.terms
);
1899 static int evsel__compute_group_pmu_name(struct evsel
*evsel
,
1900 const struct list_head
*head
)
1902 struct evsel
*leader
= evsel__leader(evsel
);
1904 const char *group_pmu_name
;
1905 struct perf_pmu
*pmu
= evsel__find_pmu(evsel
);
1909 * For PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE types the PMU
1910 * is a core PMU, but in heterogeneous systems this is
1911 * unknown. For now pick the first core PMU.
1913 pmu
= perf_pmus__scan_core(NULL
);
1916 pr_debug("No PMU found for '%s'\n", evsel__name(evsel
));
1919 group_pmu_name
= pmu
->name
;
1921 * Software events may be in a group with other uncore PMU events. Use
1922 * the pmu_name of the first non-software event to avoid breaking the
1923 * software event out of the group.
1925 * Aux event leaders, like intel_pt, expect a group with events from
1926 * other PMUs, so substitute the AUX event's PMU in this case.
1928 if (perf_pmu__is_software(pmu
) || evsel__is_aux_event(leader
)) {
1929 struct perf_pmu
*leader_pmu
= evsel__find_pmu(leader
);
1932 /* As with determining pmu above. */
1933 leader_pmu
= perf_pmus__scan_core(NULL
);
1936 * Starting with the leader, find the first event with a named
1937 * non-software PMU. for_each_group_(member|evsel) isn't used as
1938 * the list isn't yet sorted putting evsel's in the same group
1941 if (leader_pmu
&& !perf_pmu__is_software(leader_pmu
)) {
1942 group_pmu_name
= leader_pmu
->name
;
1943 } else if (leader
->core
.nr_members
> 1) {
1944 list_for_each_entry(pos
, head
, core
.node
) {
1945 struct perf_pmu
*pos_pmu
;
1947 if (pos
== leader
|| evsel__leader(pos
) != leader
)
1949 pos_pmu
= evsel__find_pmu(pos
);
1951 /* As with determining pmu above. */
1952 pos_pmu
= perf_pmus__scan_core(NULL
);
1954 if (pos_pmu
&& !perf_pmu__is_software(pos_pmu
)) {
1955 group_pmu_name
= pos_pmu
->name
;
1961 /* Record computed name. */
1962 evsel
->group_pmu_name
= strdup(group_pmu_name
);
1963 return evsel
->group_pmu_name
? 0 : -ENOMEM
;
1966 __weak
int arch_evlist__cmp(const struct evsel
*lhs
, const struct evsel
*rhs
)
1968 /* Order by insertion index. */
1969 return lhs
->core
.idx
- rhs
->core
.idx
;
1972 static int evlist__cmp(void *_fg_idx
, const struct list_head
*l
, const struct list_head
*r
)
1974 const struct perf_evsel
*lhs_core
= container_of(l
, struct perf_evsel
, node
);
1975 const struct evsel
*lhs
= container_of(lhs_core
, struct evsel
, core
);
1976 const struct perf_evsel
*rhs_core
= container_of(r
, struct perf_evsel
, node
);
1977 const struct evsel
*rhs
= container_of(rhs_core
, struct evsel
, core
);
1978 int *force_grouped_idx
= _fg_idx
;
1979 int lhs_sort_idx
, rhs_sort_idx
, ret
;
1980 const char *lhs_pmu_name
, *rhs_pmu_name
;
1981 bool lhs_has_group
, rhs_has_group
;
1984 * First sort by grouping/leader. Read the leader idx only if the evsel
1985 * is part of a group, by default ungrouped events will be sorted
1986 * relative to grouped events based on where the first ungrouped event
1987 * occurs. If both events don't have a group we want to fall-through to
1988 * the arch specific sorting, that can reorder and fix things like
1989 * Intel's topdown events.
1991 if (lhs_core
->leader
!= lhs_core
|| lhs_core
->nr_members
> 1) {
1992 lhs_has_group
= true;
1993 lhs_sort_idx
= lhs_core
->leader
->idx
;
1995 lhs_has_group
= false;
1996 lhs_sort_idx
= *force_grouped_idx
!= -1 && arch_evsel__must_be_in_group(lhs
)
1997 ? *force_grouped_idx
2000 if (rhs_core
->leader
!= rhs_core
|| rhs_core
->nr_members
> 1) {
2001 rhs_has_group
= true;
2002 rhs_sort_idx
= rhs_core
->leader
->idx
;
2004 rhs_has_group
= false;
2005 rhs_sort_idx
= *force_grouped_idx
!= -1 && arch_evsel__must_be_in_group(rhs
)
2006 ? *force_grouped_idx
2010 if (lhs_sort_idx
!= rhs_sort_idx
)
2011 return lhs_sort_idx
- rhs_sort_idx
;
2013 /* Group by PMU if there is a group. Groups can't span PMUs. */
2014 if (lhs_has_group
&& rhs_has_group
) {
2015 lhs_pmu_name
= lhs
->group_pmu_name
;
2016 rhs_pmu_name
= rhs
->group_pmu_name
;
2017 ret
= strcmp(lhs_pmu_name
, rhs_pmu_name
);
2022 /* Architecture specific sorting. */
2023 return arch_evlist__cmp(lhs
, rhs
);
2026 static int parse_events__sort_events_and_fix_groups(struct list_head
*list
)
2028 int idx
= 0, force_grouped_idx
= -1;
2029 struct evsel
*pos
, *cur_leader
= NULL
;
2030 struct perf_evsel
*cur_leaders_grp
= NULL
;
2031 bool idx_changed
= false, cur_leader_force_grouped
= false;
2032 int orig_num_leaders
= 0, num_leaders
= 0;
2036 * Compute index to insert ungrouped events at. Place them where the
2037 * first ungrouped event appears.
2039 list_for_each_entry(pos
, list
, core
.node
) {
2040 const struct evsel
*pos_leader
= evsel__leader(pos
);
2042 ret
= evsel__compute_group_pmu_name(pos
, list
);
2046 if (pos
== pos_leader
)
2050 * Ensure indexes are sequential, in particular for multiple
2051 * event lists being merged. The indexes are used to detect when
2052 * the user order is modified.
2054 pos
->core
.idx
= idx
++;
2056 /* Remember an index to sort all forced grouped events together to. */
2057 if (force_grouped_idx
== -1 && pos
== pos_leader
&& pos
->core
.nr_members
< 2 &&
2058 arch_evsel__must_be_in_group(pos
))
2059 force_grouped_idx
= pos
->core
.idx
;
2063 list_sort(&force_grouped_idx
, list
, evlist__cmp
);
2066 * Recompute groups, splitting for PMUs and adding groups for events
2067 * that require them.
2070 list_for_each_entry(pos
, list
, core
.node
) {
2071 const struct evsel
*pos_leader
= evsel__leader(pos
);
2072 const char *pos_pmu_name
= pos
->group_pmu_name
;
2073 const char *cur_leader_pmu_name
;
2074 bool pos_force_grouped
= force_grouped_idx
!= -1 &&
2075 arch_evsel__must_be_in_group(pos
);
2077 /* Reset index and nr_members. */
2078 if (pos
->core
.idx
!= idx
)
2080 pos
->core
.idx
= idx
++;
2081 pos
->core
.nr_members
= 0;
2084 * Set the group leader respecting the given groupings and that
2085 * groups can't span PMUs.
2090 cur_leader_pmu_name
= cur_leader
->group_pmu_name
;
2091 if ((cur_leaders_grp
!= pos
->core
.leader
&&
2092 (!pos_force_grouped
|| !cur_leader_force_grouped
)) ||
2093 strcmp(cur_leader_pmu_name
, pos_pmu_name
)) {
2094 /* Event is for a different group/PMU than last. */
2097 * Remember the leader's group before it is overwritten,
2098 * so that later events match as being in the same
2101 cur_leaders_grp
= pos
->core
.leader
;
2103 * Avoid forcing events into groups with events that
2104 * don't need to be in the group.
2106 cur_leader_force_grouped
= pos_force_grouped
;
2108 if (pos_leader
!= cur_leader
) {
2109 /* The leader changed so update it. */
2110 evsel__set_leader(pos
, cur_leader
);
2113 list_for_each_entry(pos
, list
, core
.node
) {
2114 struct evsel
*pos_leader
= evsel__leader(pos
);
2116 if (pos
== pos_leader
)
2118 pos_leader
->core
.nr_members
++;
2120 return (idx_changed
|| num_leaders
!= orig_num_leaders
) ? 1 : 0;
2123 int __parse_events(struct evlist
*evlist
, const char *str
, const char *pmu_filter
,
2124 struct parse_events_error
*err
, bool fake_pmu
,
2125 bool warn_if_reordered
, bool fake_tp
)
2127 struct parse_events_state parse_state
= {
2128 .list
= LIST_HEAD_INIT(parse_state
.list
),
2129 .idx
= evlist
->core
.nr_entries
,
2131 .stoken
= PE_START_EVENTS
,
2132 .fake_pmu
= fake_pmu
,
2134 .pmu_filter
= pmu_filter
,
2135 .match_legacy_cache_terms
= true,
2139 ret
= parse_events__scanner(str
, /*input=*/ NULL
, &parse_state
);
2141 if (!ret
&& list_empty(&parse_state
.list
)) {
2142 WARN_ONCE(true, "WARNING: event parser found nothing\n");
2146 ret2
= parse_events__sort_events_and_fix_groups(&parse_state
.list
);
2150 if (ret2
&& warn_if_reordered
&& !parse_state
.wild_card_pmus
)
2151 pr_warning("WARNING: events were regrouped to match PMUs\n");
2154 * Add list to the evlist even with errors to allow callers to clean up.
2156 evlist__splice_list_tail(evlist
, &parse_state
.list
);
2161 last
= evlist__last(evlist
);
2162 last
->cmdline_group_boundary
= true;
2168 * There are 2 users - builtin-record and builtin-test objects.
2169 * Both call evlist__delete in case of error, so we dont
2175 int parse_event(struct evlist
*evlist
, const char *str
)
2177 struct parse_events_error err
;
2180 parse_events_error__init(&err
);
2181 ret
= parse_events(evlist
, str
, &err
);
2182 parse_events_error__exit(&err
);
2186 struct parse_events_error_entry
{
2187 /** @list: The list the error is part of. */
2188 struct list_head list
;
2189 /** @idx: index in the parsed string */
2191 /** @str: string to display at the index */
2193 /** @help: optional help string */
2197 void parse_events_error__init(struct parse_events_error
*err
)
2199 INIT_LIST_HEAD(&err
->list
);
2202 void parse_events_error__exit(struct parse_events_error
*err
)
2204 struct parse_events_error_entry
*pos
, *tmp
;
2206 list_for_each_entry_safe(pos
, tmp
, &err
->list
, list
) {
2209 list_del_init(&pos
->list
);
2214 void parse_events_error__handle(struct parse_events_error
*err
, int idx
,
2215 char *str
, char *help
)
2217 struct parse_events_error_entry
*entry
;
2219 if (WARN(!str
|| !err
, "WARNING: failed to provide error string or struct\n"))
2222 entry
= zalloc(sizeof(*entry
));
2224 pr_err("Failed to allocate memory for event parsing error: %s (%s)\n",
2225 str
, help
?: "<no help>");
2231 list_add(&entry
->list
, &err
->list
);
2238 #define MAX_WIDTH 1000
2239 static int get_term_width(void)
2243 get_term_dimensions(&ws
);
2244 return ws
.ws_col
> MAX_WIDTH
? MAX_WIDTH
: ws
.ws_col
;
2247 static void __parse_events_error__print(int err_idx
, const char *err_str
,
2248 const char *err_help
, const char *event
)
2250 const char *str
= "invalid or unsupported event: ";
2251 char _buf
[MAX_WIDTH
];
2252 char *buf
= (char *) event
;
2255 /* -2 for extra '' in the final fprintf */
2256 int width
= get_term_width() - 2;
2257 int len_event
= strlen(event
);
2258 int len_str
, max_len
, cut
= 0;
2261 * Maximum error index indent, we will cut
2262 * the event string if it's bigger.
2264 int max_err_idx
= 13;
2267 * Let's be specific with the message when
2268 * we have the precise error.
2270 str
= "event syntax error: ";
2271 len_str
= strlen(str
);
2272 max_len
= width
- len_str
;
2276 /* We're cutting from the beginning. */
2277 if (err_idx
> max_err_idx
)
2278 cut
= err_idx
- max_err_idx
;
2280 strncpy(buf
, event
+ cut
, max_len
);
2282 /* Mark cut parts with '..' on both sides. */
2284 buf
[0] = buf
[1] = '.';
2286 if ((len_event
- cut
) > max_len
) {
2287 buf
[max_len
- 1] = buf
[max_len
- 2] = '.';
2291 idx
= len_str
+ err_idx
- cut
;
2294 fprintf(stderr
, "%s'%s'\n", str
, buf
);
2296 fprintf(stderr
, "%*s\\___ %s\n", idx
+ 1, "", err_str
);
2298 fprintf(stderr
, "\n%s\n", err_help
);
2302 void parse_events_error__print(const struct parse_events_error
*err
,
2305 struct parse_events_error_entry
*pos
;
2308 list_for_each_entry(pos
, &err
->list
, list
) {
2310 fputs("\n", stderr
);
2311 __parse_events_error__print(pos
->idx
, pos
->str
, pos
->help
, event
);
2317 * In the list of errors err, do any of the error strings (str) contain the
2318 * given needle string?
2320 bool parse_events_error__contains(const struct parse_events_error
*err
,
2323 struct parse_events_error_entry
*pos
;
2325 list_for_each_entry(pos
, &err
->list
, list
) {
2326 if (strstr(pos
->str
, needle
) != NULL
)
2334 int parse_events_option(const struct option
*opt
, const char *str
,
2335 int unset __maybe_unused
)
2337 struct parse_events_option_args
*args
= opt
->value
;
2338 struct parse_events_error err
;
2341 parse_events_error__init(&err
);
2342 ret
= __parse_events(*args
->evlistp
, str
, args
->pmu_filter
, &err
,
2343 /*fake_pmu=*/false, /*warn_if_reordered=*/true,
2347 parse_events_error__print(&err
, str
);
2348 fprintf(stderr
, "Run 'perf list' for a list of valid events\n");
2350 parse_events_error__exit(&err
);
2355 int parse_events_option_new_evlist(const struct option
*opt
, const char *str
, int unset
)
2357 struct parse_events_option_args
*args
= opt
->value
;
2360 if (*args
->evlistp
== NULL
) {
2361 *args
->evlistp
= evlist__new();
2363 if (*args
->evlistp
== NULL
) {
2364 fprintf(stderr
, "Not enough memory to create evlist\n");
2368 ret
= parse_events_option(opt
, str
, unset
);
2370 evlist__delete(*args
->evlistp
);
2371 *args
->evlistp
= NULL
;
2378 foreach_evsel_in_last_glob(struct evlist
*evlist
,
2379 int (*func
)(struct evsel
*evsel
,
2383 struct evsel
*last
= NULL
;
2387 * Don't return when list_empty, give func a chance to report
2388 * error when it found last == NULL.
2390 * So no need to WARN here, let *func do this.
2392 if (evlist
->core
.nr_entries
> 0)
2393 last
= evlist__last(evlist
);
2396 err
= (*func
)(last
, arg
);
2402 if (last
->core
.node
.prev
== &evlist
->core
.entries
)
2404 last
= list_entry(last
->core
.node
.prev
, struct evsel
, core
.node
);
2405 } while (!last
->cmdline_group_boundary
);
2410 static int set_filter(struct evsel
*evsel
, const void *arg
)
2412 const char *str
= arg
;
2414 int nr_addr_filters
= 0;
2415 struct perf_pmu
*pmu
= NULL
;
2417 if (evsel
== NULL
) {
2419 "--filter option should follow a -e tracepoint or HW tracer option\n");
2423 if (evsel
->core
.attr
.type
== PERF_TYPE_TRACEPOINT
) {
2424 if (evsel__append_tp_filter(evsel
, str
) < 0) {
2426 "not enough memory to hold filter string\n");
2433 while ((pmu
= perf_pmus__scan(pmu
)) != NULL
)
2434 if (pmu
->type
== evsel
->core
.attr
.type
) {
2440 perf_pmu__scan_file(pmu
, "nr_addr_filters",
2441 "%d", &nr_addr_filters
);
2443 if (!nr_addr_filters
)
2444 return perf_bpf_filter__parse(&evsel
->bpf_filters
, str
);
2446 if (evsel__append_addr_filter(evsel
, str
) < 0) {
2448 "not enough memory to hold filter string\n");
2455 int parse_filter(const struct option
*opt
, const char *str
,
2456 int unset __maybe_unused
)
2458 struct evlist
*evlist
= *(struct evlist
**)opt
->value
;
2460 return foreach_evsel_in_last_glob(evlist
, set_filter
,
2464 static int add_exclude_perf_filter(struct evsel
*evsel
,
2465 const void *arg __maybe_unused
)
2467 char new_filter
[64];
2469 if (evsel
== NULL
|| evsel
->core
.attr
.type
!= PERF_TYPE_TRACEPOINT
) {
2471 "--exclude-perf option should follow a -e tracepoint option\n");
2475 snprintf(new_filter
, sizeof(new_filter
), "common_pid != %d", getpid());
2477 if (evsel__append_tp_filter(evsel
, new_filter
) < 0) {
2479 "not enough memory to hold filter string\n");
2486 int exclude_perf(const struct option
*opt
,
2487 const char *arg __maybe_unused
,
2488 int unset __maybe_unused
)
2490 struct evlist
*evlist
= *(struct evlist
**)opt
->value
;
2492 return foreach_evsel_in_last_glob(evlist
, add_exclude_perf_filter
,
2496 int parse_events__is_hardcoded_term(struct parse_events_term
*term
)
2498 return term
->type_term
!= PARSE_EVENTS__TERM_TYPE_USER
;
2501 static int new_term(struct parse_events_term
**_term
,
2502 struct parse_events_term
*temp
,
2505 struct parse_events_term
*term
;
2507 term
= malloc(sizeof(*term
));
2512 INIT_LIST_HEAD(&term
->list
);
2515 switch (term
->type_val
) {
2516 case PARSE_EVENTS__TERM_TYPE_NUM
:
2517 term
->val
.num
= num
;
2519 case PARSE_EVENTS__TERM_TYPE_STR
:
2520 term
->val
.str
= str
;
2531 int parse_events_term__num(struct parse_events_term
**term
,
2532 enum parse_events__term_type type_term
,
2533 const char *config
, u64 num
,
2535 void *loc_term_
, void *loc_val_
)
2537 YYLTYPE
*loc_term
= loc_term_
;
2538 YYLTYPE
*loc_val
= loc_val_
;
2540 struct parse_events_term temp
= {
2541 .type_val
= PARSE_EVENTS__TERM_TYPE_NUM
,
2542 .type_term
= type_term
,
2543 .config
= config
? : strdup(parse_events__term_type_str(type_term
)),
2544 .no_value
= no_value
,
2545 .err_term
= loc_term
? loc_term
->first_column
: 0,
2546 .err_val
= loc_val
? loc_val
->first_column
: 0,
2549 return new_term(term
, &temp
, /*str=*/NULL
, num
);
/*
 * parse_events_term__str - build a string (TERM_TYPE_STR) term.
 *
 * @term: out parameter receiving the newly allocated term.
 * @type_term: which config term this is.
 * @config: term name (ownership presumably passes to the term; see
 *          parse_events_term__delete, which zfree()s term->config).
 * @str: string value; ownership passes to the term via new_term().
 * @loc_term_/@loc_val_: opaque bison YYLTYPE locations used to record
 *          error columns; either may be NULL.
 *
 * NOTE(review): garbled extraction -- original line 2563 (presumably the
 * `.config = config,` initializer, by analogy with
 * parse_events_term__num above) is missing from the visible text.
 */
2552 int parse_events_term__str(struct parse_events_term
**term
,
2553 enum parse_events__term_type type_term
,
2554 char *config
, char *str
,
2555 void *loc_term_
, void *loc_val_
)
/* Recover the bison location types from the opaque pointers. */
2557 YYLTYPE
*loc_term
= loc_term_
;
2558 YYLTYPE
*loc_val
= loc_val_
;
/* Stack template; new_term() copies it into a heap allocation. */
2560 struct parse_events_term temp
= {
2561 .type_val
= PARSE_EVENTS__TERM_TYPE_STR
,
2562 .type_term
= type_term
,
/* Remember source columns (0 when no location available) for errors. */
2564 .err_term
= loc_term
? loc_term
->first_column
: 0,
2565 .err_val
= loc_val
? loc_val
->first_column
: 0,
2568 return new_term(term
, &temp
, str
, /*num=*/0);
/*
 * parse_events_term__term - build a term whose value is itself a term
 * name: lhs = rhs, where both sides are known term types.
 *
 * Implemented as a string term with config NULL (so
 * parse_events_term__str derives the lhs name) and a strdup of the rhs
 * type's canonical name as the value.
 *
 * NOTE(review): garbled extraction -- the trailing location arguments of
 * the parse_events_term__str() call (presumably loc_term, loc_val) and
 * the closing brace are not visible here.
 */
2571 int parse_events_term__term(struct parse_events_term
**term
,
2572 enum parse_events__term_type term_lhs
,
2573 enum parse_events__term_type term_rhs
,
2574 void *loc_term
, void *loc_val
)
2576 return parse_events_term__str(term
, term_lhs
, NULL
,
2577 strdup(parse_events__term_type_str(term_rhs
)),
/*
 * parse_events_term__clone - deep-copy @term into a newly allocated term
 * stored in *@new.
 *
 * The template starts as a shallow struct copy; config is then strdup()ed
 * so the clone owns its own name. Numeric terms are finished immediately;
 * string terms additionally strdup() val.str, and on (presumed) failure
 * the already-duplicated config is released with zfree().
 *
 * NOTE(review): garbled extraction -- the `char *str;` declaration and the
 * NULL checks after the strdup() calls are not visible in this chunk; the
 * zfree(&temp.config) line only makes sense inside such an error path.
 */
2581 int parse_events_term__clone(struct parse_events_term
**new,
2582 const struct parse_events_term
*term
)
/* Shallow copy first; pointer members are re-duplicated below. */
2585 struct parse_events_term temp
= *term
;
2589 temp
.config
= strdup(term
->config
);
/* Numeric terms carry no owned string value: clone is complete here. */
2593 if (term
->type_val
== PARSE_EVENTS__TERM_TYPE_NUM
)
2594 return new_term(new, &temp
, /*str=*/NULL
, term
->val
.num
);
/* String terms: duplicate the value so the clone owns it. */
2596 str
= strdup(term
->val
.str
);
/* Error path: drop the config duplicated above before bailing out. */
2598 zfree(&temp
.config
);
2601 return new_term(new, &temp
, str
, /*num=*/0);
/*
 * parse_events_term__delete - release the resources owned by @term.
 *
 * Frees the owned string value (only non-NUM terms own val.str) and the
 * config name; zfree() also NULLs the pointers.
 *
 * NOTE(review): garbled extraction -- the final free of the term struct
 * itself (original line ~2610) is not visible here; confirm upstream that
 * the term allocated by new_term() is freed at the end.
 */
2604 void parse_events_term__delete(struct parse_events_term
*term
)
2606 if (term
->type_val
!= PARSE_EVENTS__TERM_TYPE_NUM
)
2607 zfree(&term
->val
.str
);
2609 zfree(&term
->config
);
/*
 * parse_events_terms__copy - clone every term on @src's list and append
 * the clones, in order, to @dest's list.
 *
 * Each element is deep-copied with parse_events_term__clone().
 *
 * NOTE(review): garbled extraction -- the `int ret;` declaration, the
 * error check after the clone call, and the final return are not visible
 * in this chunk.
 */
2613 static int parse_events_terms__copy(const struct parse_events_terms
*src
,
2614 struct parse_events_terms
*dest
)
2616 struct parse_events_term
*term
;
2618 list_for_each_entry (term
, &src
->terms
, list
) {
2619 struct parse_events_term
*n
;
2622 ret
= parse_events_term__clone(&n
, term
);
/* Preserve source ordering by appending at the tail. */
2626 list_add_tail(&n
->list
, &dest
->terms
);
2631 void parse_events_terms__init(struct parse_events_terms
*terms
)
2633 INIT_LIST_HEAD(&terms
->terms
);
2636 void parse_events_terms__exit(struct parse_events_terms
*terms
)
2638 struct parse_events_term
*term
, *h
;
2640 list_for_each_entry_safe(term
, h
, &terms
->terms
, list
) {
2641 list_del_init(&term
->list
);
2642 parse_events_term__delete(term
);
/*
 * parse_events_terms__delete - destroy a heap-allocated terms container:
 * release all of its terms via parse_events_terms__exit().
 *
 * NOTE(review): garbled extraction -- the expected NULL-pointer guard and
 * the final free of @terms itself are not visible in this chunk; confirm
 * upstream.
 */
2646 void parse_events_terms__delete(struct parse_events_terms
*terms
)
2650 parse_events_terms__exit(terms
);
/*
 * parse_events_terms__to_strbuf - render @terms into @sb as a
 * comma-separated "name" / "name=value" list.
 *
 * Per term:
 *  - NUM terms with no_value set print just the config name (the value is
 *    asserted to be 1, i.e. an implied boolean flag);
 *  - other NUM terms print "config=<hex value>" (PRIx64, "%#" prefix);
 *  - STR terms print "config=" (when a config name exists) or fall back
 *    to the canonical term-type name for in-range type_term values, then
 *    append the string value.
 *
 * NOTE(review): garbled extraction -- the `int ret;` declaration, the
 * separator-suppression for the first element, several braces and the
 * error-propagation checks between strbuf calls are not visible in this
 * chunk; the control flow below cannot be fully reconstructed from here.
 */
2654 int parse_events_terms__to_strbuf(const struct parse_events_terms
*terms
, struct strbuf
*sb
)
2656 struct parse_events_term
*term
;
2662 list_for_each_entry(term
, &terms
->terms
, list
) {
/* Comma separator between terms. */
2666 ret
= strbuf_addch(sb
, ',');
2672 if (term
->type_val
== PARSE_EVENTS__TERM_TYPE_NUM
)
2673 if (term
->no_value
) {
/* A valueless numeric term is a flag; its stored value must be 1. */
2674 assert(term
->val
.num
== 1);
2675 ret
= strbuf_addf(sb
, "%s", term
->config
);
2677 ret
= strbuf_addf(sb
, "%s=%#"PRIx64
, term
->config
, term
->val
.num
);
2678 else if (term
->type_val
== PARSE_EVENTS__TERM_TYPE_STR
) {
2680 ret
= strbuf_addf(sb
, "%s=", term
->config
);
/* No config name: use the canonical name for in-range term types. */
2683 } else if ((unsigned int)term
->type_term
< __PARSE_EVENTS__TERM_TYPE_NR
) {
2684 ret
= strbuf_addf(sb
, "%s=",
2685 parse_events__term_type_str(term
->type_term
));
/* String terms always carry a value. */
2689 assert(!term
->no_value
);
2690 ret
= strbuf_addf(sb
, "%s", term
->val
.str
);
/*
 * config_terms_list - fill @buf with a separated list of all generally
 * available config-term names, iterating every term type and skipping
 * those config_term_avail() rejects (NULL event: availability in general).
 *
 * The length guard leaves room for a separator and the NUL terminator
 * ("+ 2") and stops before overflowing @buf_sz.
 *
 * NOTE(review): garbled extraction -- the loop-variable declaration, the
 * initial buf[0] termination and the actual append (strcat or similar)
 * are not visible in this chunk.
 */
2698 static void config_terms_list(char *buf
, size_t buf_sz
)
2704 for (i
= 0; i
< __PARSE_EVENTS__TERM_TYPE_NR
; i
++) {
2705 const char *name
= parse_events__term_type_str(i
);
2707 if (!config_term_avail(i
, NULL
))
/* Stop rather than truncate when the next name would not fit. */
2714 if (strlen(buf
) + strlen(name
) + 2 >= buf_sz
)
2726 * Return a string containing the valid config terms of an event.
2727 * @additional_terms: For terms such as PMU sysfs terms.
2729 char *parse_events_formats_error_string(char *additional_terms
)
2732 /* "no-overwrite" is the longest name */
2733 char static_terms
[__PARSE_EVENTS__TERM_TYPE_NR
*
2734 (sizeof("no-overwrite") - 1)];
2736 config_terms_list(static_terms
, sizeof(static_terms
));
2738 if (additional_terms
) {
2739 if (asprintf(&str
, "valid terms: %s,%s",
2740 additional_terms
, static_terms
) < 0)
2743 if (asprintf(&str
, "valid terms: %s", static_terms
) < 0)