Merge tag 'trace-printf-v6.13' of git://git.kernel.org/pub/scm/linux/kernel/git/trace...
[drm/drm-misc.git] / tools / perf / util / parse-events.c
blobafeb8d815bbff26d96451ead9bc8d91cda3c67fa
// SPDX-License-Identifier: GPL-2.0
#include <linux/hw_breakpoint.h>
#include <linux/err.h>
#include <linux/list_sort.h>
#include <linux/zalloc.h>
#include <dirent.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/param.h>
#include "term.h"
#include "env.h"
#include "evlist.h"
#include "evsel.h"
#include <subcmd/parse-options.h>
#include "parse-events.h"
#include "string2.h"
#include "strbuf.h"
#include "debug.h"
#include <api/fs/tracing_path.h>
#include <perf/cpumap.h>
#include <util/parse-events-bison.h>
#include <util/parse-events-flex.h>
#include "pmu.h"
#include "pmus.h"
#include "asm/bug.h"
#include "util/parse-branch-options.h"
#include "util/evsel_config.h"
#include "util/event.h"
#include "util/bpf-filter.h"
#include "util/util.h"
#include "tracepoint.h"
#define MAX_NAME_LEN 100

/* Forward declarations: defined later in this file. */
static int get_config_terms(const struct parse_events_terms *head_config,
			    struct list_head *head_terms);
static int parse_events_terms__copy(const struct parse_events_terms *src,
				    struct parse_events_terms *dest);
40 const struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
41 [PERF_COUNT_HW_CPU_CYCLES] = {
42 .symbol = "cpu-cycles",
43 .alias = "cycles",
45 [PERF_COUNT_HW_INSTRUCTIONS] = {
46 .symbol = "instructions",
47 .alias = "",
49 [PERF_COUNT_HW_CACHE_REFERENCES] = {
50 .symbol = "cache-references",
51 .alias = "",
53 [PERF_COUNT_HW_CACHE_MISSES] = {
54 .symbol = "cache-misses",
55 .alias = "",
57 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {
58 .symbol = "branch-instructions",
59 .alias = "branches",
61 [PERF_COUNT_HW_BRANCH_MISSES] = {
62 .symbol = "branch-misses",
63 .alias = "",
65 [PERF_COUNT_HW_BUS_CYCLES] = {
66 .symbol = "bus-cycles",
67 .alias = "",
69 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = {
70 .symbol = "stalled-cycles-frontend",
71 .alias = "idle-cycles-frontend",
73 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = {
74 .symbol = "stalled-cycles-backend",
75 .alias = "idle-cycles-backend",
77 [PERF_COUNT_HW_REF_CPU_CYCLES] = {
78 .symbol = "ref-cycles",
79 .alias = "",
83 const struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
84 [PERF_COUNT_SW_CPU_CLOCK] = {
85 .symbol = "cpu-clock",
86 .alias = "",
88 [PERF_COUNT_SW_TASK_CLOCK] = {
89 .symbol = "task-clock",
90 .alias = "",
92 [PERF_COUNT_SW_PAGE_FAULTS] = {
93 .symbol = "page-faults",
94 .alias = "faults",
96 [PERF_COUNT_SW_CONTEXT_SWITCHES] = {
97 .symbol = "context-switches",
98 .alias = "cs",
100 [PERF_COUNT_SW_CPU_MIGRATIONS] = {
101 .symbol = "cpu-migrations",
102 .alias = "migrations",
104 [PERF_COUNT_SW_PAGE_FAULTS_MIN] = {
105 .symbol = "minor-faults",
106 .alias = "",
108 [PERF_COUNT_SW_PAGE_FAULTS_MAJ] = {
109 .symbol = "major-faults",
110 .alias = "",
112 [PERF_COUNT_SW_ALIGNMENT_FAULTS] = {
113 .symbol = "alignment-faults",
114 .alias = "",
116 [PERF_COUNT_SW_EMULATION_FAULTS] = {
117 .symbol = "emulation-faults",
118 .alias = "",
120 [PERF_COUNT_SW_DUMMY] = {
121 .symbol = "dummy",
122 .alias = "",
124 [PERF_COUNT_SW_BPF_OUTPUT] = {
125 .symbol = "bpf-output",
126 .alias = "",
128 [PERF_COUNT_SW_CGROUP_SWITCHES] = {
129 .symbol = "cgroup-switches",
130 .alias = "",
/* Return a human-readable name for a PERF_TYPE_* attr type. */
const char *event_type(int type)
{
	switch (type) {
	case PERF_TYPE_HARDWARE:
		return "hardware";

	case PERF_TYPE_SOFTWARE:
		return "software";

	case PERF_TYPE_TRACEPOINT:
		return "tracepoint";

	case PERF_TYPE_HW_CACHE:
		return "hardware-cache";

	default:
		break;
	}

	return "unknown";
}
156 static char *get_config_str(const struct parse_events_terms *head_terms,
157 enum parse_events__term_type type_term)
159 struct parse_events_term *term;
161 if (!head_terms)
162 return NULL;
164 list_for_each_entry(term, &head_terms->terms, list)
165 if (term->type_term == type_term)
166 return term->val.str;
168 return NULL;
171 static char *get_config_metric_id(const struct parse_events_terms *head_terms)
173 return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_METRIC_ID);
176 static char *get_config_name(const struct parse_events_terms *head_terms)
178 return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME);
182 * fix_raw - For each raw term see if there is an event (aka alias) in pmu that
183 * matches the raw's string value. If the string value matches an
184 * event then change the term to be an event, if not then change it to
185 * be a config term. For example, "read" may be an event of the PMU or
186 * a raw hex encoding of 0xead. The fix-up is done late so the PMU of
187 * the event can be determined and we don't need to scan all PMUs
188 * ahead-of-time.
189 * @config_terms: the list of terms that may contain a raw term.
190 * @pmu: the PMU to scan for events from.
192 static void fix_raw(struct parse_events_terms *config_terms, struct perf_pmu *pmu)
194 struct parse_events_term *term;
196 list_for_each_entry(term, &config_terms->terms, list) {
197 u64 num;
199 if (term->type_term != PARSE_EVENTS__TERM_TYPE_RAW)
200 continue;
202 if (perf_pmu__have_event(pmu, term->val.str)) {
203 zfree(&term->config);
204 term->config = term->val.str;
205 term->type_val = PARSE_EVENTS__TERM_TYPE_NUM;
206 term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
207 term->val.num = 1;
208 term->no_value = true;
209 continue;
212 zfree(&term->config);
213 term->config = strdup("config");
214 errno = 0;
215 num = strtoull(term->val.str + 1, NULL, 16);
216 assert(errno == 0);
217 free(term->val.str);
218 term->type_val = PARSE_EVENTS__TERM_TYPE_NUM;
219 term->type_term = PARSE_EVENTS__TERM_TYPE_CONFIG;
220 term->val.num = num;
221 term->no_value = false;
225 static struct evsel *
226 __add_event(struct list_head *list, int *idx,
227 struct perf_event_attr *attr,
228 bool init_attr,
229 const char *name, const char *metric_id, struct perf_pmu *pmu,
230 struct list_head *config_terms, bool auto_merge_stats,
231 struct perf_cpu_map *cpu_list, u64 alternate_hw_config)
233 struct evsel *evsel;
234 struct perf_cpu_map *cpus = perf_cpu_map__is_empty(cpu_list) && pmu ? pmu->cpus : cpu_list;
236 cpus = perf_cpu_map__get(cpus);
237 if (pmu)
238 perf_pmu__warn_invalid_formats(pmu);
240 if (pmu && (attr->type == PERF_TYPE_RAW || attr->type >= PERF_TYPE_MAX)) {
241 perf_pmu__warn_invalid_config(pmu, attr->config, name,
242 PERF_PMU_FORMAT_VALUE_CONFIG, "config");
243 perf_pmu__warn_invalid_config(pmu, attr->config1, name,
244 PERF_PMU_FORMAT_VALUE_CONFIG1, "config1");
245 perf_pmu__warn_invalid_config(pmu, attr->config2, name,
246 PERF_PMU_FORMAT_VALUE_CONFIG2, "config2");
247 perf_pmu__warn_invalid_config(pmu, attr->config3, name,
248 PERF_PMU_FORMAT_VALUE_CONFIG3, "config3");
250 if (init_attr)
251 event_attr_init(attr);
253 evsel = evsel__new_idx(attr, *idx);
254 if (!evsel) {
255 perf_cpu_map__put(cpus);
256 return NULL;
259 (*idx)++;
260 evsel->core.cpus = cpus;
261 evsel->core.own_cpus = perf_cpu_map__get(cpus);
262 evsel->core.requires_cpu = pmu ? pmu->is_uncore : false;
263 evsel->core.is_pmu_core = pmu ? pmu->is_core : false;
264 evsel->auto_merge_stats = auto_merge_stats;
265 evsel->pmu = pmu;
266 evsel->alternate_hw_config = alternate_hw_config;
268 if (name)
269 evsel->name = strdup(name);
271 if (metric_id)
272 evsel->metric_id = strdup(metric_id);
274 if (config_terms)
275 list_splice_init(config_terms, &evsel->config_terms);
277 if (list)
278 list_add_tail(&evsel->core.node, list);
280 return evsel;
283 struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
284 const char *name, const char *metric_id,
285 struct perf_pmu *pmu)
287 return __add_event(/*list=*/NULL, &idx, attr, /*init_attr=*/false, name,
288 metric_id, pmu, /*config_terms=*/NULL,
289 /*auto_merge_stats=*/false, /*cpu_list=*/NULL,
290 /*alternate_hw_config=*/PERF_COUNT_HW_MAX);
293 static int add_event(struct list_head *list, int *idx,
294 struct perf_event_attr *attr, const char *name,
295 const char *metric_id, struct list_head *config_terms,
296 u64 alternate_hw_config)
298 return __add_event(list, idx, attr, /*init_attr*/true, name, metric_id,
299 /*pmu=*/NULL, config_terms,
300 /*auto_merge_stats=*/false, /*cpu_list=*/NULL,
301 alternate_hw_config) ? 0 : -ENOMEM;
305 * parse_aliases - search names for entries beginning or equalling str ignoring
306 * case. If mutliple entries in names match str then the longest
307 * is chosen.
308 * @str: The needle to look for.
309 * @names: The haystack to search.
310 * @size: The size of the haystack.
311 * @longest: Out argument giving the length of the matching entry.
313 static int parse_aliases(const char *str, const char *const names[][EVSEL__MAX_ALIASES], int size,
314 int *longest)
316 *longest = -1;
317 for (int i = 0; i < size; i++) {
318 for (int j = 0; j < EVSEL__MAX_ALIASES && names[i][j]; j++) {
319 int n = strlen(names[i][j]);
321 if (n > *longest && !strncasecmp(str, names[i][j], n))
322 *longest = n;
324 if (*longest > 0)
325 return i;
328 return -1;
/* Per-term validation/apply callback used by config_attr(). */
typedef int config_term_func_t(struct perf_event_attr *attr,
			       struct parse_events_term *term,
			       struct parse_events_error *err);
static int config_term_common(struct perf_event_attr *attr,
			      struct parse_events_term *term,
			      struct parse_events_error *err);
static int config_attr(struct perf_event_attr *attr,
		       const struct parse_events_terms *head,
		       struct parse_events_error *err,
		       config_term_func_t config_term);
343 * parse_events__decode_legacy_cache - Search name for the legacy cache event
344 * name composed of 1, 2 or 3 hyphen
345 * separated sections. The first section is
346 * the cache type while the others are the
347 * optional op and optional result. To make
348 * life hard the names in the table also
349 * contain hyphens and the longest name
350 * should always be selected.
352 int parse_events__decode_legacy_cache(const char *name, int extended_pmu_type, __u64 *config)
354 int len, cache_type = -1, cache_op = -1, cache_result = -1;
355 const char *name_end = &name[strlen(name) + 1];
356 const char *str = name;
358 cache_type = parse_aliases(str, evsel__hw_cache, PERF_COUNT_HW_CACHE_MAX, &len);
359 if (cache_type == -1)
360 return -EINVAL;
361 str += len + 1;
363 if (str < name_end) {
364 cache_op = parse_aliases(str, evsel__hw_cache_op,
365 PERF_COUNT_HW_CACHE_OP_MAX, &len);
366 if (cache_op >= 0) {
367 if (!evsel__is_cache_op_valid(cache_type, cache_op))
368 return -EINVAL;
369 str += len + 1;
370 } else {
371 cache_result = parse_aliases(str, evsel__hw_cache_result,
372 PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
373 if (cache_result >= 0)
374 str += len + 1;
377 if (str < name_end) {
378 if (cache_op < 0) {
379 cache_op = parse_aliases(str, evsel__hw_cache_op,
380 PERF_COUNT_HW_CACHE_OP_MAX, &len);
381 if (cache_op >= 0) {
382 if (!evsel__is_cache_op_valid(cache_type, cache_op))
383 return -EINVAL;
385 } else if (cache_result < 0) {
386 cache_result = parse_aliases(str, evsel__hw_cache_result,
387 PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
392 * Fall back to reads:
394 if (cache_op == -1)
395 cache_op = PERF_COUNT_HW_CACHE_OP_READ;
398 * Fall back to accesses:
400 if (cache_result == -1)
401 cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;
403 *config = cache_type | (cache_op << 8) | (cache_result << 16);
404 if (perf_pmus__supports_extended_type())
405 *config |= (__u64)extended_pmu_type << PERF_PMU_TYPE_SHIFT;
406 return 0;
410 * parse_events__filter_pmu - returns false if a wildcard PMU should be
411 * considered, true if it should be filtered.
413 bool parse_events__filter_pmu(const struct parse_events_state *parse_state,
414 const struct perf_pmu *pmu)
416 if (parse_state->pmu_filter == NULL)
417 return false;
419 return strcmp(parse_state->pmu_filter, pmu->name) != 0;
422 static int parse_events_add_pmu(struct parse_events_state *parse_state,
423 struct list_head *list, struct perf_pmu *pmu,
424 const struct parse_events_terms *const_parsed_terms,
425 bool auto_merge_stats, u64 alternate_hw_config);
427 int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
428 struct parse_events_state *parse_state,
429 struct parse_events_terms *parsed_terms)
431 struct perf_pmu *pmu = NULL;
432 bool found_supported = false;
433 const char *config_name = get_config_name(parsed_terms);
434 const char *metric_id = get_config_metric_id(parsed_terms);
436 while ((pmu = perf_pmus__scan(pmu)) != NULL) {
437 LIST_HEAD(config_terms);
438 struct perf_event_attr attr;
439 int ret;
441 if (parse_events__filter_pmu(parse_state, pmu))
442 continue;
444 if (perf_pmu__have_event(pmu, name)) {
446 * The PMU has the event so add as not a legacy cache
447 * event.
449 ret = parse_events_add_pmu(parse_state, list, pmu,
450 parsed_terms,
451 perf_pmu__auto_merge_stats(pmu),
452 /*alternate_hw_config=*/PERF_COUNT_HW_MAX);
453 if (ret)
454 return ret;
455 continue;
458 if (!pmu->is_core) {
459 /* Legacy cache events are only supported by core PMUs. */
460 continue;
463 memset(&attr, 0, sizeof(attr));
464 attr.type = PERF_TYPE_HW_CACHE;
466 ret = parse_events__decode_legacy_cache(name, pmu->type, &attr.config);
467 if (ret)
468 return ret;
470 found_supported = true;
472 if (parsed_terms) {
473 if (config_attr(&attr, parsed_terms, parse_state->error,
474 config_term_common))
475 return -EINVAL;
477 if (get_config_terms(parsed_terms, &config_terms))
478 return -ENOMEM;
481 if (__add_event(list, idx, &attr, /*init_attr*/true, config_name ?: name,
482 metric_id, pmu, &config_terms, /*auto_merge_stats=*/false,
483 /*cpu_list=*/NULL,
484 /*alternate_hw_config=*/PERF_COUNT_HW_MAX) == NULL)
485 return -ENOMEM;
487 free_config_terms(&config_terms);
489 return found_supported ? 0 : -EINVAL;
492 #ifdef HAVE_LIBTRACEEVENT
493 static void tracepoint_error(struct parse_events_error *e, int err,
494 const char *sys, const char *name, int column)
496 const char *str;
497 char help[BUFSIZ];
499 if (!e)
500 return;
503 * We get error directly from syscall errno ( > 0),
504 * or from encoded pointer's error ( < 0).
506 err = abs(err);
508 switch (err) {
509 case EACCES:
510 str = "can't access trace events";
511 break;
512 case ENOENT:
513 str = "unknown tracepoint";
514 break;
515 default:
516 str = "failed to add tracepoint";
517 break;
520 tracing_path__strerror_open_tp(err, help, sizeof(help), sys, name);
521 parse_events_error__handle(e, column, strdup(str), strdup(help));
524 static int add_tracepoint(struct parse_events_state *parse_state,
525 struct list_head *list,
526 const char *sys_name, const char *evt_name,
527 struct parse_events_error *err,
528 struct parse_events_terms *head_config, void *loc_)
530 YYLTYPE *loc = loc_;
531 struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, parse_state->idx++,
532 !parse_state->fake_tp);
534 if (IS_ERR(evsel)) {
535 tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name, loc->first_column);
536 return PTR_ERR(evsel);
539 if (head_config) {
540 LIST_HEAD(config_terms);
542 if (get_config_terms(head_config, &config_terms))
543 return -ENOMEM;
544 list_splice(&config_terms, &evsel->config_terms);
547 list_add_tail(&evsel->core.node, list);
548 return 0;
551 static int add_tracepoint_multi_event(struct parse_events_state *parse_state,
552 struct list_head *list,
553 const char *sys_name, const char *evt_name,
554 struct parse_events_error *err,
555 struct parse_events_terms *head_config, YYLTYPE *loc)
557 char *evt_path;
558 struct dirent *evt_ent;
559 DIR *evt_dir;
560 int ret = 0, found = 0;
562 evt_path = get_events_file(sys_name);
563 if (!evt_path) {
564 tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
565 return -1;
567 evt_dir = opendir(evt_path);
568 if (!evt_dir) {
569 put_events_file(evt_path);
570 tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
571 return -1;
574 while (!ret && (evt_ent = readdir(evt_dir))) {
575 if (!strcmp(evt_ent->d_name, ".")
576 || !strcmp(evt_ent->d_name, "..")
577 || !strcmp(evt_ent->d_name, "enable")
578 || !strcmp(evt_ent->d_name, "filter"))
579 continue;
581 if (!strglobmatch(evt_ent->d_name, evt_name))
582 continue;
584 found++;
586 ret = add_tracepoint(parse_state, list, sys_name, evt_ent->d_name,
587 err, head_config, loc);
590 if (!found) {
591 tracepoint_error(err, ENOENT, sys_name, evt_name, loc->first_column);
592 ret = -1;
595 put_events_file(evt_path);
596 closedir(evt_dir);
597 return ret;
600 static int add_tracepoint_event(struct parse_events_state *parse_state,
601 struct list_head *list,
602 const char *sys_name, const char *evt_name,
603 struct parse_events_error *err,
604 struct parse_events_terms *head_config, YYLTYPE *loc)
606 return strpbrk(evt_name, "*?") ?
607 add_tracepoint_multi_event(parse_state, list, sys_name, evt_name,
608 err, head_config, loc) :
609 add_tracepoint(parse_state, list, sys_name, evt_name,
610 err, head_config, loc);
613 static int add_tracepoint_multi_sys(struct parse_events_state *parse_state,
614 struct list_head *list,
615 const char *sys_name, const char *evt_name,
616 struct parse_events_error *err,
617 struct parse_events_terms *head_config, YYLTYPE *loc)
619 struct dirent *events_ent;
620 DIR *events_dir;
621 int ret = 0;
623 events_dir = tracing_events__opendir();
624 if (!events_dir) {
625 tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
626 return -1;
629 while (!ret && (events_ent = readdir(events_dir))) {
630 if (!strcmp(events_ent->d_name, ".")
631 || !strcmp(events_ent->d_name, "..")
632 || !strcmp(events_ent->d_name, "enable")
633 || !strcmp(events_ent->d_name, "header_event")
634 || !strcmp(events_ent->d_name, "header_page"))
635 continue;
637 if (!strglobmatch(events_ent->d_name, sys_name))
638 continue;
640 ret = add_tracepoint_event(parse_state, list, events_ent->d_name,
641 evt_name, err, head_config, loc);
644 closedir(events_dir);
645 return ret;
647 #endif /* HAVE_LIBTRACEEVENT */
/*
 * Default length for an execute breakpoint. On i386 the value depends on
 * whether the running kernel is 64-bit, so probe it once and cache the result.
 */
size_t default_breakpoint_len(void)
{
#if defined(__i386__)
	static int len;

	if (len == 0) {
		struct perf_env env = {};

		perf_env__init(&env);
		len = perf_env__kernel_is_64_bit(&env) ? sizeof(u64) : sizeof(long);
		perf_env__exit(&env);
	}
	return len;
#elif defined(__aarch64__)
	return 4;
#else
	return sizeof(long);
#endif
}
/*
 * Parse a breakpoint type string of up to three of 'r', 'w', 'x' into
 * attr->bp_type. Each flag may appear at most once; a NULL/empty string
 * defaults to read|write. Returns 0 on success, -EINVAL on bad input.
 */
static int
parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
{
	int i;

	for (i = 0; i < 3; i++) {
		if (!type || !type[i])
			break;

#define CHECK_SET_TYPE(bit)		\
do {					\
	if (attr->bp_type & bit)	\
		return -EINVAL;		\
	else				\
		attr->bp_type |= bit;	\
} while (0)

		switch (type[i]) {
		case 'r':
			CHECK_SET_TYPE(HW_BREAKPOINT_R);
			break;
		case 'w':
			CHECK_SET_TYPE(HW_BREAKPOINT_W);
			break;
		case 'x':
			CHECK_SET_TYPE(HW_BREAKPOINT_X);
			break;
		default:
			return -EINVAL;
		}
	}

#undef CHECK_SET_TYPE

	if (!attr->bp_type) /* Default */
		attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;

	return 0;
}
709 int parse_events_add_breakpoint(struct parse_events_state *parse_state,
710 struct list_head *list,
711 u64 addr, char *type, u64 len,
712 struct parse_events_terms *head_config)
714 struct perf_event_attr attr;
715 LIST_HEAD(config_terms);
716 const char *name;
718 memset(&attr, 0, sizeof(attr));
719 attr.bp_addr = addr;
721 if (parse_breakpoint_type(type, &attr))
722 return -EINVAL;
724 /* Provide some defaults if len is not specified */
725 if (!len) {
726 if (attr.bp_type == HW_BREAKPOINT_X)
727 len = default_breakpoint_len();
728 else
729 len = HW_BREAKPOINT_LEN_4;
732 attr.bp_len = len;
734 attr.type = PERF_TYPE_BREAKPOINT;
735 attr.sample_period = 1;
737 if (head_config) {
738 if (config_attr(&attr, head_config, parse_state->error,
739 config_term_common))
740 return -EINVAL;
742 if (get_config_terms(head_config, &config_terms))
743 return -ENOMEM;
746 name = get_config_name(head_config);
748 return add_event(list, &parse_state->idx, &attr, name, /*mertic_id=*/NULL,
749 &config_terms, /*alternate_hw_config=*/PERF_COUNT_HW_MAX);
752 static int check_type_val(struct parse_events_term *term,
753 struct parse_events_error *err,
754 enum parse_events__term_val_type type)
756 if (type == term->type_val)
757 return 0;
759 if (err) {
760 parse_events_error__handle(err, term->err_val,
761 type == PARSE_EVENTS__TERM_TYPE_NUM
762 ? strdup("expected numeric value")
763 : strdup("expected string value"),
764 NULL);
766 return -EINVAL;
769 static bool config_term_shrinked;
771 const char *parse_events__term_type_str(enum parse_events__term_type term_type)
774 * Update according to parse-events.l
776 static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = {
777 [PARSE_EVENTS__TERM_TYPE_USER] = "<sysfs term>",
778 [PARSE_EVENTS__TERM_TYPE_CONFIG] = "config",
779 [PARSE_EVENTS__TERM_TYPE_CONFIG1] = "config1",
780 [PARSE_EVENTS__TERM_TYPE_CONFIG2] = "config2",
781 [PARSE_EVENTS__TERM_TYPE_CONFIG3] = "config3",
782 [PARSE_EVENTS__TERM_TYPE_NAME] = "name",
783 [PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD] = "period",
784 [PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ] = "freq",
785 [PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE] = "branch_type",
786 [PARSE_EVENTS__TERM_TYPE_TIME] = "time",
787 [PARSE_EVENTS__TERM_TYPE_CALLGRAPH] = "call-graph",
788 [PARSE_EVENTS__TERM_TYPE_STACKSIZE] = "stack-size",
789 [PARSE_EVENTS__TERM_TYPE_NOINHERIT] = "no-inherit",
790 [PARSE_EVENTS__TERM_TYPE_INHERIT] = "inherit",
791 [PARSE_EVENTS__TERM_TYPE_MAX_STACK] = "max-stack",
792 [PARSE_EVENTS__TERM_TYPE_MAX_EVENTS] = "nr",
793 [PARSE_EVENTS__TERM_TYPE_OVERWRITE] = "overwrite",
794 [PARSE_EVENTS__TERM_TYPE_NOOVERWRITE] = "no-overwrite",
795 [PARSE_EVENTS__TERM_TYPE_DRV_CFG] = "driver-config",
796 [PARSE_EVENTS__TERM_TYPE_PERCORE] = "percore",
797 [PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT] = "aux-output",
798 [PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE] = "aux-sample-size",
799 [PARSE_EVENTS__TERM_TYPE_METRIC_ID] = "metric-id",
800 [PARSE_EVENTS__TERM_TYPE_RAW] = "raw",
801 [PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE] = "legacy-cache",
802 [PARSE_EVENTS__TERM_TYPE_HARDWARE] = "hardware",
804 if ((unsigned int)term_type >= __PARSE_EVENTS__TERM_TYPE_NR)
805 return "unknown term";
807 return config_term_names[term_type];
810 static bool
811 config_term_avail(enum parse_events__term_type term_type, struct parse_events_error *err)
813 char *err_str;
815 if (term_type < 0 || term_type >= __PARSE_EVENTS__TERM_TYPE_NR) {
816 parse_events_error__handle(err, -1,
817 strdup("Invalid term_type"), NULL);
818 return false;
820 if (!config_term_shrinked)
821 return true;
823 switch (term_type) {
824 case PARSE_EVENTS__TERM_TYPE_CONFIG:
825 case PARSE_EVENTS__TERM_TYPE_CONFIG1:
826 case PARSE_EVENTS__TERM_TYPE_CONFIG2:
827 case PARSE_EVENTS__TERM_TYPE_CONFIG3:
828 case PARSE_EVENTS__TERM_TYPE_NAME:
829 case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
830 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
831 case PARSE_EVENTS__TERM_TYPE_PERCORE:
832 return true;
833 case PARSE_EVENTS__TERM_TYPE_USER:
834 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
835 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
836 case PARSE_EVENTS__TERM_TYPE_TIME:
837 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
838 case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
839 case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
840 case PARSE_EVENTS__TERM_TYPE_INHERIT:
841 case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
842 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
843 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
844 case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
845 case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
846 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
847 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
848 case PARSE_EVENTS__TERM_TYPE_RAW:
849 case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
850 case PARSE_EVENTS__TERM_TYPE_HARDWARE:
851 default:
852 if (!err)
853 return false;
855 /* term_type is validated so indexing is safe */
856 if (asprintf(&err_str, "'%s' is not usable in 'perf stat'",
857 parse_events__term_type_str(term_type)) >= 0)
858 parse_events_error__handle(err, -1, err_str, NULL);
859 return false;
863 void parse_events__shrink_config_terms(void)
865 config_term_shrinked = true;
868 static int config_term_common(struct perf_event_attr *attr,
869 struct parse_events_term *term,
870 struct parse_events_error *err)
872 #define CHECK_TYPE_VAL(type) \
873 do { \
874 if (check_type_val(term, err, PARSE_EVENTS__TERM_TYPE_ ## type)) \
875 return -EINVAL; \
876 } while (0)
878 switch (term->type_term) {
879 case PARSE_EVENTS__TERM_TYPE_CONFIG:
880 CHECK_TYPE_VAL(NUM);
881 attr->config = term->val.num;
882 break;
883 case PARSE_EVENTS__TERM_TYPE_CONFIG1:
884 CHECK_TYPE_VAL(NUM);
885 attr->config1 = term->val.num;
886 break;
887 case PARSE_EVENTS__TERM_TYPE_CONFIG2:
888 CHECK_TYPE_VAL(NUM);
889 attr->config2 = term->val.num;
890 break;
891 case PARSE_EVENTS__TERM_TYPE_CONFIG3:
892 CHECK_TYPE_VAL(NUM);
893 attr->config3 = term->val.num;
894 break;
895 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
896 CHECK_TYPE_VAL(NUM);
897 break;
898 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
899 CHECK_TYPE_VAL(NUM);
900 break;
901 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
902 CHECK_TYPE_VAL(STR);
903 if (strcmp(term->val.str, "no") &&
904 parse_branch_str(term->val.str,
905 &attr->branch_sample_type)) {
906 parse_events_error__handle(err, term->err_val,
907 strdup("invalid branch sample type"),
908 NULL);
909 return -EINVAL;
911 break;
912 case PARSE_EVENTS__TERM_TYPE_TIME:
913 CHECK_TYPE_VAL(NUM);
914 if (term->val.num > 1) {
915 parse_events_error__handle(err, term->err_val,
916 strdup("expected 0 or 1"),
917 NULL);
918 return -EINVAL;
920 break;
921 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
922 CHECK_TYPE_VAL(STR);
923 break;
924 case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
925 CHECK_TYPE_VAL(NUM);
926 break;
927 case PARSE_EVENTS__TERM_TYPE_INHERIT:
928 CHECK_TYPE_VAL(NUM);
929 break;
930 case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
931 CHECK_TYPE_VAL(NUM);
932 break;
933 case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
934 CHECK_TYPE_VAL(NUM);
935 break;
936 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
937 CHECK_TYPE_VAL(NUM);
938 break;
939 case PARSE_EVENTS__TERM_TYPE_NAME:
940 CHECK_TYPE_VAL(STR);
941 break;
942 case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
943 CHECK_TYPE_VAL(STR);
944 break;
945 case PARSE_EVENTS__TERM_TYPE_RAW:
946 CHECK_TYPE_VAL(STR);
947 break;
948 case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
949 CHECK_TYPE_VAL(NUM);
950 break;
951 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
952 CHECK_TYPE_VAL(NUM);
953 break;
954 case PARSE_EVENTS__TERM_TYPE_PERCORE:
955 CHECK_TYPE_VAL(NUM);
956 if ((unsigned int)term->val.num > 1) {
957 parse_events_error__handle(err, term->err_val,
958 strdup("expected 0 or 1"),
959 NULL);
960 return -EINVAL;
962 break;
963 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
964 CHECK_TYPE_VAL(NUM);
965 break;
966 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
967 CHECK_TYPE_VAL(NUM);
968 if (term->val.num > UINT_MAX) {
969 parse_events_error__handle(err, term->err_val,
970 strdup("too big"),
971 NULL);
972 return -EINVAL;
974 break;
975 case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
976 case PARSE_EVENTS__TERM_TYPE_USER:
977 case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
978 case PARSE_EVENTS__TERM_TYPE_HARDWARE:
979 default:
980 parse_events_error__handle(err, term->err_term,
981 strdup(parse_events__term_type_str(term->type_term)),
982 parse_events_formats_error_string(NULL));
983 return -EINVAL;
987 * Check term availability after basic checking so
988 * PARSE_EVENTS__TERM_TYPE_USER can be found and filtered.
990 * If check availability at the entry of this function,
991 * user will see "'<sysfs term>' is not usable in 'perf stat'"
992 * if an invalid config term is provided for legacy events
993 * (for example, instructions/badterm/...), which is confusing.
995 if (!config_term_avail(term->type_term, err))
996 return -EINVAL;
997 return 0;
998 #undef CHECK_TYPE_VAL
1001 static int config_term_pmu(struct perf_event_attr *attr,
1002 struct parse_events_term *term,
1003 struct parse_events_error *err)
1005 if (term->type_term == PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE) {
1006 struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);
1008 if (!pmu) {
1009 char *err_str;
1011 if (asprintf(&err_str, "Failed to find PMU for type %d", attr->type) >= 0)
1012 parse_events_error__handle(err, term->err_term,
1013 err_str, /*help=*/NULL);
1014 return -EINVAL;
1017 * Rewrite the PMU event to a legacy cache one unless the PMU
1018 * doesn't support legacy cache events or the event is present
1019 * within the PMU.
1021 if (perf_pmu__supports_legacy_cache(pmu) &&
1022 !perf_pmu__have_event(pmu, term->config)) {
1023 attr->type = PERF_TYPE_HW_CACHE;
1024 return parse_events__decode_legacy_cache(term->config, pmu->type,
1025 &attr->config);
1026 } else {
1027 term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
1028 term->no_value = true;
1031 if (term->type_term == PARSE_EVENTS__TERM_TYPE_HARDWARE) {
1032 struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);
1034 if (!pmu) {
1035 char *err_str;
1037 if (asprintf(&err_str, "Failed to find PMU for type %d", attr->type) >= 0)
1038 parse_events_error__handle(err, term->err_term,
1039 err_str, /*help=*/NULL);
1040 return -EINVAL;
1043 * If the PMU has a sysfs or json event prefer it over
1044 * legacy. ARM requires this.
1046 if (perf_pmu__have_event(pmu, term->config)) {
1047 term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
1048 term->no_value = true;
1049 term->alternate_hw_config = true;
1050 } else {
1051 attr->type = PERF_TYPE_HARDWARE;
1052 attr->config = term->val.num;
1053 if (perf_pmus__supports_extended_type())
1054 attr->config |= (__u64)pmu->type << PERF_PMU_TYPE_SHIFT;
1056 return 0;
1058 if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER ||
1059 term->type_term == PARSE_EVENTS__TERM_TYPE_DRV_CFG) {
1061 * Always succeed for sysfs terms, as we dont know
1062 * at this point what type they need to have.
1064 return 0;
1066 return config_term_common(attr, term, err);
#ifdef HAVE_LIBTRACEEVENT
/*
 * Tracepoint events accept only a small subset of terms; anything else is
 * rejected with a hint listing the valid terms.
 */
static int config_term_tracepoint(struct perf_event_attr *attr,
				  struct parse_events_term *term,
				  struct parse_events_error *err)
{
	switch (term->type_term) {
	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
	case PARSE_EVENTS__TERM_TYPE_INHERIT:
	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		return config_term_common(attr, term, err);
	case PARSE_EVENTS__TERM_TYPE_USER:
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
	case PARSE_EVENTS__TERM_TYPE_NAME:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
	case PARSE_EVENTS__TERM_TYPE_TIME:
	case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
	case PARSE_EVENTS__TERM_TYPE_PERCORE:
	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
	case PARSE_EVENTS__TERM_TYPE_RAW:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
	case PARSE_EVENTS__TERM_TYPE_HARDWARE:
	default:
		if (err) {
			parse_events_error__handle(err, term->err_term,
					strdup(parse_events__term_type_str(term->type_term)),
					strdup("valid terms: call-graph,stack-size\n"));
		}
		return -EINVAL;
	}

	return 0;
}
#endif
1116 static int config_attr(struct perf_event_attr *attr,
1117 const struct parse_events_terms *head,
1118 struct parse_events_error *err,
1119 config_term_func_t config_term)
1121 struct parse_events_term *term;
1123 list_for_each_entry(term, &head->terms, list)
1124 if (config_term(attr, term, err))
1125 return -EINVAL;
1127 return 0;
/*
 * Translate parsed event terms into a list of evsel_config_term entries on
 * @head_terms.  Terms that only influence attr encoding (config, name, etc.)
 * are skipped here; they are handled elsewhere.
 *
 * NOTE: the ADD_CONFIG_TERM* macros below are intentionally left defined
 * past the end of this function - get_config_chgs() reuses
 * ADD_CONFIG_TERM_VAL and performs the #undef.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
static int get_config_terms(const struct parse_events_terms *head_config,
			    struct list_head *head_terms)
{
/* Allocate a zeroed evsel_config_term of __type and queue it on head_terms. */
#define ADD_CONFIG_TERM(__type, __weak)				\
	struct evsel_config_term *__t;				\
								\
	__t = zalloc(sizeof(*__t));				\
	if (!__t)						\
		return -ENOMEM;					\
								\
	INIT_LIST_HEAD(&__t->list);				\
	__t->type = EVSEL__CONFIG_TERM_ ## __type;		\
	__t->weak = __weak;					\
	list_add_tail(&__t->list, head_terms)

/* Add a term carrying a plain value in the __name union member. */
#define ADD_CONFIG_TERM_VAL(__type, __name, __val, __weak)	\
do {								\
	ADD_CONFIG_TERM(__type, __weak);			\
	__t->val.__name = __val;				\
} while (0)

/* Add a term carrying an owned string copy (free_str marks ownership). */
#define ADD_CONFIG_TERM_STR(__type, __val, __weak)		\
do {								\
	ADD_CONFIG_TERM(__type, __weak);			\
	__t->val.str = strdup(__val);				\
	if (!__t->val.str) {					\
		zfree(&__t);					\
		return -ENOMEM;					\
	}							\
	__t->free_str = true;					\
} while (0)

	struct parse_events_term *term;

	list_for_each_entry(term, &head_config->terms, list) {
		switch (term->type_term) {
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
			ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
			ADD_CONFIG_TERM_VAL(FREQ, freq, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_TIME:
			ADD_CONFIG_TERM_VAL(TIME, time, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
			ADD_CONFIG_TERM_STR(CALLGRAPH, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
			ADD_CONFIG_TERM_STR(BRANCH, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
			ADD_CONFIG_TERM_VAL(STACK_USER, stack_user,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_INHERIT:
			/* inherit/no-inherit map onto the same term, inverted. */
			ADD_CONFIG_TERM_VAL(INHERIT, inherit,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
			ADD_CONFIG_TERM_VAL(INHERIT, inherit,
					    term->val.num ? 0 : 1, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
			ADD_CONFIG_TERM_VAL(MAX_STACK, max_stack,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
			ADD_CONFIG_TERM_VAL(MAX_EVENTS, max_events,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
			/* overwrite/no-overwrite also share a term, inverted. */
			ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
			ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
					    term->val.num ? 0 : 1, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
			ADD_CONFIG_TERM_STR(DRV_CFG, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_PERCORE:
			ADD_CONFIG_TERM_VAL(PERCORE, percore,
					    term->val.num ? true : false, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
			ADD_CONFIG_TERM_VAL(AUX_OUTPUT, aux_output,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
			ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_USER:
		case PARSE_EVENTS__TERM_TYPE_CONFIG:
		case PARSE_EVENTS__TERM_TYPE_CONFIG1:
		case PARSE_EVENTS__TERM_TYPE_CONFIG2:
		case PARSE_EVENTS__TERM_TYPE_CONFIG3:
		case PARSE_EVENTS__TERM_TYPE_NAME:
		case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
		case PARSE_EVENTS__TERM_TYPE_RAW:
		case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
		case PARSE_EVENTS__TERM_TYPE_HARDWARE:
		default:
			/* Terms consumed during attr encoding; nothing to add. */
			break;
		}
	}
	return 0;
}
/*
 * Add EVSEL__CONFIG_TERM_CFG_CHG where cfg_chg will have a bit set for
 * each bit of attr->config that the user has changed.
 */
static int get_config_chgs(struct perf_pmu *pmu, struct parse_events_terms *head_config,
			   struct list_head *head_terms)
{
	struct parse_events_term *term;
	u64 bits = 0;
	int type;

	list_for_each_entry(term, &head_config->terms, list) {
		switch (term->type_term) {
		case PARSE_EVENTS__TERM_TYPE_USER:
			/* Only format fields that land in attr->config count. */
			type = perf_pmu__format_type(pmu, term->config);
			if (type != PERF_PMU_FORMAT_VALUE_CONFIG)
				continue;
			bits |= perf_pmu__format_bits(pmu, term->config);
			break;
		case PARSE_EVENTS__TERM_TYPE_CONFIG:
			/* An explicit config=N overrides every bit. */
			bits = ~(u64)0;
			break;
		case PARSE_EVENTS__TERM_TYPE_CONFIG1:
		case PARSE_EVENTS__TERM_TYPE_CONFIG2:
		case PARSE_EVENTS__TERM_TYPE_CONFIG3:
		case PARSE_EVENTS__TERM_TYPE_NAME:
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
		case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
		case PARSE_EVENTS__TERM_TYPE_TIME:
		case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
		case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
		case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
		case PARSE_EVENTS__TERM_TYPE_INHERIT:
		case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
		case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
		case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
		case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
		case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
		case PARSE_EVENTS__TERM_TYPE_PERCORE:
		case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
		case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
		case PARSE_EVENTS__TERM_TYPE_RAW:
		case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
		case PARSE_EVENTS__TERM_TYPE_HARDWARE:
		default:
			/* Doesn't touch attr->config. */
			break;
		}
	}

	if (bits)
		ADD_CONFIG_TERM_VAL(CFG_CHG, cfg_chg, bits, false);

/* ADD_CONFIG_TERM* were defined in get_config_terms() above. */
#undef ADD_CONFIG_TERM
	return 0;
}
/*
 * Add tracepoint event(s) "sys:event" to @list.  '*' or '?' in @sys selects
 * all matching subsystems.  Without libtraceevent support this reports an
 * error instead.
 *
 * Return: 0 on success, negative on error.
 */
int parse_events_add_tracepoint(struct parse_events_state *parse_state,
				struct list_head *list,
				const char *sys, const char *event,
				struct parse_events_error *err,
				struct parse_events_terms *head_config, void *loc_)
{
	YYLTYPE *loc = loc_;
#ifdef HAVE_LIBTRACEEVENT
	if (head_config) {
		/*
		 * attr is scratch space for validating the terms; the value
		 * is discarded here and terms are re-applied per tracepoint.
		 */
		struct perf_event_attr attr;

		if (config_attr(&attr, head_config, err,
				config_term_tracepoint))
			return -EINVAL;
	}

	if (strpbrk(sys, "*?"))
		return add_tracepoint_multi_sys(parse_state, list, sys, event,
						err, head_config, loc);
	else
		return add_tracepoint_event(parse_state, list, sys, event,
					    err, head_config, loc);
#else
	(void)parse_state;
	(void)list;
	(void)sys;
	(void)event;
	(void)head_config;
	parse_events_error__handle(err, loc->first_column, strdup("unsupported tracepoint"),
				   strdup("libtraceevent is necessary for tracepoint support"));
	return -1;
#endif
}
/*
 * Create one evsel for a numeric (type:config) event on the given PMU.
 * @extended_type, when non-zero and the type is a legacy HW/HW-cache type,
 * is encoded into the upper config bits to select a specific core PMU.
 *
 * Return: 0 on success, -EINVAL/-ENOMEM on error.
 */
static int __parse_events_add_numeric(struct parse_events_state *parse_state,
				      struct list_head *list,
				      struct perf_pmu *pmu, u32 type, u32 extended_type,
				      u64 config, const struct parse_events_terms *head_config)
{
	struct perf_event_attr attr;
	LIST_HEAD(config_terms);
	const char *name, *metric_id;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.type = type;
	attr.config = config;
	if (extended_type && (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE)) {
		assert(perf_pmus__supports_extended_type());
		attr.config |= (u64)extended_type << PERF_PMU_TYPE_SHIFT;
	}

	if (head_config) {
		/* Apply user terms to attr, then collect deferred terms. */
		if (config_attr(&attr, head_config, parse_state->error,
				config_term_common))
			return -EINVAL;

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
	}

	name = get_config_name(head_config);
	metric_id = get_config_metric_id(head_config);
	ret = __add_event(list, &parse_state->idx, &attr, /*init_attr*/true, name,
			  metric_id, pmu, &config_terms, /*auto_merge_stats=*/false,
			  /*cpu_list=*/NULL, /*alternate_hw_config=*/PERF_COUNT_HW_MAX
		) == NULL ? -ENOMEM : 0;
	free_config_terms(&config_terms);
	return ret;
}
/*
 * Add a numeric event.  When @wildcard is set and extended types are
 * supported, the legacy event is opened once per core PMU (heterogeneous
 * systems); otherwise a single event on the PMU matching @type is added.
 *
 * Return: 0 on success, negative on error.
 */
int parse_events_add_numeric(struct parse_events_state *parse_state,
			     struct list_head *list,
			     u32 type, u64 config,
			     const struct parse_events_terms *head_config,
			     bool wildcard)
{
	struct perf_pmu *pmu = NULL;
	bool found_supported = false;

	/* Wildcards on numeric values are only supported by core PMUs. */
	if (wildcard && perf_pmus__supports_extended_type()) {
		while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
			int ret;

			found_supported = true;
			if (parse_events__filter_pmu(parse_state, pmu))
				continue;

			ret = __parse_events_add_numeric(parse_state, list, pmu,
							 type, pmu->type,
							 config, head_config);
			if (ret)
				return ret;
		}
		if (found_supported)
			return 0;
	}
	/* Fall back to a single event on the PMU with this type. */
	return __parse_events_add_numeric(parse_state, list, perf_pmus__find_by_type(type),
					  type, /*extended_type=*/0, config, head_config);
}
1401 static bool config_term_percore(struct list_head *config_terms)
1403 struct evsel_config_term *term;
1405 list_for_each_entry(term, config_terms, list) {
1406 if (term->type == EVSEL__CONFIG_TERM_PERCORE)
1407 return term->val.percore;
1410 return false;
/*
 * Add an event for @pmu configured by @const_parsed_terms.  Terms are copied
 * (the caller keeps ownership of the originals), hard-coded terms are applied
 * to attr, event-name aliases are expanded and re-applied, and the remaining
 * format terms are encoded via perf_pmu__config().
 *
 * Return: 0 on success, -EINVAL/-ENOMEM on error.
 */
static int parse_events_add_pmu(struct parse_events_state *parse_state,
				struct list_head *list, struct perf_pmu *pmu,
				const struct parse_events_terms *const_parsed_terms,
				bool auto_merge_stats, u64 alternate_hw_config)
{
	struct perf_event_attr attr;
	struct perf_pmu_info info;
	struct evsel *evsel;
	struct parse_events_error *err = parse_state->error;
	LIST_HEAD(config_terms);
	struct parse_events_terms parsed_terms;
	bool alias_rewrote_terms = false;

	if (verbose > 1) {
		struct strbuf sb;

		strbuf_init(&sb, /*hint=*/ 0);
		if (pmu->selectable && const_parsed_terms &&
		    list_empty(&const_parsed_terms->terms)) {
			strbuf_addf(&sb, "%s//", pmu->name);
		} else {
			strbuf_addf(&sb, "%s/", pmu->name);
			parse_events_terms__to_strbuf(const_parsed_terms, &sb);
			strbuf_addch(&sb, '/');
		}
		fprintf(stderr, "Attempt to add: %s\n", sb.buf);
		strbuf_release(&sb);
	}

	memset(&attr, 0, sizeof(attr));
	if (pmu->perf_event_attr_init_default)
		pmu->perf_event_attr_init_default(pmu, &attr);

	attr.type = pmu->type;

	if (!const_parsed_terms || list_empty(&const_parsed_terms->terms)) {
		/* No terms: add the event with the PMU's default config. */
		evsel = __add_event(list, &parse_state->idx, &attr,
				    /*init_attr=*/true, /*name=*/NULL,
				    /*metric_id=*/NULL, pmu,
				    /*config_terms=*/NULL, auto_merge_stats,
				    /*cpu_list=*/NULL, alternate_hw_config);
		return evsel ? 0 : -ENOMEM;
	}

	/* Work on a private copy so alias rewriting doesn't touch the input. */
	parse_events_terms__init(&parsed_terms);
	if (const_parsed_terms) {
		int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms);

		if (ret)
			return ret;
	}
	fix_raw(&parsed_terms, pmu);

	/* Configure attr/terms with a known PMU, this will set hardcoded terms. */
	if (config_attr(&attr, &parsed_terms, parse_state->error, config_term_pmu)) {
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	/* Look for event names in the terms and rewrite into format based terms. */
	if (perf_pmu__check_alias(pmu, &parsed_terms,
				  &info, &alias_rewrote_terms,
				  &alternate_hw_config, err)) {
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	if (verbose > 1) {
		struct strbuf sb;

		strbuf_init(&sb, /*hint=*/ 0);
		parse_events_terms__to_strbuf(&parsed_terms, &sb);
		fprintf(stderr, "..after resolving event: %s/%s/\n", pmu->name, sb.buf);
		strbuf_release(&sb);
	}

	/* Configure attr/terms again if an alias was expanded. */
	if (alias_rewrote_terms &&
	    config_attr(&attr, &parsed_terms, parse_state->error, config_term_pmu)) {
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	if (get_config_terms(&parsed_terms, &config_terms)) {
		parse_events_terms__exit(&parsed_terms);
		return -ENOMEM;
	}

	/*
	 * When using default config, record which bits of attr->config were
	 * changed by the user.
	 */
	if (pmu->perf_event_attr_init_default &&
	    get_config_chgs(pmu, &parsed_terms, &config_terms)) {
		parse_events_terms__exit(&parsed_terms);
		return -ENOMEM;
	}

	/* Skip configuring hard coded terms that were applied by config_attr. */
	if (perf_pmu__config(pmu, &attr, &parsed_terms, /*apply_hardcoded=*/false,
			     parse_state->error)) {
		free_config_terms(&config_terms);
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	evsel = __add_event(list, &parse_state->idx, &attr, /*init_attr=*/true,
			    get_config_name(&parsed_terms),
			    get_config_metric_id(&parsed_terms), pmu,
			    &config_terms, auto_merge_stats, /*cpu_list=*/NULL,
			    alternate_hw_config);
	if (!evsel) {
		parse_events_terms__exit(&parsed_terms);
		return -ENOMEM;
	}

	if (evsel->name)
		evsel->use_config_name = true;

	evsel->percore = config_term_percore(&evsel->config_terms);

	parse_events_terms__exit(&parsed_terms);
	/* info was filled in by perf_pmu__check_alias() above. */
	free((char *)evsel->unit);
	evsel->unit = strdup(info.unit);
	evsel->scale = info.scale;
	evsel->per_pkg = info.per_pkg;
	evsel->snapshot = info.snapshot;
	return 0;
}
/*
 * Add @event_name (a PMU event/alias) once for every PMU that provides it.
 * The name is injected as a user term so parse_events_add_pmu() resolves it
 * per-PMU.  On success *listp holds the freshly allocated list of evsels;
 * on failure *listp is NULL.
 *
 * Return: 0 if at least one PMU accepted the event, -1/-errno otherwise.
 */
int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
			       const char *event_name, u64 hw_config,
			       const struct parse_events_terms *const_parsed_terms,
			       struct list_head **listp, void *loc_)
{
	struct parse_events_term *term;
	struct list_head *list = NULL;
	struct perf_pmu *pmu = NULL;
	YYLTYPE *loc = loc_;
	int ok = 0;
	const char *config;
	struct parse_events_terms parsed_terms;

	*listp = NULL;

	/* Private copy of the terms; the event-name term is appended to it. */
	parse_events_terms__init(&parsed_terms);
	if (const_parsed_terms) {
		int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms);

		if (ret)
			return ret;
	}

	config = strdup(event_name);
	if (!config)
		goto out_err;

	if (parse_events_term__num(&term,
				   PARSE_EVENTS__TERM_TYPE_USER,
				   config, /*num=*/1, /*novalue=*/true,
				   loc, /*loc_val=*/NULL) < 0) {
		zfree(&config);
		goto out_err;
	}
	/* term now owns config; parsed_terms owns term. */
	list_add_tail(&term->list, &parsed_terms.terms);

	/* Add it for all PMUs that support the alias */
	list = malloc(sizeof(struct list_head));
	if (!list)
		goto out_err;

	INIT_LIST_HEAD(list);

	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		bool auto_merge_stats;

		if (parse_events__filter_pmu(parse_state, pmu))
			continue;

		if (!perf_pmu__have_event(pmu, event_name))
			continue;

		auto_merge_stats = perf_pmu__auto_merge_stats(pmu);
		if (!parse_events_add_pmu(parse_state, list, pmu,
					  &parsed_terms, auto_merge_stats, hw_config)) {
			struct strbuf sb;

			strbuf_init(&sb, /*hint=*/ 0);
			parse_events_terms__to_strbuf(&parsed_terms, &sb);
			pr_debug("%s -> %s/%s/\n", event_name, pmu->name, sb.buf);
			strbuf_release(&sb);
			ok++;
		}
	}

	if (parse_state->fake_pmu) {
		if (!parse_events_add_pmu(parse_state, list, perf_pmus__fake_pmu(), &parsed_terms,
					  /*auto_merge_stats=*/true, hw_config)) {
			struct strbuf sb;

			strbuf_init(&sb, /*hint=*/ 0);
			parse_events_terms__to_strbuf(&parsed_terms, &sb);
			pr_debug("%s -> fake/%s/\n", event_name, sb.buf);
			strbuf_release(&sb);
			ok++;
		}
	}

out_err:
	parse_events_terms__exit(&parsed_terms);
	if (ok)
		*listp = list;
	else
		free(list);

	return ok ? 0 : -1;
}
/*
 * Resolve @event_or_pmu, trying in order: exact PMU name, fake PMU (tests),
 * wildcard PMU name expansion, and finally a multi-PMU event name.  On any
 * success *listp holds the new evsel list; on failure it is freed and an
 * error entry is recorded.
 *
 * Return: 0 on success, -ENOMEM/-EINVAL on failure.
 */
int parse_events_multi_pmu_add_or_add_pmu(struct parse_events_state *parse_state,
					  const char *event_or_pmu,
					  const struct parse_events_terms *const_parsed_terms,
					  struct list_head **listp,
					  void *loc_)
{
	YYLTYPE *loc = loc_;
	struct perf_pmu *pmu;
	int ok = 0;
	char *help;

	*listp = malloc(sizeof(**listp));
	if (!*listp)
		return -ENOMEM;

	INIT_LIST_HEAD(*listp);

	/* Attempt to add to list assuming event_or_pmu is a PMU name. */
	pmu = perf_pmus__find(event_or_pmu);
	if (pmu && !parse_events_add_pmu(parse_state, *listp, pmu, const_parsed_terms,
					 /*auto_merge_stats=*/false,
					 /*alternate_hw_config=*/PERF_COUNT_HW_MAX))
		return 0;

	if (parse_state->fake_pmu) {
		if (!parse_events_add_pmu(parse_state, *listp, perf_pmus__fake_pmu(),
					  const_parsed_terms,
					  /*auto_merge_stats=*/false,
					  /*alternate_hw_config=*/PERF_COUNT_HW_MAX))
			return 0;
	}

	pmu = NULL;
	/* Failed to add, try wildcard expansion of event_or_pmu as a PMU name. */
	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		if (!parse_events__filter_pmu(parse_state, pmu) &&
		    perf_pmu__match(pmu, event_or_pmu)) {
			bool auto_merge_stats = perf_pmu__auto_merge_stats(pmu);

			if (!parse_events_add_pmu(parse_state, *listp, pmu,
						  const_parsed_terms,
						  auto_merge_stats,
						  /*alternate_hw_config=*/PERF_COUNT_HW_MAX)) {
				ok++;
				parse_state->wild_card_pmus = true;
			}
		}
	}
	if (ok)
		return 0;

	/* Failure to add, assume event_or_pmu is an event name. */
	zfree(listp);
	if (!parse_events_multi_pmu_add(parse_state, event_or_pmu, PERF_COUNT_HW_MAX,
					const_parsed_terms, listp, loc))
		return 0;

	if (asprintf(&help, "Unable to find PMU or event on a PMU of '%s'", event_or_pmu) < 0)
		help = NULL;
	parse_events_error__handle(parse_state->error, loc->first_column,
				   strdup("Bad event or PMU"),
				   help);
	zfree(listp);
	return -EINVAL;
}
1697 void parse_events__set_leader(char *name, struct list_head *list)
1699 struct evsel *leader;
1701 if (list_empty(list)) {
1702 WARN_ONCE(true, "WARNING: failed to set leader: empty list");
1703 return;
1706 leader = list_first_entry(list, struct evsel, core.node);
1707 __perf_evlist__set_leader(list, &leader->core);
1708 zfree(&leader->group_name);
1709 leader->group_name = name;
/*
 * Apply parsed event modifiers (u/k/h/G/H/p/P/I/S/D/e/W/b/R) to every evsel
 * on @list.  @group distinguishes group-level application (modifiers combine
 * with each member's existing excludes) from single-event application.
 *
 * Return: 0 on success, -EINVAL on invalid modifier usage.
 */
static int parse_events__modifier_list(struct parse_events_state *parse_state,
				       YYLTYPE *loc,
				       struct list_head *list,
				       struct parse_events_modifier mod,
				       bool group)
{
	struct evsel *evsel;

	if (!group && mod.weak) {
		parse_events_error__handle(parse_state->error, loc->first_column,
					   strdup("Weak modifier is for use with groups"), NULL);
		return -EINVAL;
	}

	__evlist__for_each_entry(list, evsel) {
		/* Translate modifiers into the equivalent evsel excludes. */
		int eu = group ? evsel->core.attr.exclude_user : 0;
		int ek = group ? evsel->core.attr.exclude_kernel : 0;
		int eh = group ? evsel->core.attr.exclude_hv : 0;
		int eH = group ? evsel->core.attr.exclude_host : 0;
		int eG = group ? evsel->core.attr.exclude_guest : 0;
		int exclude = eu | ek | eh;
		int exclude_GH = group ? evsel->exclude_GH : 0;

		/*
		 * Each privilege modifier first excludes everything (if
		 * nothing was excluded yet), then re-enables its own level.
		 */
		if (mod.user) {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			if (!exclude_GH && !perf_guest && exclude_GH_default)
				eG = 1;
			eu = 0;
		}
		if (mod.kernel) {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			ek = 0;
		}
		if (mod.hypervisor) {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			eh = 0;
		}
		/* Guest/host modifiers work the same way on their own pair. */
		if (mod.guest) {
			if (!exclude_GH)
				exclude_GH = eG = eH = 1;
			eG = 0;
		}
		if (mod.host) {
			if (!exclude_GH)
				exclude_GH = eG = eH = 1;
			eH = 0;
		}
		evsel->core.attr.exclude_user = eu;
		evsel->core.attr.exclude_kernel = ek;
		evsel->core.attr.exclude_hv = eh;
		evsel->core.attr.exclude_host = eH;
		evsel->core.attr.exclude_guest = eG;
		evsel->exclude_GH = exclude_GH;

		/* Simple modifiers copied to the evsel. */
		if (mod.precise) {
			u8 precise = evsel->core.attr.precise_ip + mod.precise;
			/*
			 * precise ip:
			 *
			 *  0 - SAMPLE_IP can have arbitrary skid
			 *  1 - SAMPLE_IP must have constant skid
			 *  2 - SAMPLE_IP requested to have 0 skid
			 *  3 - SAMPLE_IP must have 0 skid
			 *
			 *  See also PERF_RECORD_MISC_EXACT_IP
			 */
			if (precise > 3) {
				char *help;

				if (asprintf(&help,
					     "Maximum combined precise value is 3, adding precision to \"%s\"",
					     evsel__name(evsel)) > 0) {
					parse_events_error__handle(parse_state->error,
								   loc->first_column,
								   help, NULL);
				}
				return -EINVAL;
			}
			evsel->core.attr.precise_ip = precise;
		}
		if (mod.precise_max)
			evsel->precise_max = 1;
		if (mod.non_idle)
			evsel->core.attr.exclude_idle = 1;
		if (mod.sample_read)
			evsel->sample_read = 1;
		if (mod.pinned && evsel__is_group_leader(evsel))
			evsel->core.attr.pinned = 1;
		if (mod.exclusive && evsel__is_group_leader(evsel))
			evsel->core.attr.exclusive = 1;
		if (mod.weak)
			evsel->weak_group = true;
		if (mod.bpf)
			evsel->bpf_counter = true;
		if (mod.retire_lat)
			evsel->retire_lat = true;
	}
	return 0;
}
/* Apply modifiers to an event group (e.g. "{a,b}:k"). */
int parse_events__modifier_group(struct parse_events_state *parse_state, void *loc,
				 struct list_head *list,
				 struct parse_events_modifier mod)
{
	return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/true);
}
/* Apply modifiers to a single (ungrouped) event (e.g. "cycles:u"). */
int parse_events__modifier_event(struct parse_events_state *parse_state, void *loc,
				 struct list_head *list,
				 struct parse_events_modifier mod)
{
	return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/false);
}
1831 int parse_events__set_default_name(struct list_head *list, char *name)
1833 struct evsel *evsel;
1834 bool used_name = false;
1836 __evlist__for_each_entry(list, evsel) {
1837 if (!evsel->name) {
1838 evsel->name = used_name ? strdup(name) : name;
1839 used_name = true;
1840 if (!evsel->name)
1841 return -ENOMEM;
1844 if (!used_name)
1845 free(name);
1846 return 0;
/*
 * Run the flex/bison event parser over either an in-memory string (@str) or
 * a stream (@input).  Exactly one of the two sources is used; @str takes
 * precedence.
 *
 * Return: 0 on success, non-zero scanner/parser error otherwise.
 */
static int parse_events__scanner(const char *str,
				 FILE *input,
				 struct parse_events_state *parse_state)
{
	YY_BUFFER_STATE buffer;
	void *scanner;
	int ret;

	ret = parse_events_lex_init_extra(parse_state, &scanner);
	if (ret)
		return ret;

	/* buffer is only assigned (and later released) on the string path. */
	if (str)
		buffer = parse_events__scan_string(str, scanner);
	else
		parse_events_set_in(input, scanner);

#ifdef PARSER_DEBUG
	parse_events_debug = 1;
	parse_events_set_debug(1, scanner);
#endif
	ret = parse_events_parse(parse_state, scanner);

	if (str) {
		parse_events__flush_buffer(buffer, scanner);
		parse_events__delete_buffer(buffer, scanner);
	}
	parse_events_lex_destroy(scanner);
	return ret;
}
/*
 * parse event config string, return a list of event terms.
 *
 * On success the parsed terms are spliced onto @terms; the temporary
 * parse-state term list head is freed either way.
 */
int parse_events_terms(struct parse_events_terms *terms, const char *str, FILE *input)
{
	struct parse_events_state parse_state = {
		.terms  = NULL,
		.stoken = PE_START_TERMS,
	};
	int ret;

	ret = parse_events__scanner(str, input, &parse_state);
	if (!ret)
		list_splice(&parse_state.terms->terms, &terms->terms);

	zfree(&parse_state.terms);
	return ret;
}
/*
 * Compute and record the PMU name used for grouping decisions for @evsel.
 * Normally this is the evsel's own PMU; software events and AUX-event-led
 * groups borrow the name of a suitable non-software group member so sorting
 * does not break them out of their group.
 *
 * Return: 0 on success, -EINVAL if no PMU can be found, -ENOMEM on strdup
 * failure.
 */
static int evsel__compute_group_pmu_name(struct evsel *evsel,
					 const struct list_head *head)
{
	struct evsel *leader = evsel__leader(evsel);
	struct evsel *pos;
	const char *group_pmu_name;
	struct perf_pmu *pmu = evsel__find_pmu(evsel);

	if (!pmu) {
		/*
		 * For PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE types the PMU
		 * is a core PMU, but in heterogeneous systems this is
		 * unknown. For now pick the first core PMU.
		 */
		pmu = perf_pmus__scan_core(NULL);
	}
	if (!pmu) {
		pr_debug("No PMU found for '%s'\n", evsel__name(evsel));
		return -EINVAL;
	}
	group_pmu_name = pmu->name;
	/*
	 * Software events may be in a group with other uncore PMU events. Use
	 * the pmu_name of the first non-software event to avoid breaking the
	 * software event out of the group.
	 *
	 * Aux event leaders, like intel_pt, expect a group with events from
	 * other PMUs, so substitute the AUX event's PMU in this case.
	 */
	if (perf_pmu__is_software(pmu) || evsel__is_aux_event(leader)) {
		struct perf_pmu *leader_pmu = evsel__find_pmu(leader);

		if (!leader_pmu) {
			/* As with determining pmu above. */
			leader_pmu = perf_pmus__scan_core(NULL);
		}
		/*
		 * Starting with the leader, find the first event with a named
		 * non-software PMU. for_each_group_(member|evsel) isn't used as
		 * the list isn't yet sorted putting evsel's in the same group
		 * together.
		 */
		if (leader_pmu && !perf_pmu__is_software(leader_pmu)) {
			group_pmu_name = leader_pmu->name;
		} else if (leader->core.nr_members > 1) {
			list_for_each_entry(pos, head, core.node) {
				struct perf_pmu *pos_pmu;

				if (pos == leader || evsel__leader(pos) != leader)
					continue;
				pos_pmu = evsel__find_pmu(pos);
				if (!pos_pmu) {
					/* As with determining pmu above. */
					pos_pmu = perf_pmus__scan_core(NULL);
				}
				if (pos_pmu && !perf_pmu__is_software(pos_pmu)) {
					group_pmu_name = pos_pmu->name;
					break;
				}
			}
		}
	}
	/* Record computed name. */
	evsel->group_pmu_name = strdup(group_pmu_name);
	return evsel->group_pmu_name ? 0 : -ENOMEM;
}
/*
 * Default (weak) architecture comparison for event sorting; arches may
 * override to force an ordering (e.g. Intel topdown events).
 */
__weak int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs)
{
	/* Order by insertion index. */
	return lhs->core.idx - rhs->core.idx;
}
/*
 * list_sort() comparator for evsels: sort by group leader index (forced
 * grouped events collapse onto *_fg_idx), then by group PMU name, then by
 * the architecture-specific order.
 */
static int evlist__cmp(void *_fg_idx, const struct list_head *l, const struct list_head *r)
{
	const struct perf_evsel *lhs_core = container_of(l, struct perf_evsel, node);
	const struct evsel *lhs = container_of(lhs_core, struct evsel, core);
	const struct perf_evsel *rhs_core = container_of(r, struct perf_evsel, node);
	const struct evsel *rhs = container_of(rhs_core, struct evsel, core);
	int *force_grouped_idx = _fg_idx;
	int lhs_sort_idx, rhs_sort_idx, ret;
	const char *lhs_pmu_name, *rhs_pmu_name;
	bool lhs_has_group, rhs_has_group;

	/*
	 * First sort by grouping/leader. Read the leader idx only if the evsel
	 * is part of a group, by default ungrouped events will be sorted
	 * relative to grouped events based on where the first ungrouped event
	 * occurs. If both events don't have a group we want to fall-through to
	 * the arch specific sorting, that can reorder and fix things like
	 * Intel's topdown events.
	 */
	if (lhs_core->leader != lhs_core || lhs_core->nr_members > 1) {
		lhs_has_group = true;
		lhs_sort_idx = lhs_core->leader->idx;
	} else {
		lhs_has_group = false;
		lhs_sort_idx = *force_grouped_idx != -1 && arch_evsel__must_be_in_group(lhs)
			? *force_grouped_idx
			: lhs_core->idx;
	}
	if (rhs_core->leader != rhs_core || rhs_core->nr_members > 1) {
		rhs_has_group = true;
		rhs_sort_idx = rhs_core->leader->idx;
	} else {
		rhs_has_group = false;
		rhs_sort_idx = *force_grouped_idx != -1 && arch_evsel__must_be_in_group(rhs)
			? *force_grouped_idx
			: rhs_core->idx;
	}

	if (lhs_sort_idx != rhs_sort_idx)
		return lhs_sort_idx - rhs_sort_idx;

	/* Group by PMU if there is a group. Groups can't span PMUs. */
	if (lhs_has_group && rhs_has_group) {
		lhs_pmu_name = lhs->group_pmu_name;
		rhs_pmu_name = rhs->group_pmu_name;
		ret = strcmp(lhs_pmu_name, rhs_pmu_name);
		if (ret)
			return ret;
	}

	/* Architecture specific sorting. */
	return arch_evlist__cmp(lhs, rhs);
}
/*
 * Sort the evsels on @list (see evlist__cmp) and rebuild group leadership so
 * that groups never span PMUs and forced-grouped events end up grouped.
 *
 * Return: 1 if the ordering or grouping changed, 0 if unchanged, negative
 * errno on failure.
 */
static int parse_events__sort_events_and_fix_groups(struct list_head *list)
{
	int idx = 0, force_grouped_idx = -1;
	struct evsel *pos, *cur_leader = NULL;
	struct perf_evsel *cur_leaders_grp = NULL;
	bool idx_changed = false, cur_leader_force_grouped = false;
	int orig_num_leaders = 0, num_leaders = 0;
	int ret;

	/*
	 * Compute index to insert ungrouped events at. Place them where the
	 * first ungrouped event appears.
	 */
	list_for_each_entry(pos, list, core.node) {
		const struct evsel *pos_leader = evsel__leader(pos);

		ret = evsel__compute_group_pmu_name(pos, list);
		if (ret)
			return ret;

		if (pos == pos_leader)
			orig_num_leaders++;

		/*
		 * Ensure indexes are sequential, in particular for multiple
		 * event lists being merged. The indexes are used to detect when
		 * the user order is modified.
		 */
		pos->core.idx = idx++;

		/* Remember an index to sort all forced grouped events together to. */
		if (force_grouped_idx == -1 && pos == pos_leader && pos->core.nr_members < 2 &&
		    arch_evsel__must_be_in_group(pos))
			force_grouped_idx = pos->core.idx;
	}

	/* Sort events. */
	list_sort(&force_grouped_idx, list, evlist__cmp);

	/*
	 * Recompute groups, splitting for PMUs and adding groups for events
	 * that require them.
	 */
	idx = 0;
	list_for_each_entry(pos, list, core.node) {
		const struct evsel *pos_leader = evsel__leader(pos);
		const char *pos_pmu_name = pos->group_pmu_name;
		const char *cur_leader_pmu_name;
		bool pos_force_grouped = force_grouped_idx != -1 &&
			arch_evsel__must_be_in_group(pos);

		/* Reset index and nr_members. */
		if (pos->core.idx != idx)
			idx_changed = true;
		pos->core.idx = idx++;
		pos->core.nr_members = 0;

		/*
		 * Set the group leader respecting the given groupings and that
		 * groups can't span PMUs.
		 */
		if (!cur_leader)
			cur_leader = pos;

		cur_leader_pmu_name = cur_leader->group_pmu_name;
		if ((cur_leaders_grp != pos->core.leader &&
		     (!pos_force_grouped || !cur_leader_force_grouped)) ||
		    strcmp(cur_leader_pmu_name, pos_pmu_name)) {
			/* Event is for a different group/PMU than last. */
			cur_leader = pos;
			/*
			 * Remember the leader's group before it is overwritten,
			 * so that later events match as being in the same
			 * group.
			 */
			cur_leaders_grp = pos->core.leader;
			/*
			 * Avoid forcing events into groups with events that
			 * don't need to be in the group.
			 */
			cur_leader_force_grouped = pos_force_grouped;
		}
		if (pos_leader != cur_leader) {
			/* The leader changed so update it. */
			evsel__set_leader(pos, cur_leader);
		}
	}
	/* Recount members per (possibly new) leader. */
	list_for_each_entry(pos, list, core.node) {
		struct evsel *pos_leader = evsel__leader(pos);

		if (pos == pos_leader)
			num_leaders++;
		pos_leader->core.nr_members++;
	}
	return (idx_changed || num_leaders != orig_num_leaders) ? 1 : 0;
}
/*
 * Parse the event specification @str and append the resulting evsels to
 * @evlist.  On parse errors the partially built list is still spliced onto
 * the evlist so callers can clean up via evlist__delete.
 *
 * Return: 0 on success, non-zero on error.
 */
int __parse_events(struct evlist *evlist, const char *str, const char *pmu_filter,
		   struct parse_events_error *err, bool fake_pmu,
		   bool warn_if_reordered, bool fake_tp)
{
	struct parse_events_state parse_state = {
		.list	  = LIST_HEAD_INIT(parse_state.list),
		.idx	  = evlist->core.nr_entries,
		.error	  = err,
		.stoken	  = PE_START_EVENTS,
		.fake_pmu = fake_pmu,
		.fake_tp  = fake_tp,
		.pmu_filter = pmu_filter,
		.match_legacy_cache_terms = true,
	};
	int ret, ret2;

	ret = parse_events__scanner(str, /*input=*/ NULL, &parse_state);

	if (!ret && list_empty(&parse_state.list)) {
		WARN_ONCE(true, "WARNING: event parser found nothing\n");
		return -1;
	}

	ret2 = parse_events__sort_events_and_fix_groups(&parse_state.list);
	if (ret2 < 0)
		/*
		 * NOTE(review): returns the parser's ret (possibly 0) rather
		 * than ret2, and the parsed list is not spliced/freed here -
		 * looks like it should be "return ret ? ret : ret2"; confirm
		 * against callers before changing.
		 */
		return ret;

	if (ret2 && warn_if_reordered && !parse_state.wild_card_pmus)
		pr_warning("WARNING: events were regrouped to match PMUs\n");

	/*
	 * Add list to the evlist even with errors to allow callers to clean up.
	 */
	evlist__splice_list_tail(evlist, &parse_state.list);

	if (!ret) {
		struct evsel *last;

		last = evlist__last(evlist);
		last->cmdline_group_boundary = true;

		return 0;
	}

	/*
	 * There are 2 users - builtin-record and builtin-test objects.
	 * Both call evlist__delete in case of error, so we dont
	 * need to bother.
	 */
	return ret;
}
2175 int parse_event(struct evlist *evlist, const char *str)
2177 struct parse_events_error err;
2178 int ret;
2180 parse_events_error__init(&err);
2181 ret = parse_events(evlist, str, &err);
2182 parse_events_error__exit(&err);
2183 return ret;
/* One recorded parse error: location, message and optional help text. */
struct parse_events_error_entry {
	/** @list: The list the error is part of. */
	struct list_head list;
	/** @idx: index in the parsed string */
	int idx;
	/** @str: string to display at the index */
	char *str;
	/** @help: optional help string */
	char *help;
};
/* Initialize an error accumulator to the empty state. */
void parse_events_error__init(struct parse_events_error *err)
{
	INIT_LIST_HEAD(&err->list);
}
/* Free all accumulated error entries (and their strings) on @err. */
void parse_events_error__exit(struct parse_events_error *err)
{
	struct parse_events_error_entry *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, &err->list, list) {
		zfree(&pos->str);
		zfree(&pos->help);
		list_del_init(&pos->list);
		free(pos);
	}
}
/*
 * Record a parse error at string offset @idx.  Takes ownership of @str and
 * @help (both heap-allocated); they are freed here if the entry cannot be
 * recorded.
 */
void parse_events_error__handle(struct parse_events_error *err, int idx,
				char *str, char *help)
{
	struct parse_events_error_entry *entry;

	if (WARN(!str || !err, "WARNING: failed to provide error string or struct\n"))
		goto out_free;

	entry = zalloc(sizeof(*entry));
	if (!entry) {
		pr_err("Failed to allocate memory for event parsing error: %s (%s)\n",
			str, help ?: "<no help>");
		goto out_free;
	}
	entry->idx = idx;
	entry->str = str;
	entry->help = help;
	list_add(&entry->list, &err->list);
	return;
out_free:
	free(str);
	free(help);
}
#define MAX_WIDTH 1000

/* Current terminal column count, clamped to MAX_WIDTH. */
static int get_term_width(void)
{
	struct winsize ws;

	get_term_dimensions(&ws);
	return ws.ws_col < MAX_WIDTH ? ws.ws_col : MAX_WIDTH;
}
2247 static void __parse_events_error__print(int err_idx, const char *err_str,
2248 const char *err_help, const char *event)
2250 const char *str = "invalid or unsupported event: ";
2251 char _buf[MAX_WIDTH];
2252 char *buf = (char *) event;
2253 int idx = 0;
2254 if (err_str) {
2255 /* -2 for extra '' in the final fprintf */
2256 int width = get_term_width() - 2;
2257 int len_event = strlen(event);
2258 int len_str, max_len, cut = 0;
2261 * Maximum error index indent, we will cut
2262 * the event string if it's bigger.
2264 int max_err_idx = 13;
2267 * Let's be specific with the message when
2268 * we have the precise error.
2270 str = "event syntax error: ";
2271 len_str = strlen(str);
2272 max_len = width - len_str;
2274 buf = _buf;
2276 /* We're cutting from the beginning. */
2277 if (err_idx > max_err_idx)
2278 cut = err_idx - max_err_idx;
2280 strncpy(buf, event + cut, max_len);
2282 /* Mark cut parts with '..' on both sides. */
2283 if (cut)
2284 buf[0] = buf[1] = '.';
2286 if ((len_event - cut) > max_len) {
2287 buf[max_len - 1] = buf[max_len - 2] = '.';
2288 buf[max_len] = 0;
2291 idx = len_str + err_idx - cut;
2294 fprintf(stderr, "%s'%s'\n", str, buf);
2295 if (idx) {
2296 fprintf(stderr, "%*s\\___ %s\n", idx + 1, "", err_str);
2297 if (err_help)
2298 fprintf(stderr, "\n%s\n", err_help);
2302 void parse_events_error__print(const struct parse_events_error *err,
2303 const char *event)
2305 struct parse_events_error_entry *pos;
2306 bool first = true;
2308 list_for_each_entry(pos, &err->list, list) {
2309 if (!first)
2310 fputs("\n", stderr);
2311 __parse_events_error__print(pos->idx, pos->str, pos->help, event);
2312 first = false;
2317 * In the list of errors err, do any of the error strings (str) contain the
2318 * given needle string?
2320 bool parse_events_error__contains(const struct parse_events_error *err,
2321 const char *needle)
2323 struct parse_events_error_entry *pos;
2325 list_for_each_entry(pos, &err->list, list) {
2326 if (strstr(pos->str, needle) != NULL)
2327 return true;
2329 return false;
2332 #undef MAX_WIDTH
2334 int parse_events_option(const struct option *opt, const char *str,
2335 int unset __maybe_unused)
2337 struct parse_events_option_args *args = opt->value;
2338 struct parse_events_error err;
2339 int ret;
2341 parse_events_error__init(&err);
2342 ret = __parse_events(*args->evlistp, str, args->pmu_filter, &err,
2343 /*fake_pmu=*/false, /*warn_if_reordered=*/true,
2344 /*fake_tp=*/false);
2346 if (ret) {
2347 parse_events_error__print(&err, str);
2348 fprintf(stderr, "Run 'perf list' for a list of valid events\n");
2350 parse_events_error__exit(&err);
2352 return ret;
2355 int parse_events_option_new_evlist(const struct option *opt, const char *str, int unset)
2357 struct parse_events_option_args *args = opt->value;
2358 int ret;
2360 if (*args->evlistp == NULL) {
2361 *args->evlistp = evlist__new();
2363 if (*args->evlistp == NULL) {
2364 fprintf(stderr, "Not enough memory to create evlist\n");
2365 return -1;
2368 ret = parse_events_option(opt, str, unset);
2369 if (ret) {
2370 evlist__delete(*args->evlistp);
2371 *args->evlistp = NULL;
2374 return ret;
/*
 * Apply func to every evsel of the most recently parsed -e option ("glob"):
 * walk backwards from the last evsel until an evsel carrying the
 * cmdline_group_boundary marker, or the list head, is reached.
 * Returns 0 on success, -1 as soon as func reports an error.
 */
static int
foreach_evsel_in_last_glob(struct evlist *evlist,
			   int (*func)(struct evsel *evsel,
				       const void *arg),
			   const void *arg)
{
	struct evsel *last = NULL;
	int err;

	/*
	 * Don't return when list_empty, give func a chance to report
	 * error when it found last == NULL.
	 *
	 * So no need to WARN here, let *func do this.
	 */
	if (evlist->core.nr_entries > 0)
		last = evlist__last(evlist);

	do {
		err = (*func)(last, arg);
		if (err)
			return -1;
		/* func accepted the empty list; nothing to iterate. */
		if (!last)
			return 0;

		/* Stop when the previous node is the list head itself. */
		if (last->core.node.prev == &evlist->core.entries)
			return 0;
		last = list_entry(last->core.node.prev, struct evsel, core.node);
	} while (!last->cmdline_group_boundary);

	return 0;
}
2410 static int set_filter(struct evsel *evsel, const void *arg)
2412 const char *str = arg;
2413 bool found = false;
2414 int nr_addr_filters = 0;
2415 struct perf_pmu *pmu = NULL;
2417 if (evsel == NULL) {
2418 fprintf(stderr,
2419 "--filter option should follow a -e tracepoint or HW tracer option\n");
2420 return -1;
2423 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
2424 if (evsel__append_tp_filter(evsel, str) < 0) {
2425 fprintf(stderr,
2426 "not enough memory to hold filter string\n");
2427 return -1;
2430 return 0;
2433 while ((pmu = perf_pmus__scan(pmu)) != NULL)
2434 if (pmu->type == evsel->core.attr.type) {
2435 found = true;
2436 break;
2439 if (found)
2440 perf_pmu__scan_file(pmu, "nr_addr_filters",
2441 "%d", &nr_addr_filters);
2443 if (!nr_addr_filters)
2444 return perf_bpf_filter__parse(&evsel->bpf_filters, str);
2446 if (evsel__append_addr_filter(evsel, str) < 0) {
2447 fprintf(stderr,
2448 "not enough memory to hold filter string\n");
2449 return -1;
2452 return 0;
/* --filter callback: apply @str to each evsel of the last -e glob. */
int parse_filter(const struct option *opt, const char *str,
		 int unset __maybe_unused)
{
	struct evlist *evlist = *(struct evlist **)opt->value;

	return foreach_evsel_in_last_glob(evlist, set_filter,
					  (const void *)str);
}
2464 static int add_exclude_perf_filter(struct evsel *evsel,
2465 const void *arg __maybe_unused)
2467 char new_filter[64];
2469 if (evsel == NULL || evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
2470 fprintf(stderr,
2471 "--exclude-perf option should follow a -e tracepoint option\n");
2472 return -1;
2475 snprintf(new_filter, sizeof(new_filter), "common_pid != %d", getpid());
2477 if (evsel__append_tp_filter(evsel, new_filter) < 0) {
2478 fprintf(stderr,
2479 "not enough memory to hold filter string\n");
2480 return -1;
2483 return 0;
/* --exclude-perf callback: filter perf's own pid out of the last -e glob. */
int exclude_perf(const struct option *opt,
		 const char *arg __maybe_unused,
		 int unset __maybe_unused)
{
	struct evlist *evlist = *(struct evlist **)opt->value;

	return foreach_evsel_in_last_glob(evlist, add_exclude_perf_filter,
					  NULL);
}
/* A "hardcoded" term is any term type other than a user-supplied one. */
int parse_events__is_hardcoded_term(struct parse_events_term *term)
{
	return term->type_term != PARSE_EVENTS__TERM_TYPE_USER;
}
2501 static int new_term(struct parse_events_term **_term,
2502 struct parse_events_term *temp,
2503 char *str, u64 num)
2505 struct parse_events_term *term;
2507 term = malloc(sizeof(*term));
2508 if (!term)
2509 return -ENOMEM;
2511 *term = *temp;
2512 INIT_LIST_HEAD(&term->list);
2513 term->weak = false;
2515 switch (term->type_val) {
2516 case PARSE_EVENTS__TERM_TYPE_NUM:
2517 term->val.num = num;
2518 break;
2519 case PARSE_EVENTS__TERM_TYPE_STR:
2520 term->val.str = str;
2521 break;
2522 default:
2523 free(term);
2524 return -EINVAL;
2527 *_term = term;
2528 return 0;
2531 int parse_events_term__num(struct parse_events_term **term,
2532 enum parse_events__term_type type_term,
2533 const char *config, u64 num,
2534 bool no_value,
2535 void *loc_term_, void *loc_val_)
2537 YYLTYPE *loc_term = loc_term_;
2538 YYLTYPE *loc_val = loc_val_;
2540 struct parse_events_term temp = {
2541 .type_val = PARSE_EVENTS__TERM_TYPE_NUM,
2542 .type_term = type_term,
2543 .config = config ? : strdup(parse_events__term_type_str(type_term)),
2544 .no_value = no_value,
2545 .err_term = loc_term ? loc_term->first_column : 0,
2546 .err_val = loc_val ? loc_val->first_column : 0,
2549 return new_term(term, &temp, /*str=*/NULL, num);
2552 int parse_events_term__str(struct parse_events_term **term,
2553 enum parse_events__term_type type_term,
2554 char *config, char *str,
2555 void *loc_term_, void *loc_val_)
2557 YYLTYPE *loc_term = loc_term_;
2558 YYLTYPE *loc_val = loc_val_;
2560 struct parse_events_term temp = {
2561 .type_val = PARSE_EVENTS__TERM_TYPE_STR,
2562 .type_term = type_term,
2563 .config = config,
2564 .err_term = loc_term ? loc_term->first_column : 0,
2565 .err_val = loc_val ? loc_val->first_column : 0,
2568 return new_term(term, &temp, str, /*num=*/0);
2571 int parse_events_term__term(struct parse_events_term **term,
2572 enum parse_events__term_type term_lhs,
2573 enum parse_events__term_type term_rhs,
2574 void *loc_term, void *loc_val)
2576 return parse_events_term__str(term, term_lhs, NULL,
2577 strdup(parse_events__term_type_str(term_rhs)),
2578 loc_term, loc_val);
2581 int parse_events_term__clone(struct parse_events_term **new,
2582 const struct parse_events_term *term)
2584 char *str;
2585 struct parse_events_term temp = *term;
2587 temp.used = false;
2588 if (term->config) {
2589 temp.config = strdup(term->config);
2590 if (!temp.config)
2591 return -ENOMEM;
2593 if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
2594 return new_term(new, &temp, /*str=*/NULL, term->val.num);
2596 str = strdup(term->val.str);
2597 if (!str) {
2598 zfree(&temp.config);
2599 return -ENOMEM;
2601 return new_term(new, &temp, str, /*num=*/0);
/* Free a term; only non-numeric terms own their string value. */
void parse_events_term__delete(struct parse_events_term *term)
{
	if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM)
		zfree(&term->val.str);

	zfree(&term->config);
	free(term);
}
2613 static int parse_events_terms__copy(const struct parse_events_terms *src,
2614 struct parse_events_terms *dest)
2616 struct parse_events_term *term;
2618 list_for_each_entry (term, &src->terms, list) {
2619 struct parse_events_term *n;
2620 int ret;
2622 ret = parse_events_term__clone(&n, term);
2623 if (ret)
2624 return ret;
2626 list_add_tail(&n->list, &dest->terms);
2628 return 0;
/* Initialize an empty terms list. */
void parse_events_terms__init(struct parse_events_terms *terms)
{
	INIT_LIST_HEAD(&terms->terms);
}
/* Delete every term on the list, leaving @terms itself allocated. */
void parse_events_terms__exit(struct parse_events_terms *terms)
{
	struct parse_events_term *term, *h;

	list_for_each_entry_safe(term, h, &terms->terms, list) {
		list_del_init(&term->list);
		parse_events_term__delete(term);
	}
}
/* Delete the terms list and the container itself; NULL is accepted. */
void parse_events_terms__delete(struct parse_events_terms *terms)
{
	if (terms) {
		parse_events_terms__exit(terms);
		free(terms);
	}
}
2654 int parse_events_terms__to_strbuf(const struct parse_events_terms *terms, struct strbuf *sb)
2656 struct parse_events_term *term;
2657 bool first = true;
2659 if (!terms)
2660 return 0;
2662 list_for_each_entry(term, &terms->terms, list) {
2663 int ret;
2665 if (!first) {
2666 ret = strbuf_addch(sb, ',');
2667 if (ret < 0)
2668 return ret;
2670 first = false;
2672 if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
2673 if (term->no_value) {
2674 assert(term->val.num == 1);
2675 ret = strbuf_addf(sb, "%s", term->config);
2676 } else
2677 ret = strbuf_addf(sb, "%s=%#"PRIx64, term->config, term->val.num);
2678 else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) {
2679 if (term->config) {
2680 ret = strbuf_addf(sb, "%s=", term->config);
2681 if (ret < 0)
2682 return ret;
2683 } else if ((unsigned int)term->type_term < __PARSE_EVENTS__TERM_TYPE_NR) {
2684 ret = strbuf_addf(sb, "%s=",
2685 parse_events__term_type_str(term->type_term));
2686 if (ret < 0)
2687 return ret;
2689 assert(!term->no_value);
2690 ret = strbuf_addf(sb, "%s", term->val.str);
2692 if (ret < 0)
2693 return ret;
2695 return 0;
2698 static void config_terms_list(char *buf, size_t buf_sz)
2700 int i;
2701 bool first = true;
2703 buf[0] = '\0';
2704 for (i = 0; i < __PARSE_EVENTS__TERM_TYPE_NR; i++) {
2705 const char *name = parse_events__term_type_str(i);
2707 if (!config_term_avail(i, NULL))
2708 continue;
2709 if (!name)
2710 continue;
2711 if (name[0] == '<')
2712 continue;
2714 if (strlen(buf) + strlen(name) + 2 >= buf_sz)
2715 return;
2717 if (!first)
2718 strcat(buf, ",");
2719 else
2720 first = false;
2721 strcat(buf, name);
2726 * Return string contains valid config terms of an event.
2727 * @additional_terms: For terms such as PMU sysfs terms.
2729 char *parse_events_formats_error_string(char *additional_terms)
2731 char *str;
2732 /* "no-overwrite" is the longest name */
2733 char static_terms[__PARSE_EVENTS__TERM_TYPE_NR *
2734 (sizeof("no-overwrite") - 1)];
2736 config_terms_list(static_terms, sizeof(static_terms));
2737 /* valid terms */
2738 if (additional_terms) {
2739 if (asprintf(&str, "valid terms: %s,%s",
2740 additional_terms, static_terms) < 0)
2741 goto fail;
2742 } else {
2743 if (asprintf(&str, "valid terms: %s", static_terms) < 0)
2744 goto fail;
2746 return str;
2748 fail:
2749 return NULL;