Merge tag 'trace-printf-v6.13' of git://git.kernel.org/pub/scm/linux/kernel/git/trace...
[drm/drm-misc.git] / tools / perf / util / metricgroup.c
blob46920ebadfd1bc8bdd959cc456810eb81427d532
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2017, Intel Corporation.
4 */
6 /* Manage metrics and groups of metrics from JSON files */
8 #include "metricgroup.h"
9 #include "debug.h"
10 #include "evlist.h"
11 #include "evsel.h"
12 #include "strbuf.h"
13 #include "pmu.h"
14 #include "pmus.h"
15 #include "print-events.h"
16 #include "smt.h"
17 #include "tool_pmu.h"
18 #include "expr.h"
19 #include "rblist.h"
20 #include <string.h>
21 #include <errno.h>
22 #include "strlist.h"
23 #include <assert.h>
24 #include <linux/ctype.h>
25 #include <linux/list_sort.h>
26 #include <linux/string.h>
27 #include <linux/zalloc.h>
28 #include <perf/cpumap.h>
29 #include <subcmd/parse-options.h>
30 #include <api/fs/fs.h>
31 #include "util.h"
32 #include <asm/bug.h>
33 #include "cgroup.h"
34 #include "util/hashmap.h"
36 struct metric_event *metricgroup__lookup(struct rblist *metric_events,
37 struct evsel *evsel,
38 bool create)
40 struct rb_node *nd;
41 struct metric_event me = {
42 .evsel = evsel
45 if (!metric_events)
46 return NULL;
48 if (evsel && evsel->metric_leader)
49 me.evsel = evsel->metric_leader;
50 nd = rblist__find(metric_events, &me);
51 if (nd)
52 return container_of(nd, struct metric_event, nd);
53 if (create) {
54 rblist__add_node(metric_events, &me);
55 nd = rblist__find(metric_events, &me);
56 if (nd)
57 return container_of(nd, struct metric_event, nd);
59 return NULL;
62 static int metric_event_cmp(struct rb_node *rb_node, const void *entry)
64 struct metric_event *a = container_of(rb_node,
65 struct metric_event,
66 nd);
67 const struct metric_event *b = entry;
69 if (a->evsel == b->evsel)
70 return 0;
71 if ((char *)a->evsel < (char *)b->evsel)
72 return -1;
73 return +1;
76 static struct rb_node *metric_event_new(struct rblist *rblist __maybe_unused,
77 const void *entry)
79 struct metric_event *me = malloc(sizeof(struct metric_event));
81 if (!me)
82 return NULL;
83 memcpy(me, entry, sizeof(struct metric_event));
84 me->evsel = ((struct metric_event *)entry)->evsel;
85 me->is_default = false;
86 INIT_LIST_HEAD(&me->head);
87 return &me->nd;
90 static void metric_event_delete(struct rblist *rblist __maybe_unused,
91 struct rb_node *rb_node)
93 struct metric_event *me = container_of(rb_node, struct metric_event, nd);
94 struct metric_expr *expr, *tmp;
96 list_for_each_entry_safe(expr, tmp, &me->head, nd) {
97 zfree(&expr->metric_name);
98 zfree(&expr->metric_refs);
99 zfree(&expr->metric_events);
100 free(expr);
103 free(me);
106 static void metricgroup__rblist_init(struct rblist *metric_events)
108 rblist__init(metric_events);
109 metric_events->node_cmp = metric_event_cmp;
110 metric_events->node_new = metric_event_new;
111 metric_events->node_delete = metric_event_delete;
/* Tear down a metric_events rblist built by metricgroup__rblist_init(). */
void metricgroup__rblist_exit(struct rblist *metric_events)
{
	rblist__exit(metric_events);
}
120 * The metric under construction. The data held here will be placed in a
121 * metric_expr.
123 struct metric {
124 struct list_head nd;
126 * The expression parse context importantly holding the IDs contained
127 * within the expression.
129 struct expr_parse_ctx *pctx;
130 const char *pmu;
131 /** The name of the metric such as "IPC". */
132 const char *metric_name;
133 /** Modifier on the metric such as "u" or NULL for none. */
134 const char *modifier;
135 /** The expression to parse, for example, "instructions/cycles". */
136 const char *metric_expr;
137 /** Optional threshold expression where zero value is green, otherwise red. */
138 const char *metric_threshold;
140 * The "ScaleUnit" that scales and adds a unit to the metric during
141 * output.
143 const char *metric_unit;
145 * Optional name of the metric group reported
146 * if the Default metric group is being processed.
148 const char *default_metricgroup_name;
149 /** Optional null terminated array of referenced metrics. */
150 struct metric_ref *metric_refs;
152 * Should events of the metric be grouped?
154 bool group_events;
156 * Parsed events for the metric. Optional as events may be taken from a
157 * different metric whose group contains all the IDs necessary for this
158 * one.
160 struct evlist *evlist;
/*
 * Warn about metrics whose events couldn't be grouped because of the NMI
 * watchdog. Called with foot=false per affected metric (header) and once
 * with foot=true at the end to print the remediation hint.
 */
static void metric__watchdog_constraint_hint(const char *name, bool foot)
{
	static bool violate_nmi_constraint;

	if (!foot) {
		pr_warning("Not grouping metric %s's events.\n", name);
		violate_nmi_constraint = true;
		return;
	}

	/* Footer is only useful when a violation was recorded earlier. */
	if (!violate_nmi_constraint)
		return;

	pr_warning("Try disabling the NMI watchdog to comply NO_NMI_WATCHDOG metric constraint:\n"
		   "    echo 0 > /proc/sys/kernel/nmi_watchdog\n"
		   "    perf stat ...\n"
		   "    echo 1 > /proc/sys/kernel/nmi_watchdog\n");
}
182 static bool metric__group_events(const struct pmu_metric *pm)
184 switch (pm->event_grouping) {
185 case MetricNoGroupEvents:
186 return false;
187 case MetricNoGroupEventsNmi:
188 if (!sysctl__nmi_watchdog_enabled())
189 return true;
190 metric__watchdog_constraint_hint(pm->metric_name, /*foot=*/false);
191 return false;
192 case MetricNoGroupEventsSmt:
193 return !smt_on();
194 case MetricGroupEvents:
195 default:
196 return true;
200 static void metric__free(struct metric *m)
202 if (!m)
203 return;
205 zfree(&m->metric_refs);
206 expr__ctx_free(m->pctx);
207 zfree(&m->modifier);
208 evlist__delete(m->evlist);
209 free(m);
212 static struct metric *metric__new(const struct pmu_metric *pm,
213 const char *modifier,
214 bool metric_no_group,
215 int runtime,
216 const char *user_requested_cpu_list,
217 bool system_wide)
219 struct metric *m;
221 m = zalloc(sizeof(*m));
222 if (!m)
223 return NULL;
225 m->pctx = expr__ctx_new();
226 if (!m->pctx)
227 goto out_err;
229 m->pmu = pm->pmu ?: "cpu";
230 m->metric_name = pm->metric_name;
231 m->default_metricgroup_name = pm->default_metricgroup_name ?: "";
232 m->modifier = NULL;
233 if (modifier) {
234 m->modifier = strdup(modifier);
235 if (!m->modifier)
236 goto out_err;
238 m->metric_expr = pm->metric_expr;
239 m->metric_threshold = pm->metric_threshold;
240 m->metric_unit = pm->unit;
241 m->pctx->sctx.user_requested_cpu_list = NULL;
242 if (user_requested_cpu_list) {
243 m->pctx->sctx.user_requested_cpu_list = strdup(user_requested_cpu_list);
244 if (!m->pctx->sctx.user_requested_cpu_list)
245 goto out_err;
247 m->pctx->sctx.runtime = runtime;
248 m->pctx->sctx.system_wide = system_wide;
249 m->group_events = !metric_no_group && metric__group_events(pm);
250 m->metric_refs = NULL;
251 m->evlist = NULL;
253 return m;
254 out_err:
255 metric__free(m);
256 return NULL;
/* Is an event with this metric-id already in the first num_events slots? */
static bool contains_metric_id(struct evsel **metric_events, int num_events,
			       const char *metric_id)
{
	for (int idx = 0; idx < num_events; idx++) {
		if (!strcmp(evsel__metric_id(metric_events[idx]), metric_id))
			return true;
	}
	return false;
}
272 * setup_metric_events - Find a group of events in metric_evlist that correspond
273 * to the IDs from a parsed metric expression.
274 * @pmu: The PMU for the IDs.
275 * @ids: the metric IDs to match.
276 * @metric_evlist: the list of perf events.
277 * @out_metric_events: holds the created metric events array.
279 static int setup_metric_events(const char *pmu, struct hashmap *ids,
280 struct evlist *metric_evlist,
281 struct evsel ***out_metric_events)
283 struct evsel **metric_events;
284 const char *metric_id;
285 struct evsel *ev;
286 size_t ids_size, matched_events, i;
287 bool all_pmus = !strcmp(pmu, "all") || perf_pmus__num_core_pmus() == 1 || !is_pmu_core(pmu);
289 *out_metric_events = NULL;
290 ids_size = hashmap__size(ids);
292 metric_events = calloc(ids_size + 1, sizeof(void *));
293 if (!metric_events)
294 return -ENOMEM;
296 matched_events = 0;
297 evlist__for_each_entry(metric_evlist, ev) {
298 struct expr_id_data *val_ptr;
300 /* Don't match events for the wrong hybrid PMU. */
301 if (!all_pmus && ev->pmu && evsel__is_hybrid(ev) &&
302 strcmp(ev->pmu->name, pmu))
303 continue;
305 * Check for duplicate events with the same name. For
306 * example, uncore_imc/cas_count_read/ will turn into 6
307 * events per socket on skylakex. Only the first such
308 * event is placed in metric_events.
310 metric_id = evsel__metric_id(ev);
311 if (contains_metric_id(metric_events, matched_events, metric_id))
312 continue;
314 * Does this event belong to the parse context? For
315 * combined or shared groups, this metric may not care
316 * about this event.
318 if (hashmap__find(ids, metric_id, &val_ptr)) {
319 pr_debug("Matched metric-id %s to %s\n", metric_id, evsel__name(ev));
320 metric_events[matched_events++] = ev;
322 if (matched_events >= ids_size)
323 break;
326 if (matched_events < ids_size) {
327 free(metric_events);
328 return -EINVAL;
330 for (i = 0; i < ids_size; i++) {
331 ev = metric_events[i];
332 ev->collect_stat = true;
335 * The metric leader points to the identically named
336 * event in metric_events.
338 ev->metric_leader = ev;
340 * Mark two events with identical names in the same
341 * group (or globally) as being in use as uncore events
342 * may be duplicated for each pmu. Set the metric leader
343 * of such events to be the event that appears in
344 * metric_events.
346 metric_id = evsel__metric_id(ev);
347 evlist__for_each_entry_continue(metric_evlist, ev) {
348 if (!strcmp(evsel__metric_id(ev), metric_id))
349 ev->metric_leader = metric_events[i];
352 *out_metric_events = metric_events;
353 return 0;
/*
 * Does @sought match any entry of the ';' separated list in
 * @metric_or_groups? "all" matches everything; a NULL list only matches
 * the special name "No_group". Comparison is case-insensitive and an
 * entry matches when it equals @sought up to its terminating '\0'/';'.
 */
static bool match_metric(const char *metric_or_groups, const char *sought)
{
	size_t len;

	if (!sought)
		return false;
	if (!strcmp(sought, "all"))
		return true;
	if (!metric_or_groups)
		return !strcasecmp(sought, "No_group");

	len = strlen(sought);
	/* Iterative walk of the ';' separated entries (was recursive). */
	while (metric_or_groups) {
		if (!strncasecmp(metric_or_groups, sought, len) &&
		    (metric_or_groups[len] == 0 || metric_or_groups[len] == ';'))
			return true;
		metric_or_groups = strchr(metric_or_groups, ';');
		if (metric_or_groups)
			metric_or_groups++;
	}
	return false;
}
375 static bool match_pm_metric(const struct pmu_metric *pm, const char *pmu, const char *metric)
377 const char *pm_pmu = pm->pmu ?: "cpu";
379 if (strcmp(pmu, "all") && strcmp(pm_pmu, pmu))
380 return false;
382 return match_metric(pm->metric_group, metric) ||
383 match_metric(pm->metric_name, metric);
386 /** struct mep - RB-tree node for building printing information. */
387 struct mep {
388 /** nd - RB-tree element. */
389 struct rb_node nd;
390 /** @metric_group: Owned metric group name, separated others with ';'. */
391 char *metric_group;
392 const char *metric_name;
393 const char *metric_desc;
394 const char *metric_long_desc;
395 const char *metric_expr;
396 const char *metric_threshold;
397 const char *metric_unit;
400 static int mep_cmp(struct rb_node *rb_node, const void *entry)
402 struct mep *a = container_of(rb_node, struct mep, nd);
403 struct mep *b = (struct mep *)entry;
404 int ret;
406 ret = strcmp(a->metric_group, b->metric_group);
407 if (ret)
408 return ret;
410 return strcmp(a->metric_name, b->metric_name);
413 static struct rb_node *mep_new(struct rblist *rl __maybe_unused, const void *entry)
415 struct mep *me = malloc(sizeof(struct mep));
417 if (!me)
418 return NULL;
420 memcpy(me, entry, sizeof(struct mep));
421 return &me->nd;
424 static void mep_delete(struct rblist *rl __maybe_unused,
425 struct rb_node *nd)
427 struct mep *me = container_of(nd, struct mep, nd);
429 zfree(&me->metric_group);
430 free(me);
433 static struct mep *mep_lookup(struct rblist *groups, const char *metric_group,
434 const char *metric_name)
436 struct rb_node *nd;
437 struct mep me = {
438 .metric_group = strdup(metric_group),
439 .metric_name = metric_name,
441 nd = rblist__find(groups, &me);
442 if (nd) {
443 free(me.metric_group);
444 return container_of(nd, struct mep, nd);
446 rblist__add_node(groups, &me);
447 nd = rblist__find(groups, &me);
448 if (nd)
449 return container_of(nd, struct mep, nd);
450 return NULL;
453 static int metricgroup__add_to_mep_groups(const struct pmu_metric *pm,
454 struct rblist *groups)
456 const char *g;
457 char *omg, *mg;
459 mg = strdup(pm->metric_group ?: pm->metric_name);
460 if (!mg)
461 return -ENOMEM;
462 omg = mg;
463 while ((g = strsep(&mg, ";")) != NULL) {
464 struct mep *me;
466 g = skip_spaces(g);
467 if (strlen(g))
468 me = mep_lookup(groups, g, pm->metric_name);
469 else
470 me = mep_lookup(groups, pm->metric_name, pm->metric_name);
472 if (me) {
473 me->metric_desc = pm->desc;
474 me->metric_long_desc = pm->long_desc;
475 me->metric_expr = pm->metric_expr;
476 me->metric_threshold = pm->metric_threshold;
477 me->metric_unit = pm->unit;
480 free(omg);
482 return 0;
485 struct metricgroup_iter_data {
486 pmu_metric_iter_fn fn;
487 void *data;
490 static int metricgroup__sys_event_iter(const struct pmu_metric *pm,
491 const struct pmu_metrics_table *table,
492 void *data)
494 struct metricgroup_iter_data *d = data;
495 struct perf_pmu *pmu = NULL;
497 if (!pm->metric_expr || !pm->compat)
498 return 0;
500 while ((pmu = perf_pmus__scan(pmu))) {
502 if (!pmu->id || !pmu_uncore_identifier_match(pm->compat, pmu->id))
503 continue;
505 return d->fn(pm, table, d->data);
507 return 0;
510 static int metricgroup__add_to_mep_groups_callback(const struct pmu_metric *pm,
511 const struct pmu_metrics_table *table __maybe_unused,
512 void *vdata)
514 struct rblist *groups = vdata;
516 return metricgroup__add_to_mep_groups(pm, groups);
519 void metricgroup__print(const struct print_callbacks *print_cb, void *print_state)
521 struct rblist groups;
522 const struct pmu_metrics_table *table;
523 struct rb_node *node, *next;
525 rblist__init(&groups);
526 groups.node_new = mep_new;
527 groups.node_cmp = mep_cmp;
528 groups.node_delete = mep_delete;
529 table = pmu_metrics_table__find();
530 if (table) {
531 pmu_metrics_table__for_each_metric(table,
532 metricgroup__add_to_mep_groups_callback,
533 &groups);
536 struct metricgroup_iter_data data = {
537 .fn = metricgroup__add_to_mep_groups_callback,
538 .data = &groups,
540 pmu_for_each_sys_metric(metricgroup__sys_event_iter, &data);
543 for (node = rb_first_cached(&groups.entries); node; node = next) {
544 struct mep *me = container_of(node, struct mep, nd);
546 print_cb->print_metric(print_state,
547 me->metric_group,
548 me->metric_name,
549 me->metric_desc,
550 me->metric_long_desc,
551 me->metric_expr,
552 me->metric_threshold,
553 me->metric_unit);
554 next = rb_next(node);
555 rblist__remove_node(&groups, node);
/* Characters parse-events cannot handle inside a metric-id. */
static const char *code_characters = ",-=@";

/*
 * Append @x to @sb, replacing each character found in code_characters
 * with '!' followed by its index as a digit (e.g. '@' becomes "!3").
 */
static int encode_metric_id(struct strbuf *sb, const char *x)
{
	int ret = 0;

	for (; *x; x++) {
		char *pos = strchr(code_characters, *x);

		if (pos) {
			ret = strbuf_addch(sb, '!');
			if (!ret)
				ret = strbuf_addch(sb, '0' + (pos - code_characters));
		} else {
			ret = strbuf_addch(sb, *x);
		}
		if (ret)
			break;
	}
	return ret;
}
585 static int decode_metric_id(struct strbuf *sb, const char *x)
587 const char *orig = x;
588 size_t i;
589 char c;
590 int ret;
592 for (; *x; x++) {
593 c = *x;
594 if (*x == '!') {
595 x++;
596 i = *x - '0';
597 if (i > strlen(code_characters)) {
598 pr_err("Bad metric-id encoding in: '%s'", orig);
599 return -1;
601 c = code_characters[i];
603 ret = strbuf_addch(sb, c);
604 if (ret)
605 return ret;
607 return 0;
610 static int decode_all_metric_ids(struct evlist *perf_evlist, const char *modifier)
612 struct evsel *ev;
613 struct strbuf sb = STRBUF_INIT;
614 char *cur;
615 int ret = 0;
617 evlist__for_each_entry(perf_evlist, ev) {
618 if (!ev->metric_id)
619 continue;
621 ret = strbuf_setlen(&sb, 0);
622 if (ret)
623 break;
625 ret = decode_metric_id(&sb, ev->metric_id);
626 if (ret)
627 break;
629 free((char *)ev->metric_id);
630 ev->metric_id = strdup(sb.buf);
631 if (!ev->metric_id) {
632 ret = -ENOMEM;
633 break;
636 * If the name is just the parsed event, use the metric-id to
637 * give a more friendly display version.
639 if (strstr(ev->name, "metric-id=")) {
640 bool has_slash = false;
642 zfree(&ev->name);
643 for (cur = strchr(sb.buf, '@') ; cur; cur = strchr(++cur, '@')) {
644 *cur = '/';
645 has_slash = true;
648 if (modifier) {
649 if (!has_slash && !strchr(sb.buf, ':')) {
650 ret = strbuf_addch(&sb, ':');
651 if (ret)
652 break;
654 ret = strbuf_addstr(&sb, modifier);
655 if (ret)
656 break;
658 ev->name = strdup(sb.buf);
659 if (!ev->name) {
660 ret = -ENOMEM;
661 break;
665 strbuf_release(&sb);
666 return ret;
669 static int metricgroup__build_event_string(struct strbuf *events,
670 const struct expr_parse_ctx *ctx,
671 const char *modifier,
672 bool group_events)
674 struct hashmap_entry *cur;
675 size_t bkt;
676 bool no_group = true, has_tool_events = false;
677 bool tool_events[TOOL_PMU__EVENT_MAX] = {false};
678 int ret = 0;
680 #define RETURN_IF_NON_ZERO(x) do { if (x) return x; } while (0)
682 hashmap__for_each_entry(ctx->ids, cur, bkt) {
683 const char *sep, *rsep, *id = cur->pkey;
684 enum tool_pmu_event ev;
686 pr_debug("found event %s\n", id);
688 /* Always move tool events outside of the group. */
689 ev = tool_pmu__str_to_event(id);
690 if (ev != TOOL_PMU__EVENT_NONE) {
691 has_tool_events = true;
692 tool_events[ev] = true;
693 continue;
695 /* Separate events with commas and open the group if necessary. */
696 if (no_group) {
697 if (group_events) {
698 ret = strbuf_addch(events, '{');
699 RETURN_IF_NON_ZERO(ret);
702 no_group = false;
703 } else {
704 ret = strbuf_addch(events, ',');
705 RETURN_IF_NON_ZERO(ret);
708 * Encode the ID as an event string. Add a qualifier for
709 * metric_id that is the original name except with characters
710 * that parse-events can't parse replaced. For example,
711 * 'msr@tsc@' gets added as msr/tsc,metric-id=msr!3tsc!3/
713 sep = strchr(id, '@');
714 if (sep != NULL) {
715 ret = strbuf_add(events, id, sep - id);
716 RETURN_IF_NON_ZERO(ret);
717 ret = strbuf_addch(events, '/');
718 RETURN_IF_NON_ZERO(ret);
719 rsep = strrchr(sep, '@');
720 ret = strbuf_add(events, sep + 1, rsep - sep - 1);
721 RETURN_IF_NON_ZERO(ret);
722 ret = strbuf_addstr(events, ",metric-id=");
723 RETURN_IF_NON_ZERO(ret);
724 sep = rsep;
725 } else {
726 sep = strchr(id, ':');
727 if (sep != NULL) {
728 ret = strbuf_add(events, id, sep - id);
729 RETURN_IF_NON_ZERO(ret);
730 } else {
731 ret = strbuf_addstr(events, id);
732 RETURN_IF_NON_ZERO(ret);
734 ret = strbuf_addstr(events, "/metric-id=");
735 RETURN_IF_NON_ZERO(ret);
737 ret = encode_metric_id(events, id);
738 RETURN_IF_NON_ZERO(ret);
739 ret = strbuf_addstr(events, "/");
740 RETURN_IF_NON_ZERO(ret);
742 if (sep != NULL) {
743 ret = strbuf_addstr(events, sep + 1);
744 RETURN_IF_NON_ZERO(ret);
746 if (modifier) {
747 ret = strbuf_addstr(events, modifier);
748 RETURN_IF_NON_ZERO(ret);
751 if (!no_group && group_events) {
752 ret = strbuf_addf(events, "}:W");
753 RETURN_IF_NON_ZERO(ret);
755 if (has_tool_events) {
756 int i;
758 tool_pmu__for_each_event(i) {
759 if (tool_events[i]) {
760 if (!no_group) {
761 ret = strbuf_addch(events, ',');
762 RETURN_IF_NON_ZERO(ret);
764 no_group = false;
765 ret = strbuf_addstr(events, tool_pmu__event_to_str(i));
766 RETURN_IF_NON_ZERO(ret);
771 return ret;
772 #undef RETURN_IF_NON_ZERO
775 int __weak arch_get_runtimeparam(const struct pmu_metric *pm __maybe_unused)
777 return 1;
/*
 * A singly linked list on the stack of the names of metrics being
 * processed. Used to identify recursion.
 */
struct visited_metric {
	const char *name;
	const struct visited_metric *parent;
};

/* Arguments threaded through metricgroup__add_metric_sys_event_iter(). */
struct metricgroup_add_iter_data {
	struct list_head *metric_list;
	const char *pmu;
	const char *metric_name;
	const char *modifier;
	int *ret;
	bool *has_match;
	bool metric_no_group;
	bool metric_no_threshold;
	const char *user_requested_cpu_list;
	bool system_wide;
	struct metric *root_metric;
	const struct visited_metric *visited;
	const struct pmu_metrics_table *table;
};
805 static bool metricgroup__find_metric(const char *pmu,
806 const char *metric,
807 const struct pmu_metrics_table *table,
808 struct pmu_metric *pm);
810 static int add_metric(struct list_head *metric_list,
811 const struct pmu_metric *pm,
812 const char *modifier,
813 bool metric_no_group,
814 bool metric_no_threshold,
815 const char *user_requested_cpu_list,
816 bool system_wide,
817 struct metric *root_metric,
818 const struct visited_metric *visited,
819 const struct pmu_metrics_table *table);
822 * resolve_metric - Locate metrics within the root metric and recursively add
823 * references to them.
824 * @metric_list: The list the metric is added to.
825 * @pmu: The PMU name to resolve metrics on, or "all" for all PMUs.
826 * @modifier: if non-null event modifiers like "u".
827 * @metric_no_group: Should events written to events be grouped "{}" or
828 * global. Grouping is the default but due to multiplexing the
829 * user may override.
830 * @user_requested_cpu_list: Command line specified CPUs to record on.
831 * @system_wide: Are events for all processes recorded.
832 * @root_metric: Metrics may reference other metrics to form a tree. In this
833 * case the root_metric holds all the IDs and a list of referenced
834 * metrics. When adding a root this argument is NULL.
835 * @visited: A singly linked list of metric names being added that is used to
836 * detect recursion.
837 * @table: The table that is searched for metrics, most commonly the table for the
838 * architecture perf is running upon.
840 static int resolve_metric(struct list_head *metric_list,
841 const char *pmu,
842 const char *modifier,
843 bool metric_no_group,
844 bool metric_no_threshold,
845 const char *user_requested_cpu_list,
846 bool system_wide,
847 struct metric *root_metric,
848 const struct visited_metric *visited,
849 const struct pmu_metrics_table *table)
851 struct hashmap_entry *cur;
852 size_t bkt;
853 struct to_resolve {
854 /* The metric to resolve. */
855 struct pmu_metric pm;
857 * The key in the IDs map, this may differ from in case,
858 * etc. from pm->metric_name.
860 const char *key;
861 } *pending = NULL;
862 int i, ret = 0, pending_cnt = 0;
865 * Iterate all the parsed IDs and if there's a matching metric and it to
866 * the pending array.
868 hashmap__for_each_entry(root_metric->pctx->ids, cur, bkt) {
869 struct pmu_metric pm;
871 if (metricgroup__find_metric(pmu, cur->pkey, table, &pm)) {
872 pending = realloc(pending,
873 (pending_cnt + 1) * sizeof(struct to_resolve));
874 if (!pending)
875 return -ENOMEM;
877 memcpy(&pending[pending_cnt].pm, &pm, sizeof(pm));
878 pending[pending_cnt].key = cur->pkey;
879 pending_cnt++;
883 /* Remove the metric IDs from the context. */
884 for (i = 0; i < pending_cnt; i++)
885 expr__del_id(root_metric->pctx, pending[i].key);
888 * Recursively add all the metrics, IDs are added to the root metric's
889 * context.
891 for (i = 0; i < pending_cnt; i++) {
892 ret = add_metric(metric_list, &pending[i].pm, modifier, metric_no_group,
893 metric_no_threshold, user_requested_cpu_list, system_wide,
894 root_metric, visited, table);
895 if (ret)
896 break;
899 free(pending);
900 return ret;
904 * __add_metric - Add a metric to metric_list.
905 * @metric_list: The list the metric is added to.
906 * @pm: The pmu_metric containing the metric to be added.
907 * @modifier: if non-null event modifiers like "u".
908 * @metric_no_group: Should events written to events be grouped "{}" or
909 * global. Grouping is the default but due to multiplexing the
910 * user may override.
911 * @metric_no_threshold: Should threshold expressions be ignored?
912 * @runtime: A special argument for the parser only known at runtime.
913 * @user_requested_cpu_list: Command line specified CPUs to record on.
914 * @system_wide: Are events for all processes recorded.
915 * @root_metric: Metrics may reference other metrics to form a tree. In this
916 * case the root_metric holds all the IDs and a list of referenced
917 * metrics. When adding a root this argument is NULL.
918 * @visited: A singly linked list of metric names being added that is used to
919 * detect recursion.
920 * @table: The table that is searched for metrics, most commonly the table for the
921 * architecture perf is running upon.
923 static int __add_metric(struct list_head *metric_list,
924 const struct pmu_metric *pm,
925 const char *modifier,
926 bool metric_no_group,
927 bool metric_no_threshold,
928 int runtime,
929 const char *user_requested_cpu_list,
930 bool system_wide,
931 struct metric *root_metric,
932 const struct visited_metric *visited,
933 const struct pmu_metrics_table *table)
935 const struct visited_metric *vm;
936 int ret;
937 bool is_root = !root_metric;
938 const char *expr;
939 struct visited_metric visited_node = {
940 .name = pm->metric_name,
941 .parent = visited,
944 for (vm = visited; vm; vm = vm->parent) {
945 if (!strcmp(pm->metric_name, vm->name)) {
946 pr_err("failed: recursion detected for %s\n", pm->metric_name);
947 return -1;
951 if (is_root) {
953 * This metric is the root of a tree and may reference other
954 * metrics that are added recursively.
956 root_metric = metric__new(pm, modifier, metric_no_group, runtime,
957 user_requested_cpu_list, system_wide);
958 if (!root_metric)
959 return -ENOMEM;
961 } else {
962 int cnt = 0;
965 * This metric was referenced in a metric higher in the
966 * tree. Check if the same metric is already resolved in the
967 * metric_refs list.
969 if (root_metric->metric_refs) {
970 for (; root_metric->metric_refs[cnt].metric_name; cnt++) {
971 if (!strcmp(pm->metric_name,
972 root_metric->metric_refs[cnt].metric_name))
973 return 0;
977 /* Create reference. Need space for the entry and the terminator. */
978 root_metric->metric_refs = realloc(root_metric->metric_refs,
979 (cnt + 2) * sizeof(struct metric_ref));
980 if (!root_metric->metric_refs)
981 return -ENOMEM;
984 * Intentionally passing just const char pointers,
985 * from 'pe' object, so they never go away. We don't
986 * need to change them, so there's no need to create
987 * our own copy.
989 root_metric->metric_refs[cnt].metric_name = pm->metric_name;
990 root_metric->metric_refs[cnt].metric_expr = pm->metric_expr;
992 /* Null terminate array. */
993 root_metric->metric_refs[cnt+1].metric_name = NULL;
994 root_metric->metric_refs[cnt+1].metric_expr = NULL;
998 * For both the parent and referenced metrics, we parse
999 * all the metric's IDs and add it to the root context.
1001 ret = 0;
1002 expr = pm->metric_expr;
1003 if (is_root && pm->metric_threshold) {
1005 * Threshold expressions are built off the actual metric. Switch
1006 * to use that in case of additional necessary events. Change
1007 * the visited node name to avoid this being flagged as
1008 * recursion. If the threshold events are disabled, just use the
1009 * metric's name as a reference. This allows metric threshold
1010 * computation if there are sufficient events.
1012 assert(strstr(pm->metric_threshold, pm->metric_name));
1013 expr = metric_no_threshold ? pm->metric_name : pm->metric_threshold;
1014 visited_node.name = "__threshold__";
1016 if (expr__find_ids(expr, NULL, root_metric->pctx) < 0) {
1017 /* Broken metric. */
1018 ret = -EINVAL;
1020 if (!ret) {
1021 /* Resolve referenced metrics. */
1022 const char *pmu = pm->pmu ?: "cpu";
1024 ret = resolve_metric(metric_list, pmu, modifier, metric_no_group,
1025 metric_no_threshold, user_requested_cpu_list,
1026 system_wide, root_metric, &visited_node,
1027 table);
1029 if (ret) {
1030 if (is_root)
1031 metric__free(root_metric);
1033 } else if (is_root)
1034 list_add(&root_metric->nd, metric_list);
1036 return ret;
/* State for metricgroup__find_metric()'s walk over the metrics table. */
struct metricgroup__find_metric_data {
	const char *pmu;
	const char *metric;
	struct pmu_metric *pm;
};
1045 static int metricgroup__find_metric_callback(const struct pmu_metric *pm,
1046 const struct pmu_metrics_table *table __maybe_unused,
1047 void *vdata)
1049 struct metricgroup__find_metric_data *data = vdata;
1050 const char *pm_pmu = pm->pmu ?: "cpu";
1052 if (strcmp(data->pmu, "all") && strcmp(pm_pmu, data->pmu))
1053 return 0;
1055 if (!match_metric(pm->metric_name, data->metric))
1056 return 0;
1058 memcpy(data->pm, pm, sizeof(*pm));
1059 return 1;
1062 static bool metricgroup__find_metric(const char *pmu,
1063 const char *metric,
1064 const struct pmu_metrics_table *table,
1065 struct pmu_metric *pm)
1067 struct metricgroup__find_metric_data data = {
1068 .pmu = pmu,
1069 .metric = metric,
1070 .pm = pm,
1073 return pmu_metrics_table__for_each_metric(table, metricgroup__find_metric_callback, &data)
1074 ? true : false;
1077 static int add_metric(struct list_head *metric_list,
1078 const struct pmu_metric *pm,
1079 const char *modifier,
1080 bool metric_no_group,
1081 bool metric_no_threshold,
1082 const char *user_requested_cpu_list,
1083 bool system_wide,
1084 struct metric *root_metric,
1085 const struct visited_metric *visited,
1086 const struct pmu_metrics_table *table)
1088 int ret = 0;
1090 pr_debug("metric expr %s for %s\n", pm->metric_expr, pm->metric_name);
1092 if (!strstr(pm->metric_expr, "?")) {
1093 ret = __add_metric(metric_list, pm, modifier, metric_no_group,
1094 metric_no_threshold, 0, user_requested_cpu_list,
1095 system_wide, root_metric, visited, table);
1096 } else {
1097 int j, count;
1099 count = arch_get_runtimeparam(pm);
1101 /* This loop is added to create multiple
1102 * events depend on count value and add
1103 * those events to metric_list.
1106 for (j = 0; j < count && !ret; j++)
1107 ret = __add_metric(metric_list, pm, modifier, metric_no_group,
1108 metric_no_threshold, j, user_requested_cpu_list,
1109 system_wide, root_metric, visited, table);
1112 return ret;
1115 static int metricgroup__add_metric_sys_event_iter(const struct pmu_metric *pm,
1116 const struct pmu_metrics_table *table __maybe_unused,
1117 void *data)
1119 struct metricgroup_add_iter_data *d = data;
1120 int ret;
1122 if (!match_pm_metric(pm, d->pmu, d->metric_name))
1123 return 0;
1125 ret = add_metric(d->metric_list, pm, d->modifier, d->metric_no_group,
1126 d->metric_no_threshold, d->user_requested_cpu_list,
1127 d->system_wide, d->root_metric, d->visited, d->table);
1128 if (ret)
1129 goto out;
1131 *(d->has_match) = true;
1133 out:
1134 *(d->ret) = ret;
1135 return ret;
1139 * metric_list_cmp - list_sort comparator that sorts metrics with more events to
1140 * the front. tool events are excluded from the count.
1142 static int metric_list_cmp(void *priv __maybe_unused, const struct list_head *l,
1143 const struct list_head *r)
1145 const struct metric *left = container_of(l, struct metric, nd);
1146 const struct metric *right = container_of(r, struct metric, nd);
1147 struct expr_id_data *data;
1148 int i, left_count, right_count;
1150 left_count = hashmap__size(left->pctx->ids);
1151 tool_pmu__for_each_event(i) {
1152 if (!expr__get_id(left->pctx, tool_pmu__event_to_str(i), &data))
1153 left_count--;
1156 right_count = hashmap__size(right->pctx->ids);
1157 tool_pmu__for_each_event(i) {
1158 if (!expr__get_id(right->pctx, tool_pmu__event_to_str(i), &data))
1159 right_count--;
1162 return right_count - left_count;
1166 * default_metricgroup_cmp - Implements complex key for the Default metricgroup
1167 * that first sorts by default_metricgroup_name, then
1168 * metric_name.
1170 static int default_metricgroup_cmp(void *priv __maybe_unused,
1171 const struct list_head *l,
1172 const struct list_head *r)
1174 const struct metric *left = container_of(l, struct metric, nd);
1175 const struct metric *right = container_of(r, struct metric, nd);
1176 int diff = strcmp(right->default_metricgroup_name, left->default_metricgroup_name);
1178 if (diff)
1179 return diff;
1181 return strcmp(right->metric_name, left->metric_name);
/* Closure for metricgroup__add_metric_callback(). */
struct metricgroup__add_metric_data {
	struct list_head *list;
	const char *pmu;
	const char *metric_name;
	const char *modifier;
	const char *user_requested_cpu_list;
	bool metric_no_group;
	bool metric_no_threshold;
	bool system_wide;
	bool has_match;
};
1196 static int metricgroup__add_metric_callback(const struct pmu_metric *pm,
1197 const struct pmu_metrics_table *table,
1198 void *vdata)
1200 struct metricgroup__add_metric_data *data = vdata;
1201 int ret = 0;
1203 if (pm->metric_expr && match_pm_metric(pm, data->pmu, data->metric_name)) {
1204 bool metric_no_group = data->metric_no_group ||
1205 match_metric(pm->metricgroup_no_group, data->metric_name);
1207 data->has_match = true;
1208 ret = add_metric(data->list, pm, data->modifier, metric_no_group,
1209 data->metric_no_threshold, data->user_requested_cpu_list,
1210 data->system_wide, /*root_metric=*/NULL,
1211 /*visited_metrics=*/NULL, table);
1213 return ret;
/**
 * metricgroup__add_metric - Find and add a metric, or a metric group.
 * @pmu: The PMU name to search for metrics on, or "all" for all PMUs.
 * @metric_name: The name of the metric or metric group. For example, "IPC"
 *               could be the name of a metric and "TopDownL1" the name of a
 *               metric group.
 * @modifier: if non-null event modifiers like "u".
 * @metric_no_group: Should events written to events be grouped "{}" or
 *                   global. Grouping is the default but due to multiplexing the
 *                   user may override.
 * @metric_no_threshold: Should threshold expressions be skipped?
 * @user_requested_cpu_list: Command line specified CPUs to record on.
 * @system_wide: Are events for all processes recorded.
 * @metric_list: The list that the metric or metric group are added to.
 * @table: The table that is searched for metrics, most commonly the table for the
 *         architecture perf is running upon.
 */
static int metricgroup__add_metric(const char *pmu, const char *metric_name, const char *modifier,
				   bool metric_no_group, bool metric_no_threshold,
				   const char *user_requested_cpu_list,
				   bool system_wide,
				   struct list_head *metric_list,
				   const struct pmu_metrics_table *table)
{
	LIST_HEAD(list);
	int ret;
	bool has_match = false;

	{
		struct metricgroup__add_metric_data data = {
			.list = &list,
			.pmu = pmu,
			.metric_name = metric_name,
			.modifier = modifier,
			.metric_no_group = metric_no_group,
			.metric_no_threshold = metric_no_threshold,
			.user_requested_cpu_list = user_requested_cpu_list,
			.system_wide = system_wide,
			.has_match = false,
		};

		/*
		 * Iterate over all metrics seeing if metric matches either the
		 * name or group. When it does add the metric to the list.
		 */
		ret = pmu_metrics_table__for_each_metric(table, metricgroup__add_metric_callback,
							 &data);
		if (ret)
			goto out;

		has_match = data.has_match;
	}
	{
		/* Also search the sys metrics; has_match/ret are updated through pointers. */
		struct metricgroup_iter_data data = {
			.fn = metricgroup__add_metric_sys_event_iter,
			.data = (void *) &(struct metricgroup_add_iter_data) {
				.metric_list = &list,
				.pmu = pmu,
				.metric_name = metric_name,
				.modifier = modifier,
				.metric_no_group = metric_no_group,
				.user_requested_cpu_list = user_requested_cpu_list,
				.system_wide = system_wide,
				.has_match = &has_match,
				.ret = &ret,
				.table = table,
			},
		};

		pmu_for_each_sys_metric(metricgroup__sys_event_iter, &data);
	}
	/* End of pmu events. */
	if (!has_match)
		ret = -EINVAL;

out:
	/*
	 * add to metric_list so that they can be released
	 * even if it's failed
	 */
	list_splice(&list, metric_list);
	return ret;
}
/**
 * metricgroup__add_metric_list - Find and add metrics, or metric groups,
 *                                specified in a list.
 * @pmu: A pmu to restrict the metrics to, or "all" for all PMUS.
 * @list: the list of metrics or metric groups. For example, "IPC,CPI,TopDownL1"
 *        would match the IPC and CPI metrics, and TopDownL1 would match all
 *        the metrics in the TopDownL1 group.
 * @metric_no_group: Should events written to events be grouped "{}" or
 *                   global. Grouping is the default but due to multiplexing the
 *                   user may override.
 * @metric_no_threshold: Should threshold expressions be skipped?
 * @user_requested_cpu_list: Command line specified CPUs to record on.
 * @system_wide: Are events for all processes recorded.
 * @metric_list: The list that metrics are added to.
 * @table: The table that is searched for metrics, most commonly the table for the
 *         architecture perf is running upon.
 */
static int metricgroup__add_metric_list(const char *pmu, const char *list,
					bool metric_no_group,
					bool metric_no_threshold,
					const char *user_requested_cpu_list,
					bool system_wide, struct list_head *metric_list,
					const struct pmu_metrics_table *table)
{
	char *list_itr, *list_copy, *metric_name, *modifier;
	int ret, count = 0;

	/* strsep() modifies its argument, so work on a private copy. */
	list_copy = strdup(list);
	if (!list_copy)
		return -ENOMEM;
	list_itr = list_copy;

	while ((metric_name = strsep(&list_itr, ",")) != NULL) {
		/* A ':' separates the metric name from its modifier (e.g. "IPC:u"). */
		modifier = strchr(metric_name, ':');
		if (modifier)
			*modifier++ = '\0';

		ret = metricgroup__add_metric(pmu, metric_name, modifier,
					      metric_no_group, metric_no_threshold,
					      user_requested_cpu_list,
					      system_wide, metric_list, table);
		if (ret == -EINVAL)
			pr_err("Cannot find metric or group `%s'\n", metric_name);

		if (ret)
			break;

		count++;
	}
	free(list_copy);

	if (!ret) {
		/*
		 * Warn about nmi_watchdog if any parsed metrics had the
		 * NO_NMI_WATCHDOG constraint.
		 */
		metric__watchdog_constraint_hint(NULL, /*foot=*/true);
		/* No metrics. */
		if (count == 0)
			return -EINVAL;
	}
	return ret;
}
1361 static void metricgroup__free_metrics(struct list_head *metric_list)
1363 struct metric *m, *tmp;
1365 list_for_each_entry_safe (m, tmp, metric_list, nd) {
1366 list_del_init(&m->nd);
1367 metric__free(m);
1372 * find_tool_events - Search for the pressence of tool events in metric_list.
1373 * @metric_list: List to take metrics from.
1374 * @tool_events: Array of false values, indices corresponding to tool events set
1375 * to true if tool event is found.
1377 static void find_tool_events(const struct list_head *metric_list,
1378 bool tool_events[TOOL_PMU__EVENT_MAX])
1380 struct metric *m;
1382 list_for_each_entry(m, metric_list, nd) {
1383 int i;
1385 tool_pmu__for_each_event(i) {
1386 struct expr_id_data *data;
1388 if (!tool_events[i] &&
1389 !expr__get_id(m->pctx, tool_pmu__event_to_str(i), &data))
1390 tool_events[i] = true;
/**
 * build_combined_expr_ctx - Make an expr_parse_ctx with all !group_events
 *                           metric IDs, as the IDs are held in a set,
 *                           duplicates will be removed.
 * @metric_list: List to take metrics from.
 * @combined: Out argument for result.
 */
static int build_combined_expr_ctx(const struct list_head *metric_list,
				   struct expr_parse_ctx **combined)
{
	struct hashmap_entry *cur;
	size_t bkt;
	struct metric *m;
	char *dup;
	int ret;

	*combined = expr__ctx_new();
	if (!*combined)
		return -ENOMEM;

	list_for_each_entry(m, metric_list, nd) {
		/* Only merge IDs from metrics that are neither grouped nor modified. */
		if (!m->group_events && !m->modifier) {
			hashmap__for_each_entry(m->pctx->ids, cur, bkt) {
				/*
				 * NOTE(review): dup appears to be owned by the
				 * combined context after expr__add_id() — it is
				 * not freed here on success; confirm in expr.c.
				 */
				dup = strdup(cur->pkey);
				if (!dup) {
					ret = -ENOMEM;
					goto err_out;
				}
				ret = expr__add_id(*combined, dup);
				if (ret)
					goto err_out;
			}
		}
	}
	return 0;
err_out:
	/* On failure the out argument is cleared so callers see NULL. */
	expr__ctx_free(*combined);
	*combined = NULL;
	return ret;
}
1437 * parse_ids - Build the event string for the ids and parse them creating an
1438 * evlist. The encoded metric_ids are decoded.
1439 * @metric_no_merge: is metric sharing explicitly disabled.
1440 * @fake_pmu: use a fake PMU when testing metrics not supported by the current CPU.
1441 * @ids: the event identifiers parsed from a metric.
1442 * @modifier: any modifiers added to the events.
1443 * @group_events: should events be placed in a weak group.
1444 * @tool_events: entries set true if the tool event of index could be present in
1445 * the overall list of metrics.
1446 * @out_evlist: the created list of events.
1448 static int parse_ids(bool metric_no_merge, bool fake_pmu,
1449 struct expr_parse_ctx *ids, const char *modifier,
1450 bool group_events, const bool tool_events[TOOL_PMU__EVENT_MAX],
1451 struct evlist **out_evlist)
1453 struct parse_events_error parse_error;
1454 struct evlist *parsed_evlist;
1455 struct strbuf events = STRBUF_INIT;
1456 int ret;
1458 *out_evlist = NULL;
1459 if (!metric_no_merge || hashmap__size(ids->ids) == 0) {
1460 bool added_event = false;
1461 int i;
1463 * We may fail to share events between metrics because a tool
1464 * event isn't present in one metric. For example, a ratio of
1465 * cache misses doesn't need duration_time but the same events
1466 * may be used for a misses per second. Events without sharing
1467 * implies multiplexing, that is best avoided, so place
1468 * all tool events in every group.
1470 * Also, there may be no ids/events in the expression parsing
1471 * context because of constant evaluation, e.g.:
1472 * event1 if #smt_on else 0
1473 * Add a tool event to avoid a parse error on an empty string.
1475 tool_pmu__for_each_event(i) {
1476 if (tool_events[i]) {
1477 char *tmp = strdup(tool_pmu__event_to_str(i));
1479 if (!tmp)
1480 return -ENOMEM;
1481 ids__insert(ids->ids, tmp);
1482 added_event = true;
1485 if (!added_event && hashmap__size(ids->ids) == 0) {
1486 char *tmp = strdup("duration_time");
1488 if (!tmp)
1489 return -ENOMEM;
1490 ids__insert(ids->ids, tmp);
1493 ret = metricgroup__build_event_string(&events, ids, modifier,
1494 group_events);
1495 if (ret)
1496 return ret;
1498 parsed_evlist = evlist__new();
1499 if (!parsed_evlist) {
1500 ret = -ENOMEM;
1501 goto err_out;
1503 pr_debug("Parsing metric events '%s'\n", events.buf);
1504 parse_events_error__init(&parse_error);
1505 ret = __parse_events(parsed_evlist, events.buf, /*pmu_filter=*/NULL,
1506 &parse_error, fake_pmu, /*warn_if_reordered=*/false,
1507 /*fake_tp=*/false);
1508 if (ret) {
1509 parse_events_error__print(&parse_error, events.buf);
1510 goto err_out;
1512 ret = decode_all_metric_ids(parsed_evlist, modifier);
1513 if (ret)
1514 goto err_out;
1516 *out_evlist = parsed_evlist;
1517 parsed_evlist = NULL;
1518 err_out:
1519 parse_events_error__exit(&parse_error);
1520 evlist__delete(parsed_evlist);
1521 strbuf_release(&events);
1522 return ret;
1525 static int parse_groups(struct evlist *perf_evlist,
1526 const char *pmu, const char *str,
1527 bool metric_no_group,
1528 bool metric_no_merge,
1529 bool metric_no_threshold,
1530 const char *user_requested_cpu_list,
1531 bool system_wide,
1532 bool fake_pmu,
1533 struct rblist *metric_events_list,
1534 const struct pmu_metrics_table *table)
1536 struct evlist *combined_evlist = NULL;
1537 LIST_HEAD(metric_list);
1538 struct metric *m;
1539 bool tool_events[TOOL_PMU__EVENT_MAX] = {false};
1540 bool is_default = !strcmp(str, "Default");
1541 int ret;
1543 if (metric_events_list->nr_entries == 0)
1544 metricgroup__rblist_init(metric_events_list);
1545 ret = metricgroup__add_metric_list(pmu, str, metric_no_group, metric_no_threshold,
1546 user_requested_cpu_list,
1547 system_wide, &metric_list, table);
1548 if (ret)
1549 goto out;
1551 /* Sort metrics from largest to smallest. */
1552 list_sort(NULL, &metric_list, metric_list_cmp);
1554 if (!metric_no_merge) {
1555 struct expr_parse_ctx *combined = NULL;
1557 find_tool_events(&metric_list, tool_events);
1559 ret = build_combined_expr_ctx(&metric_list, &combined);
1561 if (!ret && combined && hashmap__size(combined->ids)) {
1562 ret = parse_ids(metric_no_merge, fake_pmu, combined,
1563 /*modifier=*/NULL,
1564 /*group_events=*/false,
1565 tool_events,
1566 &combined_evlist);
1568 if (combined)
1569 expr__ctx_free(combined);
1571 if (ret)
1572 goto out;
1575 if (is_default)
1576 list_sort(NULL, &metric_list, default_metricgroup_cmp);
1578 list_for_each_entry(m, &metric_list, nd) {
1579 struct metric_event *me;
1580 struct evsel **metric_events;
1581 struct evlist *metric_evlist = NULL;
1582 struct metric *n;
1583 struct metric_expr *expr;
1585 if (combined_evlist && !m->group_events) {
1586 metric_evlist = combined_evlist;
1587 } else if (!metric_no_merge) {
1589 * See if the IDs for this metric are a subset of an
1590 * earlier metric.
1592 list_for_each_entry(n, &metric_list, nd) {
1593 if (m == n)
1594 break;
1596 if (n->evlist == NULL)
1597 continue;
1599 if ((!m->modifier && n->modifier) ||
1600 (m->modifier && !n->modifier) ||
1601 (m->modifier && n->modifier &&
1602 strcmp(m->modifier, n->modifier)))
1603 continue;
1605 if ((!m->pmu && n->pmu) ||
1606 (m->pmu && !n->pmu) ||
1607 (m->pmu && n->pmu && strcmp(m->pmu, n->pmu)))
1608 continue;
1610 if (expr__subset_of_ids(n->pctx, m->pctx)) {
1611 pr_debug("Events in '%s' fully contained within '%s'\n",
1612 m->metric_name, n->metric_name);
1613 metric_evlist = n->evlist;
1614 break;
1619 if (!metric_evlist) {
1620 ret = parse_ids(metric_no_merge, fake_pmu, m->pctx, m->modifier,
1621 m->group_events, tool_events, &m->evlist);
1622 if (ret)
1623 goto out;
1625 metric_evlist = m->evlist;
1627 ret = setup_metric_events(fake_pmu ? "all" : m->pmu, m->pctx->ids,
1628 metric_evlist, &metric_events);
1629 if (ret) {
1630 pr_err("Cannot resolve IDs for %s: %s\n",
1631 m->metric_name, m->metric_expr);
1632 goto out;
1635 me = metricgroup__lookup(metric_events_list, metric_events[0], true);
1637 expr = malloc(sizeof(struct metric_expr));
1638 if (!expr) {
1639 ret = -ENOMEM;
1640 free(metric_events);
1641 goto out;
1644 expr->metric_refs = m->metric_refs;
1645 m->metric_refs = NULL;
1646 expr->metric_expr = m->metric_expr;
1647 if (m->modifier) {
1648 char *tmp;
1650 if (asprintf(&tmp, "%s:%s", m->metric_name, m->modifier) < 0)
1651 expr->metric_name = NULL;
1652 else
1653 expr->metric_name = tmp;
1654 } else
1655 expr->metric_name = strdup(m->metric_name);
1657 if (!expr->metric_name) {
1658 ret = -ENOMEM;
1659 free(metric_events);
1660 goto out;
1662 expr->metric_threshold = m->metric_threshold;
1663 expr->metric_unit = m->metric_unit;
1664 expr->metric_events = metric_events;
1665 expr->runtime = m->pctx->sctx.runtime;
1666 expr->default_metricgroup_name = m->default_metricgroup_name;
1667 me->is_default = is_default;
1668 list_add(&expr->nd, &me->head);
1672 if (combined_evlist) {
1673 evlist__splice_list_tail(perf_evlist, &combined_evlist->core.entries);
1674 evlist__delete(combined_evlist);
1677 list_for_each_entry(m, &metric_list, nd) {
1678 if (m->evlist)
1679 evlist__splice_list_tail(perf_evlist, &m->evlist->core.entries);
1682 out:
1683 metricgroup__free_metrics(&metric_list);
1684 return ret;
1687 int metricgroup__parse_groups(struct evlist *perf_evlist,
1688 const char *pmu,
1689 const char *str,
1690 bool metric_no_group,
1691 bool metric_no_merge,
1692 bool metric_no_threshold,
1693 const char *user_requested_cpu_list,
1694 bool system_wide,
1695 bool hardware_aware_grouping,
1696 struct rblist *metric_events)
1698 const struct pmu_metrics_table *table = pmu_metrics_table__find();
1700 if (!table)
1701 return -EINVAL;
1702 if (hardware_aware_grouping)
1703 pr_debug("Use hardware aware grouping instead of traditional metric grouping method\n");
1705 return parse_groups(perf_evlist, pmu, str, metric_no_group, metric_no_merge,
1706 metric_no_threshold, user_requested_cpu_list, system_wide,
1707 /*fake_pmu=*/false, metric_events, table);
/*
 * Test-only entry point: parse metrics against an explicit @table using a fake
 * PMU so metrics for other CPUs can still be exercised.
 */
int metricgroup__parse_groups_test(struct evlist *evlist,
				   const struct pmu_metrics_table *table,
				   const char *str,
				   struct rblist *metric_events)
{
	return parse_groups(evlist, "all", str,
			    /*metric_no_group=*/false,
			    /*metric_no_merge=*/false,
			    /*metric_no_threshold=*/false,
			    /*user_requested_cpu_list=*/NULL,
			    /*system_wide=*/false,
			    /*fake_pmu=*/true, metric_events, table);
}
/* Arguments for metricgroup__has_metric_callback(). */
struct metricgroup__has_metric_data {
	/* PMU name to match against, or "all". */
	const char *pmu;
	/* Metric or metric group name being searched for. */
	const char *metric;
};
1728 static int metricgroup__has_metric_callback(const struct pmu_metric *pm,
1729 const struct pmu_metrics_table *table __maybe_unused,
1730 void *vdata)
1732 struct metricgroup__has_metric_data *data = vdata;
1734 return match_pm_metric(pm, data->pmu, data->metric) ? 1 : 0;
1737 bool metricgroup__has_metric(const char *pmu, const char *metric)
1739 const struct pmu_metrics_table *table = pmu_metrics_table__find();
1740 struct metricgroup__has_metric_data data = {
1741 .pmu = pmu,
1742 .metric = metric,
1745 if (!table)
1746 return false;
1748 return pmu_metrics_table__for_each_metric(table, metricgroup__has_metric_callback, &data)
1749 ? true : false;
1752 static int metricgroup__topdown_max_level_callback(const struct pmu_metric *pm,
1753 const struct pmu_metrics_table *table __maybe_unused,
1754 void *data)
1756 unsigned int *max_level = data;
1757 unsigned int level;
1758 const char *p = strstr(pm->metric_group ?: "", "TopdownL");
1760 if (!p || p[8] == '\0')
1761 return 0;
1763 level = p[8] - '0';
1764 if (level > *max_level)
1765 *max_level = level;
1767 return 0;
1770 unsigned int metricgroups__topdown_max_level(void)
1772 unsigned int max_level = 0;
1773 const struct pmu_metrics_table *table = pmu_metrics_table__find();
1775 if (!table)
1776 return false;
1778 pmu_metrics_table__for_each_metric(table, metricgroup__topdown_max_level_callback,
1779 &max_level);
1780 return max_level;
1783 int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
1784 struct rblist *new_metric_events,
1785 struct rblist *old_metric_events)
1787 unsigned int i;
1789 for (i = 0; i < rblist__nr_entries(old_metric_events); i++) {
1790 struct rb_node *nd;
1791 struct metric_event *old_me, *new_me;
1792 struct metric_expr *old_expr, *new_expr;
1793 struct evsel *evsel;
1794 size_t alloc_size;
1795 int idx, nr;
1797 nd = rblist__entry(old_metric_events, i);
1798 old_me = container_of(nd, struct metric_event, nd);
1800 evsel = evlist__find_evsel(evlist, old_me->evsel->core.idx);
1801 if (!evsel)
1802 return -EINVAL;
1803 new_me = metricgroup__lookup(new_metric_events, evsel, true);
1804 if (!new_me)
1805 return -ENOMEM;
1807 pr_debug("copying metric event for cgroup '%s': %s (idx=%d)\n",
1808 cgrp ? cgrp->name : "root", evsel->name, evsel->core.idx);
1810 list_for_each_entry(old_expr, &old_me->head, nd) {
1811 new_expr = malloc(sizeof(*new_expr));
1812 if (!new_expr)
1813 return -ENOMEM;
1815 new_expr->metric_expr = old_expr->metric_expr;
1816 new_expr->metric_threshold = old_expr->metric_threshold;
1817 new_expr->metric_name = strdup(old_expr->metric_name);
1818 if (!new_expr->metric_name)
1819 return -ENOMEM;
1821 new_expr->metric_unit = old_expr->metric_unit;
1822 new_expr->runtime = old_expr->runtime;
1824 if (old_expr->metric_refs) {
1825 /* calculate number of metric_events */
1826 for (nr = 0; old_expr->metric_refs[nr].metric_name; nr++)
1827 continue;
1828 alloc_size = sizeof(*new_expr->metric_refs);
1829 new_expr->metric_refs = calloc(nr + 1, alloc_size);
1830 if (!new_expr->metric_refs) {
1831 free(new_expr);
1832 return -ENOMEM;
1835 memcpy(new_expr->metric_refs, old_expr->metric_refs,
1836 nr * alloc_size);
1837 } else {
1838 new_expr->metric_refs = NULL;
1841 /* calculate number of metric_events */
1842 for (nr = 0; old_expr->metric_events[nr]; nr++)
1843 continue;
1844 alloc_size = sizeof(*new_expr->metric_events);
1845 new_expr->metric_events = calloc(nr + 1, alloc_size);
1846 if (!new_expr->metric_events) {
1847 zfree(&new_expr->metric_refs);
1848 free(new_expr);
1849 return -ENOMEM;
1852 /* copy evsel in the same position */
1853 for (idx = 0; idx < nr; idx++) {
1854 evsel = old_expr->metric_events[idx];
1855 evsel = evlist__find_evsel(evlist, evsel->core.idx);
1856 if (evsel == NULL) {
1857 zfree(&new_expr->metric_events);
1858 zfree(&new_expr->metric_refs);
1859 free(new_expr);
1860 return -EINVAL;
1862 new_expr->metric_events[idx] = evsel;
1865 list_add(&new_expr->nd, &new_me->head);
1868 return 0;