// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017, Intel Corporation.
 */

/* Manage metrics and groups of metrics from JSON files */

#include "metricgroup.h"
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "strbuf.h"
#include "pmu.h"
#include "expr.h"
#include "rblist.h"
#include <string.h>
#include <errno.h>
#include "strlist.h"
#include <assert.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <subcmd/parse-options.h>
#include <api/fs/fs.h>
#include "util.h"
#include <asm/bug.h>
#include "cgroup.h"

struct metric_event *metricgroup__lookup(struct rblist *metric_events,
					 struct evsel *evsel,
					 bool create)
{
	struct rb_node *nd;
	struct metric_event me = {
		.evsel = evsel
	};

	if (!metric_events)
		return NULL;

	nd = rblist__find(metric_events, &me);
	if (nd)
		return container_of(nd, struct metric_event, nd);
	if (create) {
		rblist__add_node(metric_events, &me);
		nd = rblist__find(metric_events, &me);
		if (nd)
			return container_of(nd, struct metric_event, nd);
	}
	return NULL;
}
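
/*
 * Example (illustrative sketch, not part of this file): a consumer such as
 * the stat display code can look up the metric_event attached to a counter
 * and walk every metric expression that counter contributes to.
 * 'metric_events' and 'evsel' are assumed to be provided by the caller:
 *
 *	struct metric_event *me;
 *	struct metric_expr *mexp;
 *
 *	me = metricgroup__lookup(metric_events, evsel, false);
 *	if (me) {
 *		list_for_each_entry(mexp, &me->head, nd)
 *			pr_debug("%s: %s\n", mexp->metric_name,
 *				 mexp->metric_expr);
 *	}
 */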

static int metric_event_cmp(struct rb_node *rb_node, const void *entry)
{
	struct metric_event *a = container_of(rb_node,
					      struct metric_event,
					      nd);
	const struct metric_event *b = entry;

	if (a->evsel == b->evsel)
		return 0;
	if ((char *)a->evsel < (char *)b->evsel)
		return -1;
	return +1;
}

static struct rb_node *metric_event_new(struct rblist *rblist __maybe_unused,
					const void *entry)
{
	struct metric_event *me = malloc(sizeof(struct metric_event));

	if (!me)
		return NULL;
	memcpy(me, entry, sizeof(struct metric_event));
	me->evsel = ((struct metric_event *)entry)->evsel;
	INIT_LIST_HEAD(&me->head);
	return &me->nd;
}

static void metric_event_delete(struct rblist *rblist __maybe_unused,
				struct rb_node *rb_node)
{
	struct metric_event *me = container_of(rb_node, struct metric_event, nd);
	struct metric_expr *expr, *tmp;

	list_for_each_entry_safe(expr, tmp, &me->head, nd) {
		free(expr->metric_refs);
		free(expr->metric_events);
		free(expr);
	}

	free(me);
}

static void metricgroup__rblist_init(struct rblist *metric_events)
{
	rblist__init(metric_events);
	metric_events->node_cmp = metric_event_cmp;
	metric_events->node_new = metric_event_new;
	metric_events->node_delete = metric_event_delete;
}

void metricgroup__rblist_exit(struct rblist *metric_events)
{
	rblist__exit(metric_events);
}

/*
 * A node in the list of referenced metrics. metric_expr
 * is held as a convenience to avoid a search through the
 * metric list.
 */
struct metric_ref_node {
	const char *metric_name;
	const char *metric_expr;
	struct list_head list;
};

struct metric {
	struct list_head nd;
	struct expr_parse_ctx pctx;
	const char *metric_name;
	const char *metric_expr;
	const char *metric_unit;
	struct list_head metric_refs;
	int metric_refs_cnt;
	int runtime;
	bool has_constraint;
};

#define RECURSION_ID_MAX 1000

struct expr_ids {
	struct expr_id	id[RECURSION_ID_MAX];
	int		cnt;
};

static struct expr_id *expr_ids__alloc(struct expr_ids *ids)
{
	if (ids->cnt >= RECURSION_ID_MAX)
		return NULL;
	return &ids->id[ids->cnt++];
}

static void expr_ids__exit(struct expr_ids *ids)
{
	int i;

	for (i = 0; i < ids->cnt; i++)
		free(ids->id[i].id);
}

static bool contains_event(struct evsel **metric_events, int num_events,
			   const char *event_name)
{
	int i;

	for (i = 0; i < num_events; i++) {
		if (!strcmp(metric_events[i]->name, event_name))
			return true;
	}
	return false;
}

/**
 * Find a group of events in perf_evlist that correspond to those from a parsed
 * metric expression. Note, as find_evsel_group is called in the same order as
 * perf_evlist was constructed, metric_no_merge doesn't need to test for
 * underfilling a group.
 * @perf_evlist: a list of events something like: {metric1 leader, metric1
 * sibling, metric1 sibling}:W,duration_time,{metric2 leader, metric2 sibling,
 * metric2 sibling}:W,duration_time
 * @pctx: the parse context for the metric expression.
 * @metric_no_merge: don't attempt to share events for the metric with other
 * metrics.
 * @has_constraint: is there a constraint on the group of events? In which case
 * the events won't be grouped.
 * @metric_events: out argument, null terminated array of evsel's associated
 * with the metric.
 * @evlist_used: in/out argument, bitmap tracking which evlist events are used.
 * @return the first metric event or NULL on failure.
 */
static struct evsel *find_evsel_group(struct evlist *perf_evlist,
				      struct expr_parse_ctx *pctx,
				      bool metric_no_merge,
				      bool has_constraint,
				      struct evsel **metric_events,
				      unsigned long *evlist_used)
{
	struct evsel *ev, *current_leader = NULL;
	struct expr_id_data *val_ptr;
	int i = 0, matched_events = 0, events_to_match;
	const int idnum = (int)hashmap__size(&pctx->ids);

	/*
	 * duration_time is always grouped separately, when events are grouped
	 * (ie has_constraint is false) then ignore it in the matching loop and
	 * add it to metric_events at the end.
	 */
	if (!has_constraint &&
	    hashmap__find(&pctx->ids, "duration_time", (void **)&val_ptr))
		events_to_match = idnum - 1;
	else
		events_to_match = idnum;

	evlist__for_each_entry (perf_evlist, ev) {
		/*
		 * Events with a constraint aren't grouped and match the first
		 * events available.
		 */
		if (has_constraint && ev->weak_group)
			continue;
		/* Ignore event if already used and merging is disabled. */
		if (metric_no_merge && test_bit(ev->idx, evlist_used))
			continue;
		if (!has_constraint && ev->leader != current_leader) {
			/*
			 * Start of a new group, discard the whole match and
			 * start again.
			 */
			matched_events = 0;
			memset(metric_events, 0,
			       sizeof(struct evsel *) * idnum);
			current_leader = ev->leader;
		}
		/*
		 * Check for duplicate events with the same name. For example,
		 * uncore_imc/cas_count_read/ will turn into 6 events per socket
		 * on skylakex. Only the first such event is placed in
		 * metric_events. If events aren't grouped then this also
		 * ensures that the same event in different sibling groups
		 * aren't both added to metric_events.
		 */
		if (contains_event(metric_events, matched_events, ev->name))
			continue;
		/* Does this event belong to the parse context? */
		if (hashmap__find(&pctx->ids, ev->name, (void **)&val_ptr))
			metric_events[matched_events++] = ev;

		if (matched_events == events_to_match)
			break;
	}

	if (events_to_match != idnum) {
		/* Add the first duration_time. */
		evlist__for_each_entry(perf_evlist, ev) {
			if (!strcmp(ev->name, "duration_time")) {
				metric_events[matched_events++] = ev;
				break;
			}
		}
	}

	if (matched_events != idnum) {
		/* Not a whole match */
		return NULL;
	}

	metric_events[idnum] = NULL;

	for (i = 0; i < idnum; i++) {
		ev = metric_events[i];
		/* Don't free the used events. */
		set_bit(ev->idx, evlist_used);
		/*
		 * The metric leader points to the identically named event in
		 * metric_events.
		 */
		ev->metric_leader = ev;
		/*
		 * Mark two events with identical names in the same group (or
		 * globally) as being in use as uncore events may be duplicated
		 * for each pmu. Set the metric leader of such events to be the
		 * event that appears in metric_events.
		 */
		evlist__for_each_entry_continue(perf_evlist, ev) {
			/*
			 * If events are grouped then the search can terminate
			 * when the group is left.
			 */
			if (!has_constraint &&
			    ev->leader != metric_events[i]->leader &&
			    !strcmp(ev->leader->pmu_name,
				    metric_events[i]->leader->pmu_name))
				break;
			if (!strcmp(metric_events[i]->name, ev->name)) {
				set_bit(ev->idx, evlist_used);
				ev->metric_leader = metric_events[i];
			}
		}
	}

	return metric_events[0];
}
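
/*
 * Worked example (illustrative, the event names are only assumed): for a
 * metric whose parse context contains the ids {instructions, cycles} and an
 * evlist built from "{instructions,cycles}:W", the loop above matches both
 * members of the weak group, stores them in metric_events[] (NULL
 * terminated), marks their bits in evlist_used and returns the first
 * matched evsel ('instructions').
 */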

static int metricgroup__setup_events(struct list_head *groups,
				     bool metric_no_merge,
				     struct evlist *perf_evlist,
				     struct rblist *metric_events_list)
{
	struct metric_event *me;
	struct metric_expr *expr;
	int i = 0;
	int ret = 0;
	struct metric *m;
	struct evsel *evsel, *tmp;
	unsigned long *evlist_used;

	evlist_used = bitmap_alloc(perf_evlist->core.nr_entries);
	if (!evlist_used)
		return -ENOMEM;

	list_for_each_entry (m, groups, nd) {
		struct evsel **metric_events;
		struct metric_ref *metric_refs = NULL;

		metric_events = calloc(sizeof(void *),
				       hashmap__size(&m->pctx.ids) + 1);
		if (!metric_events) {
			ret = -ENOMEM;
			break;
		}
		evsel = find_evsel_group(perf_evlist, &m->pctx,
					 metric_no_merge,
					 m->has_constraint, metric_events,
					 evlist_used);
		if (!evsel) {
			pr_debug("Cannot resolve %s: %s\n",
				 m->metric_name, m->metric_expr);
			free(metric_events);
			continue;
		}
		for (i = 0; metric_events[i]; i++)
			metric_events[i]->collect_stat = true;
		me = metricgroup__lookup(metric_events_list, evsel, true);
		if (!me) {
			ret = -ENOMEM;
			free(metric_events);
			break;
		}
		expr = malloc(sizeof(struct metric_expr));
		if (!expr) {
			ret = -ENOMEM;
			free(metric_events);
			break;
		}

		/*
		 * Collect and store the referenced nested expressions
		 * for metric processing.
		 */
		if (m->metric_refs_cnt) {
			struct metric_ref_node *ref;

			metric_refs = zalloc(sizeof(struct metric_ref) * (m->metric_refs_cnt + 1));
			if (!metric_refs) {
				ret = -ENOMEM;
				free(metric_events);
				free(expr);
				break;
			}

			i = 0;
			list_for_each_entry(ref, &m->metric_refs, list) {
				/*
				 * Intentionally passing just const char pointers,
				 * originally from 'struct pmu_event' object.
				 * We don't need to change them, so there's no
				 * need to create our own copy.
				 */
				metric_refs[i].metric_name = ref->metric_name;
				metric_refs[i].metric_expr = ref->metric_expr;
				i++;
			}
		}

		expr->metric_refs = metric_refs;
		expr->metric_expr = m->metric_expr;
		expr->metric_name = m->metric_name;
		expr->metric_unit = m->metric_unit;
		expr->metric_events = metric_events;
		expr->runtime = m->runtime;
		list_add(&expr->nd, &me->head);
	}

	evlist__for_each_entry_safe(perf_evlist, tmp, evsel) {
		if (!test_bit(evsel->idx, evlist_used)) {
			evlist__remove(perf_evlist, evsel);
			evsel__delete(evsel);
		}
	}
	bitmap_free(evlist_used);

	return ret;
}

static bool match_metric(const char *n, const char *list)
{
	int len;
	char *m;

	if (!list)
		return false;
	if (!strcmp(list, "all"))
		return true;
	if (!n)
		return !strcasecmp(list, "No_group");
	len = strlen(list);
	m = strcasestr(n, list);
	if (!m)
		return false;
	if ((m == n || m[-1] == ';' || m[-1] == ' ') &&
	    (m[len] == 0 || m[len] == ';'))
		return true;
	return false;
}
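
/*
 * Matching semantics (illustrative examples):
 *
 *	match_metric("TopdownL1;Pipeline", "topdownl1")	-> true
 *		(case-insensitive match of a whole ';' separated element)
 *	match_metric("TopdownL1", "Top")		-> false
 *		(a prefix of an element is not enough)
 *	match_metric(NULL, "No_group")			-> true
 *	match_metric("anything", "all")			-> true
 */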

static bool match_pe_metric(struct pmu_event *pe, const char *metric)
{
	return match_metric(pe->metric_group, metric) ||
	       match_metric(pe->metric_name, metric);
}

struct mep {
	struct rb_node nd;
	const char *name;
	struct strlist *metrics;
};

static int mep_cmp(struct rb_node *rb_node, const void *entry)
{
	struct mep *a = container_of(rb_node, struct mep, nd);
	struct mep *b = (struct mep *)entry;

	return strcmp(a->name, b->name);
}

static struct rb_node *mep_new(struct rblist *rl __maybe_unused,
			       const void *entry)
{
	struct mep *me = malloc(sizeof(struct mep));

	if (!me)
		return NULL;
	memcpy(me, entry, sizeof(struct mep));
	me->name = strdup(me->name);
	if (!me->name)
		goto out_me;
	me->metrics = strlist__new(NULL, NULL);
	if (!me->metrics)
		goto out_name;
	return &me->nd;
out_name:
	zfree(&me->name);
out_me:
	free(me);
	return NULL;
}

static struct mep *mep_lookup(struct rblist *groups, const char *name)
{
	struct rb_node *nd;
	struct mep me = {
		.name = name
	};
	nd = rblist__find(groups, &me);
	if (nd)
		return container_of(nd, struct mep, nd);
	rblist__add_node(groups, &me);
	nd = rblist__find(groups, &me);
	if (nd)
		return container_of(nd, struct mep, nd);
	return NULL;
}

static void mep_delete(struct rblist *rl __maybe_unused,
		       struct rb_node *nd)
{
	struct mep *me = container_of(nd, struct mep, nd);

	strlist__delete(me->metrics);
	zfree(&me->name);
	free(me);
}

static void metricgroup__print_strlist(struct strlist *metrics, bool raw)
{
	struct str_node *sn;
	int n = 0;

	strlist__for_each_entry (sn, metrics) {
		if (raw)
			printf("%s%s", n > 0 ? " " : "", sn->s);
		else
			printf("  %s\n", sn->s);
		n++;
	}
	if (raw)
		putchar('\n');
}

static int metricgroup__print_pmu_event(struct pmu_event *pe,
					bool metricgroups, char *filter,
					bool raw, bool details,
					struct rblist *groups,
					struct strlist *metriclist)
{
	const char *g;
	char *omg, *mg;

	g = pe->metric_group;
	if (!g && pe->metric_name) {
		if (pe->name)
			return 0;
		g = "No_group";
	}

	if (!g)
		return 0;

	mg = strdup(g);

	if (!mg)
		return -ENOMEM;
	omg = mg;
	while ((g = strsep(&mg, ";")) != NULL) {
		struct mep *me;
		char *s;

		g = skip_spaces(g);
		if (*g == 0)
			g = "No_group";
		if (filter && !strstr(g, filter))
			continue;
		if (raw)
			s = (char *)pe->metric_name;
		else {
			if (asprintf(&s, "%s\n%*s%s]",
				     pe->metric_name, 8, "[", pe->desc) < 0)
				return -1;
			if (details) {
				if (asprintf(&s, "%s\n%*s%s]",
					     s, 8, "[", pe->metric_expr) < 0)
					return -1;
			}
		}

		if (!s)
			continue;

		if (!metricgroups) {
			strlist__add(metriclist, s);
		} else {
			me = mep_lookup(groups, g);
			if (!me)
				continue;
			strlist__add(me->metrics, s);
		}

		if (!raw)
			free(s);
	}
	free(omg);

	return 0;
}

struct metricgroup_print_sys_idata {
	struct strlist *metriclist;
	char *filter;
	struct rblist *groups;
	bool metricgroups;
	bool raw;
	bool details;
};

typedef int (*metricgroup_sys_event_iter_fn)(struct pmu_event *pe, void *);

struct metricgroup_iter_data {
	metricgroup_sys_event_iter_fn fn;
	void *data;
};

static int metricgroup__sys_event_iter(struct pmu_event *pe, void *data)
{
	struct metricgroup_iter_data *d = data;
	struct perf_pmu *pmu = NULL;

	if (!pe->metric_expr || !pe->compat)
		return 0;

	while ((pmu = perf_pmu__scan(pmu))) {

		if (!pmu->id || strcmp(pmu->id, pe->compat))
			continue;

		return d->fn(pe, d->data);
	}

	return 0;
}

static int metricgroup__print_sys_event_iter(struct pmu_event *pe, void *data)
{
	struct metricgroup_print_sys_idata *d = data;

	return metricgroup__print_pmu_event(pe, d->metricgroups, d->filter, d->raw,
					    d->details, d->groups, d->metriclist);
}

void metricgroup__print(bool metrics, bool metricgroups, char *filter,
			bool raw, bool details)
{
	struct pmu_events_map *map = perf_pmu__find_map(NULL);
	struct pmu_event *pe;
	int i;
	struct rblist groups;
	struct rb_node *node, *next;
	struct strlist *metriclist = NULL;

	if (!metricgroups) {
		metriclist = strlist__new(NULL, NULL);
		if (!metriclist)
			return;
	}

	rblist__init(&groups);
	groups.node_new = mep_new;
	groups.node_cmp = mep_cmp;
	groups.node_delete = mep_delete;
	for (i = 0; map; i++) {
		pe = &map->table[i];

		if (!pe->name && !pe->metric_group && !pe->metric_name)
			break;
		if (!pe->metric_expr)
			continue;
		if (metricgroup__print_pmu_event(pe, metricgroups, filter,
						 raw, details, &groups,
						 metriclist) < 0)
			return;
	}

	{
		struct metricgroup_iter_data data = {
			.fn = metricgroup__print_sys_event_iter,
			.data = (void *) &(struct metricgroup_print_sys_idata){
				.metriclist = metriclist,
				.metricgroups = metricgroups,
				.filter = filter,
				.raw = raw,
				.details = details,
				.groups = &groups,
			},
		};

		pmu_for_each_sys_event(metricgroup__sys_event_iter, &data);
	}

	if (!filter || !rblist__empty(&groups)) {
		if (metricgroups && !raw)
			printf("\nMetric Groups:\n\n");
		else if (metrics && !raw)
			printf("\nMetrics:\n\n");
	}

	for (node = rb_first_cached(&groups.entries); node; node = next) {
		struct mep *me = container_of(node, struct mep, nd);

		if (metricgroups)
			printf("%s%s%s", me->name, metrics && !raw ? ":" : "", raw ? " " : "\n");
		if (metrics)
			metricgroup__print_strlist(me->metrics, raw);
		next = rb_next(node);
		rblist__remove_node(&groups, node);
	}
	if (!metricgroups)
		metricgroup__print_strlist(metriclist, raw);
	strlist__delete(metriclist);
}

static void metricgroup__add_metric_weak_group(struct strbuf *events,
					       struct expr_parse_ctx *ctx)
{
	struct hashmap_entry *cur;
	size_t bkt;
	bool no_group = true, has_duration = false;

	hashmap__for_each_entry((&ctx->ids), cur, bkt) {
		pr_debug("found event %s\n", (const char *)cur->key);
		/*
		 * Duration time maps to a software event and can make
		 * groups not count. Always use it outside a
		 * group.
		 */
		if (!strcmp(cur->key, "duration_time")) {
			has_duration = true;
			continue;
		}
		strbuf_addf(events, "%s%s",
			    no_group ? "{" : ",",
			    (const char *)cur->key);
		no_group = false;
	}
	if (!no_group) {
		strbuf_addf(events, "}:W");
		if (has_duration)
			strbuf_addf(events, ",duration_time");
	} else if (has_duration)
		strbuf_addf(events, "duration_time");
}
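
/*
 * Example (illustrative, event names assumed): for a parse context
 * containing inst_retired.any, cpu_clk_unhalted.thread and duration_time,
 * the string appended to 'events' is
 * "{inst_retired.any,cpu_clk_unhalted.thread}:W,duration_time", with the
 * order of the grouped events following the unspecified hashmap iteration
 * order.
 */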

static void metricgroup__add_metric_non_group(struct strbuf *events,
					      struct expr_parse_ctx *ctx)
{
	struct hashmap_entry *cur;
	size_t bkt;
	bool first = true;

	hashmap__for_each_entry((&ctx->ids), cur, bkt) {
		if (!first)
			strbuf_addf(events, ",");
		strbuf_addf(events, "%s", (const char *)cur->key);
		first = false;
	}
}

static void metricgroup___watchdog_constraint_hint(const char *name, bool foot)
{
	static bool violate_nmi_constraint;

	if (!foot) {
		pr_warning("Splitting metric group %s into standalone metrics.\n", name);
		violate_nmi_constraint = true;
		return;
	}

	if (!violate_nmi_constraint)
		return;

	pr_warning("Try disabling the NMI watchdog to comply with the NO_NMI_WATCHDOG metric constraint:\n"
		   "    echo 0 > /proc/sys/kernel/nmi_watchdog\n"
		   "    perf stat ...\n"
		   "    echo 1 > /proc/sys/kernel/nmi_watchdog\n");
}

static bool metricgroup__has_constraint(struct pmu_event *pe)
{
	if (!pe->metric_constraint)
		return false;

	if (!strcmp(pe->metric_constraint, "NO_NMI_WATCHDOG") &&
	    sysctl__nmi_watchdog_enabled()) {
		metricgroup___watchdog_constraint_hint(pe->metric_name, false);
		return true;
	}

	return false;
}

int __weak arch_get_runtimeparam(struct pmu_event *pe __maybe_unused)
{
	return 1;
}
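
/*
 * An architecture can override the weak default above to control how many
 * copies of a metric containing the '?' runtime parameter are created.
 * Illustrative sketch only; the sysfs path is an assumed example, not a
 * real interface:
 *
 *	int arch_get_runtimeparam(struct pmu_event *pe __maybe_unused)
 *	{
 *		int count;
 *
 *		// e.g. one metric instance per socket
 *		return sysfs__read_int("devices/some_pmu/sockets", &count) < 0
 *			? 1 : count;
 *	}
 */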

struct metricgroup_add_iter_data {
	struct list_head *metric_list;
	const char *metric;
	struct metric **m;
	struct expr_ids *ids;
	int *ret;
	bool *has_match;
	bool metric_no_group;
};

static int __add_metric(struct list_head *metric_list,
			struct pmu_event *pe,
			bool metric_no_group,
			int runtime,
			struct metric **mp,
			struct expr_id *parent,
			struct expr_ids *ids)
{
	struct metric_ref_node *ref;
	struct metric *m;

	if (*mp == NULL) {
		/*
		 * We got in here for the parent group,
		 * allocate it and put it on the list.
		 */
		m = zalloc(sizeof(*m));
		if (!m)
			return -ENOMEM;

		expr__ctx_init(&m->pctx);
		m->metric_name = pe->metric_name;
		m->metric_expr = pe->metric_expr;
		m->metric_unit = pe->unit;
		m->runtime = runtime;
		m->has_constraint = metric_no_group || metricgroup__has_constraint(pe);
		INIT_LIST_HEAD(&m->metric_refs);
		m->metric_refs_cnt = 0;

		parent = expr_ids__alloc(ids);
		if (!parent) {
			free(m);
			return -EINVAL;
		}

		parent->id = strdup(pe->metric_name);
		if (!parent->id) {
			free(m);
			return -ENOMEM;
		}
		*mp = m;
	} else {
		/*
		 * We got here for the referenced metric, via the
		 * recursive metricgroup__add_metric call, add
		 * it to the parent group.
		 */
		m = *mp;

		ref = malloc(sizeof(*ref));
		if (!ref)
			return -ENOMEM;

		/*
		 * Intentionally passing just const char pointers,
		 * from 'pe' object, so they never go away. We don't
		 * need to change them, so there's no need to create
		 * our own copy.
		 */
		ref->metric_name = pe->metric_name;
		ref->metric_expr = pe->metric_expr;

		list_add(&ref->list, &m->metric_refs);
		m->metric_refs_cnt++;
	}

	/* Force all found IDs in metric to have us as parent ID. */
	WARN_ON_ONCE(!parent);
	m->pctx.parent = parent;

	/*
	 * For both the parent and referenced metrics, we parse
	 * all the metric's IDs and add them to the parent context.
	 */
	if (expr__find_other(pe->metric_expr, NULL, &m->pctx, runtime) < 0) {
		if (m->metric_refs_cnt == 0) {
			expr__ctx_clear(&m->pctx);
			free(m);
			*mp = NULL;
		}
		return -EINVAL;
	}

	/*
	 * We add new group only in the 'parent' call,
	 * so bail out for referenced metric case.
	 */
	if (m->metric_refs_cnt)
		return 0;

	if (list_empty(metric_list))
		list_add(&m->nd, metric_list);
	else {
		struct list_head *pos;

		/* Place the largest groups at the front. */
		list_for_each_prev(pos, metric_list) {
			struct metric *old = list_entry(pos, struct metric, nd);

			if (hashmap__size(&m->pctx.ids) <=
			    hashmap__size(&old->pctx.ids))
				break;
		}
		list_add(&m->nd, pos);
	}

	return 0;
}

#define map_for_each_event(__pe, __idx, __map)					\
	if (__map)								\
		for (__idx = 0, __pe = &__map->table[__idx];			\
		     __pe->name || __pe->metric_group || __pe->metric_name;	\
		     __pe = &__map->table[++__idx])

#define map_for_each_metric(__pe, __idx, __map, __metric)		\
	map_for_each_event(__pe, __idx, __map)				\
		if (__pe->metric_expr &&				\
		    (match_metric(__pe->metric_group, __metric) ||	\
		     match_metric(__pe->metric_name, __metric)))
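
/*
 * Usage sketch (illustrative): iterate every metric in 'map' whose name or
 * group matches a user supplied string:
 *
 *	struct pmu_event *pe;
 *	int i;
 *
 *	map_for_each_metric(pe, i, map, "TopdownL1")
 *		pr_debug("matched metric %s\n", pe->metric_name);
 */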

static struct pmu_event *find_metric(const char *metric, struct pmu_events_map *map)
{
	struct pmu_event *pe;
	int i;

	map_for_each_event(pe, i, map) {
		if (match_metric(pe->metric_name, metric))
			return pe;
	}

	return NULL;
}

static int recursion_check(struct metric *m, const char *id, struct expr_id **parent,
			   struct expr_ids *ids)
{
	struct expr_id_data *data;
	struct expr_id *p;
	int ret;

	/*
	 * We get the parent referenced by 'id' argument and
	 * traverse through all the parent object IDs to check
	 * if we already processed 'id', if we did, it's recursion
	 * and we fail.
	 */
	ret = expr__get_id(&m->pctx, id, &data);
	if (ret)
		return ret;

	p = expr_id_data__parent(data);

	while (p->parent) {
		if (!strcmp(p->id, id)) {
			pr_err("failed: recursion detected for %s\n", id);
			return -1;
		}
		p = p->parent;
	}

	/*
	 * If we are over the limit of static entries, the metric
	 * is too difficult/nested to process, fail as well.
	 */
	p = expr_ids__alloc(ids);
	if (!p) {
		pr_err("failed: too many nested metrics\n");
		return -EINVAL;
	}

	p->id = strdup(id);
	p->parent = expr_id_data__parent(data);
	*parent = p;

	return p->id ? 0 : -ENOMEM;
}
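
/*
 * Example (illustrative): with mutually recursive definitions such as
 * M1 = "M2 + 1" and M2 = "M1 + 1", the parent chain built while resolving
 * the nested references eventually presents an id that is already on the
 * chain, and the "recursion detected" error above stops the resolution
 * instead of looping forever.
 */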

static int add_metric(struct list_head *metric_list,
		      struct pmu_event *pe,
		      bool metric_no_group,
		      struct metric **mp,
		      struct expr_id *parent,
		      struct expr_ids *ids);

static int __resolve_metric(struct metric *m,
			    bool metric_no_group,
			    struct list_head *metric_list,
			    struct pmu_events_map *map,
			    struct expr_ids *ids)
{
	struct hashmap_entry *cur;
	size_t bkt;
	bool all;
	int ret;

	/*
	 * Iterate all the parsed IDs and if there's a metric,
	 * add it to the context.
	 */
	do {
		all = true;
		hashmap__for_each_entry((&m->pctx.ids), cur, bkt) {
			struct expr_id *parent;
			struct pmu_event *pe;

			pe = find_metric(cur->key, map);
			if (!pe)
				continue;

			ret = recursion_check(m, cur->key, &parent, ids);
			if (ret)
				return ret;

			all = false;
			/* The metric key itself needs to go out.. */
			expr__del_id(&m->pctx, cur->key);

			/* ... and it gets resolved to the parent context. */
			ret = add_metric(metric_list, pe, metric_no_group, &m, parent, ids);
			if (ret)
				return ret;

			/*
			 * We added new metric to hashmap, so we need
			 * to break the iteration and start over.
			 */
			break;
		}
	} while (!all);

	return 0;
}

static int resolve_metric(bool metric_no_group,
			  struct list_head *metric_list,
			  struct pmu_events_map *map,
			  struct expr_ids *ids)
{
	struct metric *m;
	int err;

	list_for_each_entry(m, metric_list, nd) {
		err = __resolve_metric(m, metric_no_group, metric_list, map, ids);
		if (err)
			return err;
	}
	return 0;
}

static int add_metric(struct list_head *metric_list,
		      struct pmu_event *pe,
		      bool metric_no_group,
		      struct metric **m,
		      struct expr_id *parent,
		      struct expr_ids *ids)
{
	struct metric *orig = *m;
	int ret = 0;

	pr_debug("metric expr %s for %s\n", pe->metric_expr, pe->metric_name);

	if (!strstr(pe->metric_expr, "?")) {
		ret = __add_metric(metric_list, pe, metric_no_group, 1, m, parent, ids);
	} else {
		int j, count;

		count = arch_get_runtimeparam(pe);

		/*
		 * This loop creates one copy of the metric per runtime
		 * parameter value and adds them all to metric_list.
		 */
		for (j = 0; j < count && !ret; j++, *m = orig)
			ret = __add_metric(metric_list, pe, metric_no_group, j, m, parent, ids);
	}

	return ret;
}
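
/*
 * Example (illustrative, the event string is assumed): if a metric
 * expression contains the runtime parameter '?', e.g.
 * "some_pmu/event\,chip=?/", and arch_get_runtimeparam() returns 2, the
 * loop above calls __add_metric() with runtime 0 and 1, so two copies of
 * the metric (one per parameter value) end up on metric_list.
 */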

static int metricgroup__add_metric_sys_event_iter(struct pmu_event *pe,
						  void *data)
{
	struct metricgroup_add_iter_data *d = data;
	int ret;

	if (!match_pe_metric(pe, d->metric))
		return 0;

	ret = add_metric(d->metric_list, pe, d->metric_no_group, d->m, NULL, d->ids);
	if (ret)
		return ret;

	ret = resolve_metric(d->metric_no_group,
			     d->metric_list, NULL, d->ids);
	if (ret)
		return ret;

	*(d->has_match) = true;

	return *d->ret;
}

static int metricgroup__add_metric(const char *metric, bool metric_no_group,
				   struct strbuf *events,
				   struct list_head *metric_list,
				   struct pmu_events_map *map)
{
	struct expr_ids ids = { .cnt = 0, };
	struct pmu_event *pe;
	struct metric *m;
	LIST_HEAD(list);
	int i, ret;
	bool has_match = false;

	map_for_each_metric(pe, i, map, metric) {
		has_match = true;
		m = NULL;

		ret = add_metric(&list, pe, metric_no_group, &m, NULL, &ids);
		if (ret)
			goto out;

		/*
		 * Process any possible referenced metrics
		 * included in the expression.
		 */
		ret = resolve_metric(metric_no_group,
				     &list, map, &ids);
		if (ret)
			goto out;
	}

	{
		struct metricgroup_iter_data data = {
			.fn = metricgroup__add_metric_sys_event_iter,
			.data = (void *) &(struct metricgroup_add_iter_data) {
				.metric_list = &list,
				.metric = metric,
				.metric_no_group = metric_no_group,
				.m = &m,
				.ids = &ids,
				.has_match = &has_match,
				.ret = &ret,
			},
		};

		pmu_for_each_sys_event(metricgroup__sys_event_iter, &data);
	}
	/* End of pmu events. */
	if (!has_match) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(m, &list, nd) {
		if (events->len > 0)
			strbuf_addf(events, ",");

		if (m->has_constraint) {
			metricgroup__add_metric_non_group(events,
							  &m->pctx);
		} else {
			metricgroup__add_metric_weak_group(events,
							   &m->pctx);
		}
	}

out:
	/*
	 * Splice into metric_list so the metrics can be released
	 * even on failure.
	 */
	list_splice(&list, metric_list);
	expr_ids__exit(&ids);
	return ret;
}

static int metricgroup__add_metric_list(const char *list, bool metric_no_group,
					struct strbuf *events,
					struct list_head *metric_list,
					struct pmu_events_map *map)
{
	char *llist, *nlist, *p;
	int ret = -EINVAL;

	nlist = strdup(list);
	if (!nlist)
		return -ENOMEM;
	llist = nlist;

	strbuf_init(events, 100);
	strbuf_addf(events, "%s", "");

	while ((p = strsep(&llist, ",")) != NULL) {
		ret = metricgroup__add_metric(p, metric_no_group, events,
					      metric_list, map);
		if (ret == -EINVAL) {
			fprintf(stderr, "Cannot find metric or group `%s'\n",
				p);
			break;
		}
	}
	free(nlist);

	if (!ret)
		metricgroup___watchdog_constraint_hint(NULL, true);

	return ret;
}
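
/*
 * Example (illustrative, the metric names are assumed): for the option
 * value "IPC,TopdownL1" the loop above hands "IPC" and then "TopdownL1" to
 * metricgroup__add_metric(), and 'events' accumulates the comma separated
 * event groups for all of them, ready to be fed to the event parser.
 */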

static void metric__free_refs(struct metric *metric)
{
	struct metric_ref_node *ref, *tmp;

	list_for_each_entry_safe(ref, tmp, &metric->metric_refs, list) {
		list_del(&ref->list);
		free(ref);
	}
}

static void metricgroup__free_metrics(struct list_head *metric_list)
{
	struct metric *m, *tmp;

	list_for_each_entry_safe (m, tmp, metric_list, nd) {
		metric__free_refs(m);
		expr__ctx_clear(&m->pctx);
		list_del_init(&m->nd);
		free(m);
	}
}

static int parse_groups(struct evlist *perf_evlist, const char *str,
			bool metric_no_group,
			bool metric_no_merge,
			struct perf_pmu *fake_pmu,
			struct rblist *metric_events,
			struct pmu_events_map *map)
{
	struct parse_events_error parse_error;
	struct strbuf extra_events;
	LIST_HEAD(metric_list);
	int ret;

	if (metric_events->nr_entries == 0)
		metricgroup__rblist_init(metric_events);
	ret = metricgroup__add_metric_list(str, metric_no_group,
					   &extra_events, &metric_list, map);
	if (ret)
		goto out;
	pr_debug("adding %s\n", extra_events.buf);
	bzero(&parse_error, sizeof(parse_error));
	ret = __parse_events(perf_evlist, extra_events.buf, &parse_error, fake_pmu);
	if (ret) {
		parse_events_print_error(&parse_error, extra_events.buf);
		goto out;
	}
	ret = metricgroup__setup_events(&metric_list, metric_no_merge,
					perf_evlist, metric_events);
out:
	metricgroup__free_metrics(&metric_list);
	strbuf_release(&extra_events);
	return ret;
}

int metricgroup__parse_groups(const struct option *opt,
			      const char *str,
			      bool metric_no_group,
			      bool metric_no_merge,
			      struct rblist *metric_events)
{
	struct evlist *perf_evlist = *(struct evlist **)opt->value;
	struct pmu_events_map *map = perf_pmu__find_map(NULL);

	return parse_groups(perf_evlist, str, metric_no_group,
			    metric_no_merge, NULL, metric_events, map);
}
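
/*
 * Usage sketch (illustrative, simplified from how a tool such as perf stat
 * wires up its -M option; details of the real option table may differ).
 * 'metric_events' and 'evsel_list' are assumed to live in the caller:
 *
 *	static int parse_metric_groups(const struct option *opt,
 *				       const char *str,
 *				       int unset __maybe_unused)
 *	{
 *		return metricgroup__parse_groups(opt, str,
 *						 false, false,
 *						 &metric_events);
 *	}
 *
 *	OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list",
 *		     "monitor specified metrics or metric groups",
 *		     parse_metric_groups),
 */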

int metricgroup__parse_groups_test(struct evlist *evlist,
				   struct pmu_events_map *map,
				   const char *str,
				   bool metric_no_group,
				   bool metric_no_merge,
				   struct rblist *metric_events)
{
	return parse_groups(evlist, str, metric_no_group,
			    metric_no_merge, &perf_pmu__fake, metric_events, map);
}

bool metricgroup__has_metric(const char *metric)
{
	struct pmu_events_map *map = perf_pmu__find_map(NULL);
	struct pmu_event *pe;
	int i;

	if (!map)
		return false;

	for (i = 0; ; i++) {
		pe = &map->table[i];

		if (!pe->name && !pe->metric_group && !pe->metric_name)
			break;
		if (!pe->metric_expr)
			continue;
		if (match_metric(pe->metric_name, metric))
			return true;
	}
	return false;
}

int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
				    struct rblist *new_metric_events,
				    struct rblist *old_metric_events)
{
	unsigned i;

	for (i = 0; i < rblist__nr_entries(old_metric_events); i++) {
		struct rb_node *nd;
		struct metric_event *old_me, *new_me;
		struct metric_expr *old_expr, *new_expr;
		struct evsel *evsel;
		size_t alloc_size;
		int idx, nr;

		nd = rblist__entry(old_metric_events, i);
		old_me = container_of(nd, struct metric_event, nd);

		evsel = evlist__find_evsel(evlist, old_me->evsel->idx);
		if (!evsel)
			return -EINVAL;
		new_me = metricgroup__lookup(new_metric_events, evsel, true);
		if (!new_me)
			return -ENOMEM;

		pr_debug("copying metric event for cgroup '%s': %s (idx=%d)\n",
			 cgrp ? cgrp->name : "root", evsel->name, evsel->idx);

		list_for_each_entry(old_expr, &old_me->head, nd) {
			new_expr = malloc(sizeof(*new_expr));
			if (!new_expr)
				return -ENOMEM;

			new_expr->metric_expr = old_expr->metric_expr;
			new_expr->metric_name = old_expr->metric_name;
			new_expr->metric_unit = old_expr->metric_unit;
			new_expr->runtime = old_expr->runtime;

			if (old_expr->metric_refs) {
				/* calculate number of metric_refs */
				for (nr = 0; old_expr->metric_refs[nr].metric_name; nr++)
					continue;
				alloc_size = sizeof(*new_expr->metric_refs);
				new_expr->metric_refs = calloc(nr + 1, alloc_size);
				if (!new_expr->metric_refs) {
					free(new_expr);
					return -ENOMEM;
				}

				memcpy(new_expr->metric_refs, old_expr->metric_refs,
				       nr * alloc_size);
			} else {
				new_expr->metric_refs = NULL;
			}

			/* calculate number of metric_events */
			for (nr = 0; old_expr->metric_events[nr]; nr++)
				continue;
			alloc_size = sizeof(*new_expr->metric_events);
			new_expr->metric_events = calloc(nr + 1, alloc_size);
			if (!new_expr->metric_events) {
				free(new_expr->metric_refs);
				free(new_expr);
				return -ENOMEM;
			}

			/* copy evsel in the same position */
			for (idx = 0; idx < nr; idx++) {
				evsel = old_expr->metric_events[idx];
				evsel = evlist__find_evsel(evlist, evsel->idx);
				if (evsel == NULL) {
					free(new_expr->metric_events);
					free(new_expr->metric_refs);
					free(new_expr);
					return -EINVAL;
				}
				new_expr->metric_events[idx] = evsel;
			}

			list_add(&new_expr->nd, &new_me->head);
		}
	}
	return 0;
}
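
/*
 * Usage sketch (illustrative): a caller that clones the evlist per cgroup,
 * such as the cgroup expansion code, re-creates the metric bookkeeping for
 * the cloned evsels roughly like this ('tmp_list', 'cgrp', 'metric_events'
 * and 'orig_metric_events' are assumed to come from the caller):
 *
 *	ret = metricgroup__copy_metric_events(tmp_list, cgrp,
 *					      metric_events,
 *					      &orig_metric_events);
 *	if (ret < 0)
 *		goto out_err;
 */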