1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (c) 2017, Intel Corporation.
6 /* Manage metrics and groups of metrics from JSON files */
8 #include "metricgroup.h"
20 #include <linux/ctype.h>
21 #include <linux/string.h>
22 #include <linux/zalloc.h>
23 #include <subcmd/parse-options.h>
24 #include <api/fs/fs.h>
29 struct metric_event
*metricgroup__lookup(struct rblist
*metric_events
,
34 struct metric_event me
= {
41 nd
= rblist__find(metric_events
, &me
);
43 return container_of(nd
, struct metric_event
, nd
);
45 rblist__add_node(metric_events
, &me
);
46 nd
= rblist__find(metric_events
, &me
);
48 return container_of(nd
, struct metric_event
, nd
);
53 static int metric_event_cmp(struct rb_node
*rb_node
, const void *entry
)
55 struct metric_event
*a
= container_of(rb_node
,
58 const struct metric_event
*b
= entry
;
60 if (a
->evsel
== b
->evsel
)
62 if ((char *)a
->evsel
< (char *)b
->evsel
)
67 static struct rb_node
*metric_event_new(struct rblist
*rblist __maybe_unused
,
70 struct metric_event
*me
= malloc(sizeof(struct metric_event
));
74 memcpy(me
, entry
, sizeof(struct metric_event
));
75 me
->evsel
= ((struct metric_event
*)entry
)->evsel
;
76 INIT_LIST_HEAD(&me
->head
);
80 static void metric_event_delete(struct rblist
*rblist __maybe_unused
,
81 struct rb_node
*rb_node
)
83 struct metric_event
*me
= container_of(rb_node
, struct metric_event
, nd
);
84 struct metric_expr
*expr
, *tmp
;
86 list_for_each_entry_safe(expr
, tmp
, &me
->head
, nd
) {
87 free(expr
->metric_refs
);
88 free(expr
->metric_events
);
95 static void metricgroup__rblist_init(struct rblist
*metric_events
)
97 rblist__init(metric_events
);
98 metric_events
->node_cmp
= metric_event_cmp
;
99 metric_events
->node_new
= metric_event_new
;
100 metric_events
->node_delete
= metric_event_delete
;
/* Tear down @metric_events; node_delete frees each node's contents. */
void metricgroup__rblist_exit(struct rblist *metric_events)
{
	rblist__exit(metric_events);
}
109 * A node in the list of referenced metrics. metric_expr
110 * is held as a convenience to avoid a search through the
113 struct metric_ref_node
{
114 const char *metric_name
;
115 const char *metric_expr
;
116 struct list_head list
;
121 struct expr_parse_ctx pctx
;
122 const char *metric_name
;
123 const char *metric_expr
;
124 const char *metric_unit
;
125 struct list_head metric_refs
;
131 #define RECURSION_ID_MAX 1000
134 struct expr_id id
[RECURSION_ID_MAX
];
138 static struct expr_id
*expr_ids__alloc(struct expr_ids
*ids
)
140 if (ids
->cnt
>= RECURSION_ID_MAX
)
142 return &ids
->id
[ids
->cnt
++];
145 static void expr_ids__exit(struct expr_ids
*ids
)
149 for (i
= 0; i
< ids
->cnt
; i
++)
153 static bool contains_event(struct evsel
**metric_events
, int num_events
,
154 const char *event_name
)
158 for (i
= 0; i
< num_events
; i
++) {
159 if (!strcmp(metric_events
[i
]->name
, event_name
))
166 * Find a group of events in perf_evlist that correspond to those from a parsed
167 * metric expression. Note, as find_evsel_group is called in the same order as
168 * perf_evlist was constructed, metric_no_merge doesn't need to test for
169 * underfilling a group.
170 * @perf_evlist: a list of events something like: {metric1 leader, metric1
171 * sibling, metric1 sibling}:W,duration_time,{metric2 leader, metric2 sibling,
172 * metric2 sibling}:W,duration_time
173 * @pctx: the parse context for the metric expression.
174 * @metric_no_merge: don't attempt to share events for the metric with other
176 * @has_constraint: is there a contraint on the group of events? In which case
177 * the events won't be grouped.
178 * @metric_events: out argument, null terminated array of evsel's associated
180 * @evlist_used: in/out argument, bitmap tracking which evlist events are used.
181 * @return the first metric event or NULL on failure.
183 static struct evsel
*find_evsel_group(struct evlist
*perf_evlist
,
184 struct expr_parse_ctx
*pctx
,
185 bool metric_no_merge
,
187 struct evsel
**metric_events
,
188 unsigned long *evlist_used
)
190 struct evsel
*ev
, *current_leader
= NULL
;
191 struct expr_id_data
*val_ptr
;
192 int i
= 0, matched_events
= 0, events_to_match
;
193 const int idnum
= (int)hashmap__size(&pctx
->ids
);
196 * duration_time is always grouped separately, when events are grouped
197 * (ie has_constraint is false) then ignore it in the matching loop and
198 * add it to metric_events at the end.
200 if (!has_constraint
&&
201 hashmap__find(&pctx
->ids
, "duration_time", (void **)&val_ptr
))
202 events_to_match
= idnum
- 1;
204 events_to_match
= idnum
;
206 evlist__for_each_entry (perf_evlist
, ev
) {
208 * Events with a constraint aren't grouped and match the first
211 if (has_constraint
&& ev
->weak_group
)
213 /* Ignore event if already used and merging is disabled. */
214 if (metric_no_merge
&& test_bit(ev
->idx
, evlist_used
))
216 if (!has_constraint
&& ev
->leader
!= current_leader
) {
218 * Start of a new group, discard the whole match and
222 memset(metric_events
, 0,
223 sizeof(struct evsel
*) * idnum
);
224 current_leader
= ev
->leader
;
227 * Check for duplicate events with the same name. For example,
228 * uncore_imc/cas_count_read/ will turn into 6 events per socket
229 * on skylakex. Only the first such event is placed in
230 * metric_events. If events aren't grouped then this also
231 * ensures that the same event in different sibling groups
232 * aren't both added to metric_events.
234 if (contains_event(metric_events
, matched_events
, ev
->name
))
236 /* Does this event belong to the parse context? */
237 if (hashmap__find(&pctx
->ids
, ev
->name
, (void **)&val_ptr
))
238 metric_events
[matched_events
++] = ev
;
240 if (matched_events
== events_to_match
)
244 if (events_to_match
!= idnum
) {
245 /* Add the first duration_time. */
246 evlist__for_each_entry(perf_evlist
, ev
) {
247 if (!strcmp(ev
->name
, "duration_time")) {
248 metric_events
[matched_events
++] = ev
;
254 if (matched_events
!= idnum
) {
255 /* Not a whole match */
259 metric_events
[idnum
] = NULL
;
261 for (i
= 0; i
< idnum
; i
++) {
262 ev
= metric_events
[i
];
263 /* Don't free the used events. */
264 set_bit(ev
->idx
, evlist_used
);
266 * The metric leader points to the identically named event in
269 ev
->metric_leader
= ev
;
271 * Mark two events with identical names in the same group (or
272 * globally) as being in use as uncore events may be duplicated
273 * for each pmu. Set the metric leader of such events to be the
274 * event that appears in metric_events.
276 evlist__for_each_entry_continue(perf_evlist
, ev
) {
278 * If events are grouped then the search can terminate
279 * when then group is left.
281 if (!has_constraint
&&
282 ev
->leader
!= metric_events
[i
]->leader
&&
283 !strcmp(ev
->leader
->pmu_name
,
284 metric_events
[i
]->leader
->pmu_name
))
286 if (!strcmp(metric_events
[i
]->name
, ev
->name
)) {
287 set_bit(ev
->idx
, evlist_used
);
288 ev
->metric_leader
= metric_events
[i
];
293 return metric_events
[0];
296 static int metricgroup__setup_events(struct list_head
*groups
,
297 bool metric_no_merge
,
298 struct evlist
*perf_evlist
,
299 struct rblist
*metric_events_list
)
301 struct metric_event
*me
;
302 struct metric_expr
*expr
;
306 struct evsel
*evsel
, *tmp
;
307 unsigned long *evlist_used
;
309 evlist_used
= bitmap_alloc(perf_evlist
->core
.nr_entries
);
313 list_for_each_entry (m
, groups
, nd
) {
314 struct evsel
**metric_events
;
315 struct metric_ref
*metric_refs
= NULL
;
317 metric_events
= calloc(sizeof(void *),
318 hashmap__size(&m
->pctx
.ids
) + 1);
319 if (!metric_events
) {
323 evsel
= find_evsel_group(perf_evlist
, &m
->pctx
,
325 m
->has_constraint
, metric_events
,
328 pr_debug("Cannot resolve %s: %s\n",
329 m
->metric_name
, m
->metric_expr
);
333 for (i
= 0; metric_events
[i
]; i
++)
334 metric_events
[i
]->collect_stat
= true;
335 me
= metricgroup__lookup(metric_events_list
, evsel
, true);
341 expr
= malloc(sizeof(struct metric_expr
));
349 * Collect and store collected nested expressions
350 * for metric processing.
352 if (m
->metric_refs_cnt
) {
353 struct metric_ref_node
*ref
;
355 metric_refs
= zalloc(sizeof(struct metric_ref
) * (m
->metric_refs_cnt
+ 1));
364 list_for_each_entry(ref
, &m
->metric_refs
, list
) {
366 * Intentionally passing just const char pointers,
367 * originally from 'struct pmu_event' object.
368 * We don't need to change them, so there's no
369 * need to create our own copy.
371 metric_refs
[i
].metric_name
= ref
->metric_name
;
372 metric_refs
[i
].metric_expr
= ref
->metric_expr
;
377 expr
->metric_refs
= metric_refs
;
378 expr
->metric_expr
= m
->metric_expr
;
379 expr
->metric_name
= m
->metric_name
;
380 expr
->metric_unit
= m
->metric_unit
;
381 expr
->metric_events
= metric_events
;
382 expr
->runtime
= m
->runtime
;
383 list_add(&expr
->nd
, &me
->head
);
386 evlist__for_each_entry_safe(perf_evlist
, tmp
, evsel
) {
387 if (!test_bit(evsel
->idx
, evlist_used
)) {
388 evlist__remove(perf_evlist
, evsel
);
389 evsel__delete(evsel
);
392 bitmap_free(evlist_used
);
/*
 * Does metric-name/group string @n match the user-supplied @list entry?
 * - a NULL @list never matches;
 * - "all" matches everything;
 * - a NULL @n matches only the special "No_group" entry;
 * - otherwise @list must appear in @n (case-insensitively) as a whole
 *   ';'-separated token (start of string or after ';'/' ', and ending
 *   at NUL or ';').
 */
static bool match_metric(const char *n, const char *list)
{
	int len;
	char *m;

	if (!list)
		return false;
	if (!strcmp(list, "all"))
		return true;
	if (!n)
		return !strcasecmp(list, "No_group");
	len = strlen(list);
	m = strcasestr(n, list);
	if (!m)
		return false;
	if ((m == n || m[-1] == ';' || m[-1] == ' ') &&
	    (m[len] == 0 || m[len] == ';'))
		return true;
	return false;
}
418 static bool match_pe_metric(struct pmu_event
*pe
, const char *metric
)
420 return match_metric(pe
->metric_group
, metric
) ||
421 match_metric(pe
->metric_name
, metric
);
427 struct strlist
*metrics
;
430 static int mep_cmp(struct rb_node
*rb_node
, const void *entry
)
432 struct mep
*a
= container_of(rb_node
, struct mep
, nd
);
433 struct mep
*b
= (struct mep
*)entry
;
435 return strcmp(a
->name
, b
->name
);
438 static struct rb_node
*mep_new(struct rblist
*rl __maybe_unused
,
441 struct mep
*me
= malloc(sizeof(struct mep
));
445 memcpy(me
, entry
, sizeof(struct mep
));
446 me
->name
= strdup(me
->name
);
449 me
->metrics
= strlist__new(NULL
, NULL
);
460 static struct mep
*mep_lookup(struct rblist
*groups
, const char *name
)
466 nd
= rblist__find(groups
, &me
);
468 return container_of(nd
, struct mep
, nd
);
469 rblist__add_node(groups
, &me
);
470 nd
= rblist__find(groups
, &me
);
472 return container_of(nd
, struct mep
, nd
);
476 static void mep_delete(struct rblist
*rl __maybe_unused
,
479 struct mep
*me
= container_of(nd
, struct mep
, nd
);
481 strlist__delete(me
->metrics
);
486 static void metricgroup__print_strlist(struct strlist
*metrics
, bool raw
)
491 strlist__for_each_entry (sn
, metrics
) {
493 printf("%s%s", n
> 0 ? " " : "", sn
->s
);
495 printf(" %s\n", sn
->s
);
502 static int metricgroup__print_pmu_event(struct pmu_event
*pe
,
503 bool metricgroups
, char *filter
,
504 bool raw
, bool details
,
505 struct rblist
*groups
,
506 struct strlist
*metriclist
)
511 g
= pe
->metric_group
;
512 if (!g
&& pe
->metric_name
) {
526 while ((g
= strsep(&mg
, ";")) != NULL
) {
533 if (filter
&& !strstr(g
, filter
))
536 s
= (char *)pe
->metric_name
;
538 if (asprintf(&s
, "%s\n%*s%s]",
539 pe
->metric_name
, 8, "[", pe
->desc
) < 0)
542 if (asprintf(&s
, "%s\n%*s%s]",
543 s
, 8, "[", pe
->metric_expr
) < 0)
552 strlist__add(metriclist
, s
);
554 me
= mep_lookup(groups
, g
);
557 strlist__add(me
->metrics
, s
);
568 struct metricgroup_print_sys_idata
{
569 struct strlist
*metriclist
;
571 struct rblist
*groups
;
577 typedef int (*metricgroup_sys_event_iter_fn
)(struct pmu_event
*pe
, void *);
579 struct metricgroup_iter_data
{
580 metricgroup_sys_event_iter_fn fn
;
584 static int metricgroup__sys_event_iter(struct pmu_event
*pe
, void *data
)
586 struct metricgroup_iter_data
*d
= data
;
587 struct perf_pmu
*pmu
= NULL
;
589 if (!pe
->metric_expr
|| !pe
->compat
)
592 while ((pmu
= perf_pmu__scan(pmu
))) {
594 if (!pmu
->id
|| strcmp(pmu
->id
, pe
->compat
))
597 return d
->fn(pe
, d
->data
);
603 static int metricgroup__print_sys_event_iter(struct pmu_event
*pe
, void *data
)
605 struct metricgroup_print_sys_idata
*d
= data
;
607 return metricgroup__print_pmu_event(pe
, d
->metricgroups
, d
->filter
, d
->raw
,
608 d
->details
, d
->groups
, d
->metriclist
);
611 void metricgroup__print(bool metrics
, bool metricgroups
, char *filter
,
612 bool raw
, bool details
)
614 struct pmu_events_map
*map
= perf_pmu__find_map(NULL
);
615 struct pmu_event
*pe
;
617 struct rblist groups
;
618 struct rb_node
*node
, *next
;
619 struct strlist
*metriclist
= NULL
;
622 metriclist
= strlist__new(NULL
, NULL
);
627 rblist__init(&groups
);
628 groups
.node_new
= mep_new
;
629 groups
.node_cmp
= mep_cmp
;
630 groups
.node_delete
= mep_delete
;
631 for (i
= 0; map
; i
++) {
634 if (!pe
->name
&& !pe
->metric_group
&& !pe
->metric_name
)
636 if (!pe
->metric_expr
)
638 if (metricgroup__print_pmu_event(pe
, metricgroups
, filter
,
639 raw
, details
, &groups
,
645 struct metricgroup_iter_data data
= {
646 .fn
= metricgroup__print_sys_event_iter
,
647 .data
= (void *) &(struct metricgroup_print_sys_idata
){
648 .metriclist
= metriclist
,
649 .metricgroups
= metricgroups
,
657 pmu_for_each_sys_event(metricgroup__sys_event_iter
, &data
);
660 if (!filter
|| !rblist__empty(&groups
)) {
661 if (metricgroups
&& !raw
)
662 printf("\nMetric Groups:\n\n");
663 else if (metrics
&& !raw
)
664 printf("\nMetrics:\n\n");
667 for (node
= rb_first_cached(&groups
.entries
); node
; node
= next
) {
668 struct mep
*me
= container_of(node
, struct mep
, nd
);
671 printf("%s%s%s", me
->name
, metrics
&& !raw
? ":" : "", raw
? " " : "\n");
673 metricgroup__print_strlist(me
->metrics
, raw
);
674 next
= rb_next(node
);
675 rblist__remove_node(&groups
, node
);
678 metricgroup__print_strlist(metriclist
, raw
);
679 strlist__delete(metriclist
);
682 static void metricgroup__add_metric_weak_group(struct strbuf
*events
,
683 struct expr_parse_ctx
*ctx
)
685 struct hashmap_entry
*cur
;
687 bool no_group
= true, has_duration
= false;
689 hashmap__for_each_entry((&ctx
->ids
), cur
, bkt
) {
690 pr_debug("found event %s\n", (const char *)cur
->key
);
692 * Duration time maps to a software event and can make
693 * groups not count. Always use it outside a
696 if (!strcmp(cur
->key
, "duration_time")) {
700 strbuf_addf(events
, "%s%s",
701 no_group
? "{" : ",",
702 (const char *)cur
->key
);
706 strbuf_addf(events
, "}:W");
708 strbuf_addf(events
, ",duration_time");
709 } else if (has_duration
)
710 strbuf_addf(events
, "duration_time");
713 static void metricgroup__add_metric_non_group(struct strbuf
*events
,
714 struct expr_parse_ctx
*ctx
)
716 struct hashmap_entry
*cur
;
720 hashmap__for_each_entry((&ctx
->ids
), cur
, bkt
) {
722 strbuf_addf(events
, ",");
723 strbuf_addf(events
, "%s", (const char *)cur
->key
);
/*
 * Warn about the NO_NMI_WATCHDOG constraint. Called with @foot=false
 * per split group (records that a violation happened), and once with
 * @foot=true at the end to print the remediation hint if needed.
 * NOTE(review): the middle line of the multi-line hint below is not
 * visible in this view and was restored from the upstream text.
 */
static void metricgroup___watchdog_constraint_hint(const char *name, bool foot)
{
	static bool violate_nmi_constraint;

	if (!foot) {
		pr_warning("Splitting metric group %s into standalone metrics.\n", name);
		violate_nmi_constraint = true;
		return;
	}

	if (!violate_nmi_constraint)
		return;

	pr_warning("Try disabling the NMI watchdog to comply NO_NMI_WATCHDOG metric constraint:\n"
		   "    echo 0 > /proc/sys/kernel/nmi_watchdog\n"
		   "    perf stat ...\n"
		   "    echo 1 > /proc/sys/kernel/nmi_watchdog\n");
}
747 static bool metricgroup__has_constraint(struct pmu_event
*pe
)
749 if (!pe
->metric_constraint
)
752 if (!strcmp(pe
->metric_constraint
, "NO_NMI_WATCHDOG") &&
753 sysctl__nmi_watchdog_enabled()) {
754 metricgroup___watchdog_constraint_hint(pe
->metric_name
, false);
761 int __weak
arch_get_runtimeparam(struct pmu_event
*pe __maybe_unused
)
766 struct metricgroup_add_iter_data
{
767 struct list_head
*metric_list
;
770 struct expr_ids
*ids
;
773 bool metric_no_group
;
776 static int __add_metric(struct list_head
*metric_list
,
777 struct pmu_event
*pe
,
778 bool metric_no_group
,
781 struct expr_id
*parent
,
782 struct expr_ids
*ids
)
784 struct metric_ref_node
*ref
;
789 * We got in here for the parent group,
790 * allocate it and put it on the list.
792 m
= zalloc(sizeof(*m
));
796 expr__ctx_init(&m
->pctx
);
797 m
->metric_name
= pe
->metric_name
;
798 m
->metric_expr
= pe
->metric_expr
;
799 m
->metric_unit
= pe
->unit
;
800 m
->runtime
= runtime
;
801 m
->has_constraint
= metric_no_group
|| metricgroup__has_constraint(pe
);
802 INIT_LIST_HEAD(&m
->metric_refs
);
803 m
->metric_refs_cnt
= 0;
805 parent
= expr_ids__alloc(ids
);
811 parent
->id
= strdup(pe
->metric_name
);
819 * We got here for the referenced metric, via the
820 * recursive metricgroup__add_metric call, add
821 * it to the parent group.
825 ref
= malloc(sizeof(*ref
));
830 * Intentionally passing just const char pointers,
831 * from 'pe' object, so they never go away. We don't
832 * need to change them, so there's no need to create
835 ref
->metric_name
= pe
->metric_name
;
836 ref
->metric_expr
= pe
->metric_expr
;
838 list_add(&ref
->list
, &m
->metric_refs
);
839 m
->metric_refs_cnt
++;
842 /* Force all found IDs in metric to have us as parent ID. */
843 WARN_ON_ONCE(!parent
);
844 m
->pctx
.parent
= parent
;
847 * For both the parent and referenced metrics, we parse
848 * all the metric's IDs and add it to the parent context.
850 if (expr__find_other(pe
->metric_expr
, NULL
, &m
->pctx
, runtime
) < 0) {
851 if (m
->metric_refs_cnt
== 0) {
852 expr__ctx_clear(&m
->pctx
);
860 * We add new group only in the 'parent' call,
861 * so bail out for referenced metric case.
863 if (m
->metric_refs_cnt
)
866 if (list_empty(metric_list
))
867 list_add(&m
->nd
, metric_list
);
869 struct list_head
*pos
;
871 /* Place the largest groups at the front. */
872 list_for_each_prev(pos
, metric_list
) {
873 struct metric
*old
= list_entry(pos
, struct metric
, nd
);
875 if (hashmap__size(&m
->pctx
.ids
) <=
876 hashmap__size(&old
->pctx
.ids
))
879 list_add(&m
->nd
, pos
);
885 #define map_for_each_event(__pe, __idx, __map) \
887 for (__idx = 0, __pe = &__map->table[__idx]; \
888 __pe->name || __pe->metric_group || __pe->metric_name; \
889 __pe = &__map->table[++__idx])
891 #define map_for_each_metric(__pe, __idx, __map, __metric) \
892 map_for_each_event(__pe, __idx, __map) \
893 if (__pe->metric_expr && \
894 (match_metric(__pe->metric_group, __metric) || \
895 match_metric(__pe->metric_name, __metric)))
897 static struct pmu_event
*find_metric(const char *metric
, struct pmu_events_map
*map
)
899 struct pmu_event
*pe
;
902 map_for_each_event(pe
, i
, map
) {
903 if (match_metric(pe
->metric_name
, metric
))
910 static int recursion_check(struct metric
*m
, const char *id
, struct expr_id
**parent
,
911 struct expr_ids
*ids
)
913 struct expr_id_data
*data
;
918 * We get the parent referenced by 'id' argument and
919 * traverse through all the parent object IDs to check
920 * if we already processed 'id', if we did, it's recursion
923 ret
= expr__get_id(&m
->pctx
, id
, &data
);
927 p
= expr_id_data__parent(data
);
930 if (!strcmp(p
->id
, id
)) {
931 pr_err("failed: recursion detected for %s\n", id
);
938 * If we are over the limit of static entris, the metric
939 * is too difficult/nested to process, fail as well.
941 p
= expr_ids__alloc(ids
);
943 pr_err("failed: too many nested metrics\n");
948 p
->parent
= expr_id_data__parent(data
);
951 return p
->id
? 0 : -ENOMEM
;
954 static int add_metric(struct list_head
*metric_list
,
955 struct pmu_event
*pe
,
956 bool metric_no_group
,
958 struct expr_id
*parent
,
959 struct expr_ids
*ids
);
961 static int __resolve_metric(struct metric
*m
,
962 bool metric_no_group
,
963 struct list_head
*metric_list
,
964 struct pmu_events_map
*map
,
965 struct expr_ids
*ids
)
967 struct hashmap_entry
*cur
;
973 * Iterate all the parsed IDs and if there's metric,
974 * add it to the context.
978 hashmap__for_each_entry((&m
->pctx
.ids
), cur
, bkt
) {
979 struct expr_id
*parent
;
980 struct pmu_event
*pe
;
982 pe
= find_metric(cur
->key
, map
);
986 ret
= recursion_check(m
, cur
->key
, &parent
, ids
);
991 /* The metric key itself needs to go out.. */
992 expr__del_id(&m
->pctx
, cur
->key
);
994 /* ... and it gets resolved to the parent context. */
995 ret
= add_metric(metric_list
, pe
, metric_no_group
, &m
, parent
, ids
);
1000 * We added new metric to hashmap, so we need
1001 * to break the iteration and start over.
1010 static int resolve_metric(bool metric_no_group
,
1011 struct list_head
*metric_list
,
1012 struct pmu_events_map
*map
,
1013 struct expr_ids
*ids
)
1018 list_for_each_entry(m
, metric_list
, nd
) {
1019 err
= __resolve_metric(m
, metric_no_group
, metric_list
, map
, ids
);
1026 static int add_metric(struct list_head
*metric_list
,
1027 struct pmu_event
*pe
,
1028 bool metric_no_group
,
1030 struct expr_id
*parent
,
1031 struct expr_ids
*ids
)
1033 struct metric
*orig
= *m
;
1036 pr_debug("metric expr %s for %s\n", pe
->metric_expr
, pe
->metric_name
);
1038 if (!strstr(pe
->metric_expr
, "?")) {
1039 ret
= __add_metric(metric_list
, pe
, metric_no_group
, 1, m
, parent
, ids
);
1043 count
= arch_get_runtimeparam(pe
);
1045 /* This loop is added to create multiple
1046 * events depend on count value and add
1047 * those events to metric_list.
1050 for (j
= 0; j
< count
&& !ret
; j
++, *m
= orig
)
1051 ret
= __add_metric(metric_list
, pe
, metric_no_group
, j
, m
, parent
, ids
);
1057 static int metricgroup__add_metric_sys_event_iter(struct pmu_event
*pe
,
1060 struct metricgroup_add_iter_data
*d
= data
;
1063 if (!match_pe_metric(pe
, d
->metric
))
1066 ret
= add_metric(d
->metric_list
, pe
, d
->metric_no_group
, d
->m
, NULL
, d
->ids
);
1070 ret
= resolve_metric(d
->metric_no_group
,
1071 d
->metric_list
, NULL
, d
->ids
);
1075 *(d
->has_match
) = true;
1080 static int metricgroup__add_metric(const char *metric
, bool metric_no_group
,
1081 struct strbuf
*events
,
1082 struct list_head
*metric_list
,
1083 struct pmu_events_map
*map
)
1085 struct expr_ids ids
= { .cnt
= 0, };
1086 struct pmu_event
*pe
;
1090 bool has_match
= false;
1092 map_for_each_metric(pe
, i
, map
, metric
) {
1096 ret
= add_metric(&list
, pe
, metric_no_group
, &m
, NULL
, &ids
);
1101 * Process any possible referenced metrics
1102 * included in the expression.
1104 ret
= resolve_metric(metric_no_group
,
1111 struct metricgroup_iter_data data
= {
1112 .fn
= metricgroup__add_metric_sys_event_iter
,
1113 .data
= (void *) &(struct metricgroup_add_iter_data
) {
1114 .metric_list
= &list
,
1116 .metric_no_group
= metric_no_group
,
1119 .has_match
= &has_match
,
1124 pmu_for_each_sys_event(metricgroup__sys_event_iter
, &data
);
1126 /* End of pmu events. */
1132 list_for_each_entry(m
, &list
, nd
) {
1133 if (events
->len
> 0)
1134 strbuf_addf(events
, ",");
1136 if (m
->has_constraint
) {
1137 metricgroup__add_metric_non_group(events
,
1140 metricgroup__add_metric_weak_group(events
,
1147 * add to metric_list so that they can be released
1148 * even if it's failed
1150 list_splice(&list
, metric_list
);
1151 expr_ids__exit(&ids
);
1155 static int metricgroup__add_metric_list(const char *list
, bool metric_no_group
,
1156 struct strbuf
*events
,
1157 struct list_head
*metric_list
,
1158 struct pmu_events_map
*map
)
1160 char *llist
, *nlist
, *p
;
1163 nlist
= strdup(list
);
1168 strbuf_init(events
, 100);
1169 strbuf_addf(events
, "%s", "");
1171 while ((p
= strsep(&llist
, ",")) != NULL
) {
1172 ret
= metricgroup__add_metric(p
, metric_no_group
, events
,
1174 if (ret
== -EINVAL
) {
1175 fprintf(stderr
, "Cannot find metric or group `%s'\n",
1183 metricgroup___watchdog_constraint_hint(NULL
, true);
1188 static void metric__free_refs(struct metric
*metric
)
1190 struct metric_ref_node
*ref
, *tmp
;
1192 list_for_each_entry_safe(ref
, tmp
, &metric
->metric_refs
, list
) {
1193 list_del(&ref
->list
);
1198 static void metricgroup__free_metrics(struct list_head
*metric_list
)
1200 struct metric
*m
, *tmp
;
1202 list_for_each_entry_safe (m
, tmp
, metric_list
, nd
) {
1203 metric__free_refs(m
);
1204 expr__ctx_clear(&m
->pctx
);
1205 list_del_init(&m
->nd
);
1210 static int parse_groups(struct evlist
*perf_evlist
, const char *str
,
1211 bool metric_no_group
,
1212 bool metric_no_merge
,
1213 struct perf_pmu
*fake_pmu
,
1214 struct rblist
*metric_events
,
1215 struct pmu_events_map
*map
)
1217 struct parse_events_error parse_error
;
1218 struct strbuf extra_events
;
1219 LIST_HEAD(metric_list
);
1222 if (metric_events
->nr_entries
== 0)
1223 metricgroup__rblist_init(metric_events
);
1224 ret
= metricgroup__add_metric_list(str
, metric_no_group
,
1225 &extra_events
, &metric_list
, map
);
1228 pr_debug("adding %s\n", extra_events
.buf
);
1229 bzero(&parse_error
, sizeof(parse_error
));
1230 ret
= __parse_events(perf_evlist
, extra_events
.buf
, &parse_error
, fake_pmu
);
1232 parse_events_print_error(&parse_error
, extra_events
.buf
);
1235 ret
= metricgroup__setup_events(&metric_list
, metric_no_merge
,
1236 perf_evlist
, metric_events
);
1238 metricgroup__free_metrics(&metric_list
);
1239 strbuf_release(&extra_events
);
1243 int metricgroup__parse_groups(const struct option
*opt
,
1245 bool metric_no_group
,
1246 bool metric_no_merge
,
1247 struct rblist
*metric_events
)
1249 struct evlist
*perf_evlist
= *(struct evlist
**)opt
->value
;
1250 struct pmu_events_map
*map
= perf_pmu__find_map(NULL
);
1253 return parse_groups(perf_evlist
, str
, metric_no_group
,
1254 metric_no_merge
, NULL
, metric_events
, map
);
1257 int metricgroup__parse_groups_test(struct evlist
*evlist
,
1258 struct pmu_events_map
*map
,
1260 bool metric_no_group
,
1261 bool metric_no_merge
,
1262 struct rblist
*metric_events
)
1264 return parse_groups(evlist
, str
, metric_no_group
,
1265 metric_no_merge
, &perf_pmu__fake
, metric_events
, map
);
1268 bool metricgroup__has_metric(const char *metric
)
1270 struct pmu_events_map
*map
= perf_pmu__find_map(NULL
);
1271 struct pmu_event
*pe
;
1277 for (i
= 0; ; i
++) {
1278 pe
= &map
->table
[i
];
1280 if (!pe
->name
&& !pe
->metric_group
&& !pe
->metric_name
)
1282 if (!pe
->metric_expr
)
1284 if (match_metric(pe
->metric_name
, metric
))
1290 int metricgroup__copy_metric_events(struct evlist
*evlist
, struct cgroup
*cgrp
,
1291 struct rblist
*new_metric_events
,
1292 struct rblist
*old_metric_events
)
1296 for (i
= 0; i
< rblist__nr_entries(old_metric_events
); i
++) {
1298 struct metric_event
*old_me
, *new_me
;
1299 struct metric_expr
*old_expr
, *new_expr
;
1300 struct evsel
*evsel
;
1304 nd
= rblist__entry(old_metric_events
, i
);
1305 old_me
= container_of(nd
, struct metric_event
, nd
);
1307 evsel
= evlist__find_evsel(evlist
, old_me
->evsel
->idx
);
1310 new_me
= metricgroup__lookup(new_metric_events
, evsel
, true);
1314 pr_debug("copying metric event for cgroup '%s': %s (idx=%d)\n",
1315 cgrp
? cgrp
->name
: "root", evsel
->name
, evsel
->idx
);
1317 list_for_each_entry(old_expr
, &old_me
->head
, nd
) {
1318 new_expr
= malloc(sizeof(*new_expr
));
1322 new_expr
->metric_expr
= old_expr
->metric_expr
;
1323 new_expr
->metric_name
= old_expr
->metric_name
;
1324 new_expr
->metric_unit
= old_expr
->metric_unit
;
1325 new_expr
->runtime
= old_expr
->runtime
;
1327 if (old_expr
->metric_refs
) {
1328 /* calculate number of metric_events */
1329 for (nr
= 0; old_expr
->metric_refs
[nr
].metric_name
; nr
++)
1331 alloc_size
= sizeof(*new_expr
->metric_refs
);
1332 new_expr
->metric_refs
= calloc(nr
+ 1, alloc_size
);
1333 if (!new_expr
->metric_refs
) {
1338 memcpy(new_expr
->metric_refs
, old_expr
->metric_refs
,
1341 new_expr
->metric_refs
= NULL
;
1344 /* calculate number of metric_events */
1345 for (nr
= 0; old_expr
->metric_events
[nr
]; nr
++)
1347 alloc_size
= sizeof(*new_expr
->metric_events
);
1348 new_expr
->metric_events
= calloc(nr
+ 1, alloc_size
);
1349 if (!new_expr
->metric_events
) {
1350 free(new_expr
->metric_refs
);
1355 /* copy evsel in the same position */
1356 for (idx
= 0; idx
< nr
; idx
++) {
1357 evsel
= old_expr
->metric_events
[idx
];
1358 evsel
= evlist__find_evsel(evlist
, evsel
->idx
);
1359 if (evsel
== NULL
) {
1360 free(new_expr
->metric_events
);
1361 free(new_expr
->metric_refs
);
1365 new_expr
->metric_events
[idx
] = evsel
;
1368 list_add(&new_expr
->nd
, &new_me
->head
);