// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017, Intel Corporation.
 */

/* Manage metrics and groups of metrics from JSON files */

#include "metricgroup.h"
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "strbuf.h"
#include "pmu.h"
#include "pmus.h"
#include "print-events.h"
#include "smt.h"
#include "tool_pmu.h"
#include "expr.h"
#include <assert.h>
#include <string.h>
#include <linux/ctype.h>
#include <linux/list_sort.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <perf/cpumap.h>
#include <subcmd/parse-options.h>
#include <api/fs/fs.h>
#include "cgroup.h"
#include "util/hashmap.h"
struct metric_event *metricgroup__lookup(struct rblist *metric_events,
					 struct evsel *evsel,
					 bool create)
{
	struct rb_node *nd;
	struct metric_event me = {
		.evsel = evsel,
	};

	if (!metric_events)
		return NULL;

	if (evsel && evsel->metric_leader)
		me.evsel = evsel->metric_leader;
	nd = rblist__find(metric_events, &me);
	if (nd)
		return container_of(nd, struct metric_event, nd);
	if (create) {
		rblist__add_node(metric_events, &me);
		nd = rblist__find(metric_events, &me);
		if (nd)
			return container_of(nd, struct metric_event, nd);
	}
	return NULL;
}
static int metric_event_cmp(struct rb_node *rb_node, const void *entry)
{
	struct metric_event *a = container_of(rb_node,
					      struct metric_event,
					      nd);
	const struct metric_event *b = entry;

	if (a->evsel == b->evsel)
		return 0;
	if ((char *)a->evsel < (char *)b->evsel)
		return -1;
	return +1;
}

static struct rb_node *metric_event_new(struct rblist *rblist __maybe_unused,
					const void *entry)
{
	struct metric_event *me = malloc(sizeof(struct metric_event));

	if (!me)
		return NULL;
	memcpy(me, entry, sizeof(struct metric_event));
	me->evsel = ((struct metric_event *)entry)->evsel;
	me->is_default = false;
	INIT_LIST_HEAD(&me->head);
	return &me->nd;
}

static void metric_event_delete(struct rblist *rblist __maybe_unused,
				struct rb_node *rb_node)
{
	struct metric_event *me = container_of(rb_node, struct metric_event, nd);
	struct metric_expr *expr, *tmp;

	list_for_each_entry_safe(expr, tmp, &me->head, nd) {
		zfree(&expr->metric_name);
		zfree(&expr->metric_refs);
		zfree(&expr->metric_events);
		free(expr);
	}

	free(me);
}

static void metricgroup__rblist_init(struct rblist *metric_events)
{
	rblist__init(metric_events);
	metric_events->node_cmp = metric_event_cmp;
	metric_events->node_new = metric_event_new;
	metric_events->node_delete = metric_event_delete;
}

void metricgroup__rblist_exit(struct rblist *metric_events)
{
	rblist__exit(metric_events);
}
/*
 * The metric under construction. The data held here will be placed in a
 * struct metric_expr.
 */
struct metric {
	struct list_head nd;
	/**
	 * The expression parse context importantly holding the IDs contained
	 * within the expression.
	 */
	struct expr_parse_ctx *pctx;
	/** The PMU the metric is for, or "cpu" if none was specified. */
	const char *pmu;
	/** The name of the metric such as "IPC". */
	const char *metric_name;
	/** Modifier on the metric such as "u" or NULL for none. */
	const char *modifier;
	/** The expression to parse, for example, "instructions/cycles". */
	const char *metric_expr;
	/** Optional threshold expression where zero value is green, otherwise red. */
	const char *metric_threshold;
	/**
	 * The "ScaleUnit" that scales and adds a unit to the metric during
	 * output.
	 */
	const char *metric_unit;
	/**
	 * Optional name of the metric group reported
	 * if the Default metric group is being processed.
	 */
	const char *default_metricgroup_name;
	/** Optional null terminated array of referenced metrics. */
	struct metric_ref *metric_refs;
	/** Should events of the metric be grouped? */
	bool group_events;
	/**
	 * Parsed events for the metric. Optional as events may be taken from a
	 * different metric whose group contains all the IDs necessary for this
	 * one.
	 */
	struct evlist *evlist;
};
static void metric__watchdog_constraint_hint(const char *name, bool foot)
{
	static bool violate_nmi_constraint;

	if (!foot) {
		pr_warning("Not grouping metric %s's events.\n", name);
		violate_nmi_constraint = true;
		return;
	}

	if (!violate_nmi_constraint)
		return;

	pr_warning("Try disabling the NMI watchdog to comply NO_NMI_WATCHDOG metric constraint:\n"
		   "    echo 0 > /proc/sys/kernel/nmi_watchdog\n"
		   "    perf stat ...\n"
		   "    echo 1 > /proc/sys/kernel/nmi_watchdog\n");
}

static bool metric__group_events(const struct pmu_metric *pm)
{
	switch (pm->event_grouping) {
	case MetricNoGroupEvents:
		return false;
	case MetricNoGroupEventsNmi:
		if (!sysctl__nmi_watchdog_enabled())
			return true;
		metric__watchdog_constraint_hint(pm->metric_name, /*foot=*/false);
		return false;
	case MetricNoGroupEventsSmt:
		return !smt_on();
	case MetricGroupEvents:
	default:
		return true;
	}
}
static void metric__free(struct metric *m)
{
	if (!m)
		return;

	zfree(&m->metric_refs);
	expr__ctx_free(m->pctx);
	zfree(&m->modifier);
	evlist__delete(m->evlist);
	free(m);
}

static struct metric *metric__new(const struct pmu_metric *pm,
				  const char *modifier,
				  bool metric_no_group,
				  int runtime,
				  const char *user_requested_cpu_list,
				  bool system_wide)
{
	struct metric *m;

	m = zalloc(sizeof(*m));
	if (!m)
		return NULL;

	m->pctx = expr__ctx_new();
	if (!m->pctx)
		goto out_err;

	m->pmu = pm->pmu ?: "cpu";
	m->metric_name = pm->metric_name;
	m->default_metricgroup_name = pm->default_metricgroup_name ?: "";
	m->modifier = NULL;
	if (modifier) {
		m->modifier = strdup(modifier);
		if (!m->modifier)
			goto out_err;
	}
	m->metric_expr = pm->metric_expr;
	m->metric_threshold = pm->metric_threshold;
	m->metric_unit = pm->unit;
	m->pctx->sctx.user_requested_cpu_list = NULL;
	if (user_requested_cpu_list) {
		m->pctx->sctx.user_requested_cpu_list = strdup(user_requested_cpu_list);
		if (!m->pctx->sctx.user_requested_cpu_list)
			goto out_err;
	}
	m->pctx->sctx.runtime = runtime;
	m->pctx->sctx.system_wide = system_wide;
	m->group_events = !metric_no_group && metric__group_events(pm);
	m->metric_refs = NULL;
	m->evlist = NULL;

	return m;
out_err:
	metric__free(m);
	return NULL;
}
static bool contains_metric_id(struct evsel **metric_events, int num_events,
			       const char *metric_id)
{
	int i;

	for (i = 0; i < num_events; i++) {
		if (!strcmp(evsel__metric_id(metric_events[i]), metric_id))
			return true;
	}
	return false;
}
/**
 * setup_metric_events - Find a group of events in metric_evlist that correspond
 *                       to the IDs from a parsed metric expression.
 * @pmu: The PMU for the IDs.
 * @ids: the metric IDs to match.
 * @metric_evlist: the list of perf events.
 * @out_metric_events: holds the created metric events array.
 */
static int setup_metric_events(const char *pmu, struct hashmap *ids,
			       struct evlist *metric_evlist,
			       struct evsel ***out_metric_events)
{
	struct evsel **metric_events;
	const char *metric_id;
	struct evsel *ev;
	size_t ids_size, matched_events, i;
	bool all_pmus = !strcmp(pmu, "all") || perf_pmus__num_core_pmus() == 1 || !is_pmu_core(pmu);

	*out_metric_events = NULL;
	ids_size = hashmap__size(ids);

	metric_events = calloc(ids_size + 1, sizeof(void *));
	if (!metric_events)
		return -ENOMEM;

	matched_events = 0;
	evlist__for_each_entry(metric_evlist, ev) {
		struct expr_id_data *val_ptr;

		/* Don't match events for the wrong hybrid PMU. */
		if (!all_pmus && ev->pmu && evsel__is_hybrid(ev) &&
		    strcmp(ev->pmu->name, pmu))
			continue;
		/*
		 * Check for duplicate events with the same name. For
		 * example, uncore_imc/cas_count_read/ will turn into 6
		 * events per socket on skylakex. Only the first such
		 * event is placed in metric_events.
		 */
		metric_id = evsel__metric_id(ev);
		if (contains_metric_id(metric_events, matched_events, metric_id))
			continue;
		/*
		 * Does this event belong to the parse context? For
		 * combined or shared groups, this metric may not care
		 * about this event.
		 */
		if (hashmap__find(ids, metric_id, &val_ptr)) {
			pr_debug("Matched metric-id %s to %s\n", metric_id, evsel__name(ev));
			metric_events[matched_events++] = ev;

			if (matched_events >= ids_size)
				break;
		}
	}
	if (matched_events < ids_size) {
		free(metric_events);
		return -EINVAL;
	}
	for (i = 0; i < ids_size; i++) {
		ev = metric_events[i];
		ev->collect_stat = true;

		/*
		 * The metric leader points to the identically named
		 * event in metric_events.
		 */
		ev->metric_leader = ev;
		/*
		 * Mark two events with identical names in the same
		 * group (or globally) as being in use as uncore events
		 * may be duplicated for each pmu. Set the metric leader
		 * of such events to be the event that appears in
		 * metric_events.
		 */
		metric_id = evsel__metric_id(ev);
		evlist__for_each_entry_continue(metric_evlist, ev) {
			if (!strcmp(evsel__metric_id(ev), metric_id))
				ev->metric_leader = metric_events[i];
		}
	}
	*out_metric_events = metric_events;
	return 0;
}
static bool match_metric(const char *metric_or_groups, const char *sought)
{
	int len;
	char *m;

	if (!sought)
		return false;
	if (!strcmp(sought, "all"))
		return true;
	if (!metric_or_groups)
		return !strcasecmp(sought, "No_group");
	len = strlen(sought);
	if (!strncasecmp(metric_or_groups, sought, len) &&
	    (metric_or_groups[len] == 0 || metric_or_groups[len] == ';'))
		return true;
	m = strchr(metric_or_groups, ';');
	return m && match_metric(m + 1, sought);
}
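/*
 * Example for match_metric() above: with metric_or_groups
 * "TopdownL1;TopdownL2" the sought strings "topdownl1" (matching is case
 * insensitive), "TopdownL2" and "all" all match, while "TopdownL1_group"
 * does not, since a match must end at a ';' or at the end of the string.
 */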
static bool match_pm_metric(const struct pmu_metric *pm, const char *pmu, const char *metric)
{
	const char *pm_pmu = pm->pmu ?: "cpu";

	if (strcmp(pmu, "all") && strcmp(pm_pmu, pmu))
		return false;

	return match_metric(pm->metric_group, metric) ||
	       match_metric(pm->metric_name, metric);
}
/** struct mep - RB-tree node for building printing information. */
struct mep {
	/** nd - RB-tree element. */
	struct rb_node nd;
	/** @metric_group: Owned metric group name; groups are separated by ';'. */
	char *metric_group;
	const char *metric_name;
	const char *metric_desc;
	const char *metric_long_desc;
	const char *metric_expr;
	const char *metric_threshold;
	const char *metric_unit;
};

static int mep_cmp(struct rb_node *rb_node, const void *entry)
{
	struct mep *a = container_of(rb_node, struct mep, nd);
	struct mep *b = (struct mep *)entry;
	int ret;

	ret = strcmp(a->metric_group, b->metric_group);
	if (ret)
		return ret;

	return strcmp(a->metric_name, b->metric_name);
}

static struct rb_node *mep_new(struct rblist *rl __maybe_unused, const void *entry)
{
	struct mep *me = malloc(sizeof(struct mep));

	if (!me)
		return NULL;
	memcpy(me, entry, sizeof(struct mep));
	return &me->nd;
}

static void mep_delete(struct rblist *rl __maybe_unused,
		       struct rb_node *nd)
{
	struct mep *me = container_of(nd, struct mep, nd);

	zfree(&me->metric_group);
	free(me);
}

static struct mep *mep_lookup(struct rblist *groups, const char *metric_group,
			      const char *metric_name)
{
	struct rb_node *nd;
	struct mep me = {
		.metric_group = strdup(metric_group),
		.metric_name = metric_name,
	};

	nd = rblist__find(groups, &me);
	if (nd) {
		free(me.metric_group);
		return container_of(nd, struct mep, nd);
	}
	rblist__add_node(groups, &me);
	nd = rblist__find(groups, &me);
	if (nd)
		return container_of(nd, struct mep, nd);
	return NULL;
}
static int metricgroup__add_to_mep_groups(const struct pmu_metric *pm,
					  struct rblist *groups)
{
	const char *g;
	char *omg, *mg;

	mg = strdup(pm->metric_group ?: pm->metric_name);
	if (!mg)
		return -ENOMEM;
	omg = mg;
	while ((g = strsep(&mg, ";")) != NULL) {
		struct mep *me;

		g = skip_spaces(g);
		if (strlen(g))
			me = mep_lookup(groups, g, pm->metric_name);
		else
			me = mep_lookup(groups, pm->metric_name, pm->metric_name);

		if (me) {
			me->metric_desc = pm->desc;
			me->metric_long_desc = pm->long_desc;
			me->metric_expr = pm->metric_expr;
			me->metric_threshold = pm->metric_threshold;
			me->metric_unit = pm->unit;
		}
	}
	free(omg);

	return 0;
}
struct metricgroup_iter_data {
	pmu_metric_iter_fn fn;
	void *data;
};

static int metricgroup__sys_event_iter(const struct pmu_metric *pm,
				       const struct pmu_metrics_table *table,
				       void *data)
{
	struct metricgroup_iter_data *d = data;
	struct perf_pmu *pmu = NULL;

	if (!pm->metric_expr || !pm->compat)
		return 0;

	while ((pmu = perf_pmus__scan(pmu))) {
		if (!pmu->id || !pmu_uncore_identifier_match(pm->compat, pmu->id))
			continue;

		return d->fn(pm, table, d->data);
	}
	return 0;
}

static int metricgroup__add_to_mep_groups_callback(const struct pmu_metric *pm,
						   const struct pmu_metrics_table *table __maybe_unused,
						   void *vdata)
{
	struct rblist *groups = vdata;

	return metricgroup__add_to_mep_groups(pm, groups);
}
void metricgroup__print(const struct print_callbacks *print_cb, void *print_state)
{
	struct rblist groups;
	const struct pmu_metrics_table *table;
	struct rb_node *node, *next;

	rblist__init(&groups);
	groups.node_new = mep_new;
	groups.node_cmp = mep_cmp;
	groups.node_delete = mep_delete;
	table = pmu_metrics_table__find();
	if (table) {
		pmu_metrics_table__for_each_metric(table,
						   metricgroup__add_to_mep_groups_callback,
						   &groups);
	}
	{
		struct metricgroup_iter_data data = {
			.fn = metricgroup__add_to_mep_groups_callback,
			.data = &groups,
		};

		pmu_for_each_sys_metric(metricgroup__sys_event_iter, &data);
	}

	for (node = rb_first_cached(&groups.entries); node; node = next) {
		struct mep *me = container_of(node, struct mep, nd);

		print_cb->print_metric(print_state,
				       me->metric_group,
				       me->metric_name,
				       me->metric_desc,
				       me->metric_long_desc,
				       me->metric_expr,
				       me->metric_threshold,
				       me->metric_unit);
		next = rb_next(node);
		rblist__remove_node(&groups, node);
	}
}
static const char *code_characters = ",-=@";

static int encode_metric_id(struct strbuf *sb, const char *x)
{
	char *c;
	int ret = 0;

	for (; *x; x++) {
		c = strchr(code_characters, *x);
		if (c) {
			ret = strbuf_addch(sb, '!');
			if (ret)
				break;

			ret = strbuf_addch(sb, '0' + (c - code_characters));
			if (ret)
				break;
		} else {
			ret = strbuf_addch(sb, *x);
			if (ret)
				break;
		}
	}
	return ret;
}

static int decode_metric_id(struct strbuf *sb, const char *x)
{
	const char *orig = x;
	size_t i;
	char c;
	int ret;

	for (; *x; x++) {
		c = *x;
		if (c == '!') {
			x++;
			i = *x - '0';
			if (i > strlen(code_characters)) {
				pr_err("Bad metric-id encoding in: '%s'", orig);
				return -1;
			}
			c = code_characters[i];
		}
		ret = strbuf_addch(sb, c);
		if (ret)
			return ret;
	}
	return 0;
}
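/*
 * Example for the encoding above: code_characters is ",-=@", so '@' has
 * index 3 and the metric id "msr@tsc@" is encoded as "msr!3tsc!3".
 * decode_metric_id() applies the inverse mapping, turning '!' followed by
 * an index digit back into the original character.
 */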
static int decode_all_metric_ids(struct evlist *perf_evlist, const char *modifier)
{
	struct evsel *ev;
	struct strbuf sb = STRBUF_INIT;
	char *cur;
	int ret = 0;

	evlist__for_each_entry(perf_evlist, ev) {
		if (!ev->metric_id)
			continue;

		ret = strbuf_setlen(&sb, 0);
		if (ret)
			break;

		ret = decode_metric_id(&sb, ev->metric_id);
		if (ret)
			break;

		free((char *)ev->metric_id);
		ev->metric_id = strdup(sb.buf);
		if (!ev->metric_id) {
			ret = -ENOMEM;
			break;
		}
		/*
		 * If the name is just the parsed event, use the metric-id to
		 * give a more friendly display version.
		 */
		if (strstr(ev->name, "metric-id=")) {
			bool has_slash = false;

			zfree(&ev->name);
			for (cur = strchr(sb.buf, '@') ; cur; cur = strchr(++cur, '@')) {
				*cur = '/';
				has_slash = true;
			}

			if (modifier) {
				if (!has_slash && !strchr(sb.buf, ':')) {
					ret = strbuf_addch(&sb, ':');
					if (ret)
						break;
				}
				ret = strbuf_addstr(&sb, modifier);
				if (ret)
					break;
			}
			ev->name = strdup(sb.buf);
			if (!ev->name) {
				ret = -ENOMEM;
				break;
			}
		}
	}
	strbuf_release(&sb);
	return ret;
}
static int metricgroup__build_event_string(struct strbuf *events,
					   const struct expr_parse_ctx *ctx,
					   const char *modifier,
					   bool group_events)
{
	struct hashmap_entry *cur;
	size_t bkt;
	bool no_group = true, has_tool_events = false;
	bool tool_events[TOOL_PMU__EVENT_MAX] = {false};
	int ret = 0;

#define RETURN_IF_NON_ZERO(x) do { if (x) return x; } while (0)

	hashmap__for_each_entry(ctx->ids, cur, bkt) {
		const char *sep, *rsep, *id = cur->pkey;
		enum tool_pmu_event ev;

		pr_debug("found event %s\n", id);

		/* Always move tool events outside of the group. */
		ev = tool_pmu__str_to_event(id);
		if (ev != TOOL_PMU__EVENT_NONE) {
			has_tool_events = true;
			tool_events[ev] = true;
			continue;
		}
		/* Separate events with commas and open the group if necessary. */
		if (no_group) {
			if (group_events) {
				ret = strbuf_addch(events, '{');
				RETURN_IF_NON_ZERO(ret);
			}
			no_group = false;
		} else {
			ret = strbuf_addch(events, ',');
			RETURN_IF_NON_ZERO(ret);
		}
		/*
		 * Encode the ID as an event string. Add a qualifier for
		 * metric_id that is the original name except with characters
		 * that parse-events can't parse replaced. For example,
		 * 'msr@tsc@' gets added as msr/tsc,metric-id=msr!3tsc!3/
		 */
		sep = strchr(id, '@');
		if (sep != NULL) {
			ret = strbuf_add(events, id, sep - id);
			RETURN_IF_NON_ZERO(ret);
			ret = strbuf_addch(events, '/');
			RETURN_IF_NON_ZERO(ret);
			rsep = strrchr(sep, '@');
			ret = strbuf_add(events, sep + 1, rsep - sep - 1);
			RETURN_IF_NON_ZERO(ret);
			ret = strbuf_addstr(events, ",metric-id=");
			RETURN_IF_NON_ZERO(ret);
			sep = rsep;
		} else {
			sep = strchr(id, ':');
			if (sep != NULL) {
				ret = strbuf_add(events, id, sep - id);
				RETURN_IF_NON_ZERO(ret);
			} else {
				ret = strbuf_addstr(events, id);
				RETURN_IF_NON_ZERO(ret);
			}
			ret = strbuf_addstr(events, "/metric-id=");
			RETURN_IF_NON_ZERO(ret);
		}
		ret = encode_metric_id(events, id);
		RETURN_IF_NON_ZERO(ret);
		ret = strbuf_addstr(events, "/");
		RETURN_IF_NON_ZERO(ret);

		if (sep != NULL) {
			ret = strbuf_addstr(events, sep + 1);
			RETURN_IF_NON_ZERO(ret);
		}
		if (modifier) {
			ret = strbuf_addstr(events, modifier);
			RETURN_IF_NON_ZERO(ret);
		}
	}
	if (!no_group && group_events) {
		ret = strbuf_addf(events, "}:W");
		RETURN_IF_NON_ZERO(ret);
	}
	if (has_tool_events) {
		int i;

		tool_pmu__for_each_event(i) {
			if (tool_events[i]) {
				if (!no_group) {
					ret = strbuf_addch(events, ',');
					RETURN_IF_NON_ZERO(ret);
				}
				no_group = false;

				ret = strbuf_addstr(events, tool_pmu__event_to_str(i));
				RETURN_IF_NON_ZERO(ret);
			}
		}
	}

	return ret;
#undef RETURN_IF_NON_ZERO
}
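/*
 * Illustrative output of metricgroup__build_event_string(): for the IDs
 * "instructions" and "cycles" with modifier "u" and group_events true,
 * the built string is
 *   {instructions/metric-id=instructions/u,cycles/metric-id=cycles/u}:W
 * (the order of the two events depends on hashmap iteration order).
 */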
int __weak arch_get_runtimeparam(const struct pmu_metric *pm __maybe_unused)
{
	return 1;
}
/*
 * A singly linked list on the stack of the names of metrics being
 * processed. Used to identify recursion.
 */
struct visited_metric {
	const char *name;
	const struct visited_metric *parent;
};

struct metricgroup_add_iter_data {
	struct list_head *metric_list;
	const char *pmu;
	const char *metric_name;
	const char *modifier;
	bool *has_match;
	bool metric_no_group;
	bool metric_no_threshold;
	const char *user_requested_cpu_list;
	bool system_wide;
	struct metric *root_metric;
	const struct visited_metric *visited;
	const struct pmu_metrics_table *table;
};

static bool metricgroup__find_metric(const char *pmu,
				     const char *metric,
				     const struct pmu_metrics_table *table,
				     struct pmu_metric *pm);

static int add_metric(struct list_head *metric_list,
		      const struct pmu_metric *pm,
		      const char *modifier,
		      bool metric_no_group,
		      bool metric_no_threshold,
		      const char *user_requested_cpu_list,
		      bool system_wide,
		      struct metric *root_metric,
		      const struct visited_metric *visited,
		      const struct pmu_metrics_table *table);
/**
 * resolve_metric - Locate metrics within the root metric and recursively add
 *                  references to them.
 * @metric_list: The list the metric is added to.
 * @pmu: The PMU name to resolve metrics on, or "all" for all PMUs.
 * @modifier: if non-null event modifiers like "u".
 * @metric_no_group: Should events written to events be grouped "{}" or
 *                   global. Grouping is the default but due to multiplexing the
 *                   user may override.
 * @user_requested_cpu_list: Command line specified CPUs to record on.
 * @system_wide: Are events for all processes recorded.
 * @root_metric: Metrics may reference other metrics to form a tree. In this
 *               case the root_metric holds all the IDs and a list of referenced
 *               metrics. When adding a root this argument is NULL.
 * @visited: A singly linked list of metric names being added that is used to
 *           detect recursion.
 * @table: The table that is searched for metrics, most commonly the table for the
 *         architecture perf is running upon.
 */
static int resolve_metric(struct list_head *metric_list,
			  const char *pmu,
			  const char *modifier,
			  bool metric_no_group,
			  bool metric_no_threshold,
			  const char *user_requested_cpu_list,
			  bool system_wide,
			  struct metric *root_metric,
			  const struct visited_metric *visited,
			  const struct pmu_metrics_table *table)
{
	struct hashmap_entry *cur;
	size_t bkt;
	struct to_resolve {
		/* The metric to resolve. */
		struct pmu_metric pm;
		/*
		 * The key in the IDs map; this may differ in case, etc. from
		 * pm->metric_name.
		 */
		const char *key;
	} *pending = NULL;
	int i, ret = 0, pending_cnt = 0;

	/*
	 * Iterate all the parsed IDs and, if there's a matching metric, add it
	 * to the pending array.
	 */
	hashmap__for_each_entry(root_metric->pctx->ids, cur, bkt) {
		struct pmu_metric pm;

		if (metricgroup__find_metric(pmu, cur->pkey, table, &pm)) {
			pending = realloc(pending,
					  (pending_cnt + 1) * sizeof(struct to_resolve));
			if (!pending)
				return -ENOMEM;

			memcpy(&pending[pending_cnt].pm, &pm, sizeof(pm));
			pending[pending_cnt].key = cur->pkey;
			pending_cnt++;
		}
	}

	/* Remove the metric IDs from the context. */
	for (i = 0; i < pending_cnt; i++)
		expr__del_id(root_metric->pctx, pending[i].key);

	/*
	 * Recursively add all the metrics, IDs are added to the root metric's
	 * context.
	 */
	for (i = 0; i < pending_cnt; i++) {
		ret = add_metric(metric_list, &pending[i].pm, modifier, metric_no_group,
				 metric_no_threshold, user_requested_cpu_list, system_wide,
				 root_metric, visited, table);
		if (ret)
			break;
	}

	free(pending);
	return ret;
}
/**
 * __add_metric - Add a metric to metric_list.
 * @metric_list: The list the metric is added to.
 * @pm: The pmu_metric containing the metric to be added.
 * @modifier: if non-null event modifiers like "u".
 * @metric_no_group: Should events written to events be grouped "{}" or
 *                   global. Grouping is the default but due to multiplexing the
 *                   user may override.
 * @metric_no_threshold: Should threshold expressions be ignored?
 * @runtime: A special argument for the parser only known at runtime.
 * @user_requested_cpu_list: Command line specified CPUs to record on.
 * @system_wide: Are events for all processes recorded.
 * @root_metric: Metrics may reference other metrics to form a tree. In this
 *               case the root_metric holds all the IDs and a list of referenced
 *               metrics. When adding a root this argument is NULL.
 * @visited: A singly linked list of metric names being added that is used to
 *           detect recursion.
 * @table: The table that is searched for metrics, most commonly the table for the
 *         architecture perf is running upon.
 */
static int __add_metric(struct list_head *metric_list,
			const struct pmu_metric *pm,
			const char *modifier,
			bool metric_no_group,
			bool metric_no_threshold,
			int runtime,
			const char *user_requested_cpu_list,
			bool system_wide,
			struct metric *root_metric,
			const struct visited_metric *visited,
			const struct pmu_metrics_table *table)
{
	const struct visited_metric *vm;
	int ret;
	bool is_root = !root_metric;
	const char *expr;
	struct visited_metric visited_node = {
		.name = pm->metric_name,
		.parent = visited,
	};

	for (vm = visited; vm; vm = vm->parent) {
		if (!strcmp(pm->metric_name, vm->name)) {
			pr_err("failed: recursion detected for %s\n", pm->metric_name);
			return -1;
		}
	}

	if (is_root) {
		/*
		 * This metric is the root of a tree and may reference other
		 * metrics that are added recursively.
		 */
		root_metric = metric__new(pm, modifier, metric_no_group, runtime,
					  user_requested_cpu_list, system_wide);
		if (!root_metric)
			return -ENOMEM;
	} else {
		int cnt = 0;

		/*
		 * This metric was referenced in a metric higher in the
		 * tree. Check if the same metric is already resolved in the
		 * metric_refs list.
		 */
		if (root_metric->metric_refs) {
			for (; root_metric->metric_refs[cnt].metric_name; cnt++) {
				if (!strcmp(pm->metric_name,
					    root_metric->metric_refs[cnt].metric_name))
					return 0;
			}
		}

		/* Create reference. Need space for the entry and the terminator. */
		root_metric->metric_refs = realloc(root_metric->metric_refs,
						   (cnt + 2) * sizeof(struct metric_ref));
		if (!root_metric->metric_refs)
			return -ENOMEM;

		/*
		 * Intentionally passing just const char pointers,
		 * from the 'pm' object, so they never go away. We don't
		 * need to change them, so there's no need to create
		 * our own copy.
		 */
		root_metric->metric_refs[cnt].metric_name = pm->metric_name;
		root_metric->metric_refs[cnt].metric_expr = pm->metric_expr;

		/* Null terminate array. */
		root_metric->metric_refs[cnt+1].metric_name = NULL;
		root_metric->metric_refs[cnt+1].metric_expr = NULL;
	}

	/*
	 * For both the parent and referenced metrics, we parse
	 * all the metric's IDs and add it to the root context.
	 */
	expr = pm->metric_expr;
	if (is_root && pm->metric_threshold) {
		/*
		 * Threshold expressions are built off the actual metric. Switch
		 * to use that in case of additional necessary events. Change
		 * the visited node name to avoid this being flagged as
		 * recursion. If the threshold events are disabled, just use the
		 * metric's name as a reference. This allows metric threshold
		 * computation if there are sufficient events.
		 */
		assert(strstr(pm->metric_threshold, pm->metric_name));
		expr = metric_no_threshold ? pm->metric_name : pm->metric_threshold;
		visited_node.name = "__threshold__";
	}
	if (expr__find_ids(expr, NULL, root_metric->pctx) < 0) {
		/* Broken metric. */
		ret = -EINVAL;
	} else {
		/* Resolve referenced metrics. */
		const char *pmu = pm->pmu ?: "cpu";

		ret = resolve_metric(metric_list, pmu, modifier, metric_no_group,
				     metric_no_threshold, user_requested_cpu_list,
				     system_wide, root_metric, &visited_node,
				     table);
	}
	if (ret) {
		if (is_root)
			metric__free(root_metric);
	} else if (is_root)
		list_add(&root_metric->nd, metric_list);

	return ret;
}
struct metricgroup__find_metric_data {
	const char *pmu;
	const char *metric;
	struct pmu_metric *pm;
};

static int metricgroup__find_metric_callback(const struct pmu_metric *pm,
					     const struct pmu_metrics_table *table __maybe_unused,
					     void *vdata)
{
	struct metricgroup__find_metric_data *data = vdata;
	const char *pm_pmu = pm->pmu ?: "cpu";

	if (strcmp(data->pmu, "all") && strcmp(pm_pmu, data->pmu))
		return 0;

	if (!match_metric(pm->metric_name, data->metric))
		return 0;

	memcpy(data->pm, pm, sizeof(*pm));
	return 1;
}

static bool metricgroup__find_metric(const char *pmu,
				     const char *metric,
				     const struct pmu_metrics_table *table,
				     struct pmu_metric *pm)
{
	struct metricgroup__find_metric_data data = {
		.pmu = pmu,
		.metric = metric,
		.pm = pm,
	};

	return pmu_metrics_table__for_each_metric(table, metricgroup__find_metric_callback, &data)
	       ? true : false;
}
static int add_metric(struct list_head *metric_list,
		      const struct pmu_metric *pm,
		      const char *modifier,
		      bool metric_no_group,
		      bool metric_no_threshold,
		      const char *user_requested_cpu_list,
		      bool system_wide,
		      struct metric *root_metric,
		      const struct visited_metric *visited,
		      const struct pmu_metrics_table *table)
{
	int ret = 0;

	pr_debug("metric expr %s for %s\n", pm->metric_expr, pm->metric_name);

	if (!strstr(pm->metric_expr, "?")) {
		ret = __add_metric(metric_list, pm, modifier, metric_no_group,
				   metric_no_threshold, 0, user_requested_cpu_list,
				   system_wide, root_metric, visited, table);
	} else {
		int j, count;

		count = arch_get_runtimeparam(pm);

		/*
		 * This loop creates multiple events, depending on the count
		 * value, and adds those events to metric_list.
		 */
		for (j = 0; j < count && !ret; j++)
			ret = __add_metric(metric_list, pm, modifier, metric_no_group,
					   metric_no_threshold, j, user_requested_cpu_list,
					   system_wide, root_metric, visited, table);
	}

	return ret;
}
static int metricgroup__add_metric_sys_event_iter(const struct pmu_metric *pm,
						  const struct pmu_metrics_table *table __maybe_unused,
						  void *data)
{
	struct metricgroup_add_iter_data *d = data;
	int ret;

	if (!match_pm_metric(pm, d->pmu, d->metric_name))
		return 0;

	ret = add_metric(d->metric_list, pm, d->modifier, d->metric_no_group,
			 d->metric_no_threshold, d->user_requested_cpu_list,
			 d->system_wide, d->root_metric, d->visited, d->table);
	if (ret)
		return ret;

	*(d->has_match) = true;

	return ret;
}
/**
 * metric_list_cmp - list_sort comparator that sorts metrics with more events to
 *                   the front. Tool events are excluded from the count.
 */
static int metric_list_cmp(void *priv __maybe_unused, const struct list_head *l,
			   const struct list_head *r)
{
	const struct metric *left = container_of(l, struct metric, nd);
	const struct metric *right = container_of(r, struct metric, nd);
	struct expr_id_data *data;
	int i, left_count, right_count;

	left_count = hashmap__size(left->pctx->ids);
	tool_pmu__for_each_event(i) {
		if (!expr__get_id(left->pctx, tool_pmu__event_to_str(i), &data))
			left_count--;
	}

	right_count = hashmap__size(right->pctx->ids);
	tool_pmu__for_each_event(i) {
		if (!expr__get_id(right->pctx, tool_pmu__event_to_str(i), &data))
			right_count--;
	}

	return right_count - left_count;
}

/**
 * default_metricgroup_cmp - Implements complex key for the Default metricgroup
 *                           that first sorts by default_metricgroup_name, then
 *                           metric_name.
 */
static int default_metricgroup_cmp(void *priv __maybe_unused,
				   const struct list_head *l,
				   const struct list_head *r)
{
	const struct metric *left = container_of(l, struct metric, nd);
	const struct metric *right = container_of(r, struct metric, nd);
	int diff = strcmp(right->default_metricgroup_name, left->default_metricgroup_name);

	if (diff)
		return diff;

	return strcmp(right->metric_name, left->metric_name);
}
struct metricgroup__add_metric_data {
	struct list_head *list;
	const char *pmu;
	const char *metric_name;
	const char *modifier;
	const char *user_requested_cpu_list;
	bool metric_no_group;
	bool metric_no_threshold;
	bool system_wide;
	bool has_match;
};

static int metricgroup__add_metric_callback(const struct pmu_metric *pm,
					    const struct pmu_metrics_table *table,
					    void *vdata)
{
	struct metricgroup__add_metric_data *data = vdata;
	int ret = 0;

	if (pm->metric_expr && match_pm_metric(pm, data->pmu, data->metric_name)) {
		bool metric_no_group = data->metric_no_group ||
			match_metric(pm->metricgroup_no_group, data->metric_name);

		data->has_match = true;
		ret = add_metric(data->list, pm, data->modifier, metric_no_group,
				 data->metric_no_threshold, data->user_requested_cpu_list,
				 data->system_wide, /*root_metric=*/NULL,
				 /*visited_metrics=*/NULL, table);
	}
	return ret;
}
/**
 * metricgroup__add_metric - Find and add a metric, or a metric group.
 * @pmu: The PMU name to search for metrics on, or "all" for all PMUs.
 * @metric_name: The name of the metric or metric group. For example, "IPC"
 *               could be the name of a metric and "TopDownL1" the name of a
 *               metric group.
 * @modifier: if non-null event modifiers like "u".
 * @metric_no_group: Should events written to events be grouped "{}" or
 *                   global. Grouping is the default but due to multiplexing the
 *                   user may override.
 * @user_requested_cpu_list: Command line specified CPUs to record on.
 * @system_wide: Are events for all processes recorded.
 * @metric_list: The list that the metric or metric group are added to.
 * @table: The table that is searched for metrics, most commonly the table for the
 *         architecture perf is running upon.
 */
static int metricgroup__add_metric(const char *pmu, const char *metric_name, const char *modifier,
				   bool metric_no_group, bool metric_no_threshold,
				   const char *user_requested_cpu_list,
				   bool system_wide,
				   struct list_head *metric_list,
				   const struct pmu_metrics_table *table)
{
	LIST_HEAD(list);
	int ret;
	bool has_match = false;

	{
		struct metricgroup__add_metric_data data = {
			.list = &list,
			.pmu = pmu,
			.metric_name = metric_name,
			.modifier = modifier,
			.metric_no_group = metric_no_group,
			.metric_no_threshold = metric_no_threshold,
			.user_requested_cpu_list = user_requested_cpu_list,
			.system_wide = system_wide,
			.has_match = false,
		};
		/*
		 * Iterate over all metrics seeing if metric matches either the
		 * name or group. When it does add the metric to the list.
		 */
		ret = pmu_metrics_table__for_each_metric(table, metricgroup__add_metric_callback,
							 &data);
		if (ret)
			goto out;

		has_match = data.has_match;
	}
	{
		struct metricgroup_iter_data data = {
			.fn = metricgroup__add_metric_sys_event_iter,
			.data = (void *) &(struct metricgroup_add_iter_data) {
				.metric_list = &list,
				.pmu = pmu,
				.metric_name = metric_name,
				.modifier = modifier,
				.metric_no_group = metric_no_group,
				.user_requested_cpu_list = user_requested_cpu_list,
				.system_wide = system_wide,
				.has_match = &has_match,
				.table = table,
			},
		};

		pmu_for_each_sys_metric(metricgroup__sys_event_iter, &data);
	}
	/* End of pmu events. */
	if (!has_match)
		ret = -EINVAL;

out:
	/*
	 * Add to metric_list so that the metrics can be released even if
	 * adding failed.
	 */
	list_splice(&list, metric_list);
	return ret;
}
/**
 * metricgroup__add_metric_list - Find and add metrics, or metric groups,
 *                                specified in a list.
 * @pmu: A pmu to restrict the metrics to, or "all" for all PMUS.
 * @list: the list of metrics or metric groups. For example, "IPC,CPI,TopDownL1"
 *        would match the IPC and CPI metrics, and TopDownL1 would match all
 *        the metrics in the TopDownL1 group.
 * @metric_no_group: Should events written to events be grouped "{}" or
 *                   global. Grouping is the default but due to multiplexing the
 *                   user may override.
 * @user_requested_cpu_list: Command line specified CPUs to record on.
 * @system_wide: Are events for all processes recorded.
 * @metric_list: The list that metrics are added to.
 * @table: The table that is searched for metrics, most commonly the table for the
 *         architecture perf is running upon.
 */
static int metricgroup__add_metric_list(const char *pmu, const char *list,
					bool metric_no_group,
					bool metric_no_threshold,
					const char *user_requested_cpu_list,
					bool system_wide, struct list_head *metric_list,
					const struct pmu_metrics_table *table)
{
	char *list_itr, *list_copy, *metric_name, *modifier;
	int ret = 0, count = 0;

	list_copy = strdup(list);
	if (!list_copy)
		return -ENOMEM;
	list_itr = list_copy;

	while ((metric_name = strsep(&list_itr, ",")) != NULL) {
		modifier = strchr(metric_name, ':');
		if (modifier)
			*modifier++ = '\0';

		ret = metricgroup__add_metric(pmu, metric_name, modifier,
					      metric_no_group, metric_no_threshold,
					      user_requested_cpu_list,
					      system_wide, metric_list, table);
		if (ret == -EINVAL)
			pr_err("Cannot find metric or group `%s'\n", metric_name);

		if (ret)
			break;

		count++;
	}
	free(list_copy);

	if (!ret) {
		/*
		 * Warn about nmi_watchdog if any parsed metrics had the
		 * NO_NMI_WATCHDOG constraint.
		 */
		metric__watchdog_constraint_hint(NULL, /*foot=*/true);
		/* No metrics. */
		if (count == 0)
			return -EINVAL;
	}
	return ret;
}
static void metricgroup__free_metrics(struct list_head *metric_list)
{
	struct metric *m, *tmp;

	list_for_each_entry_safe (m, tmp, metric_list, nd) {
		list_del_init(&m->nd);
		metric__free(m);
	}
}
/**
 * find_tool_events - Search for the presence of tool events in metric_list.
 * @metric_list: List to take metrics from.
 * @tool_events: Array of false values, indices corresponding to tool events set
 *               to true if tool event is found.
 */
static void find_tool_events(const struct list_head *metric_list,
			     bool tool_events[TOOL_PMU__EVENT_MAX])
{
	struct metric *m;

	list_for_each_entry(m, metric_list, nd) {
		int i;

		tool_pmu__for_each_event(i) {
			struct expr_id_data *data;

			if (!tool_events[i] &&
			    !expr__get_id(m->pctx, tool_pmu__event_to_str(i), &data))
				tool_events[i] = true;
		}
	}
}
/**
 * build_combined_expr_ctx - Make an expr_parse_ctx with all !group_events
 *                           metric IDs, as the IDs are held in a set,
 *                           duplicates will be removed.
 * @metric_list: List to take metrics from.
 * @combined: Out argument for result.
 */
static int build_combined_expr_ctx(const struct list_head *metric_list,
				   struct expr_parse_ctx **combined)
{
	struct hashmap_entry *cur;
	size_t bkt;
	struct metric *m;
	char *dup;
	int ret;

	*combined = expr__ctx_new();
	if (!*combined)
		return -ENOMEM;

	list_for_each_entry(m, metric_list, nd) {
		if (!m->group_events && !m->modifier) {
			hashmap__for_each_entry(m->pctx->ids, cur, bkt) {
				dup = strdup(cur->pkey);
				if (!dup) {
					ret = -ENOMEM;
					goto err_out;
				}
				ret = expr__add_id(*combined, dup);
				if (ret)
					goto err_out;
			}
		}
	}
	return 0;
err_out:
	expr__ctx_free(*combined);
	*combined = NULL;
	return ret;
}
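/*
 * Illustrative example for build_combined_expr_ctx(): two ungrouped,
 * unmodified metrics with the ID sets {instructions, cycles} and
 * {cycles, branches} combine into the single set
 * {instructions, cycles, branches}; the duplicate "cycles" is dropped
 * because the IDs are held in a set.
 */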
/**
 * parse_ids - Build the event string for the ids and parse them creating an
 *             evlist. The encoded metric_ids are decoded.
 * @metric_no_merge: is metric sharing explicitly disabled.
 * @fake_pmu: use a fake PMU when testing metrics not supported by the current CPU.
 * @ids: the event identifiers parsed from a metric.
 * @modifier: any modifiers added to the events.
 * @group_events: should events be placed in a weak group.
 * @tool_events: entries set true if the tool event of index could be present in
 *               the overall list of metrics.
 * @out_evlist: the created list of events.
 */
static int parse_ids(bool metric_no_merge, bool fake_pmu,
		     struct expr_parse_ctx *ids, const char *modifier,
		     bool group_events, const bool tool_events[TOOL_PMU__EVENT_MAX],
		     struct evlist **out_evlist)
{
	struct parse_events_error parse_error;
	struct evlist *parsed_evlist;
	struct strbuf events = STRBUF_INIT;
	int ret;

	*out_evlist = NULL;
	if (!metric_no_merge || hashmap__size(ids->ids) == 0) {
		bool added_event = false;
		int i;
		/*
		 * We may fail to share events between metrics because a tool
		 * event isn't present in one metric. For example, a ratio of
		 * cache misses doesn't need duration_time but the same events
		 * may be used for a misses per second. Events without sharing
		 * implies multiplexing, that is best avoided, so place
		 * all tool events in every group.
		 *
		 * Also, there may be no ids/events in the expression parsing
		 * context because of constant evaluation, e.g.:
		 *    event1 if #smt_on else 0
		 * Add a tool event to avoid a parse error on an empty string.
		 */
		tool_pmu__for_each_event(i) {
			if (tool_events[i]) {
				char *tmp = strdup(tool_pmu__event_to_str(i));

				if (!tmp)
					return -ENOMEM;
				ids__insert(ids->ids, tmp);
				added_event = true;
			}
		}
		if (!added_event && hashmap__size(ids->ids) == 0) {
			char *tmp = strdup("duration_time");

			if (!tmp)
				return -ENOMEM;
			ids__insert(ids->ids, tmp);
		}
	}
	ret = metricgroup__build_event_string(&events, ids, modifier,
					      group_events);
	if (ret)
		return ret;

	parsed_evlist = evlist__new();
	if (!parsed_evlist) {
		ret = -ENOMEM;
		goto err_out;
	}
	pr_debug("Parsing metric events '%s'\n", events.buf);
	parse_events_error__init(&parse_error);
	ret = __parse_events(parsed_evlist, events.buf, /*pmu_filter=*/NULL,
			     &parse_error, fake_pmu, /*warn_if_reordered=*/false,
			     /*fake_tp=*/false);
	if (ret) {
		parse_events_error__print(&parse_error, events.buf);
		goto err_out;
	}
	ret = decode_all_metric_ids(parsed_evlist, modifier);
	if (ret)
		goto err_out;

	*out_evlist = parsed_evlist;
	parsed_evlist = NULL;
err_out:
	parse_events_error__exit(&parse_error);
	evlist__delete(parsed_evlist);
	strbuf_release(&events);
	return ret;
}
static int parse_groups(struct evlist *perf_evlist,
			const char *pmu, const char *str,
			bool metric_no_group,
			bool metric_no_merge,
			bool metric_no_threshold,
			const char *user_requested_cpu_list,
			bool system_wide,
			bool fake_pmu,
			struct rblist *metric_events_list,
			const struct pmu_metrics_table *table)
{
	struct evlist *combined_evlist = NULL;
	LIST_HEAD(metric_list);
	struct metric *m;
	bool tool_events[TOOL_PMU__EVENT_MAX] = {false};
	bool is_default = !strcmp(str, "Default");
	int ret;

	if (metric_events_list->nr_entries == 0)
		metricgroup__rblist_init(metric_events_list);
	ret = metricgroup__add_metric_list(pmu, str, metric_no_group, metric_no_threshold,
					   user_requested_cpu_list,
					   system_wide, &metric_list, table);
	if (ret)
		goto out;

	/* Sort metrics from largest to smallest. */
	list_sort(NULL, &metric_list, metric_list_cmp);

	if (!metric_no_merge) {
		struct expr_parse_ctx *combined = NULL;

		find_tool_events(&metric_list, tool_events);

		ret = build_combined_expr_ctx(&metric_list, &combined);

		if (!ret && combined && hashmap__size(combined->ids)) {
			ret = parse_ids(metric_no_merge, fake_pmu, combined,
					/*modifier=*/NULL,
					/*group_events=*/false,
					tool_events,
					&combined_evlist);
		}
		if (combined)
			expr__ctx_free(combined);

		if (ret)
			goto out;
	}

	if (is_default)
		list_sort(NULL, &metric_list, default_metricgroup_cmp);

	list_for_each_entry(m, &metric_list, nd) {
		struct metric_event *me;
		struct evsel **metric_events;
		struct evlist *metric_evlist = NULL;
		struct metric *n;
		struct metric_expr *expr;

		if (combined_evlist && !m->group_events) {
			metric_evlist = combined_evlist;
		} else if (!metric_no_merge) {
			/*
			 * See if the IDs for this metric are a subset of an
			 * earlier metric.
			 */
			list_for_each_entry(n, &metric_list, nd) {
				if (m == n)
					break;

				if (n->evlist == NULL)
					continue;

				if ((!m->modifier && n->modifier) ||
				    (m->modifier && !n->modifier) ||
				    (m->modifier && n->modifier &&
				     strcmp(m->modifier, n->modifier)))
					continue;

				if ((!m->pmu && n->pmu) ||
				    (m->pmu && !n->pmu) ||
				    (m->pmu && n->pmu && strcmp(m->pmu, n->pmu)))
					continue;

				if (expr__subset_of_ids(n->pctx, m->pctx)) {
					pr_debug("Events in '%s' fully contained within '%s'\n",
						 m->metric_name, n->metric_name);
					metric_evlist = n->evlist;
					break;
				}
			}
		}
		if (!metric_evlist) {
			ret = parse_ids(metric_no_merge, fake_pmu, m->pctx, m->modifier,
					m->group_events, tool_events, &m->evlist);
			if (ret)
				goto out;

			metric_evlist = m->evlist;
		}
		ret = setup_metric_events(fake_pmu ? "all" : m->pmu, m->pctx->ids,
					  metric_evlist, &metric_events);
		if (ret) {
			pr_err("Cannot resolve IDs for %s: %s\n",
			       m->metric_name, m->metric_expr);
			goto out;
		}

		me = metricgroup__lookup(metric_events_list, metric_events[0], true);

		expr = malloc(sizeof(struct metric_expr));
		if (!expr) {
			ret = -ENOMEM;
			free(metric_events);
			goto out;
		}

		expr->metric_refs = m->metric_refs;
		m->metric_refs = NULL;
		expr->metric_expr = m->metric_expr;
		if (m->modifier) {
			char *tmp;

			if (asprintf(&tmp, "%s:%s", m->metric_name, m->modifier) < 0)
				expr->metric_name = NULL;
			else
				expr->metric_name = tmp;
		} else
			expr->metric_name = strdup(m->metric_name);

		if (!expr->metric_name) {
			ret = -ENOMEM;
			free(metric_events);
			goto out;
		}
		expr->metric_threshold = m->metric_threshold;
		expr->metric_unit = m->metric_unit;
		expr->metric_events = metric_events;
		expr->runtime = m->pctx->sctx.runtime;
		expr->default_metricgroup_name = m->default_metricgroup_name;
		me->is_default = is_default;
		list_add(&expr->nd, &me->head);
	}

	if (combined_evlist) {
		evlist__splice_list_tail(perf_evlist, &combined_evlist->core.entries);
		evlist__delete(combined_evlist);
	}

	list_for_each_entry(m, &metric_list, nd) {
		if (m->evlist)
			evlist__splice_list_tail(perf_evlist, &m->evlist->core.entries);
	}

out:
	metricgroup__free_metrics(&metric_list);
	return ret;
}
int metricgroup__parse_groups(struct evlist *perf_evlist,
			      const char *pmu,
			      const char *str,
			      bool metric_no_group,
			      bool metric_no_merge,
			      bool metric_no_threshold,
			      const char *user_requested_cpu_list,
			      bool system_wide,
			      bool hardware_aware_grouping,
			      struct rblist *metric_events)
{
	const struct pmu_metrics_table *table = pmu_metrics_table__find();

	if (!table)
		return -EINVAL;
	if (hardware_aware_grouping)
		pr_debug("Use hardware aware grouping instead of traditional metric grouping method\n");

	return parse_groups(perf_evlist, pmu, str, metric_no_group, metric_no_merge,
			    metric_no_threshold, user_requested_cpu_list, system_wide,
			    /*fake_pmu=*/false, metric_events, table);
}

int metricgroup__parse_groups_test(struct evlist *evlist,
				   const struct pmu_metrics_table *table,
				   const char *str,
				   struct rblist *metric_events)
{
	return parse_groups(evlist, "all", str,
			    /*metric_no_group=*/false,
			    /*metric_no_merge=*/false,
			    /*metric_no_threshold=*/false,
			    /*user_requested_cpu_list=*/NULL,
			    /*system_wide=*/false,
			    /*fake_pmu=*/true, metric_events, table);
}
struct metricgroup__has_metric_data {
	const char *pmu;
	const char *metric;
};

static int metricgroup__has_metric_callback(const struct pmu_metric *pm,
					    const struct pmu_metrics_table *table __maybe_unused,
					    void *vdata)
{
	struct metricgroup__has_metric_data *data = vdata;

	return match_pm_metric(pm, data->pmu, data->metric) ? 1 : 0;
}

bool metricgroup__has_metric(const char *pmu, const char *metric)
{
	const struct pmu_metrics_table *table = pmu_metrics_table__find();
	struct metricgroup__has_metric_data data = {
		.pmu = pmu,
		.metric = metric,
	};

	if (!table)
		return false;

	return pmu_metrics_table__for_each_metric(table, metricgroup__has_metric_callback, &data)
		? true : false;
}
static int metricgroup__topdown_max_level_callback(const struct pmu_metric *pm,
						   const struct pmu_metrics_table *table __maybe_unused,
						   void *data)
{
	unsigned int *max_level = data;
	unsigned int level;
	const char *p = strstr(pm->metric_group ?: "", "TopdownL");

	if (!p || p[8] == '\0')
		return 0;

	level = p[8] - '0';
	if (level > *max_level)
		*max_level = level;

	return 0;
}

unsigned int metricgroups__topdown_max_level(void)
{
	unsigned int max_level = 0;
	const struct pmu_metrics_table *table = pmu_metrics_table__find();

	if (!table)
		return 0;

	pmu_metrics_table__for_each_metric(table, metricgroup__topdown_max_level_callback,
					   &max_level);
	return max_level;
}
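/*
 * Illustrative example for metricgroups__topdown_max_level(): a metric in
 * the "TopdownL1" group yields level 1 and one in "TopdownL2" yields
 * level 2, so a table containing both reports a maximum level of 2.
 */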
int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
				    struct rblist *new_metric_events,
				    struct rblist *old_metric_events)
{
	unsigned int i;

	for (i = 0; i < rblist__nr_entries(old_metric_events); i++) {
		struct rb_node *nd;
		struct metric_event *old_me, *new_me;
		struct metric_expr *old_expr, *new_expr;
		struct evsel *evsel;
		size_t alloc_size;
		int idx, nr;

		nd = rblist__entry(old_metric_events, i);
		old_me = container_of(nd, struct metric_event, nd);

		evsel = evlist__find_evsel(evlist, old_me->evsel->core.idx);
		if (!evsel)
			return -EINVAL;
		new_me = metricgroup__lookup(new_metric_events, evsel, true);
		if (!new_me)
			return -ENOMEM;

		pr_debug("copying metric event for cgroup '%s': %s (idx=%d)\n",
			 cgrp ? cgrp->name : "root", evsel->name, evsel->core.idx);

		list_for_each_entry(old_expr, &old_me->head, nd) {
			new_expr = malloc(sizeof(*new_expr));
			if (!new_expr)
				return -ENOMEM;

			new_expr->metric_expr = old_expr->metric_expr;
			new_expr->metric_threshold = old_expr->metric_threshold;
			new_expr->metric_name = strdup(old_expr->metric_name);
			if (!new_expr->metric_name) {
				free(new_expr);
				return -ENOMEM;
			}

			new_expr->metric_unit = old_expr->metric_unit;
			new_expr->runtime = old_expr->runtime;

			if (old_expr->metric_refs) {
				/* calculate number of metric_refs */
				for (nr = 0; old_expr->metric_refs[nr].metric_name; nr++)
					continue;
				alloc_size = sizeof(*new_expr->metric_refs);
				new_expr->metric_refs = calloc(nr + 1, alloc_size);
				if (!new_expr->metric_refs) {
					free(new_expr);
					return -ENOMEM;
				}

				memcpy(new_expr->metric_refs, old_expr->metric_refs,
				       nr * alloc_size);
			} else {
				new_expr->metric_refs = NULL;
			}

			/* calculate number of metric_events */
			for (nr = 0; old_expr->metric_events[nr]; nr++)
				continue;
			alloc_size = sizeof(*new_expr->metric_events);
			new_expr->metric_events = calloc(nr + 1, alloc_size);
			if (!new_expr->metric_events) {
				zfree(&new_expr->metric_refs);
				free(new_expr);
				return -ENOMEM;
			}

			/* copy evsel in the same position */
			for (idx = 0; idx < nr; idx++) {
				evsel = old_expr->metric_events[idx];
				evsel = evlist__find_evsel(evlist, evsel->core.idx);
				if (evsel == NULL) {
					zfree(&new_expr->metric_events);
					zfree(&new_expr->metric_refs);
					free(new_expr);
					return -EINVAL;
				}
				new_expr->metric_events[idx] = evsel;
			}

			list_add(&new_expr->nd, &new_me->head);
		}
	}
	return 0;
}