// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <linux/err.h>
#include <inttypes.h>
#include <math.h>
#include <string.h>
#include "counts.h"
#include "cpumap.h"
#include "debug.h"
#include "header.h"
#include "stat.h"
#include "session.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "util/hashmap.h"
#include <linux/zalloc.h>
void update_stats(struct stats *stats, u64 val)
{
	double delta;

	stats->n++;
	delta = val - stats->mean;
	stats->mean += delta / stats->n;
	stats->M2 += delta*(val - stats->mean);

	if (val > stats->max)
		stats->max = val;

	if (val < stats->min)
		stats->min = val;
}
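
/*
 * Example (illustrative, not part of the original file): this is
 * Welford's online algorithm, so feeding the samples 2, 4 and 6 through
 * update_stats() leaves n == 3, mean == 4.0 and M2 == 8.0, i.e. a
 * sample variance of M2 / (n - 1) == 4.0:
 *
 *	struct stats st;
 *
 *	init_stats(&st);
 *	update_stats(&st, 2);
 *	update_stats(&st, 4);
 *	update_stats(&st, 6);
 */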
double avg_stats(struct stats *stats)
{
	return stats->mean;
}
/*
 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
 *
 *       (\Sum n_i^2) - ((\Sum n_i)^2)/n
 * s^2 = -------------------------------
 *                  n - 1
 *
 * http://en.wikipedia.org/wiki/Stddev
 *
 * The std dev of the mean is related to the std dev by:
 *
 *             s
 * s_mean = -------
 *          sqrt(n)
 *
 */
double stddev_stats(struct stats *stats)
{
	double variance, variance_mean;

	if (stats->n < 2)
		return 0.0;

	variance = stats->M2 / (stats->n - 1);
	variance_mean = variance / stats->n;

	return sqrt(variance_mean);
}
double rel_stddev_stats(double stddev, double avg)
{
	double pct = 0.0;

	if (avg)
		pct = 100.0 * stddev/avg;

	return pct;
}
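
/*
 * Example (illustrative, not from the original file): these helpers are
 * typically chained when summarizing a counter over repeated runs, e.g.
 * to produce the "+- x.xx%" figure in perf stat output:
 *
 *	double avg = avg_stats(&st);
 *	double sd  = stddev_stats(&st);
 *	double pct = rel_stddev_stats(sd, avg);
 */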
static void evsel__reset_aggr_stats(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;
	struct perf_stat_aggr *aggr = ps->aggr;

	if (aggr)
		memset(aggr, 0, sizeof(*aggr) * ps->nr_aggr);
}
static void evsel__reset_stat_priv(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;

	init_stats(&ps->res_stats);
	evsel__reset_aggr_stats(evsel);
}
static int evsel__alloc_aggr_stats(struct evsel *evsel, int nr_aggr)
{
	struct perf_stat_evsel *ps = evsel->stats;

	if (ps == NULL)
		return 0;

	ps->nr_aggr = nr_aggr;
	ps->aggr = calloc(nr_aggr, sizeof(*ps->aggr));
	if (ps->aggr == NULL)
		return -ENOMEM;

	return 0;
}
int evlist__alloc_aggr_stats(struct evlist *evlist, int nr_aggr)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__alloc_aggr_stats(evsel, nr_aggr) < 0)
			return -1;
	}
	return 0;
}
static int evsel__alloc_stat_priv(struct evsel *evsel, int nr_aggr)
{
	struct perf_stat_evsel *ps;

	ps = zalloc(sizeof(*ps));
	if (ps == NULL)
		return -ENOMEM;

	evsel->stats = ps;

	if (nr_aggr && evsel__alloc_aggr_stats(evsel, nr_aggr) < 0) {
		evsel->stats = NULL;
		free(ps);
		return -ENOMEM;
	}

	evsel__reset_stat_priv(evsel);
	return 0;
}
static void evsel__free_stat_priv(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;

	if (ps) {
		zfree(&ps->aggr);
		zfree(&ps->group_data);
	}
	zfree(&evsel->stats);
}
static int evsel__alloc_prev_raw_counts(struct evsel *evsel)
{
	int cpu_map_nr = evsel__nr_cpus(evsel);
	int nthreads = perf_thread_map__nr(evsel->core.threads);
	struct perf_counts *counts;

	counts = perf_counts__new(cpu_map_nr, nthreads);
	if (counts)
		evsel->prev_raw_counts = counts;

	return counts ? 0 : -ENOMEM;
}
static void evsel__free_prev_raw_counts(struct evsel *evsel)
{
	perf_counts__delete(evsel->prev_raw_counts);
	evsel->prev_raw_counts = NULL;
}
static void evsel__reset_prev_raw_counts(struct evsel *evsel)
{
	if (evsel->prev_raw_counts)
		perf_counts__reset(evsel->prev_raw_counts);
}
static int evsel__alloc_stats(struct evsel *evsel, int nr_aggr, bool alloc_raw)
{
	if (evsel__alloc_stat_priv(evsel, nr_aggr) < 0 ||
	    evsel__alloc_counts(evsel) < 0 ||
	    (alloc_raw && evsel__alloc_prev_raw_counts(evsel) < 0))
		return -ENOMEM;

	return 0;
}
int evlist__alloc_stats(struct perf_stat_config *config,
			struct evlist *evlist, bool alloc_raw)
{
	struct evsel *evsel;
	int nr_aggr = 0;

	if (config && config->aggr_map)
		nr_aggr = config->aggr_map->nr;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__alloc_stats(evsel, nr_aggr, alloc_raw))
			goto out_free;
	}

	return 0;

out_free:
	evlist__free_stats(evlist);
	return -1;
}
void evlist__free_stats(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		evsel__free_stat_priv(evsel);
		evsel__free_counts(evsel);
		evsel__free_prev_raw_counts(evsel);
	}
}
void evlist__reset_stats(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		evsel__reset_stat_priv(evsel);
		evsel__reset_counts(evsel);
	}
}
void evlist__reset_aggr_stats(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__reset_aggr_stats(evsel);
}
void evlist__reset_prev_raw_counts(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__reset_prev_raw_counts(evsel);
}
static void evsel__copy_prev_raw_counts(struct evsel *evsel)
{
	int idx, nthreads = perf_thread_map__nr(evsel->core.threads);

	for (int thread = 0; thread < nthreads; thread++) {
		perf_cpu_map__for_each_idx(idx, evsel__cpus(evsel)) {
			*perf_counts(evsel->counts, idx, thread) =
				*perf_counts(evsel->prev_raw_counts, idx, thread);
		}
	}
}
void evlist__copy_prev_raw_counts(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__copy_prev_raw_counts(evsel);
}
static void evsel__copy_res_stats(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;

	/*
	 * For GLOBAL aggregation mode, it updates the counts for each run
	 * in the evsel->stats.res_stats.  See perf_stat_process_counter().
	 */
	*ps->aggr[0].counts.values = avg_stats(&ps->res_stats);
}
void evlist__copy_res_stats(struct perf_stat_config *config, struct evlist *evlist)
{
	struct evsel *evsel;

	if (config->aggr_mode != AGGR_GLOBAL)
		return;

	evlist__for_each_entry(evlist, evsel)
		evsel__copy_res_stats(evsel);
}
static size_t pkg_id_hash(long __key, void *ctx __maybe_unused)
{
	uint64_t *key = (uint64_t *) __key;

	return *key & 0xffffffff;
}
static bool pkg_id_equal(long __key1, long __key2, void *ctx __maybe_unused)
{
	uint64_t *key1 = (uint64_t *) __key1;
	uint64_t *key2 = (uint64_t *) __key2;

	return *key1 == *key2;
}
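
/*
 * The hashed key is the (die << 32 | socket) pair built in check_per_pkg()
 * below: the hash keeps only the low 32 bits (the socket id), so all dies
 * of a socket share one bucket and pkg_id_equal() disambiguates on the
 * full 64-bit key.
 */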
static int check_per_pkg(struct evsel *counter, struct perf_counts_values *vals,
			 int cpu_map_idx, bool *skip)
{
	struct hashmap *mask = counter->per_pkg_mask;
	struct perf_cpu_map *cpus = evsel__cpus(counter);
	struct perf_cpu cpu = perf_cpu_map__cpu(cpus, cpu_map_idx);
	int s, d, ret = 0;
	uint64_t *key;

	*skip = false;

	if (!counter->per_pkg)
		return 0;

	if (perf_cpu_map__is_any_cpu_or_is_empty(cpus))
		return 0;

	if (!mask) {
		mask = hashmap__new(pkg_id_hash, pkg_id_equal, NULL);
		if (IS_ERR(mask))
			return -ENOMEM;

		counter->per_pkg_mask = mask;
	}

	/*
	 * We do not consider an event that has not run as a good
	 * instance to mark a package as used (skip = 1).  Otherwise
	 * we may run into a situation where the first CPU in a package
	 * is not running anything, yet the second is, and this function
	 * would mark the package as used after the first CPU and would
	 * not read the values from the second CPU.
	 */
	if (!(vals->run && vals->ena))
		return 0;

	s = cpu__get_socket_id(cpu);
	if (s < 0)
		return -1;

	/*
	 * On a multi-die system, die_id > 0.  On a no-die system, die_id = 0.
	 * We use the hashmap(socket, die) pair to check for used socket+die.
	 */
	d = cpu__get_die_id(cpu);
	if (d < 0)
		return -1;

	key = malloc(sizeof(*key));
	if (!key)
		return -ENOMEM;

	*key = (uint64_t)d << 32 | s;
	if (hashmap__find(mask, key, NULL)) {
		*skip = true;
		free(key);
	} else
		ret = hashmap__add(mask, key, 1);

	return ret;
}
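
/*
 * Example (illustrative, not from the original file): on a two-die socket,
 * a CPU on socket 1, die 1 yields the key (uint64_t)1 << 32 | 1 ==
 * 0x100000001, while socket 1, die 0 yields 0x1, so each socket+die pair
 * is counted exactly once and the remaining CPUs in it set *skip.
 */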
static bool evsel__count_has_error(struct evsel *evsel,
				   struct perf_counts_values *count,
				   struct perf_stat_config *config)
{
	/* the evsel already failed */
	if (evsel->err || evsel->counts->scaled == -1)
		return true;

	/* this is meaningful for CPU aggregation modes only */
	if (config->aggr_mode == AGGR_GLOBAL)
		return false;

	/* it's considered ok when it actually ran */
	if (count->ena != 0 && count->run != 0)
		return false;

	return true;
}
static int
process_counter_values(struct perf_stat_config *config, struct evsel *evsel,
		       int cpu_map_idx, int thread,
		       struct perf_counts_values *count)
{
	struct perf_stat_evsel *ps = evsel->stats;
	static struct perf_counts_values zero;
	bool skip = false;

	if (check_per_pkg(evsel, count, cpu_map_idx, &skip)) {
		pr_err("failed to read per-pkg counter\n");
		return -1;
	}

	if (skip)
		count = &zero;

	if (!evsel->snapshot)
		evsel__compute_deltas(evsel, cpu_map_idx, thread, count);
	perf_counts_values__scale(count, config->scale, NULL);

	if (config->aggr_mode == AGGR_THREAD) {
		struct perf_counts_values *aggr_counts = &ps->aggr[thread].counts;

		/*
		 * Skip value 0 when enabling --per-thread globally,
		 * otherwise it produces too much 0 output.
		 */
		if (count->val == 0 && config->system_wide)
			return 0;

		ps->aggr[thread].nr++;

		aggr_counts->val += count->val;
		aggr_counts->ena += count->ena;
		aggr_counts->run += count->run;
		return 0;
	}

	if (ps->aggr) {
		struct perf_cpu cpu = perf_cpu_map__cpu(evsel->core.cpus, cpu_map_idx);
		struct aggr_cpu_id aggr_id = config->aggr_get_id(config, cpu);
		struct perf_stat_aggr *ps_aggr;
		int i;

		for (i = 0; i < ps->nr_aggr; i++) {
			if (!aggr_cpu_id__equal(&aggr_id, &config->aggr_map->map[i]))
				continue;

			ps_aggr = &ps->aggr[i];
			ps_aggr->nr++;

			/*
			 * When any result is bad, zero them all to give
			 * consistent output in interval mode.  But per-task
			 * counters can have 0 enabled time when some tasks
			 * are idle.
			 */
			if (evsel__count_has_error(evsel, count, config) && !ps_aggr->failed) {
				ps_aggr->counts.val = 0;
				ps_aggr->counts.ena = 0;
				ps_aggr->counts.run = 0;
				ps_aggr->failed = true;
			}

			if (!ps_aggr->failed) {
				ps_aggr->counts.val += count->val;
				ps_aggr->counts.ena += count->ena;
				ps_aggr->counts.run += count->run;
			}
			break;
		}
	}

	return 0;
}
static int process_counter_maps(struct perf_stat_config *config,
				struct evsel *counter)
{
	int nthreads = perf_thread_map__nr(counter->core.threads);
	int ncpus = evsel__nr_cpus(counter);
	int idx, thread;

	for (thread = 0; thread < nthreads; thread++) {
		for (idx = 0; idx < ncpus; idx++) {
			if (process_counter_values(config, counter, idx, thread,
						   perf_counts(counter->counts, idx, thread)))
				return -1;
		}
	}

	return 0;
}
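
/*
 * Example (illustrative, not from the original file): for an evsel opened
 * on 4 CPUs and 2 threads, the loops above call process_counter_values()
 * 8 times, once per (cpu_map_idx, thread) cell of counter->counts.
 */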
int perf_stat_process_counter(struct perf_stat_config *config,
			      struct evsel *counter)
{
	struct perf_stat_evsel *ps = counter->stats;
	u64 *count;
	int ret;

	if (counter->per_pkg)
		evsel__zero_per_pkg(counter);

	ret = process_counter_maps(config, counter);
	if (ret)
		return ret;

	if (config->aggr_mode != AGGR_GLOBAL)
		return 0;

	/*
	 * GLOBAL aggregation mode only has a single aggr counts,
	 * so we can use ps->aggr[0] as the actual output.
	 */
	count = ps->aggr[0].counts.values;
	update_stats(&ps->res_stats, *count);

	if (verbose > 0) {
		fprintf(config->output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
			evsel__name(counter), count[0], count[1], count[2]);
	}

	return 0;
}
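
/*
 * Note (an assumption based on the surrounding code): with AGGR_GLOBAL,
 * each call above feeds one run's total into ps->res_stats, and across
 * repeated runs ("perf stat -r N") evsel__copy_res_stats() then reports
 * the avg_stats() of those totals.
 */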
static int evsel__merge_aggr_counters(struct evsel *evsel, struct evsel *alias)
{
	struct perf_stat_evsel *ps_a = evsel->stats;
	struct perf_stat_evsel *ps_b = alias->stats;
	int i;

	if (ps_a->aggr == NULL && ps_b->aggr == NULL)
		return 0;

	if (ps_a->nr_aggr != ps_b->nr_aggr) {
		pr_err("Unmatched aggregation mode between aliases\n");
		return -1;
	}

	for (i = 0; i < ps_a->nr_aggr; i++) {
		struct perf_counts_values *aggr_counts_a = &ps_a->aggr[i].counts;
		struct perf_counts_values *aggr_counts_b = &ps_b->aggr[i].counts;

		/* NB: don't increase aggr.nr for aliases */

		aggr_counts_a->val += aggr_counts_b->val;
		aggr_counts_a->ena += aggr_counts_b->ena;
		aggr_counts_a->run += aggr_counts_b->run;
	}

	return 0;
}
/* events should have the same name, scale, unit and cgroup, but be on different PMUs */
static bool evsel__is_alias(struct evsel *evsel_a, struct evsel *evsel_b)
{
	if (strcmp(evsel__name(evsel_a), evsel__name(evsel_b)))
		return false;

	if (evsel_a->scale != evsel_b->scale)
		return false;

	if (evsel_a->cgrp != evsel_b->cgrp)
		return false;

	if (strcmp(evsel_a->unit, evsel_b->unit))
		return false;

	if (evsel__is_clock(evsel_a) != evsel__is_clock(evsel_b))
		return false;

	return evsel_a->pmu != evsel_b->pmu;
}
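
/*
 * Example (illustrative, not from the original file): with a memory
 * controller spread over the PMUs uncore_imc_0 and uncore_imc_1, the two
 * cas_count_read evsels agree on name, scale, unit and cgroup but differ
 * in pmu, so evsel__is_alias() returns true and their counts get merged.
 */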
static void evsel__merge_aliases(struct evsel *evsel)
{
	struct evlist *evlist = evsel->evlist;
	struct evsel *alias;

	alias = list_prepare_entry(evsel, &(evlist->core.entries), core.node);
	list_for_each_entry_continue(alias, &evlist->core.entries, core.node) {
		/* Merge the same events on different PMUs. */
		if (evsel__is_alias(evsel, alias)) {
			evsel__merge_aggr_counters(evsel, alias);
			alias->merged_stat = true;
		}
	}
}
static bool evsel__should_merge_hybrid(const struct evsel *evsel,
				       const struct perf_stat_config *config)
{
	return config->hybrid_merge && evsel__is_hybrid(evsel);
}
static void evsel__merge_stats(struct evsel *evsel, struct perf_stat_config *config)
{
	/* this evsel is already merged */
	if (evsel->merged_stat)
		return;

	if (evsel->auto_merge_stats || evsel__should_merge_hybrid(evsel, config))
		evsel__merge_aliases(evsel);
}
/* merge the same uncore and hybrid events if requested */
void perf_stat_merge_counters(struct perf_stat_config *config, struct evlist *evlist)
{
	struct evsel *evsel;

	if (config->aggr_mode == AGGR_NONE)
		return;

	evlist__for_each_entry(evlist, evsel)
		evsel__merge_stats(evsel, config);
}
static void evsel__update_percore_stats(struct evsel *evsel, struct aggr_cpu_id *core_id)
{
	struct perf_stat_evsel *ps = evsel->stats;
	struct perf_counts_values counts = { 0, };
	struct aggr_cpu_id id;
	struct perf_cpu cpu;
	int idx;

	/* collect per-core counts */
	perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus) {
		struct perf_stat_aggr *aggr = &ps->aggr[idx];

		id = aggr_cpu_id__core(cpu, NULL);
		if (!aggr_cpu_id__equal(core_id, &id))
			continue;

		counts.val += aggr->counts.val;
		counts.ena += aggr->counts.ena;
		counts.run += aggr->counts.run;
	}

	/* update aggregated per-core counts for each CPU */
	perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus) {
		struct perf_stat_aggr *aggr = &ps->aggr[idx];

		id = aggr_cpu_id__core(cpu, NULL);
		if (!aggr_cpu_id__equal(core_id, &id))
			continue;

		aggr->counts.val = counts.val;
		aggr->counts.ena = counts.ena;
		aggr->counts.run = counts.run;

		aggr->used = true;
	}
}
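
/*
 * Example (illustrative, not from the original file): on an SMT-2 core
 * made of CPU0 and CPU4, the first pass above sums both CPUs' counts and
 * the second pass writes that sum back into both per-CPU aggr slots, so
 * either CPU can print the per-core total.
 */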
/* we have an aggr_map for cpu, but want to aggregate the counters per-core */
static void evsel__process_percore(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;
	struct aggr_cpu_id core_id;
	struct perf_cpu cpu;
	int idx;

	if (!evsel->percore)
		return;

	perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus) {
		struct perf_stat_aggr *aggr = &ps->aggr[idx];

		if (aggr->used)
			continue;

		core_id = aggr_cpu_id__core(cpu, NULL);
		evsel__update_percore_stats(evsel, &core_id);
	}
}
/* process cpu stats on per-core events */
void perf_stat_process_percore(struct perf_stat_config *config, struct evlist *evlist)
{
	struct evsel *evsel;

	if (config->aggr_mode != AGGR_NONE)
		return;

	evlist__for_each_entry(evlist, evsel)
		evsel__process_percore(evsel);
}
int perf_event__process_stat_event(struct perf_session *session,
				   union perf_event *event)
{
	struct perf_counts_values count, *ptr;
	struct perf_record_stat *st = &event->stat;
	struct evsel *counter;
	int cpu_map_idx;

	count.val = st->val;
	count.ena = st->ena;
	count.run = st->run;

	counter = evlist__id2evsel(session->evlist, st->id);
	if (!counter) {
		pr_err("Failed to resolve counter for stat event.\n");
		return -EINVAL;
	}
	cpu_map_idx = perf_cpu_map__idx(evsel__cpus(counter), (struct perf_cpu){.cpu = st->cpu});
	if (cpu_map_idx == -1) {
		pr_err("Invalid CPU %d for event %s.\n", st->cpu, evsel__name(counter));
		return -EINVAL;
	}
	ptr = perf_counts(counter->counts, cpu_map_idx, st->thread);
	if (ptr == NULL) {
		pr_err("Failed to find perf count for CPU %d thread %d on event %s.\n",
			st->cpu, st->thread, evsel__name(counter));
		return -EINVAL;
	}
	*ptr = count;
	counter->supported = true;
	return 0;
}
size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp)
{
	struct perf_record_stat *st = (struct perf_record_stat *)event;
	size_t ret;

	ret  = fprintf(fp, "\n... id %" PRI_lu64 ", cpu %d, thread %d\n",
		       st->id, st->cpu, st->thread);
	ret += fprintf(fp, "... value %" PRI_lu64 ", enabled %" PRI_lu64 ", running %" PRI_lu64 "\n",
		       st->val, st->ena, st->run);

	return ret;
}
size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp)
{
	struct perf_record_stat_round *rd = (struct perf_record_stat_round *)event;
	size_t ret;

	ret = fprintf(fp, "\n... time %" PRI_lu64 ", type %s\n", rd->time,
		      rd->type == PERF_STAT_ROUND_TYPE__FINAL ? "FINAL" : "INTERVAL");

	return ret;
}
size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)
{
	struct perf_stat_config sc = {};
	size_t ret;

	perf_event__read_stat_config(&sc, &event->stat_config);

	ret  = fprintf(fp, "\n");
	ret += fprintf(fp, "... aggr_mode %d\n", sc.aggr_mode);
	ret += fprintf(fp, "... scale     %d\n", sc.scale);
	ret += fprintf(fp, "... interval  %u\n", sc.interval);

	return ret;
}
int create_perf_stat_counter(struct evsel *evsel,
			     struct perf_stat_config *config,
			     struct target *target,
			     int cpu_map_idx)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	struct evsel *leader = evsel__leader(evsel);

	attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			    PERF_FORMAT_TOTAL_TIME_RUNNING;

	/*
	 * The event is part of a non-trivial group, let's enable
	 * the group read (for leader) and ID retrieval for all
	 * members.
	 */
	if (leader->core.nr_members > 1)
		attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;

	attr->inherit = !config->no_inherit && list_empty(&evsel->bpf_counter_list);

	/*
	 * Some events get initialized with sample_(period/type) set,
	 * like tracepoints.  Clear it up for counting.
	 */
	attr->sample_period = 0;

	if (config->identifier)
		attr->sample_type = PERF_SAMPLE_IDENTIFIER;

	if (config->all_user) {
		attr->exclude_kernel = 1;
		attr->exclude_user   = 0;
	}

	if (config->all_kernel) {
		attr->exclude_kernel = 0;
		attr->exclude_user   = 1;
	}

	/*
	 * Disable all counters initially; they will be enabled
	 * either manually by us or by the kernel via enable_on_exec
	 * later.
	 */
	if (evsel__is_group_leader(evsel)) {
		attr->disabled = 1;

		if (target__enable_on_exec(target))
			attr->enable_on_exec = 1;
	}

	if (target__has_cpu(target) && !target__has_per_thread(target))
		return evsel__open_per_cpu(evsel, evsel__cpus(evsel), cpu_map_idx);

	return evsel__open_per_thread(evsel, evsel->core.threads);
}
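
/*
 * Note (a sketch of the documented perf_event_open(2) ABI, not from the
 * original file): with the group read_format set up above, reading a
 * group leader returns:
 *
 *	struct read_format {
 *		u64 nr;			// PERF_FORMAT_GROUP
 *		u64 time_enabled;	// PERF_FORMAT_TOTAL_TIME_ENABLED
 *		u64 time_running;	// PERF_FORMAT_TOTAL_TIME_RUNNING
 *		struct {
 *			u64 value;
 *			u64 id;		// PERF_FORMAT_ID
 *		} values[nr];
 *	};
 */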