1 // SPDX-License-Identifier: GPL-2.0-only
7 #include "print-events.h"
9 #include "time-utils.h"
12 #include <api/fs/fs.h>
14 #include <internal/threadmap.h>
15 #include <perf/threadmap.h>
/*
 * Name table for the tool PMU's software events, indexed by
 * enum tool_pmu_event (the initializer entries are elided from this view).
 */
19 static const char *const tool_pmu__event_names
[TOOL_PMU__EVENT_MAX
] = {
/*
 * tool_pmu__skip_event - should the named tool event be hidden on this
 * architecture? "slots" is arm64-only and "system_tsc_freq" is x86-only;
 * matching is case-insensitive (strcasecmp). The return statements are
 * elided from this view; presumably a match returns true — confirm against
 * the full source.
 */
35 bool tool_pmu__skip_event(const char *name __maybe_unused
)
37 #if !defined(__aarch64__)
38 /* The slots event should only appear on arm64. */
39 if (strcasecmp(name
, "slots") == 0)
42 #if !defined(__i386__) && !defined(__x86_64__)
43 /* The system_tsc_freq event should only appear on x86. */
44 if (strcasecmp(name
, "system_tsc_freq") == 0)
/*
 * tool_pmu__num_skip_events - how many tool events tool_pmu__skip_event()
 * would hide on this architecture. The body is largely elided from this
 * view; the #if guards mirror the ones in tool_pmu__skip_event().
 */
50 int tool_pmu__num_skip_events(void)
54 #if !defined(__aarch64__)
57 #if !defined(__i386__) && !defined(__x86_64__)
/*
 * tool_pmu__event_to_str - map an enum tool_pmu_event to its name string.
 * Valid only for ev strictly between TOOL_PMU__EVENT_NONE and
 * TOOL_PMU__EVENT_MAX; the out-of-range fallback return is elided from this
 * view (presumably NULL — confirm against the full source).
 */
63 const char *tool_pmu__event_to_str(enum tool_pmu_event ev
)
65 if (ev
> TOOL_PMU__EVENT_NONE
&& ev
< TOOL_PMU__EVENT_MAX
)
66 return tool_pmu__event_names
[ev
];
/*
 * tool_pmu__str_to_event - inverse of tool_pmu__event_to_str(): find the
 * event whose name matches str, case-insensitively. Names rejected by
 * tool_pmu__skip_event() and unmatched names yield TOOL_PMU__EVENT_NONE.
 * The return-on-match statement inside the loop is elided from this view.
 */
71 enum tool_pmu_event
tool_pmu__str_to_event(const char *str
)
75 if (tool_pmu__skip_event(str
))
76 return TOOL_PMU__EVENT_NONE
;
78 tool_pmu__for_each_event(i
) {
79 if (!strcasecmp(str
, tool_pmu__event_names
[i
]))
82 return TOOL_PMU__EVENT_NONE
;
/* perf_pmu__is_tool - true iff pmu is non-NULL and has the tool PMU type. */
85 bool perf_pmu__is_tool(const struct perf_pmu
*pmu
)
87 return pmu
&& pmu
->type
== PERF_PMU_TYPE_TOOL
;
/* evsel__is_tool - true iff the evsel's PMU is the tool PMU. */
90 bool evsel__is_tool(const struct evsel
*evsel
)
92 return perf_pmu__is_tool(evsel
->pmu
);
/*
 * evsel__tool_event - decode which tool event an evsel encodes. For a tool
 * PMU evsel the event number is carried in core.attr.config; any non-tool
 * evsel yields TOOL_PMU__EVENT_NONE.
 */
95 enum tool_pmu_event
evsel__tool_event(const struct evsel
*evsel
)
97 if (!evsel__is_tool(evsel
))
98 return TOOL_PMU__EVENT_NONE
;
100 return (enum tool_pmu_event
)evsel
->core
.attr
.config
;
/*
 * evsel__tool_pmu_event_name - name of the tool event stored in the evsel's
 * attr.config (via tool_pmu__event_to_str()).
 */
103 const char *evsel__tool_pmu_event_name(const struct evsel
*evsel
)
105 return tool_pmu__event_to_str(evsel
->core
.attr
.config
);
/*
 * read_until_char - consume characters from io until 'e' is seen. The loop
 * and return statements are elided from this view; callers treat a
 * false result as "terminator not found" — confirm the exact contract in
 * the full source.
 */
108 static bool read_until_char(struct io
*io
, char e
)
113 c
= io__get_char(io
);
/*
 * read_stat_field - parse one numeric field of the "cpuN" line for the
 * given CPU from fd (callers pass an open /proc/stat fd). Skips cpu.cpu + 1
 * newlines to reach the right line, verifies the "cpu" prefix character by
 * character, skips the N of "cpuN", then — in lines elided from this view —
 * presumably advances 'field' space-separated values and stores the result
 * in *val. Any parse mismatch returns -EINVAL.
 */
120 static int read_stat_field(int fd
, struct perf_cpu cpu
, int field
, __u64
*val
)
126 io__init(&io
, fd
, buf
, sizeof(buf
));
128 /* Skip lines to relevant CPU. */
129 for (i
= -1; i
< cpu
.cpu
; i
++) {
130 if (!read_until_char(&io
, '\n'))
134 if (io__get_char(&io
) != 'c') return -EINVAL
;
135 if (io__get_char(&io
) != 'p') return -EINVAL
;
136 if (io__get_char(&io
) != 'u') return -EINVAL
;
138 /* Skip N of cpuN. */
139 if (!read_until_char(&io
, ' '))
144 if (io__get_dec(&io
, val
) != ' ')
/*
 * read_pid_stat_field - parse field number 'field' of a /proc/<pid>/stat
 * style stream into *val. Field 1 is the leading pid (read as a decimal);
 * the "(comm)" string and the single-char state that follow cannot be
 * returned through a __u64 *, hence the -EINVAL returns; the remaining
 * fields are numeric and scanned with io__get_dec(), with a negative-sign
 * case handled separately. Several branches and the final return are
 * elided from this view.
 */
153 static int read_pid_stat_field(int fd
, int field
, __u64
*val
)
159 io__init(&io
, fd
, buf
, sizeof(buf
));
160 if (io__get_dec(&io
, val
) != ' ')
166 if (io__get_char(&io
) != '(' || !read_until_char(&io
, ')'))
169 return -EINVAL
; /* String can't be returned. */
172 if (io__get_char(&io
) != ' ' || io__get_char(&io
) == -1)
175 return -EINVAL
; /* String can't be returned. */
177 /* Loop over numeric fields*/
178 if (io__get_char(&io
) != ' ')
183 c
= io__get_dec(&io
, val
);
187 /* Assume a -ve was read */
188 c
= io__get_dec(&io
, val
);
/*
 * evsel__tool_pmu_prepare_open - pre-open setup for tool events. For
 * user_time/system_time, lazily allocate evsel->start_times, an xyarray of
 * per-(cpu, thread) baseline readings sized by perf_cpu_map__nr(cpus).
 * The allocation-failure return path is elided from this view.
 */
200 int evsel__tool_pmu_prepare_open(struct evsel
*evsel
,
201 struct perf_cpu_map
*cpus
,
204 if ((evsel__tool_event(evsel
) == TOOL_PMU__EVENT_SYSTEM_TIME
||
205 evsel__tool_event(evsel
) == TOOL_PMU__EVENT_USER_TIME
) &&
206 !evsel
->start_times
) {
207 evsel
->start_times
= xyarray__new(perf_cpu_map__nr(cpus
),
210 if (!evsel
->start_times
)
/* Accessor for the per-(cpu idx, thread) fd slot stored in e->core.fd. */
216 #define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
/*
 * evsel__tool_pmu_open - "open" a tool event over [start_cpu_map_idx,
 * end_cpu_map_idx) and the thread map. num_cpus needs no per-fd state;
 * duration_time records a start timestamp via rdclock() (sampling is not
 * supported when attr.sample_period is set). user_time/system_time open
 * /proc/<pid>/stat (per-thread; fields 15/14 = system/user) or /proc/stat
 * (system-wide; fields 3/1), stash the fd via FD(), and read a baseline
 * value into evsel->start_times. On failure, fds opened so far are closed
 * and threads->err_thread records the failing thread. Error labels, braces
 * and returns are elided from this view.
 */
218 int evsel__tool_pmu_open(struct evsel
*evsel
,
219 struct perf_thread_map
*threads
,
220 int start_cpu_map_idx
, int end_cpu_map_idx
)
222 enum tool_pmu_event ev
= evsel__tool_event(evsel
);
223 int pid
= -1, idx
= 0, thread
= 0, nthreads
, err
= 0, old_errno
;
225 if (ev
== TOOL_PMU__EVENT_NUM_CPUS
)
228 if (ev
== TOOL_PMU__EVENT_DURATION_TIME
) {
229 if (evsel
->core
.attr
.sample_period
) /* no sampling */
231 evsel
->start_time
= rdclock();
/*
 * In cgroup mode the cgroup fd stands in for the pid below; presumably
 * guarded by an evsel->cgrp test elided from this view — confirm.
 */
236 pid
= evsel
->cgrp
->fd
;
238 nthreads
= perf_thread_map__nr(threads
);
239 for (idx
= start_cpu_map_idx
; idx
< end_cpu_map_idx
; idx
++) {
240 for (thread
= 0; thread
< nthreads
; thread
++) {
241 if (thread
>= nthreads
)
244 if (!evsel
->cgrp
&& !evsel
->core
.system_wide
)
245 pid
= perf_thread_map__pid(threads
, thread
);
247 if (ev
== TOOL_PMU__EVENT_USER_TIME
|| ev
== TOOL_PMU__EVENT_SYSTEM_TIME
) {
248 bool system
= ev
== TOOL_PMU__EVENT_SYSTEM_TIME
;
249 __u64
*start_time
= NULL
;
252 if (evsel
->core
.attr
.sample_period
) {
/* Per-thread: read times from the thread's own stat file. */
260 snprintf(buf
, sizeof(buf
), "/proc/%d/stat", pid
);
261 fd
= open(buf
, O_RDONLY
);
262 evsel
->pid_stat
= true;
/* System-wide: read per-CPU times from /proc/stat. */
264 fd
= open("/proc/stat", O_RDONLY
);
266 FD(evsel
, idx
, thread
) = fd
;
/* Record the baseline so later reads can report a delta. */
271 start_time
= xyarray__entry(evsel
->start_times
, idx
, thread
);
273 err
= read_pid_stat_field(fd
, system
? 15 : 14,
278 cpu
= perf_cpu_map__cpu(evsel
->core
.cpus
, idx
);
279 err
= read_stat_field(fd
, cpu
, system
? 3 : 1,
/* Error unwind: remember the failing thread, then close opened fds. */
291 threads
->err_thread
= thread
;
295 while (--thread
>= 0) {
296 if (FD(evsel
, idx
, thread
) >= 0)
297 close(FD(evsel
, idx
, thread
));
298 FD(evsel
, idx
, thread
) = -1;
301 } while (--idx
>= 0);
/*
 * Fallback arch_get_tsc_freq for non-x86 builds (the #if excludes i386 and
 * x86_64). Body elided from this view — presumably returns 0, making the
 * system_tsc_freq event read as unavailable; confirm in the full source.
 */
306 #if !defined(__i386__) && !defined(__x86_64__)
307 u64
arch_get_tsc_freq(void)
/*
 * Fallback tool_pmu__cpu_slots_per_cycle for non-arm64 builds. Body elided
 * from this view — presumably returns 0 so the "slots" event read fails
 * (tool_pmu__read_event() returns *result ? true : false); confirm.
 */
313 #if !defined(__aarch64__)
314 u64
tool_pmu__cpu_slots_per_cycle(void)
/*
 * has_pmem - detect persistent memory by probing for an ACPI NFIT table at
 * "<sysfs>/firmware/acpi/tables/NFIT" with access(F_OK). The answer is
 * computed once and kept in function-local statics; the 'cached' guard
 * logic is partly elided from this view.
 */
320 static bool has_pmem(void)
322 static bool has_pmem
, cached
;
323 const char *sysfs
= sysfs__mountpoint();
327 snprintf(path
, sizeof(path
), "%s/firmware/acpi/tables/NFIT", sysfs
);
328 has_pmem
= access(path
, F_OK
) == 0;
/*
 * tool_pmu__read_event - compute the value of a "constant-style" tool event
 * into *result; returns false for events that cannot be read this way.
 * Topology counts come from online_topology(); num_cpus from the max
 * present CPU; num_cpus_online from cpu_map__online(). The time-based
 * events (duration/user/system) and NONE/MAX fall through to the final
 * cases and are presumably rejected (return elided from this view).
 */
334 bool tool_pmu__read_event(enum tool_pmu_event ev
, u64
*result
)
336 const struct cpu_topology
*topology
;
339 case TOOL_PMU__EVENT_HAS_PMEM
:
340 *result
= has_pmem() ? 1 : 0;
343 case TOOL_PMU__EVENT_NUM_CORES
:
344 topology
= online_topology();
345 *result
= topology
->core_cpus_lists
;
348 case TOOL_PMU__EVENT_NUM_CPUS
:
349 *result
= cpu__max_present_cpu().cpu
;
352 case TOOL_PMU__EVENT_NUM_CPUS_ONLINE
: {
353 struct perf_cpu_map
*online
= cpu_map__online();
356 *result
= perf_cpu_map__nr(online
);
361 case TOOL_PMU__EVENT_NUM_DIES
:
362 topology
= online_topology();
363 *result
= topology
->die_cpus_lists
;
366 case TOOL_PMU__EVENT_NUM_PACKAGES
:
367 topology
= online_topology();
368 *result
= topology
->package_cpus_lists
;
371 case TOOL_PMU__EVENT_SLOTS
:
372 *result
= tool_pmu__cpu_slots_per_cycle();
/* A zero slots-per-cycle means the value is unknown: report failure. */
373 return *result
? true : false;
375 case TOOL_PMU__EVENT_SMT_ON
:
376 *result
= smt_on() ? 1 : 0;
379 case TOOL_PMU__EVENT_SYSTEM_TSC_FREQ
:
380 *result
= arch_get_tsc_freq();
383 case TOOL_PMU__EVENT_NONE
:
384 case TOOL_PMU__EVENT_DURATION_TIME
:
385 case TOOL_PMU__EVENT_USER_TIME
:
386 case TOOL_PMU__EVENT_SYSTEM_TIME
:
387 case TOOL_PMU__EVENT_MAX
:
/*
 * evsel__tool_pmu_read - produce a counter value for a tool event into
 * perf_counts(evsel->counts, cpu_map_idx, thread). Constant-style events
 * (has_pmem .. system_tsc_freq) are read via tool_pmu__read_event() only on
 * cpu_map_idx 0 / thread 0, accumulated on top of prev_raw_counts when
 * present. duration_time is measured with rdclock() on the first CPU/thread
 * only (see the in-body comment) so aggregation does not multiply it.
 * user_time/system_time rewind and re-read the stat fd saved at open time
 * and report the delta from the start_times baseline, converted from clock
 * ticks to nanoseconds. Switch header, braces and returns are elided from
 * this view.
 */
393 int evsel__tool_pmu_read(struct evsel
*evsel
, int cpu_map_idx
, int thread
)
395 __u64
*start_time
, cur_time
, delta_start
;
398 struct perf_counts_values
*count
, *old_count
= NULL
;
400 enum tool_pmu_event ev
= evsel__tool_event(evsel
);
402 count
= perf_counts(evsel
->counts
, cpu_map_idx
, thread
);
405 case TOOL_PMU__EVENT_HAS_PMEM
:
406 case TOOL_PMU__EVENT_NUM_CORES
:
407 case TOOL_PMU__EVENT_NUM_CPUS
:
408 case TOOL_PMU__EVENT_NUM_CPUS_ONLINE
:
409 case TOOL_PMU__EVENT_NUM_DIES
:
410 case TOOL_PMU__EVENT_NUM_PACKAGES
:
411 case TOOL_PMU__EVENT_SLOTS
:
412 case TOOL_PMU__EVENT_SMT_ON
:
413 case TOOL_PMU__EVENT_SYSTEM_TSC_FREQ
:
414 if (evsel
->prev_raw_counts
)
415 old_count
= perf_counts(evsel
->prev_raw_counts
, cpu_map_idx
, thread
);
417 if (cpu_map_idx
== 0 && thread
== 0) {
418 if (!tool_pmu__read_event(ev
, &val
)) {
/* Accumulate on top of the previous counts snapshot. */
424 count
->val
= old_count
->val
+ val
;
425 count
->run
= old_count
->run
+ 1;
426 count
->ena
= old_count
->ena
+ 1;
433 case TOOL_PMU__EVENT_DURATION_TIME
:
435 * Pretend duration_time is only on the first CPU and thread, or
436 * else aggregation will scale duration_time by the number of
439 start_time
= &evsel
->start_time
;
440 if (cpu_map_idx
== 0 && thread
== 0)
441 cur_time
= rdclock();
/* Other CPUs/threads report zero delta (cur_time == *start_time). */
443 cur_time
= *start_time
;
445 case TOOL_PMU__EVENT_USER_TIME
:
446 case TOOL_PMU__EVENT_SYSTEM_TIME
: {
447 bool system
= evsel__tool_event(evsel
) == TOOL_PMU__EVENT_SYSTEM_TIME
;
449 start_time
= xyarray__entry(evsel
->start_times
, cpu_map_idx
, thread
);
450 fd
= FD(evsel
, cpu_map_idx
, thread
);
/*
 * NOTE(review): args read as lseek(fd, SEEK_SET, 0) — offset and whence
 * look swapped vs lseek(fd, off, whence); harmless only because
 * SEEK_SET == 0, so this is still a rewind to offset 0. Confirm upstream.
 */
451 lseek(fd
, SEEK_SET
, 0);
452 if (evsel
->pid_stat
) {
453 /* The event exists solely on 1 CPU. */
454 if (cpu_map_idx
== 0)
455 err
= read_pid_stat_field(fd
, system
? 15 : 14, &cur_time
);
459 /* The event is for all threads. */
461 struct perf_cpu cpu
= perf_cpu_map__cpu(evsel
->core
.cpus
,
464 err
= read_stat_field(fd
, cpu
, system
? 3 : 1, &cur_time
);
472 case TOOL_PMU__EVENT_NONE
:
473 case TOOL_PMU__EVENT_MAX
:
480 delta_start
= cur_time
- *start_time
;
482 __u64 ticks_per_sec
= sysconf(_SC_CLK_TCK
);
/*
 * Convert clock ticks to nanoseconds. Integer division: exact only when
 * ticks_per_sec divides 1e9 (true for the usual 100/250/1000 Hz values).
 */
484 delta_start
*= 1000000000 / ticks_per_sec
;
486 count
->val
= delta_start
;
487 count
->ena
= count
->run
= delta_start
;
492 struct perf_pmu
*perf_pmus__tool_pmu(void)
494 static struct perf_pmu tool
= {
496 .type
= PERF_PMU_TYPE_TOOL
,
497 .aliases
= LIST_HEAD_INIT(tool
.aliases
),
498 .caps
= LIST_HEAD_INIT(tool
.caps
),
499 .format
= LIST_HEAD_INIT(tool
.format
),
501 if (!tool
.events_table
)
502 tool
.events_table
= find_core_events_table("common", "common");