1 // SPDX-License-Identifier: GPL-2.0
3 #include <sys/utsname.h>
8 #include <linux/zalloc.h>
9 #include <perf/cpumap.h>
/*
 * scnprintf() format strings for the sysfs files this file reads.
 * "%s" is the sysfs mount point (sysfs__mountpoint()), "%d" the CPU or
 * NUMA-node number.  The *_OLD names are the pre-rename sysfs spellings
 * (core_siblings_list / thread_siblings_list) used as fallbacks when the
 * newer file names are absent.
 */
/* Per-package CPU list for one CPU (new name, then legacy fallback). */
18 #define PACKAGE_CPUS_FMT \
19 "%s/devices/system/cpu/cpu%d/topology/package_cpus_list"
20 #define PACKAGE_CPUS_FMT_OLD \
21 "%s/devices/system/cpu/cpu%d/topology/core_siblings_list"
/* Per-die CPU list (no legacy variant; file may simply not exist). */
22 #define DIE_CPUS_FMT \
23 "%s/devices/system/cpu/cpu%d/topology/die_cpus_list"
/* Per-core (SMT sibling) CPU list (new name, then legacy fallback). */
24 #define CORE_CPUS_FMT \
25 "%s/devices/system/cpu/cpu%d/topology/core_cpus_list"
26 #define CORE_CPUS_FMT_OLD \
27 "%s/devices/system/cpu/cpu%d/topology/thread_siblings_list"
/* NUMA: list of online nodes, then per-node meminfo and cpulist files. */
28 #define NODE_ONLINE_FMT \
29 "%s/devices/system/node/online"
30 #define NODE_MEMINFO_FMT \
31 "%s/devices/system/node/node%d/meminfo"
32 #define NODE_CPULIST_FMT \
33 "%s/devices/system/node/node%d/cpulist"
/*
 * build_cpu_topology - record the package/die/core cpu-list strings for
 * one CPU into *tp, de-duplicating against lists already stored.
 *
 * For each of the three topology levels it formats the sysfs path
 * (falling back to the legacy *_siblings_list name for package/core when
 * the new file is missing), reads one line with getline(), strips the
 * trailing '\n', linearly scans the lists recorded so far, and — if this
 * CPU's list is new — stores the getline() buffer (ownership moves into
 * tp) and bumps the matching *_lists counter.
 *
 * NOTE(review): this extraction is lossy — local declarations, fclose()
 * calls, error-path cleanup and the return statements are not visible
 * here; confirm against the complete file before modifying.
 */
35 static int build_cpu_topology(struct cpu_topology
*tp
, int cpu
)
38 char filename
[MAXPATHLEN
];
/* --- package level: prefer package_cpus_list, fall back to the
 * legacy core_siblings_list spelling when it does not exist. --- */
45 scnprintf(filename
, MAXPATHLEN
, PACKAGE_CPUS_FMT
,
46 sysfs__mountpoint(), cpu
);
47 if (access(filename
, F_OK
) == -1) {
48 scnprintf(filename
, MAXPATHLEN
, PACKAGE_CPUS_FMT_OLD
,
49 sysfs__mountpoint(), cpu
);
51 fp
= fopen(filename
, "r");
55 sret
= getline(&buf
, &len
, fp
);
/* Strip the trailing newline so stored lists compare cleanly. */
60 p
= strchr(buf
, '\n');
/* De-duplicate: only store this package list if unseen so far. */
64 for (i
= 0; i
< tp
->package_cpus_lists
; i
++) {
65 if (!strcmp(buf
, tp
->package_cpus_list
[i
]))
68 if (i
== tp
->package_cpus_lists
) {
/* New list: keep the getline() buffer; tp now owns it. */
69 tp
->package_cpus_list
[i
] = buf
;
70 tp
->package_cpus_lists
++;
/* --- die level: only attempted when tp->die_cpus_list was allocated
 * (i.e. the platform has die topology) — see the guard below. --- */
77 if (!tp
->die_cpus_list
)
80 scnprintf(filename
, MAXPATHLEN
, DIE_CPUS_FMT
,
81 sysfs__mountpoint(), cpu
);
82 fp
= fopen(filename
, "r");
86 sret
= getline(&buf
, &len
, fp
);
91 p
= strchr(buf
, '\n');
/* De-duplicate die lists exactly like package lists above. */
95 for (i
= 0; i
< tp
->die_cpus_lists
; i
++) {
96 if (!strcmp(buf
, tp
->die_cpus_list
[i
]))
99 if (i
== tp
->die_cpus_lists
) {
100 tp
->die_cpus_list
[i
] = buf
;
101 tp
->die_cpus_lists
++;
/* --- core (SMT sibling) level: prefer core_cpus_list, fall back to
 * the legacy thread_siblings_list spelling. --- */
108 scnprintf(filename
, MAXPATHLEN
, CORE_CPUS_FMT
,
109 sysfs__mountpoint(), cpu
);
110 if (access(filename
, F_OK
) == -1) {
111 scnprintf(filename
, MAXPATHLEN
, CORE_CPUS_FMT_OLD
,
112 sysfs__mountpoint(), cpu
);
114 fp
= fopen(filename
, "r");
118 if (getline(&buf
, &len
, fp
) <= 0)
121 p
= strchr(buf
, '\n');
/* De-duplicate core lists as above. */
125 for (i
= 0; i
< tp
->core_cpus_lists
; i
++) {
126 if (!strcmp(buf
, tp
->core_cpus_list
[i
]))
129 if (i
== tp
->core_cpus_lists
) {
130 tp
->core_cpus_list
[i
] = buf
;
131 tp
->core_cpus_lists
++;
/*
 * cpu_topology__delete - release every cpu-list string owned by *tp.
 * zfree() frees each stored string and NULLs the slot.
 * NOTE(review): the loop counter declaration, the NULL-tp guard (if
 * any) and the final free of tp itself are not visible in this
 * extraction — verify against the complete file.
 */
142 void cpu_topology__delete(struct cpu_topology
*tp
)
/* Free the de-duplicated package cpu-list strings. */
149 for (i
= 0 ; i
< tp
->package_cpus_lists
; i
++)
150 zfree(&tp
->package_cpus_list
[i
]);
/* Free the die cpu-list strings. */
152 for (i
= 0 ; i
< tp
->die_cpus_lists
; i
++)
153 zfree(&tp
->die_cpus_list
[i
]);
/* Free the core (SMT sibling) cpu-list strings. */
155 for (i
= 0 ; i
< tp
->core_cpus_lists
; i
++)
156 zfree(&tp
->core_cpus_list
[i
]);
/*
 * cpu_topology__smt_on - detect whether any core has multiple SMT
 * threads.  A core cpu-list containing ',' or '-' names more than one
 * CPU, so siblings exist and SMT is on.
 * NOTE(review): the actual return statements fall in lines missing from
 * this extraction — presumably "return true" on a multi-CPU list and
 * "return false" after the loop; confirm against the complete file.
 */
161 bool cpu_topology__smt_on(const struct cpu_topology
*topology
)
163 for (u32 i
= 0; i
< topology
->core_cpus_lists
; i
++) {
164 const char *cpu_list
= topology
->core_cpus_list
[i
];
167 * If there is a need to separate siblings in a core then SMT is
/* A ',' or '-' means this core's list spans more than one CPU. */
170 if (strchr(cpu_list
, ',') || strchr(cpu_list
, '-'))
/*
 * cpu_topology__core_wide - return whether the user-requested CPU set
 * covers whole cores: for every core, either all of its SMT threads are
 * requested or none of them are.
 * NOTE(review): the return statements, the "first" flag update inside
 * the per-CPU loop, and any NULL-map handling are in lines missing from
 * this extraction — confirm against the complete file.
 */
176 bool cpu_topology__core_wide(const struct cpu_topology
*topology
,
177 const char *user_requested_cpu_list
)
179 struct perf_cpu_map
*user_requested_cpus
;
182 * If user_requested_cpu_list is empty then all CPUs are recorded and so
/* NULL list means "all CPUs" — trivially core-wide. */
185 if (!user_requested_cpu_list
)
/* Parse the user's cpu-list string into a perf_cpu_map. */
188 user_requested_cpus
= perf_cpu_map__new(user_requested_cpu_list
)
189 /* Check that every user requested CPU is the complete set of SMT threads on a core. */
190 for (u32 i
= 0; i
< topology
->core_cpus_lists
; i
++) {
191 const char *core_cpu_list
= topology
->core_cpus_list
[i
];
192 struct perf_cpu_map
*core_cpus
= perf_cpu_map__new(core_cpu_list
);
195 bool has_first
, first
= true;
197 perf_cpu_map__for_each_cpu(cpu
, idx
, core_cpus
) {
/* Remember whether the core's first CPU was requested. */
199 has_first
= perf_cpu_map__has(user_requested_cpus
, cpu
);
203 * If the first core CPU is user requested then
204 * all subsequent CPUs in the core must be user
205 * requested too. If the first CPU isn't user
206 * requested then none of the others must be
209 if (perf_cpu_map__has(user_requested_cpus
, cpu
) != has_first
) {
/* Mismatch: release both maps before bailing out. */
210 perf_cpu_map__put(core_cpus
);
211 perf_cpu_map__put(user_requested_cpus
);
/* Done with this core's map. */
216 perf_cpu_map__put(core_cpus
);
/* All cores consistent: drop the reference before returning. */
218 perf_cpu_map__put(user_requested_cpus
);
/*
 * has_die_topology - true when this system exposes per-die topology.
 * Only x86_64 and s390x machines are considered (strncmp() against the
 * utsname machine field), and the die_cpus_list sysfs file for cpu0 must
 * exist.
 * NOTE(review): the declaration of 'uts', the uname() call and the
 * return statements are in lines missing from this extraction — confirm
 * against the complete file.
 */
222 static bool has_die_topology(void)
224 char filename
[MAXPATHLEN
];
/* Bail out unless the machine string is "x86_64" or "s390x". */
230 if (strncmp(uts
.machine
, "x86_64", 6) &&
231 strncmp(uts
.machine
, "s390x", 5))
/* Probe the die_cpus_list file for cpu0. */
234 scnprintf(filename
, MAXPATHLEN
, DIE_CPUS_FMT
,
235 sysfs__mountpoint(), 0);
236 if (access(filename
, F_OK
) == -1)
/*
 * online_topology - lazily create and cache the host CPU topology in a
 * function-static pointer; log an error if creation fails.
 * NOTE(review): the guard around the cpu_topology__new() call and the
 * return statement are in lines missing from this extraction — confirm
 * against the complete file.  Presumably not thread-safe (unsynchronized
 * static initialization) — verify callers.
 */
242 const struct cpu_topology
*online_topology(void)
244 static const struct cpu_topology
*topology
;
247 topology
= cpu_topology__new();
249 pr_err("Error creating CPU topology");
/*
 * cpu_topology__new - allocate a cpu_topology and populate it by calling
 * build_cpu_topology() for every online CPU.
 *
 * One calloc() carves out the struct plus the string-pointer arrays for
 * the package/die/core levels (nr_addr arrays of sz bytes each); 'addr'
 * is advanced across the allocation to set the three *_list pointers.
 * Offline CPUs (absent from the online map) are skipped.
 *
 * NOTE(review): declarations of ncpus/nr/sz/addr/nr_addr/i/ret, the
 * calloc failure check, the addr advancing between the list
 * assignments, the has_die gating of die_cpus_list, and the
 * success/error returns are in lines missing from this extraction —
 * confirm against the complete file.  Caller frees the result with
 * cpu_topology__delete().
 */
256 struct cpu_topology
*cpu_topology__new(void)
258 struct cpu_topology
*tp
= NULL
;
264 struct perf_cpu_map
*map
;
265 bool has_die
= has_die_topology();
267 ncpus
= cpu__max_present_cpu().cpu
;
269 /* build online CPU map */
270 map
= perf_cpu_map__new_online_cpus();
272 pr_debug("failed to get system cpumap\n");
/* Clamp the present-CPU count into a u32. */
276 nr
= (u32
)(ncpus
& UINT_MAX
);
/* Bytes needed for one per-level array of nr string pointers. */
278 sz
= nr
* sizeof(char *);
/* Single allocation: struct + nr_addr pointer arrays. */
283 addr
= calloc(1, sizeof(*tp
) + nr_addr
* sz
);
/* Carve the pointer arrays out of the single allocation. */
289 tp
->package_cpus_list
= addr
;
292 tp
->die_cpus_list
= addr
;
295 tp
->core_cpus_list
= addr
;
/* Populate from sysfs for each online CPU; skip offline ones. */
297 for (i
= 0; i
< nr
; i
++) {
298 if (!perf_cpu_map__has(map
, (struct perf_cpu
){ .cpu
= i
}))
301 ret
= build_cpu_topology(tp
, i
);
307 perf_cpu_map__put(map
);
/* Error path: release the partially-built topology. */
309 cpu_topology__delete(tp
);
/*
 * load_numa_node - fill one numa_topology_node for node 'nr': parse
 * MemTotal/MemFree (in the meminfo file's units) from the node's sysfs
 * meminfo, then read the node's cpulist string.
 *
 * NOTE(review): declarations of fp/len/field/mem, the fopen failure
 * checks, fclose()/free(buf) cleanup, storing the cpulist into
 * node->cpus, and the return statements are in lines missing from this
 * extraction — confirm against the complete file.
 */
315 static int load_numa_node(struct numa_topology_node
*node
, int nr
)
317 char str
[MAXPATHLEN
];
319 char *buf
= NULL
, *p
;
/* Record the node number. */
325 node
->node
= (u32
) nr
;
/* --- parse /sys/devices/system/node/node<nr>/meminfo --- */
327 scnprintf(str
, MAXPATHLEN
, NODE_MEMINFO_FMT
,
328 sysfs__mountpoint(), nr
);
329 fp
= fopen(str
, "r");
333 while (getline(&buf
, &len
, fp
) > 0) {
334 /* skip over invalid lines */
335 if (!strchr(buf
, ':'))
/* meminfo line shape: "Node <n> <Field:> <value> ...". */
337 if (sscanf(buf
, "%*s %*d %31s %"PRIu64
, field
, &mem
) != 2)
339 if (!strcmp(field
, "MemTotal:"))
340 node
->mem_total
= mem
;
341 if (!strcmp(field
, "MemFree:"))
342 node
->mem_free
= mem
;
/* Stop scanning once both figures were found. */
343 if (node
->mem_total
&& node
->mem_free
)
/* --- read /sys/devices/system/node/node<nr>/cpulist --- */
350 scnprintf(str
, MAXPATHLEN
, NODE_CPULIST_FMT
,
351 sysfs__mountpoint(), nr
);
353 fp
= fopen(str
, "r");
357 if (getline(&buf
, &len
, fp
) <= 0)
/* Strip the trailing newline from the cpulist. */
360 p
= strchr(buf
, '\n');
/*
 * numa_topology__new - build a numa_topology describing every online
 * NUMA node: read the node "online" list from sysfs, parse it into a
 * perf_cpu_map (one entry per node number), allocate the struct with a
 * trailing nodes[] array sized to the node count, and load each node
 * via load_numa_node().
 *
 * NOTE(review): declarations of fp/buf/len/c/i/nr, fopen/zalloc failure
 * checks, fclose()/free(buf) cleanup, setting tp->nr, and the return
 * statements are in lines missing from this extraction — confirm
 * against the complete file.  Caller frees with numa_topology__delete().
 */
375 struct numa_topology
*numa_topology__new(void)
377 struct perf_cpu_map
*node_map
= NULL
;
378 struct numa_topology
*tp
= NULL
;
379 char path
[MAXPATHLEN
];
/* Read the online-node list, e.g. "0-1". */
386 scnprintf(path
, MAXPATHLEN
, NODE_ONLINE_FMT
,
387 sysfs__mountpoint());
389 fp
= fopen(path
, "r");
393 if (getline(&buf
, &len
, fp
) <= 0)
/* Strip the trailing newline before parsing. */
396 c
= strchr(buf
, '\n');
/* Reuse the cpu-map parser for the node-number list. */
400 node_map
= perf_cpu_map__new(buf
);
404 nr
= (u32
) perf_cpu_map__nr(node_map
);
/* Struct plus trailing flexible nodes[] array, zeroed. */
406 tp
= zalloc(sizeof(*tp
) + sizeof(tp
->nodes
[0])*nr
);
/* Load every node; tear everything down on the first failure. */
412 for (i
= 0; i
< nr
; i
++) {
413 if (load_numa_node(&tp
->nodes
[i
], perf_cpu_map__cpu(node_map
, i
).cpu
)) {
414 numa_topology__delete(tp
);
423 perf_cpu_map__put(node_map
);
/*
 * numa_topology__delete - free each node's cpus string (zfree() NULLs
 * the slot) across all tp->nr nodes.
 * NOTE(review): the loop counter declaration and the final free of tp
 * itself are in lines missing from this extraction — confirm against
 * the complete file.
 */
427 void numa_topology__delete(struct numa_topology
*tp
)
431 for (i
= 0; i
< tp
->nr
; i
++)
432 zfree(&tp
->nodes
[i
].cpus
);
/*
 * load_hybrid_node - fill one hybrid_topology_node from a core PMU:
 * duplicate the PMU name and read its "cpus" sysfs file (one line,
 * newline stripped).
 *
 * NOTE(review): declarations of fp/len, the strdup/open failure checks,
 * storing the cpus string into node->cpus, fclose()/free(buf) cleanup
 * and the return statements are in lines missing from this extraction —
 * confirm against the complete file.  The visible zfree() of
 * node->pmu_name is the error-path cleanup.
 */
437 static int load_hybrid_node(struct hybrid_topology_node
*node
,
438 struct perf_pmu
*pmu
)
440 char *buf
= NULL
, *p
;
/* Own a copy of the PMU name; freed below on error. */
444 node
->pmu_name
= strdup(pmu
->name
);
448 fp
= perf_pmu__open_file(pmu
, "cpus");
452 if (getline(&buf
, &len
, fp
) <= 0) {
/* Strip the trailing newline from the cpus list. */
457 p
= strchr(buf
, '\n');
/* Error path: release the duplicated PMU name. */
466 zfree(&node
->pmu_name
);
/*
 * hybrid_topology__new - build a hybrid_topology with one node per core
 * PMU: count the core PMUs, allocate the struct with a trailing nodes[]
 * array, then iterate perf_pmus__scan_core() loading each node via
 * load_hybrid_node(); on any failure the partial topology is deleted.
 *
 * NOTE(review): the nr==0 / allocation-failure guards, the increment of
 * 'i', setting tp->nr, and the return statements are in lines missing
 * from this extraction — confirm against the complete file.  Caller
 * frees with hybrid_topology__delete().
 */
471 struct hybrid_topology
*hybrid_topology__new(void)
473 struct perf_pmu
*pmu
= NULL
;
474 struct hybrid_topology
*tp
= NULL
;
475 int nr
= perf_pmus__num_core_pmus(), i
= 0;
/* Struct plus trailing nodes[] array sized to the PMU count, zeroed. */
480 tp
= zalloc(sizeof(*tp
) + sizeof(tp
->nodes
[0]) * nr
);
/* Scan all core PMUs; NULL seed starts the iteration. */
485 while ((pmu
= perf_pmus__scan_core(pmu
)) != NULL
) {
486 if (load_hybrid_node(&tp
->nodes
[i
], pmu
)) {
/* Failure: tear down everything loaded so far. */
487 hybrid_topology__delete(tp
);
496 void hybrid_topology__delete(struct hybrid_topology
*tp
)
500 for (i
= 0; i
< tp
->nr
; i
++) {
501 zfree(&tp
->nodes
[i
].pmu_name
);
502 zfree(&tp
->nodes
[i
].cpus
);